1720 files changed, 27397 insertions, 14806 deletions
diff --git a/Documentation/ABI/testing/sysfs-driver-ge-achc b/Documentation/ABI/testing/sysfs-driver-ge-achc new file mode 100644 index 000000000000..a9e7a079190c --- /dev/null +++ b/Documentation/ABI/testing/sysfs-driver-ge-achc @@ -0,0 +1,15 @@ +What: /sys/bus/spi/<dev>/update_firmware +Date: Jul 2021 +Contact: sebastian.reichel@collabora.com +Description: Write 1 to this file to update the ACHC microcontroller + firmware via the EzPort interface. For this the kernel + will load "achc.bin" via the firmware API (so usually + from /lib/firmware). The write will block until the FW + has either been flashed successfully or an error occurred. + +What: /sys/bus/spi/<dev>/reset +Date: Jul 2021 +Contact: sebastian.reichel@collabora.com +Description: This file represents the microcontroller's reset line. + 1 means the reset line is asserted, 0 means it's not + asserted. The file is readable and writable. diff --git a/Documentation/ABI/testing/sysfs-ptp b/Documentation/ABI/testing/sysfs-ptp index 2363ad810ddb..d378f57c1b73 100644 --- a/Documentation/ABI/testing/sysfs-ptp +++ b/Documentation/ABI/testing/sysfs-ptp @@ -33,6 +33,13 @@ Description: frequency adjustment value (a positive integer) in parts per billion. +What: /sys/class/ptp/ptpN/max_vclocks +Date: May 2021 +Contact: Yangbo Lu <yangbo.lu@nxp.com> +Description: + This file contains the maximum number of ptp vclocks. + Write an integer to re-configure it. + What: /sys/class/ptp/ptpN/n_alarms Date: September 2010 Contact: Richard Cochran <richardcochran@gmail.com> @@ -61,6 +68,19 @@ Description: This file contains the number of programmable pins offered by the PTP hardware clock. +What: /sys/class/ptp/ptpN/n_vclocks +Date: May 2021 +Contact: Yangbo Lu <yangbo.lu@nxp.com> +Description: + This file contains the number of virtual PTP clocks in + use. By default, the value is 0 meaning that only the + physical clock is in use. Setting the value creates + the corresponding number of virtual clocks and causes + the physical clock to become free running. Setting the + value back to 0 deletes the virtual clocks and + switches the physical clock back to normal, adjustable + operation. + What: /sys/class/ptp/ptpN/pins Date: March 2014 Contact: Richard Cochran <richardcochran@gmail.com> diff --git a/Documentation/admin-guide/binderfs.rst b/Documentation/admin-guide/binderfs.rst index 199d84314a14..41a4db00df8d 100644 --- a/Documentation/admin-guide/binderfs.rst +++ b/Documentation/admin-guide/binderfs.rst @@ -72,3 +72,16 @@ that the `rm() <rm_>`_ tool can be used to delete them. Note that the ``binder-control`` device cannot be deleted since this would make the binderfs instance unusable. The ``binder-control`` device will be deleted when the binderfs instance is unmounted and all references to it have been dropped. + +Binder features +--------------- + +Assuming an instance of binderfs has been mounted at ``/dev/binderfs``, the +features supported by the binder driver can be located under +``/dev/binderfs/features/``. The presence of individual files can be tested +to determine whether a particular feature is supported by the driver. + +Example:: + + cat /dev/binderfs/features/oneway_spam_detection + 1 diff --git a/Documentation/arm64/tagged-address-abi.rst b/Documentation/arm64/tagged-address-abi.rst index 459e6b66ff68..0c9120ec58ae 100644 --- a/Documentation/arm64/tagged-address-abi.rst +++ b/Documentation/arm64/tagged-address-abi.rst @@ -45,14 +45,24 @@ how the user addresses are used by the kernel: 1.
User addresses not accessed by the kernel but used for address space management (e.g. ``mprotect()``, ``madvise()``). The use of valid - tagged pointers in this context is allowed with the exception of - ``brk()``, ``mmap()`` and the ``new_address`` argument to - ``mremap()`` as these have the potential to alias with existing - user addresses. - - NOTE: This behaviour changed in v5.6 and so some earlier kernels may - incorrectly accept valid tagged pointers for the ``brk()``, - ``mmap()`` and ``mremap()`` system calls. + tagged pointers in this context is allowed with these exceptions: + + - ``brk()``, ``mmap()`` and the ``new_address`` argument to + ``mremap()`` as these have the potential to alias with existing + user addresses. + + NOTE: This behaviour changed in v5.6 and so some earlier kernels may + incorrectly accept valid tagged pointers for the ``brk()``, + ``mmap()`` and ``mremap()`` system calls. + + - The ``range.start``, ``start`` and ``dst`` arguments to the + ``UFFDIO_*`` ``ioctl()``s used on a file descriptor obtained from + ``userfaultfd()``, as fault addresses subsequently obtained by reading + the file descriptor will be untagged, which may otherwise confuse + tag-unaware programs. + + NOTE: This behaviour changed in v5.14 and so some earlier kernels may + incorrectly accept valid tagged pointers for this system call. 2. User addresses accessed by the kernel (e.g. ``write()``). This ABI relaxation is disabled by default and the application thread needs to diff --git a/Documentation/bpf/libbpf/libbpf_naming_convention.rst b/Documentation/bpf/libbpf/libbpf_naming_convention.rst index 3de1d51e41da..6bf9c5ac7576 100644 --- a/Documentation/bpf/libbpf/libbpf_naming_convention.rst +++ b/Documentation/bpf/libbpf/libbpf_naming_convention.rst @@ -108,7 +108,7 @@ This bump in ABI version is at most once per kernel development cycle. For example, if current state of ``libbpf.map`` is: -.. code-block:: c +.. code-block:: none LIBBPF_0.0.1 { global: @@ -121,7 +121,7 @@ For example, if current state of ``libbpf.map`` is: , and a new symbol ``bpf_func_c`` is being introduced, then ``libbpf.map`` should be changed like this: -.. code-block:: c +.. code-block:: none LIBBPF_0.0.1 { global: diff --git a/Documentation/dev-tools/kunit/running_tips.rst b/Documentation/dev-tools/kunit/running_tips.rst index 7d99386cf94a..d1626d548fa5 100644 --- a/Documentation/dev-tools/kunit/running_tips.rst +++ b/Documentation/dev-tools/kunit/running_tips.rst @@ -86,19 +86,7 @@ Generating code coverage reports under UML .. note:: TODO(brendanhiggins@google.com): There are various issues with UML and versions of gcc 7 and up. You're likely to run into missing ``.gcda`` - files or compile errors. We know one `faulty GCC commit - <https://github.com/gcc-mirror/gcc/commit/8c9434c2f9358b8b8bad2c1990edf10a21645f9d>`_ - but not how we'd go about getting this fixed. The compile errors still - need some investigation. - -.. note:: - TODO(brendanhiggins@google.com): for recent versions of Linux - (5.10-5.12, maybe earlier), there's a bug with gcov counters not being - flushed in UML. This translates to very low (<1%) reported coverage. This is - related to the above issue and can be worked around by replacing the - one call to ``uml_abort()`` (it's in ``os_dump_core()``) with a plain - ``exit()``. - + files or compile errors. This is different from the "normal" way of getting coverage information that is documented in Documentation/dev-tools/gcov.rst. 
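The PTP virtual clock files documented in the sysfs-ptp hunk above can be exercised directly from a shell. A minimal sketch, assuming a physical clock exposed as ptp0 (a hypothetical instance name) and that the requested count fits under max_vclocks::

	# Raise the vclock ceiling first if needed.
	echo 4 > /sys/class/ptp/ptp0/max_vclocks
	# Create two virtual clocks; the physical clock becomes free running.
	echo 2 > /sys/class/ptp/ptp0/n_vclocks
	# Delete them again; the physical clock returns to normal,
	# adjustable operation.
	echo 0 > /sys/class/ptp/ptp0/n_vclocks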
diff --git a/Documentation/devicetree/bindings/clock/brcm,iproc-clocks.yaml b/Documentation/devicetree/bindings/clock/brcm,iproc-clocks.yaml index 8dc7b404ee12..1174c9aa9934 100644 --- a/Documentation/devicetree/bindings/clock/brcm,iproc-clocks.yaml +++ b/Documentation/devicetree/bindings/clock/brcm,iproc-clocks.yaml @@ -50,7 +50,6 @@ properties: reg: minItems: 1 - maxItems: 3 items: - description: base register - description: power register diff --git a/Documentation/devicetree/bindings/display/renesas,du.yaml b/Documentation/devicetree/bindings/display/renesas,du.yaml index 5f4345d43020..e3ca5389c17d 100644 --- a/Documentation/devicetree/bindings/display/renesas,du.yaml +++ b/Documentation/devicetree/bindings/display/renesas,du.yaml @@ -92,7 +92,6 @@ required: - reg - clocks - interrupts - - resets - ports allOf: diff --git a/Documentation/devicetree/bindings/firmware/xilinx/xlnx,zynqmp-firmware.txt b/Documentation/devicetree/bindings/firmware/xilinx/xlnx,zynqmp-firmware.txt deleted file mode 100644 index 18c3aea90df2..000000000000 --- a/Documentation/devicetree/bindings/firmware/xilinx/xlnx,zynqmp-firmware.txt +++ /dev/null @@ -1,44 +0,0 @@ ------------------------------------------------------------------ -Device Tree Bindings for the Xilinx Zynq MPSoC Firmware Interface ------------------------------------------------------------------ - -The zynqmp-firmware node describes the interface to platform firmware. -ZynqMP has an interface to communicate with secure firmware. Firmware -driver provides an interface to firmware APIs. Interface APIs can be -used by any driver to communicate to PMUFW(Platform Management Unit). -These requests include clock management, pin control, device control, -power management service, FPGA service and other platform management -services. - -Required properties: - - compatible: Must contain any of below: - "xlnx,zynqmp-firmware" for Zynq Ultrascale+ MPSoC - "xlnx,versal-firmware" for Versal - - method: The method of calling the PM-API firmware layer. - Permitted values are: - - "smc" : SMC #0, following the SMCCC - - "hvc" : HVC #0, following the SMCCC - -------- -Example -------- - -Zynq Ultrascale+ MPSoC ----------------------- -firmware { - zynqmp_firmware: zynqmp-firmware { - compatible = "xlnx,zynqmp-firmware"; - method = "smc"; - ... - }; -}; - -Versal ------- -firmware { - versal_firmware: versal-firmware { - compatible = "xlnx,versal-firmware"; - method = "smc"; - ... - }; -}; diff --git a/Documentation/devicetree/bindings/firmware/xilinx/xlnx,zynqmp-firmware.yaml b/Documentation/devicetree/bindings/firmware/xilinx/xlnx,zynqmp-firmware.yaml new file mode 100644 index 000000000000..f14f7b454f07 --- /dev/null +++ b/Documentation/devicetree/bindings/firmware/xilinx/xlnx,zynqmp-firmware.yaml @@ -0,0 +1,89 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/firmware/xilinx/xlnx,zynqmp-firmware.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Xilinx firmware driver + +maintainers: + - Nava kishore Manne <nava.manne@xilinx.com> + +description: The zynqmp-firmware node describes the interface to platform + firmware. ZynqMP has an interface to communicate with secure firmware. + Firmware driver provides an interface to firmware APIs. Interface APIs + can be used by any driver to communicate to PMUFW(Platform Management Unit). 
+ These requests include clock management, pin control, device control, + power management service, FPGA service and other platform management + services. + +properties: + compatible: + oneOf: + - description: For implementations complying with Zynq Ultrascale+ MPSoC. + const: xlnx,zynqmp-firmware + + - description: For implementations complying with Versal. + const: xlnx,versal-firmware + + method: + description: | + The method of calling the PM-API firmware layer. + Permitted values are: + - "smc" : SMC #0, following the SMCCC + - "hvc" : HVC #0, following the SMCCC + + $ref: /schemas/types.yaml#/definitions/string-array + enum: + - smc + - hvc + + versal_fpga: + $ref: /schemas/fpga/xlnx,versal-fpga.yaml# + description: Compatible of the FPGA device. + type: object + + zynqmp-aes: + $ref: /schemas/crypto/xlnx,zynqmp-aes.yaml# + description: The ZynqMP AES-GCM hardened cryptographic accelerator is + used to encrypt or decrypt the data with provided key and initialization + vector. + type: object + + clock-controller: + $ref: /schemas/clock/xlnx,versal-clk.yaml# + description: The clock controller is a hardware block of Xilinx versal + clock tree. It reads required input clock frequencies from the devicetree + and acts as clock provider for all clock consumers of PS clocks. List of + clock specifiers which are external input clocks to the given clock + controller. + type: object + +required: + - compatible + +additionalProperties: false + +examples: + - | + versal-firmware { + compatible = "xlnx,versal-firmware"; + method = "smc"; + + versal_fpga: versal_fpga { + compatible = "xlnx,versal-fpga"; + }; + + xlnx_aes: zynqmp-aes { + compatible = "xlnx,zynqmp-aes"; + }; + + versal_clk: clock-controller { + #clock-cells = <1>; + compatible = "xlnx,versal-clk"; + clocks = <&ref>, <&alt_ref>, <&pl_alt_ref>; + clock-names = "ref", "alt_ref", "pl_alt_ref"; + }; + }; + +... diff --git a/Documentation/devicetree/bindings/fpga/xlnx,versal-fpga.yaml b/Documentation/devicetree/bindings/fpga/xlnx,versal-fpga.yaml new file mode 100644 index 000000000000..ac6a207278d5 --- /dev/null +++ b/Documentation/devicetree/bindings/fpga/xlnx,versal-fpga.yaml @@ -0,0 +1,33 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/fpga/xlnx,versal-fpga.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Xilinx Versal FPGA driver. + +maintainers: + - Nava kishore Manne <nava.manne@xilinx.com> + +description: | + Device Tree Versal FPGA bindings for the Versal SoC, controlled + using firmware interface. + +properties: + compatible: + items: + - enum: + - xlnx,versal-fpga + +required: + - compatible + +additionalProperties: false + +examples: + - | + versal_fpga: versal_fpga { + compatible = "xlnx,versal-fpga"; + }; + +... diff --git a/Documentation/devicetree/bindings/hwmon/adt7475.yaml b/Documentation/devicetree/bindings/hwmon/adt7475.yaml index ad0ec9f35bd8..7d9c083632b9 100644 --- a/Documentation/devicetree/bindings/hwmon/adt7475.yaml +++ b/Documentation/devicetree/bindings/hwmon/adt7475.yaml @@ -39,17 +39,7 @@ properties: reg: maxItems: 1 -patternProperties: - "^adi,bypass-attenuator-in[0-4]$": - description: | - Configures bypassing the individual voltage input attenuator. If - set to 1 the attenuator is bypassed if set to 0 the attenuator is - not bypassed. If the property is absent then the attenuator - retains it's configuration from the bios/bootloader.
- $ref: /schemas/types.yaml#/definitions/uint32 - enum: [0, 1] - - "^adi,pwm-active-state$": + adi,pwm-active-state: description: | Integer array, represents the active state of the pwm outputs If set to 0 the pwm uses a logic low output for 100% duty cycle. If set to 1 the pwm @@ -61,6 +51,16 @@ patternProperties: enum: [0, 1] default: 1 +patternProperties: + "^adi,bypass-attenuator-in[0-4]$": + description: | + Configures bypassing the individual voltage input attenuator. If + set to 1 the attenuator is bypassed if set to 0 the attenuator is + not bypassed. If the property is absent then the attenuator + retains its configuration from the bios/bootloader. + $ref: /schemas/types.yaml#/definitions/uint32 + enum: [0, 1] + required: - compatible - reg diff --git a/Documentation/devicetree/bindings/iio/st,st-sensors.yaml b/Documentation/devicetree/bindings/iio/st,st-sensors.yaml index b2a1e42c56fa..71de5631ebae 100644 --- a/Documentation/devicetree/bindings/iio/st,st-sensors.yaml +++ b/Documentation/devicetree/bindings/iio/st,st-sensors.yaml @@ -152,47 +152,6 @@ allOf: maxItems: 1 st,drdy-int-pin: false - - if: - properties: - compatible: - enum: - # Two inertial interrupts i.e. accelerometer/gyro interrupts - - st,h3lis331dl-accel - - st,l3g4200d-gyro - - st,l3g4is-gyro - - st,l3gd20-gyro - - st,l3gd20h-gyro - - st,lis2de12 - - st,lis2dw12 - - st,lis2hh12 - - st,lis2dh12-accel - - st,lis331dl-accel - - st,lis331dlh-accel - - st,lis3de - - st,lis3dh-accel - - st,lis3dhh - - st,lis3mdl-magn - - st,lng2dm-accel - - st,lps331ap-press - - st,lsm303agr-accel - - st,lsm303dlh-accel - - st,lsm303dlhc-accel - - st,lsm303dlm-accel - - st,lsm330-accel - - st,lsm330-gyro - - st,lsm330d-accel - - st,lsm330d-gyro - - st,lsm330dl-accel - - st,lsm330dl-gyro - - st,lsm330dlc-accel - - st,lsm330dlc-gyro - - st,lsm9ds0-gyro - - st,lsm9ds1-magn - then: - properties: - interrupts: - maxItems: 2 - required: - compatible - reg diff --git a/Documentation/devicetree/bindings/interconnect/qcom,osm-l3.yaml b/Documentation/devicetree/bindings/interconnect/qcom,osm-l3.yaml index d6a95c3cb26f..e701524ee811 100644 --- a/Documentation/devicetree/bindings/interconnect/qcom,osm-l3.yaml +++ b/Documentation/devicetree/bindings/interconnect/qcom,osm-l3.yaml @@ -18,6 +18,7 @@ properties: compatible: enum: - qcom,sc7180-osm-l3 + - qcom,sc8180x-osm-l3 - qcom,sdm845-osm-l3 - qcom,sm8150-osm-l3 - qcom,sm8250-epss-l3 diff --git a/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml b/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml index 5accc0d113be..3fd1a134162d 100644 --- a/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml +++ b/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml @@ -49,6 +49,17 @@ properties: - qcom,sc7280-mmss-noc - qcom,sc7280-nsp-noc - qcom,sc7280-system-noc + - qcom,sc8180x-aggre1-noc + - qcom,sc8180x-aggre2-noc + - qcom,sc8180x-camnoc-virt + - qcom,sc8180x-compute-noc + - qcom,sc8180x-config-noc + - qcom,sc8180x-dc-noc + - qcom,sc8180x-gem-noc + - qcom,sc8180x-ipa-virt + - qcom,sc8180x-mc-virt + - qcom,sc8180x-mmss-noc + - qcom,sc8180x-system-noc - qcom,sdm845-aggre1-noc - qcom,sdm845-aggre2-noc - qcom,sdm845-config-noc diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml index 1181b590db71..03f2b2d4db30 100644 --- a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml +++ b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml @@ -52,16 +52,14 @@ properties: items: - const:
marvell,ap806-smmu-500 - const: arm,mmu-500 - - description: NVIDIA SoCs that program two ARM MMU-500s identically - items: - description: NVIDIA SoCs that require memory controller interaction and may program multiple ARM MMU-500s identically with the memory controller interleaving translations between multiple instances for improved performance. items: - enum: - - const: nvidia,tegra194-smmu - - const: nvidia,tegra186-smmu + - nvidia,tegra194-smmu + - nvidia,tegra186-smmu - const: nvidia,smmu-500 - items: - const: arm,mmu-500 diff --git a/Documentation/devicetree/bindings/iommu/rockchip,iommu.yaml b/Documentation/devicetree/bindings/iommu/rockchip,iommu.yaml index d2e28a9e3545..ba9124f721f1 100644 --- a/Documentation/devicetree/bindings/iommu/rockchip,iommu.yaml +++ b/Documentation/devicetree/bindings/iommu/rockchip,iommu.yaml @@ -28,14 +28,12 @@ properties: - description: configuration registers for MMU instance 0 - description: configuration registers for MMU instance 1 minItems: 1 - maxItems: 2 interrupts: items: - description: interruption for MMU instance 0 - description: interruption for MMU instance 1 minItems: 1 - maxItems: 2 clocks: items: diff --git a/Documentation/devicetree/bindings/memory-controllers/arm,pl353-smc.yaml b/Documentation/devicetree/bindings/memory-controllers/arm,pl353-smc.yaml index 7a63c85ef8c5..01c9acf9275d 100644 --- a/Documentation/devicetree/bindings/memory-controllers/arm,pl353-smc.yaml +++ b/Documentation/devicetree/bindings/memory-controllers/arm,pl353-smc.yaml @@ -57,7 +57,6 @@ properties: ranges: minItems: 1 - maxItems: 3 description: | Memory bus areas for interacting with the devices. Reflects the memory layout with four integer values following: diff --git a/Documentation/devicetree/bindings/misc/ge-achc.txt b/Documentation/devicetree/bindings/misc/ge-achc.txt deleted file mode 100644 index 77df94d7a32f..000000000000 --- a/Documentation/devicetree/bindings/misc/ge-achc.txt +++ /dev/null @@ -1,26 +0,0 @@ -* GE Healthcare USB Management Controller - -A device which handles data aquisition from compatible USB based peripherals. -SPI is used for device management. - -Note: This device does not expose the peripherals as USB devices. - -Required properties: - -- compatible : Should be "ge,achc" - -Required SPI properties: - -- reg : Should be address of the device chip select within - the controller. - -- spi-max-frequency : Maximum SPI clocking speed of device in Hz, should be - 1MHz for the GE ACHC. - -Example: - -spidev0: spi@0 { - compatible = "ge,achc"; - reg = <0>; - spi-max-frequency = <1000000>; -}; diff --git a/Documentation/devicetree/bindings/misc/ge-achc.yaml b/Documentation/devicetree/bindings/misc/ge-achc.yaml new file mode 100644 index 000000000000..ff07aa62ed57 --- /dev/null +++ b/Documentation/devicetree/bindings/misc/ge-achc.yaml @@ -0,0 +1,65 @@ +# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause +# Copyright (C) 2021 GE Inc. +# Copyright (C) 2021 Collabora Ltd. +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/misc/ge-achc.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: GE Healthcare USB Management Controller + +description: | + A device which handles data acquisition from compatible USB based peripherals. + SPI is used for device management. + + Note: This device does not expose the peripherals as USB devices. 
+ +maintainers: + - Sebastian Reichel <sre@kernel.org> + +properties: + compatible: + items: + - const: ge,achc + - const: nxp,kinetis-k20 + + clocks: + maxItems: 1 + + vdd-supply: + description: Digital power supply regulator on VDD pin + + vdda-supply: + description: Analog power supply regulator on VDDA pin + + reg: + items: + - description: Control interface + - description: Firmware programming interface + + reset-gpios: + description: GPIO used for hardware reset. + maxItems: 1 + +required: + - compatible + - clocks + - reg + - reset-gpios + +additionalProperties: false + +examples: + - | + #include <dt-bindings/gpio/gpio.h> + spi { + #address-cells = <1>; + #size-cells = <0>; + + spi@1 { + compatible = "ge,achc", "nxp,kinetis-k20"; + reg = <1>, <0>; + clocks = <&achc_24M>; + reset-gpios = <&gpio3 6 GPIO_ACTIVE_LOW>; + }; + }; diff --git a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.yaml b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.yaml index e5f1a33332a5..dd5a64969e37 100644 --- a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.yaml +++ b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.yaml @@ -84,7 +84,6 @@ properties: interrupts: minItems: 1 - maxItems: 3 items: - description: NAND CTLRDY interrupt - description: FLASH_DMA_DONE if flash DMA is available @@ -92,7 +91,6 @@ properties: interrupt-names: minItems: 1 - maxItems: 3 items: - const: nand_ctlrdy - const: flash_dma_done @@ -148,8 +146,6 @@ allOf: then: properties: reg-names: - minItems: 2 - maxItems: 2 items: - const: nand - const: nand-int-base @@ -161,8 +157,6 @@ allOf: then: properties: reg-names: - minItems: 3 - maxItems: 3 items: - const: nand - const: nand-int-base @@ -175,8 +169,6 @@ allOf: then: properties: reg-names: - minItems: 3 - maxItems: 3 items: - const: nand - const: iproc-idm diff --git a/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml b/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml index 0b8a05dd52e6..f978f8719d8e 100644 --- a/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml +++ b/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml @@ -67,8 +67,8 @@ properties: reg: oneOf: - enum: - - 0 - - 1 + - 0 + - 1 required: - compatible diff --git a/Documentation/devicetree/bindings/net/gpmc-eth.txt b/Documentation/devicetree/bindings/net/gpmc-eth.txt index f7da3d73ca1b..32821066a85b 100644 --- a/Documentation/devicetree/bindings/net/gpmc-eth.txt +++ b/Documentation/devicetree/bindings/net/gpmc-eth.txt @@ -13,7 +13,7 @@ Documentation/devicetree/bindings/memory-controllers/omap-gpmc.txt For the properties relevant to the ethernet controller connected to the GPMC refer to the binding documentation of the device. For example, the documentation -for the SMSC 911x is Documentation/devicetree/bindings/net/smsc911x.txt +for the SMSC 911x is Documentation/devicetree/bindings/net/smsc,lan9115.yaml Child nodes need to specify the GPMC bus address width using the "bank-width" property but is possible that an ethernet controller also has a property to diff --git a/Documentation/devicetree/bindings/net/imx-dwmac.txt b/Documentation/devicetree/bindings/net/imx-dwmac.txt deleted file mode 100644 index 921d522fe8d7..000000000000 --- a/Documentation/devicetree/bindings/net/imx-dwmac.txt +++ /dev/null @@ -1,56 +0,0 @@ -IMX8 glue layer controller, NXP imx8 families support Synopsys MAC 5.10a IP. - -This file documents platform glue layer for IMX. -Please see stmmac.txt for the other unchanged properties. - -The device node has following properties. 
- -Required properties: -- compatible: Should be "nxp,imx8mp-dwmac-eqos" to select glue layer - and "snps,dwmac-5.10a" to select IP version. -- clocks: Must contain a phandle for each entry in clock-names. -- clock-names: Should be "stmmaceth" for the host clock. - Should be "pclk" for the MAC apb clock. - Should be "ptp_ref" for the MAC timer clock. - Should be "tx" for the MAC RGMII TX clock: - Should be "mem" for EQOS MEM clock. - - "mem" clock is required for imx8dxl platform. - - "mem" clock is not required for imx8mp platform. -- interrupt-names: Should contain a list of interrupt names corresponding to - the interrupts in the interrupts property, if available. - Should be "macirq" for the main MAC IRQ - Should be "eth_wake_irq" for the IT which wake up system -- intf_mode: Should be phandle/offset pair. The phandle to the syscon node which - encompases the GPR register, and the offset of the GPR register. - - required for imx8mp platform. - - is optional for imx8dxl platform. - -Optional properties: -- intf_mode: is optional for imx8dxl platform. -- snps,rmii_refclk_ext: to select RMII reference clock from external. - -Example: - eqos: ethernet@30bf0000 { - compatible = "nxp,imx8mp-dwmac-eqos", "snps,dwmac-5.10a"; - reg = <0x30bf0000 0x10000>; - interrupts = <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>, - <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>; - interrupt-names = "eth_wake_irq", "macirq"; - clocks = <&clk IMX8MP_CLK_ENET_QOS_ROOT>, - <&clk IMX8MP_CLK_QOS_ENET_ROOT>, - <&clk IMX8MP_CLK_ENET_QOS_TIMER>, - <&clk IMX8MP_CLK_ENET_QOS>; - clock-names = "stmmaceth", "pclk", "ptp_ref", "tx"; - assigned-clocks = <&clk IMX8MP_CLK_ENET_AXI>, - <&clk IMX8MP_CLK_ENET_QOS_TIMER>, - <&clk IMX8MP_CLK_ENET_QOS>; - assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_266M>, - <&clk IMX8MP_SYS_PLL2_100M>, - <&clk IMX8MP_SYS_PLL2_125M>; - assigned-clock-rates = <0>, <100000000>, <125000000>; - nvmem-cells = <&eth_mac0>; - nvmem-cell-names = "mac-address"; - nvmem_macaddr_swap; - intf_mode = <&gpr 0x4>; - status = "disabled"; - }; diff --git a/Documentation/devicetree/bindings/net/nxp,dwmac-imx.yaml b/Documentation/devicetree/bindings/net/nxp,dwmac-imx.yaml new file mode 100644 index 000000000000..5629b2e4ccf8 --- /dev/null +++ b/Documentation/devicetree/bindings/net/nxp,dwmac-imx.yaml @@ -0,0 +1,93 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/nxp,dwmac-imx.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: NXP i.MX8 DWMAC glue layer Device Tree Bindings + +maintainers: + - Joakim Zhang <qiangqing.zhang@nxp.com> + +# We need a select here so we don't match all nodes with 'snps,dwmac' +select: + properties: + compatible: + contains: + enum: + - nxp,imx8mp-dwmac-eqos + - nxp,imx8dxl-dwmac-eqos + required: + - compatible + +allOf: + - $ref: "snps,dwmac.yaml#" + +properties: + compatible: + oneOf: + - items: + - enum: + - nxp,imx8mp-dwmac-eqos + - nxp,imx8dxl-dwmac-eqos + - const: snps,dwmac-5.10a + + clocks: + minItems: 3 + maxItems: 5 + items: + - description: MAC host clock + - description: MAC apb clock + - description: MAC timer clock + - description: MAC RGMII TX clock + - description: EQOS MEM clock + + clock-names: + minItems: 3 + maxItems: 5 + contains: + enum: + - stmmaceth + - pclk + - ptp_ref + - tx + - mem + + intf_mode: + $ref: /schemas/types.yaml#/definitions/phandle-array + description: + Should be a phandle/offset pair. The phandle to the syscon node which + encompasses the GPR register, and the offset of the GPR register.
+ + snps,rmii_refclk_ext: + $ref: /schemas/types.yaml#/definitions/flag + description: + To select RMII reference clock from external. + +required: + - compatible + - clocks + - clock-names + +unevaluatedProperties: false + +examples: + - | + #include <dt-bindings/interrupt-controller/arm-gic.h> + #include <dt-bindings/interrupt-controller/irq.h> + #include <dt-bindings/clock/imx8mp-clock.h> + + eqos: ethernet@30bf0000 { + compatible = "nxp,imx8mp-dwmac-eqos","snps,dwmac-5.10a"; + reg = <0x30bf0000 0x10000>; + interrupts = <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "macirq", "eth_wake_irq"; + clocks = <&clk IMX8MP_CLK_ENET_QOS_ROOT>, + <&clk IMX8MP_CLK_QOS_ENET_ROOT>, + <&clk IMX8MP_CLK_ENET_QOS_TIMER>, + <&clk IMX8MP_CLK_ENET_QOS>; + clock-names = "stmmaceth", "pclk", "ptp_ref", "tx"; + phy-mode = "rgmii"; + status = "disabled"; + }; diff --git a/Documentation/devicetree/bindings/net/smsc,lan9115.yaml b/Documentation/devicetree/bindings/net/smsc,lan9115.yaml new file mode 100644 index 000000000000..f86667cbcca8 --- /dev/null +++ b/Documentation/devicetree/bindings/net/smsc,lan9115.yaml @@ -0,0 +1,110 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/smsc,lan9115.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Smart Mixed-Signal Connectivity (SMSC) LAN911x/912x Controller + +maintainers: + - Shawn Guo <shawnguo@kernel.org> + +allOf: + - $ref: ethernet-controller.yaml# + +properties: + compatible: + oneOf: + - const: smsc,lan9115 + - items: + - enum: + - smsc,lan89218 + - smsc,lan9117 + - smsc,lan9118 + - smsc,lan9220 + - smsc,lan9221 + - const: smsc,lan9115 + + reg: + maxItems: 1 + + reg-shift: true + + reg-io-width: + enum: [ 2, 4 ] + default: 2 + + interrupts: + minItems: 1 + items: + - description: + LAN interrupt line + - description: + Optional PME (power management event) interrupt that is able to wake + up the host system with a 50ms pulse on network activity + + clocks: + maxItems: 1 + + phy-mode: true + + smsc,irq-active-high: + type: boolean + description: Indicates the IRQ polarity is active-high + + smsc,irq-push-pull: + type: boolean + description: Indicates the IRQ type is push-pull + + smsc,force-internal-phy: + type: boolean + description: Forces SMSC LAN controller to use internal PHY + + smsc,force-external-phy: + type: boolean + description: Forces SMSC LAN controller to use external PHY + + smsc,save-mac-address: + type: boolean + description: + Indicates that MAC address needs to be saved before resetting the + controller + + reset-gpios: + maxItems: 1 + description: + A GPIO line connected to the RESET (active low) signal of the device. + On many systems this is wired high so the device goes out of reset at + power-on, but if it is under program control, this optional GPIO can + wake up in response to it. + + vdd33a-supply: + description: 3.3V analog power supply + + vddvario-supply: + description: IO logic power supply + +required: + - compatible + - reg + - interrupts + +# There are lots of bus-specific properties ("qcom,*", "samsung,*", "fsl,*", +# "gpmc,*", ...) to be found, that actually depend on the compatible value of +# the parent node. 
+additionalProperties: true + +examples: + - | + #include <dt-bindings/gpio/gpio.h> + + ethernet@f4000000 { + compatible = "smsc,lan9220", "smsc,lan9115"; + reg = <0xf4000000 0x2000000>; + phy-mode = "mii"; + interrupt-parent = <&gpio1>; + interrupts = <31>, <32>; + reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>; + reg-io-width = <4>; + smsc,irq-push-pull; + }; diff --git a/Documentation/devicetree/bindings/net/smsc911x.txt b/Documentation/devicetree/bindings/net/smsc911x.txt deleted file mode 100644 index acfafc8e143c..000000000000 --- a/Documentation/devicetree/bindings/net/smsc911x.txt +++ /dev/null @@ -1,43 +0,0 @@ -* Smart Mixed-Signal Connectivity (SMSC) LAN911x/912x Controller - -Required properties: -- compatible : Should be "smsc,lan<model>", "smsc,lan9115" -- reg : Address and length of the io space for SMSC LAN -- interrupts : one or two interrupt specifiers - - The first interrupt is the SMSC LAN interrupt line - - The second interrupt (if present) is the PME (power - management event) interrupt that is able to wake up the host - system with a 50ms pulse on network activity -- phy-mode : See ethernet.txt file in the same directory - -Optional properties: -- reg-shift : Specify the quantity to shift the register offsets by -- reg-io-width : Specify the size (in bytes) of the IO accesses that - should be performed on the device. Valid value for SMSC LAN is - 2 or 4. If it's omitted or invalid, the size would be 2. -- smsc,irq-active-high : Indicates the IRQ polarity is active-high -- smsc,irq-push-pull : Indicates the IRQ type is push-pull -- smsc,force-internal-phy : Forces SMSC LAN controller to use - internal PHY -- smsc,force-external-phy : Forces SMSC LAN controller to use - external PHY -- smsc,save-mac-address : Indicates that mac address needs to be saved - before resetting the controller -- reset-gpios : a GPIO line connected to the RESET (active low) signal - of the device. On many systems this is wired high so the device goes - out of reset at power-on, but if it is under program control, this - optional GPIO can wake up in response to it. 
-- vdd33a-supply, vddvario-supply : 3.3V analog and IO logic power supplies - -Examples: - -lan9220@f4000000 { - compatible = "smsc,lan9220", "smsc,lan9115"; - reg = <0xf4000000 0x2000000>; - phy-mode = "mii"; - interrupt-parent = <&gpio1>; - interrupts = <31>, <32>; - reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>; - reg-io-width = <4>; - smsc,irq-push-pull; -}; diff --git a/Documentation/devicetree/bindings/net/snps,dwmac.yaml b/Documentation/devicetree/bindings/net/snps,dwmac.yaml index d7652596a09b..42689b7d03a2 100644 --- a/Documentation/devicetree/bindings/net/snps,dwmac.yaml +++ b/Documentation/devicetree/bindings/net/snps,dwmac.yaml @@ -28,6 +28,7 @@ select: - snps,dwmac-4.00 - snps,dwmac-4.10a - snps,dwmac-4.20a + - snps,dwmac-5.10a - snps,dwxgmac - snps,dwxgmac-2.10 @@ -82,6 +83,7 @@ properties: - snps,dwmac-4.00 - snps,dwmac-4.10a - snps,dwmac-4.20a + - snps,dwmac-5.10a - snps,dwxgmac - snps,dwxgmac-2.10 @@ -375,6 +377,7 @@ allOf: - snps,dwmac-4.00 - snps,dwmac-4.10a - snps,dwmac-4.20a + - snps,dwmac-5.10a - snps,dwxgmac - snps,dwxgmac-2.10 - st,spear600-gmac diff --git a/Documentation/devicetree/bindings/nvmem/nintendo-otp.yaml b/Documentation/devicetree/bindings/nvmem/nintendo-otp.yaml new file mode 100644 index 000000000000..dbe4ffdd644c --- /dev/null +++ b/Documentation/devicetree/bindings/nvmem/nintendo-otp.yaml @@ -0,0 +1,44 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/nvmem/nintendo-otp.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Nintendo Wii and Wii U OTP Device Tree Bindings + +description: | + This binding represents the OTP memory as found on a Nintendo Wii or Wii U, + which contains common and per-console keys, signatures and related data + required to access peripherals. + + See https://wiiubrew.org/wiki/Hardware/OTP + +maintainers: + - Emmanuel Gil Peyrot <linkmauve@linkmauve.fr> + +allOf: + - $ref: "nvmem.yaml#" + +properties: + compatible: + enum: + - nintendo,hollywood-otp + - nintendo,latte-otp + + reg: + maxItems: 1 + +required: + - compatible + - reg + +unevaluatedProperties: false + +examples: + - | + otp@d8001ec { + compatible = "nintendo,latte-otp"; + reg = <0x0d8001ec 0x8>; + }; + +... diff --git a/Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml b/Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml index 861b205016b1..dede8892ee01 100644 --- a/Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml +++ b/Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml @@ -51,6 +51,9 @@ properties: vcc-supply: description: Our power supply. + power-domains: + maxItems: 1 + # Needed if any child nodes are present. "#address-cells": const: 1 diff --git a/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.txt b/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.txt deleted file mode 100644 index 7c70f2ad9942..000000000000 --- a/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.txt +++ /dev/null @@ -1,20 +0,0 @@ -* Freescale i.MX8MQ USB3 PHY binding - -Required properties: -- compatible: Should be "fsl,imx8mq-usb-phy" or "fsl,imx8mp-usb-phy" -- #phys-cells: must be 0 (see phy-bindings.txt in this directory) -- reg: The base address and length of the registers -- clocks: phandles to the clocks for each clock listed in clock-names -- clock-names: must contain "phy" - -Optional properties: -- vbus-supply: A phandle to the regulator for USB VBUS. 
- -Example: - usb3_phy0: phy@381f0040 { - compatible = "fsl,imx8mq-usb-phy"; - reg = <0x381f0040 0x40>; - clocks = <&clk IMX8MQ_CLK_USB1_PHY_ROOT>; - clock-names = "phy"; - #phy-cells = <0>; - }; diff --git a/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.yaml b/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.yaml new file mode 100644 index 000000000000..2936f3510a6a --- /dev/null +++ b/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.yaml @@ -0,0 +1,53 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/phy/fsl,imx8mq-usb-phy.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Freescale i.MX8MQ USB3 PHY binding + +maintainers: + - Li Jun <jun.li@nxp.com> + +properties: + compatible: + enum: + - fsl,imx8mq-usb-phy + - fsl,imx8mp-usb-phy + + reg: + maxItems: 1 + + "#phy-cells": + const: 0 + + clocks: + maxItems: 1 + + clock-names: + items: + - const: phy + + vbus-supply: + description: + A phandle to the regulator for USB VBUS. + +required: + - compatible + - reg + - "#phy-cells" + - clocks + - clock-names + +additionalProperties: false + +examples: + - | + #include <dt-bindings/clock/imx8mq-clock.h> + usb3_phy0: phy@381f0040 { + compatible = "fsl,imx8mq-usb-phy"; + reg = <0x381f0040 0x40>; + clocks = <&clk IMX8MQ_CLK_USB1_PHY_ROOT>; + clock-names = "phy"; + #phy-cells = <0>; + }; diff --git a/Documentation/devicetree/bindings/phy/intel,phy-keembay-usb.yaml b/Documentation/devicetree/bindings/phy/intel,keembay-phy-usb.yaml index a217bb8ac5bc..52815b6c2b88 100644 --- a/Documentation/devicetree/bindings/phy/intel,phy-keembay-usb.yaml +++ b/Documentation/devicetree/bindings/phy/intel,keembay-phy-usb.yaml @@ -1,7 +1,7 @@ # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) %YAML 1.2 --- -$id: http://devicetree.org/schemas/phy/intel,phy-keembay-usb.yaml# +$id: http://devicetree.org/schemas/phy/intel,keembay-phy-usb.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# title: Intel Keem Bay USB PHY bindings diff --git a/Documentation/devicetree/bindings/phy/mediatek,tphy.yaml b/Documentation/devicetree/bindings/phy/mediatek,tphy.yaml index ef9d9d4e6875..9e6c0f43f1c6 100644 --- a/Documentation/devicetree/bindings/phy/mediatek,tphy.yaml +++ b/Documentation/devicetree/bindings/phy/mediatek,tphy.yaml @@ -15,7 +15,7 @@ description: | controllers on MediaTek SoCs, includes USB2.0, USB3.0, PCIe and SATA. Layout differences of banks between T-PHY V1 (mt8173/mt2701) and - T-PHY V2 (mt2712) when works on USB mode: + T-PHY V2 (mt2712) / V3 (mt8195) when works on USB mode: ----------------------------------- Version 1: port offset bank @@ -34,7 +34,7 @@ description: | u2 port2 0x1800 U2PHY_COM ... - Version 2: + Version 2/3: port offset bank u2 port0 0x0000 MISC 0x0100 FMREG @@ -59,7 +59,8 @@ description: | SPLLC shared by u3 ports and FMREG shared by u2 ports on V1 are put back into each port; a new bank MISC for u2 ports and CHIP for u3 ports are - added on V2. 
+ added on V2; the FMREG bank for slew rate calibration is not used anymore + and reserved on V3; properties: $nodename: @@ -79,8 +80,11 @@ properties: - mediatek,mt2712-tphy - mediatek,mt7629-tphy - mediatek,mt8183-tphy - - mediatek,mt8195-tphy - const: mediatek,generic-tphy-v2 + - items: + - enum: + - mediatek,mt8195-tphy + - const: mediatek,generic-tphy-v3 - const: mediatek,mt2701-u3phy deprecated: true - const: mediatek,mt2712-u3phy @@ -91,7 +95,7 @@ properties: description: Register shared by multiple ports, exclude port's private register. It is needed for T-PHY V1, such as mt2701 and mt8173, but not for - T-PHY V2, such as mt2712. + T-PHY V2/V3, such as mt2712. maxItems: 1 "#address-cells": @@ -197,6 +201,22 @@ patternProperties: Specify the flag to enable BC1.2 if support it type: boolean + mediatek,syscon-type: + $ref: /schemas/types.yaml#/definitions/phandle-array + maxItems: 1 + description: + A phandle to syscon used to access the register of type switch, + the field should always be 3 cells long. + items: + items: + - description: + The first cell represents a phandle to syscon + - description: + The second cell represents the register offset + - description: + The third cell represents the index of config segment + enum: [0, 1, 2, 3] + required: - reg - "#phy-cells" diff --git a/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml index f0497b8623ad..75be5650a198 100644 --- a/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml +++ b/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml @@ -18,6 +18,7 @@ properties: compatible: enum: - qcom,ipq6018-qmp-pcie-phy + - qcom,ipq6018-qmp-usb3-phy - qcom,ipq8074-qmp-pcie-phy - qcom,ipq8074-qmp-usb3-phy - qcom,msm8996-qmp-pcie-phy @@ -27,6 +28,7 @@ properties: - qcom,msm8998-qmp-ufs-phy - qcom,msm8998-qmp-usb3-phy - qcom,sc7180-qmp-usb3-phy + - qcom,sc8180x-qmp-pcie-phy - qcom,sc8180x-qmp-ufs-phy - qcom,sc8180x-qmp-usb3-phy - qcom,sdm845-qhp-pcie-phy @@ -34,6 +36,7 @@ properties: - qcom,sdm845-qmp-ufs-phy - qcom,sdm845-qmp-usb3-phy - qcom,sdm845-qmp-usb3-uni-phy + - qcom,sm6115-qmp-ufs-phy - qcom,sm8150-qmp-ufs-phy - qcom,sm8150-qmp-usb3-phy - qcom,sm8150-qmp-usb3-uni-phy @@ -326,6 +329,7 @@ allOf: compatible: contains: enum: + - qcom,sc8180x-qmp-pcie-phy - qcom,sdm845-qhp-pcie-phy - qcom,sdm845-qmp-pcie-phy - qcom,sdx55-qmp-pcie-phy diff --git a/Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml index 217aa6c91893..1d49cc3d4eae 100644 --- a/Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml +++ b/Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml @@ -14,6 +14,7 @@ properties: compatible: enum: - qcom,sc7180-qmp-usb3-dp-phy + - qcom,sc8180x-qmp-usb3-dp-phy - qcom,sdm845-qmp-usb3-dp-phy - qcom,sm8250-qmp-usb3-dp-phy reg: diff --git a/Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml b/Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml index d5dc5a3cdceb..3a6e1165419c 100644 --- a/Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml +++ b/Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml @@ -30,6 +30,11 @@ properties: - renesas,usb2-phy-r8a77995 # R-Car D3 - const: renesas,rcar-gen3-usb2-phy + - items: + - enum: + - renesas,usb2-phy-r9a07g044 # RZ/G2{L,LC} + - const: renesas,rzg2l-usb2-phy # RZ/G2L family + reg: maxItems: 1 @@ -91,6 +96,16 @@ required: - clocks - '#phy-cells' +allOf: + - if: + properties: + compatible: + contains: + 
const: renesas,rzg2l-usb2-phy + then: + required: + - resets + additionalProperties: false examples: diff --git a/Documentation/devicetree/bindings/phy/samsung,ufs-phy.yaml b/Documentation/devicetree/bindings/phy/samsung,ufs-phy.yaml index 636cc501b54f..f6ed1a005e7a 100644 --- a/Documentation/devicetree/bindings/phy/samsung,ufs-phy.yaml +++ b/Documentation/devicetree/bindings/phy/samsung,ufs-phy.yaml @@ -16,6 +16,7 @@ properties: compatible: enum: - samsung,exynos7-ufs-phy + - samsung,exynosautov9-ufs-phy reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/phy/ti,phy-am654-serdes.txt b/Documentation/devicetree/bindings/phy/ti,phy-am654-serdes.txt deleted file mode 100644 index 64b286d2d398..000000000000 --- a/Documentation/devicetree/bindings/phy/ti,phy-am654-serdes.txt +++ /dev/null @@ -1,82 +0,0 @@ -TI AM654 SERDES - -Required properties: - - compatible: Should be "ti,phy-am654-serdes" - - reg : Address and length of the register set for the device. - - #phy-cells: determine the number of cells that should be given in the - phandle while referencing this phy. Should be "2". The 1st cell - corresponds to the phy type (should be one of the types specified in - include/dt-bindings/phy/phy.h) and the 2nd cell should be the serdes - lane function. - If SERDES0 is referenced 2nd cell should be: - 0 - USB3 - 1 - PCIe0 Lane0 - 2 - ICSS2 SGMII Lane0 - If SERDES1 is referenced 2nd cell should be: - 0 - PCIe1 Lane0 - 1 - PCIe0 Lane1 - 2 - ICSS2 SGMII Lane1 - - power-domains: As documented by the generic PM domain bindings in - Documentation/devicetree/bindings/power/power_domain.txt. - - clocks: List of clock-specifiers representing the input to the SERDES. - Should have 3 items representing the left input clock, external - reference clock and right input clock in that order. - - clock-output-names: List of clock names for each of the clock outputs of - SERDES. Should have 3 items for CMU reference clock, - left output clock and right output clock in that order. - - assigned-clocks: As defined in - Documentation/devicetree/bindings/clock/clock-bindings.txt - - assigned-clock-parents: As defined in - Documentation/devicetree/bindings/clock/clock-bindings.txt - - #clock-cells: Should be <1> to choose between the 3 output clocks. - Defined in Documentation/devicetree/bindings/clock/clock-bindings.txt - - The following macros are defined in dt-bindings/phy/phy-am654-serdes.h - for selecting the correct reference clock. This can be used while - specifying the clocks created by SERDES. - => AM654_SERDES_CMU_REFCLK - => AM654_SERDES_LO_REFCLK - => AM654_SERDES_RO_REFCLK - - - mux-controls: Phandle to the multiplexer that is used to select the lane - function. See #phy-cells above to see the multiplex values. - -Example: - -Example for SERDES0 is given below. It has 3 clock inputs; -left input reference clock as indicated by <&k3_clks 153 4>, external -reference clock as indicated by <&k3_clks 153 1> and right input -reference clock as indicated by <&serdes1 AM654_SERDES_LO_REFCLK>. (The -right input of SERDES0 is connected to the left output of SERDES1). - -SERDES0 registers 3 clock outputs as indicated in clock-output-names. The -first refers to the CMU reference clock, second refers to the left output -reference clock and the third refers to the right output reference clock. - -The assigned-clocks and assigned-clock-parents is used here to set the -parent of left input reference clock to MAINHSDIV_CLKOUT4 and parent of -CMU reference clock to left input reference clock. 
- -serdes0: serdes@900000 { - compatible = "ti,phy-am654-serdes"; - reg = <0x0 0x900000 0x0 0x2000>; - reg-names = "serdes"; - #phy-cells = <2>; - power-domains = <&k3_pds 153>; - clocks = <&k3_clks 153 4>, <&k3_clks 153 1>, - <&serdes1 AM654_SERDES_LO_REFCLK>; - clock-output-names = "serdes0_cmu_refclk", "serdes0_lo_refclk", - "serdes0_ro_refclk"; - assigned-clocks = <&k3_clks 153 4>, <&serdes0 AM654_SERDES_CMU_REFCLK>; - assigned-clock-parents = <&k3_clks 153 8>, <&k3_clks 153 4>; - ti,serdes-clk = <&serdes0_clk>; - mux-controls = <&serdes_mux 0>; - #clock-cells = <1>; -}; - -Example for PCIe consumer node using the SERDES PHY specifier is given below. -&pcie0_rc { - num-lanes = <2>; - phys = <&serdes0 PHY_TYPE_PCIE 1>, <&serdes1 PHY_TYPE_PCIE 1>; - phy-names = "pcie-phy0", "pcie-phy1"; -}; diff --git a/Documentation/devicetree/bindings/phy/ti,phy-am654-serdes.yaml b/Documentation/devicetree/bindings/phy/ti,phy-am654-serdes.yaml new file mode 100644 index 000000000000..62dcb84c08aa --- /dev/null +++ b/Documentation/devicetree/bindings/phy/ti,phy-am654-serdes.yaml @@ -0,0 +1,103 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/phy/ti,phy-am654-serdes.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: TI AM654 SERDES binding + +description: + This binding describes the TI AM654 SERDES. AM654 SERDES can be configured + to be used with either PCIe or USB or SGMII. + +maintainers: + - Kishon Vijay Abraham I <kishon@ti.com> + +properties: + compatible: + enum: + - ti,phy-am654-serdes + + reg: + maxItems: 1 + + reg-names: + items: + - const: serdes + + power-domains: + maxItems: 1 + + clocks: + maxItems: 3 + description: + Three input clocks referring to left input reference clock, refclk and right input reference + clock. + + assigned-clocks: + $ref: "/schemas/types.yaml#/definitions/phandle-array" + assigned-clock-parents: + $ref: "/schemas/types.yaml#/definitions/phandle-array" + + '#phy-cells': + const: 2 + description: + The 1st cell corresponds to the phy type (should be one of the types specified in + include/dt-bindings/phy/phy.h) and the 2nd cell should be the serdes lane function. + + ti,serdes-clk: + description: Phandle to the SYSCON entry required for configuring SERDES clock selection. + $ref: /schemas/types.yaml#/definitions/phandle + + '#clock-cells': + const: 1 + + mux-controls: + maxItems: 1 + description: Phandle to the SYSCON entry required for configuring SERDES lane function. 
+ + clock-output-names: + oneOf: + - description: Clock output names for SERDES 0 + items: + - const: serdes0_cmu_refclk + - const: serdes0_lo_refclk + - const: serdes0_ro_refclk + - description: Clock output names for SERDES 1 + items: + - const: serdes1_cmu_refclk + - const: serdes1_lo_refclk + - const: serdes1_ro_refclk + +required: + - compatible + - reg + - power-domains + - clocks + - assigned-clocks + - assigned-clock-parents + - ti,serdes-clk + - mux-controls + - clock-output-names + +additionalProperties: false + +examples: + - | + #include <dt-bindings/phy/phy-am654-serdes.h> + + serdes0: serdes@900000 { + compatible = "ti,phy-am654-serdes"; + reg = <0x900000 0x2000>; + reg-names = "serdes"; + #phy-cells = <2>; + power-domains = <&k3_pds 153>; + clocks = <&k3_clks 153 4>, <&k3_clks 153 1>, + <&serdes1 AM654_SERDES_LO_REFCLK>; + clock-output-names = "serdes0_cmu_refclk", "serdes0_lo_refclk", "serdes0_ro_refclk"; + assigned-clocks = <&k3_clks 153 4>, <&serdes0 AM654_SERDES_CMU_REFCLK>; + assigned-clock-parents = <&k3_clks 153 8>, <&k3_clks 153 4>; + ti,serdes-clk = <&serdes0_clk>; + mux-controls = <&serdes_mux 0>; + #clock-cells = <1>; + }; diff --git a/Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml b/Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml index 5272b6f284ba..dcd63908aeae 100644 --- a/Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml +++ b/Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml @@ -77,6 +77,34 @@ properties: Type-C spec states minimum CC pin debounce of 100 ms and maximum of 200 ms. However, some solutions might need more than 200 ms. + refclk-dig: + type: object + description: | + WIZ node should have subnode for refclk_dig to select the reference + clock source for the reference clock used in the PHY and PMA digital + logic. + properties: + clocks: + minItems: 2 + maxItems: 4 + description: Phandle to two (Torrent) or four (Sierra) clock nodes representing + the inputs to refclk_dig + + "#clock-cells": + const: 0 + + assigned-clocks: + maxItems: 1 + + assigned-clock-parents: + maxItems: 1 + + required: + - clocks + - "#clock-cells" + - assigned-clocks + - assigned-clock-parents + patternProperties: "^pll[0|1]-refclk$": type: object @@ -121,34 +149,6 @@ patternProperties: - clocks - "#clock-cells" - "^refclk-dig$": - type: object - description: | - WIZ node should have subnode for refclk_dig to select the reference - clock source for the reference clock used in the PHY and PMA digital - logic. - properties: - clocks: - minItems: 2 - maxItems: 4 - description: Phandle to two (Torrent) or four (Sierra) clock nodes representing - the inputs to refclk_dig - - "#clock-cells": - const: 0 - - assigned-clocks: - maxItems: 1 - - assigned-clock-parents: - maxItems: 1 - - required: - - clocks - - "#clock-cells" - - assigned-clocks - - assigned-clock-parents - "^serdes@[0-9a-f]+$": type: object description: | diff --git a/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml b/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml index 8850c01bd470..9b131c6facbc 100644 --- a/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml +++ b/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml @@ -57,12 +57,14 @@ properties: maxItems: 1 power-domains: + deprecated: true description: Power domain to use for enable control. This binding is only available if the compatible is chosen to regulator-fixed-domain. 
maxItems: 1 required-opps: + deprecated: true description: Performance state to use for enable control. This binding is only available if the compatible is chosen to regulator-fixed-domain. The diff --git a/Documentation/devicetree/bindings/regulator/mps,mpq7920.yaml b/Documentation/devicetree/bindings/regulator/mps,mpq7920.yaml index 12b8963615c3..c2e8c54e5311 100644 --- a/Documentation/devicetree/bindings/regulator/mps,mpq7920.yaml +++ b/Documentation/devicetree/bindings/regulator/mps,mpq7920.yaml @@ -36,12 +36,12 @@ properties: switching frequency must be one of following corresponding value 1.1MHz, 1.65MHz, 2.2MHz, 2.75MHz - patternProperties: - "^ldo[1-4]$": + ldortc: type: object $ref: regulator.yaml# - "^ldortc$": + patternProperties: + "^ldo[1-4]$": type: object $ref: regulator.yaml# diff --git a/Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml b/Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml index 8761437ed8ad..aabf50f5b39e 100644 --- a/Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml +++ b/Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml @@ -83,7 +83,8 @@ properties: unevaluatedProperties: false - "^vsnvs$": + properties: + vsnvs: type: object $ref: regulator.yaml# description: diff --git a/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml b/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml index 1d38ff76d18f..2b1f91603897 100644 --- a/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml +++ b/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml @@ -24,10 +24,10 @@ allOf: select: properties: compatible: - items: - - enum: - - sifive,fu540-c000-ccache - - sifive,fu740-c000-ccache + contains: + enum: + - sifive,fu540-c000-ccache + - sifive,fu740-c000-ccache required: - compatible diff --git a/Documentation/devicetree/bindings/rtc/faraday,ftrtc010.yaml b/Documentation/devicetree/bindings/rtc/faraday,ftrtc010.yaml index 657c13b62b67..056d42daae06 100644 --- a/Documentation/devicetree/bindings/rtc/faraday,ftrtc010.yaml +++ b/Documentation/devicetree/bindings/rtc/faraday,ftrtc010.yaml @@ -30,7 +30,6 @@ properties: maxItems: 1 clocks: - minItems: 2 items: - description: PCLK clocks - description: EXTCLK clocks. Faraday calls it CLK1HZ and says the clock diff --git a/Documentation/devicetree/bindings/sound/renesas,rsnd.yaml b/Documentation/devicetree/bindings/sound/renesas,rsnd.yaml index ee936d1aa724..c2930d65728e 100644 --- a/Documentation/devicetree/bindings/sound/renesas,rsnd.yaml +++ b/Documentation/devicetree/bindings/sound/renesas,rsnd.yaml @@ -114,7 +114,7 @@ properties: ports: $ref: /schemas/graph.yaml#/properties/ports - properties: + patternProperties: port(@[0-9a-f]+)?: $ref: audio-graph-port.yaml# unevaluatedProperties: false diff --git a/Documentation/devicetree/bindings/spi/spi-controller.yaml b/Documentation/devicetree/bindings/spi/spi-controller.yaml index faef4f6f55b8..8246891602e7 100644 --- a/Documentation/devicetree/bindings/spi/spi-controller.yaml +++ b/Documentation/devicetree/bindings/spi/spi-controller.yaml @@ -79,22 +79,7 @@ properties: description: The SPI controller acts as a slave, instead of a master. 
-allOf: - - if: - not: - required: - - spi-slave - then: - properties: - "#address-cells": - const: 1 - else: - properties: - "#address-cells": - const: 0 - -patternProperties: - "^slave$": + slave: type: object properties: @@ -105,6 +90,7 @@ patternProperties: required: - compatible +patternProperties: "^.*@[0-9a-f]+$": type: object @@ -180,6 +166,20 @@ patternProperties: - compatible - reg +allOf: + - if: + not: + required: + - spi-slave + then: + properties: + "#address-cells": + const: 1 + else: + properties: + "#address-cells": + const: 0 + additionalProperties: true examples: diff --git a/Documentation/devicetree/bindings/usb/nxp,isp1760.yaml b/Documentation/devicetree/bindings/usb/nxp,isp1760.yaml index a88f99adfe8e..f238848ad094 100644 --- a/Documentation/devicetree/bindings/usb/nxp,isp1760.yaml +++ b/Documentation/devicetree/bindings/usb/nxp,isp1760.yaml @@ -25,14 +25,12 @@ properties: interrupts: minItems: 1 - maxItems: 2 items: - description: Host controller interrupt - description: Device controller interrupt in isp1761 interrupt-names: minItems: 1 - maxItems: 2 items: - const: host - const: peripheral diff --git a/Documentation/driver-api/early-userspace/early_userspace_support.rst b/Documentation/driver-api/early-userspace/early_userspace_support.rst index 8a58c61932ff..61bdeac1bae5 100644 --- a/Documentation/driver-api/early-userspace/early_userspace_support.rst +++ b/Documentation/driver-api/early-userspace/early_userspace_support.rst @@ -69,17 +69,17 @@ early userspace image can be built by an unprivileged user. As a technical note, when directories and files are specified, the entire CONFIG_INITRAMFS_SOURCE is passed to -usr/gen_initramfs_list.sh. This means that CONFIG_INITRAMFS_SOURCE +usr/gen_initramfs.sh. This means that CONFIG_INITRAMFS_SOURCE can really be interpreted as any legal argument to -gen_initramfs_list.sh. If a directory is specified as an argument then +gen_initramfs.sh. If a directory is specified as an argument then the contents are scanned, uid/gid translation is performed, and usr/gen_init_cpio file directives are output. If a directory is -specified as an argument to usr/gen_initramfs_list.sh then the +specified as an argument to usr/gen_initramfs.sh then the contents of the file are simply copied to the output. All of the output directives from directory scanning and file contents copying are processed by usr/gen_init_cpio. -See also 'usr/gen_initramfs_list.sh -h'. +See also 'usr/gen_initramfs.sh -h'. Where's this all leading? ========================= diff --git a/Documentation/driver-api/fpga/fpga-bridge.rst b/Documentation/driver-api/fpga/fpga-bridge.rst index 198aadafd3e7..8d650b4e2ce6 100644 --- a/Documentation/driver-api/fpga/fpga-bridge.rst +++ b/Documentation/driver-api/fpga/fpga-bridge.rst @@ -4,11 +4,11 @@ FPGA Bridge API to implement a new FPGA bridge ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -* struct fpga_bridge — The FPGA Bridge structure -* struct fpga_bridge_ops — Low level Bridge driver ops -* devm_fpga_bridge_create() — Allocate and init a bridge struct -* fpga_bridge_register() — Register a bridge -* fpga_bridge_unregister() — Unregister a bridge +* struct fpga_bridge - The FPGA Bridge structure +* struct fpga_bridge_ops - Low level Bridge driver ops +* devm_fpga_bridge_create() - Allocate and init a bridge struct +* fpga_bridge_register() - Register a bridge +* fpga_bridge_unregister() - Unregister a bridge .. 
kernel-doc:: include/linux/fpga/fpga-bridge.h :functions: fpga_bridge diff --git a/Documentation/driver-api/fpga/fpga-mgr.rst b/Documentation/driver-api/fpga/fpga-mgr.rst index 917ee22db429..4d926b452cb3 100644 --- a/Documentation/driver-api/fpga/fpga-mgr.rst +++ b/Documentation/driver-api/fpga/fpga-mgr.rst @@ -101,12 +101,12 @@ in state. API for implementing a new FPGA Manager driver ---------------------------------------------- -* ``fpga_mgr_states`` — Values for :c:expr:`fpga_manager->state`. -* struct fpga_manager — the FPGA manager struct -* struct fpga_manager_ops — Low level FPGA manager driver ops -* devm_fpga_mgr_create() — Allocate and init a manager struct -* fpga_mgr_register() — Register an FPGA manager -* fpga_mgr_unregister() — Unregister an FPGA manager +* ``fpga_mgr_states`` - Values for :c:expr:`fpga_manager->state`. +* struct fpga_manager - the FPGA manager struct +* struct fpga_manager_ops - Low level FPGA manager driver ops +* devm_fpga_mgr_create() - Allocate and init a manager struct +* fpga_mgr_register() - Register an FPGA manager +* fpga_mgr_unregister() - Unregister an FPGA manager .. kernel-doc:: include/linux/fpga/fpga-mgr.h :functions: fpga_mgr_states diff --git a/Documentation/driver-api/fpga/fpga-programming.rst b/Documentation/driver-api/fpga/fpga-programming.rst index 002392dab04f..fb4da4240e96 100644 --- a/Documentation/driver-api/fpga/fpga-programming.rst +++ b/Documentation/driver-api/fpga/fpga-programming.rst @@ -84,10 +84,10 @@ will generate that list. Here's some sample code of what to do next:: API for programming an FPGA --------------------------- -* fpga_region_program_fpga() — Program an FPGA -* fpga_image_info() — Specifies what FPGA image to program -* fpga_image_info_alloc() — Allocate an FPGA image info struct -* fpga_image_info_free() — Free an FPGA image info struct +* fpga_region_program_fpga() - Program an FPGA +* fpga_image_info() - Specifies what FPGA image to program +* fpga_image_info_alloc() - Allocate an FPGA image info struct +* fpga_image_info_free() - Free an FPGA image info struct .. kernel-doc:: drivers/fpga/fpga-region.c :functions: fpga_region_program_fpga diff --git a/Documentation/driver-api/fpga/fpga-region.rst b/Documentation/driver-api/fpga/fpga-region.rst index 363a8171ab0a..2636a27c11b2 100644 --- a/Documentation/driver-api/fpga/fpga-region.rst +++ b/Documentation/driver-api/fpga/fpga-region.rst @@ -45,19 +45,19 @@ An example of usage can be seen in the probe function of [#f2]_. API to add a new FPGA region ---------------------------- -* struct fpga_region — The FPGA region struct -* devm_fpga_region_create() — Allocate and init a region struct -* fpga_region_register() — Register an FPGA region -* fpga_region_unregister() — Unregister an FPGA region +* struct fpga_region - The FPGA region struct +* devm_fpga_region_create() - Allocate and init a region struct +* fpga_region_register() - Register an FPGA region +* fpga_region_unregister() - Unregister an FPGA region The FPGA region's probe function will need to get a reference to the FPGA Manager it will be using to do the programming. This usually would happen during the region's probe function. -* fpga_mgr_get() — Get a reference to an FPGA manager, raise ref count -* of_fpga_mgr_get() — Get a reference to an FPGA manager, raise ref count, +* fpga_mgr_get() - Get a reference to an FPGA manager, raise ref count +* of_fpga_mgr_get() - Get a reference to an FPGA manager, raise ref count, given a device node. 
-* fpga_mgr_put() — Put an FPGA manager +* fpga_mgr_put() - Put an FPGA manager The FPGA region will need to specify which bridges to control while programming the FPGA. The region driver can build a list of bridges during probe time @@ -66,11 +66,11 @@ the list of bridges to program just before programming (:c:expr:`fpga_region->get_bridges`). The FPGA bridge framework supplies the following APIs to handle building or tearing down that list. -* fpga_bridge_get_to_list() — Get a ref of an FPGA bridge, add it to a +* fpga_bridge_get_to_list() - Get a ref of an FPGA bridge, add it to a list -* of_fpga_bridge_get_to_list() — Get a ref of an FPGA bridge, add it to a +* of_fpga_bridge_get_to_list() - Get a ref of an FPGA bridge, add it to a list, given a device node -* fpga_bridges_put() — Given a list of bridges, put them +* fpga_bridges_put() - Given a list of bridges, put them .. kernel-doc:: include/linux/fpga/fpga-region.h :functions: fpga_region diff --git a/Documentation/fault-injection/provoke-crashes.rst b/Documentation/fault-injection/provoke-crashes.rst index a20ba5d93932..3abe84225613 100644 --- a/Documentation/fault-injection/provoke-crashes.rst +++ b/Documentation/fault-injection/provoke-crashes.rst @@ -29,8 +29,7 @@ recur_count cpoint_name Where in the kernel to trigger the action. It can be one of INT_HARDWARE_ENTRY, INT_HW_IRQ_EN, INT_TASKLET_ENTRY, - FS_DEVRW, MEM_SWAPOUT, TIMERADD, SCSI_DISPATCH_CMD, - IDE_CORE_CP, or DIRECT + FS_DEVRW, MEM_SWAPOUT, TIMERADD, SCSI_QUEUE_RQ, or DIRECT. cpoint_type Indicates the action to be taken on hitting the crash point. diff --git a/Documentation/features/core/thread-info-in-task/arch-support.txt b/Documentation/features/core/thread-info-in-task/arch-support.txt new file mode 100644 index 000000000000..9f0259bbd7df --- /dev/null +++ b/Documentation/features/core/thread-info-in-task/arch-support.txt @@ -0,0 +1,32 @@ +# +# Feature name: thread-info-in-task +# Kconfig: THREAD_INFO_IN_TASK +# description: arch makes use of the core kernel facility to embedd thread_info in task_struct +# + ----------------------- + | arch |status| + ----------------------- + | alpha: | TODO | + | arc: | TODO | + | arm: | TODO | + | arm64: | ok | + | csky: | TODO | + | h8300: | TODO | + | hexagon: | TODO | + | ia64: | TODO | + | m68k: | TODO | + | microblaze: | TODO | + | mips: | TODO | + | nds32: | ok | + | nios2: | TODO | + | openrisc: | TODO | + | parisc: | TODO | + | powerpc: | ok | + | riscv: | ok | + | s390: | ok | + | sh: | TODO | + | sparc: | TODO | + | um: | TODO | + | x86: | ok | + | xtensa: | TODO | + ----------------------- diff --git a/Documentation/features/time/arch-tick-broadcast/arch-support.txt b/Documentation/features/time/arch-tick-broadcast/arch-support.txt index 8639fe8315f5..8dcaab070c7b 100644 --- a/Documentation/features/time/arch-tick-broadcast/arch-support.txt +++ b/Documentation/features/time/arch-tick-broadcast/arch-support.txt @@ -22,7 +22,7 @@ | openrisc: | TODO | | parisc: | TODO | | powerpc: | ok | - | riscv: | TODO | + | riscv: | ok | | s390: | TODO | | sh: | ok | | sparc: | TODO | diff --git a/Documentation/filesystems/ramfs-rootfs-initramfs.rst b/Documentation/filesystems/ramfs-rootfs-initramfs.rst index 4598b0d90b60..164960631925 100644 --- a/Documentation/filesystems/ramfs-rootfs-initramfs.rst +++ b/Documentation/filesystems/ramfs-rootfs-initramfs.rst @@ -170,7 +170,7 @@ Documentation/driver-api/early-userspace/early_userspace_support.rst for more de The kernel does not depend on external cpio tools. 
If you specify a directory instead of a configuration file, the kernel's build infrastructure creates a configuration file from that directory (usr/Makefile calls -usr/gen_initramfs_list.sh), and proceeds to package up that directory +usr/gen_initramfs.sh), and proceeds to package up that directory using the config file (by feeding it to usr/gen_init_cpio, which is created from usr/gen_init_cpio.c). The kernel's build-time cpio creation code is entirely self-contained, and the kernel's boot-time extractor is also diff --git a/Documentation/fpga/dfl.rst b/Documentation/fpga/dfl.rst index 75df90d1e54c..ef9eec71f6f3 100644 --- a/Documentation/fpga/dfl.rst +++ b/Documentation/fpga/dfl.rst @@ -10,7 +10,7 @@ Authors: - Xu Yilun <yilun.xu@intel.com> The Device Feature List (DFL) FPGA framework (and drivers according to -this framework) hides the very details of low layer hardwares and provides +this framework) hides the very details of low layer hardware and provides unified interfaces to userspace. Applications could use these interfaces to configure, enumerate, open and access FPGA accelerators on platforms which implement the DFL in the device memory. Besides this, the DFL framework @@ -205,7 +205,7 @@ given Device Feature Lists and create platform devices for feature devices also abstracts operations for the private features and exposes common ops to feature device drivers. -The FPGA DFL Device could be different hardwares, e.g. PCIe device, platform +The FPGA DFL Device could be different hardware, e.g. PCIe device, platform device and etc. Its driver module is always loaded first once the device is created by the system. This driver plays an infrastructural role in the driver architecture. It locates the DFLs in the device memory, handles them diff --git a/Documentation/gpu/rfc/i915_gem_lmem.rst b/Documentation/gpu/rfc/i915_gem_lmem.rst index 675ba8620d66..b421a3c1806e 100644 --- a/Documentation/gpu/rfc/i915_gem_lmem.rst +++ b/Documentation/gpu/rfc/i915_gem_lmem.rst @@ -18,114 +18,5 @@ real, with all the uAPI bits is: * Route shmem backend over to TTM SYSTEM for discrete * TTM purgeable object support * Move i915 buddy allocator over to TTM - * MMAP ioctl mode(see `I915 MMAP`_) - * SET/GET ioctl caching(see `I915 SET/GET CACHING`_) * Send RFC(with mesa-dev on cc) for final sign off on the uAPI * Add pciid for DG1 and turn on uAPI for real - -New object placement and region query uAPI -========================================== -Starting from DG1 we need to give userspace the ability to allocate buffers from -device local-memory. Currently the driver supports gem_create, which can place -buffers in system memory via shmem, and the usual assortment of other -interfaces, like dumb buffers and userptr. - -To support this new capability, while also providing a uAPI which will work -beyond just DG1, we propose to offer three new bits of uAPI: - -DRM_I915_QUERY_MEMORY_REGIONS ------------------------------ -New query ID which allows userspace to discover the list of supported memory -regions(like system-memory and local-memory) for a given device. We identify -each region with a class and instance pair, which should be unique. The class -here would be DEVICE or SYSTEM, and the instance would be zero, on platforms -like DG1. - -Side note: The class/instance design is borrowed from our existing engine uAPI, -where we describe every physical engine in terms of its class, and the -particular instance, since we can have more than one per class. 
- -In the future we also want to expose more information which can further -describe the capabilities of a region. - -.. kernel-doc:: include/uapi/drm/i915_drm.h - :functions: drm_i915_gem_memory_class drm_i915_gem_memory_class_instance drm_i915_memory_region_info drm_i915_query_memory_regions - -GEM_CREATE_EXT --------------- -New ioctl which is basically just gem_create but now allows userspace to provide -a chain of possible extensions. Note that if we don't provide any extensions and -set flags=0 then we get the exact same behaviour as gem_create. - -Side note: We also need to support PXP[1] in the near future, which is also -applicable to integrated platforms, and adds its own gem_create_ext extension, -which basically lets userspace mark a buffer as "protected". - -.. kernel-doc:: include/uapi/drm/i915_drm.h - :functions: drm_i915_gem_create_ext - -I915_GEM_CREATE_EXT_MEMORY_REGIONS ----------------------------------- -Implemented as an extension for gem_create_ext, we would now allow userspace to -optionally provide an immutable list of preferred placements at creation time, -in priority order, for a given buffer object. For the placements we expect -them each to use the class/instance encoding, as per the output of the regions -query. Having the list in priority order will be useful in the future when -placing an object, say during eviction. - -.. kernel-doc:: include/uapi/drm/i915_drm.h - :functions: drm_i915_gem_create_ext_memory_regions - -One fair criticism here is that this seems a little over-engineered[2]. If we -just consider DG1 then yes, a simple gem_create.flags or something is totally -all that's needed to tell the kernel to allocate the buffer in local-memory or -whatever. However looking to the future we need uAPI which can also support -upcoming Xe HP multi-tile architecture in a sane way, where there can be -multiple local-memory instances for a given device, and so using both class and -instance in our uAPI to describe regions is desirable, although specifically -for DG1 it's uninteresting, since we only have a single local-memory instance. - -Existing uAPI issues -==================== -Some potential issues we still need to resolve. - -I915 MMAP ---------- -In i915 there are multiple ways to MMAP GEM object, including mapping the same -object using different mapping types(WC vs WB), i.e multiple active mmaps per -object. TTM expects one MMAP at most for the lifetime of the object. If it -turns out that we have to backpedal here, there might be some potential -userspace fallout. - -I915 SET/GET CACHING --------------------- -In i915 we have set/get_caching ioctl. TTM doesn't let us to change this, but -DG1 doesn't support non-snooped pcie transactions, so we can just always -allocate as WB for smem-only buffers. If/when our hw gains support for -non-snooped pcie transactions then we must fix this mode at allocation time as -a new GEM extension. - -This is related to the mmap problem, because in general (meaning, when we're -not running on intel cpus) the cpu mmap must not, ever, be inconsistent with -allocation mode. - -Possible idea is to let the kernel picks the mmap mode for userspace from the -following table: - -smem-only: WB. Userspace does not need to call clflush. - -smem+lmem: We only ever allow a single mode, so simply allocate this as uncached -memory, and always give userspace a WC mapping. GPU still does snooped access -here(assuming we can't turn it off like on DG1), which is a bit inefficient. 
- -lmem only: always WC - -This means on discrete you only get a single mmap mode, all others must be -rejected. That's probably going to be a new default mode or something like -that. - -Links -===== -[1] https://patchwork.freedesktop.org/series/86798/ - -[2] https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/5599#note_553791 diff --git a/Documentation/i2c/index.rst b/Documentation/i2c/index.rst index 8b76217e370a..6270f1fd7d4e 100644 --- a/Documentation/i2c/index.rst +++ b/Documentation/i2c/index.rst @@ -17,6 +17,7 @@ Introduction busses/index i2c-topology muxes/i2c-mux-gpio + i2c-sysfs Writing device drivers ====================== diff --git a/Documentation/networking/af_xdp.rst b/Documentation/networking/af_xdp.rst index 42576880aa4a..60b217b436be 100644 --- a/Documentation/networking/af_xdp.rst +++ b/Documentation/networking/af_xdp.rst @@ -243,8 +243,8 @@ Configuration Flags and Socket Options These are the various configuration flags that can be used to control and monitor the behavior of AF_XDP sockets. -XDP_COPY and XDP_ZERO_COPY bind flags -------------------------------------- +XDP_COPY and XDP_ZEROCOPY bind flags +------------------------------------ When you bind to a socket, the kernel will first try to use zero-copy copy. If zero-copy is not supported, it will fall back on using copy @@ -252,7 +252,7 @@ mode, i.e. copying all packets out to user space. But if you would like to force a certain mode, you can use the following flags. If you pass the XDP_COPY flag to the bind call, the kernel will force the socket into copy mode. If it cannot use copy mode, the bind call will -fail with an error. Conversely, the XDP_ZERO_COPY flag will force the +fail with an error. Conversely, the XDP_ZEROCOPY flag will force the socket into zero-copy mode or fail. XDP_SHARED_UMEM bind flag diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst index 6ea91e41593f..c86628e6a235 100644 --- a/Documentation/networking/ethtool-netlink.rst +++ b/Documentation/networking/ethtool-netlink.rst @@ -212,6 +212,7 @@ Userspace to kernel: ``ETHTOOL_MSG_FEC_SET`` set FEC settings ``ETHTOOL_MSG_MODULE_EEPROM_GET`` read SFP module EEPROM ``ETHTOOL_MSG_STATS_GET`` get standard statistics + ``ETHTOOL_MSG_PHC_VCLOCKS_GET`` get PHC virtual clocks info ===================================== ================================ Kernel to userspace: @@ -250,6 +251,7 @@ Kernel to userspace: ``ETHTOOL_MSG_FEC_NTF`` FEC settings ``ETHTOOL_MSG_MODULE_EEPROM_GET_REPLY`` read SFP module EEPROM ``ETHTOOL_MSG_STATS_GET_REPLY`` standard statistics + ``ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY`` PHC virtual clocks info ======================================== ================================= ``GET`` requests are sent by userspace applications to retrieve device @@ -1477,6 +1479,25 @@ Low and high bounds are inclusive, for example: etherStatsPkts512to1023Octets 512 1023 ============================= ==== ==== +PHC_VCLOCKS_GET +=============== + +Query device PHC virtual clocks information. 
+ +Request contents: + + ==================================== ====== ========================== + ``ETHTOOL_A_PHC_VCLOCKS_HEADER`` nested request header + ==================================== ====== ========================== + +Kernel response contents: + + ==================================== ====== ========================== + ``ETHTOOL_A_PHC_VCLOCKS_HEADER`` nested reply header + ``ETHTOOL_A_PHC_VCLOCKS_NUM`` u32 PHC virtual clocks number + ``ETHTOOL_A_PHC_VCLOCKS_INDEX`` s32 PHC index array + ==================================== ====== ========================== + Request translation =================== @@ -1575,4 +1596,5 @@ are netlink only. n/a ``ETHTOOL_MSG_CABLE_TEST_ACT`` n/a ``ETHTOOL_MSG_CABLE_TEST_TDR_ACT`` n/a ``ETHTOOL_MSG_TUNNEL_INFO_GET`` + n/a ``ETHTOOL_MSG_PHC_VCLOCKS_GET`` =================================== ===================================== diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst index b3fa522e4cd9..316c7dfa9693 100644 --- a/Documentation/networking/ip-sysctl.rst +++ b/Documentation/networking/ip-sysctl.rst @@ -826,7 +826,7 @@ tcp_fastopen_blackhole_timeout_sec - INTEGER initial value when the blackhole issue goes away. 0 to disable the blackhole detection. - By default, it is set to 1hr. + By default, it is set to 0 (feature is disabled). tcp_fastopen_key - list of comma separated 32-digit hexadecimal INTEGERs The list consists of a primary key and an optional backup key. The diff --git a/Documentation/networking/netdev-FAQ.rst b/Documentation/networking/netdev-FAQ.rst index 91b2cf712801..e26532f49760 100644 --- a/Documentation/networking/netdev-FAQ.rst +++ b/Documentation/networking/netdev-FAQ.rst @@ -228,6 +228,23 @@ before posting to the mailing list. The patchwork build bot instance gets overloaded very easily and netdev@vger really doesn't need more traffic if we can help it. +netdevsim is great, can I extend it for my out-of-tree tests? +------------------------------------------------------------- + +No, `netdevsim` is a test vehicle solely for upstream tests. +(Please add your tests under tools/testing/selftests/.) + +We also give no guarantees that `netdevsim` won't change in the future +in a way which would break what would normally be considered uAPI. + +Is netdevsim considered a "user" of an API? +------------------------------------------- + +Linux kernel has a long standing rule that no API should be added unless +it has a real, in-tree user. Mock-ups and tests based on `netdevsim` are +strongly encouraged when adding new APIs, but `netdevsim` in itself +is **not** considered a use case/user. + Any other tips to help ensure my net/net-next patch gets OK'd? -------------------------------------------------------------- Attention to detail. Re-read your own work as if you were the diff --git a/Documentation/networking/nf_conntrack-sysctl.rst b/Documentation/networking/nf_conntrack-sysctl.rst index 0467b30e4abe..024d784157c8 100644 --- a/Documentation/networking/nf_conntrack-sysctl.rst +++ b/Documentation/networking/nf_conntrack-sysctl.rst @@ -110,6 +110,12 @@ nf_conntrack_tcp_be_liberal - BOOLEAN Be conservative in what you do, be liberal in what you accept from others. If it's non-zero, we mark only out of window RST segments as INVALID. +nf_conntrack_tcp_ignore_invalid_rst - BOOLEAN + - 0 - disabled (default) + - 1 - enabled + + If it's 1, we don't mark out of window RST segments as INVALID. 
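For context, the new knob behaves like any other conntrack sysctl; a minimal C sketch of enabling it, assuming the conventional /proc/sys/net/netfilter/ layout (this sketch is illustrative and not part of the patch)::

    /* Enable nf_conntrack_tcp_ignore_invalid_rst via procfs.
     * Assumes conntrack sysctls are exposed under /proc/sys/net/netfilter/.
     */
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        const char *path =
            "/proc/sys/net/netfilter/nf_conntrack_tcp_ignore_invalid_rst";
        FILE *f = fopen(path, "w");

        if (!f) {
            perror(path);
            return EXIT_FAILURE;
        }
        if (fputs("1\n", f) == EOF) {
            perror("fputs");
            fclose(f);
            return EXIT_FAILURE;
        }
        fclose(f);
        return EXIT_SUCCESS;
    }

The equivalent one-liner is ``sysctl -w net.netfilter.nf_conntrack_tcp_ignore_invalid_rst=1``.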
+ nf_conntrack_tcp_loose - BOOLEAN - 0 - disabled - not 0 - enabled (default) @@ -185,19 +191,9 @@ nf_flowtable_tcp_timeout - INTEGER (seconds) TCP connections may be offloaded from nf conntrack to nf flow table. Once aged, the connection is returned to nf conntrack with tcp pickup timeout. -nf_flowtable_tcp_pickup - INTEGER (seconds) - default 120 - - TCP connection timeout after being aged from nf flow table offload. - nf_flowtable_udp_timeout - INTEGER (seconds) default 30 Control offload timeout for udp connections. UDP connections may be offloaded from nf conntrack to nf flow table. Once aged, the connection is returned to nf conntrack with udp pickup timeout. - -nf_flowtable_udp_pickup - INTEGER (seconds) - default 30 - - UDP connection timeout after being aged from nf flow table offload. diff --git a/Documentation/networking/operstates.rst b/Documentation/networking/operstates.rst index 9c918f7cb0e8..1ee2141e8ef1 100644 --- a/Documentation/networking/operstates.rst +++ b/Documentation/networking/operstates.rst @@ -73,7 +73,9 @@ IF_OPER_LOWERLAYERDOWN (3): state (f.e. VLAN). IF_OPER_TESTING (4): - Unused in current kernel. + Interface is in testing mode, for example executing driver self-tests + or media (cable) test. It can't be used for normal traffic until tests + complete. IF_OPER_DORMANT (5): Interface is L1 up, but waiting for an external event, f.e. for a @@ -111,7 +113,7 @@ it as lower layer. Note that for certain kind of soft-devices, which are not managing any real hardware, it is possible to set this bit from userspace. One -should use TVL IFLA_CARRIER to do so. +should use TLV IFLA_CARRIER to do so. netif_carrier_ok() can be used to query that bit. diff --git a/Documentation/networking/tipc.rst b/Documentation/networking/tipc.rst index 76775f24cdc8..ab63d298cca2 100644 --- a/Documentation/networking/tipc.rst +++ b/Documentation/networking/tipc.rst @@ -4,10 +4,125 @@ Linux Kernel TIPC ================= -TIPC (Transparent Inter Process Communication) is a protocol that is -specially designed for intra-cluster communication. +Introduction +============ -For more information about TIPC, see http://tipc.sourceforge.net. +TIPC (Transparent Inter Process Communication) is a protocol that is specially +designed for intra-cluster communication. It can be configured to transmit +messages either on UDP or directly across Ethernet. Message delivery is +sequence guaranteed, loss free and flow controlled. Latency times are shorter +than with any other known protocol, while maximal throughput is comparable to +that of TCP. + +TIPC Features +------------- + +- Cluster wide IPC service + + Have you ever wished you had the convenience of Unix Domain Sockets even when + transmitting data between cluster nodes? Where you yourself determine the + addresses you want to bind to and use? Where you don't have to perform DNS + lookups and worry about IP addresses? Where you don't have to start timers + to monitor the continuous existence of peer sockets? And yet without the + downsides of that socket type, such as the risk of lingering inodes? + + Welcome to the Transparent Inter Process Communication service, TIPC in short, + which gives you all of this, and a lot more. + +- Service Addressing + + A fundamental concept in TIPC is that of Service Addressing which makes it + possible for a programmer to chose his own address, bind it to a server + socket and let client programs use only that address for sending messages. 
+ +- Service Tracking + + A client wanting to wait for the availability of a server, uses the Service + Tracking mechanism to subscribe for binding and unbinding/close events for + sockets with the associated service address. + + The service tracking mechanism can also be used for Cluster Topology Tracking, + i.e., subscribing for availability/non-availability of cluster nodes. + + Likewise, the service tracking mechanism can be used for Cluster Connectivity + Tracking, i.e., subscribing for up/down events for individual links between + cluster nodes. + +- Transmission Modes + + Using a service address, a client can send datagram messages to a server socket. + + Using the same address type, it can establish a connection towards an accepting + server socket. + + It can also use a service address to create and join a Communication Group, + which is the TIPC manifestation of a brokerless message bus. + + Multicast with very good performance and scalability is available both in + datagram mode and in communication group mode. + +- Inter Node Links + + Communication between any two nodes in a cluster is maintained by one or two + Inter Node Links, which both guarantee data traffic integrity and monitor + the peer node's availability. + +- Cluster Scalability + + By applying the Overlapping Ring Monitoring algorithm on the inter node links + it is possible to scale TIPC clusters up to 1000 nodes with a maintained + neighbor failure discovery time of 1-2 seconds. For smaller clusters this + time can be made much shorter. + +- Neighbor Discovery + + Neighbor Node Discovery in the cluster is done by Ethernet broadcast or UDP + multicast, when any of those services are available. If not, configured peer + IP addresses can be used. + +- Configuration + + When running TIPC in single node mode no configuration whatsoever is needed. + When running in cluster mode TIPC must as a minimum be given a node address + (before Linux 4.17) and told which interface to attach to. The "tipc" + configuration tool makes is possible to add and maintain many more + configuration parameters. + +- Performance + + TIPC message transfer latency times are better than in any other known protocol. + Maximal byte throughput for inter-node connections is still somewhat lower than + for TCP, while they are superior for intra-node and inter-container throughput + on the same host. + +- Language Support + + The TIPC user API has support for C, Python, Perl, Ruby, D and Go. + +More Information +---------------- + +- How to set up TIPC: + + http://tipc.io/getting_started.html + +- How to program with TIPC: + + http://tipc.io/programming.html + +- How to contribute to TIPC: + +- http://tipc.io/contacts.html + +- More details about TIPC specification: + + http://tipc.io/protocol.html + + +Implementation +============== + +TIPC is implemented as a kernel module in net/tipc/ directory. TIPC Base Types --------------- diff --git a/Documentation/trace/coresight/coresight-config.rst b/Documentation/trace/coresight/coresight-config.rst new file mode 100644 index 000000000000..a4e3ef295240 --- /dev/null +++ b/Documentation/trace/coresight/coresight-config.rst @@ -0,0 +1,244 @@ +.. 
SPDX-License-Identifier: GPL-2.0 + +====================================== +CoreSight System Configuration Manager +====================================== + + :Author: Mike Leach <mike.leach@linaro.org> + :Date: October 2020 + +Introduction +============ + +The CoreSight System Configuration manager is an API that allows the +programming of the CoreSight system with pre-defined configurations that +can then be easily enabled from sysfs or perf. + +Many CoreSight components can be programmed in complex ways - especially ETMs. +In addition, components can interact across the CoreSight system, often via +the cross trigger components such as CTI and CTM. These system settings can +be defined and enabled as named configurations. + + +Basic Concepts +============== + +This section introduces the basic concepts of a CoreSight system configuration. + + +Features +-------- + +A feature is a named set of programming for a CoreSight device. The programming +is device dependent, and can be defined in terms of absolute register values, +resource usage and parameter values. + +The feature is defined using a descriptor. This descriptor is used to load onto +a matching device, either when the feature is loaded into the system, or when the +CoreSight device is registered with the configuration manager. + +The load process involves interpreting the descriptor into a set of register +accesses in the driver - the resource usage and parameter descriptions +translated into appropriate register accesses. This interpretation makes it easy +and efficient for the feature to be programmed onto the device when required. + +The feature will not be active on the device until the feature is enabled, and +the device itself is enabled. When the device is enabled then enabled features +will be programmed into the device hardware. + +A feature is enabled as part of a configuration being enabled on the system. + + +Parameter Value +~~~~~~~~~~~~~~~ + +A parameter value is a named value that may be set by the user prior to the +feature being enabled that can adjust the behaviour of the operation programmed +by the feature. + +For example, this could be a count value in a programmed operation that repeats +at a given rate. When the feature is enabled then the current value of the +parameter is used in programming the device. + +The feature descriptor defines a default value for a parameter, which is used +if the user does not supply a new value. + +Users can update parameter values using the configfs API for the CoreSight +system - which is described below. + +The current value of the parameter is loaded into the device when the feature +is enabled on that device. + + +Configurations +-------------- + +A configuration defines a set of features that are to be used in a trace +session where the configuration is selected. For any trace session only one +configuration may be selected. + +The features defined may be on any type of device that is registered +to support system configuration. A configuration may select features to be +enabled on a class of devices - i.e. any ETMv4, or specific devices, e.g. a +specific CTI on the system. + +As with the feature, a descriptor is used to define the configuration. +This will define the features that must be enabled as part of the configuration +as well as any preset values that can be used to override default parameter +values. + + +Preset Values +~~~~~~~~~~~~~ + +Preset values are easily selectable sets of parameter values for the features +that the configuration uses. 
The number of values in a single preset set, equals +the sum of parameter values in the features used by the configuration. + +e.g. a configuration consists of 3 features, one has 2 parameters, one has +a single parameter, and another has no parameters. A single preset set will +therefore have 3 values. + +Presets are optionally defined by the configuration, up to 15 can be defined. +If no preset is selected, then the parameter values defined in the feature +are used as normal. + + +Operation +~~~~~~~~~ + +The following steps take place in the operation of a configuration. + +1) In this example, the configuration is 'autofdo', which has an + associated feature 'strobing' that works on ETMv4 CoreSight Devices. + +2) The configuration is enabled. For example 'perf' may select the + configuration as part of its command line:: + + perf record -e cs_etm/autofdo/ myapp + + which will enable the 'autofdo' configuration. + +3) perf starts tracing on the system. As each ETMv4 that perf uses for + trace is enabled, the configuration manager will check if the ETMv4 + has a feature that relates to the currently active configuration. + In this case 'strobing' is enabled & programmed into the ETMv4. + +4) When the ETMv4 is disabled, any registers marked as needing to be + saved will be read back. + +5) At the end of the perf session, the configuration will be disabled. + + +Viewing Configurations and Features +=================================== + +The set of configurations and features that are currently loaded into the +system can be viewed using the configfs API. + +Mount configfs as normal and the 'cs-syscfg' subsystem will appear:: + + $ ls /config + cs-syscfg stp-policy + +This has two sub-directories:: + + $ cd cs-syscfg/ + $ ls + configurations features + +The system has the configuration 'autofdo' built in. It may be examined as +follows:: + + $ cd configurations/ + $ ls + autofdo + $ cd autofdo/ + $ ls + description preset1 preset3 preset5 preset7 preset9 + feature_refs preset2 preset4 preset6 preset8 + $ cat description + Setup ETMs with strobing for autofdo + $ cat feature_refs + strobing + +Each preset declared has a preset<n> subdirectory declared. The values for +the preset can be examined:: + + $ cat preset1/values + strobing.window = 0x1388 strobing.period = 0x2 + $ cat preset2/values + strobing.window = 0x1388 strobing.period = 0x4 + +The features referenced by the configuration can be examined in the features +directory:: + + $ cd ../../features/strobing/ + $ ls + description matches nr_params params + $ cat description + Generate periodic trace capture windows. + parameter 'window': a number of CPU cycles (W) + parameter 'period': trace enabled for W cycles every period x W cycles + $ cat matches + SRC_ETMV4 + $ cat nr_params + 2 + +Move to the params directory to examine and adjust parameters:: + + cd params + $ ls + period window + $ cd period + $ ls + value + $ cat value + 0x2710 + # echo 15000 > value + # cat value + 0x3a98 + +Parameters adjusted in this way are reflected in all device instances that have +loaded the feature. 
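The configfs transcript above maps directly onto ordinary file I/O, so parameter updates are easy to script; a minimal C sketch of the same 'period' update, assuming configfs is mounted at /config as in the walkthrough (the path is taken from the transcript, not from a separately documented ABI)::

    /* Update the 'strobing' feature's 'period' parameter via configfs.
     * Path taken from the walkthrough above; assumes configfs at /config.
     */
    #include <stdio.h>
    #include <stdlib.h>

    #define PARAM "/config/cs-syscfg/features/strobing/params/period/value"

    int main(void)
    {
        char buf[32];
        FILE *f = fopen(PARAM, "w");

        if (!f) {
            perror(PARAM);
            return EXIT_FAILURE;
        }
        fputs("15000\n", f);   /* decimal in, as in the shell example */
        fclose(f);

        f = fopen(PARAM, "r"); /* kernel reports the value back in hex */
        if (f && fgets(buf, sizeof(buf), f))
            printf("period = %s", buf);
        if (f)
            fclose(f);
        return EXIT_SUCCESS;
    }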
+ + +Using Configurations in perf +============================ + +The configurations loaded into the CoreSight configuration management are +also declared in the perf 'cs_etm' event infrastructure so that they can +be selected when running trace under perf:: + + $ ls /sys/devices/cs_etm + configurations format perf_event_mux_interval_ms sinks type + events nr_addr_filters power + +Key directories here are 'configurations' - which lists the loaded +configurations, and 'events' - a generic perf directory which allows +selection on the perf command line.:: + + $ ls configurations/ + autofdo + $ cat configurations/autofdo + 0xa7c3dddd + +As with the sinks entries, this provides a hash of the configuration name. +The entry in the 'events' directory uses perfs built in syntax generator +to substitute the syntax for the name when evaluating the command:: + + $ ls events/ + autofdo + $ cat events/autofdo + configid=0xa7c3dddd + +The 'autofdo' configuration may be selected on the perf command line:: + + $ perf record -e cs_etm/autofdo/u --per-thread <application> + +A preset to override the current parameter values can also be selected:: + + $ perf record -e cs_etm/autofdo,preset=1/u --per-thread <application> + +When configurations are selected in this way, then the trace sink used is +automatically selected. diff --git a/Documentation/trace/coresight/coresight.rst b/Documentation/trace/coresight/coresight.rst index 1ec8dc35b1d8..a15571d96cc8 100644 --- a/Documentation/trace/coresight/coresight.rst +++ b/Documentation/trace/coresight/coresight.rst @@ -620,6 +620,19 @@ channels on the CTM (Cross Trigger Matrix). A separate documentation file is provided to explain the use of these devices. (Documentation/trace/coresight/coresight-ect.rst) [#fourth]_. +CoreSight System Configuration +------------------------------ + +CoreSight components can be complex devices with many programming options. +Furthermore, components can be programmed to interact with each other across the +complete system. + +A CoreSight System Configuration manager is provided to allow these complex programming +configurations to be selected and used easily from perf and sysfs. + +See the separate document for further information. +(Documentation/trace/coresight/coresight-config.rst) [#fifth]_. + .. [#first] Documentation/ABI/testing/sysfs-bus-coresight-devices-stm @@ -628,3 +641,5 @@ A separate documentation file is provided to explain the use of these devices. .. [#third] https://github.com/Linaro/perf-opencsd .. [#fourth] Documentation/trace/coresight/coresight-ect.rst + +.. [#fifth] Documentation/trace/coresight/coresight-config.rst diff --git a/Documentation/trace/histogram.rst b/Documentation/trace/histogram.rst index b71e09f745c3..f99be8062bc8 100644 --- a/Documentation/trace/histogram.rst +++ b/Documentation/trace/histogram.rst @@ -191,7 +191,7 @@ Documentation written by Tom Zanussi with the event, in nanoseconds. May be modified by .usecs to have timestamps interpreted as microseconds. - cpu int the cpu on which the event occurred. + common_cpu int the cpu on which the event occurred. 
====================== ==== ======================================= Extended error information diff --git a/Documentation/translations/zh_CN/process/2.Process.rst b/Documentation/translations/zh_CN/process/2.Process.rst index 229629e305ca..4a6ed0219494 100644 --- a/Documentation/translations/zh_CN/process/2.Process.rst +++ b/Documentation/translations/zh_CN/process/2.Process.rst @@ -47,7 +47,7 @@ (顺便说一句,值得注意的是,合并窗口期间集成的更改并不是凭空产生的;它们是经 提前收集、测试和分级的。稍后将详细描述该过程的工作方式。) -合并窗口持续大约两周。在这段时间结束时,LinusTorvalds将声明窗口已关闭,并 +合并窗口持续大约两周。在这段时间结束时,Linus Torvalds将声明窗口已关闭,并 释放第一个“rc”内核。例如,对于目标为5.6的内核,在合并窗口结束时发生的释放 将被称为5.6-rc1。-rc1 版本是一个信号,表示合并新特性的时间已经过去,稳定下一 个内核的时间已经到来。 @@ -168,7 +168,7 @@ Greg Kroah-Hartman领导。稳定团队将使用5.x.y编号方案不定期地发 补丁如何进入内核 ---------------- -只有一个人可以将补丁合并到主线内核存储库中:LinusTorvalds。但是,在进入 +只有一个人可以将补丁合并到主线内核存储库中:Linus Torvalds。但是,在进入 2.6.38内核的9500多个补丁中,只有112个(大约1.3%)是由Linus自己直接选择的。 内核项目已经发展到一个没有一个开发人员可以在没有支持的情况下检查和选择每个 补丁的规模。内核开发人员处理这种增长的方式是使用围绕信任链构建的助理系统。 diff --git a/Documentation/userspace-api/seccomp_filter.rst b/Documentation/userspace-api/seccomp_filter.rst index d61219889e49..539e9d4a4860 100644 --- a/Documentation/userspace-api/seccomp_filter.rst +++ b/Documentation/userspace-api/seccomp_filter.rst @@ -263,7 +263,7 @@ Userspace can also add file descriptors to the notifying process via ``ioctl(SECCOMP_IOCTL_NOTIF_ADDFD)``. The ``id`` member of ``struct seccomp_notif_addfd`` should be the same ``id`` as in ``struct seccomp_notif``. The ``newfd_flags`` flag may be used to set flags -like O_EXEC on the file descriptor in the notifying process. If the supervisor +like O_CLOEXEC on the file descriptor in the notifying process. If the supervisor wants to inject the file descriptor with a specific number, the ``SECCOMP_ADDFD_FLAG_SETFD`` flag can be used, and set the ``newfd`` member to the specific number to use. If that file descriptor is already open in the diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index c7b165ca70b6..dae68e68ca23 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -855,7 +855,7 @@ in-kernel irqchip (GIC), and for in-kernel irqchip can tell the GIC to use PPIs designated for specific cpus. The irq field is interpreted like this:: - bits: | 31 ... 28 | 27 ... 24 | 23 ... 16 | 15 ... 0 | + bits: | 31 ... 28 | 27 ... 24 | 23 ... 16 | 15 ... 0 | field: | vcpu2_index | irq_type | vcpu_index | irq_id | The irq_type field has the following values: @@ -2149,10 +2149,10 @@ prior to calling the KVM_RUN ioctl. 
Errors: ====== ============================================================ - ENOENT no such register - EINVAL invalid register ID, or no such register or used with VMs in + ENOENT no such register + EINVAL invalid register ID, or no such register or used with VMs in protected virtualization mode on s390 - EPERM (arm64) register access not allowed before vcpu finalization + EPERM (arm64) register access not allowed before vcpu finalization ====== ============================================================ (These error codes are indicative only: do not rely on a specific error @@ -2590,10 +2590,10 @@ following id bit patterns:: Errors include: ======== ============================================================ - ENOENT no such register - EINVAL invalid register ID, or no such register or used with VMs in + ENOENT no such register + EINVAL invalid register ID, or no such register or used with VMs in protected virtualization mode on s390 - EPERM (arm64) register access not allowed before vcpu finalization + EPERM (arm64) register access not allowed before vcpu finalization ======== ============================================================ (These error codes are indicative only: do not rely on a specific error @@ -3112,13 +3112,13 @@ current state. "addr" is ignored. Errors: ====== ================================================================= - EINVAL the target is unknown, or the combination of features is invalid. - ENOENT a features bit specified is unknown. + EINVAL the target is unknown, or the combination of features is invalid. + ENOENT a features bit specified is unknown. ====== ================================================================= This tells KVM what type of CPU to present to the guest, and what -optional features it should have. This will cause a reset of the cpu -registers to their initial values. If this is not called, KVM_RUN will +optional features it should have. This will cause a reset of the cpu +registers to their initial values. If this is not called, KVM_RUN will return ENOEXEC for that vcpu. The initial values are defined as: @@ -3239,8 +3239,8 @@ VCPU matching underlying host. Errors: ===== ============================================================== - E2BIG the reg index list is too big to fit in the array specified by - the user (the number required will be written into n). + E2BIG the reg index list is too big to fit in the array specified by + the user (the number required will be written into n). ===== ============================================================== :: @@ -3288,7 +3288,7 @@ specific device. ARM/arm64 divides the id field into two parts, a device id and an address type id specific to the individual device:: - bits: | 63 ... 32 | 31 ... 16 | 15 ... 0 | + bits: | 63 ... 32 | 31 ... 16 | 15 ... 0 | field: | 0x00000000 | device id | addr type id | ARM/arm64 currently only require this when using the in-kernel GIC @@ -7049,7 +7049,7 @@ In combination with KVM_CAP_X86_USER_SPACE_MSR, this allows user space to trap and emulate MSRs that are outside of the scope of KVM as well as limit the attack surface on KVM's MSR emulation code. 
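Before relying on the user-space MSR filtering described above, a VMM would normally probe for it; a minimal sketch using the standard KVM_CHECK_EXTENSION ioctl on /dev/kvm (capability name as found in the 5.14-era uapi headers)::

    /* Probe a KVM capability before depending on it. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/kvm.h>

    int main(void)
    {
        int ret, kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);

        if (kvm < 0) {
            perror("/dev/kvm");
            return 1;
        }
        ret = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_X86_USER_SPACE_MSR);
        printf("KVM_CAP_X86_USER_SPACE_MSR: %s\n",
               ret > 0 ? "supported" : "not supported");
        close(kvm);
        return 0;
    }

A return value greater than zero means the capability is available; per-VM capabilities can be probed the same way on a VM file descriptor.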
-8.28 KVM_CAP_ENFORCE_PV_CPUID +8.28 KVM_CAP_ENFORCE_PV_FEATURE_CPUID ----------------------------- Architectures: x86 diff --git a/Documentation/virt/kvm/locking.rst b/Documentation/virt/kvm/locking.rst index 35eca377543d..88fa495abbac 100644 --- a/Documentation/virt/kvm/locking.rst +++ b/Documentation/virt/kvm/locking.rst @@ -25,10 +25,10 @@ On x86: - vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock -- kvm->arch.mmu_lock is an rwlock. kvm->arch.tdp_mmu_pages_lock is - taken inside kvm->arch.mmu_lock, and cannot be taken without already - holding kvm->arch.mmu_lock (typically with ``read_lock``, otherwise - there's no need to take kvm->arch.tdp_mmu_pages_lock at all). +- kvm->arch.mmu_lock is an rwlock. kvm->arch.tdp_mmu_pages_lock and + kvm->arch.mmu_unsync_pages_lock are taken inside kvm->arch.mmu_lock, and + cannot be taken without already holding kvm->arch.mmu_lock (typically with + ``read_lock`` for the TDP MMU, thus the need for additional spinlocks). Everything else is a leaf: no other lock is taken inside the critical sections. diff --git a/LICENSES/dual/CC-BY-4.0 b/LICENSES/dual/CC-BY-4.0 index 45a81b8e4669..869cad3d1643 100644 --- a/LICENSES/dual/CC-BY-4.0 +++ b/LICENSES/dual/CC-BY-4.0 @@ -392,7 +392,7 @@ Section 8 -- Interpretation. Creative Commons is not a party to its public licenses. Notwithstanding, Creative Commons may elect to apply one of its public licenses to material it publishes and in those instances -will be considered the “Licensor.” The text of the Creative Commons +will be considered the "Licensor." The text of the Creative Commons public licenses is dedicated to the public domain under the CC0 Public Domain Dedication. Except for the limited purpose of indicating that material is shared under a Creative Commons public license or as diff --git a/MAINTAINERS b/MAINTAINERS index a61f4f3b78a9..c6b8a720c0bc 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -445,7 +445,7 @@ F: drivers/platform/x86/wmi.c F: include/uapi/linux/wmi.h ACRN HYPERVISOR SERVICE MODULE -M: Shuo Liu <shuo.a.liu@intel.com> +M: Fei Li <fei1.li@intel.com> L: acrn-dev@lists.projectacrn.org (subscribers-only) S: Supported W: https://projectacrn.org @@ -933,6 +933,7 @@ F: drivers/video/fbdev/geode/ AMD IOMMU (AMD-VI) M: Joerg Roedel <joro@8bytes.org> +R: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> L: iommu@lists.linux-foundation.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git @@ -3865,6 +3866,16 @@ L: bcm-kernel-feedback-list@broadcom.com S: Maintained F: drivers/mtd/nand/raw/brcmnand/ +BROADCOM STB PCIE DRIVER +M: Jim Quinlan <jim2101024@gmail.com> +M: Nicolas Saenz Julienne <nsaenz@kernel.org> +M: Florian Fainelli <f.fainelli@gmail.com> +M: bcm-kernel-feedback-list@broadcom.com +L: linux-pci@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml +F: drivers/pci/controller/pcie-brcmstb.c + BROADCOM SYSTEMPORT ETHERNET DRIVER M: Florian Fainelli <f.fainelli@gmail.com> L: bcm-kernel-feedback-list@broadcom.com @@ -4497,7 +4508,7 @@ L: clang-built-linux@googlegroups.com S: Supported W: https://clangbuiltlinux.github.io/ B: https://github.com/ClangBuiltLinux/linux/issues -C: irc://chat.freenode.net/clangbuiltlinux +C: irc://irc.libera.chat/clangbuiltlinux F: Documentation/kbuild/llvm.rst F: include/linux/compiler-clang.h F: scripts/clang-tools/ @@ -7857,9 +7868,9 @@ S: Maintained F: drivers/input/touchscreen/goodix.c GOOGLE ETHERNET DRIVERS -M: Catherine Sullivan <csully@google.com> -R: Sagi Shahar <sagis@google.com> 
-R: Jon Olson <jonolson@google.com> +M: Jeroen de Borst <jeroendb@google.com> +R: Catherine Sullivan <csully@google.com> +R: David Awogbemila <awogbemila@google.com> L: netdev@vger.kernel.org S: Supported F: Documentation/networking/device_drivers/ethernet/google/gve.rst @@ -11326,6 +11337,12 @@ W: https://linuxtv.org T: git git://linuxtv.org/media_tree.git F: drivers/media/radio/radio-maxiradio* +MCBA MICROCHIP CAN BUS ANALYZER TOOL DRIVER +R: Yasushi SHOJI <yashi@spacecubics.com> +L: linux-can@vger.kernel.org +S: Maintained +F: drivers/net/can/usb/mcba_usb.c + MCAN MMIO DEVICE DRIVER M: Chandrasekar Ramakrishnan <rcsekar@samsung.com> L: linux-can@vger.kernel.org @@ -11757,6 +11774,7 @@ F: drivers/char/hw_random/mtk-rng.c MEDIATEK SWITCH DRIVER M: Sean Wang <sean.wang@mediatek.com> M: Landen Chao <Landen.Chao@mediatek.com> +M: DENG Qingfang <dqfext@gmail.com> L: netdev@vger.kernel.org S: Maintained F: drivers/net/dsa/mt7530.* @@ -14422,6 +14440,13 @@ S: Maintained F: Documentation/devicetree/bindings/pci/hisilicon-histb-pcie.txt F: drivers/pci/controller/dwc/pcie-histb.c +PCIE DRIVER FOR INTEL LGM GW SOC +M: Rahul Tanwar <rtanwar@maxlinear.com> +L: linux-pci@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/pci/intel-gw-pcie.yaml +F: drivers/pci/controller/dwc/pcie-intel-gw.c + PCIE DRIVER FOR MEDIATEK M: Ryder Lee <ryder.lee@mediatek.com> M: Jianjun Wang <jianjun.wang@mediatek.com> @@ -15009,6 +15034,13 @@ F: drivers/net/phy/dp83640* F: drivers/ptp/* F: include/linux/ptp_cl* +PTP VIRTUAL CLOCK SUPPORT +M: Yangbo Lu <yangbo.lu@nxp.com> +L: netdev@vger.kernel.org +S: Maintained +F: drivers/ptp/ptp_vclock.c +F: net/ethtool/phc_vclocks.c + PTRACE SUPPORT M: Oleg Nesterov <oleg@redhat.com> S: Maintained @@ -15459,6 +15491,8 @@ M: Pan, Xinhui <Xinhui.Pan@amd.com> L: amd-gfx@lists.freedesktop.org S: Supported T: git https://gitlab.freedesktop.org/agd5f/linux.git +B: https://gitlab.freedesktop.org/drm/amd/-/issues +C: irc://irc.oftc.net/radeon F: drivers/gpu/drm/amd/ F: drivers/gpu/drm/radeon/ F: include/uapi/drm/amdgpu_drm.h @@ -15786,7 +15820,7 @@ F: Documentation/devicetree/bindings/i2c/renesas,iic-emev2.yaml F: drivers/i2c/busses/i2c-emev2.c RENESAS ETHERNET DRIVERS -R: Sergei Shtylyov <sergei.shtylyov@gmail.com> +R: Sergey Shtylyov <s.shtylyov@omp.ru> L: netdev@vger.kernel.org L: linux-renesas-soc@vger.kernel.org F: Documentation/devicetree/bindings/net/renesas,*.yaml @@ -17798,7 +17832,7 @@ F: include/linux/sync_file.h F: include/uapi/linux/sync_file.h SYNOPSYS ARC ARCHITECTURE -M: Vineet Gupta <vgupta@synopsys.com> +M: Vineet Gupta <vgupta@kernel.org> L: linux-snps-arc@lists.infradead.org S: Supported T: git git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc.git @@ -19114,7 +19148,7 @@ M: Mauro Carvalho Chehab <mchehab@kernel.org> L: linux-usb@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/phy/hisilicon,hi3670-usb3.yaml -F: drivers/phy/hisilicon/phy-kirin970-usb3.c +F: drivers/phy/hisilicon/phy-hi3670-usb3.c USB ISP116X DRIVER M: Olav Kongas <ok@artecdesign.ee> @@ -19792,6 +19826,14 @@ L: netdev@vger.kernel.org S: Supported F: drivers/ptp/ptp_vmw.c +VMWARE VMCI DRIVER +M: Jorgen Hansen <jhansen@vmware.com> +M: Vishnu Dasa <vdasa@vmware.com> +L: linux-kernel@vger.kernel.org +L: pv-drivers@vmware.com (private) +S: Maintained +F: drivers/misc/vmw_vmci/ + VMWARE VMMOUSE SUBDRIVER M: "VMware Graphics" <linux-graphics-maintainer@vmware.com> M: "VMware, Inc." 
<pv-drivers@vmware.com> @@ -19992,7 +20034,8 @@ F: Documentation/devicetree/bindings/extcon/wlf,arizona.yaml F: Documentation/devicetree/bindings/mfd/wlf,arizona.yaml F: Documentation/devicetree/bindings/mfd/wm831x.txt F: Documentation/devicetree/bindings/regulator/wlf,arizona.yaml -F: Documentation/devicetree/bindings/sound/wlf,arizona.yaml +F: Documentation/devicetree/bindings/sound/wlf,*.yaml +F: Documentation/devicetree/bindings/sound/wm* F: Documentation/hwmon/wm83??.rst F: arch/arm/mach-s3c/mach-crag6410* F: drivers/clk/clk-wm83*.c @@ -2,7 +2,7 @@ VERSION = 5 PATCHLEVEL = 14 SUBLEVEL = 0 -EXTRAVERSION = -rc1 +EXTRAVERSION = -rc7 NAME = Opossums on Parade # *DOCUMENTATION* @@ -546,7 +546,6 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \ PHONY += scripts_basic scripts_basic: $(Q)$(MAKE) $(build)=scripts/basic - $(Q)rm -f .tmp_quiet_recordmcount PHONY += outputmakefile ifdef building_out_of_srctree @@ -728,11 +727,12 @@ $(KCONFIG_CONFIG): # This exploits the 'multi-target pattern rule' trick. # The syncconfig should be executed only once to make all the targets. # (Note: use the grouped target '&:' when we bump to GNU Make 4.3) -quiet_cmd_syncconfig = SYNC $@ - cmd_syncconfig = $(MAKE) -f $(srctree)/Makefile syncconfig - +# +# Do not use $(call cmd,...) here. That would suppress prompts from syncconfig, +# so you cannot notice that Kconfig is waiting for the user input. %/config/auto.conf %/config/auto.conf.cmd %/generated/autoconf.h: $(KCONFIG_CONFIG) - +$(call cmd,syncconfig) + $(Q)$(kecho) " SYNC $@" + $(Q)$(MAKE) -f $(srctree)/Makefile syncconfig else # !may-sync-config # External modules and some install targets need include/generated/autoconf.h # and include/config/auto.conf but do not care if they are up-to-date. @@ -802,7 +802,7 @@ else # Warn about unmarked fall-throughs in switch statement. # Disabled for clang while comment to attribute conversion happens and # https://github.com/ClangBuiltLinux/linux/issues/636 is discussed. -KBUILD_CFLAGS += $(call cc-option,-Wimplicit-fallthrough,) +KBUILD_CFLAGS += $(call cc-option,-Wimplicit-fallthrough=5,) endif # These warnings generated too much noise in a regular build. @@ -1317,6 +1317,16 @@ scripts_unifdef: scripts_basic $(Q)$(MAKE) $(build)=scripts scripts/unifdef # --------------------------------------------------------------------------- +# Install + +# Many distributions have the custom install script, /sbin/installkernel. +# If DKMS is installed, 'make install' will eventually recuses back +# to the this Makefile to build and install external modules. +# Cancel sub_make_done so that options such as M=, V=, etc. are parsed. + +install: sub_make_done := + +# --------------------------------------------------------------------------- # Tools ifdef CONFIG_STACK_VALIDATION diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 77d3280dc678..6c50877841df 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -14,7 +14,6 @@ config ALPHA select PCI_SYSCALL if PCI select HAVE_AOUT select HAVE_ASM_MODVERSIONS - select HAVE_IDE select HAVE_PCSPKR_PLATFORM select HAVE_PERF_EVENTS select NEED_DMA_MAP_STATE @@ -532,7 +531,7 @@ config SMP will run faster if you say N here. See also the SMP-HOWTO available at - <http://www.tldp.org/docs.html#howto>. + <https://www.tldp.org/docs.html#howto>. If you don't know what to do here, say N. 
diff --git a/arch/alpha/boot/bootp.c b/arch/alpha/boot/bootp.c index 00266e6e1b71..b4faba2432d5 100644 --- a/arch/alpha/boot/bootp.c +++ b/arch/alpha/boot/bootp.c @@ -23,7 +23,7 @@ #include "ksize.h" extern unsigned long switch_to_osf_pal(unsigned long nr, - struct pcb_struct * pcb_va, struct pcb_struct * pcb_pa, + struct pcb_struct *pcb_va, struct pcb_struct *pcb_pa, unsigned long *vptb); extern void move_stack(unsigned long new_stack); diff --git a/arch/alpha/boot/bootpz.c b/arch/alpha/boot/bootpz.c index 43af71835adf..90a2b341e9c0 100644 --- a/arch/alpha/boot/bootpz.c +++ b/arch/alpha/boot/bootpz.c @@ -200,7 +200,7 @@ extern char _end; START_ADDR KSEG address of the entry point of kernel code. ZERO_PGE KSEG address of page full of zeroes, but - upon entry to kerne cvan be expected + upon entry to kernel, it can be expected to hold the parameter list and possible INTRD information. diff --git a/arch/alpha/boot/misc.c b/arch/alpha/boot/misc.c index d65192202703..325d4dd4f904 100644 --- a/arch/alpha/boot/misc.c +++ b/arch/alpha/boot/misc.c @@ -30,7 +30,7 @@ extern long srm_printk(const char *, ...) __attribute__ ((format (printf, 1, 2))); /* - * gzip delarations + * gzip declarations */ #define OF(args) args #define STATIC static diff --git a/arch/alpha/configs/defconfig b/arch/alpha/configs/defconfig index dd2dd9f0861f..7f1ca30b115b 100644 --- a/arch/alpha/configs/defconfig +++ b/arch/alpha/configs/defconfig @@ -70,3 +70,4 @@ CONFIG_DEBUG_INFO=y CONFIG_ALPHA_LEGACY_START_ADDRESS=y CONFIG_MATHEMU=y CONFIG_CRYPTO_HMAC=y +CONFIG_DEVTMPFS=y diff --git a/arch/alpha/include/asm/compiler.h b/arch/alpha/include/asm/compiler.h index 5159ba259d65..ae645959018a 100644 --- a/arch/alpha/include/asm/compiler.h +++ b/arch/alpha/include/asm/compiler.h @@ -4,15 +4,4 @@ #include <uapi/asm/compiler.h> -/* Some idiots over in <linux/compiler.h> thought inline should imply - always_inline. This breaks stuff. We'll include this file whenever - we run into such problems. */ - -#include <linux/compiler.h> -#undef inline -#undef __inline__ -#undef __inline -#undef __always_inline -#define __always_inline inline __attribute__((always_inline)) - #endif /* __ALPHA_COMPILER_H */ diff --git a/arch/alpha/include/asm/syscall.h b/arch/alpha/include/asm/syscall.h index 11c688c1d7ec..f21babaeed85 100644 --- a/arch/alpha/include/asm/syscall.h +++ b/arch/alpha/include/asm/syscall.h @@ -9,4 +9,10 @@ static inline int syscall_get_arch(struct task_struct *task) return AUDIT_ARCH_ALPHA; } +static inline long syscall_get_return_value(struct task_struct *task, + struct pt_regs *regs) +{ + return regs->r0; +} + #endif /* _ASM_ALPHA_SYSCALL_H */ diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index d5367a1c6300..d31167e3269c 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -834,7 +834,7 @@ SYSCALL_DEFINE5(osf_setsysinfo, unsigned long, op, void __user *, buffer, return -EFAULT; state = ¤t_thread_info()->ieee_state; - /* Update softare trap enable bits. */ + /* Update software trap enable bits. */ *state = (*state & ~IEEE_SW_MASK) | (swcr & IEEE_SW_MASK); /* Update the real fpcr. */ @@ -854,7 +854,7 @@ SYSCALL_DEFINE5(osf_setsysinfo, unsigned long, op, void __user *, buffer, state = ¤t_thread_info()->ieee_state; exc &= IEEE_STATUS_MASK; - /* Update softare trap enable bits. */ + /* Update software trap enable bits. 
*/ swcr = (*state & IEEE_SW_MASK) | exc; *state |= exc; diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c index e7a59d927d78..efcf7321701b 100644 --- a/arch/alpha/kernel/perf_event.c +++ b/arch/alpha/kernel/perf_event.c @@ -574,7 +574,7 @@ static void alpha_pmu_start(struct perf_event *event, int flags) * Check that CPU performance counters are supported. * - currently support EV67 and later CPUs. * - actually some later revisions of the EV6 have the same PMC model as the - * EV67 but we don't do suffiently deep CPU detection to detect them. + * EV67 but we don't do sufficiently deep CPU detection to detect them. * Bad luck to the very few people who might have one, I guess. */ static int supported_cpu(void) diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c index ef0c08ed0481..a5123ea426ce 100644 --- a/arch/alpha/kernel/process.c +++ b/arch/alpha/kernel/process.c @@ -256,7 +256,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, childstack->r26 = (unsigned long) ret_from_kernel_thread; childstack->r9 = usp; /* function */ childstack->r10 = kthread_arg; - childregs->hae = alpha_mv.hae_cache, + childregs->hae = alpha_mv.hae_cache; childti->pcb.usp = 0; return 0; } diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c index 7d56c217b235..b4fbbba30aa2 100644 --- a/arch/alpha/kernel/setup.c +++ b/arch/alpha/kernel/setup.c @@ -319,18 +319,19 @@ setup_memory(void *kernel_end) i, cluster->usage, cluster->start_pfn, cluster->start_pfn + cluster->numpages); - /* Bit 0 is console/PALcode reserved. Bit 1 is - non-volatile memory -- we might want to mark - this for later. */ - if (cluster->usage & 3) - continue; - end = cluster->start_pfn + cluster->numpages; if (end > max_low_pfn) max_low_pfn = end; memblock_add(PFN_PHYS(cluster->start_pfn), cluster->numpages << PAGE_SHIFT); + + /* Bit 0 is console/PALcode reserved. Bit 1 is + non-volatile memory -- we might want to mark + this for later. */ + if (cluster->usage & 3) + memblock_reserve(PFN_PHYS(cluster->start_pfn), + cluster->numpages << PAGE_SHIFT); } /* diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c index 4b2575f936d4..cb64e4797d2a 100644 --- a/arch/alpha/kernel/smp.c +++ b/arch/alpha/kernel/smp.c @@ -582,7 +582,7 @@ void smp_send_stop(void) { cpumask_t to_whom; - cpumask_copy(&to_whom, cpu_possible_mask); + cpumask_copy(&to_whom, cpu_online_mask); cpumask_clear_cpu(smp_processor_id(), &to_whom); #ifdef DEBUG_IPI_MSG if (hard_smp_processor_id() != boot_cpu_id) diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c index 53adf43dcd44..96fd6ff3fe81 100644 --- a/arch/alpha/kernel/sys_nautilus.c +++ b/arch/alpha/kernel/sys_nautilus.c @@ -212,7 +212,7 @@ nautilus_init_pci(void) /* Use default IO. */ pci_add_resource(&bridge->windows, &ioport_resource); - /* Irongate PCI memory aperture, calculate requred size before + /* Irongate PCI memory aperture, calculate required size before setting it up. */ pci_add_resource(&bridge->windows, &irongate_mem); diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c index 921d4b6e4d95..5398f982bdd1 100644 --- a/arch/alpha/kernel/traps.c +++ b/arch/alpha/kernel/traps.c @@ -730,7 +730,7 @@ do_entUnaUser(void __user * va, unsigned long opcode, long error; /* Check the UAC bits to decide what the user wants us to do - with the unaliged access. */ + with the unaligned access. 
*/ if (!(current_thread_info()->status & TS_UAC_NOPRINT)) { if (__ratelimit(&ratelimit)) { diff --git a/arch/alpha/math-emu/math.c b/arch/alpha/math-emu/math.c index d568cd9a3e43..f7cef66af88d 100644 --- a/arch/alpha/math-emu/math.c +++ b/arch/alpha/math-emu/math.c @@ -65,7 +65,7 @@ static long (*save_emul) (unsigned long pc); long do_alpha_fp_emul_imprecise(struct pt_regs *, unsigned long); long do_alpha_fp_emul(unsigned long); -int init_module(void) +static int alpha_fp_emul_init_module(void) { save_emul_imprecise = alpha_fp_emul_imprecise; save_emul = alpha_fp_emul; @@ -73,12 +73,14 @@ int init_module(void) alpha_fp_emul = do_alpha_fp_emul; return 0; } +module_init(alpha_fp_emul_init_module); -void cleanup_module(void) +static void alpha_fp_emul_cleanup_module(void) { alpha_fp_emul_imprecise = save_emul_imprecise; alpha_fp_emul = save_emul; } +module_exit(alpha_fp_emul_cleanup_module); #undef alpha_fp_emul_imprecise #define alpha_fp_emul_imprecise do_alpha_fp_emul_imprecise @@ -401,3 +403,5 @@ alpha_fp_emul_imprecise (struct pt_regs *regs, unsigned long write_mask) egress: return si_code; } + +EXPORT_SYMBOL(__udiv_qrnnd); diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index d8f51eb8963b..b5bf68e74732 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -409,7 +409,7 @@ choice help Depending on the configuration, CPU can contain DSP registers (ACC0_GLO, ACC0_GHI, DSP_BFLY0, DSP_CTRL, DSP_FFT_CTRL). - Bellow is options describing how to handle these registers in + Below are options describing how to handle these registers in interrupt entry / exit and in context switch. config ARC_DSP_NONE diff --git a/arch/arc/include/asm/checksum.h b/arch/arc/include/asm/checksum.h index 69debd77cd04..0b485800a392 100644 --- a/arch/arc/include/asm/checksum.h +++ b/arch/arc/include/asm/checksum.h @@ -24,7 +24,7 @@ */ static inline __sum16 csum_fold(__wsum s) { - unsigned r = s << 16 | s >> 16; /* ror */ + unsigned int r = s << 16 | s >> 16; /* ror */ s = ~s; s -= r; return s >> 16; diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h index 30b9ae511ea9..e1971d34ef30 100644 --- a/arch/arc/include/asm/perf_event.h +++ b/arch/arc/include/asm/perf_event.h @@ -123,7 +123,7 @@ static const char * const arc_pmu_ev_hw_map[] = { #define C(_x) PERF_COUNT_HW_CACHE_##_x #define CACHE_OP_UNSUPPORTED 0xffff -static const unsigned arc_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { +static const unsigned int arc_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [C(L1D)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = PERF_COUNT_ARC_LDC, diff --git a/arch/arc/kernel/fpu.c b/arch/arc/kernel/fpu.c index c67c0f0f5f77..ec640219d989 100644 --- a/arch/arc/kernel/fpu.c +++ b/arch/arc/kernel/fpu.c @@ -57,23 +57,26 @@ void fpu_save_restore(struct task_struct *prev, struct task_struct *next) void fpu_init_task(struct pt_regs *regs) { + const unsigned int fwe = 0x80000000; + /* default rounding mode */ write_aux_reg(ARC_REG_FPU_CTRL, 0x100); - /* set "Write enable" to allow explicit write to exception flags */ - write_aux_reg(ARC_REG_FPU_STATUS, 0x80000000); + /* Initialize to zero: setting requires FWE be set */ + write_aux_reg(ARC_REG_FPU_STATUS, fwe); } void fpu_save_restore(struct task_struct *prev, struct task_struct *next) { struct arc_fpu *save = &prev->thread.fpu; struct arc_fpu *restore = &next->thread.fpu; + const unsigned int fwe = 0x80000000; save->ctrl = read_aux_reg(ARC_REG_FPU_CTRL); save->status = read_aux_reg(ARC_REG_FPU_STATUS); write_aux_reg(ARC_REG_FPU_CTRL, restore->ctrl); - 
write_aux_reg(ARC_REG_FPU_STATUS, restore->status); + write_aux_reg(ARC_REG_FPU_STATUS, (fwe | restore->status)); } #endif diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c index 47bab67f8649..9e28058cdba8 100644 --- a/arch/arc/kernel/unwind.c +++ b/arch/arc/kernel/unwind.c @@ -260,7 +260,7 @@ static void init_unwind_hdr(struct unwind_table *table, { const u8 *ptr; unsigned long tableSize = table->size, hdrSize; - unsigned n; + unsigned int n; const u32 *fde; struct { u8 version; @@ -462,7 +462,7 @@ static uleb128_t get_uleb128(const u8 **pcur, const u8 *end) { const u8 *cur = *pcur; uleb128_t value; - unsigned shift; + unsigned int shift; for (shift = 0, value = 0; cur < end; shift += 7) { if (shift + 7 > 8 * sizeof(value) @@ -483,7 +483,7 @@ static sleb128_t get_sleb128(const u8 **pcur, const u8 *end) { const u8 *cur = *pcur; sleb128_t value; - unsigned shift; + unsigned int shift; for (shift = 0, value = 0; cur < end; shift += 7) { if (shift + 7 > 8 * sizeof(value) @@ -609,7 +609,7 @@ static unsigned long read_pointer(const u8 **pLoc, const void *end, static signed fde_pointer_type(const u32 *cie) { const u8 *ptr = (const u8 *)(cie + 2); - unsigned version = *ptr; + unsigned int version = *ptr; if (*++ptr) { const char *aug; @@ -904,7 +904,7 @@ int arc_unwind(struct unwind_frame_info *frame) const u8 *ptr = NULL, *end = NULL; unsigned long pc = UNW_PC(frame) - frame->call_frame; unsigned long startLoc = 0, endLoc = 0, cfa; - unsigned i; + unsigned int i; signed ptrType = -1; uleb128_t retAddrReg = 0; const struct unwind_table *table; diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S index e2146a8da195..529ae50f9fe2 100644 --- a/arch/arc/kernel/vmlinux.lds.S +++ b/arch/arc/kernel/vmlinux.lds.S @@ -88,6 +88,8 @@ SECTIONS CPUIDLE_TEXT LOCK_TEXT KPROBES_TEXT + IRQENTRY_TEXT + SOFTIRQENTRY_TEXT *(.fixup) *(.gnu.warning) } diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 3ea1c417339f..2fb7012c3246 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -95,7 +95,6 @@ config ARM select HAVE_FUNCTION_TRACER if !XIP_KERNEL select HAVE_GCC_PLUGINS select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7) - select HAVE_IDE if PCI || ISA || PCMCIA select HAVE_IRQ_TIME_ACCOUNTING select HAVE_KERNEL_GZIP select HAVE_KERNEL_LZ4 @@ -361,7 +360,6 @@ config ARCH_FOOTBRIDGE bool "FootBridge" select CPU_SA110 select FOOTBRIDGE - select HAVE_IDE select NEED_MACH_IO_H if !MMU select NEED_MACH_MEMORY_H help @@ -395,7 +393,7 @@ config ARCH_IXP4XX select IXP4XX_IRQ select IXP4XX_TIMER # With the new PCI driver this is not needed - select NEED_MACH_IO_H if PCI_IXP4XX_LEGACY + select NEED_MACH_IO_H if IXP4XX_PCI_LEGACY select USB_EHCI_BIG_ENDIAN_DESC select USB_EHCI_BIG_ENDIAN_MMIO help @@ -430,7 +428,6 @@ config ARCH_PXA select GENERIC_IRQ_MULTI_HANDLER select GPIO_PXA select GPIOLIB - select HAVE_IDE select IRQ_DOMAIN select PLAT_PXA select SPARSE_IRQ @@ -446,7 +443,6 @@ config ARCH_RPC select ARM_HAS_SG_CHAIN select CPU_SA110 select FIQ - select HAVE_IDE select HAVE_PATA_PLATFORM select ISA_DMA_API select LEGACY_TIMER_TICK @@ -469,7 +465,6 @@ config ARCH_SA1100 select CPU_SA1100 select GENERIC_IRQ_MULTI_HANDLER select GPIOLIB - select HAVE_IDE select IRQ_DOMAIN select ISA select NEED_MACH_MEMORY_H @@ -505,7 +500,6 @@ config ARCH_OMAP1 select GENERIC_IRQ_CHIP select GENERIC_IRQ_MULTI_HANDLER select GPIOLIB - select HAVE_IDE select HAVE_LEGACY_CLK select IRQ_DOMAIN select NEED_MACH_IO_H if PCCARD diff --git a/arch/arm/boot/dts/am437x-l4.dtsi 
b/arch/arm/boot/dts/am437x-l4.dtsi index 40ef3973f2a9..ba58e6b0da1d 100644 --- a/arch/arm/boot/dts/am437x-l4.dtsi +++ b/arch/arm/boot/dts/am437x-l4.dtsi @@ -1595,7 +1595,7 @@ compatible = "ti,am4372-d_can", "ti,am3352-d_can"; reg = <0x0 0x2000>; clocks = <&dcan1_fck>; - clock-name = "fck"; + clock-names = "fck"; syscon-raminit = <&scm_conf 0x644 1>; interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>; status = "disabled"; diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts index aae0af10a5b1..2aa75abf85a9 100644 --- a/arch/arm/boot/dts/am43x-epos-evm.dts +++ b/arch/arm/boot/dts/am43x-epos-evm.dts @@ -582,7 +582,7 @@ status = "okay"; pinctrl-names = "default"; pinctrl-0 = <&i2c0_pins>; - clock-frequency = <400000>; + clock-frequency = <100000>; tps65218: tps65218@24 { reg = <0x24>; diff --git a/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts b/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts index 33e413ca07e4..9b4cf5ebe6d5 100644 --- a/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts +++ b/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts @@ -4,6 +4,7 @@ #include "aspeed-g5.dtsi" #include <dt-bindings/gpio/aspeed-gpio.h> #include <dt-bindings/i2c/i2c.h> +#include <dt-bindings/interrupt-controller/irq.h> /{ model = "ASRock E3C246D4I BMC"; @@ -73,7 +74,8 @@ &vuart { status = "okay"; - aspeed,sirq-active-high; + aspeed,lpc-io-reg = <0x2f8>; + aspeed,lpc-interrupts = <3 IRQ_TYPE_LEVEL_HIGH>; }; &mac0 { diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts index d26a9e16ff7c..aa24cac8e5be 100644 --- a/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts +++ b/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts @@ -406,14 +406,14 @@ reg = <0x69>; }; - power-supply@6a { + power-supply@6b { compatible = "ibm,cffps"; - reg = <0x6a>; + reg = <0x6b>; }; - power-supply@6b { + power-supply@6d { compatible = "ibm,cffps"; - reg = <0x6b>; + reg = <0x6d>; }; }; @@ -2832,6 +2832,7 @@ &emmc { status = "okay"; + clk-phase-mmc-hs200 = <180>, <180>; }; &fsim0 { diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts index 941c0489479a..481d0ee1f85f 100644 --- a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts +++ b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts @@ -280,10 +280,7 @@ /*W0-W7*/ "","","","","","","","", /*X0-X7*/ "","","","","","","","", /*Y0-Y7*/ "","","","","","","","", - /*Z0-Z7*/ "","","","","","","","", - /*AA0-AA7*/ "","","","","","","","", - /*AB0-AB7*/ "","","","","","","","", - /*AC0-AC7*/ "","","","","","","",""; + /*Z0-Z7*/ "","","","","","","",""; pin_mclr_vpp { gpio-hog; diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts b/arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts index e863ec088970..e33153dcaea8 100644 --- a/arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts +++ b/arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts @@ -136,10 +136,7 @@ /*W0-W7*/ "","","","","","","","", /*X0-X7*/ "","","","","","","","", /*Y0-Y7*/ "","","","","","","","", - /*Z0-Z7*/ "","","","","","","","", - /*AA0-AA7*/ "","","","","","","","", - /*AB0-AB7*/ "","","","","","","","", - /*AC0-AC7*/ "","","","","","","",""; + /*Z0-Z7*/ "","","","","","","",""; }; &fmc { @@ -189,6 +186,7 @@ &emmc { status = "okay"; + clk-phase-mmc-hs200 = <36>, <270>; }; &fsim0 { diff --git a/arch/arm/boot/dts/imx53-m53menlo.dts b/arch/arm/boot/dts/imx53-m53menlo.dts index f98691ae4415..d3082b9774e4 100644 --- a/arch/arm/boot/dts/imx53-m53menlo.dts +++ b/arch/arm/boot/dts/imx53-m53menlo.dts @@ -388,13 +388,13 @@ 
pinctrl_power_button: powerbutgrp { fsl,pins = < - MX53_PAD_SD2_DATA2__GPIO1_13 0x1e4 + MX53_PAD_SD2_DATA0__GPIO1_15 0x1e4 >; }; pinctrl_power_out: poweroutgrp { fsl,pins = < - MX53_PAD_SD2_DATA0__GPIO1_15 0x1e4 + MX53_PAD_SD2_DATA2__GPIO1_13 0x1e4 >; }; diff --git a/arch/arm/boot/dts/imx53-ppd.dts b/arch/arm/boot/dts/imx53-ppd.dts index 5a5fa6190a52..37d0cffea99c 100644 --- a/arch/arm/boot/dts/imx53-ppd.dts +++ b/arch/arm/boot/dts/imx53-ppd.dts @@ -70,6 +70,12 @@ clock-frequency = <11289600>; }; + achc_24M: achc-clock { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <24000000>; + }; + sgtlsound: sound { compatible = "fsl,imx53-cpuvo-sgtl5000", "fsl,imx-audio-sgtl5000"; @@ -314,16 +320,13 @@ &gpio4 12 GPIO_ACTIVE_LOW>; status = "okay"; - spidev0: spi@0 { - compatible = "ge,achc"; - reg = <0>; - spi-max-frequency = <1000000>; - }; - - spidev1: spi@1 { - compatible = "ge,achc"; - reg = <1>; - spi-max-frequency = <1000000>; + spidev0: spi@1 { + compatible = "ge,achc", "nxp,kinetis-k20"; + reg = <1>, <0>; + vdd-supply = <&reg_3v3>; + vdda-supply = <&reg_3v3>; + clocks = <&achc_24M>; + reset-gpios = <&gpio3 6 GPIO_ACTIVE_LOW>; }; gpioxra0: gpio@2 { diff --git a/arch/arm/boot/dts/imx6qdl-sr-som.dtsi b/arch/arm/boot/dts/imx6qdl-sr-som.dtsi index 0ad8ccde0cf8..f86efd0ccc40 100644 --- a/arch/arm/boot/dts/imx6qdl-sr-som.dtsi +++ b/arch/arm/boot/dts/imx6qdl-sr-som.dtsi @@ -54,7 +54,13 @@ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_microsom_enet_ar8035>; phy-mode = "rgmii-id"; - phy-reset-duration = <2>; + + /* + * The PHY seems to require a long-enough reset duration to avoid + * some rare issues where the PHY gets stuck in an inconsistent and + * non-functional state at boot-up. 10ms proved to be fine . + */ + phy-reset-duration = <10>; phy-reset-gpios = <&gpio4 15 GPIO_ACTIVE_LOW>; status = "okay"; diff --git a/arch/arm/boot/dts/imx6ull-colibri-wifi.dtsi b/arch/arm/boot/dts/imx6ull-colibri-wifi.dtsi index a0545431b3dc..9f1e38282bee 100644 --- a/arch/arm/boot/dts/imx6ull-colibri-wifi.dtsi +++ b/arch/arm/boot/dts/imx6ull-colibri-wifi.dtsi @@ -43,6 +43,7 @@ assigned-clock-rates = <0>, <198000000>; cap-power-off-card; keep-power-in-suspend; + max-frequency = <25000000>; mmc-pwrseq = <&wifi_pwrseq>; no-1-8-v; non-removable; diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi index 45435bb88c89..373984c130e0 100644 --- a/arch/arm/boot/dts/omap5-board-common.dtsi +++ b/arch/arm/boot/dts/omap5-board-common.dtsi @@ -30,14 +30,6 @@ regulator-max-microvolt = <5000000>; }; - vdds_1v8_main: fixedregulator-vdds_1v8_main { - compatible = "regulator-fixed"; - regulator-name = "vdds_1v8_main"; - vin-supply = <&smps7_reg>; - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <1800000>; - }; - vmmcsd_fixed: fixedregulator-mmcsd { compatible = "regulator-fixed"; regulator-name = "vmmcsd_fixed"; @@ -487,6 +479,7 @@ regulator-boot-on; }; + vdds_1v8_main: smps7_reg: smps7 { /* VDDS_1v8_OMAP over VDDS_1v8_MAIN */ regulator-name = "smps7"; diff --git a/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts b/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts index dace8ffeb991..0a4ffd10c484 100644 --- a/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts +++ b/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts @@ -581,7 +581,7 @@ * EBI2. This has a 25MHz chrystal next to it, so no * clocking is needed. 
*/ - ethernet-ebi2@2,0 { + ethernet@2,0 { compatible = "smsc,lan9221", "smsc,lan9115"; reg = <2 0x0 0x100>; /* @@ -598,8 +598,6 @@ phy-mode = "mii"; reg-io-width = <2>; smsc,force-external-phy; - /* IRQ on edge falling = active low */ - smsc,irq-active-low; smsc,irq-push-pull; /* diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi index c9b906432341..1815361fe73c 100644 --- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi +++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi @@ -755,14 +755,14 @@ status = "disabled"; }; - vica: intc@10140000 { + vica: interrupt-controller@10140000 { compatible = "arm,versatile-vic"; interrupt-controller; #interrupt-cells = <1>; reg = <0x10140000 0x20>; }; - vicb: intc@10140020 { + vicb: interrupt-controller@10140020 { compatible = "arm,versatile-vic"; interrupt-controller; #interrupt-cells = <1>; diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi index c5ea08fec535..6cf1c8b4c6e2 100644 --- a/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi +++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi @@ -37,7 +37,7 @@ poll-interval = <20>; /* - * The EXTi IRQ line 3 is shared with touchscreen and ethernet, + * The EXTi IRQ line 3 is shared with ethernet, * so mark this as polled GPIO key. */ button-0 { @@ -47,6 +47,16 @@ }; /* + * The EXTi IRQ line 6 is shared with touchscreen, + * so mark this as polled GPIO key. + */ + button-1 { + label = "TA2-GPIO-B"; + linux,code = <KEY_B>; + gpios = <&gpiod 6 GPIO_ACTIVE_LOW>; + }; + + /* * The EXTi IRQ line 0 is shared with PMIC, * so mark this as polled GPIO key. */ @@ -60,13 +70,6 @@ gpio-keys { compatible = "gpio-keys"; - button-1 { - label = "TA2-GPIO-B"; - linux,code = <KEY_B>; - gpios = <&gpiod 6 GPIO_ACTIVE_LOW>; - wakeup-source; - }; - button-3 { label = "TA4-GPIO-D"; linux,code = <KEY_D>; @@ -82,6 +85,7 @@ label = "green:led5"; gpios = <&gpioc 6 GPIO_ACTIVE_HIGH>; default-state = "off"; + status = "disabled"; }; led-1 { @@ -185,8 +189,8 @@ touchscreen@38 { compatible = "edt,edt-ft5406"; reg = <0x38>; - interrupt-parent = <&gpiog>; - interrupts = <2 IRQ_TYPE_EDGE_FALLING>; /* GPIO E */ + interrupt-parent = <&gpioc>; + interrupts = <6 IRQ_TYPE_EDGE_FALLING>; /* GPIO E */ }; }; diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi index 2af0a6752674..8c41f819f776 100644 --- a/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi +++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi @@ -12,6 +12,8 @@ aliases { ethernet0 = &ethernet0; ethernet1 = &ksz8851; + rtc0 = &hwrtc; + rtc1 = &rtc; }; memory@c0000000 { @@ -138,6 +140,7 @@ reset-gpios = <&gpioh 3 GPIO_ACTIVE_LOW>; reset-assert-us = <500>; reset-deassert-us = <500>; + smsc,disable-energy-detect; interrupt-parent = <&gpioi>; interrupts = <11 IRQ_TYPE_LEVEL_LOW>; }; @@ -248,7 +251,7 @@ /delete-property/dmas; /delete-property/dma-names; - rtc@32 { + hwrtc: rtc@32 { compatible = "microcrystal,rv8803"; reg = <0x32>; }; diff --git a/arch/arm/boot/dts/versatile-ab.dts b/arch/arm/boot/dts/versatile-ab.dts index 37bd41ff8dff..151c0220047d 100644 --- a/arch/arm/boot/dts/versatile-ab.dts +++ b/arch/arm/boot/dts/versatile-ab.dts @@ -195,16 +195,15 @@ #size-cells = <1>; ranges; - vic: intc@10140000 { + vic: interrupt-controller@10140000 { compatible = "arm,versatile-vic"; interrupt-controller; #interrupt-cells = <1>; reg = <0x10140000 0x1000>; - clear-mask = <0xffffffff>; valid-mask = <0xffffffff>; }; - sic: intc@10003000 { + sic: 
interrupt-controller@10003000 { compatible = "arm,versatile-sic"; interrupt-controller; #interrupt-cells = <1>; diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts index 06a0fdf24026..e7e751a858d8 100644 --- a/arch/arm/boot/dts/versatile-pb.dts +++ b/arch/arm/boot/dts/versatile-pb.dts @@ -7,7 +7,7 @@ amba { /* The Versatile PB is using more SIC IRQ lines than the AB */ - sic: intc@10003000 { + sic: interrupt-controller@10003000 { clear-mask = <0xffffffff>; /* * Valid interrupt lines mask according to diff --git a/arch/arm/configs/integrator_defconfig b/arch/arm/configs/integrator_defconfig index b06e537d5149..4dfe321a79f6 100644 --- a/arch/arm/configs/integrator_defconfig +++ b/arch/arm/configs/integrator_defconfig @@ -57,10 +57,7 @@ CONFIG_DRM=y CONFIG_DRM_DISPLAY_CONNECTOR=y CONFIG_DRM_SIMPLE_BRIDGE=y CONFIG_DRM_PL111=y -CONFIG_FB_MODE_HELPERS=y -CONFIG_FB_MATROX=y -CONFIG_FB_MATROX_MILLENIUM=y -CONFIG_FB_MATROX_MYSTIQUE=y +CONFIG_FB=y CONFIG_BACKLIGHT_CLASS_DEVICE=y # CONFIG_VGA_CONSOLE is not set CONFIG_LOGO=y diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index 52a0400fdd92..d9abaae118dd 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig @@ -821,7 +821,7 @@ CONFIG_USB_ISP1760=y CONFIG_USB_HSIC_USB3503=y CONFIG_AB8500_USB=y CONFIG_KEYSTONE_USB_PHY=m -CONFIG_NOP_USB_XCEIV=m +CONFIG_NOP_USB_XCEIV=y CONFIG_AM335X_PHY_USB=m CONFIG_TWL6030_USB=m CONFIG_USB_GPIO_VBUS=y diff --git a/arch/arm/configs/nhk8815_defconfig b/arch/arm/configs/nhk8815_defconfig index 3f35761dc9ff..23595fc5a29a 100644 --- a/arch/arm/configs/nhk8815_defconfig +++ b/arch/arm/configs/nhk8815_defconfig @@ -15,8 +15,6 @@ CONFIG_SLAB=y CONFIG_ARCH_NOMADIK=y CONFIG_MACH_NOMADIK_8815NHK=y CONFIG_AEABI=y -CONFIG_ZBOOT_ROM_TEXT=0x0 -CONFIG_ZBOOT_ROM_BSS=0x0 CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set @@ -52,9 +50,9 @@ CONFIG_MTD_BLOCK=y CONFIG_MTD_ONENAND=y CONFIG_MTD_ONENAND_VERIFY_WRITE=y CONFIG_MTD_ONENAND_GENERIC=y -CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC=y CONFIG_MTD_RAW_NAND=y CONFIG_MTD_NAND_FSMC=y +CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_CRYPTOLOOP=y CONFIG_BLK_DEV_RAM=y @@ -97,6 +95,7 @@ CONFIG_REGULATOR=y CONFIG_DRM=y CONFIG_DRM_PANEL_TPO_TPG110=y CONFIG_DRM_PL111=y +CONFIG_FB=y CONFIG_BACKLIGHT_CLASS_DEVICE=y CONFIG_BACKLIGHT_PWM=y CONFIG_FRAMEBUFFER_CONSOLE=y @@ -136,9 +135,8 @@ CONFIG_NLS_ISO8859_15=y CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_SHA1=y CONFIG_CRYPTO_DES=y +# CONFIG_DEBUG_BUGVERBOSE is not set CONFIG_DEBUG_INFO=y -# CONFIG_ENABLE_MUST_CHECK is not set CONFIG_DEBUG_FS=y # CONFIG_SCHED_DEBUG is not set # CONFIG_DEBUG_PREEMPT is not set -# CONFIG_DEBUG_BUGVERBOSE is not set diff --git a/arch/arm/configs/realview_defconfig b/arch/arm/configs/realview_defconfig index 483c400dd391..4c01e313099f 100644 --- a/arch/arm/configs/realview_defconfig +++ b/arch/arm/configs/realview_defconfig @@ -64,11 +64,9 @@ CONFIG_DRM_PANEL_SIMPLE=y CONFIG_DRM_DISPLAY_CONNECTOR=y CONFIG_DRM_SIMPLE_BRIDGE=y CONFIG_DRM_PL111=y -CONFIG_FB_MODE_HELPERS=y +CONFIG_FB=y CONFIG_BACKLIGHT_CLASS_DEVICE=y CONFIG_LOGO=y -# CONFIG_LOGO_LINUX_MONO is not set -# CONFIG_LOGO_LINUX_VGA16 is not set CONFIG_SOUND=y CONFIG_SND=y # CONFIG_SND_DRIVERS is not set diff --git a/arch/arm/configs/shmobile_defconfig b/arch/arm/configs/shmobile_defconfig index 66c8b0980a0a..d9a27e4e0914 100644 --- a/arch/arm/configs/shmobile_defconfig +++ b/arch/arm/configs/shmobile_defconfig @@ -135,6 +135,7 @@ 
CONFIG_DRM_SII902X=y CONFIG_DRM_SIMPLE_BRIDGE=y CONFIG_DRM_I2C_ADV7511=y CONFIG_DRM_I2C_ADV7511_AUDIO=y +CONFIG_FB=y CONFIG_FB_SH_MOBILE_LCDC=y CONFIG_BACKLIGHT_PWM=y CONFIG_BACKLIGHT_AS3711=y diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig index dbb1ef601762..3b30913d7d8d 100644 --- a/arch/arm/configs/u8500_defconfig +++ b/arch/arm/configs/u8500_defconfig @@ -61,6 +61,10 @@ CONFIG_INPUT_TOUCHSCREEN=y CONFIG_TOUCHSCREEN_ATMEL_MXT=y CONFIG_TOUCHSCREEN_BU21013=y CONFIG_TOUCHSCREEN_CY8CTMA140=y +CONFIG_TOUCHSCREEN_CYTTSP_CORE=y +CONFIG_TOUCHSCREEN_CYTTSP_SPI=y +CONFIG_TOUCHSCREEN_MMS114=y +CONFIG_TOUCHSCREEN_ZINITIX=y CONFIG_INPUT_MISC=y CONFIG_INPUT_AB8500_PONKEY=y CONFIG_INPUT_GPIO_VIBRA=y @@ -100,6 +104,7 @@ CONFIG_DRM_PANEL_SAMSUNG_S6E63M0_DSI=y CONFIG_DRM_PANEL_SONY_ACX424AKP=y CONFIG_DRM_LIMA=y CONFIG_DRM_MCDE=y +CONFIG_FB=y CONFIG_BACKLIGHT_CLASS_DEVICE=y CONFIG_BACKLIGHT_KTD253=y CONFIG_BACKLIGHT_GPIO=y diff --git a/arch/arm/configs/versatile_defconfig b/arch/arm/configs/versatile_defconfig index e7ecfb365e91..b703f4757021 100644 --- a/arch/arm/configs/versatile_defconfig +++ b/arch/arm/configs/versatile_defconfig @@ -60,7 +60,7 @@ CONFIG_DRM_PANEL_SIMPLE=y CONFIG_DRM_DISPLAY_CONNECTOR=y CONFIG_DRM_SIMPLE_BRIDGE=y CONFIG_DRM_PL111=y -CONFIG_FB_MODE_HELPERS=y +CONFIG_FB=y CONFIG_BACKLIGHT_CLASS_DEVICE=y CONFIG_LOGO=y CONFIG_SOUND=y @@ -88,8 +88,6 @@ CONFIG_NFSD=y CONFIG_NFSD_V3=y CONFIG_NLS_CODEPAGE_850=m CONFIG_NLS_ISO8859_1=m -CONFIG_FONTS=y -CONFIG_FONT_ACORN_8x8=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_FS=y CONFIG_DEBUG_KERNEL=y diff --git a/arch/arm/configs/vexpress_defconfig b/arch/arm/configs/vexpress_defconfig index 4479369540f2..b5e246dd23f4 100644 --- a/arch/arm/configs/vexpress_defconfig +++ b/arch/arm/configs/vexpress_defconfig @@ -11,9 +11,6 @@ CONFIG_CPUSETS=y # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y CONFIG_PROFILING=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -# CONFIG_BLK_DEV_BSG is not set CONFIG_ARCH_VEXPRESS=y CONFIG_ARCH_VEXPRESS_DCSCB=y CONFIG_ARCH_VEXPRESS_TC2_PM=y @@ -23,14 +20,17 @@ CONFIG_MCPM=y CONFIG_VMSPLIT_2G=y CONFIG_NR_CPUS=8 CONFIG_ARM_PSCI=y -CONFIG_CMA=y CONFIG_ZBOOT_ROM_TEXT=0x0 CONFIG_ZBOOT_ROM_BSS=0x0 CONFIG_CMDLINE="console=ttyAMA0" CONFIG_CPU_IDLE=y CONFIG_VFP=y CONFIG_NEON=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_BLK_DEV_BSG is not set # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_CMA=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -43,7 +43,6 @@ CONFIG_IP_PNP_BOOTP=y CONFIG_NET_9P=y CONFIG_NET_9P_VIRTIO=y CONFIG_DEVTMPFS=y -CONFIG_DMA_CMA=y CONFIG_MTD=y CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_BLOCK=y @@ -59,7 +58,6 @@ CONFIG_VIRTIO_BLK=y CONFIG_BLK_DEV_SD=y CONFIG_SCSI_VIRTIO=y CONFIG_ATA=y -# CONFIG_SATA_PMP is not set CONFIG_NETDEVICES=y CONFIG_VIRTIO_NET=y CONFIG_SMC91X=y @@ -81,11 +79,9 @@ CONFIG_DRM=y CONFIG_DRM_PANEL_SIMPLE=y CONFIG_DRM_SII902X=y CONFIG_DRM_PL111=y -CONFIG_FB_MODE_HELPERS=y +CONFIG_FB=y CONFIG_BACKLIGHT_CLASS_DEVICE=y CONFIG_LOGO=y -# CONFIG_LOGO_LINUX_MONO is not set -# CONFIG_LOGO_LINUX_VGA16 is not set CONFIG_SOUND=y CONFIG_SND=y # CONFIG_SND_DRIVERS is not set @@ -136,10 +132,11 @@ CONFIG_ROOT_NFS=y CONFIG_9P_FS=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y +# CONFIG_CRYPTO_HW is not set +CONFIG_DMA_CMA=y CONFIG_DEBUG_INFO=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y # CONFIG_SCHED_DEBUG is not set CONFIG_DEBUG_USER=y -# CONFIG_CRYPTO_HW is not set diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig index 
de11030748d0..1d3aef84287d 100644 --- a/arch/arm/mach-davinci/Kconfig +++ b/arch/arm/mach-davinci/Kconfig @@ -9,7 +9,6 @@ menuconfig ARCH_DAVINCI select PM_GENERIC_DOMAINS_OF if PM && OF select REGMAP_MMIO select RESET_CONTROLLER - select HAVE_IDE select PINCTRL_SINGLE if ARCH_DAVINCI diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h index f0a073a71401..13f3068e9845 100644 --- a/arch/arm/mach-imx/common.h +++ b/arch/arm/mach-imx/common.h @@ -68,7 +68,6 @@ void imx_set_cpu_arg(int cpu, u32 arg); void v7_secondary_startup(void); void imx_scu_map_io(void); void imx_smp_prepare(void); -void imx_gpcv2_set_core1_pdn_pup_by_software(bool pdn); #else static inline void imx_scu_map_io(void) {} static inline void imx_smp_prepare(void) {} @@ -81,6 +80,7 @@ void imx_gpc_mask_all(void); void imx_gpc_restore_all(void); void imx_gpc_hwirq_mask(unsigned int hwirq); void imx_gpc_hwirq_unmask(unsigned int hwirq); +void imx_gpcv2_set_core1_pdn_pup_by_software(bool pdn); void imx_anatop_init(void); void imx_anatop_pre_suspend(void); void imx_anatop_post_resume(void); diff --git a/arch/arm/mach-imx/mmdc.c b/arch/arm/mach-imx/mmdc.c index 0dfd0ae7a63d..af12668d0bf5 100644 --- a/arch/arm/mach-imx/mmdc.c +++ b/arch/arm/mach-imx/mmdc.c @@ -103,6 +103,7 @@ struct mmdc_pmu { struct perf_event *mmdc_events[MMDC_NUM_COUNTERS]; struct hlist_node node; struct fsl_mmdc_devtype_data *devtype_data; + struct clk *mmdc_ipg_clk; }; /* @@ -462,11 +463,14 @@ static int imx_mmdc_remove(struct platform_device *pdev) cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node); perf_pmu_unregister(&pmu_mmdc->pmu); + iounmap(pmu_mmdc->mmdc_base); + clk_disable_unprepare(pmu_mmdc->mmdc_ipg_clk); kfree(pmu_mmdc); return 0; } -static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_base) +static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_base, + struct clk *mmdc_ipg_clk) { struct mmdc_pmu *pmu_mmdc; char *name; @@ -494,6 +498,7 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b } mmdc_num = mmdc_pmu_init(pmu_mmdc, mmdc_base, &pdev->dev); + pmu_mmdc->mmdc_ipg_clk = mmdc_ipg_clk; if (mmdc_num == 0) name = "mmdc"; else @@ -529,7 +534,7 @@ pmu_free: #else #define imx_mmdc_remove NULL -#define imx_mmdc_perf_init(pdev, mmdc_base) 0 +#define imx_mmdc_perf_init(pdev, mmdc_base, mmdc_ipg_clk) 0 #endif static int imx_mmdc_probe(struct platform_device *pdev) @@ -567,7 +572,13 @@ static int imx_mmdc_probe(struct platform_device *pdev) val &= ~(1 << BP_MMDC_MAPSR_PSD); writel_relaxed(val, reg); - return imx_mmdc_perf_init(pdev, mmdc_base); + err = imx_mmdc_perf_init(pdev, mmdc_base, mmdc_ipg_clk); + if (err) { + iounmap(mmdc_base); + clk_disable_unprepare(mmdc_ipg_clk); + } + + return err; } int imx_mmdc_get_ddr_type(void) diff --git a/arch/arm/mach-ixp4xx/Kconfig b/arch/arm/mach-ixp4xx/Kconfig index bf14d65120b9..34a1c7742088 100644 --- a/arch/arm/mach-ixp4xx/Kconfig +++ b/arch/arm/mach-ixp4xx/Kconfig @@ -91,6 +91,7 @@ config MACH_IXDP465 config MACH_GORAMO_MLR bool "GORAMO Multi Link Router" + depends on IXP4XX_PCI_LEGACY help Say 'Y' here if you want your kernel to support GORAMO MultiLink router. 
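A note on the imx_mmdc_probe()/imx_mmdc_remove() hunks above: they adopt the usual unwind-on-error idiom, releasing every resource acquired before the failing step in reverse order, with the remove path mirroring the probe path. A minimal sketch of that idiom, assuming a hypothetical platform driver with one IPG clock and one MMIO region (example_probe() and example_perf_init() are illustrative names, not the driver's actual code):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>

/* Stand-in for the last step that can fail (the perf counter setup). */
static int example_perf_init(struct platform_device *pdev,
			     void __iomem *base, struct clk *clk)
{
	return 0;
}

static int example_probe(struct platform_device *pdev)
{
	struct clk *ipg_clk;
	void __iomem *base;
	int err;

	ipg_clk = clk_get(&pdev->dev, "ipg");
	if (IS_ERR(ipg_clk))
		return PTR_ERR(ipg_clk);

	err = clk_prepare_enable(ipg_clk);
	if (err)
		goto put_clk;

	base = of_iomap(pdev->dev.of_node, 0);
	if (!base) {
		err = -ENOMEM;
		goto disable_clk;
	}

	err = example_perf_init(pdev, base, ipg_clk);
	if (err)
		goto unmap;	/* release everything acquired so far */

	return 0;

unmap:
	iounmap(base);
disable_clk:
	clk_disable_unprepare(ipg_clk);
put_clk:
	clk_put(ipg_clk);
	return err;
}

The actual patch additionally stores the clock in the PMU structure so that imx_mmdc_remove() can undo the mapping and the clock enable in the same order.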
diff --git a/arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h b/arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h index abb07f105515..74e63d4531aa 100644 --- a/arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h +++ b/arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h @@ -218,30 +218,30 @@ /* * PCI Control/Status Registers */ -#define IXP4XX_PCI_CSR(x) ((volatile u32 *)(IXP4XX_PCI_CFG_BASE_VIRT+(x))) - -#define PCI_NP_AD IXP4XX_PCI_CSR(PCI_NP_AD_OFFSET) -#define PCI_NP_CBE IXP4XX_PCI_CSR(PCI_NP_CBE_OFFSET) -#define PCI_NP_WDATA IXP4XX_PCI_CSR(PCI_NP_WDATA_OFFSET) -#define PCI_NP_RDATA IXP4XX_PCI_CSR(PCI_NP_RDATA_OFFSET) -#define PCI_CRP_AD_CBE IXP4XX_PCI_CSR(PCI_CRP_AD_CBE_OFFSET) -#define PCI_CRP_WDATA IXP4XX_PCI_CSR(PCI_CRP_WDATA_OFFSET) -#define PCI_CRP_RDATA IXP4XX_PCI_CSR(PCI_CRP_RDATA_OFFSET) -#define PCI_CSR IXP4XX_PCI_CSR(PCI_CSR_OFFSET) -#define PCI_ISR IXP4XX_PCI_CSR(PCI_ISR_OFFSET) -#define PCI_INTEN IXP4XX_PCI_CSR(PCI_INTEN_OFFSET) -#define PCI_DMACTRL IXP4XX_PCI_CSR(PCI_DMACTRL_OFFSET) -#define PCI_AHBMEMBASE IXP4XX_PCI_CSR(PCI_AHBMEMBASE_OFFSET) -#define PCI_AHBIOBASE IXP4XX_PCI_CSR(PCI_AHBIOBASE_OFFSET) -#define PCI_PCIMEMBASE IXP4XX_PCI_CSR(PCI_PCIMEMBASE_OFFSET) -#define PCI_AHBDOORBELL IXP4XX_PCI_CSR(PCI_AHBDOORBELL_OFFSET) -#define PCI_PCIDOORBELL IXP4XX_PCI_CSR(PCI_PCIDOORBELL_OFFSET) -#define PCI_ATPDMA0_AHBADDR IXP4XX_PCI_CSR(PCI_ATPDMA0_AHBADDR_OFFSET) -#define PCI_ATPDMA0_PCIADDR IXP4XX_PCI_CSR(PCI_ATPDMA0_PCIADDR_OFFSET) -#define PCI_ATPDMA0_LENADDR IXP4XX_PCI_CSR(PCI_ATPDMA0_LENADDR_OFFSET) -#define PCI_ATPDMA1_AHBADDR IXP4XX_PCI_CSR(PCI_ATPDMA1_AHBADDR_OFFSET) -#define PCI_ATPDMA1_PCIADDR IXP4XX_PCI_CSR(PCI_ATPDMA1_PCIADDR_OFFSET) -#define PCI_ATPDMA1_LENADDR IXP4XX_PCI_CSR(PCI_ATPDMA1_LENADDR_OFFSET) +#define _IXP4XX_PCI_CSR(x) ((volatile u32 *)(IXP4XX_PCI_CFG_BASE_VIRT+(x))) + +#define PCI_NP_AD _IXP4XX_PCI_CSR(PCI_NP_AD_OFFSET) +#define PCI_NP_CBE _IXP4XX_PCI_CSR(PCI_NP_CBE_OFFSET) +#define PCI_NP_WDATA _IXP4XX_PCI_CSR(PCI_NP_WDATA_OFFSET) +#define PCI_NP_RDATA _IXP4XX_PCI_CSR(PCI_NP_RDATA_OFFSET) +#define PCI_CRP_AD_CBE _IXP4XX_PCI_CSR(PCI_CRP_AD_CBE_OFFSET) +#define PCI_CRP_WDATA _IXP4XX_PCI_CSR(PCI_CRP_WDATA_OFFSET) +#define PCI_CRP_RDATA _IXP4XX_PCI_CSR(PCI_CRP_RDATA_OFFSET) +#define PCI_CSR _IXP4XX_PCI_CSR(PCI_CSR_OFFSET) +#define PCI_ISR _IXP4XX_PCI_CSR(PCI_ISR_OFFSET) +#define PCI_INTEN _IXP4XX_PCI_CSR(PCI_INTEN_OFFSET) +#define PCI_DMACTRL _IXP4XX_PCI_CSR(PCI_DMACTRL_OFFSET) +#define PCI_AHBMEMBASE _IXP4XX_PCI_CSR(PCI_AHBMEMBASE_OFFSET) +#define PCI_AHBIOBASE _IXP4XX_PCI_CSR(PCI_AHBIOBASE_OFFSET) +#define PCI_PCIMEMBASE _IXP4XX_PCI_CSR(PCI_PCIMEMBASE_OFFSET) +#define PCI_AHBDOORBELL _IXP4XX_PCI_CSR(PCI_AHBDOORBELL_OFFSET) +#define PCI_PCIDOORBELL _IXP4XX_PCI_CSR(PCI_PCIDOORBELL_OFFSET) +#define PCI_ATPDMA0_AHBADDR _IXP4XX_PCI_CSR(PCI_ATPDMA0_AHBADDR_OFFSET) +#define PCI_ATPDMA0_PCIADDR _IXP4XX_PCI_CSR(PCI_ATPDMA0_PCIADDR_OFFSET) +#define PCI_ATPDMA0_LENADDR _IXP4XX_PCI_CSR(PCI_ATPDMA0_LENADDR_OFFSET) +#define PCI_ATPDMA1_AHBADDR _IXP4XX_PCI_CSR(PCI_ATPDMA1_AHBADDR_OFFSET) +#define PCI_ATPDMA1_PCIADDR _IXP4XX_PCI_CSR(PCI_ATPDMA1_PCIADDR_OFFSET) +#define PCI_ATPDMA1_LENADDR _IXP4XX_PCI_CSR(PCI_ATPDMA1_LENADDR_OFFSET) /* * PCI register values and bit definitions diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 65934b2924fb..12b26e04686f 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -3776,6 +3776,7 @@ struct powerdomain *omap_hwmod_get_pwrdm(struct omap_hwmod *oh) struct 
omap_hwmod_ocp_if *oi; struct clockdomain *clkdm; struct clk_hw_omap *clk; + struct clk_hw *hw; if (!oh) return NULL; @@ -3792,7 +3793,14 @@ struct powerdomain *omap_hwmod_get_pwrdm(struct omap_hwmod *oh) c = oi->_clk; } - clk = to_clk_hw_omap(__clk_get_hw(c)); + hw = __clk_get_hw(c); + if (!hw) + return NULL; + + clk = to_clk_hw_omap(hw); + if (!clk) + return NULL; + clkdm = clk->clkdm; if (!clkdm) return NULL; diff --git a/arch/arm/mach-rpc/riscpc.c b/arch/arm/mach-rpc/riscpc.c index d23970bd638d..f70fb9c4b0cb 100644 --- a/arch/arm/mach-rpc/riscpc.c +++ b/arch/arm/mach-rpc/riscpc.c @@ -49,6 +49,7 @@ static int __init parse_tag_acorn(const struct tag *tag) fallthrough; /* ??? */ case 256: vram_size += PAGE_SIZE * 256; + break; default: break; } diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 897634d0a67c..a951276f0547 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -1602,6 +1602,9 @@ exit: rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx); emit_ldx_r(dst, rn, off, ctx, BPF_SIZE(code)); break; + /* speculation barrier */ + case BPF_ST | BPF_NOSPEC: + break; /* ST: *(size *)(dst + off) = imm */ case BPF_ST | BPF_MEM | BPF_W: case BPF_ST | BPF_MEM | BPF_H: diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index e07e7de9ac49..fdcd54d39c1e 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1605,7 +1605,8 @@ config ARM64_BTI_KERNEL depends on CC_HAS_BRANCH_PROT_PAC_RET_BTI # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94697 depends on !CC_IS_GCC || GCC_VERSION >= 100100 - depends on !(CC_IS_CLANG && GCOV_KERNEL) + # https://github.com/llvm/llvm-project/commit/a88c722e687e6780dcd6a58718350dc76fcc4cc9 + depends on !CC_IS_CLANG || CLANG_VERSION >= 120000 depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS) help Build the kernel with Branch Target Identification annotations @@ -1799,11 +1800,11 @@ config RANDOMIZE_BASE If unsure, say N. config RANDOMIZE_MODULE_REGION_FULL - bool "Randomize the module region over a 4 GB range" + bool "Randomize the module region over a 2 GB range" depends on RANDOMIZE_BASE default y help - Randomizes the location of the module region inside a 4 GB window + Randomizes the location of the module region inside a 2 GB window covering the core kernel. This way, it is less likely for modules to leak information about the location of core kernel data structures but it does imply that function calls between modules and the core @@ -1811,7 +1812,10 @@ config RANDOMIZE_MODULE_REGION_FULL When this option is not set, the module region will be randomized over a limited range that contains the [_stext, _etext] interval of the - core kernel, so branch relocations are always in range. + core kernel, so branch relocations are almost always in range unless + ARM64_MODULE_PLTS is enabled and the region is exhausted. In this + particular case of region exhaustion, modules might be able to fall + back to a larger 2GB area. 
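# Background for the figures above: a signed 32-bit place-relative
# offset spans +/- 2^31 bytes = 2 GiB, which matches the fully
# randomized window, while a plain AArch64 B/BL branch encodes a
# 26-bit immediate scaled by 4 and therefore reaches only
# (1 << 25) * 4 bytes = +/- 128 MiB; calls beyond that range have to
# go through PLT veneers, as the help text notes.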
config CC_HAVE_STACKPROTECTOR_SYSREG def_bool $(cc-option,-mstack-protector-guard=sysreg -mstack-protector-guard-reg=sp_el0 -mstack-protector-guard-offset=0) diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 7bc37d0a1b68..1110d386f3b4 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -21,19 +21,11 @@ LDFLAGS_vmlinux += -shared -Bsymbolic -z notext \ endif ifeq ($(CONFIG_ARM64_ERRATUM_843419),y) - ifneq ($(CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419),y) -$(warning ld does not support --fix-cortex-a53-843419; kernel may be susceptible to erratum) - else + ifeq ($(CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419),y) LDFLAGS_vmlinux += --fix-cortex-a53-843419 endif endif -ifeq ($(CONFIG_ARM64_USE_LSE_ATOMICS), y) - ifneq ($(CONFIG_ARM64_LSE_ATOMICS), y) -$(warning LSE atomics not supported by binutils) - endif -endif - cc_has_k_constraint := $(call try-run,echo \ 'int main(void) { \ asm volatile("and w0, w0, %w0" :: "K" (4294967295)); \ @@ -176,10 +168,23 @@ vdso_install: archprepare: $(Q)$(MAKE) $(build)=arch/arm64/tools kapi +ifeq ($(CONFIG_ARM64_ERRATUM_843419),y) + ifneq ($(CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419),y) + @echo "warning: ld does not support --fix-cortex-a53-843419; kernel may be susceptible to erratum" >&2 + endif +endif +ifeq ($(CONFIG_ARM64_USE_LSE_ATOMICS),y) + ifneq ($(CONFIG_ARM64_LSE_ATOMICS),y) + @echo "warning: LSE atomics not supported by binutils" >&2 + endif +endif + # We use MRPROPER_FILES and CLEAN_FILES now archclean: $(Q)$(MAKE) $(clean)=$(boot) + $(Q)$(MAKE) $(clean)=arch/arm64/kernel/vdso + $(Q)$(MAKE) $(clean)=arch/arm64/kernel/vdso32 ifeq ($(KBUILD_EXTMOD),) # We need to generate vdso-offsets.h before compiling certain files in kernel/. diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var2.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var2.dts index dd764b720fb0..f6a79c8080d1 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var2.dts +++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var2.dts @@ -54,6 +54,7 @@ &mscc_felix_port0 { label = "swp0"; + managed = "in-band-status"; phy-handle = <&phy0>; phy-mode = "sgmii"; status = "okay"; @@ -61,6 +62,7 @@ &mscc_felix_port1 { label = "swp1"; + managed = "in-band-status"; phy-handle = <&phy1>; phy-mode = "sgmii"; status = "okay"; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi index b2e3e5d2a108..343ecf0e8973 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi @@ -66,7 +66,7 @@ }; }; - sysclk: clock-sysclk { + sysclk: sysclk { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <100000000>; diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi index 9f7c7f587d38..f4eaab3ecf03 100644 --- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi @@ -579,7 +579,7 @@ }; flexcan1: can@308c0000 { - compatible = "fsl,imx8mp-flexcan", "fsl,imx6q-flexcan"; + compatible = "fsl,imx8mp-flexcan"; reg = <0x308c0000 0x10000>; interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MP_CLK_IPG_ROOT>, @@ -594,7 +594,7 @@ }; flexcan2: can@308d0000 { - compatible = "fsl,imx8mp-flexcan", "fsl,imx6q-flexcan"; + compatible = "fsl,imx8mp-flexcan"; reg = <0x308d0000 0x10000>; interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MP_CLK_IPG_ROOT>, @@ -821,9 +821,9 @@ eqos: ethernet@30bf0000 { compatible = "nxp,imx8mp-dwmac-eqos", 
"snps,dwmac-5.10a"; reg = <0x30bf0000 0x10000>; - interrupts = <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>, - <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>; - interrupt-names = "eth_wake_irq", "macirq"; + interrupts = <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "macirq", "eth_wake_irq"; clocks = <&clk IMX8MP_CLK_ENET_QOS_ROOT>, <&clk IMX8MP_CLK_QOS_ENET_ROOT>, <&clk IMX8MP_CLK_ENET_QOS_TIMER>, diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts index ce2bcddf396f..a05b1ab2dd12 100644 --- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts +++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts @@ -19,6 +19,8 @@ aliases { spi0 = &spi0; ethernet1 = ð1; + mmc0 = &sdhci0; + mmc1 = &sdhci1; }; chosen { @@ -119,6 +121,7 @@ pinctrl-names = "default"; pinctrl-0 = <&i2c1_pins>; clock-frequency = <100000>; + /delete-property/ mrvl,i2c-fast-mode; status = "okay"; rtc@6f { diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi index b7d532841390..5ba7a4519b95 100644 --- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi @@ -948,6 +948,10 @@ <&bpmp TEGRA194_CLK_XUSB_SS>, <&bpmp TEGRA194_CLK_XUSB_FS>; clock-names = "dev", "ss", "ss_src", "fs_src"; + interconnects = <&mc TEGRA194_MEMORY_CLIENT_XUSB_DEVR &emc>, + <&mc TEGRA194_MEMORY_CLIENT_XUSB_DEVW &emc>; + interconnect-names = "dma-mem", "write"; + iommus = <&smmu TEGRA194_SID_XUSB_DEV>; power-domains = <&bpmp TEGRA194_POWER_DOMAIN_XUSBB>, <&bpmp TEGRA194_POWER_DOMAIN_XUSBA>; power-domain-names = "dev", "ss"; @@ -977,6 +981,10 @@ "xusb_ss", "xusb_ss_src", "xusb_hs_src", "xusb_fs_src", "pll_u_480m", "clk_m", "pll_e"; + interconnects = <&mc TEGRA194_MEMORY_CLIENT_XUSB_HOSTR &emc>, + <&mc TEGRA194_MEMORY_CLIENT_XUSB_HOSTW &emc>; + interconnect-names = "dma-mem", "write"; + iommus = <&smmu TEGRA194_SID_XUSB_HOST>; power-domains = <&bpmp TEGRA194_POWER_DOMAIN_XUSBC>, <&bpmp TEGRA194_POWER_DOMAIN_XUSBA>; @@ -1832,7 +1840,11 @@ interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE1R &emc>, <&mc TEGRA194_MEMORY_CLIENT_PCIE1W &emc>; - interconnect-names = "read", "write"; + interconnect-names = "dma-mem", "write"; + iommus = <&smmu TEGRA194_SID_PCIE1>; + iommu-map = <0x0 &smmu TEGRA194_SID_PCIE1 0x1000>; + iommu-map-mask = <0x0>; + dma-coherent; }; pcie@14120000 { @@ -1882,7 +1894,11 @@ interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE2AR &emc>, <&mc TEGRA194_MEMORY_CLIENT_PCIE2AW &emc>; - interconnect-names = "read", "write"; + interconnect-names = "dma-mem", "write"; + iommus = <&smmu TEGRA194_SID_PCIE2>; + iommu-map = <0x0 &smmu TEGRA194_SID_PCIE2 0x1000>; + iommu-map-mask = <0x0>; + dma-coherent; }; pcie@14140000 { @@ -1932,7 +1948,11 @@ interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE3R &emc>, <&mc TEGRA194_MEMORY_CLIENT_PCIE3W &emc>; - interconnect-names = "read", "write"; + interconnect-names = "dma-mem", "write"; + iommus = <&smmu TEGRA194_SID_PCIE3>; + iommu-map = <0x0 &smmu TEGRA194_SID_PCIE3 0x1000>; + iommu-map-mask = <0x0>; + dma-coherent; }; pcie@14160000 { @@ -1982,7 +2002,11 @@ interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE4R &emc>, <&mc TEGRA194_MEMORY_CLIENT_PCIE4W &emc>; - interconnect-names = "read", "write"; + interconnect-names = "dma-mem", "write"; + iommus = <&smmu TEGRA194_SID_PCIE4>; + iommu-map = <0x0 &smmu TEGRA194_SID_PCIE4 0x1000>; + iommu-map-mask = <0x0>; + dma-coherent; }; pcie@14180000 { @@ -2032,7 +2056,11 @@ interconnects = <&mc 
TEGRA194_MEMORY_CLIENT_PCIE0R &emc>, <&mc TEGRA194_MEMORY_CLIENT_PCIE0W &emc>; - interconnect-names = "read", "write"; + interconnect-names = "dma-mem", "write"; + iommus = <&smmu TEGRA194_SID_PCIE0>; + iommu-map = <0x0 &smmu TEGRA194_SID_PCIE0 0x1000>; + iommu-map-mask = <0x0>; + dma-coherent; }; pcie@141a0000 { @@ -2086,7 +2114,11 @@ interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE5R &emc>, <&mc TEGRA194_MEMORY_CLIENT_PCIE5W &emc>; - interconnect-names = "read", "write"; + interconnect-names = "dma-mem", "write"; + iommus = <&smmu TEGRA194_SID_PCIE5>; + iommu-map = <0x0 &smmu TEGRA194_SID_PCIE5 0x1000>; + iommu-map-mask = <0x0>; + dma-coherent; }; pcie_ep@14160000 { @@ -2119,6 +2151,14 @@ nvidia,aspm-cmrt-us = <60>; nvidia,aspm-pwr-on-t-us = <20>; nvidia,aspm-l0s-entrance-latency-us = <3>; + + interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE4R &emc>, + <&mc TEGRA194_MEMORY_CLIENT_PCIE4W &emc>; + interconnect-names = "dma-mem", "write"; + iommus = <&smmu TEGRA194_SID_PCIE4>; + iommu-map = <0x0 &smmu TEGRA194_SID_PCIE4 0x1000>; + iommu-map-mask = <0x0>; + dma-coherent; }; pcie_ep@14180000 { @@ -2151,6 +2191,14 @@ nvidia,aspm-cmrt-us = <60>; nvidia,aspm-pwr-on-t-us = <20>; nvidia,aspm-l0s-entrance-latency-us = <3>; + + interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE0R &emc>, + <&mc TEGRA194_MEMORY_CLIENT_PCIE0W &emc>; + interconnect-names = "dma-mem", "write"; + iommus = <&smmu TEGRA194_SID_PCIE0>; + iommu-map = <0x0 &smmu TEGRA194_SID_PCIE0 0x1000>; + iommu-map-mask = <0x0>; + dma-coherent; }; pcie_ep@141a0000 { @@ -2186,6 +2234,14 @@ nvidia,aspm-cmrt-us = <60>; nvidia,aspm-pwr-on-t-us = <20>; nvidia,aspm-l0s-entrance-latency-us = <3>; + + interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE5R &emc>, + <&mc TEGRA194_MEMORY_CLIENT_PCIE5W &emc>; + interconnect-names = "dma-mem", "write"; + iommus = <&smmu TEGRA194_SID_PCIE5>; + iommu-map = <0x0 &smmu TEGRA194_SID_PCIE5 0x1000>; + iommu-map-mask = <0x0>; + dma-coherent; }; sram@40000000 { @@ -2469,6 +2525,11 @@ * for 8x and 11.025x sample rate streams. 
*/ assigned-clock-rates = <258000000>; + + interconnects = <&mc TEGRA194_MEMORY_CLIENT_APEDMAR &emc>, + <&mc TEGRA194_MEMORY_CLIENT_APEDMAW &emc>; + interconnect-names = "dma-mem", "write"; + iommus = <&smmu TEGRA194_SID_APE>; }; tcu: tcu { diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi index 068692350e00..51e17094d7b1 100644 --- a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi +++ b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi @@ -1063,7 +1063,7 @@ status = "okay"; extcon = <&usb2_id>; - usb@7600000 { + dwc3@7600000 { extcon = <&usb2_id>; dr_mode = "otg"; maximum-speed = "high-speed"; @@ -1074,7 +1074,7 @@ status = "okay"; extcon = <&usb3_id>; - usb@6a00000 { + dwc3@6a00000 { extcon = <&usb3_id>; dr_mode = "otg"; }; diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi index 95d6cb8cd4c0..f39bc10cc5bd 100644 --- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi +++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi @@ -443,7 +443,7 @@ resets = <&gcc GCC_USB0_BCR>; status = "disabled"; - dwc_0: usb@8a00000 { + dwc_0: dwc3@8a00000 { compatible = "snps,dwc3"; reg = <0x8a00000 0xcd00>; interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>; @@ -484,7 +484,7 @@ resets = <&gcc GCC_USB1_BCR>; status = "disabled"; - dwc_1: usb@8c00000 { + dwc_1: dwc3@8c00000 { compatible = "snps,dwc3"; reg = <0x8c00000 0xcd00>; interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>; diff --git a/arch/arm64/boot/dts/qcom/msm8992-bullhead-rev-101.dts b/arch/arm64/boot/dts/qcom/msm8992-bullhead-rev-101.dts index 23cdcc9f7c72..1ccca83292ac 100644 --- a/arch/arm64/boot/dts/qcom/msm8992-bullhead-rev-101.dts +++ b/arch/arm64/boot/dts/qcom/msm8992-bullhead-rev-101.dts @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2015, LGE Inc. All rights reserved. * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * Copyright (c) 2021, Petr Vorel <petr.vorel@gmail.com> */ /dts-v1/; @@ -9,6 +10,9 @@ #include "pm8994.dtsi" #include "pmi8994.dtsi" +/* cont_splash_mem has different memory mapping */ +/delete-node/ &cont_splash_mem; + / { model = "LG Nexus 5X"; compatible = "lg,bullhead", "qcom,msm8992"; @@ -17,6 +21,9 @@ qcom,board-id = <0xb64 0>; qcom,pmic-id = <0x10009 0x1000A 0x0 0x0>; + /* Bullhead firmware doesn't support PSCI */ + /delete-node/ psci; + aliases { serial0 = &blsp1_uart2; }; @@ -38,6 +45,11 @@ ftrace-size = <0x10000>; pmsg-size = <0x20000>; }; + + cont_splash_mem: memory@3400000 { + reg = <0 0x03400000 0 0x1200000>; + no-map; + }; }; }; diff --git a/arch/arm64/boot/dts/qcom/msm8994-angler-rev-101.dts b/arch/arm64/boot/dts/qcom/msm8994-angler-rev-101.dts index ffe1a9bd8f70..c096b7758aa0 100644 --- a/arch/arm64/boot/dts/qcom/msm8994-angler-rev-101.dts +++ b/arch/arm64/boot/dts/qcom/msm8994-angler-rev-101.dts @@ -1,12 +1,16 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2015, Huawei Inc. All rights reserved. * Copyright (c) 2016, The Linux Foundation. All rights reserved. 
+ * Copyright (c) 2021, Petr Vorel <petr.vorel@gmail.com> */ /dts-v1/; #include "msm8994.dtsi" +/* Angler's firmware does not report where the memory is allocated */ +/delete-node/ &cont_splash_mem; + / { model = "Huawei Nexus 6P"; compatible = "huawei,angler", "qcom,msm8994"; diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi index 0e1bc4669d7e..78c55ca10ba9 100644 --- a/arch/arm64/boot/dts/qcom/msm8996.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi @@ -2566,7 +2566,7 @@ power-domains = <&gcc USB30_GDSC>; status = "disabled"; - usb@6a00000 { + dwc3@6a00000 { compatible = "snps,dwc3"; reg = <0x06a00000 0xcc00>; interrupts = <0 131 IRQ_TYPE_LEVEL_HIGH>; @@ -2873,7 +2873,7 @@ qcom,select-utmi-as-pipe-clk; status = "disabled"; - usb@7600000 { + dwc3@7600000 { compatible = "snps,dwc3"; reg = <0x07600000 0xcc00>; interrupts = <0 138 IRQ_TYPE_LEVEL_HIGH>; diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi index 6f294f9c0cdf..e9d3ce29937c 100644 --- a/arch/arm64/boot/dts/qcom/msm8998.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi @@ -1964,7 +1964,7 @@ resets = <&gcc GCC_USB_30_BCR>; - usb3_dwc3: usb@a800000 { + usb3_dwc3: dwc3@a800000 { compatible = "snps,dwc3"; reg = <0x0a800000 0xcd00>; interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>; diff --git a/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi b/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi index f8a55307b855..a80c578484ba 100644 --- a/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi @@ -337,7 +337,7 @@ &usb3 { status = "okay"; - usb@7580000 { + dwc3@7580000 { dr_mode = "host"; }; }; diff --git a/arch/arm64/boot/dts/qcom/qcs404.dtsi b/arch/arm64/boot/dts/qcom/qcs404.dtsi index 9c4be020d568..339790ba585d 100644 --- a/arch/arm64/boot/dts/qcom/qcs404.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs404.dtsi @@ -544,7 +544,7 @@ assigned-clock-rates = <19200000>, <200000000>; status = "disabled"; - usb@7580000 { + dwc3@7580000 { compatible = "snps,dwc3"; reg = <0x07580000 0xcd00>; interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>; @@ -573,7 +573,7 @@ assigned-clock-rates = <19200000>, <133333333>; status = "disabled"; - usb@78c0000 { + dwc3@78c0000 { compatible = "snps,dwc3"; reg = <0x078c0000 0xcc00>; interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>; diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi index a5d58eb92896..a9a052f8c63c 100644 --- a/arch/arm64/boot/dts/qcom/sc7180.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi @@ -2756,7 +2756,7 @@ <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_USB3 0>; interconnect-names = "usb-ddr", "apps-usb"; - usb_1_dwc3: usb@a600000 { + usb_1_dwc3: dwc3@a600000 { compatible = "snps,dwc3"; reg = <0 0x0a600000 0 0xe000>; interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>; diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi index a8c274ad74c4..188c5768a55a 100644 --- a/arch/arm64/boot/dts/qcom/sc7280.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi @@ -200,7 +200,7 @@ &BIG_CPU_SLEEP_1 &CLUSTER_SLEEP_0>; next-level-cache = <&L2_700>; - qcom,freq-domain = <&cpufreq_hw 1>; + qcom,freq-domain = <&cpufreq_hw 2>; #cooling-cells = <2>; L2_700: l2-cache { compatible = "cache"; diff --git a/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi b/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi index 4d052e39b348..eb6b1d15293d 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi @@ -69,7 +69,7 
@@ }; rmtfs_upper_guard: memory@f5d01000 { no-map; - reg = <0 0xf5d01000 0 0x2000>; + reg = <0 0xf5d01000 0 0x1000>; }; /* @@ -78,7 +78,7 @@ */ removed_region: memory@88f00000 { no-map; - reg = <0 0x88f00000 0 0x200000>; + reg = <0 0x88f00000 0 0x1c00000>; }; ramoops: ramoops@ac300000 { diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi index 1796ae8372be..0a86fe71a66d 100644 --- a/arch/arm64/boot/dts/qcom/sdm845.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi @@ -3781,7 +3781,7 @@ <&gladiator_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_USB3_0 0>; interconnect-names = "usb-ddr", "apps-usb"; - usb_1_dwc3: usb@a600000 { + usb_1_dwc3: dwc3@a600000 { compatible = "snps,dwc3"; reg = <0 0x0a600000 0 0xcd00>; interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>; @@ -3829,7 +3829,7 @@ <&gladiator_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_USB3_1 0>; interconnect-names = "usb-ddr", "apps-usb"; - usb_2_dwc3: usb@a800000 { + usb_2_dwc3: dwc3@a800000 { compatible = "snps,dwc3"; reg = <0 0x0a800000 0 0xcd00>; interrupts = <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>; diff --git a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts index c2a709a384e9..d7591a4621a2 100644 --- a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts +++ b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts @@ -700,7 +700,7 @@ left_spkr: wsa8810-left{ compatible = "sdw10217211000"; reg = <0 3>; - powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>; + powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_HIGH>; #thermal-sensor-cells = <0>; sound-name-prefix = "SpkrLeft"; #sound-dai-cells = <0>; @@ -708,7 +708,7 @@ right_spkr: wsa8810-right{ compatible = "sdw10217211000"; - powerdown-gpios = <&wcdgpio 3 GPIO_ACTIVE_HIGH>; + powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>; reg = <0 4>; #thermal-sensor-cells = <0>; sound-name-prefix = "SpkrRight"; diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi index 612dda0fef43..eef9d79157e9 100644 --- a/arch/arm64/boot/dts/qcom/sm8150.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi @@ -2344,7 +2344,7 @@ resets = <&gcc GCC_USB30_PRIM_BCR>; - usb_1_dwc3: usb@a600000 { + usb_1_dwc3: dwc3@a600000 { compatible = "snps,dwc3"; reg = <0 0x0a600000 0 0xcd00>; interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>; diff --git a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi index 734c8adeceba..01482d227506 100644 --- a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi +++ b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi @@ -82,10 +82,10 @@ <GIC_SPI 384 IRQ_TYPE_LEVEL_HIGH>; interrupt-names = "eri", "rxi", "txi", "bri", "dri", "tei"; - clocks = <&cpg CPG_MOD R9A07G044_CLK_SCIF0>; + clocks = <&cpg CPG_MOD R9A07G044_SCIF0_CLK_PCK>; clock-names = "fck"; power-domains = <&cpg>; - resets = <&cpg R9A07G044_CLK_SCIF0>; + resets = <&cpg R9A07G044_SCIF0_RST_SYSTEM_N>; status = "disabled"; }; diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h index a9c0716e7440..a074459f8f2f 100644 --- a/arch/arm64/include/asm/cache.h +++ b/arch/arm64/include/asm/cache.h @@ -47,7 +47,7 @@ * cache before the transfer is done, causing old data to be seen by * the CPU. 
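* (Pinning ARCH_DMA_MINALIGN to a literal 128 bytes below, rather than deriving it from L1_CACHE_BYTES, keeps DMA buffer alignment covering the largest cache-writeback granule seen in practice even when the configured L1 cache line size is smaller.)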
*/ -#define ARCH_DMA_MINALIGN L1_CACHE_BYTES +#define ARCH_DMA_MINALIGN (128) #ifdef CONFIG_KASAN_SW_TAGS #define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT) diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index 21fa330f498d..b83fb24954b7 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -33,8 +33,7 @@ * EL2. */ .macro __init_el2_timers - mrs x0, cnthctl_el2 - orr x0, x0, #3 // Enable EL1 physical timers + mov x0, #3 // Enable EL1 physical timers msr cnthctl_el2, x0 msr cntvoff_el2, xzr // Clear virtual offset .endm diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index e58bca832dff..41b332c054ab 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h @@ -320,7 +320,17 @@ static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) static inline unsigned long regs_return_value(struct pt_regs *regs) { - return regs->regs[0]; + unsigned long val = regs->regs[0]; + + /* + * Audit currently uses regs_return_value() instead of + * syscall_get_return_value(). Apply the same sign-extension here until + * audit is updated to use syscall_get_return_value(). + */ + if (compat_user_mode(regs)) + val = sign_extend64(val, 31); + + return val; } static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc) diff --git a/arch/arm64/include/asm/smp_plat.h b/arch/arm64/include/asm/smp_plat.h index 99ad77df8f52..97ddc6c203b7 100644 --- a/arch/arm64/include/asm/smp_plat.h +++ b/arch/arm64/include/asm/smp_plat.h @@ -10,6 +10,7 @@ #include <linux/cpumask.h> +#include <asm/smp.h> #include <asm/types.h> struct mpidr_hash { diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h index 1801399204d7..8aebc00c1718 100644 --- a/arch/arm64/include/asm/stacktrace.h +++ b/arch/arm64/include/asm/stacktrace.h @@ -35,7 +35,7 @@ struct stack_info { * accounting information necessary for robust unwinding. * * @fp: The fp value in the frame record (or the real fp) - * @pc: The fp value in the frame record (or the real lr) + * @pc: The lr value in the frame record (or the real lr) * * @stacks_done: Stacks which have been entirely unwound, for which it is no * longer valid to unwind to. diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h index cfc0672013f6..03e20895453a 100644 --- a/arch/arm64/include/asm/syscall.h +++ b/arch/arm64/include/asm/syscall.h @@ -29,22 +29,23 @@ static inline void syscall_rollback(struct task_struct *task, regs->regs[0] = regs->orig_x0; } - -static inline long syscall_get_error(struct task_struct *task, - struct pt_regs *regs) +static inline long syscall_get_return_value(struct task_struct *task, + struct pt_regs *regs) { - unsigned long error = regs->regs[0]; + unsigned long val = regs->regs[0]; if (is_compat_thread(task_thread_info(task))) - error = sign_extend64(error, 31); + val = sign_extend64(val, 31); - return IS_ERR_VALUE(error) ? error : 0; + return val; } -static inline long syscall_get_return_value(struct task_struct *task, - struct pt_regs *regs) +static inline long syscall_get_error(struct task_struct *task, + struct pt_regs *regs) { - return regs->regs[0]; + unsigned long error = syscall_get_return_value(task, regs); + + return IS_ERR_VALUE(error) ? 
error : 0; } static inline void syscall_set_return_value(struct task_struct *task, diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index cce308586fcc..3f1490bfb938 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -17,7 +17,7 @@ CFLAGS_syscall.o += -fno-stack-protector # It's not safe to invoke KCOV when portions of the kernel environment aren't # available or are out-of-sync with HW state. Since `noinstr` doesn't always # inhibit KCOV instrumentation, disable it for the entire compilation unit. -KCOV_INSTRUMENT_entry.o := n +KCOV_INSTRUMENT_entry-common.o := n KCOV_INSTRUMENT_idle.o := n # Object file lists. diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 125d5c9471ac..0ead8bfedf20 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -81,6 +81,7 @@ #include <asm/mmu_context.h> #include <asm/mte.h> #include <asm/processor.h> +#include <asm/smp.h> #include <asm/sysreg.h> #include <asm/traps.h> #include <asm/virt.h> diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index 12ce14a98b7c..db8b2e2d02c2 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -604,7 +604,7 @@ asmlinkage void noinstr el0t_64_fiq_handler(struct pt_regs *regs) __el0_fiq_handler_common(regs); } -static void __el0_error_handler_common(struct pt_regs *regs) +static void noinstr __el0_error_handler_common(struct pt_regs *regs) { unsigned long esr = read_sysreg(esr_el1); diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c index cfa2cfde3019..418b2bba1521 100644 --- a/arch/arm64/kernel/kaslr.c +++ b/arch/arm64/kernel/kaslr.c @@ -162,7 +162,9 @@ u64 __init kaslr_early_init(void) * a PAGE_SIZE multiple in the range [_etext - MODULES_VSIZE, * _stext) . This guarantees that the resulting region still * covers [_stext, _etext], and that all relative branches can - * be resolved without veneers. + * be resolved without veneers unless this region is exhausted + * and we fall back to a larger 2GB window in module_alloc() + * when ARM64_MODULE_PLTS is enabled. */ module_range = MODULES_VSIZE - (u64)(_etext - _stext); module_alloc_base = (u64)_etext + offset - MODULES_VSIZE; diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c index 69b3fde8759e..36f51b0e438a 100644 --- a/arch/arm64/kernel/mte.c +++ b/arch/arm64/kernel/mte.c @@ -193,18 +193,6 @@ void mte_check_tfsr_el1(void) } #endif -static void update_gcr_el1_excl(u64 excl) -{ - - /* - * Note that the mask controlled by the user via prctl() is an - * include while GCR_EL1 accepts an exclude mask. - * No need for ISB since this only affects EL0 currently, implicit - * with ERET. 
- */ - sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, excl); -} - static void set_gcr_el1_excl(u64 excl) { current->thread.gcr_user_excl = excl; @@ -265,7 +253,8 @@ void mte_suspend_exit(void) if (!system_supports_mte()) return; - update_gcr_el1_excl(gcr_kernel_excl); + sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, gcr_kernel_excl); + isb(); } long set_mte_ctrl(struct task_struct *task, unsigned long arg) diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 499b6b2f9757..b381a1ee9ea7 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -1862,7 +1862,7 @@ void syscall_trace_exit(struct pt_regs *regs) audit_syscall_exit(regs); if (flags & _TIF_SYSCALL_TRACEPOINT) - trace_sys_exit(regs, regs_return_value(regs)); + trace_sys_exit(regs, syscall_get_return_value(current, regs)); if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP)) tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT); diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index f8192f4ae0b8..23036334f4dc 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -29,6 +29,7 @@ #include <asm/unistd.h> #include <asm/fpsimd.h> #include <asm/ptrace.h> +#include <asm/syscall.h> #include <asm/signal32.h> #include <asm/traps.h> #include <asm/vdso.h> @@ -890,7 +891,7 @@ static void do_signal(struct pt_regs *regs) retval == -ERESTART_RESTARTBLOCK || (retval == -ERESTARTSYS && !(ksig.ka.sa.sa_flags & SA_RESTART)))) { - regs->regs[0] = -EINTR; + syscall_set_return_value(current, regs, -EINTR, 0); regs->pc = continue_addr; } diff --git a/arch/arm64/kernel/smccc-call.S b/arch/arm64/kernel/smccc-call.S index d3d37f932b97..487381164ff6 100644 --- a/arch/arm64/kernel/smccc-call.S +++ b/arch/arm64/kernel/smccc-call.S @@ -32,20 +32,23 @@ SYM_FUNC_END(__arm_smccc_sve_check) EXPORT_SYMBOL(__arm_smccc_sve_check) .macro SMCCC instr + stp x29, x30, [sp, #-16]! + mov x29, sp alternative_if ARM64_SVE bl __arm_smccc_sve_check alternative_else_nop_endif \instr #0 - ldr x4, [sp] + ldr x4, [sp, #16] stp x0, x1, [x4, #ARM_SMCCC_RES_X0_OFFS] stp x2, x3, [x4, #ARM_SMCCC_RES_X2_OFFS] - ldr x4, [sp, #8] + ldr x4, [sp, #24] cbz x4, 1f /* no quirk structure */ ldr x9, [x4, #ARM_SMCCC_QUIRK_ID_OFFS] cmp x9, #ARM_SMCCC_QUIRK_QCOM_A6 b.ne 1f str x6, [x4, ARM_SMCCC_QUIRK_STATE_OFFS] -1: ret +1: ldp x29, x30, [sp], #16 + ret .endm /* diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index b83c8d911930..8982a2b78acf 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -218,7 +218,7 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl) #ifdef CONFIG_STACKTRACE -noinline void arch_stack_walk(stack_trace_consume_fn consume_entry, +noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, struct task_struct *task, struct pt_regs *regs) { diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c index 263d6c1a525f..50a0f1a38e84 100644 --- a/arch/arm64/kernel/syscall.c +++ b/arch/arm64/kernel/syscall.c @@ -54,10 +54,7 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno, ret = do_ni_syscall(regs, scno); } - if (is_compat_task()) - ret = lower_32_bits(ret); - - regs->regs[0] = ret; + syscall_set_return_value(current, regs, 0, ret); /* * Ultimately, this value will get limited by KSTACK_OFFSET_MAX(), @@ -115,7 +112,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr, * syscall. 
do_notify_resume() will send a signal to userspace * before the syscall is restarted. */ - regs->regs[0] = -ERESTARTNOINTR; + syscall_set_return_value(current, regs, -ERESTARTNOINTR, 0); return; } @@ -136,7 +133,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr, * anyway. */ if (scno == NO_SYSCALL) - regs->regs[0] = -ENOSYS; + syscall_set_return_value(current, regs, -ENOSYS, 0); scno = syscall_trace_enter(regs); if (scno == NO_SYSCALL) goto trace_exit; diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index e9a2b8f27792..0ca72f5cda41 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -94,10 +94,14 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, kvm->arch.return_nisv_io_abort_to_user = true; break; case KVM_CAP_ARM_MTE: - if (!system_supports_mte() || kvm->created_vcpus) - return -EINVAL; - r = 0; - kvm->arch.mte_enabled = true; + mutex_lock(&kvm->lock); + if (!system_supports_mte() || kvm->created_vcpus) { + r = -EINVAL; + } else { + r = 0; + kvm->arch.mte_enabled = true; + } + mutex_unlock(&kvm->lock); break; default: r = -EINVAL; diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c index d938ce95d3bd..a6ce991b1467 100644 --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c @@ -193,7 +193,7 @@ static bool range_is_memory(u64 start, u64 end) { struct kvm_mem_range r1, r2; - if (!find_mem_range(start, &r1) || !find_mem_range(end, &r2)) + if (!find_mem_range(start, &r1) || !find_mem_range(end - 1, &r2)) return false; if (r1.start != r2.start) return false; diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 3155c9e778f0..0625bf2353c2 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -947,7 +947,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, vma_shift = get_vma_page_shift(vma, hva); } - shared = (vma->vm_flags & VM_PFNMAP); + shared = (vma->vm_flags & VM_SHARED); switch (vma_shift) { #ifndef __PAGETABLE_PMD_FOLDED diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S index 95cd62d67371..2cf999e41d30 100644 --- a/arch/arm64/lib/copy_from_user.S +++ b/arch/arm64/lib/copy_from_user.S @@ -29,7 +29,7 @@ .endm .macro ldrh1 reg, ptr, val - user_ldst 9998f, ldtrh, \reg, \ptr, \val + user_ldst 9997f, ldtrh, \reg, \ptr, \val .endm .macro strh1 reg, ptr, val @@ -37,7 +37,7 @@ .endm .macro ldr1 reg, ptr, val - user_ldst 9998f, ldtr, \reg, \ptr, \val + user_ldst 9997f, ldtr, \reg, \ptr, \val .endm .macro str1 reg, ptr, val @@ -45,7 +45,7 @@ .endm .macro ldp1 reg1, reg2, ptr, val - user_ldp 9998f, \reg1, \reg2, \ptr, \val + user_ldp 9997f, \reg1, \reg2, \ptr, \val .endm .macro stp1 reg1, reg2, ptr, val @@ -53,8 +53,10 @@ .endm end .req x5 +srcin .req x15 SYM_FUNC_START(__arch_copy_from_user) add end, x0, x2 + mov srcin, x1 #include "copy_template.S" mov x0, #0 // Nothing to copy ret @@ -63,6 +65,11 @@ EXPORT_SYMBOL(__arch_copy_from_user) .section .fixup,"ax" .align 2 +9997: cmp dst, dstin + b.ne 9998f + // Before being absolutely sure we couldn't copy anything, try harder +USER(9998f, ldtrb tmp1w, [srcin]) + strb tmp1w, [dst], #1 9998: sub x0, end, dst // bytes not copied ret .previous diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S index 1f61cd0df062..dbea3799c3ef 100644 --- a/arch/arm64/lib/copy_in_user.S +++ b/arch/arm64/lib/copy_in_user.S @@ -30,33 +30,34 @@ .endm .macro ldrh1 reg, ptr, val - user_ldst 9998f, ldtrh, \reg, \ptr, \val + user_ldst 9997f, ldtrh, \reg, \ptr, \val .endm 
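The sign-extension rule in the syscall return-value hunks above can be checked with a small standalone C model. This is a sketch under my reading of the patch: sign_extend64() is the real kernel helper being mirrored here, everything else is invented for illustration.

#include <stdint.h>
#include <stdio.h>

/* userspace mirror of the kernel's sign_extend64() helper */
static int64_t sign_extend64_model(uint64_t value, int index)
{
	int shift = 63 - index;
	return (int64_t)(value << shift) >> shift;
}

int main(void)
{
	/*
	 * A compat (AArch32) task returning -1 leaves 0xffffffff in w0.
	 * Read as a raw 64-bit value that is 4294967295, a large positive
	 * number, so IS_ERR_VALUE() and the audit/tracing paths would
	 * misclassify it; extending from bit 31 recovers the intended -1.
	 */
	uint64_t w0 = 0xffffffffull;
	printf("raw:      %llu\n", (unsigned long long)w0);
	printf("extended: %lld\n", (long long)sign_extend64_model(w0, 31));
	return 0;
}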
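The new 9997 fixup labels in the copy routines above add a single-byte retry so the copy makes forward progress whenever the first byte is accessible. Below is a rough userspace model of that semantic, assuming faults behave as the hunk comments describe; the authoritative logic is the assembly itself.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/*
 * Model: the bulk path copies 8 bytes per access and "faults" if an
 * access would touch fault_off or beyond; the fixup then retries one
 * byte before giving up. Returns the number of bytes NOT copied, as
 * __arch_copy_from_user() does.
 */
static size_t copy_from_user_model(char *dst, const char *src, size_t n,
				   size_t fault_off)
{
	size_t done = 0;

	while (done < n) {
		size_t chunk = (n - done < 8) ? n - done : 8;

		if (done + chunk > fault_off)
			goto fixup;	/* simulated fault on this access */
		memcpy(dst + done, src + done, chunk);
		done += chunk;
	}
	return 0;

fixup:
	/* the added retry: one byte before reporting zero progress */
	if (done == 0 && n != 0 && fault_off > 0) {
		dst[0] = src[0];
		done = 1;
	}
	return n - done;
}

int main(void)
{
	char dst[16];

	/* the first 8-byte access straddles a fault at offset 3: without
	   the retry this reported 8 bytes uncopied, now it reports 7 */
	printf("%zu\n", copy_from_user_model(dst, "abcdefgh", 8, 3));
	return 0;
}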
.macro strh1 reg, ptr, val - user_ldst 9998f, sttrh, \reg, \ptr, \val + user_ldst 9997f, sttrh, \reg, \ptr, \val .endm .macro ldr1 reg, ptr, val - user_ldst 9998f, ldtr, \reg, \ptr, \val + user_ldst 9997f, ldtr, \reg, \ptr, \val .endm .macro str1 reg, ptr, val - user_ldst 9998f, sttr, \reg, \ptr, \val + user_ldst 9997f, sttr, \reg, \ptr, \val .endm .macro ldp1 reg1, reg2, ptr, val - user_ldp 9998f, \reg1, \reg2, \ptr, \val + user_ldp 9997f, \reg1, \reg2, \ptr, \val .endm .macro stp1 reg1, reg2, ptr, val - user_stp 9998f, \reg1, \reg2, \ptr, \val + user_stp 9997f, \reg1, \reg2, \ptr, \val .endm end .req x5 - +srcin .req x15 SYM_FUNC_START(__arch_copy_in_user) add end, x0, x2 + mov srcin, x1 #include "copy_template.S" mov x0, #0 ret @@ -65,6 +66,12 @@ EXPORT_SYMBOL(__arch_copy_in_user) .section .fixup,"ax" .align 2 +9997: cmp dst, dstin + b.ne 9998f + // Before being absolutely sure we couldn't copy anything, try harder +USER(9998f, ldtrb tmp1w, [srcin]) +USER(9998f, sttrb tmp1w, [dst]) + add dst, dst, #1 9998: sub x0, end, dst // bytes not copied ret .previous diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S index 043da90f5dd7..9f380eecf653 100644 --- a/arch/arm64/lib/copy_to_user.S +++ b/arch/arm64/lib/copy_to_user.S @@ -32,7 +32,7 @@ .endm .macro strh1 reg, ptr, val - user_ldst 9998f, sttrh, \reg, \ptr, \val + user_ldst 9997f, sttrh, \reg, \ptr, \val .endm .macro ldr1 reg, ptr, val @@ -40,7 +40,7 @@ .endm .macro str1 reg, ptr, val - user_ldst 9998f, sttr, \reg, \ptr, \val + user_ldst 9997f, sttr, \reg, \ptr, \val .endm .macro ldp1 reg1, reg2, ptr, val @@ -48,12 +48,14 @@ .endm .macro stp1 reg1, reg2, ptr, val - user_stp 9998f, \reg1, \reg2, \ptr, \val + user_stp 9997f, \reg1, \reg2, \ptr, \val .endm end .req x5 +srcin .req x15 SYM_FUNC_START(__arch_copy_to_user) add end, x0, x2 + mov srcin, x1 #include "copy_template.S" mov x0, #0 ret @@ -62,6 +64,12 @@ EXPORT_SYMBOL(__arch_copy_to_user) .section .fixup,"ax" .align 2 +9997: cmp dst, dstin + b.ne 9998f + // Before being absolutely sure we couldn't copy anything, try harder + ldrb tmp1w, [srcin] +USER(9998f, sttrb tmp1w, [dst]) + add dst, dst, #1 9998: sub x0, end, dst // bytes not copied ret .previous diff --git a/arch/arm64/lib/strlen.S b/arch/arm64/lib/strlen.S index 35fbdb7d6e1a..1648790e91b3 100644 --- a/arch/arm64/lib/strlen.S +++ b/arch/arm64/lib/strlen.S @@ -8,6 +8,7 @@ #include <linux/linkage.h> #include <asm/assembler.h> +#include <asm/mte-def.h> /* Assumptions: * @@ -42,7 +43,16 @@ #define REP8_7f 0x7f7f7f7f7f7f7f7f #define REP8_80 0x8080808080808080 +/* + * When KASAN_HW_TAGS is in use, memory is checked at MTE_GRANULE_SIZE + * (16-byte) granularity, and we must ensure that no access straddles this + * alignment boundary. + */ +#ifdef CONFIG_KASAN_HW_TAGS +#define MIN_PAGE_SIZE MTE_GRANULE_SIZE +#else #define MIN_PAGE_SIZE 4096 +#endif /* Since strings are short on average, we check the first 16 bytes of the string for a NUL character. 
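The MIN_PAGE_SIZE redefinition above amounts to a safety predicate: an unaligned 16-byte probe is allowed only if it cannot straddle a MIN_PAGE_SIZE boundary. A sketch of that property with invented names (this is not the literal strlen.S check):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* min_page_size is 16 (MTE_GRANULE_SIZE) under KASAN_HW_TAGS, else 4096 */
static bool probe16_is_safe(uintptr_t addr, uintptr_t min_page_size)
{
	/* the read [addr, addr + 16) stays inside one region iff the
	   offset into the region leaves at least 16 bytes */
	return (addr % min_page_size) <= min_page_size - 16;
}

int main(void)
{
	/* with 4096-byte regions almost any start is safe ... */
	printf("%d\n", probe16_is_safe(4096 - 32, 4096));	/* 1 */
	/* ... but with 16-byte MTE granules only aligned starts are,
	   which forces the routine onto its aligned code path */
	printf("%d\n", probe16_is_safe(8, 16));			/* 0 */
	printf("%d\n", probe16_is_safe(16, 16));		/* 1 */
	return 0;
}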
In order to do an unaligned ldp diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index d74586508448..9ff0de1b2b93 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -1339,7 +1339,6 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot) return dt_virt; } -#if CONFIG_PGTABLE_LEVELS > 3 int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot) { pud_t new_pud = pfn_pud(__phys_to_pfn(phys), mk_pud_sect_prot(prot)); @@ -1354,16 +1353,6 @@ int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot) return 1; } -int pud_clear_huge(pud_t *pudp) -{ - if (!pud_sect(READ_ONCE(*pudp))) - return 0; - pud_clear(pudp); - return 1; -} -#endif - -#if CONFIG_PGTABLE_LEVELS > 2 int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot) { pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), mk_pmd_sect_prot(prot)); @@ -1378,6 +1367,14 @@ int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot) return 1; } +int pud_clear_huge(pud_t *pudp) +{ + if (!pud_sect(READ_ONCE(*pudp))) + return 0; + pud_clear(pudp); + return 1; +} + int pmd_clear_huge(pmd_t *pmdp) { if (!pmd_sect(READ_ONCE(*pmdp))) @@ -1385,7 +1382,6 @@ int pmd_clear_huge(pmd_t *pmdp) pmd_clear(pmdp); return 1; } -#endif int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr) { diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index dccf98a37283..41c23f474ea6 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -823,6 +823,19 @@ emit_cond_jmp: return ret; break; + /* speculation barrier */ + case BPF_ST | BPF_NOSPEC: + /* + * Nothing required here. + * + * In case of arm64, we rely on the firmware mitigation of + * Speculative Store Bypass as controlled via the ssbd kernel + * parameter. Whenever the mitigation is enabled, it works + * for all of the kernel code with no need to provide any + * additional instructions. + */ + break; + /* ST: *(size *)(dst + off) = imm */ case BPF_ST | BPF_MEM | BPF_W: case BPF_ST | BPF_MEM | BPF_H: diff --git a/arch/h8300/Kconfig.cpu b/arch/h8300/Kconfig.cpu index b5e14d513e62..c30baa0499fc 100644 --- a/arch/h8300/Kconfig.cpu +++ b/arch/h8300/Kconfig.cpu @@ -44,7 +44,6 @@ config H8300_H8MAX bool "H8MAX" select H83069 select RAMKERNEL - select HAVE_IDE help H8MAX Evaluation Board Support More Information. 
(Japanese Only) diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index cf425c2c63af..4993c7ac7ff6 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -25,7 +25,6 @@ config IA64 select HAVE_ASM_MODVERSIONS select HAVE_UNSTABLE_SCHED_CLOCK select HAVE_EXIT_THREAD - select HAVE_IDE select HAVE_KPROBES select HAVE_KRETPROBES select HAVE_FTRACE_MCOUNT_RECORD diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index 96989ad46f66..d632a1d576f9 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -23,7 +23,6 @@ config M68K select HAVE_DEBUG_BUGVERBOSE select HAVE_EFFICIENT_UNALIGNED_ACCESS if !CPU_HAS_NO_UNALIGNED select HAVE_FUTEX_CMPXCHG if MMU && FUTEX - select HAVE_IDE select HAVE_MOD_ARCH_SPECIFIC select HAVE_UID16 select MMU_GATHER_NO_RANGE if MMU diff --git a/arch/m68k/Kconfig.machine b/arch/m68k/Kconfig.machine index d964c1f27399..6a07a6817885 100644 --- a/arch/m68k/Kconfig.machine +++ b/arch/m68k/Kconfig.machine @@ -33,6 +33,7 @@ config MAC depends on MMU select MMU_MOTOROLA if MMU select HAVE_ARCH_NVRAM_OPS + select HAVE_PATA_PLATFORM select LEGACY_TIMER_TICK help This option enables support for the Apple Macintosh series of diff --git a/arch/m68k/coldfire/m525x.c b/arch/m68k/coldfire/m525x.c index 2c4d2ca2f20d..485375112e28 100644 --- a/arch/m68k/coldfire/m525x.c +++ b/arch/m68k/coldfire/m525x.c @@ -26,7 +26,7 @@ DEFINE_CLK(pll, "pll.0", MCF_CLK); DEFINE_CLK(sys, "sys.0", MCF_BUSCLK); static struct clk_lookup m525x_clk_lookup[] = { - CLKDEV_INIT(NULL, "pll.0", &pll), + CLKDEV_INIT(NULL, "pll.0", &clk_pll), CLKDEV_INIT(NULL, "sys.0", &clk_sys), CLKDEV_INIT("mcftmr.0", NULL, &clk_sys), CLKDEV_INIT("mcftmr.1", NULL, &clk_sys), diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index cee6087cd686..6dfb27d531dd 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -71,7 +71,6 @@ config MIPS select HAVE_FUNCTION_TRACER select HAVE_GCC_PLUGINS select HAVE_GENERIC_VDSO - select HAVE_IDE select HAVE_IOREMAP_PROT select HAVE_IRQ_EXIT_ON_IRQ_STACK select HAVE_IRQ_TIME_ACCOUNTING diff --git a/arch/mips/Makefile b/arch/mips/Makefile index 4e942b7ef022..653befc1b176 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -321,7 +321,7 @@ KBUILD_LDFLAGS += -m $(ld-emul) ifdef CONFIG_MIPS CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \ - egrep -vw '__GNUC_(|MINOR_|PATCHLEVEL_)_' | \ + egrep -vw '__GNUC_(MINOR_|PATCHLEVEL_)?_' | \ sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/" -e 's/\$$/&&/g') endif diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h index 08f9dd6903b7..86310d6e1035 100644 --- a/arch/mips/include/asm/fpu.h +++ b/arch/mips/include/asm/fpu.h @@ -76,7 +76,7 @@ static inline int __enable_fpu(enum fpu_mode mode) /* we only have a 32-bit FPU */ return SIGFPE; #endif - fallthrough; + /* fallthrough */ case FPU_32BIT: if (cpu_has_fre) { /* clear FRE */ diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h index 4b2567d6b2df..c7925d0e9874 100644 --- a/arch/mips/include/asm/pgalloc.h +++ b/arch/mips/include/asm/pgalloc.h @@ -58,15 +58,20 @@ do { \ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) { - pmd_t *pmd = NULL; + pmd_t *pmd; struct page *pg; - pg = alloc_pages(GFP_KERNEL | __GFP_ACCOUNT, PMD_ORDER); - if (pg) { - pgtable_pmd_page_ctor(pg); - pmd = (pmd_t *)page_address(pg); - pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table); + pg = alloc_pages(GFP_KERNEL_ACCOUNT, PMD_ORDER); + if (!pg) + return NULL; + + if (!pgtable_pmd_page_ctor(pg)) { + 
__free_pages(pg, PMD_ORDER); + return NULL; } + + pmd = (pmd_t *)page_address(pg); + pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table); return pmd; } diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index cd4afcdf3725..9adad24c2e65 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -1383,6 +1383,7 @@ static void build_r4000_tlb_refill_handler(void) switch (boot_cpu_type()) { default: if (sizeof(long) == 4) { + fallthrough; case CPU_LOONGSON2EF: /* Loongson2 ebase is different than r4k, we have more space */ if ((p - tlb_handler) > 64) @@ -2169,6 +2170,7 @@ static void build_r4000_tlb_load_handler(void) default: if (cpu_has_mips_r2_exec_hazard) { uasm_i_ehb(&p); + fallthrough; case CPU_CAVIUM_OCTEON: case CPU_CAVIUM_OCTEON_PLUS: diff --git a/arch/mips/mti-malta/malta-platform.c b/arch/mips/mti-malta/malta-platform.c index ee7471984fe7..4ffbcc58c6f6 100644 --- a/arch/mips/mti-malta/malta-platform.c +++ b/arch/mips/mti-malta/malta-platform.c @@ -48,7 +48,8 @@ static struct plat_serial8250_port uart8250_data[] = { .mapbase = 0x1f000900, /* The CBUS UART */ .irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB2, .uartclk = 3686400, /* Twice the usual clk! */ - .iotype = UPIO_MEM32, + .iotype = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) ? + UPIO_MEM32BE : UPIO_MEM32, .flags = CBUS_UART_FLAGS, .regshift = 3, }, diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c index 939dd06764bc..3a73e9375712 100644 --- a/arch/mips/net/ebpf_jit.c +++ b/arch/mips/net/ebpf_jit.c @@ -1355,6 +1355,9 @@ jeq_common: } break; + case BPF_ST | BPF_NOSPEC: /* speculation barrier */ + break; + case BPF_ST | BPF_B | BPF_MEM: case BPF_ST | BPF_H | BPF_MEM: case BPF_ST | BPF_W | BPF_MEM: diff --git a/arch/nds32/mm/mmap.c b/arch/nds32/mm/mmap.c index c206b31ce07a..1bdf5e7d1b43 100644 --- a/arch/nds32/mm/mmap.c +++ b/arch/nds32/mm/mmap.c @@ -59,7 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index bde9907bc5b2..4f8c1fbf8f2f 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -3,7 +3,6 @@ config PARISC def_bool y select ARCH_32BIT_OFF_T if !64BIT select ARCH_MIGHT_HAVE_PC_PARPORT - select HAVE_IDE select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_GRAPH_TRACER select HAVE_SYSCALL_TRACEPOINTS diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index d01e3401581d..663766fbf505 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -220,7 +220,6 @@ config PPC select HAVE_HARDLOCKUP_DETECTOR_ARCH if PPC_BOOK3S_64 && SMP select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx) - select HAVE_IDE select HAVE_IOREMAP_PROT select HAVE_IRQ_EXIT_ON_IRQ_STACK select HAVE_IRQ_TIME_ACCOUNTING diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h index 64201125a287..d4b145b279f6 100644 --- a/arch/powerpc/include/asm/book3s/32/kup.h +++ b/arch/powerpc/include/asm/book3s/32/kup.h @@ -4,6 +4,8 @@ #include <asm/bug.h> #include <asm/book3s/32/mmu-hash.h> +#include <asm/mmu.h> +#include <asm/synch.h> #ifndef __ASSEMBLY__ @@ -28,6 +30,15 @@ static inline void kuep_lock(void) return; update_user_segments(mfsr(0) | SR_NX); + /* + * This isync() shouldn't be necessary as the kernel is not expected to + * run 
any instruction in userspace soon after the update of segments, + * but hash based cores (at least G3) seem to exhibit a random + * behaviour when the 'isync' is not there. 603 cores don't have this + * behaviour so don't do the 'isync' as it saves several CPU cycles. + */ + if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) + isync(); /* Context sync required after mtsr() */ } static inline void kuep_unlock(void) @@ -36,6 +47,15 @@ static inline void kuep_unlock(void) return; update_user_segments(mfsr(0) & ~SR_NX); + /* + * This isync() shouldn't be necessary as a 'rfi' will soon be executed + * to return to userspace, but hash based cores (at least G3) seem to + * exhibit a random behaviour when the 'isync' is not there. 603 cores + * don't have this behaviour so don't do the 'isync' as it saves several + * CPU cycles. + */ + if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) + isync(); /* Context sync required after mtsr() */ } #ifdef CONFIG_PPC_KUAP diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h index d4bdf7d274ac..6b800d3e2681 100644 --- a/arch/powerpc/include/asm/interrupt.h +++ b/arch/powerpc/include/asm/interrupt.h @@ -583,6 +583,9 @@ DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode); DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException); +/* irq.c */ +DECLARE_INTERRUPT_HANDLER_ASYNC(do_IRQ); + void __noreturn unrecoverable_exception(struct pt_regs *regs); void replay_system_reset(void); diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h index 4982f3711fc3..2b3278534bc1 100644 --- a/arch/powerpc/include/asm/irq.h +++ b/arch/powerpc/include/asm/irq.h @@ -52,7 +52,7 @@ extern void *mcheckirq_ctx[NR_CPUS]; extern void *hardirq_ctx[NR_CPUS]; extern void *softirq_ctx[NR_CPUS]; -extern void do_IRQ(struct pt_regs *regs); +void __do_IRQ(struct pt_regs *regs); extern void __init init_IRQ(void); extern void __do_irq(struct pt_regs *regs); diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h index 3e5d470a6155..14422e851494 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h @@ -70,6 +70,22 @@ struct pt_regs unsigned long __pad[4]; /* Maintain 16 byte interrupt stack alignment */ }; #endif +#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE) + struct { /* Must be a multiple of 16 bytes */ + unsigned long mas0; + unsigned long mas1; + unsigned long mas2; + unsigned long mas3; + unsigned long mas6; + unsigned long mas7; + unsigned long srr0; + unsigned long srr1; + unsigned long csrr0; + unsigned long csrr1; + unsigned long dsrr0; + unsigned long dsrr1; + }; +#endif }; #endif diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index a47eefa09bcb..5bee245d832b 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -309,24 +309,21 @@ int main(void) STACK_PT_REGS_OFFSET(STACK_REGS_IAMR, iamr); #endif -#if defined(CONFIG_PPC32) -#if defined(CONFIG_BOOKE) || defined(CONFIG_40x) - DEFINE(EXC_LVL_SIZE, STACK_EXC_LVL_FRAME_SIZE); - DEFINE(MAS0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0)); +#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE) + STACK_PT_REGS_OFFSET(MAS0, mas0); /* we overload MMUCR for 44x on MAS0 since they are mutually exclusive */ - DEFINE(MMUCR, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0)); - DEFINE(MAS1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas1)); - DEFINE(MAS2, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas2)); - DEFINE(MAS3, 
STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas3)); - DEFINE(MAS6, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas6)); - DEFINE(MAS7, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas7)); - DEFINE(_SRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr0)); - DEFINE(_SRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr1)); - DEFINE(_CSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr0)); - DEFINE(_CSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr1)); - DEFINE(_DSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr0)); - DEFINE(_DSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr1)); -#endif + STACK_PT_REGS_OFFSET(MMUCR, mas0); + STACK_PT_REGS_OFFSET(MAS1, mas1); + STACK_PT_REGS_OFFSET(MAS2, mas2); + STACK_PT_REGS_OFFSET(MAS3, mas3); + STACK_PT_REGS_OFFSET(MAS6, mas6); + STACK_PT_REGS_OFFSET(MAS7, mas7); + STACK_PT_REGS_OFFSET(_SRR0, srr0); + STACK_PT_REGS_OFFSET(_SRR1, srr1); + STACK_PT_REGS_OFFSET(_CSRR0, csrr0); + STACK_PT_REGS_OFFSET(_CSRR1, csrr1); + STACK_PT_REGS_OFFSET(_DSRR0, dsrr0); + STACK_PT_REGS_OFFSET(_DSRR1, dsrr1); #endif /* About the CPU features table */ diff --git a/arch/powerpc/kernel/head_book3s_32.S b/arch/powerpc/kernel/head_book3s_32.S index 764edd860ed4..68e5c0a7e99d 100644 --- a/arch/powerpc/kernel/head_book3s_32.S +++ b/arch/powerpc/kernel/head_book3s_32.S @@ -300,7 +300,7 @@ ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE) EXCEPTION_PROLOG_1 EXCEPTION_PROLOG_2 INTERRUPT_DATA_STORAGE DataAccess handle_dar_dsisr=1 prepare_transfer_to_handler - lwz r5, _DSISR(r11) + lwz r5, _DSISR(r1) andis. r0, r5, DSISR_DABRMATCH@h bne- 1f bl do_page_fault diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h index 87b806e8eded..e5503420b6c6 100644 --- a/arch/powerpc/kernel/head_booke.h +++ b/arch/powerpc/kernel/head_booke.h @@ -168,20 +168,18 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV) /* only on e500mc */ #define DBG_STACK_BASE dbgirq_ctx -#define EXC_LVL_FRAME_OVERHEAD (THREAD_SIZE - INT_FRAME_SIZE - EXC_LVL_SIZE) - #ifdef CONFIG_SMP #define BOOKE_LOAD_EXC_LEVEL_STACK(level) \ mfspr r8,SPRN_PIR; \ slwi r8,r8,2; \ addis r8,r8,level##_STACK_BASE@ha; \ lwz r8,level##_STACK_BASE@l(r8); \ - addi r8,r8,EXC_LVL_FRAME_OVERHEAD; + addi r8,r8,THREAD_SIZE - INT_FRAME_SIZE; #else #define BOOKE_LOAD_EXC_LEVEL_STACK(level) \ lis r8,level##_STACK_BASE@ha; \ lwz r8,level##_STACK_BASE@l(r8); \ - addi r8,r8,EXC_LVL_FRAME_OVERHEAD; + addi r8,r8,THREAD_SIZE - INT_FRAME_SIZE; #endif /* @@ -208,7 +206,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV) mtmsr r11; \ mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\ lwz r11, TASK_STACK - THREAD(r11); /* this thread's kernel stack */\ - addi r11,r11,EXC_LVL_FRAME_OVERHEAD; /* allocate stack frame */\ + addi r11,r11,THREAD_SIZE - INT_FRAME_SIZE; /* allocate stack frame */\ beq 1f; \ /* COMING FROM USER MODE */ \ stw r9,_CCR(r11); /* save CR */\ @@ -516,24 +514,5 @@ label: bl kernel_fp_unavailable_exception; \ b interrupt_return -#else /* __ASSEMBLY__ */ -struct exception_regs { - unsigned long mas0; - unsigned long mas1; - unsigned long mas2; - unsigned long mas3; - unsigned long mas6; - unsigned long mas7; - unsigned long srr0; - unsigned long srr1; - unsigned long csrr0; - unsigned long csrr1; - unsigned long dsrr0; - unsigned long dsrr1; -}; - -/* ensure this structure is always sized to a multiple of the stack alignment */ -#define STACK_EXC_LVL_FRAME_SIZE ALIGN(sizeof (struct exception_regs), 16) - #endif /* 
__ASSEMBLY__ */ #endif /* __HEAD_BOOKE_H__ */ diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 91e63eac4e8f..551b653228c4 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -750,7 +750,7 @@ void __do_irq(struct pt_regs *regs) trace_irq_exit(regs); } -DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ) +void __do_IRQ(struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); void *cursp, *irqsp, *sirqsp; @@ -774,6 +774,11 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ) set_irq_regs(old_regs); } +DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ) +{ + __do_IRQ(regs); +} + static void *__init alloc_vm_stack(void) { return __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, THREADINFO_GFP, diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index cbc28d1a2e1b..7a7cd6bda53e 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -292,7 +292,8 @@ int kprobe_handler(struct pt_regs *regs) if (user_mode(regs)) return 0; - if (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR)) + if (!IS_ENABLED(CONFIG_BOOKE) && + (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR))) return 0; /* diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index 5ff0e55d0db1..defecb3b1b15 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -1167,7 +1167,7 @@ static int __init topology_init(void) * CPU. For instance, the boot cpu might never be valid * for hotplugging. */ - if (smp_ops->cpu_offline_self) + if (smp_ops && smp_ops->cpu_offline_self) c->hotpluggable = 1; #endif diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index e45ce427bffb..c487ba5a6e11 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -586,7 +586,7 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt) #if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC) if (atomic_read(&ppc_n_lost_interrupts) != 0) - do_IRQ(regs); + __do_IRQ(regs); #endif old_regs = set_irq_regs(regs); diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index dfbce527c98e..d56254f05e17 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -1104,7 +1104,7 @@ DEFINE_INTERRUPT_HANDLER(RunModeException) _exception(SIGTRAP, regs, TRAP_UNK, 0); } -DEFINE_INTERRUPT_HANDLER(single_step_exception) +static void __single_step_exception(struct pt_regs *regs) { clear_single_step(regs); clear_br_trace(regs); @@ -1121,6 +1121,11 @@ DEFINE_INTERRUPT_HANDLER(single_step_exception) _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); } +DEFINE_INTERRUPT_HANDLER(single_step_exception) +{ + __single_step_exception(regs); +} + /* * After we have successfully emulated an instruction, we have to * check if the instruction was being single-stepped, and if so, @@ -1130,7 +1135,7 @@ DEFINE_INTERRUPT_HANDLER(single_step_exception) static void emulate_single_step(struct pt_regs *regs) { if (single_stepping(regs)) - single_step_exception(regs); + __single_step_exception(regs); } static inline int __parse_fpscr(unsigned long fpscr) diff --git a/arch/powerpc/kernel/vdso64/Makefile b/arch/powerpc/kernel/vdso64/Makefile index 2813e3f98db6..3c5baaa6f1e7 100644 --- a/arch/powerpc/kernel/vdso64/Makefile +++ b/arch/powerpc/kernel/vdso64/Makefile @@ -27,6 +27,13 @@ KASAN_SANITIZE := n ccflags-y := -shared -fno-common -fno-builtin -nostdlib \ -Wl,-soname=linux-vdso64.so.1 -Wl,--hash-style=both + +# Go prior to 1.16.x assumes r30 is not clobbered by any VDSO code. 
That used to be true +# by accident when the VDSO was hand-written asm code, but may not be now that the VDSO is +# compiler generated. To avoid breaking Go, tell GCC not to use r30. Impact on code +# generation is minimal; it will just use r29 instead. +ccflags-y += $(call cc-option, -ffixed-r30) + asflags-y := -D__VDSO64__ -s targets += vdso64.lds diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 1d1fcc290fca..085fb8ecbf68 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -2697,8 +2697,10 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu) HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP | HFSCR_PREFIX; if (cpu_has_feature(CPU_FTR_HVMODE)) { vcpu->arch.hfscr &= mfspr(SPRN_HFSCR); +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) vcpu->arch.hfscr |= HFSCR_TM; +#endif } if (cpu_has_feature(CPU_FTR_TM_COMP)) vcpu->arch.hfscr |= HFSCR_TM; diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c index 8543ad538b0c..898f942eb198 100644 --- a/arch/powerpc/kvm/book3s_hv_nested.c +++ b/arch/powerpc/kvm/book3s_hv_nested.c @@ -302,6 +302,9 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu) if (vcpu->kvm->arch.l1_ptcr == 0) return H_NOT_AVAILABLE; + if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr)) + return H_BAD_MODE; + /* copy parameters in */ hv_ptr = kvmppc_get_gpr(vcpu, 4); regs_ptr = kvmppc_get_gpr(vcpu, 5); @@ -322,6 +325,23 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu) if (l2_hv.vcpu_token >= NR_CPUS) return H_PARAMETER; + /* + * L1 must have set up a suspended state to enter the L2 in a + * transactional state, and only in that case. These have to be + * filtered out here to prevent causing a TM Bad Thing in the + * host HRFID. We could synthesize a TM Bad Thing back to the L1 + * here but there doesn't seem like much point. + */ + if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr)) { + if (!MSR_TM_ACTIVE(l2_regs.msr)) + return H_BAD_MODE; + } else { + if (l2_regs.msr & MSR_TS_MASK) + return H_BAD_MODE; + if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_TS_MASK)) + return H_BAD_MODE; + } + /* translate lpid */ l2 = kvmhv_get_nested(vcpu->kvm, l2_hv.lpid, true); if (!l2) diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c index 83f592eadcd2..961b3d70483c 100644 --- a/arch/powerpc/kvm/book3s_hv_p9_entry.c +++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c @@ -317,6 +317,9 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc */ mtspr(SPRN_HDEC, hdec); +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +tm_return_to_guest: +#endif mtspr(SPRN_DAR, vcpu->arch.shregs.dar); mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr); mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0); @@ -415,11 +418,23 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc * is in real suspend mode and is trying to transition to * transactional mode. */ - if (local_paca->kvm_hstate.fake_suspend && + if (!local_paca->kvm_hstate.fake_suspend && (vcpu->arch.shregs.msr & MSR_TS_S)) { if (kvmhv_p9_tm_emulation_early(vcpu)) { - /* Prevent it being handled again. */ - trap = 0; + /* + * Go straight back into the guest with the + * new NIP/MSR as set by TM emulation. + */ + mtspr(SPRN_HSRR0, vcpu->arch.regs.nip); + mtspr(SPRN_HSRR1, vcpu->arch.shregs.msr); + + /* + * tm_return_to_guest re-loads SRR0/1, DAR, + * DSISR after RI is cleared, in case they had + * been clobbered by an MCE. 
+ */ + __mtmsrd(0, 1); /* clear RI */ + goto tm_return_to_guest; } } #endif @@ -499,6 +514,10 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc * If we are in real mode, only switch MMU on after the MMU is * switched to host, to avoid the P9_RADIX_PREFETCH_BUG. */ + if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) && + vcpu->arch.shregs.msr & MSR_TS_MASK) + msr |= MSR_TS_S; + __mtmsrd(msr, 0); end_timing(vcpu); diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c index c5e677508d3b..0f847f1e5ddd 100644 --- a/arch/powerpc/kvm/book3s_rtas.c +++ b/arch/powerpc/kvm/book3s_rtas.c @@ -242,6 +242,17 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) * value so we can restore it on the way out. */ orig_rets = args.rets; + if (be32_to_cpu(args.nargs) >= ARRAY_SIZE(args.args)) { + /* + * Don't overflow our args array: ensure there is room for + * at least rets[0] (even if the call specifies 0 nret). + * + * Each handler must then check for the correct nargs and nret + * values, but they may always return failure in rets[0]. + */ + rc = -EINVAL; + goto fail; + } args.rets = &args.args[be32_to_cpu(args.nargs)]; mutex_lock(&vcpu->kvm->arch.rtas_token_lock); @@ -269,9 +280,17 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) fail: /* * We only get here if the guest has called RTAS with a bogus - * args pointer. That means we can't get to the args, and so we - * can't fail the RTAS call. So fail right out to userspace, - * which should kill the guest. + * args pointer or nargs/nret values that would overflow the + * array. That means we can't get to the args, and so we can't + * fail the RTAS call. So fail right out to userspace, which + * should kill the guest. + * + * SLOF should actually pass the hcall return value from the + * rtas handler call in r3, so enter_rtas could be modified to + * return a failure indication in r3 and we could return such + * errors to the guest rather than failing to host userspace. + * However old guests that don't test for failure could then + * continue silently after errors, so for now we won't do this. */ return rc; } diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index be33b5321a76..b4e6f70b97b9 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -2048,9 +2048,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, { struct kvm_enable_cap cap; r = -EFAULT; - vcpu_load(vcpu); if (copy_from_user(&cap, argp, sizeof(cap))) goto out; + vcpu_load(vcpu); r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); vcpu_put(vcpu); break; @@ -2074,9 +2074,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_DIRTY_TLB: { struct kvm_dirty_tlb dirty; r = -EFAULT; - vcpu_load(vcpu); if (copy_from_user(&dirty, argp, sizeof(dirty))) goto out; + vcpu_load(vcpu); r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty); vcpu_put(vcpu); break; diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c index 60780e089118..0df9fe29dd56 100644 --- a/arch/powerpc/mm/nohash/8xx.c +++ b/arch/powerpc/mm/nohash/8xx.c @@ -240,3 +240,13 @@ void __init setup_kuap(bool disabled) mtspr(SPRN_MD_AP, MD_APG_KUAP); } #endif + +int pud_clear_huge(pud_t *pud) +{ + return 0; +} + +int pmd_clear_huge(pmd_t *pmd) +{ + return 0; +} diff --git a/arch/powerpc/mm/pageattr.c b/arch/powerpc/mm/pageattr.c index 0876216ceee6..edea388e9d3f 100644 --- a/arch/powerpc/mm/pageattr.c +++ b/arch/powerpc/mm/pageattr.c @@ -18,16 +18,12 @@ /* * Updates the attributes of a page in three steps: * - * 1. invalidate the page table entry - * 2. 
flush the TLB - * 3. install the new entry with the updated attributes - * - * Invalidating the pte means there are situations where this will not work - * when in theory it should. - * For example: - * - removing write from page whilst it is being executed - * - setting a page read-only whilst it is being read by another CPU + * 1. take the page_table_lock + * 2. install the new entry with the updated attributes + * 3. flush the TLB * + * This sequence is safe against concurrent updates, and also allows updating the + * attributes of a page currently being executed or accessed. */ static int change_page_attr(pte_t *ptep, unsigned long addr, void *data) { @@ -36,9 +32,7 @@ static int change_page_attr(pte_t *ptep, unsigned long addr, void *data) spin_lock(&init_mm.page_table_lock); - /* invalidate the PTE so it's safe to modify */ - pte = ptep_get_and_clear(&init_mm, addr, ptep); - flush_tlb_kernel_range(addr, addr + PAGE_SIZE); + pte = ptep_get(ptep); /* modify the PTE bits as desired, then apply */ switch (action) { @@ -59,11 +53,14 @@ static int change_page_attr(pte_t *ptep, unsigned long addr, void *data) break; } - set_pte_at(&init_mm, addr, ptep, pte); + pte_update(&init_mm, addr, ptep, ~0UL, pte_val(pte), 0); /* See ptesync comment in radix__set_pte_at() */ if (radix_enabled()) asm volatile("ptesync": : :"memory"); + + flush_tlb_kernel_range(addr, addr + PAGE_SIZE); + spin_unlock(&init_mm.page_table_lock); return 0; diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c index 34bb1583fc0c..beb12cbc8c29 100644 --- a/arch/powerpc/net/bpf_jit_comp32.c +++ b/arch/powerpc/net/bpf_jit_comp32.c @@ -738,6 +738,12 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context * break; /* + * BPF_ST NOSPEC (speculation barrier) + */ + case BPF_ST | BPF_NOSPEC: + break; + + /* * BPF_ST(X) */ case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */ diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index de8595880fee..b87a63dba9c8 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c @@ -628,6 +628,12 @@ emit_clear: break; /* + * BPF_ST NOSPEC (speculation barrier) + */ + case BPF_ST | BPF_NOSPEC: + break; + + /* * BPF_ST(X) */ case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */ diff --git a/arch/powerpc/platforms/pasemi/idle.c b/arch/powerpc/platforms/pasemi/idle.c index 9b88e3cded7d..534b0317fc15 100644 --- a/arch/powerpc/platforms/pasemi/idle.c +++ b/arch/powerpc/platforms/pasemi/idle.c @@ -42,6 +42,7 @@ static int pasemi_system_reset_exception(struct pt_regs *regs) switch (regs->msr & SRR1_WAKEMASK) { case SRR1_WAKEDEC: set_dec(1); + break; case SRR1_WAKEEE: /* * Handle these when interrupts get re-enabled and we take diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index bdfea6d6ab69..3256a316e884 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c @@ -146,6 +146,7 @@ static inline void psurge_clr_ipi(int cpu) switch(psurge_type) { case PSURGE_DUAL: out_8(psurge_sec_intr, ~0); + break; case PSURGE_NONE: break; default: diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 631a0d57b6cd..0dfaa6ab44cc 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -77,7 +77,7 @@ #include "../../../../drivers/pci/pci.h" DEFINE_STATIC_KEY_FALSE(shared_processor); -EXPORT_SYMBOL_GPL(shared_processor); 
+EXPORT_SYMBOL(shared_processor); int CMO_PrPSP = -1; int CMO_SecPSP = -1; @@ -539,9 +539,10 @@ static void init_cpu_char_feature_flags(struct h_cpu_char_result *result) * H_CPU_BEHAV_FAVOUR_SECURITY_H could be set only if * H_CPU_BEHAV_FAVOUR_SECURITY is. */ - if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY)) + if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY)) { security_ftr_clear(SEC_FTR_FAVOUR_SECURITY); - else if (result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY_H) + pseries_security_flavor = 0; + } else if (result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY_H) pseries_security_flavor = 1; else pseries_security_flavor = 2; diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c index dbdbbc2f1dc5..8183ca343675 100644 --- a/arch/powerpc/sysdev/xive/common.c +++ b/arch/powerpc/sysdev/xive/common.c @@ -67,6 +67,7 @@ static struct irq_domain *xive_irq_domain; static struct xive_ipi_desc { unsigned int irq; char name[16]; + atomic_t started; } *xive_ipis; /* @@ -1120,7 +1121,7 @@ static const struct irq_domain_ops xive_ipi_irq_domain_ops = { .alloc = xive_ipi_irq_domain_alloc, }; -static int __init xive_request_ipi(void) +static int __init xive_init_ipis(void) { struct fwnode_handle *fwnode; struct irq_domain *ipi_domain; @@ -1144,10 +1145,6 @@ static int __init xive_request_ipi(void) struct xive_ipi_desc *xid = &xive_ipis[node]; struct xive_ipi_alloc_info info = { node }; - /* Skip nodes without CPUs */ - if (cpumask_empty(cpumask_of_node(node))) - continue; - /* * Map one IPI interrupt per node for all cpus of that node. * Since the HW interrupt number doesn't have any meaning, @@ -1159,11 +1156,6 @@ static int __init xive_request_ipi(void) xid->irq = ret; snprintf(xid->name, sizeof(xid->name), "IPI-%d", node); - - ret = request_irq(xid->irq, xive_muxed_ipi_action, - IRQF_PERCPU | IRQF_NO_THREAD, xid->name, NULL); - - WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret); } return ret; @@ -1178,6 +1170,22 @@ out: return ret; } +static int xive_request_ipi(unsigned int cpu) +{ + struct xive_ipi_desc *xid = &xive_ipis[early_cpu_to_node(cpu)]; + int ret; + + if (atomic_inc_return(&xid->started) > 1) + return 0; + + ret = request_irq(xid->irq, xive_muxed_ipi_action, + IRQF_PERCPU | IRQF_NO_THREAD, + xid->name, NULL); + + WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret); + return ret; +} + static int xive_setup_cpu_ipi(unsigned int cpu) { unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu); @@ -1192,6 +1200,9 @@ static int xive_setup_cpu_ipi(unsigned int cpu) if (xc->hw_ipi != XIVE_BAD_IRQ) return 0; + /* Register the IPI */ + xive_request_ipi(cpu); + /* Grab an IPI from the backend, this will populate xc->hw_ipi */ if (xive_ops->get_ipi(cpu, xc)) return -EIO; @@ -1231,6 +1242,8 @@ static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc) if (xc->hw_ipi == XIVE_BAD_IRQ) return; + /* TODO: clear IPI mapping */ + /* Mask the IPI */ xive_do_source_set_mask(&xc->ipi_data, true); @@ -1253,7 +1266,7 @@ void __init xive_smp_probe(void) smp_ops->cause_ipi = xive_cause_ipi; /* Register the IPI */ - xive_request_ipi(); + xive_init_ipis(); /* Allocate and setup IPI for the boot CPU */ xive_setup_cpu_ipi(smp_processor_id()); diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 8fcceb8eda07..4f7b70ae7c31 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -492,10 +492,16 @@ config CC_HAVE_STACKPROTECTOR_TLS config STACKPROTECTOR_PER_TASK def_bool y + depends on !GCC_PLUGIN_RANDSTRUCT depends on STACKPROTECTOR && 
CC_HAVE_STACKPROTECTOR_TLS +config PHYS_RAM_BASE_FIXED + bool "Explicitly specified physical RAM address" + default n + config PHYS_RAM_BASE hex "Platform Physical RAM address" + depends on PHYS_RAM_BASE_FIXED default "0x80000000" help This is the physical address of RAM in the system. It has to be @@ -508,6 +514,7 @@ config XIP_KERNEL # This prevents XIP from being enabled by all{yes,mod}config, which # fail to build since XIP doesn't support large kernels. depends on !COMPILE_TEST + select PHYS_RAM_BASE_FIXED help Execute-In-Place allows the kernel to run from non-volatile storage directly addressable by the CPU, such as NOR flash. This saves RAM diff --git a/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts index b1c3c596578f..2e4ea84f27e7 100644 --- a/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts +++ b/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts @@ -24,7 +24,7 @@ memory@80000000 { device_type = "memory"; - reg = <0x0 0x80000000 0x2 0x00000000>; + reg = <0x0 0x80000000 0x4 0x00000000>; }; soc { diff --git a/arch/riscv/include/asm/efi.h b/arch/riscv/include/asm/efi.h index 6d98cd999680..7b3483ba2e84 100644 --- a/arch/riscv/include/asm/efi.h +++ b/arch/riscv/include/asm/efi.h @@ -27,10 +27,10 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md); #define ARCH_EFI_IRQ_FLAGS_MASK (SR_IE | SR_SPIE) -/* Load initrd at enough distance from DRAM start */ +/* Load initrd anywhere in system RAM */ static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr) { - return image_addr + SZ_256M; + return ULONG_MAX; } #define alloc_screen_info(x...) (&screen_info) diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h index cca8764aed83..b0ca5058e7ae 100644 --- a/arch/riscv/include/asm/page.h +++ b/arch/riscv/include/asm/page.h @@ -103,6 +103,7 @@ struct kernel_mapping { }; extern struct kernel_mapping kernel_map; +extern phys_addr_t phys_ram_base; #ifdef CONFIG_64BIT #define is_kernel_mapping(x) \ @@ -113,9 +114,9 @@ extern struct kernel_mapping kernel_map; #define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + kernel_map.va_pa_offset)) #define kernel_mapping_pa_to_va(y) ({ \ unsigned long _y = y; \ - (_y >= CONFIG_PHYS_RAM_BASE) ? \ - (void *)((unsigned long)(_y) + kernel_map.va_kernel_pa_offset + XIP_OFFSET) : \ - (void *)((unsigned long)(_y) + kernel_map.va_kernel_xip_pa_offset); \ + (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < phys_ram_base) ? 
\ + (void *)((unsigned long)(_y) + kernel_map.va_kernel_xip_pa_offset) : \ + (void *)((unsigned long)(_y) + kernel_map.va_kernel_pa_offset + XIP_OFFSET); \ }) #define __pa_to_va_nodebug(x) linear_mapping_pa_to_va(x) diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index d3081e4d9600..3397ddac1a30 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -11,7 +11,7 @@ endif CFLAGS_syscall_table.o += $(call cc-option,-Wno-override-init,) ifdef CONFIG_KEXEC -AFLAGS_kexec_relocate.o := -mcmodel=medany -mno-relax +AFLAGS_kexec_relocate.o := -mcmodel=medany $(call cc-option,-mno-relax) endif extra-y += head.o diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index 18bd0e4bc36c..120b2f6f71bc 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -229,8 +229,8 @@ static void __init init_resources(void) } /* Clean-up any unused pre-allocated resources */ - mem_res_sz = (num_resources - res_idx + 1) * sizeof(*mem_res); - memblock_free(__pa(mem_res), mem_res_sz); + if (res_idx >= 0) + memblock_free(__pa(mem_res), (res_idx + 1) * sizeof(*mem_res)); return; error: diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c index ff467b98c3e3..315db3d0229b 100644 --- a/arch/riscv/kernel/stacktrace.c +++ b/arch/riscv/kernel/stacktrace.c @@ -27,7 +27,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, fp = frame_pointer(regs); sp = user_stack_pointer(regs); pc = instruction_pointer(regs); - } else if (task == current) { + } else if (task == NULL || task == current) { fp = (unsigned long)__builtin_frame_address(1); sp = (unsigned long)__builtin_frame_address(0); pc = (unsigned long)__builtin_return_address(0); @@ -132,8 +132,12 @@ unsigned long get_wchan(struct task_struct *task) { unsigned long pc = 0; - if (likely(task && task != current && !task_is_running(task))) + if (likely(task && task != current && !task_is_running(task))) { + if (!try_get_task_stack(task)) + return 0; walk_stackframe(task, NULL, save_wchan, &pc); + put_task_stack(task); + } return pc; } diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S index bceb0629e440..63bc691cff91 100644 --- a/arch/riscv/lib/uaccess.S +++ b/arch/riscv/lib/uaccess.S @@ -30,23 +30,23 @@ ENTRY(__asm_copy_from_user) * t0 - end of uncopied dst */ add t0, a0, a2 - bgtu a0, t0, 5f /* * Use byte copy only if too small. + * SZREG holds 4 for RV32 and 8 for RV64 */ - li a3, 8*SZREG /* size must be larger than size in word_copy */ + li a3, 9*SZREG /* size must be larger than size in word_copy */ bltu a2, a3, .Lbyte_copy_tail /* - * Copy first bytes until dst is align to word boundary. + * Copy first bytes until dst is aligned to word boundary. * a0 - start of dst * t1 - start of aligned dst */ addi t1, a0, SZREG-1 andi t1, t1, ~(SZREG-1) /* dst is already aligned, skip */ - beq a0, t1, .Lskip_first_bytes + beq a0, t1, .Lskip_align_dst 1: /* a5 - one byte for copying data */ fixup lb a5, 0(a1), 10f @@ -55,7 +55,7 @@ ENTRY(__asm_copy_from_user) addi a0, a0, 1 /* dst */ bltu a0, t1, 1b /* t1 - start of aligned dst */ -.Lskip_first_bytes: +.Lskip_align_dst: /* * Now dst is aligned. * Use shift-copy if src is misaligned. 
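The 8*SZREG to 9*SZREG change above resizes the byte-copy cutoff so the word path is always left with a full batch after aligning the destination. A worked version of that size budget, under my reading of the hunk:

#include <stdio.h>

int main(void)
{
	const unsigned long szreg = 8;		   /* RV64; 4 on RV32 */
	const unsigned long align_max = szreg - 1; /* worst case to align dst */
	const unsigned long batch = 8 * szreg;	   /* one word_copy iteration */

	/* the word path needs at least align_max + batch = 71 bytes;
	   9*SZREG = 72 is the nearest SZREG multiple, so anything below
	   it branches to .Lbyte_copy_tail instead */
	printf("minimum needed: %lu\n", align_max + batch);	/* 71 */
	printf("threshold used: %lu\n", 9 * szreg);		/* 72 */
	return 0;
}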
@@ -72,10 +72,9 @@ ENTRY(__asm_copy_from_user) * * a0 - start of aligned dst * a1 - start of aligned src - * a3 - a1 & mask:(SZREG-1) * t0 - end of aligned dst */ - addi t0, t0, -(8*SZREG-1) /* not to over run */ + addi t0, t0, -(8*SZREG) /* not to over run */ 2: fixup REG_L a4, 0(a1), 10f fixup REG_L a5, SZREG(a1), 10f @@ -97,7 +96,7 @@ ENTRY(__asm_copy_from_user) addi a1, a1, 8*SZREG bltu a0, t0, 2b - addi t0, t0, 8*SZREG-1 /* revert to original value */ + addi t0, t0, 8*SZREG /* revert to original value */ j .Lbyte_copy_tail .Lshift_copy: @@ -107,7 +106,7 @@ ENTRY(__asm_copy_from_user) * For misaligned copy we still perform aligned word copy, but * we need to use the value fetched from the previous iteration and * do some shifts. - * This is safe because reading less than a word size. + * This is safe because reading is less than a word size. * * a0 - start of aligned dst * a1 - start of src @@ -117,7 +116,7 @@ ENTRY(__asm_copy_from_user) */ /* calculating aligned word boundary for dst */ andi t1, t0, ~(SZREG-1) - /* Converting unaligned src to aligned arc */ + /* Converting unaligned src to aligned src */ andi a1, a1, ~(SZREG-1) /* @@ -125,11 +124,11 @@ ENTRY(__asm_copy_from_user) * t3 - prev shift * t4 - current shift */ - slli t3, a3, LGREG + slli t3, a3, 3 /* converting bytes in a3 to bits */ li a5, SZREG*8 sub t4, a5, t3 - /* Load the first word to combine with seceond word */ + /* Load the first word to combine with second word */ fixup REG_L a5, 0(a1), 10f 3: @@ -161,7 +160,7 @@ ENTRY(__asm_copy_from_user) * a1 - start of remaining src * t0 - end of remaining dst */ - bgeu a0, t0, 5f + bgeu a0, t0, .Lout_copy_user /* check if end of copy */ 4: fixup lb a5, 0(a1), 10f addi a1, a1, 1 /* src */ @@ -169,7 +168,7 @@ ENTRY(__asm_copy_from_user) addi a0, a0, 1 /* dst */ bltu a0, t0, 4b /* t0 - end of dst */ -5: +.Lout_copy_user: /* Disable access to user memory */ csrc CSR_STATUS, t6 li a0, 0 diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 269fc648ef3d..7cb4f391d106 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -36,6 +36,9 @@ EXPORT_SYMBOL(kernel_map); #define kernel_map (*(struct kernel_mapping *)XIP_FIXUP(&kernel_map)) #endif +phys_addr_t phys_ram_base __ro_after_init; +EXPORT_SYMBOL(phys_ram_base); + #ifdef CONFIG_XIP_KERNEL extern char _xiprom[], _exiprom[]; #endif @@ -127,10 +130,17 @@ void __init mem_init(void) } /* - * The default maximal physical memory size is -PAGE_OFFSET, - * limit the memory size via mem. + * The default maximal physical memory size is -PAGE_OFFSET for 32-bit kernel, + * whereas for 64-bit kernel, the end of the virtual address space is occupied + * by the modules/BPF/kernel mappings which reduces the available size of the + * linear mapping. + * Limit the memory size via mem. 
*/ +#ifdef CONFIG_64BIT +static phys_addr_t memory_limit = -PAGE_OFFSET - SZ_4G; +#else static phys_addr_t memory_limit = -PAGE_OFFSET; +#endif static int __init early_mem(char *p) { @@ -152,8 +162,8 @@ static void __init setup_bootmem(void) { phys_addr_t vmlinux_end = __pa_symbol(&_end); phys_addr_t vmlinux_start = __pa_symbol(&_start); - phys_addr_t max_mapped_addr = __pa(~(ulong)0); - phys_addr_t dram_end; + phys_addr_t __maybe_unused max_mapped_addr; + phys_addr_t phys_ram_end; #ifdef CONFIG_XIP_KERNEL vmlinux_start = __pa_symbol(&_sdata); @@ -174,18 +184,28 @@ static void __init setup_bootmem(void) #endif memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start); - dram_end = memblock_end_of_DRAM(); + + phys_ram_end = memblock_end_of_DRAM(); +#ifndef CONFIG_64BIT +#ifndef CONFIG_XIP_KERNEL + phys_ram_base = memblock_start_of_DRAM(); +#endif /* * memblock allocator is not aware of the fact that last 4K bytes of * the addressable memory can not be mapped because of IS_ERR_VALUE * macro. Make sure that last 4k bytes are not usable by memblock - * if end of dram is equal to maximum addressable memory. + * if end of dram is equal to maximum addressable memory. For 64-bit + * kernel, this problem can't happen here as the end of the virtual + * address space is occupied by the kernel mapping, so this check must + * be done as soon as the kernel mapping base address is determined. */ - if (max_mapped_addr == (dram_end - 1)) + max_mapped_addr = __pa(~(ulong)0); + if (max_mapped_addr == (phys_ram_end - 1)) memblock_set_current_limit(max_mapped_addr - 4096); +#endif - min_low_pfn = PFN_UP(memblock_start_of_DRAM()); - max_low_pfn = max_pfn = PFN_DOWN(dram_end); + min_low_pfn = PFN_UP(phys_ram_base); + max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end); dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn)); set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET); @@ -544,6 +564,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) kernel_map.xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR; kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom); + phys_ram_base = CONFIG_PHYS_RAM_BASE; kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE; kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_sdata); @@ -570,6 +591,14 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0); BUG_ON((kernel_map.phys_addr % map_size) != 0); +#ifdef CONFIG_64BIT + /* + * The last 4K bytes of the addressable memory can not be mapped because + * of IS_ERR_VALUE macro. 
+ */ + BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K); +#endif + pt_ops.alloc_pte = alloc_pte_early; pt_ops.get_pte_virt = get_pte_virt_early; #ifndef __PAGETABLE_PMD_FOLDED @@ -709,6 +738,8 @@ static void __init setup_vm_final(void) if (start <= __pa(PAGE_OFFSET) && __pa(PAGE_OFFSET) < end) start = __pa(PAGE_OFFSET); + if (end >= __pa(PAGE_OFFSET) + memory_limit) + end = __pa(PAGE_OFFSET) + memory_limit; map_size = best_map_size(start, end - start); for (pa = start; pa < end; pa += map_size) { diff --git a/arch/riscv/net/bpf_jit_comp32.c b/arch/riscv/net/bpf_jit_comp32.c index 81de865f4c7c..e6497424cbf6 100644 --- a/arch/riscv/net/bpf_jit_comp32.c +++ b/arch/riscv/net/bpf_jit_comp32.c @@ -1251,6 +1251,10 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, return -1; break; + /* speculation barrier */ + case BPF_ST | BPF_NOSPEC: + break; + case BPF_ST | BPF_MEM | BPF_B: case BPF_ST | BPF_MEM | BPF_H: case BPF_ST | BPF_MEM | BPF_W: diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c index 87e3bf5b9086..3af4131c22c7 100644 --- a/arch/riscv/net/bpf_jit_comp64.c +++ b/arch/riscv/net/bpf_jit_comp64.c @@ -939,6 +939,10 @@ out_be: emit_ld(rd, 0, RV_REG_T1, ctx); break; + /* speculation barrier */ + case BPF_ST | BPF_NOSPEC: + break; + /* ST: *(size *)(dst + off) = imm */ case BPF_ST | BPF_MEM | BPF_B: emit_imm(RV_REG_T1, imm, ctx); diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile index 660c799d875d..e30d3fdbbc78 100644 --- a/arch/s390/boot/compressed/Makefile +++ b/arch/s390/boot/compressed/Makefile @@ -11,6 +11,7 @@ UBSAN_SANITIZE := n KASAN_SANITIZE := n obj-y := $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) info.o +obj-$(CONFIG_KERNEL_ZSTD) += clz_ctz.o obj-all := $(obj-y) piggy.o syms.o targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4 diff --git a/arch/s390/boot/compressed/clz_ctz.c b/arch/s390/boot/compressed/clz_ctz.c new file mode 100644 index 000000000000..c3ebf248596b --- /dev/null +++ b/arch/s390/boot/compressed/clz_ctz.c @@ -0,0 +1,2 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "../../../../lib/clz_ctz.c" diff --git a/arch/s390/boot/text_dma.S b/arch/s390/boot/text_dma.S index f7c77cd518f2..5ff5fee02801 100644 --- a/arch/s390/boot/text_dma.S +++ b/arch/s390/boot/text_dma.S @@ -9,16 +9,6 @@ #include <asm/errno.h> #include <asm/sigp.h> -#ifdef CC_USING_EXPOLINE - .pushsection .dma.text.__s390_indirect_jump_r14,"axG" -__dma__s390_indirect_jump_r14: - larl %r1,0f - ex 0,0(%r1) - j . -0: br %r14 - .popsection -#endif - .section .dma.text,"ax" /* * Simplified version of expoline thunk. The normal thunks can not be used here, @@ -27,11 +17,10 @@ __dma__s390_indirect_jump_r14: * affects a few functions that are not performance-relevant. */ .macro BR_EX_DMA_r14 -#ifdef CC_USING_EXPOLINE - jg __dma__s390_indirect_jump_r14 -#else - br %r14 -#endif + larl %r1,0f + ex 0,0(%r1) + j . 
+0: br %r14 .endm /* diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index 86afcc6b56bf..b88184019af9 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -5,7 +5,12 @@ CONFIG_WATCH_QUEUE=y CONFIG_AUDIT=y CONFIG_NO_HZ_IDLE=y CONFIG_HIGH_RES_TIMERS=y +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT=y +CONFIG_BPF_JIT_ALWAYS_ON=y +CONFIG_BPF_LSM=y CONFIG_PREEMPT=y +CONFIG_SCHED_CORE=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_TASKSTATS=y @@ -28,14 +33,13 @@ CONFIG_CGROUP_DEVICE=y CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y CONFIG_CGROUP_BPF=y +CONFIG_CGROUP_MISC=y CONFIG_NAMESPACES=y CONFIG_USER_NS=y CONFIG_CHECKPOINT_RESTORE=y CONFIG_SCHED_AUTOGROUP=y CONFIG_EXPERT=y # CONFIG_SYSFS_SYSCALL is not set -CONFIG_BPF_LSM=y -CONFIG_BPF_SYSCALL=y CONFIG_USERFAULTFD=y # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y @@ -76,6 +80,7 @@ CONFIG_BLK_DEV_THROTTLING=y CONFIG_BLK_WBT=y CONFIG_BLK_CGROUP_IOLATENCY=y CONFIG_BLK_CGROUP_IOCOST=y +CONFIG_BLK_CGROUP_IOPRIO=y CONFIG_BLK_INLINE_ENCRYPTION=y CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y CONFIG_PARTITION_ADVANCED=y @@ -95,6 +100,7 @@ CONFIG_CLEANCACHE=y CONFIG_FRONTSWAP=y CONFIG_CMA_DEBUG=y CONFIG_CMA_DEBUGFS=y +CONFIG_CMA_SYSFS=y CONFIG_CMA_AREAS=7 CONFIG_MEM_SOFT_DIRTY=y CONFIG_ZSWAP=y @@ -158,6 +164,7 @@ CONFIG_IPV6_RPL_LWTUNNEL=y CONFIG_MPTCP=y CONFIG_NETFILTER=y CONFIG_BRIDGE_NETFILTER=m +CONFIG_NETFILTER_NETLINK_HOOK=m CONFIG_NF_CONNTRACK=m CONFIG_NF_CONNTRACK_SECMARK=y CONFIG_NF_CONNTRACK_EVENTS=y @@ -280,6 +287,7 @@ CONFIG_IP_VS_FTP=m CONFIG_IP_VS_PE_SIP=m CONFIG_NFT_FIB_IPV4=m CONFIG_NF_TABLES_ARP=y +CONFIG_NF_LOG_IPV4=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -327,7 +335,7 @@ CONFIG_L2TP_DEBUGFS=m CONFIG_L2TP_V3=y CONFIG_L2TP_IP=m CONFIG_L2TP_ETH=m -CONFIG_BRIDGE=m +CONFIG_BRIDGE=y CONFIG_BRIDGE_MRP=y CONFIG_VLAN_8021Q=m CONFIG_VLAN_8021Q_GVRP=y @@ -384,12 +392,11 @@ CONFIG_VSOCKETS=m CONFIG_VIRTIO_VSOCKETS=m CONFIG_NETLINK_DIAG=m CONFIG_CGROUP_NET_PRIO=y -CONFIG_BPF_JIT=y CONFIG_NET_PKTGEN=m CONFIG_PCI=y -CONFIG_PCI_IOV=y # CONFIG_PCIEASPM is not set CONFIG_PCI_DEBUG=y +CONFIG_PCI_IOV=y CONFIG_HOTPLUG_PCI=y CONFIG_HOTPLUG_PCI_S390=y CONFIG_DEVTMPFS=y @@ -436,7 +443,7 @@ CONFIG_MD_MULTIPATH=m CONFIG_MD_FAULTY=m CONFIG_MD_CLUSTER=m CONFIG_BCACHE=m -CONFIG_BLK_DEV_DM=m +CONFIG_BLK_DEV_DM=y CONFIG_DM_UNSTRIPED=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m @@ -453,6 +460,7 @@ CONFIG_DM_MULTIPATH_ST=m CONFIG_DM_MULTIPATH_HST=m CONFIG_DM_MULTIPATH_IOA=m CONFIG_DM_DELAY=m +CONFIG_DM_INIT=y CONFIG_DM_UEVENT=y CONFIG_DM_FLAKEY=m CONFIG_DM_VERITY=m @@ -495,6 +503,7 @@ CONFIG_NLMON=m # CONFIG_NET_VENDOR_GOOGLE is not set # CONFIG_NET_VENDOR_HUAWEI is not set # CONFIG_NET_VENDOR_INTEL is not set +# CONFIG_NET_VENDOR_MICROSOFT is not set # CONFIG_NET_VENDOR_MARVELL is not set CONFIG_MLX4_EN=m CONFIG_MLX5_CORE=m @@ -551,7 +560,6 @@ CONFIG_INPUT_EVDEV=y CONFIG_LEGACY_PTY_COUNT=0 CONFIG_VIRTIO_CONSOLE=m CONFIG_HW_RANDOM_VIRTIO=m -CONFIG_RAW_DRIVER=m CONFIG_HANGCHECK_TIMER=m CONFIG_TN3270_FS=y CONFIG_PPS=m @@ -574,7 +582,6 @@ CONFIG_SYNC_FILE=y CONFIG_VFIO=m CONFIG_VFIO_PCI=m CONFIG_VFIO_MDEV=m -CONFIG_VFIO_MDEV_DEVICE=m CONFIG_VIRTIO_PCI=m CONFIG_VIRTIO_BALLOON=m CONFIG_VIRTIO_INPUT=y @@ -619,6 +626,7 @@ CONFIG_FUSE_FS=y CONFIG_CUSE=m CONFIG_VIRTIO_FS=m CONFIG_OVERLAY_FS=m +CONFIG_NETFS_STATS=y CONFIG_FSCACHE=m CONFIG_CACHEFILES=m CONFIG_ISO9660_FS=y @@ -654,7 +662,6 @@ CONFIG_NFSD_V3_ACL=y CONFIG_NFSD_V4=y CONFIG_NFSD_V4_SECURITY_LABEL=y 
CONFIG_CIFS=m -CONFIG_CIFS_STATS2=y CONFIG_CIFS_WEAK_PW_HASH=y CONFIG_CIFS_UPCALL=y CONFIG_CIFS_XATTR=y @@ -682,6 +689,7 @@ CONFIG_SECURITY_SELINUX_BOOTPARAM=y CONFIG_SECURITY_SELINUX_DISABLE=y CONFIG_SECURITY_LOCKDOWN_LSM=y CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y +CONFIG_SECURITY_LANDLOCK=y CONFIG_INTEGRITY_SIGNATURE=y CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y CONFIG_IMA=y @@ -696,6 +704,7 @@ CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_DH=m CONFIG_CRYPTO_ECDH=m +CONFIG_CRYPTO_ECDSA=m CONFIG_CRYPTO_ECRDSA=m CONFIG_CRYPTO_SM2=m CONFIG_CRYPTO_CURVE25519=m @@ -843,7 +852,6 @@ CONFIG_FAULT_INJECTION_DEBUG_FS=y CONFIG_FAIL_FUNCTION=y CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y CONFIG_LKDTM=m -CONFIG_TEST_LIST_SORT=y CONFIG_TEST_MIN_HEAP=y CONFIG_TEST_SORT=y CONFIG_KPROBES_SANITY_TEST=y @@ -853,3 +861,4 @@ CONFIG_PERCPU_TEST=m CONFIG_ATOMIC64_SELFTEST=y CONFIG_TEST_BITOPS=m CONFIG_TEST_BPF=m +CONFIG_TEST_LIVEPATCH=m diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig index 71b49ea5b058..1667a3cdcf0a 100644 --- a/arch/s390/configs/defconfig +++ b/arch/s390/configs/defconfig @@ -4,6 +4,11 @@ CONFIG_WATCH_QUEUE=y CONFIG_AUDIT=y CONFIG_NO_HZ_IDLE=y CONFIG_HIGH_RES_TIMERS=y +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT=y +CONFIG_BPF_JIT_ALWAYS_ON=y +CONFIG_BPF_LSM=y +CONFIG_SCHED_CORE=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_TASKSTATS=y @@ -26,14 +31,13 @@ CONFIG_CGROUP_DEVICE=y CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y CONFIG_CGROUP_BPF=y +CONFIG_CGROUP_MISC=y CONFIG_NAMESPACES=y CONFIG_USER_NS=y CONFIG_CHECKPOINT_RESTORE=y CONFIG_SCHED_AUTOGROUP=y CONFIG_EXPERT=y # CONFIG_SYSFS_SYSCALL is not set -CONFIG_BPF_LSM=y -CONFIG_BPF_SYSCALL=y CONFIG_USERFAULTFD=y # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y @@ -70,6 +74,7 @@ CONFIG_BLK_DEV_THROTTLING=y CONFIG_BLK_WBT=y CONFIG_BLK_CGROUP_IOLATENCY=y CONFIG_BLK_CGROUP_IOCOST=y +CONFIG_BLK_CGROUP_IOPRIO=y CONFIG_BLK_INLINE_ENCRYPTION=y CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y CONFIG_PARTITION_ADVANCED=y @@ -87,6 +92,7 @@ CONFIG_KSM=y CONFIG_TRANSPARENT_HUGEPAGE=y CONFIG_CLEANCACHE=y CONFIG_FRONTSWAP=y +CONFIG_CMA_SYSFS=y CONFIG_CMA_AREAS=7 CONFIG_MEM_SOFT_DIRTY=y CONFIG_ZSWAP=y @@ -149,6 +155,7 @@ CONFIG_IPV6_RPL_LWTUNNEL=y CONFIG_MPTCP=y CONFIG_NETFILTER=y CONFIG_BRIDGE_NETFILTER=m +CONFIG_NETFILTER_NETLINK_HOOK=m CONFIG_NF_CONNTRACK=m CONFIG_NF_CONNTRACK_SECMARK=y CONFIG_NF_CONNTRACK_EVENTS=y @@ -271,6 +278,7 @@ CONFIG_IP_VS_FTP=m CONFIG_IP_VS_PE_SIP=m CONFIG_NFT_FIB_IPV4=m CONFIG_NF_TABLES_ARP=y +CONFIG_NF_LOG_IPV4=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -317,7 +325,7 @@ CONFIG_L2TP_DEBUGFS=m CONFIG_L2TP_V3=y CONFIG_L2TP_IP=m CONFIG_L2TP_ETH=m -CONFIG_BRIDGE=m +CONFIG_BRIDGE=y CONFIG_BRIDGE_MRP=y CONFIG_VLAN_8021Q=m CONFIG_VLAN_8021Q_GVRP=y @@ -374,11 +382,10 @@ CONFIG_VSOCKETS=m CONFIG_VIRTIO_VSOCKETS=m CONFIG_NETLINK_DIAG=m CONFIG_CGROUP_NET_PRIO=y -CONFIG_BPF_JIT=y CONFIG_NET_PKTGEN=m CONFIG_PCI=y -CONFIG_PCI_IOV=y # CONFIG_PCIEASPM is not set +CONFIG_PCI_IOV=y CONFIG_HOTPLUG_PCI=y CONFIG_HOTPLUG_PCI_S390=y CONFIG_UEVENT_HELPER=y @@ -427,7 +434,7 @@ CONFIG_MD_MULTIPATH=m CONFIG_MD_FAULTY=m CONFIG_MD_CLUSTER=m CONFIG_BCACHE=m -CONFIG_BLK_DEV_DM=m +CONFIG_BLK_DEV_DM=y CONFIG_DM_UNSTRIPED=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m @@ -444,6 +451,7 @@ CONFIG_DM_MULTIPATH_ST=m CONFIG_DM_MULTIPATH_HST=m CONFIG_DM_MULTIPATH_IOA=m CONFIG_DM_DELAY=m +CONFIG_DM_INIT=y CONFIG_DM_UEVENT=y CONFIG_DM_FLAKEY=m CONFIG_DM_VERITY=m @@ -487,6 +495,7 @@ CONFIG_NLMON=m # 
CONFIG_NET_VENDOR_GOOGLE is not set # CONFIG_NET_VENDOR_HUAWEI is not set # CONFIG_NET_VENDOR_INTEL is not set +# CONFIG_NET_VENDOR_MICROSOFT is not set # CONFIG_NET_VENDOR_MARVELL is not set CONFIG_MLX4_EN=m CONFIG_MLX5_CORE=m @@ -543,7 +552,6 @@ CONFIG_INPUT_EVDEV=y CONFIG_LEGACY_PTY_COUNT=0 CONFIG_VIRTIO_CONSOLE=m CONFIG_HW_RANDOM_VIRTIO=m -CONFIG_RAW_DRIVER=m CONFIG_HANGCHECK_TIMER=m CONFIG_TN3270_FS=y # CONFIG_PTP_1588_CLOCK is not set @@ -566,7 +574,6 @@ CONFIG_SYNC_FILE=y CONFIG_VFIO=m CONFIG_VFIO_PCI=m CONFIG_VFIO_MDEV=m -CONFIG_VFIO_MDEV_DEVICE=m CONFIG_VIRTIO_PCI=m CONFIG_VIRTIO_BALLOON=m CONFIG_VIRTIO_INPUT=y @@ -607,6 +614,7 @@ CONFIG_FUSE_FS=y CONFIG_CUSE=m CONFIG_VIRTIO_FS=m CONFIG_OVERLAY_FS=m +CONFIG_NETFS_STATS=y CONFIG_FSCACHE=m CONFIG_CACHEFILES=m CONFIG_ISO9660_FS=y @@ -642,7 +650,6 @@ CONFIG_NFSD_V3_ACL=y CONFIG_NFSD_V4=y CONFIG_NFSD_V4_SECURITY_LABEL=y CONFIG_CIFS=m -CONFIG_CIFS_STATS2=y CONFIG_CIFS_WEAK_PW_HASH=y CONFIG_CIFS_UPCALL=y CONFIG_CIFS_XATTR=y @@ -669,6 +676,7 @@ CONFIG_SECURITY_SELINUX_BOOTPARAM=y CONFIG_SECURITY_SELINUX_DISABLE=y CONFIG_SECURITY_LOCKDOWN_LSM=y CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y +CONFIG_SECURITY_LANDLOCK=y CONFIG_INTEGRITY_SIGNATURE=y CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y CONFIG_IMA=y @@ -684,6 +692,7 @@ CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_DH=m CONFIG_CRYPTO_ECDH=m +CONFIG_CRYPTO_ECDSA=m CONFIG_CRYPTO_ECRDSA=m CONFIG_CRYPTO_SM2=m CONFIG_CRYPTO_CURVE25519=m @@ -754,6 +763,7 @@ CONFIG_CRC8=m CONFIG_DMA_CMA=y CONFIG_CMA_SIZE_MBYTES=0 CONFIG_PRINTK_TIME=y +CONFIG_DYNAMIC_DEBUG=y CONFIG_DEBUG_INFO=y CONFIG_DEBUG_INFO_DWARF4=y CONFIG_GDB_SCRIPTS=y @@ -781,3 +791,4 @@ CONFIG_LKDTM=m CONFIG_PERCPU_TEST=m CONFIG_ATOMIC64_SELFTEST=y CONFIG_TEST_BPF=m +CONFIG_TEST_LIVEPATCH=m diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig index 76123a4b26ab..d576aaab27c9 100644 --- a/arch/s390/configs/zfcpdump_defconfig +++ b/arch/s390/configs/zfcpdump_defconfig @@ -29,9 +29,9 @@ CONFIG_PARTITION_ADVANCED=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set # CONFIG_COMPACTION is not set # CONFIG_MIGRATION is not set -# CONFIG_BOUNCE is not set CONFIG_NET=y # CONFIG_IUCV is not set +# CONFIG_PCPU_DEV_REFCNT is not set # CONFIG_ETHTOOL_NETLINK is not set CONFIG_DEVTMPFS=y CONFIG_BLK_DEV_RAM=y @@ -51,7 +51,6 @@ CONFIG_ZFCP=y # CONFIG_SERIO is not set # CONFIG_HVC_IUCV is not set # CONFIG_HW_RANDOM_S390 is not set -CONFIG_RAW_DRIVER=y # CONFIG_HMC_DRV is not set # CONFIG_S390_TAPE is not set # CONFIG_VMCP is not set diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h index 695c61989f97..345cbe982a8b 100644 --- a/arch/s390/include/asm/ftrace.h +++ b/arch/s390/include/asm/ftrace.h @@ -19,6 +19,7 @@ void ftrace_caller(void); extern char ftrace_graph_caller_end; extern unsigned long ftrace_plt; +extern void *ftrace_func; struct dyn_arch_ftrace { }; diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 9b4473f76e56..161a9e12bfb8 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -445,15 +445,15 @@ struct kvm_vcpu_stat { u64 instruction_sigp_init_cpu_reset; u64 instruction_sigp_cpu_reset; u64 instruction_sigp_unknown; - u64 diagnose_10; - u64 diagnose_44; - u64 diagnose_9c; - u64 diagnose_9c_ignored; - u64 diagnose_9c_forward; - u64 diagnose_258; - u64 diagnose_308; - u64 diagnose_500; - u64 diagnose_other; + u64 instruction_diagnose_10; + u64 instruction_diagnose_44; + u64 instruction_diagnose_9c; + u64 diag_9c_ignored; 
+ u64 diag_9c_forward; + u64 instruction_diagnose_258; + u64 instruction_diagnose_308; + u64 instruction_diagnose_500; + u64 instruction_diagnose_other; u64 pfault_sync; }; diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index c6ddeb5029b4..2d8f595d9196 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c @@ -40,6 +40,7 @@ * trampoline (ftrace_plt), which clobbers also r1. */ +void *ftrace_func __read_mostly = ftrace_stub; unsigned long ftrace_plt; int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, @@ -85,6 +86,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) int ftrace_update_ftrace_func(ftrace_func_t func) { + ftrace_func = func; return 0; } diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S index faf64c2f90f5..6b13797143a7 100644 --- a/arch/s390/kernel/mcount.S +++ b/arch/s390/kernel/mcount.S @@ -59,13 +59,13 @@ ENTRY(ftrace_caller) #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES aghik %r2,%r0,-MCOUNT_INSN_SIZE lgrl %r4,function_trace_op - lgrl %r1,ftrace_trace_function + lgrl %r1,ftrace_func #else lgr %r2,%r0 aghi %r2,-MCOUNT_INSN_SIZE larl %r4,function_trace_op lg %r4,0(%r4) - larl %r1,ftrace_trace_function + larl %r1,ftrace_func lg %r1,0(%r1) #endif lgr %r3,%r14 diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c index 975a00c8c564..d7dc36ec0a60 100644 --- a/arch/s390/kernel/perf_cpum_cf.c +++ b/arch/s390/kernel/perf_cpum_cf.c @@ -745,7 +745,7 @@ static int __init cpumf_pmu_init(void) if (!cf_dbg) { pr_err("Registration of s390dbf(cpum_cf) failed\n"); return -ENOMEM; - }; + } debug_register_view(cf_dbg, &debug_sprintf_view); cpumf_pmu.attr_groups = cpumf_cf_event_group(); diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c index bbf8622bbf5d..bd3ef121c379 100644 --- a/arch/s390/kernel/uprobes.c +++ b/arch/s390/kernel/uprobes.c @@ -126,6 +126,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, case DIE_SSTEP: if (uprobe_post_sstep_notifier(regs)) return NOTIFY_STOP; + break; default: break; } diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile index b2349a3f4fa3..3457dcf10396 100644 --- a/arch/s390/kernel/vdso32/Makefile +++ b/arch/s390/kernel/vdso32/Makefile @@ -29,6 +29,7 @@ $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32) $(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_32) obj-y += vdso32_wrapper.o +targets += vdso32.lds CPPFLAGS_vdso32.lds += -P -C -U$(ARCH) # Disable gcov profiling, ubsan and kasan for VDSO code diff --git a/arch/s390/kernel/vdso32/vdso32.lds.S b/arch/s390/kernel/vdso32/vdso32.lds.S index bff50b6acd6d..edf5ff1debe1 100644 --- a/arch/s390/kernel/vdso32/vdso32.lds.S +++ b/arch/s390/kernel/vdso32/vdso32.lds.S @@ -51,6 +51,7 @@ SECTIONS .rela.dyn ALIGN(8) : { *(.rela.dyn) } .got ALIGN(8) : { *(.got .toc) } + .got.plt ALIGN(8) : { *(.got.plt) } _end = .; PROVIDE(end = .); diff --git a/arch/s390/kernel/vdso64/vdso64.lds.S b/arch/s390/kernel/vdso64/vdso64.lds.S index d4fb336d747b..4461ea151e49 100644 --- a/arch/s390/kernel/vdso64/vdso64.lds.S +++ b/arch/s390/kernel/vdso64/vdso64.lds.S @@ -51,6 +51,7 @@ SECTIONS .rela.dyn ALIGN(8) : { *(.rela.dyn) } .got ALIGN(8) : { *(.got .toc) } + .got.plt ALIGN(8) : { *(.got.plt) } _end = .; PROVIDE(end = .); diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c index 02c146f9e5cd..807fa9da1e72 100644 --- a/arch/s390/kvm/diag.c +++ b/arch/s390/kvm/diag.c @@ -24,7 +24,7 @@ static int diag_release_pages(struct kvm_vcpu 
*vcpu) start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4]; end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + PAGE_SIZE; - vcpu->stat.diagnose_10++; + vcpu->stat.instruction_diagnose_10++; if (start & ~PAGE_MASK || end & ~PAGE_MASK || start >= end || start < 2 * PAGE_SIZE) @@ -74,7 +74,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu) VCPU_EVENT(vcpu, 3, "diag page reference parameter block at 0x%llx", vcpu->run->s.regs.gprs[rx]); - vcpu->stat.diagnose_258++; + vcpu->stat.instruction_diagnose_258++; if (vcpu->run->s.regs.gprs[rx] & 7) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm)); @@ -145,7 +145,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu) static int __diag_time_slice_end(struct kvm_vcpu *vcpu) { VCPU_EVENT(vcpu, 5, "%s", "diag time slice end"); - vcpu->stat.diagnose_44++; + vcpu->stat.instruction_diagnose_44++; kvm_vcpu_on_spin(vcpu, true); return 0; } @@ -169,7 +169,7 @@ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu) int tid; tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4]; - vcpu->stat.diagnose_9c++; + vcpu->stat.instruction_diagnose_9c++; /* yield to self */ if (tid == vcpu->vcpu_id) @@ -192,7 +192,7 @@ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu) VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d: yield forwarded", tid); - vcpu->stat.diagnose_9c_forward++; + vcpu->stat.diag_9c_forward++; return 0; } @@ -203,7 +203,7 @@ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu) return 0; no_yield: VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d: ignored", tid); - vcpu->stat.diagnose_9c_ignored++; + vcpu->stat.diag_9c_ignored++; return 0; } @@ -213,7 +213,7 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu) unsigned long subcode = vcpu->run->s.regs.gprs[reg] & 0xffff; VCPU_EVENT(vcpu, 3, "diag ipl functions, subcode %lx", subcode); - vcpu->stat.diagnose_308++; + vcpu->stat.instruction_diagnose_308++; switch (subcode) { case 3: vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR; @@ -245,7 +245,7 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu) { int ret; - vcpu->stat.diagnose_500++; + vcpu->stat.instruction_diagnose_500++; /* No virtio-ccw notification? Get out quickly. 
*/ if (!vcpu->kvm->arch.css_support || (vcpu->run->s.regs.gprs[1] != KVM_S390_VIRTIO_CCW_NOTIFY)) @@ -299,7 +299,7 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu) case 0x500: return __diag_virtio_hypercall(vcpu); default: - vcpu->stat.diagnose_other++; + vcpu->stat.instruction_diagnose_other++; return -EOPNOTSUPP; } } diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index b655a7d82bf0..4527ac7b5961 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -163,15 +163,15 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = { STATS_DESC_COUNTER(VCPU, instruction_sigp_init_cpu_reset), STATS_DESC_COUNTER(VCPU, instruction_sigp_cpu_reset), STATS_DESC_COUNTER(VCPU, instruction_sigp_unknown), - STATS_DESC_COUNTER(VCPU, diagnose_10), - STATS_DESC_COUNTER(VCPU, diagnose_44), - STATS_DESC_COUNTER(VCPU, diagnose_9c), - STATS_DESC_COUNTER(VCPU, diagnose_9c_ignored), - STATS_DESC_COUNTER(VCPU, diagnose_9c_forward), - STATS_DESC_COUNTER(VCPU, diagnose_258), - STATS_DESC_COUNTER(VCPU, diagnose_308), - STATS_DESC_COUNTER(VCPU, diagnose_500), - STATS_DESC_COUNTER(VCPU, diagnose_other), + STATS_DESC_COUNTER(VCPU, instruction_diagnose_10), + STATS_DESC_COUNTER(VCPU, instruction_diagnose_44), + STATS_DESC_COUNTER(VCPU, instruction_diagnose_9c), + STATS_DESC_COUNTER(VCPU, diag_9c_ignored), + STATS_DESC_COUNTER(VCPU, diag_9c_forward), + STATS_DESC_COUNTER(VCPU, instruction_diagnose_258), + STATS_DESC_COUNTER(VCPU, instruction_diagnose_308), + STATS_DESC_COUNTER(VCPU, instruction_diagnose_500), + STATS_DESC_COUNTER(VCPU, instruction_diagnose_other), STATS_DESC_COUNTER(VCPU, pfault_sync) }; static_assert(ARRAY_SIZE(kvm_vcpu_stats_desc) == diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 63cae0476bb4..88419263a89a 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -112,7 +112,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) { u32 r1 = reg2hex[b1]; - if (!jit->seen_reg[r1] && r1 >= 6 && r1 <= 15) + if (r1 >= 6 && r1 <= 15 && !jit->seen_reg[r1]) jit->seen_reg[r1] = 1; } @@ -1154,6 +1154,11 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, } break; /* + * BPF_NOSPEC (speculation barrier) + */ + case BPF_ST | BPF_NOSPEC: + break; + /* * BPF_ST(X) */ case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src_reg */ diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index b0993e05affe..8fcb7ecb7225 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -560,9 +560,12 @@ static void zpci_cleanup_bus_resources(struct zpci_dev *zdev) int pcibios_add_device(struct pci_dev *pdev) { + struct zpci_dev *zdev = to_zpci(pdev); struct resource *res; int i; + /* The pdev has a reference to the zdev via its bus */ + zpci_zdev_get(zdev); if (pdev->is_physfn) pdev->no_vf_scan = 1; @@ -582,7 +585,10 @@ int pcibios_add_device(struct pci_dev *pdev) void pcibios_release_device(struct pci_dev *pdev) { + struct zpci_dev *zdev = to_zpci(pdev); + zpci_unmap_resources(pdev); + zpci_zdev_put(zdev); } int pcibios_enable_device(struct pci_dev *pdev, int mask) diff --git a/arch/s390/pci/pci_bus.h b/arch/s390/pci/pci_bus.h index b877a97e6745..e359d2686178 100644 --- a/arch/s390/pci/pci_bus.h +++ b/arch/s390/pci/pci_bus.h @@ -22,6 +22,11 @@ static inline void zpci_zdev_put(struct zpci_dev *zdev) kref_put(&zdev->kref, zpci_release_device); } +static inline void zpci_zdev_get(struct zpci_dev *zdev) +{ + kref_get(&zdev->kref); +} + int zpci_alloc_domain(int domain); void zpci_free_domain(int domain); 
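The zPCI lifetime fix above follows the kernel's standard kref pattern. As a minimal sketch of that pattern (the demo_dev type and helpers below are hypothetical, named here only for illustration, and are not part of this series): every new long-lived pointer to an object takes a reference, and the matching put names the release callback that frees the object on the final drop.

    #include <linux/kernel.h>
    #include <linux/kref.h>
    #include <linux/slab.h>

    /* Hypothetical object used only to illustrate the pattern. */
    struct demo_dev {
    	struct kref kref;
    	int id;
    };

    /* Invoked by kref_put() when the last reference is dropped. */
    static void demo_dev_release(struct kref *kref)
    {
    	struct demo_dev *dev = container_of(kref, struct demo_dev, kref);

    	kfree(dev);
    }

    /* Take a reference while another object points at dev. */
    static void demo_dev_get(struct demo_dev *dev)
    {
    	kref_get(&dev->kref);
    }

    /* Drop the reference; dev is freed if this was the last one. */
    static void demo_dev_put(struct demo_dev *dev)
    {
    	kref_put(&dev->kref, demo_dev_release);
    }

This mirrors the pairing in the patch: pcibios_add_device() takes the reference because the pdev points at the zdev via its bus, and pcibios_release_device() drops it, so the zdev cannot be freed while a pdev still references it.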
int zpci_setup_bus_resources(struct zpci_dev *zdev, diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 45a0549421cd..b683b69a4556 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -39,7 +39,6 @@ config SUPERH select HAVE_FUTEX_CMPXCHG if FUTEX select HAVE_FTRACE_MCOUNT_RECORD select HAVE_HW_BREAKPOINT - select HAVE_IDE if HAS_IOPORT_MAP select HAVE_IOREMAP_PROT if MMU && !X2TLB select HAVE_KERNEL_BZIP2 select HAVE_KERNEL_GZIP diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index c5fa7932b550..f0c0f955e169 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -19,7 +19,6 @@ config SPARC select OF select OF_PROMTREE select HAVE_ASM_MODVERSIONS - select HAVE_IDE select HAVE_ARCH_KGDB if !SMP || SPARC64 select HAVE_ARCH_TRACEHOOK select HAVE_ARCH_SECCOMP if SPARC64 diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c index 4b8d3c65d266..9a2f20cbd48b 100644 --- a/arch/sparc/net/bpf_jit_comp_64.c +++ b/arch/sparc/net/bpf_jit_comp_64.c @@ -1287,6 +1287,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) return 1; break; } + /* speculation barrier */ + case BPF_ST | BPF_NOSPEC: + break; /* ST: *(size *)(dst + off) = imm */ case BPF_ST | BPF_MEM | BPF_W: case BPF_ST | BPF_MEM | BPF_H: diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 49270655e827..88fb922c23a0 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -202,7 +202,6 @@ config X86 select HAVE_FUNCTION_TRACER select HAVE_GCC_PLUGINS select HAVE_HW_BREAKPOINT - select HAVE_IDE select HAVE_IOREMAP_PROT select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64 select HAVE_IRQ_TIME_ACCOUNTING diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 1eb45139fcc6..3092fbf9dbe4 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -2489,13 +2489,15 @@ void perf_clear_dirty_counters(void) return; for_each_set_bit(i, cpuc->dirty, X86_PMC_IDX_MAX) { - /* Metrics and fake events don't have corresponding HW counters. */ - if (is_metric_idx(i) || (i == INTEL_PMC_IDX_FIXED_VLBR)) - continue; - else if (i >= INTEL_PMC_IDX_FIXED) + if (i >= INTEL_PMC_IDX_FIXED) { + /* Metrics and fake events don't have corresponding HW counters. */ + if ((i - INTEL_PMC_IDX_FIXED) >= hybrid(cpuc->pmu, num_counters_fixed)) + continue; + wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + (i - INTEL_PMC_IDX_FIXED), 0); - else + } else { wrmsrl(x86_pmu_event_addr(i), 0); + } } bitmap_zero(cpuc->dirty, X86_PMC_IDX_MAX); diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index fca7a6e2242f..ac6fd2dabf6a 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -2904,24 +2904,28 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status) */ static int intel_pmu_handle_irq(struct pt_regs *regs) { - struct cpu_hw_events *cpuc; + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + bool late_ack = hybrid_bit(cpuc->pmu, late_ack); + bool mid_ack = hybrid_bit(cpuc->pmu, mid_ack); int loops; u64 status; int handled; int pmu_enabled; - cpuc = this_cpu_ptr(&cpu_hw_events); - /* * Save the PMU state. * It needs to be restored when leaving the handler. */ pmu_enabled = cpuc->enabled; /* - * No known reason to not always do late ACK, - * but just in case do it opt-in. + * In general, the early ACK is only applied on old platforms. + * For big cores, starting with Haswell, the late ACK should be + * applied. + * For small cores after Tremont, the ACK has to be done right + * before re-enabling counters, which is in the middle of the + * NMI handler.
*/ - if (!x86_pmu.late_ack) + if (!late_ack && !mid_ack) apic_write(APIC_LVTPC, APIC_DM_NMI); intel_bts_disable_local(); cpuc->enabled = 0; @@ -2958,6 +2962,8 @@ again: goto again; done: + if (mid_ack) + apic_write(APIC_LVTPC, APIC_DM_NMI); /* Only restore PMU state when it's active. See x86_pmu_disable(). */ cpuc->enabled = pmu_enabled; if (pmu_enabled) @@ -2969,7 +2975,7 @@ done: * have been reset. This avoids spurious NMIs on * Haswell CPUs. */ - if (x86_pmu.late_ack) + if (late_ack) apic_write(APIC_LVTPC, APIC_DM_NMI); return handled; } @@ -6129,7 +6135,6 @@ __init int intel_pmu_init(void) static_branch_enable(&perf_is_hybrid); x86_pmu.num_hybrid_pmus = X86_HYBRID_NUM_PMUS; - x86_pmu.late_ack = true; x86_pmu.pebs_aliases = NULL; x86_pmu.pebs_prec_dist = true; x86_pmu.pebs_block = true; @@ -6167,6 +6172,7 @@ __init int intel_pmu_init(void) pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX]; pmu->name = "cpu_core"; pmu->cpu_type = hybrid_big; + pmu->late_ack = true; if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) { pmu->num_counters = x86_pmu.num_counters + 2; pmu->num_counters_fixed = x86_pmu.num_counters_fixed + 1; @@ -6192,6 +6198,7 @@ __init int intel_pmu_init(void) pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX]; pmu->name = "cpu_atom"; pmu->cpu_type = hybrid_small; + pmu->mid_ack = true; pmu->num_counters = x86_pmu.num_counters; pmu->num_counters_fixed = x86_pmu.num_counters_fixed; pmu->max_pebs_events = x86_pmu.max_pebs_events; diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 2bf1c7ea2758..e3ac05c97b5e 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -656,6 +656,10 @@ struct x86_hybrid_pmu { struct event_constraint *event_constraints; struct event_constraint *pebs_constraints; struct extra_reg *extra_regs; + + unsigned int late_ack :1, + mid_ack :1, + enabled_ack :1; }; static __always_inline struct x86_hybrid_pmu *hybrid_pmu(struct pmu *pmu) @@ -686,6 +690,16 @@ extern struct static_key_false perf_is_hybrid; __Fp; \ })) +#define hybrid_bit(_pmu, _field) \ +({ \ + bool __Fp = x86_pmu._field; \ + \ + if (is_hybrid() && (_pmu)) \ + __Fp = hybrid_pmu(_pmu)->_field; \ + \ + __Fp; \ +}) + enum hybrid_pmu_type { hybrid_big = 0x40, hybrid_small = 0x20, @@ -755,6 +769,7 @@ struct x86_pmu { /* PMI handler bits */ unsigned int late_ack :1, + mid_ack :1, enabled_ack :1; /* * sysfs attrs @@ -1115,9 +1130,10 @@ void x86_pmu_stop(struct perf_event *event, int flags); static inline void x86_pmu_disable_event(struct perf_event *event) { + u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask); struct hw_perf_event *hwc = &event->hw; - wrmsrl(hwc->config_base, hwc->config); + wrmsrl(hwc->config_base, hwc->config & ~disable_mask); if (is_counter_pair(hwc)) wrmsrl(x86_pmu_config_addr(hwc->idx + 1), 0); diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 974cbfb1eefe..af6ce8d4c86a 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1038,6 +1038,13 @@ struct kvm_arch { struct list_head lpage_disallowed_mmu_pages; struct kvm_page_track_notifier_node mmu_sp_tracker; struct kvm_page_track_notifier_head track_notifier_head; + /* + * Protects marking pages unsync during page faults, as TDP MMU page + * faults only take mmu_lock for read. For simplicity, the unsync + * pages lock is always taken when marking pages unsync regardless of + * whether mmu_lock is held for read or write. 
+ */ + spinlock_t mmu_unsync_pages_lock; struct list_head assigned_dev_head; struct iommu_domain *iommu_domain; diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index e322676039f4..b00dbc5fac2b 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h @@ -184,6 +184,8 @@ struct __attribute__ ((__packed__)) vmcb_control_area { #define V_IGN_TPR_SHIFT 20 #define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT) +#define V_IRQ_INJECTION_BITS_MASK (V_IRQ_MASK | V_INTR_PRIO_MASK | V_IGN_TPR_MASK) + #define V_INTR_MASKING_SHIFT 24 #define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT) diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index d5c691a3208b..39224e035e47 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -1986,7 +1986,8 @@ static struct irq_chip ioapic_chip __read_mostly = { .irq_set_affinity = ioapic_set_affinity, .irq_retrigger = irq_chip_retrigger_hierarchy, .irq_get_irqchip_state = ioapic_irq_get_chip_state, - .flags = IRQCHIP_SKIP_SET_WAKE, + .flags = IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_AFFINITY_PRE_STARTUP, }; static struct irq_chip ioapic_ir_chip __read_mostly = { @@ -1999,7 +2000,8 @@ static struct irq_chip ioapic_ir_chip __read_mostly = { .irq_set_affinity = ioapic_set_affinity, .irq_retrigger = irq_chip_retrigger_hierarchy, .irq_get_irqchip_state = ioapic_irq_get_chip_state, - .flags = IRQCHIP_SKIP_SET_WAKE, + .flags = IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_AFFINITY_PRE_STARTUP, }; static inline void init_IO_APIC_traps(void) diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c index 44ebe25e7703..dbacb9ec8843 100644 --- a/arch/x86/kernel/apic/msi.c +++ b/arch/x86/kernel/apic/msi.c @@ -58,11 +58,13 @@ msi_set_affinity(struct irq_data *irqd, const struct cpumask *mask, bool force) * The quirk bit is not set in this case. 
* - The new vector is the same as the old vector * - The old vector is MANAGED_IRQ_SHUTDOWN_VECTOR (interrupt starts up) + * - The interrupt is not yet started up * - The new destination CPU is the same as the old destination CPU */ if (!irqd_msi_nomask_quirk(irqd) || cfg->vector == old_cfg.vector || old_cfg.vector == MANAGED_IRQ_SHUTDOWN_VECTOR || + !irqd_is_started(irqd) || cfg->dest_apicid == old_cfg.dest_apicid) { irq_msi_update_msg(irqd, cfg); return ret; @@ -150,7 +152,8 @@ static struct irq_chip pci_msi_controller = { .irq_ack = irq_chip_ack_parent, .irq_retrigger = irq_chip_retrigger_hierarchy, .irq_set_affinity = msi_set_affinity, - .flags = IRQCHIP_SKIP_SET_WAKE, + .flags = IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_AFFINITY_PRE_STARTUP, }; int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec, @@ -219,7 +222,8 @@ static struct irq_chip pci_msi_ir_controller = { .irq_mask = pci_msi_mask_irq, .irq_ack = irq_chip_ack_parent, .irq_retrigger = irq_chip_retrigger_hierarchy, - .flags = IRQCHIP_SKIP_SET_WAKE, + .flags = IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_AFFINITY_PRE_STARTUP, }; static struct msi_domain_info pci_msi_ir_domain_info = { @@ -273,7 +277,8 @@ static struct irq_chip dmar_msi_controller = { .irq_retrigger = irq_chip_retrigger_hierarchy, .irq_compose_msi_msg = dmar_msi_compose_msg, .irq_write_msi_msg = dmar_msi_write_msg, - .flags = IRQCHIP_SKIP_SET_WAKE, + .flags = IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_AFFINITY_PRE_STARTUP, }; static int dmar_msi_init(struct irq_domain *domain, diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index cc8f1773deca..c890d67a64ad 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -237,7 +237,7 @@ static void __init hv_smp_prepare_cpus(unsigned int max_cpus) for_each_present_cpu(i) { if (i == 0) continue; - ret = hv_call_add_logical_proc(numa_cpu_node(i), i, i); + ret = hv_call_add_logical_proc(numa_cpu_node(i), i, cpu_physical_id(i)); BUG_ON(ret); } diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index f07c10b87a87..57e4bb695ff9 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -285,15 +285,14 @@ static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width) return chunks >>= shift; } -static int __mon_event_count(u32 rmid, struct rmid_read *rr) +static u64 __mon_event_count(u32 rmid, struct rmid_read *rr) { struct mbm_state *m; u64 chunks, tval; tval = __rmid_read(rmid, rr->evtid); if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL)) { - rr->val = tval; - return -EINVAL; + return tval; } switch (rr->evtid) { case QOS_L3_OCCUP_EVENT_ID: @@ -305,12 +304,6 @@ static int __mon_event_count(u32 rmid, struct rmid_read *rr) case QOS_L3_MBM_LOCAL_EVENT_ID: m = &rr->d->mbm_local[rmid]; break; - default: - /* - * Code would never reach here because - * an invalid event id would fail the __rmid_read. - */ - return -EINVAL; } if (rr->first) { @@ -361,23 +354,29 @@ void mon_event_count(void *info) struct rdtgroup *rdtgrp, *entry; struct rmid_read *rr = info; struct list_head *head; + u64 ret_val; rdtgrp = rr->rgrp; - if (__mon_event_count(rdtgrp->mon.rmid, rr)) - return; + ret_val = __mon_event_count(rdtgrp->mon.rmid, rr); /* - * For Ctrl groups read data from child monitor groups. + * For Ctrl groups, read data from child monitor groups and + * add them together. Count the events that are read successfully, + * and discard the rmid_reads that report errors.
*/ head = &rdtgrp->mon.crdtgrp_list; if (rdtgrp->type == RDTCTRL_GROUP) { list_for_each_entry(entry, head, mon.crdtgrp_list) { - if (__mon_event_count(entry->mon.rmid, rr)) - return; + if (__mon_event_count(entry->mon.rmid, rr) == 0) + ret_val = 0; } } + + /* Report error if none of rmid_reads are successful */ + if (ret_val) + rr->val = ret_val; } /* diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 08651a4e6aa0..42fc41dd0e1f 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -508,7 +508,7 @@ static struct irq_chip hpet_msi_controller __ro_after_init = { .irq_set_affinity = msi_domain_set_affinity, .irq_retrigger = irq_chip_retrigger_hierarchy, .irq_write_msi_msg = hpet_msi_write_msg, - .flags = IRQCHIP_SKIP_SET_WAKE, + .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_AFFINITY_PRE_STARTUP, }; static int hpet_msi_init(struct irq_domain *domain, diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c index 674906fad43b..68f091ba8443 100644 --- a/arch/x86/kernel/jump_label.c +++ b/arch/x86/kernel/jump_label.c @@ -79,9 +79,10 @@ __jump_label_patch(struct jump_entry *entry, enum jump_label_type type) return (struct jump_label_patch){.code = code, .size = size}; } -static inline void __jump_label_transform(struct jump_entry *entry, - enum jump_label_type type, - int init) +static __always_inline void +__jump_label_transform(struct jump_entry *entry, + enum jump_label_type type, + int init) { const struct jump_label_patch jlp = __jump_label_patch(entry, type); diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index c42613cfb5ba..fe03bd978761 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -208,30 +208,6 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) kvm_mmu_after_set_cpuid(vcpu); } -static int is_efer_nx(void) -{ - return host_efer & EFER_NX; -} - -static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu) -{ - int i; - struct kvm_cpuid_entry2 *e, *entry; - - entry = NULL; - for (i = 0; i < vcpu->arch.cpuid_nent; ++i) { - e = &vcpu->arch.cpuid_entries[i]; - if (e->function == 0x80000001) { - entry = e; - break; - } - } - if (entry && cpuid_entry_has(entry, X86_FEATURE_NX) && !is_efer_nx()) { - cpuid_entry_clear(entry, X86_FEATURE_NX); - printk(KERN_INFO "kvm: guest NX capability removed\n"); - } -} - int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best; @@ -302,7 +278,6 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu, vcpu->arch.cpuid_entries = e2; vcpu->arch.cpuid_nent = cpuid->nent; - cpuid_fix_nx_cap(vcpu); kvm_update_cpuid_runtime(vcpu); kvm_vcpu_after_set_cpuid(vcpu); @@ -401,7 +376,6 @@ static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask) void kvm_set_cpu_caps(void) { - unsigned int f_nx = is_efer_nx() ? 
F(NX) : 0; #ifdef CONFIG_X86_64 unsigned int f_gbpages = F(GBPAGES); unsigned int f_lm = F(LM); @@ -515,7 +489,7 @@ void kvm_set_cpu_caps(void) F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) | F(MTRR) | F(PGE) | F(MCA) | F(CMOV) | F(PAT) | F(PSE36) | 0 /* Reserved */ | - f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) | + F(NX) | 0 /* Reserved */ | F(MMXEXT) | F(MMX) | F(FXSR) | F(FXSR_OPT) | f_gbpages | F(RDTSCP) | 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW) ); @@ -765,7 +739,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) edx.split.num_counters_fixed = min(cap.num_counters_fixed, MAX_FIXED_COUNTERS); edx.split.bit_width_fixed = cap.bit_width_fixed; - edx.split.anythread_deprecated = 1; + if (cap.version) + edx.split.anythread_deprecated = 1; edx.split.reserved1 = 0; edx.split.reserved2 = 0; @@ -940,8 +915,21 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U); unsigned phys_as = entry->eax & 0xff; - if (!g_phys_as) + /* + * If TDP (NPT) is disabled use the adjusted host MAXPHYADDR as + * the guest operates in the same PA space as the host, i.e. + * reductions in MAXPHYADDR for memory encryption affect shadow + * paging, too. + * + * If TDP is enabled but an explicit guest MAXPHYADDR is not + * provided, use the raw bare metal MAXPHYADDR as reductions to + * the HPAs do not affect GPAs. + */ + if (!tdp_enabled) + g_phys_as = boot_cpu_data.x86_phys_bits; + else if (!g_phys_as) g_phys_as = phys_as; + entry->eax = g_phys_as | (virt_as << 8); entry->edx = 0; cpuid_entry_override(entry, CPUID_8000_0008_EBX); @@ -964,12 +952,18 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) case 0x8000001a: case 0x8000001e: break; - /* Support memory encryption cpuid if host supports it */ case 0x8000001F: - if (!kvm_cpu_cap_has(X86_FEATURE_SEV)) + if (!kvm_cpu_cap_has(X86_FEATURE_SEV)) { entry->eax = entry->ebx = entry->ecx = entry->edx = 0; - else + } else { cpuid_entry_override(entry, CPUID_8000_001F_EAX); + + /* + * Enumerate '0' for "PA bits reduction", the adjusted + * MAXPHYADDR is enumerated directly (see 0x80000008). 
+ */ + entry->ebx &= ~GENMASK(11, 6); + } break; /*Add support for Centaur's CPUID instruction*/ case 0xC0000000: diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index b07592ca92f0..41d2a53c5dea 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -1933,7 +1933,7 @@ ret_success: void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *entry; - struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); + struct kvm_vcpu_hv *hv_vcpu; entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_INTERFACE, 0); if (entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX) { @@ -2016,6 +2016,7 @@ static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result) static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result) { + trace_kvm_hv_hypercall_done(result); kvm_hv_hypercall_set_result(vcpu, result); ++vcpu->stat.hypercalls; return kvm_skip_emulated_instruction(vcpu); @@ -2139,6 +2140,7 @@ static bool hv_check_hypercall_access(struct kvm_vcpu_hv *hv_vcpu, u16 code) int kvm_hv_hypercall(struct kvm_vcpu *vcpu) { + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); struct kvm_hv_hcall hc; u64 ret = HV_STATUS_SUCCESS; @@ -2173,17 +2175,25 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) hc.rep_idx = (hc.param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff; hc.rep = !!(hc.rep_cnt || hc.rep_idx); - if (hc.fast && is_xmm_fast_hypercall(&hc)) - kvm_hv_hypercall_read_xmm(&hc); - trace_kvm_hv_hypercall(hc.code, hc.fast, hc.rep_cnt, hc.rep_idx, hc.ingpa, hc.outgpa); - if (unlikely(!hv_check_hypercall_access(to_hv_vcpu(vcpu), hc.code))) { + if (unlikely(!hv_check_hypercall_access(hv_vcpu, hc.code))) { ret = HV_STATUS_ACCESS_DENIED; goto hypercall_complete; } + if (hc.fast && is_xmm_fast_hypercall(&hc)) { + if (unlikely(hv_vcpu->enforce_cpuid && + !(hv_vcpu->cpuid_cache.features_edx & + HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE))) { + kvm_queue_exception(vcpu, UD_VECTOR); + return 1; + } + + kvm_hv_hypercall_read_xmm(&hc); + } + switch (hc.code) { case HVCALL_NOTIFY_LONG_SPIN_WAIT: if (unlikely(hc.rep)) { diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c index 698969e18fe3..ff005fe738a4 100644 --- a/arch/x86/kvm/ioapic.c +++ b/arch/x86/kvm/ioapic.c @@ -96,7 +96,7 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic, static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic) { ioapic->rtc_status.pending_eoi = 0; - bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID); + bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID + 1); } static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic); diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h index 660401700075..11e4065e1617 100644 --- a/arch/x86/kvm/ioapic.h +++ b/arch/x86/kvm/ioapic.h @@ -43,13 +43,13 @@ struct kvm_vcpu; struct dest_map { /* vcpu bitmap where IRQ has been sent */ - DECLARE_BITMAP(map, KVM_MAX_VCPU_ID); + DECLARE_BITMAP(map, KVM_MAX_VCPU_ID + 1); /* * Vector sent to a given vcpu, only valid when * the vcpu's bit in map is set */ - u8 vectors[KVM_MAX_VCPU_ID]; + u8 vectors[KVM_MAX_VCPU_ID + 1]; }; diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 845d114ae075..47b765270239 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -53,6 +53,8 @@ #include <asm/kvm_page_track.h> #include "trace.h" +#include "paging.h" + extern bool itlb_multihit_kvm_mitigation; int __read_mostly nx_huge_pages = -1; @@ -1642,7 +1644,7 @@ static int is_empty_shadow_page(u64 *spt) * aggregate version in order to make the slab shrinker * faster */ -static 
inline void kvm_mod_used_mmu_pages(struct kvm *kvm, unsigned long nr) +static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, long nr) { kvm->arch.n_used_mmu_pages += nr; percpu_counter_add(&kvm_total_used_mmu_pages, nr); @@ -2533,6 +2535,7 @@ static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync) { struct kvm_mmu_page *sp; + bool locked = false; /* * Force write-protection if the page is being tracked. Note, the page @@ -2555,9 +2558,34 @@ int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync) if (sp->unsync) continue; + /* + * TDP MMU page faults require an additional spinlock as they + * run with mmu_lock held for read, not write, and the unsync + * logic is not thread safe. Take the spinlock regardless of + * the MMU type to avoid extra conditionals/parameters; there's + * no meaningful penalty if mmu_lock is held for write. + */ + if (!locked) { + locked = true; + spin_lock(&vcpu->kvm->arch.mmu_unsync_pages_lock); + + /* + * Recheck after taking the spinlock; a different vCPU + * may have since marked the page unsync. A false + * positive on the unprotected check above is not + * possible as clearing sp->unsync _must_ hold mmu_lock + * for write, i.e. unsync cannot transition from 0->1 + * while this CPU holds mmu_lock for read (or write). + */ + if (READ_ONCE(sp->unsync)) + continue; + } + WARN_ON(sp->role.level != PG_LEVEL_4K); kvm_unsync_page(vcpu, sp); } + if (locked) + spin_unlock(&vcpu->kvm->arch.mmu_unsync_pages_lock); /* * We need to ensure that the marking of unsync pages is visible @@ -5535,6 +5563,8 @@ void kvm_mmu_init_vm(struct kvm *kvm) { struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker; + spin_lock_init(&kvm->arch.mmu_unsync_pages_lock); + if (!kvm_mmu_init_tdp_mmu(kvm)) /* * No smp_load/store wrappers needed here as we are in diff --git a/arch/x86/kvm/mmu/paging.h b/arch/x86/kvm/mmu/paging.h new file mode 100644 index 000000000000..de8ab323bb70 --- /dev/null +++ b/arch/x86/kvm/mmu/paging.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Shadow paging constants/helpers that don't need to be #undef'd.
*/ +#ifndef __KVM_X86_PAGING_H +#define __KVM_X86_PAGING_H + +#define GUEST_PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1)) +#define PT64_LVL_ADDR_MASK(level) \ + (GUEST_PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \ + * PT64_LEVEL_BITS))) - 1)) +#define PT64_LVL_OFFSET_MASK(level) \ + (GUEST_PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \ + * PT64_LEVEL_BITS))) - 1)) +#endif /* __KVM_X86_PAGING_H */ + diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 490a028ddabe..ee044d357b5f 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -24,7 +24,7 @@ #define pt_element_t u64 #define guest_walker guest_walker64 #define FNAME(name) paging##64_##name - #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK + #define PT_BASE_ADDR_MASK GUEST_PT64_BASE_ADDR_MASK #define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl) #define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl) #define PT_INDEX(addr, level) PT64_INDEX(addr, level) @@ -57,7 +57,7 @@ #define pt_element_t u64 #define guest_walker guest_walkerEPT #define FNAME(name) ept_##name - #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK + #define PT_BASE_ADDR_MASK GUEST_PT64_BASE_ADDR_MASK #define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl) #define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl) #define PT_INDEX(addr, level) PT64_INDEX(addr, level) diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h index 7a5ce9314107..eb7b227fc6cf 100644 --- a/arch/x86/kvm/mmu/spte.h +++ b/arch/x86/kvm/mmu/spte.h @@ -38,12 +38,6 @@ static_assert(SPTE_TDP_AD_ENABLED_MASK == 0); #else #define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1)) #endif -#define PT64_LVL_ADDR_MASK(level) \ - (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \ - * PT64_LEVEL_BITS))) - 1)) -#define PT64_LVL_OFFSET_MASK(level) \ - (PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \ - * PT64_LEVEL_BITS))) - 1)) #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | shadow_user_mask \ | shadow_x_mask | shadow_nx_mask | shadow_me_mask) diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 0853370bd811..d80cb122b5f3 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -43,6 +43,7 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) if (!kvm->arch.tdp_mmu_enabled) return; + WARN_ON(!list_empty(&kvm->arch.tdp_mmu_pages)); WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots)); /* @@ -81,8 +82,6 @@ static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head) void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root, bool shared) { - gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT); - kvm_lockdep_assert_mmu_lock_held(kvm, shared); if (!refcount_dec_and_test(&root->tdp_mmu_root_count)) @@ -94,7 +93,7 @@ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root, list_del_rcu(&root->link); spin_unlock(&kvm->arch.tdp_mmu_pages_lock); - zap_gfn_range(kvm, root, 0, max_gfn, false, false, shared); + zap_gfn_range(kvm, root, 0, -1ull, false, false, shared); call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback); } @@ -724,13 +723,29 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, gfn_t start, gfn_t end, bool can_yield, bool flush, bool shared) { + gfn_t max_gfn_host = 1ULL << (shadow_phys_bits - PAGE_SHIFT); + bool zap_all = (start == 0 && end >= max_gfn_host); struct tdp_iter iter; + /* + * No need to try to step down in the iterator when zapping all SPTEs, + * zapping 
the top-level non-leaf SPTEs will recurse on their children. + */ + int min_level = zap_all ? root->role.level : PG_LEVEL_4K; + + /* + * Bound the walk at host.MAXPHYADDR, guest accesses beyond that will + * hit a #PF(RSVD) and never get to an EPT Violation/Misconfig / #NPF, + * and so KVM will never install a SPTE for such addresses. + */ + end = min(end, max_gfn_host); + kvm_lockdep_assert_mmu_lock_held(kvm, shared); rcu_read_lock(); - tdp_root_for_each_pte(iter, root, start, end) { + for_each_tdp_pte_min_level(iter, root->spt, root->role.level, + min_level, start, end) { retry: if (can_yield && tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared)) { @@ -744,9 +759,10 @@ retry: /* * If this is a non-last-level SPTE that covers a larger range * than should be zapped, continue, and zap the mappings at a - * lower level. + * lower level, except when zapping all SPTEs. */ - if ((iter.gfn < start || + if (!zap_all && + (iter.gfn < start || iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) && !is_last_spte(iter.old_spte, iter.level)) continue; @@ -794,12 +810,11 @@ bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start, void kvm_tdp_mmu_zap_all(struct kvm *kvm) { - gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT); bool flush = false; int i; for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) - flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, max_gfn, + flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, -1ull, flush, false); if (flush) @@ -838,7 +853,6 @@ static struct kvm_mmu_page *next_invalidated_root(struct kvm *kvm, */ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm) { - gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT); struct kvm_mmu_page *next_root; struct kvm_mmu_page *root; bool flush = false; @@ -854,8 +868,7 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm) rcu_read_unlock(); - flush = zap_gfn_range(kvm, root, 0, max_gfn, true, flush, - true); + flush = zap_gfn_range(kvm, root, 0, -1ull, true, flush, true); /* * Put the reference acquired in diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c index 1d01da64c333..a8ad78a2faa1 100644 --- a/arch/x86/kvm/svm/avic.c +++ b/arch/x86/kvm/svm/avic.c @@ -646,7 +646,7 @@ out: void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); - struct vmcb *vmcb = svm->vmcb; + struct vmcb *vmcb = svm->vmcb01.ptr; bool activated = kvm_vcpu_apicv_active(vcpu); if (!enable_apicv) diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 21d03e3a5dfd..e5515477c30a 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -154,6 +154,13 @@ void recalc_intercepts(struct vcpu_svm *svm) for (i = 0; i < MAX_INTERCEPT; i++) c->intercepts[i] |= g->intercepts[i]; + + /* If SMI is not intercepted, ignore guest SMI intercept as well */ + if (!intercept_smi) + vmcb_clr_intercept(c, INTERCEPT_SMI); + + vmcb_set_intercept(c, INTERCEPT_VMLOAD); + vmcb_set_intercept(c, INTERCEPT_VMSAVE); } static void copy_vmcb_control_area(struct vmcb_control_area *dst, @@ -304,8 +311,8 @@ static bool nested_vmcb_valid_sregs(struct kvm_vcpu *vcpu, return true; } -static void nested_load_control_from_vmcb12(struct vcpu_svm *svm, - struct vmcb_control_area *control) +void nested_load_control_from_vmcb12(struct vcpu_svm *svm, + struct vmcb_control_area *control) { copy_vmcb_control_area(&svm->nested.ctl, control); @@ -499,7 +506,11 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12 static void nested_vmcb02_prepare_control(struct vcpu_svm *svm) { - 
const u32 mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK; + const u32 int_ctl_vmcb01_bits = + V_INTR_MASKING_MASK | V_GIF_MASK | V_GIF_ENABLE_MASK; + + const u32 int_ctl_vmcb12_bits = V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK; + struct kvm_vcpu *vcpu = &svm->vcpu; /* @@ -511,7 +522,7 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm) * Also covers avic_vapic_bar, avic_backing_page, avic_logical_id, * avic_physical_id. */ - WARN_ON(svm->vmcb01.ptr->control.int_ctl & AVIC_ENABLE_MASK); + WARN_ON(kvm_apicv_activated(svm->vcpu.kvm)); /* Copied from vmcb01. msrpm_base can be overwritten later. */ svm->vmcb->control.nested_ctl = svm->vmcb01.ptr->control.nested_ctl; @@ -531,8 +542,8 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm) vcpu->arch.l1_tsc_offset + svm->nested.ctl.tsc_offset; svm->vmcb->control.int_ctl = - (svm->nested.ctl.int_ctl & ~mask) | - (svm->vmcb01.ptr->control.int_ctl & mask); + (svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) | + (svm->vmcb01.ptr->control.int_ctl & int_ctl_vmcb01_bits); svm->vmcb->control.virt_ext = svm->nested.ctl.virt_ext; svm->vmcb->control.int_vector = svm->nested.ctl.int_vector; @@ -618,6 +629,11 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu) struct kvm_host_map map; u64 vmcb12_gpa; + if (!svm->nested.hsave_msr) { + kvm_inject_gp(vcpu, 0); + return 1; + } + if (is_smm(vcpu)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; @@ -692,7 +708,28 @@ out: return ret; } -void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb) +/* Copy state save area fields which are handled by VMRUN */ +void svm_copy_vmrun_state(struct vmcb_save_area *to_save, + struct vmcb_save_area *from_save) +{ + to_save->es = from_save->es; + to_save->cs = from_save->cs; + to_save->ss = from_save->ss; + to_save->ds = from_save->ds; + to_save->gdtr = from_save->gdtr; + to_save->idtr = from_save->idtr; + to_save->rflags = from_save->rflags | X86_EFLAGS_FIXED; + to_save->efer = from_save->efer; + to_save->cr0 = from_save->cr0; + to_save->cr3 = from_save->cr3; + to_save->cr4 = from_save->cr4; + to_save->rax = from_save->rax; + to_save->rsp = from_save->rsp; + to_save->rip = from_save->rip; + to_save->cpl = 0; +} + +void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb) { to_vmcb->save.fs = from_vmcb->save.fs; to_vmcb->save.gs = from_vmcb->save.gs; @@ -1355,28 +1392,11 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu, svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa; - svm->vmcb01.ptr->save.es = save->es; - svm->vmcb01.ptr->save.cs = save->cs; - svm->vmcb01.ptr->save.ss = save->ss; - svm->vmcb01.ptr->save.ds = save->ds; - svm->vmcb01.ptr->save.gdtr = save->gdtr; - svm->vmcb01.ptr->save.idtr = save->idtr; - svm->vmcb01.ptr->save.rflags = save->rflags | X86_EFLAGS_FIXED; - svm->vmcb01.ptr->save.efer = save->efer; - svm->vmcb01.ptr->save.cr0 = save->cr0; - svm->vmcb01.ptr->save.cr3 = save->cr3; - svm->vmcb01.ptr->save.cr4 = save->cr4; - svm->vmcb01.ptr->save.rax = save->rax; - svm->vmcb01.ptr->save.rsp = save->rsp; - svm->vmcb01.ptr->save.rip = save->rip; - svm->vmcb01.ptr->save.cpl = 0; - + svm_copy_vmrun_state(&svm->vmcb01.ptr->save, save); nested_load_control_from_vmcb12(svm, ctl); svm_switch_vmcb(svm, &svm->nested.vmcb02); - nested_vmcb02_prepare_control(svm); - kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu); ret = 0; out_free: diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 62926f1a5f7b..7fbce342eec4 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -64,6 
+64,7 @@ static DEFINE_MUTEX(sev_bitmap_lock); unsigned int max_sev_asid; static unsigned int min_sev_asid; static unsigned long sev_me_mask; +static unsigned int nr_asids; static unsigned long *sev_asid_bitmap; static unsigned long *sev_reclaim_asid_bitmap; @@ -78,11 +79,11 @@ struct enc_region { /* Called with the sev_bitmap_lock held, or on shutdown */ static int sev_flush_asids(int min_asid, int max_asid) { - int ret, pos, error = 0; + int ret, asid, error = 0; /* Check if there are any ASIDs to reclaim before performing a flush */ - pos = find_next_bit(sev_reclaim_asid_bitmap, max_asid, min_asid); - if (pos >= max_asid) + asid = find_next_bit(sev_reclaim_asid_bitmap, nr_asids, min_asid); + if (asid > max_asid) return -EBUSY; /* @@ -115,15 +116,15 @@ static bool __sev_recycle_asids(int min_asid, int max_asid) /* The flush process will flush all reclaimable SEV and SEV-ES ASIDs */ bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap, - max_sev_asid); - bitmap_zero(sev_reclaim_asid_bitmap, max_sev_asid); + nr_asids); + bitmap_zero(sev_reclaim_asid_bitmap, nr_asids); return true; } static int sev_asid_new(struct kvm_sev_info *sev) { - int pos, min_asid, max_asid, ret; + int asid, min_asid, max_asid, ret; bool retry = true; enum misc_res_type type; @@ -143,11 +144,11 @@ static int sev_asid_new(struct kvm_sev_info *sev) * SEV-enabled guests must use asid from min_sev_asid to max_sev_asid. * SEV-ES-enabled guest can use from 1 to min_sev_asid - 1. */ - min_asid = sev->es_active ? 0 : min_sev_asid - 1; + min_asid = sev->es_active ? 1 : min_sev_asid; max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid; again: - pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_asid); - if (pos >= max_asid) { + asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid); + if (asid > max_asid) { if (retry && __sev_recycle_asids(min_asid, max_asid)) { retry = false; goto again; @@ -157,11 +158,11 @@ again: goto e_uncharge; } - __set_bit(pos, sev_asid_bitmap); + __set_bit(asid, sev_asid_bitmap); mutex_unlock(&sev_bitmap_lock); - return pos + 1; + return asid; e_uncharge: misc_cg_uncharge(type, sev->misc_cg, 1); put_misc_cg(sev->misc_cg); @@ -179,17 +180,16 @@ static int sev_get_asid(struct kvm *kvm) static void sev_asid_free(struct kvm_sev_info *sev) { struct svm_cpu_data *sd; - int cpu, pos; + int cpu; enum misc_res_type type; mutex_lock(&sev_bitmap_lock); - pos = sev->asid - 1; - __set_bit(pos, sev_reclaim_asid_bitmap); + __set_bit(sev->asid, sev_reclaim_asid_bitmap); for_each_possible_cpu(cpu) { sd = per_cpu(svm_data, cpu); - sd->sev_vmcbs[pos] = NULL; + sd->sev_vmcbs[sev->asid] = NULL; } mutex_unlock(&sev_bitmap_lock); @@ -1272,8 +1272,8 @@ static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) /* Pin guest memory */ guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK, PAGE_SIZE, &n, 0); - if (!guest_page) - return -EFAULT; + if (IS_ERR(guest_page)) + return PTR_ERR(guest_page); /* allocate memory for header and transport buffer */ ret = -ENOMEM; @@ -1310,8 +1310,9 @@ static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) } /* Copy packet header to userspace. 
*/ - ret = copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr, - params.hdr_len); + if (copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr, + params.hdr_len)) + ret = -EFAULT; e_free_trans_data: kfree(trans_data); @@ -1463,11 +1464,12 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) data.trans_len = params.trans_len; /* Pin guest memory */ - ret = -EFAULT; guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK, PAGE_SIZE, &n, 0); - if (!guest_page) + if (IS_ERR(guest_page)) { + ret = PTR_ERR(guest_page); goto e_free_trans; + } /* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */ data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset; @@ -1855,12 +1857,17 @@ void __init sev_hardware_setup(void) min_sev_asid = edx; sev_me_mask = 1UL << (ebx & 0x3f); - /* Initialize SEV ASID bitmaps */ - sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL); + /* + * Initialize SEV ASID bitmaps. Allocate space for ASID 0 in the bitmap, + * even though it's never used, so that the bitmap is indexed by the + * actual ASID. + */ + nr_asids = max_sev_asid + 1; + sev_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL); if (!sev_asid_bitmap) goto out; - sev_reclaim_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL); + sev_reclaim_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL); if (!sev_reclaim_asid_bitmap) { bitmap_free(sev_asid_bitmap); sev_asid_bitmap = NULL; @@ -1905,7 +1912,7 @@ void sev_hardware_teardown(void) return; /* No need to take sev_bitmap_lock, all VMs have been destroyed. */ - sev_flush_asids(0, max_sev_asid); + sev_flush_asids(1, max_sev_asid); bitmap_free(sev_asid_bitmap); bitmap_free(sev_reclaim_asid_bitmap); @@ -1919,7 +1926,7 @@ int sev_cpu_init(struct svm_cpu_data *sd) if (!sev_enabled) return 0; - sd->sev_vmcbs = kcalloc(max_sev_asid + 1, sizeof(void *), GFP_KERNEL); + sd->sev_vmcbs = kcalloc(nr_asids, sizeof(void *), GFP_KERNEL); if (!sd->sev_vmcbs) return -ENOMEM; diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 8834822c00cd..69639f9624f5 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -198,6 +198,11 @@ module_param(avic, bool, 0444); bool __read_mostly dump_invalid_vmcb; module_param(dump_invalid_vmcb, bool, 0644); + +bool intercept_smi = true; +module_param(intercept_smi, bool, 0444); + + static bool svm_gp_erratum_intercept = true; static u8 rsm_ins_bytes[] = "\x0f\xaa"; @@ -1185,7 +1190,10 @@ static void init_vmcb(struct kvm_vcpu *vcpu) svm_set_intercept(svm, INTERCEPT_INTR); svm_set_intercept(svm, INTERCEPT_NMI); - svm_set_intercept(svm, INTERCEPT_SMI); + + if (intercept_smi) + svm_set_intercept(svm, INTERCEPT_SMI); + svm_set_intercept(svm, INTERCEPT_SELECTIVE_CR0); svm_set_intercept(svm, INTERCEPT_RDPMC); svm_set_intercept(svm, INTERCEPT_CPUID); @@ -1398,8 +1406,6 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu) goto error_free_vmsa_page; } - svm_vcpu_init_msrpm(vcpu, svm->msrpm); - svm->vmcb01.ptr = page_address(vmcb01_page); svm->vmcb01.pa = __sme_set(page_to_pfn(vmcb01_page) << PAGE_SHIFT); @@ -1411,6 +1417,8 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu) svm_switch_vmcb(svm, &svm->vmcb01); init_vmcb(vcpu); + svm_vcpu_init_msrpm(vcpu, svm->msrpm); + svm_init_osvw(vcpu); vcpu->arch.microcode_version = 0x01000065; @@ -1560,8 +1568,11 @@ static void svm_set_vintr(struct vcpu_svm *svm) { struct vmcb_control_area *control; - /* The following fields are ignored when AVIC is enabled */ - WARN_ON(kvm_vcpu_apicv_active(&svm->vcpu)); + /* + * 
The following fields are ignored when AVIC is enabled + */ + WARN_ON(kvm_apicv_activated(svm->vcpu.kvm)); + svm_set_intercept(svm, INTERCEPT_VINTR); /* @@ -1578,17 +1589,18 @@ static void svm_set_vintr(struct vcpu_svm *svm) static void svm_clear_vintr(struct vcpu_svm *svm) { - const u32 mask = V_TPR_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK | V_INTR_MASKING_MASK; svm_clr_intercept(svm, INTERCEPT_VINTR); /* Drop int_ctl fields related to VINTR injection. */ - svm->vmcb->control.int_ctl &= mask; + svm->vmcb->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK; if (is_guest_mode(&svm->vcpu)) { - svm->vmcb01.ptr->control.int_ctl &= mask; + svm->vmcb01.ptr->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK; WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) != (svm->nested.ctl.int_ctl & V_TPR_MASK)); - svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl & ~mask; + + svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl & + V_IRQ_INJECTION_BITS_MASK; } vmcb_mark_dirty(svm->vmcb, VMCB_INTR); @@ -1923,7 +1935,7 @@ static int npf_interception(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); - u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2); + u64 fault_address = svm->vmcb->control.exit_info_2; u64 error_code = svm->vmcb->control.exit_info_1; trace_kvm_page_fault(fault_address, error_code); @@ -2106,6 +2118,11 @@ static int nmi_interception(struct kvm_vcpu *vcpu) return 1; } +static int smi_interception(struct kvm_vcpu *vcpu) +{ + return 1; +} + static int intr_interception(struct kvm_vcpu *vcpu) { ++vcpu->stat.irq_exits; @@ -2134,11 +2151,12 @@ static int vmload_vmsave_interception(struct kvm_vcpu *vcpu, bool vmload) ret = kvm_skip_emulated_instruction(vcpu); if (vmload) { - nested_svm_vmloadsave(vmcb12, svm->vmcb); + svm_copy_vmloadsave_state(svm->vmcb, vmcb12); svm->sysenter_eip_hi = 0; svm->sysenter_esp_hi = 0; - } else - nested_svm_vmloadsave(svm->vmcb, vmcb12); + } else { + svm_copy_vmloadsave_state(vmcb12, svm->vmcb); + } kvm_vcpu_unmap(vcpu, &map, true); @@ -2941,7 +2959,16 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) svm_disable_lbrv(vcpu); break; case MSR_VM_HSAVE_PA: - svm->nested.hsave_msr = data; + /* + * Old kernels did not validate the value written to + * MSR_VM_HSAVE_PA. Allow KVM_SET_MSR to set an invalid + * value to allow live migrating buggy or malicious guests + * originating from those kernels. 
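The VINTR hunks above split int_ctl into bits owned by L1 (kept from vmcb01) and bits taken from the nested vmcb12. A toy sketch with made-up bit positions (the real V_* masks live in the SVM headers and differ from these values) shows how the merge keeps each side's bits and silently drops everything else::

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative bit assignments only; not the architectural values. */
    #define TOY_V_INTR_MASKING (1u << 0)  /* stands in for V_INTR_MASKING_MASK  */
    #define TOY_V_GIF          (1u << 1)  /* stands in for V_GIF_MASK           */
    #define TOY_V_GIF_ENABLE   (1u << 2)  /* stands in for V_GIF_ENABLE_MASK    */
    #define TOY_V_TPR          (1u << 3)  /* stands in for V_TPR_MASK           */
    #define TOY_V_IRQ_INJECT   (1u << 4)  /* stands in for V_IRQ_INJECTION_BITS */

    int main(void)
    {
        uint32_t vmcb01_int_ctl = TOY_V_INTR_MASKING | TOY_V_GIF_ENABLE;
        uint32_t vmcb12_int_ctl = TOY_V_TPR | TOY_V_IRQ_INJECT | TOY_V_GIF;

        uint32_t l1_bits = TOY_V_INTR_MASKING | TOY_V_GIF | TOY_V_GIF_ENABLE;
        uint32_t l2_bits = TOY_V_TPR | TOY_V_IRQ_INJECT;

        /* vmcb02 takes host-owned bits from vmcb01 and guest-owned bits
         * from vmcb12; the stray TOY_V_GIF in vmcb12 is dropped. */
        uint32_t vmcb02_int_ctl = (vmcb12_int_ctl & l2_bits) |
                                  (vmcb01_int_ctl & l1_bits);

        printf("vmcb02 int_ctl = %#x\n", vmcb02_int_ctl);
        return 0;
    }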
+ */ + if (!msr->host_initiated && !page_address_valid(vcpu, data)) + return 1; + + svm->nested.hsave_msr = data & PAGE_MASK; break; case MSR_VM_CR: return svm_set_vm_cr(vcpu, data); @@ -3080,8 +3107,7 @@ static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = { [SVM_EXIT_EXCP_BASE + GP_VECTOR] = gp_interception, [SVM_EXIT_INTR] = intr_interception, [SVM_EXIT_NMI] = nmi_interception, - [SVM_EXIT_SMI] = kvm_emulate_as_nop, - [SVM_EXIT_INIT] = kvm_emulate_as_nop, + [SVM_EXIT_SMI] = smi_interception, [SVM_EXIT_VINTR] = interrupt_window_interception, [SVM_EXIT_RDPMC] = kvm_emulate_rdpmc, [SVM_EXIT_CPUID] = kvm_emulate_cpuid, @@ -4288,6 +4314,7 @@ static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection) static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate) { struct vcpu_svm *svm = to_svm(vcpu); + struct kvm_host_map map_save; int ret; if (is_guest_mode(vcpu)) { @@ -4303,6 +4330,29 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate) ret = nested_svm_vmexit(svm); if (ret) return ret; + + /* + * KVM uses VMCB01 to store L1 host state while L2 runs but + * VMCB01 is going to be used during SMM and thus the state will + * be lost. Temporarily save non-VMLOAD/VMSAVE state to the host save + * area pointed to by MSR_VM_HSAVE_PA. APM guarantees that the + * format of the area is identical to guest save area offset + * by 0x400 (matches the offset of 'struct vmcb_save_area' + * within 'struct vmcb'). Note: HSAVE area may also be used by + * L1 hypervisor to save additional host context (e.g. KVM does + * that, see svm_prepare_guest_switch()) which must be + * preserved. + */ + if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), + &map_save) == -EINVAL) + return 1; + + BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400); + + svm_copy_vmrun_state(map_save.hva + 0x400, + &svm->vmcb01.ptr->save); + + kvm_vcpu_unmap(vcpu, &map_save, true); } return 0; } @@ -4310,13 +4360,14 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate) static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) { struct vcpu_svm *svm = to_svm(vcpu); - struct kvm_host_map map; + struct kvm_host_map map, map_save; int ret = 0; if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) { u64 saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0); u64 guest = GET_SMSTATE(u64, smstate, 0x7ed8); u64 vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0); + struct vmcb *vmcb12; if (guest) { if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM)) @@ -4332,8 +4383,25 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) if (svm_allocate_nested(svm)) return 1; - ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, map.hva); + vmcb12 = map.hva; + + nested_load_control_from_vmcb12(svm, &vmcb12->control); + + ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12); kvm_vcpu_unmap(vcpu, &map, true); + + /* + * Restore L1 host state from L1 HSAVE area as VMCB01 was + * used during SMM (see svm_enter_smm()) + */ + if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), + &map_save) == -EINVAL) + return 1; + + svm_copy_vmrun_state(&svm->vmcb01.ptr->save, + map_save.hva + 0x400); + + kvm_vcpu_unmap(vcpu, &map_save, true); } } diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index f89b623bb591..bd0fe94c2920 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -31,6 +31,7 @@ #define MSRPM_OFFSETS 16 extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly; extern bool npt_enabled; +extern bool intercept_smi; /* * Clean bits in VMCB.
@@ -463,7 +464,9 @@ void svm_leave_nested(struct vcpu_svm *svm); void svm_free_nested(struct vcpu_svm *svm); int svm_allocate_nested(struct vcpu_svm *svm); int nested_svm_vmrun(struct kvm_vcpu *vcpu); -void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb); +void svm_copy_vmrun_state(struct vmcb_save_area *to_save, + struct vmcb_save_area *from_save); +void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb); int nested_svm_vmexit(struct vcpu_svm *svm); static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code) @@ -479,6 +482,8 @@ int nested_svm_check_permissions(struct kvm_vcpu *vcpu); int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, bool has_error_code, u32 error_code); int nested_svm_exit_special(struct vcpu_svm *svm); +void nested_load_control_from_vmcb12(struct vcpu_svm *svm, + struct vmcb_control_area *control); void nested_sync_control_from_vmcb02(struct vcpu_svm *svm); void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm); void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb); diff --git a/arch/x86/kvm/svm/svm_onhyperv.h b/arch/x86/kvm/svm/svm_onhyperv.h index 9b9a55abc29f..c53b8bf8d013 100644 --- a/arch/x86/kvm/svm/svm_onhyperv.h +++ b/arch/x86/kvm/svm/svm_onhyperv.h @@ -89,7 +89,7 @@ static inline void svm_hv_vmcb_dirty_nested_enlightenments( * as we mark it dirty unconditionally towards end of vcpu * init phase. */ - if (vmcb && vmcb_is_clean(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS) && + if (vmcb_is_clean(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS) && hve->hv_enlightenments_control.msr_bitmap) vmcb_mark_dirty(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS); } diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h index b484141ea15b..03ebe368333e 100644 --- a/arch/x86/kvm/trace.h +++ b/arch/x86/kvm/trace.h @@ -92,6 +92,21 @@ TRACE_EVENT(kvm_hv_hypercall, __entry->outgpa) ); +TRACE_EVENT(kvm_hv_hypercall_done, + TP_PROTO(u64 result), + TP_ARGS(result), + + TP_STRUCT__entry( + __field(__u64, result) + ), + + TP_fast_assign( + __entry->result = result; + ), + + TP_printk("result 0x%llx", __entry->result) +); + /* * Tracepoint for Xen hypercall. 
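The TRACE_EVENT(kvm_hv_hypercall_done) definition above generates a trace_kvm_hv_hypercall_done() helper. A kernel-context sketch of a call site, assuming it runs in the Hyper-V hypercall completion path where the return value is known; the wrapper name here is hypothetical::

    static u64 complete_hv_hypercall(u64 result)
    {
        /* fires the tracepoint defined by TRACE_EVENT above */
        trace_kvm_hv_hypercall_done(result);
        return result;
    }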
*/ diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 1a52134b0c42..b3f77d18eb5a 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -330,6 +330,31 @@ void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu) vcpu_put(vcpu); } +#define EPTP_PA_MASK GENMASK_ULL(51, 12) + +static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp) +{ + return VALID_PAGE(root_hpa) && + ((root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK)); +} + +static void nested_ept_invalidate_addr(struct kvm_vcpu *vcpu, gpa_t eptp, + gpa_t addr) +{ + uint i; + struct kvm_mmu_root_info *cached_root; + + WARN_ON_ONCE(!mmu_is_nested(vcpu)); + + for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { + cached_root = &vcpu->arch.mmu->prev_roots[i]; + + if (nested_ept_root_matches(cached_root->hpa, cached_root->pgd, + eptp)) + vcpu->arch.mmu->invlpg(vcpu, addr, cached_root->hpa); + } +} + static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) { @@ -342,10 +367,22 @@ static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu, vm_exit_reason = EXIT_REASON_PML_FULL; vmx->nested.pml_full = false; exit_qualification &= INTR_INFO_UNBLOCK_NMI; - } else if (fault->error_code & PFERR_RSVD_MASK) - vm_exit_reason = EXIT_REASON_EPT_MISCONFIG; - else - vm_exit_reason = EXIT_REASON_EPT_VIOLATION; + } else { + if (fault->error_code & PFERR_RSVD_MASK) + vm_exit_reason = EXIT_REASON_EPT_MISCONFIG; + else + vm_exit_reason = EXIT_REASON_EPT_VIOLATION; + + /* + * Although the caller (kvm_inject_emulated_page_fault) would + * have already synced the faulting address in the shadow EPT + * tables for the current EPTP12, we also need to sync it for + * any other cached EPTP02s based on the same EP4TA, since the + * TLB associates mappings to the EP4TA rather than the full EPTP. 
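Why match on the EP4TA (bits 51:12) rather than the full EPTP? Two EPTP values that differ only in the low attribute bits still name the same root table, as this standalone check demonstrates; the attribute encodings in the example are the usual memtype/page-walk/A-D bits::

    #include <stdint.h>
    #include <stdio.h>

    /* Same mask as the patch: bits 51:12 are the EP4TA (the PML4 PA);
     * bits 11:0 carry memtype/page-walk attributes and are ignored. */
    #define EPTP_PA_MASK ((((uint64_t)1 << 52) - 1) & ~(((uint64_t)1 << 12) - 1))

    static int ep4ta_match(uint64_t a, uint64_t b)
    {
        return (a & EPTP_PA_MASK) == (b & EPTP_PA_MASK);
    }

    int main(void)
    {
        uint64_t eptp_a = 0x12345000ULL | 0x5e; /* WB, 4-level, A/D enabled */
        uint64_t eptp_b = 0x12345000ULL | 0x1e; /* same root, different attrs */

        printf("same EP4TA: %d\n", ep4ta_match(eptp_a, eptp_b)); /* prints 1 */
        return 0;
    }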
+ */ + nested_ept_invalidate_addr(vcpu, vmcs12->ept_pointer, + fault->address); + } nested_vmx_vmexit(vcpu, vm_exit_reason, 0, exit_qualification); vmcs12->guest_physical_address = fault->address; @@ -5325,14 +5362,6 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu) return nested_vmx_succeed(vcpu); } -#define EPTP_PA_MASK GENMASK_ULL(51, 12) - -static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp) -{ - return VALID_PAGE(root_hpa) && - ((root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK)); -} - /* Emulate the INVEPT instruction */ static int handle_invept(struct kvm_vcpu *vcpu) { @@ -5826,7 +5855,8 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, if (is_nmi(intr_info)) return true; else if (is_page_fault(intr_info)) - return vcpu->arch.apf.host_apf_flags || !enable_ept; + return vcpu->arch.apf.host_apf_flags || + vmx_need_pf_intercept(vcpu); else if (is_debug(intr_info) && vcpu->guest_debug & (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 3979a947933a..17a1cb4b059d 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -14,8 +14,6 @@ #include "vmx_ops.h" #include "cpuid.h" -extern const u32 vmx_msr_index[]; - #define MSR_TYPE_R 1 #define MSR_TYPE_W 2 #define MSR_TYPE_RW 3 @@ -524,7 +522,7 @@ static inline struct vmcs *alloc_vmcs(bool shadow) static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx) { - return vmx->secondary_exec_control & + return secondary_exec_controls_get(vmx) & SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE; } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c6dc1b445231..e5d5c5ed7dd4 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3407,7 +3407,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return 1; break; case MSR_KVM_ASYNC_PF_ACK: - if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF)) + if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) return 1; if (data & 0x1) { vcpu->arch.apf.pageready_pending = false; @@ -3746,7 +3746,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) msr_info->data = vcpu->arch.apf.msr_int_val; break; case MSR_KVM_ASYNC_PF_ACK: - if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF)) + if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) return 1; msr_info->data = 0; @@ -4358,8 +4358,17 @@ static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu) static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu) { - return kvm_arch_interrupt_allowed(vcpu) && - kvm_cpu_accept_dm_intr(vcpu); + /* + * Do not cause an interrupt window exit if an exception + * is pending or an event needs reinjection; userspace + * might want to inject the interrupt manually using KVM_SET_REGS + * or KVM_SET_SREGS. For that to work, we must be at an + * instruction boundary and with no events half-injected. 
+ */ + return (kvm_arch_interrupt_allowed(vcpu) && + kvm_cpu_accept_dm_intr(vcpu) && + !kvm_event_needs_reinjection(vcpu) && + !vcpu->arch.exception.pending); } static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, @@ -9601,6 +9610,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) set_debugreg(vcpu->arch.eff_db[3], 3); set_debugreg(vcpu->arch.dr6, 6); vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; + } else if (unlikely(hw_breakpoint_active())) { + set_debugreg(0, 7); } for (;;) { @@ -10985,9 +10996,6 @@ int kvm_arch_hardware_setup(void *opaque) int r; rdmsrl_safe(MSR_EFER, &host_efer); - if (WARN_ON_ONCE(boot_cpu_has(X86_FEATURE_NX) && - !(host_efer & EFER_NX))) - return -EIO; if (boot_cpu_has(X86_FEATURE_XSAVES)) rdmsrl(MSR_IA32_XSS, host_xss); diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 3364fe62b903..3481b35cb4ec 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -682,7 +682,6 @@ int p4d_clear_huge(p4d_t *p4d) } #endif -#if CONFIG_PGTABLE_LEVELS > 3 /** * pud_set_huge - setup kernel PUD mapping * @@ -722,23 +721,6 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot) } /** - * pud_clear_huge - clear kernel PUD mapping when it is set - * - * Returns 1 on success and 0 on failure (no PUD map is found). - */ -int pud_clear_huge(pud_t *pud) -{ - if (pud_large(*pud)) { - pud_clear(pud); - return 1; - } - - return 0; -} -#endif - -#if CONFIG_PGTABLE_LEVELS > 2 -/** * pmd_set_huge - setup kernel PMD mapping * * See text over pud_set_huge() above. @@ -769,6 +751,21 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot) } /** + * pud_clear_huge - clear kernel PUD mapping when it is set + * + * Returns 1 on success and 0 on failure (no PUD map is found). + */ +int pud_clear_huge(pud_t *pud) +{ + if (pud_large(*pud)) { + pud_clear(pud); + return 1; + } + + return 0; +} + +/** * pmd_clear_huge - clear kernel PMD mapping when it is set * * Returns 1 on success and 0 on failure (no PMD map is found). 
@@ -782,7 +779,6 @@ int pmd_clear_huge(pmd_t *pmd) return 0; } -#endif #ifdef CONFIG_X86_64 /** diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index e835164189f1..16d76f814e9b 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -570,6 +570,9 @@ static void bpf_tail_call_direct_fixup(struct bpf_prog *prog) for (i = 0; i < prog->aux->size_poke_tab; i++) { poke = &prog->aux->poke_tab[i]; + if (poke->aux && poke->aux != prog->aux) + continue; + WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable)); if (poke->reason != BPF_POKE_REASON_TAIL_CALL) @@ -1216,6 +1219,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, } break; + /* speculation barrier */ + case BPF_ST | BPF_NOSPEC: + if (boot_cpu_has(X86_FEATURE_XMM2)) + /* Emit 'lfence' */ + EMIT3(0x0F, 0xAE, 0xE8); + break; + /* ST: *(u8*)(dst_reg + off) = imm */ case BPF_ST | BPF_MEM | BPF_B: if (is_ereg(dst_reg)) diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c index 3da88ded6ee3..3bfda5f502cb 100644 --- a/arch/x86/net/bpf_jit_comp32.c +++ b/arch/x86/net/bpf_jit_comp32.c @@ -1886,6 +1886,12 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, i++; break; } + /* speculation barrier */ + case BPF_ST | BPF_NOSPEC: + if (boot_cpu_has(X86_FEATURE_XMM2)) + /* Emit 'lfence' */ + EMIT3(0x0F, 0xAE, 0xE8); + break; /* ST: *(u8*)(dst_reg + off) = imm */ case BPF_ST | BPF_MEM | BPF_H: case BPF_ST | BPF_MEM | BPF_B: diff --git a/arch/x86/tools/chkobjdump.awk b/arch/x86/tools/chkobjdump.awk index fd1ab80be0de..a4cf678cf5c8 100644 --- a/arch/x86/tools/chkobjdump.awk +++ b/arch/x86/tools/chkobjdump.awk @@ -10,6 +10,7 @@ BEGIN { /^GNU objdump/ { verstr = "" + gsub(/\(.*\)/, ""); for (i = 3; i <= NF; i++) if (match($(i), "^[0-9]")) { verstr = $(i); diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c index 04c5a44b9682..9ba700dc47de 100644 --- a/arch/x86/tools/relocs.c +++ b/arch/x86/tools/relocs.c @@ -57,12 +57,12 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = { [S_REL] = "^(__init_(begin|end)|" "__x86_cpu_dev_(start|end)|" - "(__parainstructions|__alt_instructions)(|_end)|" - "(__iommu_table|__apicdrivers|__smp_locks)(|_end)|" + "(__parainstructions|__alt_instructions)(_end)?|" + "(__iommu_table|__apicdrivers|__smp_locks)(_end)?|" "__(start|end)_pci_.*|" "__(start|end)_builtin_fw|" - "__(start|stop)___ksymtab(|_gpl)|" - "__(start|stop)___kcrctab(|_gpl)|" + "__(start|stop)___ksymtab(_gpl)?|" + "__(start|stop)___kcrctab(_gpl)?|" "__(start|stop)___param|" "__(start|stop)___modver|" "__(start|stop)___bug_table|" diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 2332b2156993..3878880469d1 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -327,7 +327,6 @@ config XTENSA_PLATFORM_ISS config XTENSA_PLATFORM_XT2000 bool "XT2000" - select HAVE_IDE help XT2000 is the name of Tensilica's feature-rich emulation platform. This hardware is capable of running a full Linux distribution. diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched index 64053d67a97b..2f2158e05a91 100644 --- a/block/Kconfig.iosched +++ b/block/Kconfig.iosched @@ -9,12 +9,6 @@ config MQ_IOSCHED_DEADLINE help MQ version of the deadline IO scheduler. 
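For reference, the three bytes the x86 JITs above emit for BPF_ST | BPF_NOSPEC decode to lfence (0F AE E8), x86's load-serializing speculation barrier. A small x86-only demonstration, with the same barrier written as inline asm rather than JITed bytes::

    #include <stdio.h>

    /* The bytes EMIT3(0x0F, 0xAE, 0xE8) produces in the JIT hunks above. */
    static const unsigned char lfence_insn[] = { 0x0f, 0xae, 0xe8 };

    int main(void)
    {
        /* Same effect, emitted by the compiler (x86 only). */
        asm volatile("lfence" ::: "memory");

        printf("JIT emits: %02x %02x %02x\n",
               lfence_insn[0], lfence_insn[1], lfence_insn[2]);
        return 0;
    }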
-config MQ_IOSCHED_DEADLINE_CGROUP - tristate - default y - depends on MQ_IOSCHED_DEADLINE - depends on BLK_CGROUP - config MQ_IOSCHED_KYBER tristate "Kyber I/O scheduler" default y diff --git a/block/Makefile b/block/Makefile index bfbe4e13ca1e..1e1afa10f869 100644 --- a/block/Makefile +++ b/block/Makefile @@ -22,8 +22,6 @@ obj-$(CONFIG_BLK_CGROUP_IOPRIO) += blk-ioprio.o obj-$(CONFIG_BLK_CGROUP_IOLATENCY) += blk-iolatency.o obj-$(CONFIG_BLK_CGROUP_IOCOST) += blk-iocost.o obj-$(CONFIG_MQ_IOSCHED_DEADLINE) += mq-deadline.o -mq-deadline-y += mq-deadline-main.o -mq-deadline-$(CONFIG_MQ_IOSCHED_DEADLINE_CGROUP)+= mq-deadline-cgroup.o obj-$(CONFIG_MQ_IOSCHED_KYBER) += kyber-iosched.o bfq-y := bfq-iosched.o bfq-wf2q.o bfq-cgroup.o obj-$(CONFIG_IOSCHED_BFQ) += bfq.o diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 575d7a2e7203..31fe9be179d9 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -790,6 +790,7 @@ static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu) struct blkcg_gq *parent = blkg->parent; struct blkg_iostat_set *bisc = per_cpu_ptr(blkg->iostat_cpu, cpu); struct blkg_iostat cur, delta; + unsigned long flags; unsigned int seq; /* fetch the current per-cpu values */ @@ -799,21 +800,21 @@ static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu) } while (u64_stats_fetch_retry(&bisc->sync, seq)); /* propagate percpu delta to global */ - u64_stats_update_begin(&blkg->iostat.sync); + flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync); blkg_iostat_set(&delta, &cur); blkg_iostat_sub(&delta, &bisc->last); blkg_iostat_add(&blkg->iostat.cur, &delta); blkg_iostat_add(&bisc->last, &delta); - u64_stats_update_end(&blkg->iostat.sync); + u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags); /* propagate global delta to parent (unless that's root) */ if (parent && parent->parent) { - u64_stats_update_begin(&parent->iostat.sync); + flags = u64_stats_update_begin_irqsave(&parent->iostat.sync); blkg_iostat_set(&delta, &blkg->iostat.cur); blkg_iostat_sub(&delta, &blkg->iostat.last); blkg_iostat_add(&parent->iostat.cur, &delta); blkg_iostat_add(&blkg->iostat.last, &delta); - u64_stats_update_end(&parent->iostat.sync); + u64_stats_update_end_irqrestore(&parent->iostat.sync, flags); } } @@ -848,6 +849,7 @@ static void blkcg_fill_root_iostats(void) memset(&tmp, 0, sizeof(tmp)); for_each_possible_cpu(cpu) { struct disk_stats *cpu_dkstats; + unsigned long flags; cpu_dkstats = per_cpu_ptr(bdev->bd_stats, cpu); tmp.ios[BLKG_IOSTAT_READ] += @@ -864,9 +866,9 @@ static void blkcg_fill_root_iostats(void) tmp.bytes[BLKG_IOSTAT_DISCARD] += cpu_dkstats->sectors[STAT_DISCARD] << 9; - u64_stats_update_begin(&blkg->iostat.sync); + flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync); blkg_iostat_set(&blkg->iostat.cur, &tmp); - u64_stats_update_end(&blkg->iostat.sync); + u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags); } } } diff --git a/block/blk-core.c b/block/blk-core.c index 04477697ee4b..4f8449b29b21 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -122,7 +122,6 @@ void blk_rq_init(struct request_queue *q, struct request *rq) rq->internal_tag = BLK_MQ_NO_TAG; rq->start_time_ns = ktime_get_ns(); rq->part = NULL; - refcount_set(&rq->ref, 1); blk_crypto_rq_set_defaults(rq); } EXPORT_SYMBOL(blk_rq_init); diff --git a/block/blk-flush.c b/block/blk-flush.c index 1002f6c58181..4201728bf3a5 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -262,6 +262,11 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error) 
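The blk-cgroup hunks above switch stat flushing to the _irqsave variants of the u64_stats helpers, making the writer safe against updates from irq context. A kernel-context sketch of the resulting pattern, assuming only the blkg_iostat_set layout used in the hunk; the function name is illustrative::

    static void iostat_add_read_bytes(struct blkg_iostat_set *s, u64 bytes)
    {
        unsigned long flags;

        /* begin/end bracket the update under the seqcount, irqs off */
        flags = u64_stats_update_begin_irqsave(&s->sync);
        s->cur.bytes[BLKG_IOSTAT_READ] += bytes;
        u64_stats_update_end_irqrestore(&s->sync, flags);
    }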
spin_unlock_irqrestore(&fq->mq_flush_lock, flags); } +bool is_flush_rq(struct request *rq) +{ + return rq->end_io == flush_end_io; +} + /** * blk_kick_flush - consider issuing flush request * @q: request_queue being kicked @@ -329,6 +334,14 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq, flush_rq->rq_flags |= RQF_FLUSH_SEQ; flush_rq->rq_disk = first_rq->rq_disk; flush_rq->end_io = flush_end_io; + /* + * Order WRITE ->end_io and WRITE rq->ref, and its pair is the one + * implied in refcount_inc_not_zero() called from + * blk_mq_find_and_get_req(), which orders WRITE/READ flush_rq->ref + * and READ flush_rq->end_io + */ + smp_wmb(); + refcount_set(&flush_rq->ref, 1); blk_flush_queue_rq(flush_rq, false); } diff --git a/block/blk-iocost.c b/block/blk-iocost.c index c2d6bc88d3f1..0e56557cacf2 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -1440,16 +1440,17 @@ static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode, return -1; iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost); + wait->committed = true; /* * autoremove_wake_function() removes the wait entry only when it - * actually changed the task state. We want the wait always - * removed. Remove explicitly and use default_wake_function(). + * actually changed the task state. We want the wait always removed. + * Remove explicitly and use default_wake_function(). Note that the + * order of operations is important as finish_wait() tests whether + * @wq_entry is removed without grabbing the lock. */ - list_del_init(&wq_entry->entry); - wait->committed = true; - default_wake_function(wq_entry, mode, flags, key); + list_del_init_careful(&wq_entry->entry); return 0; } @@ -3060,19 +3061,19 @@ static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf, if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX) return -EINVAL; - spin_lock(&blkcg->lock); + spin_lock_irq(&blkcg->lock); iocc->dfl_weight = v * WEIGHT_ONE; hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) { struct ioc_gq *iocg = blkg_to_iocg(blkg); if (iocg) { - spin_lock_irq(&iocg->ioc->lock); + spin_lock(&iocg->ioc->lock); ioc_now(iocg->ioc, &now); weight_updated(iocg, &now); - spin_unlock_irq(&iocg->ioc->lock); + spin_unlock(&iocg->ioc->lock); } } - spin_unlock(&blkcg->lock); + spin_unlock_irq(&blkcg->lock); return nbytes; } diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c index 81be0096411d..d8b0d8bd132b 100644 --- a/block/blk-iolatency.c +++ b/block/blk-iolatency.c @@ -833,7 +833,11 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf, enable = iolatency_set_min_lat_nsec(blkg, lat_val); if (enable) { - WARN_ON_ONCE(!blk_get_queue(blkg->q)); + if (!blk_get_queue(blkg->q)) { + ret = -ENODEV; + goto out; + } + blkg_get(blkg); } diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index c838d81ac058..0f006cabfd91 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -515,17 +515,6 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx, percpu_ref_put(&q->q_usage_counter); } -static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set, - struct blk_mq_hw_ctx *hctx, - unsigned int hctx_idx) -{ - if (hctx->sched_tags) { - blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx); - blk_mq_free_rq_map(hctx->sched_tags, set->flags); - hctx->sched_tags = NULL; - } -} - static int blk_mq_sched_alloc_tags(struct request_queue *q, struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) @@ -539,8 +528,10 @@ static int blk_mq_sched_alloc_tags(struct request_queue *q, 
return -ENOMEM; ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests); - if (ret) - blk_mq_sched_free_tags(set, hctx, hctx_idx); + if (ret) { + blk_mq_free_rq_map(hctx->sched_tags, set->flags); + hctx->sched_tags = NULL; + } return ret; } diff --git a/block/blk-mq.c b/block/blk-mq.c index 2c4ac51e54eb..9d4fdc2be88a 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -911,7 +911,7 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next) void blk_mq_put_rq_ref(struct request *rq) { - if (is_flush_rq(rq, rq->mq_hctx)) + if (is_flush_rq(rq)) rq->end_io(rq, 0); else if (refcount_dec_and_test(&rq->ref)) __blk_mq_free_request(rq); @@ -923,34 +923,14 @@ static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, unsigned long *next = priv; /* - * Just do a quick check if it is expired before locking the request in - * so we're not unnecessarilly synchronizing across CPUs. - */ - if (!blk_mq_req_expired(rq, next)) - return true; - - /* - * We have reason to believe the request may be expired. Take a - * reference on the request to lock this request lifetime into its - * currently allocated context to prevent it from being reallocated in - * the event the completion by-passes this timeout handler. - * - * If the reference was already released, then the driver beat the - * timeout handler to posting a natural completion. - */ - if (!refcount_inc_not_zero(&rq->ref)) - return true; - - /* - * The request is now locked and cannot be reallocated underneath the - * timeout handler's processing. Re-verify this exact request is truly - * expired; if it is not expired, then the request was completed and - * reallocated as a new request. + * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot + * be reallocated underneath the timeout handler's processing, hence + * the expire check is reliable. If the request is not expired, then + * it was completed and reallocated as a new request after returning + * from blk_mq_check_expired.
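The comment above leans on an ordering that the blk-flush hunk establishes explicitly: the flush request's reference must not become visible before its end_io is set. A kernel-context sketch of the two sides of that pairing; the function names are illustrative, modeled on blk_kick_flush() and blk_mq_find_and_get_req()::

    static void publish_flush_rq(struct request *flush_rq)
    {
        flush_rq->end_io = flush_end_io;
        /* order the end_io store before the ref store; pairs with the
         * refcount_inc_not_zero() on the reader side */
        smp_wmb();
        refcount_set(&flush_rq->ref, 1);
    }

    static struct request *try_get_rq(struct request *rq)
    {
        /* a successfully taken non-zero ref guarantees end_io is set */
        return refcount_inc_not_zero(&rq->ref) ? rq : NULL;
    }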
*/ if (blk_mq_req_expired(rq, next)) blk_mq_rq_timed_out(rq, reserved); - - blk_mq_put_rq_ref(rq); return true; } @@ -2994,10 +2974,12 @@ static void queue_set_hctx_shared(struct request_queue *q, bool shared) int i; queue_for_each_hw_ctx(q, hctx, i) { - if (shared) + if (shared) { hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED; - else + } else { + blk_mq_tag_idle(hctx); hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED; + } } } diff --git a/block/blk.h b/block/blk.h index 4b885c0f6708..cb01429c162c 100644 --- a/block/blk.h +++ b/block/blk.h @@ -44,11 +44,7 @@ static inline void __blk_get_queue(struct request_queue *q) kobject_get(&q->kobj); } -static inline bool -is_flush_rq(struct request *req, struct blk_mq_hw_ctx *hctx) -{ - return hctx->fq->flush_rq == req; -} +bool is_flush_rq(struct request *req); struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size, gfp_t flags); diff --git a/block/genhd.c b/block/genhd.c index af4d2ab4a633..298ee78c1bda 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -1079,10 +1079,9 @@ static void disk_release(struct device *dev) disk_release_events(disk); kfree(disk->random); xa_destroy(&disk->part_tbl); - bdput(disk->part0); if (test_bit(GD_QUEUE_REF, &disk->state) && disk->queue) blk_put_queue(disk->queue); - kfree(disk); + bdput(disk->part0); /* frees the disk */ } struct class block_class = { .name = "block", diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c index 81e3279ecd57..15a8be57203d 100644 --- a/block/kyber-iosched.c +++ b/block/kyber-iosched.c @@ -596,13 +596,13 @@ static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx, struct list_head *head = &kcq->rq_list[sched_domain]; spin_lock(&kcq->lock); + trace_block_rq_insert(rq); if (at_head) list_move(&rq->queuelist, head); else list_move_tail(&rq->queuelist, head); sbitmap_set_bit(&khd->kcq_map[sched_domain], rq->mq_ctx->index_hw[hctx->type]); - trace_block_rq_insert(rq); spin_unlock(&kcq->lock); } } diff --git a/block/mq-deadline-cgroup.c b/block/mq-deadline-cgroup.c deleted file mode 100644 index 3b4bfddec39f..000000000000 --- a/block/mq-deadline-cgroup.c +++ /dev/null @@ -1,126 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -#include <linux/blk-cgroup.h> -#include <linux/ioprio.h> - -#include "mq-deadline-cgroup.h" - -static struct blkcg_policy dd_blkcg_policy; - -static struct blkcg_policy_data *dd_cpd_alloc(gfp_t gfp) -{ - struct dd_blkcg *pd; - - pd = kzalloc(sizeof(*pd), gfp); - if (!pd) - return NULL; - pd->stats = alloc_percpu_gfp(typeof(*pd->stats), - GFP_KERNEL | __GFP_ZERO); - if (!pd->stats) { - kfree(pd); - return NULL; - } - return &pd->cpd; -} - -static void dd_cpd_free(struct blkcg_policy_data *cpd) -{ - struct dd_blkcg *dd_blkcg = container_of(cpd, typeof(*dd_blkcg), cpd); - - free_percpu(dd_blkcg->stats); - kfree(dd_blkcg); -} - -static struct dd_blkcg *dd_blkcg_from_pd(struct blkg_policy_data *pd) -{ - return container_of(blkcg_to_cpd(pd->blkg->blkcg, &dd_blkcg_policy), - struct dd_blkcg, cpd); -} - -/* - * Convert an association between a block cgroup and a request queue into a - * pointer to the mq-deadline information associated with a (blkcg, queue) pair. 
- */ -struct dd_blkcg *dd_blkcg_from_bio(struct bio *bio) -{ - struct blkg_policy_data *pd; - - pd = blkg_to_pd(bio->bi_blkg, &dd_blkcg_policy); - if (!pd) - return NULL; - - return dd_blkcg_from_pd(pd); -} - -static size_t dd_pd_stat(struct blkg_policy_data *pd, char *buf, size_t size) -{ - static const char *const prio_class_name[] = { - [IOPRIO_CLASS_NONE] = "NONE", - [IOPRIO_CLASS_RT] = "RT", - [IOPRIO_CLASS_BE] = "BE", - [IOPRIO_CLASS_IDLE] = "IDLE", - }; - struct dd_blkcg *blkcg = dd_blkcg_from_pd(pd); - int res = 0; - u8 prio; - - for (prio = 0; prio < ARRAY_SIZE(blkcg->stats->stats); prio++) - res += scnprintf(buf + res, size - res, - " [%s] dispatched=%u inserted=%u merged=%u", - prio_class_name[prio], - ddcg_sum(blkcg, dispatched, prio) + - ddcg_sum(blkcg, merged, prio) - - ddcg_sum(blkcg, completed, prio), - ddcg_sum(blkcg, inserted, prio) - - ddcg_sum(blkcg, completed, prio), - ddcg_sum(blkcg, merged, prio)); - - return res; -} - -static struct blkg_policy_data *dd_pd_alloc(gfp_t gfp, struct request_queue *q, - struct blkcg *blkcg) -{ - struct dd_blkg *pd; - - pd = kzalloc(sizeof(*pd), gfp); - if (!pd) - return NULL; - return &pd->pd; -} - -static void dd_pd_free(struct blkg_policy_data *pd) -{ - struct dd_blkg *dd_blkg = container_of(pd, typeof(*dd_blkg), pd); - - kfree(dd_blkg); -} - -static struct blkcg_policy dd_blkcg_policy = { - .cpd_alloc_fn = dd_cpd_alloc, - .cpd_free_fn = dd_cpd_free, - - .pd_alloc_fn = dd_pd_alloc, - .pd_free_fn = dd_pd_free, - .pd_stat_fn = dd_pd_stat, -}; - -int dd_activate_policy(struct request_queue *q) -{ - return blkcg_activate_policy(q, &dd_blkcg_policy); -} - -void dd_deactivate_policy(struct request_queue *q) -{ - blkcg_deactivate_policy(q, &dd_blkcg_policy); -} - -int __init dd_blkcg_init(void) -{ - return blkcg_policy_register(&dd_blkcg_policy); -} - -void __exit dd_blkcg_exit(void) -{ - blkcg_policy_unregister(&dd_blkcg_policy); -} diff --git a/block/mq-deadline-cgroup.h b/block/mq-deadline-cgroup.h deleted file mode 100644 index 0143fd74f3ce..000000000000 --- a/block/mq-deadline-cgroup.h +++ /dev/null @@ -1,114 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -#if !defined(_MQ_DEADLINE_CGROUP_H_) -#define _MQ_DEADLINE_CGROUP_H_ - -#include <linux/blk-cgroup.h> - -struct request_queue; - -/** - * struct io_stats_per_prio - I/O statistics per I/O priority class. - * @inserted: Number of inserted requests. - * @merged: Number of merged requests. - * @dispatched: Number of dispatched requests. - * @completed: Number of I/O completions. - */ -struct io_stats_per_prio { - local_t inserted; - local_t merged; - local_t dispatched; - local_t completed; -}; - -/* I/O statistics per I/O cgroup per I/O priority class (IOPRIO_CLASS_*). */ -struct blkcg_io_stats { - struct io_stats_per_prio stats[4]; -}; - -/** - * struct dd_blkcg - Per cgroup data. - * @cpd: blkcg_policy_data structure. - * @stats: I/O statistics. - */ -struct dd_blkcg { - struct blkcg_policy_data cpd; /* must be the first member */ - struct blkcg_io_stats __percpu *stats; -}; - -/* - * Count one event of type 'event_type' and with I/O priority class - * 'prio_class'. 
- */ -#define ddcg_count(ddcg, event_type, prio_class) do { \ -if (ddcg) { \ - struct blkcg_io_stats *io_stats = get_cpu_ptr((ddcg)->stats); \ - \ - BUILD_BUG_ON(!__same_type((ddcg), struct dd_blkcg *)); \ - BUILD_BUG_ON(!__same_type((prio_class), u8)); \ - local_inc(&io_stats->stats[(prio_class)].event_type); \ - put_cpu_ptr(io_stats); \ -} \ -} while (0) - -/* - * Returns the total number of ddcg_count(ddcg, event_type, prio_class) calls - * across all CPUs. No locking or barriers since it is fine if the returned - * sum is slightly outdated. - */ -#define ddcg_sum(ddcg, event_type, prio) ({ \ - unsigned int cpu; \ - u32 sum = 0; \ - \ - BUILD_BUG_ON(!__same_type((ddcg), struct dd_blkcg *)); \ - BUILD_BUG_ON(!__same_type((prio), u8)); \ - for_each_present_cpu(cpu) \ - sum += local_read(&per_cpu_ptr((ddcg)->stats, cpu)-> \ - stats[(prio)].event_type); \ - sum; \ -}) - -#ifdef CONFIG_BLK_CGROUP - -/** - * struct dd_blkg - Per (cgroup, request queue) data. - * @pd: blkg_policy_data structure. - */ -struct dd_blkg { - struct blkg_policy_data pd; /* must be the first member */ -}; - -struct dd_blkcg *dd_blkcg_from_bio(struct bio *bio); -int dd_activate_policy(struct request_queue *q); -void dd_deactivate_policy(struct request_queue *q); -int __init dd_blkcg_init(void); -void __exit dd_blkcg_exit(void); - -#else /* CONFIG_BLK_CGROUP */ - -static inline struct dd_blkcg *dd_blkcg_from_bio(struct bio *bio) -{ - return NULL; -} - -static inline int dd_activate_policy(struct request_queue *q) -{ - return 0; -} - -static inline void dd_deactivate_policy(struct request_queue *q) -{ -} - -static inline int dd_blkcg_init(void) -{ - return 0; -} - -static inline void dd_blkcg_exit(void) -{ -} - -#endif /* CONFIG_BLK_CGROUP */ - -#endif /* _MQ_DEADLINE_CGROUP_H_ */ diff --git a/block/mq-deadline-main.c b/block/mq-deadline.c index 6f612e6dc82b..a09761cbdf12 100644 --- a/block/mq-deadline-main.c +++ b/block/mq-deadline.c @@ -25,7 +25,6 @@ #include "blk-mq-debugfs.h" #include "blk-mq-tag.h" #include "blk-mq-sched.h" -#include "mq-deadline-cgroup.h" /* * See Documentation/block/deadline-iosched.rst @@ -57,6 +56,14 @@ enum dd_prio { enum { DD_PRIO_COUNT = 3 }; +/* I/O statistics per I/O priority. */ +struct io_stats_per_prio { + local_t inserted; + local_t merged; + local_t dispatched; + local_t completed; +}; + /* I/O statistics for all I/O priorities (enum dd_prio). */ struct io_stats { struct io_stats_per_prio stats[DD_PRIO_COUNT]; @@ -79,9 +86,6 @@ struct deadline_data { * run time data */ - /* Request queue that owns this data structure. */ - struct request_queue *queue; - struct dd_per_prio per_prio[DD_PRIO_COUNT]; /* Data direction of latest dispatched request. 
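The io_stats_per_prio counters moved above are per-CPU local_t values: cheap to bump on the dispatch fast path and summed lazily, without locking, on the read side. A sketch of such a reader, assuming deadline_data carries a __percpu struct io_stats *stats as in this hunk; the function name is illustrative (the scheduler uses a dd_sum() macro for this)::

    static u32 dd_sum_inserted(struct deadline_data *dd, enum dd_prio prio)
    {
        u32 sum = 0;
        int cpu;

        /* a slightly stale sum is acceptable, so no locking is needed */
        for_each_present_cpu(cpu)
            sum += local_read(&per_cpu_ptr(dd->stats, cpu)->
                              stats[prio].inserted);
        return sum;
    }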
*/ @@ -234,10 +238,8 @@ static void dd_merged_requests(struct request_queue *q, struct request *req, struct deadline_data *dd = q->elevator->elevator_data; const u8 ioprio_class = dd_rq_ioclass(next); const enum dd_prio prio = ioprio_class_to_prio[ioprio_class]; - struct dd_blkcg *blkcg = next->elv.priv[0]; dd_count(dd, merged, prio); - ddcg_count(blkcg, merged, ioprio_class); /* * if next expires before rq, assign its expire time to rq @@ -375,7 +377,6 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd, { struct request *rq, *next_rq; enum dd_data_dir data_dir; - struct dd_blkcg *blkcg; enum dd_prio prio; u8 ioprio_class; @@ -474,8 +475,6 @@ done: ioprio_class = dd_rq_ioclass(rq); prio = ioprio_class_to_prio[ioprio_class]; dd_count(dd, dispatched, prio); - blkcg = rq->elv.priv[0]; - ddcg_count(blkcg, dispatched, ioprio_class); /* * If the request needs its target zone locked, do it. */ @@ -569,8 +568,6 @@ static void dd_exit_sched(struct elevator_queue *e) struct deadline_data *dd = e->elevator_data; enum dd_prio prio; - dd_deactivate_policy(dd->queue); - for (prio = 0; prio <= DD_PRIO_MAX; prio++) { struct dd_per_prio *per_prio = &dd->per_prio[prio]; @@ -584,7 +581,7 @@ static void dd_exit_sched(struct elevator_queue *e) } /* - * Initialize elevator private data (deadline_data) and associate with blkcg. + * initialize elevator private data (deadline_data). */ static int dd_init_sched(struct request_queue *q, struct elevator_type *e) { @@ -593,12 +590,6 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e) enum dd_prio prio; int ret = -ENOMEM; - /* - * Initialization would be very tricky if the queue is not frozen, - * hence the warning statement below. - */ - WARN_ON_ONCE(!percpu_ref_is_zero(&q->q_usage_counter)); - eq = elevator_alloc(q, e); if (!eq) return ret; @@ -614,8 +605,6 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e) if (!dd->stats) goto free_dd; - dd->queue = q; - for (prio = 0; prio <= DD_PRIO_MAX; prio++) { struct dd_per_prio *per_prio = &dd->per_prio[prio]; @@ -635,17 +624,9 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e) spin_lock_init(&dd->lock); spin_lock_init(&dd->zone_lock); - ret = dd_activate_policy(q); - if (ret) - goto free_stats; - - ret = 0; q->elevator = eq; return 0; -free_stats: - free_percpu(dd->stats); - free_dd: kfree(dd); @@ -718,7 +699,6 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio); struct dd_per_prio *per_prio; enum dd_prio prio; - struct dd_blkcg *blkcg; LIST_HEAD(free); lockdep_assert_held(&dd->lock); @@ -729,18 +709,8 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, */ blk_req_zone_write_unlock(rq); - /* - * If a block cgroup has been associated with the submitter and if an - * I/O priority has been set in the associated block cgroup, use the - * lowest of the cgroup priority and the request priority for the - * request. If no priority has been set in the request, use the cgroup - * priority. - */ prio = ioprio_class_to_prio[ioprio_class]; dd_count(dd, inserted, prio); - blkcg = dd_blkcg_from_bio(rq->bio); - ddcg_count(blkcg, inserted, ioprio_class); - rq->elv.priv[0] = blkcg; if (blk_mq_sched_try_insert_merge(q, rq, &free)) { blk_mq_free_requests(&free); @@ -789,10 +759,12 @@ static void dd_insert_requests(struct blk_mq_hw_ctx *hctx, spin_unlock(&dd->lock); } -/* Callback from inside blk_mq_rq_ctx_init(). */ +/* + * Nothing to do here. 
This is defined only to ensure that .finish_request + * method is called upon request completion. + */ static void dd_prepare_request(struct request *rq) { - rq->elv.priv[0] = NULL; } /* @@ -815,13 +787,11 @@ static void dd_finish_request(struct request *rq) { struct request_queue *q = rq->q; struct deadline_data *dd = q->elevator->elevator_data; - struct dd_blkcg *blkcg = rq->elv.priv[0]; const u8 ioprio_class = dd_rq_ioclass(rq); const enum dd_prio prio = ioprio_class_to_prio[ioprio_class]; struct dd_per_prio *per_prio = &dd->per_prio[prio]; dd_count(dd, completed, prio); - ddcg_count(blkcg, completed, ioprio_class); if (blk_queue_is_zoned(q)) { unsigned long flags; @@ -1144,26 +1114,11 @@ MODULE_ALIAS("mq-deadline-iosched"); static int __init deadline_init(void) { - int ret; - - ret = elv_register(&mq_deadline); - if (ret) - goto out; - ret = dd_blkcg_init(); - if (ret) - goto unreg; - -out: - return ret; - -unreg: - elv_unregister(&mq_deadline); - goto out; + return elv_register(&mq_deadline); } static void __exit deadline_exit(void) { - dd_blkcg_exit(); elv_unregister(&mq_deadline); } diff --git a/block/partitions/ldm.c b/block/partitions/ldm.c index cc86534c80ad..b8b518d7fb77 100644 --- a/block/partitions/ldm.c +++ b/block/partitions/ldm.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/** +/* * ldm - Support for Windows Logical Disk Manager (Dynamic Disks) * * Copyright (C) 2001,2002 Richard Russon <ldm@flatcap.org> diff --git a/crypto/Kconfig b/crypto/Kconfig index ca3b02dcbbfa..64b772c5d1c9 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1768,7 +1768,7 @@ config CRYPTO_DRBG_HMAC bool default y select CRYPTO_HMAC - select CRYPTO_SHA256 + select CRYPTO_SHA512 config CRYPTO_DRBG_HASH bool "Enable Hash DRBG" diff --git a/drivers/accessibility/speakup/i18n.c b/drivers/accessibility/speakup/i18n.c index bc7b47d1876f..d62079b1661f 100644 --- a/drivers/accessibility/speakup/i18n.c +++ b/drivers/accessibility/speakup/i18n.c @@ -90,13 +90,13 @@ static char *speakup_default_msgs[MSG_LAST_INDEX] = { [MSG_COLOR_YELLOW] = "yellow", [MSG_COLOR_WHITE] = "white", [MSG_COLOR_GREY] = "grey", - [MSG_COLOR_BRIGHTBLUE] "bright blue", - [MSG_COLOR_BRIGHTGREEN] "bright green", - [MSG_COLOR_BRIGHTCYAN] "bright cyan", - [MSG_COLOR_BRIGHTRED] "bright red", - [MSG_COLOR_BRIGHTMAGENTA] "bright magenta", - [MSG_COLOR_BRIGHTYELLOW] "bright yellow", - [MSG_COLOR_BRIGHTWHITE] "bright white", + [MSG_COLOR_BRIGHTBLUE] = "bright blue", + [MSG_COLOR_BRIGHTGREEN] = "bright green", + [MSG_COLOR_BRIGHTCYAN] = "bright cyan", + [MSG_COLOR_BRIGHTRED] = "bright red", + [MSG_COLOR_BRIGHTMAGENTA] = "bright magenta", + [MSG_COLOR_BRIGHTYELLOW] = "bright yellow", + [MSG_COLOR_BRIGHTWHITE] = "bright white", /* Names of key states. 
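The speakup hunk above restores the '=' that designated initializers require. A standalone reminder of the syntax: entries may appear in any order and unmentioned slots default to zero (NULL here)::

    #include <stdio.h>

    enum color { RED, GREEN, BRIGHT_BLUE, NR_COLORS };

    /* [index] = value; without the '=' this is not valid C */
    static const char *const names[NR_COLORS] = {
        [BRIGHT_BLUE] = "bright blue",
        [RED]         = "red",
        [GREEN]       = "green",
    };

    int main(void)
    {
        printf("%s\n", names[BRIGHT_BLUE]);
        return 0;
    }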
*/ [MSG_STATE_DOUBLE] = "double", diff --git a/drivers/accessibility/speakup/speakup_soft.c b/drivers/accessibility/speakup/speakup_soft.c index c3f97c572fb6..19824e7006fe 100644 --- a/drivers/accessibility/speakup/speakup_soft.c +++ b/drivers/accessibility/speakup/speakup_soft.c @@ -153,18 +153,25 @@ static char *get_initstring(void) static char buf[40]; char *cp; struct var_t *var; + size_t len; + size_t n; memset(buf, 0, sizeof(buf)); cp = buf; + len = sizeof(buf); + var = synth_soft.vars; while (var->var_id != MAXVARS) { if (var->var_id != CAPS_START && var->var_id != CAPS_STOP && - var->var_id != PAUSE && var->var_id != DIRECT) - cp = cp + sprintf(cp, var->u.n.synth_fmt, - var->u.n.value); + var->var_id != PAUSE && var->var_id != DIRECT) { + n = scnprintf(cp, len, var->u.n.synth_fmt, + var->u.n.value); + cp = cp + n; + len = len - n; + } var++; } - cp = cp + sprintf(cp, "\n"); + cp = cp + scnprintf(cp, len, "\n"); return buf; } diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 9d872ea477a6..8f9940f40baa 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -370,7 +370,7 @@ config ACPI_TABLE_UPGRADE config ACPI_TABLE_OVERRIDE_VIA_BUILTIN_INITRD bool "Override ACPI tables from built-in initrd" depends on ACPI_TABLE_UPGRADE - depends on INITRAMFS_SOURCE!="" && INITRAMFS_COMPRESSION="" + depends on INITRAMFS_SOURCE!="" && INITRAMFS_COMPRESSION_NONE help This option provides functionality to override arbitrary ACPI tables from built-in uncompressed initrd. diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c index 38e10ab976e6..14b71b41e845 100644 --- a/drivers/acpi/acpica/nsrepair2.c +++ b/drivers/acpi/acpica/nsrepair2.c @@ -379,13 +379,6 @@ acpi_ns_repair_CID(struct acpi_evaluate_info *info, (*element_ptr)->common.reference_count = original_ref_count; - - /* - * The original_element holds a reference from the package object - * that represents _HID. Since a new element was created by _HID, - * remove the reference from the _CID package. 
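The get_initstring() rework above relies on scnprintf() returning the number of bytes actually written, never more than the space left, which makes the advance-and-shrink loop safe even on truncation. A kernel-context sketch of the idiom, with an illustrative format string::

    static void build_string(char *buf, size_t size, const int *vals, int n)
    {
        char *cp = buf;
        size_t len = size;
        int i;

        for (i = 0; i < n; i++) {
            size_t written = scnprintf(cp, len, "v%d ", vals[i]);

            cp += written;
            len -= written; /* scnprintf(cp, 0, ...) safely returns 0 */
        }
        scnprintf(cp, len, "\n");
    }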
- */ - acpi_ut_remove_reference(original_element); } element_ptr++; diff --git a/drivers/acpi/dptf/dptf_pch_fivr.c b/drivers/acpi/dptf/dptf_pch_fivr.c index 5fca18296bf6..550b9081fcbc 100644 --- a/drivers/acpi/dptf/dptf_pch_fivr.c +++ b/drivers/acpi/dptf/dptf_pch_fivr.c @@ -9,6 +9,42 @@ #include <linux/module.h> #include <linux/platform_device.h> +struct pch_fivr_resp { + u64 status; + u64 result; +}; + +static int pch_fivr_read(acpi_handle handle, char *method, struct pch_fivr_resp *fivr_resp) +{ + struct acpi_buffer resp = { sizeof(struct pch_fivr_resp), fivr_resp}; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + struct acpi_buffer format = { sizeof("NN"), "NN" }; + union acpi_object *obj; + acpi_status status; + int ret = -EFAULT; + + status = acpi_evaluate_object(handle, method, NULL, &buffer); + if (ACPI_FAILURE(status)) + return ret; + + obj = buffer.pointer; + if (!obj || obj->type != ACPI_TYPE_PACKAGE) + goto release_buffer; + + status = acpi_extract_package(obj, &format, &resp); + if (ACPI_FAILURE(status)) + goto release_buffer; + + if (fivr_resp->status) + goto release_buffer; + + ret = 0; + +release_buffer: + kfree(buffer.pointer); + return ret; +} + /* * Presentation of attributes which are defined for INT1045 * They are: @@ -23,15 +59,14 @@ static ssize_t name##_show(struct device *dev,\ char *buf)\ {\ struct acpi_device *acpi_dev = dev_get_drvdata(dev);\ - unsigned long long val;\ - acpi_status status;\ + struct pch_fivr_resp fivr_resp;\ + int status;\ \ - status = acpi_evaluate_integer(acpi_dev->handle, #method,\ - NULL, &val);\ - if (ACPI_SUCCESS(status))\ - return sprintf(buf, "%d\n", (int)val);\ - else\ - return -EINVAL;\ + status = pch_fivr_read(acpi_dev->handle, #method, &fivr_resp);\ + if (status)\ + return status;\ +\ + return sprintf(buf, "%llu\n", fivr_resp.result);\ } #define PCH_FIVR_STORE(name, method) \ diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index 23d9a09d7060..a3ef6cce644c 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -3021,6 +3021,9 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev; struct nd_mapping_desc *mapping; + /* range index 0 == unmapped in SPA or invalid-SPA */ + if (memdev->range_index == 0 || spa->range_index == 0) + continue; if (memdev->range_index != spa->range_index) continue; if (count >= ND_MAX_MAPPINGS) { diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c index 31cf9aee5edd..1f6007abcf18 100644 --- a/drivers/acpi/prmt.c +++ b/drivers/acpi/prmt.c @@ -292,6 +292,12 @@ void __init init_prmt(void) int mc = acpi_table_parse_entries(ACPI_SIG_PRMT, sizeof(struct acpi_table_prmt) + sizeof (struct acpi_table_prmt_header), 0, acpi_parse_prmt, 0); + /* + * Return immediately if PRMT table is not present or no PRM module found. + */ + if (mc <= 0) + return; + pr_info("PRM: found %u modules\n", mc); status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT, diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index dc01fb550b28..ee78a210c606 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -423,13 +423,6 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi, } } -static bool irq_is_legacy(struct acpi_resource_irq *irq) -{ - return irq->triggering == ACPI_EDGE_SENSITIVE && - irq->polarity == ACPI_ACTIVE_HIGH && - irq->shareable == ACPI_EXCLUSIVE; -} - /** * acpi_dev_resource_interrupt - Extract ACPI interrupt resource information. 
* @ares: Input ACPI resource object. @@ -468,7 +461,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index, } acpi_dev_get_irqresource(res, irq->interrupts[index], irq->triggering, irq->polarity, - irq->shareable, irq_is_legacy(irq)); + irq->shareable, true); break; case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: ext_irq = &ares->data.extended_irq; diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c index e7ddd281afff..d5cedffeeff9 100644 --- a/drivers/acpi/utils.c +++ b/drivers/acpi/utils.c @@ -860,11 +860,9 @@ EXPORT_SYMBOL(acpi_dev_present); * Return the next match of ACPI device if another matching device was present * at the moment of invocation, or NULL otherwise. * - * FIXME: The function does not tolerate the sudden disappearance of @adev, e.g. - * in the case of a hotplug event. That said, the caller should ensure that - * this will never happen. - * * The caller is responsible for invoking acpi_dev_put() on the returned device. + * On the other hand the function invokes acpi_dev_put() on the given @adev + * assuming that its reference counter had been increased beforehand. * * See additional information in acpi_dev_present() as well. */ @@ -880,6 +878,7 @@ acpi_dev_get_next_match_dev(struct acpi_device *adev, const char *hid, const cha match.hrv = hrv; dev = bus_find_device(&acpi_bus_type, start, &match, acpi_dev_match_cb); + acpi_dev_put(adev); return dev ? to_acpi_device(dev) : NULL; } EXPORT_SYMBOL(acpi_dev_get_next_match_dev); diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c index 1c507804fb10..3a308461246a 100644 --- a/drivers/acpi/x86/s2idle.c +++ b/drivers/acpi/x86/s2idle.c @@ -378,19 +378,25 @@ static int lps0_device_attach(struct acpi_device *adev, * AMDI0006: * - should use rev_id 0x0 * - function mask = 0x3: Should use Microsoft method + * AMDI0007: + * - Should use rev_id 0x2 + * - Should only use AMD method */ const char *hid = acpi_device_hid(adev); - rev_id = 0; + rev_id = strcmp(hid, "AMDI0007") ? 
0 : 2; lps0_dsm_func_mask = validate_dsm(adev->handle, ACPI_LPS0_DSM_UUID_AMD, rev_id, &lps0_dsm_guid); lps0_dsm_func_mask_microsoft = validate_dsm(adev->handle, - ACPI_LPS0_DSM_UUID_MICROSOFT, rev_id, + ACPI_LPS0_DSM_UUID_MICROSOFT, 0, &lps0_dsm_guid_microsoft); if (lps0_dsm_func_mask > 0x3 && (!strcmp(hid, "AMD0004") || !strcmp(hid, "AMDI0005"))) { lps0_dsm_func_mask = (lps0_dsm_func_mask << 1) | 0x1; acpi_handle_debug(adev->handle, "_DSM UUID %s: Adjusted function mask: 0x%x\n", ACPI_LPS0_DSM_UUID_AMD, lps0_dsm_func_mask); + } else if (lps0_dsm_func_mask_microsoft > 0 && !strcmp(hid, "AMDI0007")) { + lps0_dsm_func_mask_microsoft = -EINVAL; + acpi_handle_debug(adev->handle, "_DSM Using AMD method\n"); } } else { rev_id = 1; @@ -446,7 +452,7 @@ int acpi_s2idle_prepare_late(void) if (lps0_dsm_func_mask_microsoft > 0) { acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF, lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft); - acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT, + acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY, lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft); acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY, lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft); @@ -473,7 +479,7 @@ void acpi_s2idle_restore_early(void) if (lps0_dsm_func_mask_microsoft > 0) { acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT, lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft); - acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY, + acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT, lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft); acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON, lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft); diff --git a/drivers/android/binder.c b/drivers/android/binder.c index bcec598b89f2..d9030cb6b1e4 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -2547,8 +2547,8 @@ static void binder_transaction(struct binder_proc *proc, ref->node, &target_proc, &return_error); } else { - binder_user_error("%d:%d got transaction to invalid handle\n", - proc->pid, thread->pid); + binder_user_error("%d:%d got transaction to invalid handle, %u\n", + proc->pid, thread->pid, tr->target.handle); return_error = BR_FAILED_REPLY; } binder_proc_unlock(proc); diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c index e80ba93c62a9..e3605cdd4335 100644 --- a/drivers/android/binderfs.c +++ b/drivers/android/binderfs.c @@ -58,6 +58,10 @@ enum binderfs_stats_mode { binderfs_stats_mode_global, }; +struct binder_features { + bool oneway_spam_detection; +}; + static const struct constant_table binderfs_param_stats[] = { { "global", binderfs_stats_mode_global }, {} @@ -69,6 +73,10 @@ static const struct fs_parameter_spec binderfs_fs_parameters[] = { {} }; +static struct binder_features binder_features = { + .oneway_spam_detection = true, +}; + static inline struct binderfs_info *BINDERFS_SB(const struct super_block *sb) { return sb->s_fs_info; @@ -583,6 +591,33 @@ out: return dentry; } +static int binder_features_show(struct seq_file *m, void *unused) +{ + bool *feature = m->private; + + seq_printf(m, "%d\n", *feature); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(binder_features); + +static int init_binder_features(struct super_block *sb) +{ + struct dentry *dentry, *dir; + + dir = binderfs_create_dir(sb->s_root, "features"); + if (IS_ERR(dir)) + return PTR_ERR(dir); + + dentry = binderfs_create_file(dir, "oneway_spam_detection", + &binder_features_fops, + &binder_features.oneway_spam_detection); + if (IS_ERR(dentry)) + return PTR_ERR(dentry); + + return 0; +} + static int init_binder_logs(struct 
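The feature files created above let userspace probe driver capabilities by existence alone. A minimal userspace check, assuming a binderfs instance mounted at /dev/binderfs::

    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        const char *f = "/dev/binderfs/features/oneway_spam_detection";

        /* presence of the file, not its contents, signals support */
        if (access(f, F_OK) == 0)
            puts("oneway_spam_detection supported");
        else
            puts("oneway_spam_detection not supported");
        return 0;
    }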
super_block *sb) { struct dentry *binder_logs_root_dir, *dentry, *proc_log_dir; @@ -723,6 +758,10 @@ static int binderfs_fill_super(struct super_block *sb, struct fs_context *fc) name++; } + ret = init_binder_features(sb); + if (ret) + return ret; + if (info->mount_opts.stats_mode == binderfs_stats_mode_global) return init_binder_logs(sb); diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index ae7189d1a568..b71ea4a680b0 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -637,6 +637,20 @@ unsigned int ata_sff_data_xfer32(struct ata_queued_cmd *qc, unsigned char *buf, } EXPORT_SYMBOL_GPL(ata_sff_data_xfer32); +static void ata_pio_xfer(struct ata_queued_cmd *qc, struct page *page, + unsigned int offset, size_t xfer_size) +{ + bool do_write = (qc->tf.flags & ATA_TFLAG_WRITE); + unsigned char *buf; + + buf = kmap_atomic(page); + qc->ap->ops->sff_data_xfer(qc, buf + offset, xfer_size, do_write); + kunmap_atomic(buf); + + if (!do_write && !PageSlab(page)) + flush_dcache_page(page); +} + /** * ata_pio_sector - Transfer a sector of data. * @qc: Command on going @@ -648,11 +662,9 @@ EXPORT_SYMBOL_GPL(ata_sff_data_xfer32); */ static void ata_pio_sector(struct ata_queued_cmd *qc) { - int do_write = (qc->tf.flags & ATA_TFLAG_WRITE); struct ata_port *ap = qc->ap; struct page *page; unsigned int offset; - unsigned char *buf; if (!qc->cursg) { qc->curbytes = qc->nbytes; @@ -670,13 +682,20 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); - /* do the actual data transfer */ - buf = kmap_atomic(page); - ap->ops->sff_data_xfer(qc, buf + offset, qc->sect_size, do_write); - kunmap_atomic(buf); + /* + * Split the transfer when it splits a page boundary. Note that the + * split still has to be dword aligned like all ATA data transfers. 
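For illustration, the same split arithmetic as a minimal standalone C sketch, assuming a 4 KiB page; do_xfer() is a hypothetical stand-in for ap->ops->sff_data_xfer() and plain page numbers stand in for struct page pointers:

#include <stdio.h>
#include <stddef.h>

#define SKETCH_PAGE_SIZE 4096u

static void do_xfer(unsigned int page, unsigned int offset, size_t len)
{
	printf("xfer: page %u, offset %u, len %zu\n", page, offset, len);
}

static void pio_xfer_split(unsigned int page, unsigned int offset,
			   size_t sect_size)
{
	if (offset + sect_size > SKETCH_PAGE_SIZE) {
		/* First chunk ends exactly at the page boundary... */
		size_t split_len = SKETCH_PAGE_SIZE - offset;

		do_xfer(page, offset, split_len);
		/* ...the remainder starts at offset 0 of the next page. */
		do_xfer(page + 1, 0, sect_size - split_len);
	} else {
		do_xfer(page, offset, sect_size);
	}
}

int main(void)
{
	pio_xfer_split(7, SKETCH_PAGE_SIZE - 256, 512);	/* crosses the boundary */
	pio_xfer_split(8, 0, 512);			/* fits in one page */
	return 0;
}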
+ */ + WARN_ON_ONCE(offset % 4); + if (offset + qc->sect_size > PAGE_SIZE) { + unsigned int split_len = PAGE_SIZE - offset; - if (!do_write && !PageSlab(page)) - flush_dcache_page(page); + ata_pio_xfer(qc, page, offset, split_len); + ata_pio_xfer(qc, nth_page(page, 1), 0, + qc->sect_size - split_len); + } else { + ata_pio_xfer(qc, page, offset, qc->sect_size); + } qc->curbytes += qc->sect_size; qc->cursg_ofs += qc->sect_size; diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c index adc199dfba3c..6a30264ab2ba 100644 --- a/drivers/base/auxiliary.c +++ b/drivers/base/auxiliary.c @@ -231,6 +231,8 @@ EXPORT_SYMBOL_GPL(auxiliary_find_device); int __auxiliary_driver_register(struct auxiliary_driver *auxdrv, struct module *owner, const char *modname) { + int ret; + if (WARN_ON(!auxdrv->probe) || WARN_ON(!auxdrv->id_table)) return -EINVAL; @@ -246,7 +248,11 @@ int __auxiliary_driver_register(struct auxiliary_driver *auxdrv, auxdrv->driver.bus = &auxiliary_bus_type; auxdrv->driver.mod_name = modname; - return driver_register(&auxdrv->driver); + ret = driver_register(&auxdrv->driver); + if (ret) + kfree(auxdrv->driver.name); + + return ret; } EXPORT_SYMBOL_GPL(__auxiliary_driver_register); diff --git a/drivers/base/core.c b/drivers/base/core.c index cadcade65825..6c0ef9d55a34 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -574,8 +574,10 @@ static void devlink_remove_symlinks(struct device *dev, return; } - snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup)); - sysfs_remove_link(&con->kobj, buf); + if (device_is_registered(con)) { + snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup)); + sysfs_remove_link(&con->kobj, buf); + } snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con)); sysfs_remove_link(&sup->kobj, buf); kfree(buf); @@ -2835,6 +2837,7 @@ void device_initialize(struct device *dev) device_pm_init(dev); set_dev_node(dev, -1); #ifdef CONFIG_GENERIC_MSI_IRQ + raw_spin_lock_init(&dev->msi_lock); INIT_LIST_HEAD(&dev->msi_list); #endif INIT_LIST_HEAD(&dev->links.consumers); diff --git a/drivers/base/dd.c b/drivers/base/dd.c index daeb9b5763ae..437cd61343b2 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -653,8 +653,6 @@ dev_groups_failed: else if (drv->remove) drv->remove(dev); probe_failed: - kfree(dev->dma_range_map); - dev->dma_range_map = NULL; if (dev->bus) blocking_notifier_call_chain(&dev->bus->p->bus_notifier, BUS_NOTIFY_DRIVER_NOT_BOUND, dev); @@ -662,6 +660,8 @@ pinctrl_bind_failed: device_links_no_driver(dev); devres_release_all(dev); arch_teardown_dma_ops(dev); + kfree(dev->dma_range_map); + dev->dma_range_map = NULL; driver_sysfs_remove(dev); dev->driver = NULL; dev_set_drvdata(dev, NULL); diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c index 91899d185e31..d7d63c1aa993 100644 --- a/drivers/base/firmware_loader/fallback.c +++ b/drivers/base/firmware_loader/fallback.c @@ -89,12 +89,11 @@ static void __fw_load_abort(struct fw_priv *fw_priv) { /* * There is a small window in which user can write to 'loading' - * between loading done and disappearance of 'loading' + * between loading done/aborted and disappearance of 'loading' */ - if (fw_sysfs_done(fw_priv)) + if (fw_state_is_aborted(fw_priv) || fw_sysfs_done(fw_priv)) return; - list_del_init(&fw_priv->pending_list); fw_state_aborted(fw_priv); } @@ -280,7 +279,6 @@ static ssize_t firmware_loading_store(struct device *dev, * Same logic as fw_load_abort, only the DONE bit * is ignored and we set ABORT 
only on failure. */ - list_del_init(&fw_priv->pending_list); if (rc) { fw_state_aborted(fw_priv); written = rc; @@ -513,6 +511,11 @@ static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs, long timeout) } mutex_lock(&fw_lock); + if (fw_state_is_aborted(fw_priv)) { + mutex_unlock(&fw_lock); + retval = -EINTR; + goto out; + } list_add(&fw_priv->pending_list, &pending_fw_head); mutex_unlock(&fw_lock); @@ -535,11 +538,10 @@ static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs, long timeout) if (fw_state_is_aborted(fw_priv)) { if (retval == -ERESTARTSYS) retval = -EINTR; - else - retval = -EAGAIN; } else if (fw_priv->is_paged_buf && !fw_priv->data) retval = -ENOMEM; +out: device_del(f_dev); err_put_dev: put_device(f_dev); diff --git a/drivers/base/firmware_loader/firmware.h b/drivers/base/firmware_loader/firmware.h index 63bd29fdcb9c..a3014e9e2c85 100644 --- a/drivers/base/firmware_loader/firmware.h +++ b/drivers/base/firmware_loader/firmware.h @@ -117,8 +117,16 @@ static inline void __fw_state_set(struct fw_priv *fw_priv, WRITE_ONCE(fw_st->status, status); - if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED) + if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED) { +#ifdef CONFIG_FW_LOADER_USER_HELPER + /* + * Doing this here ensures that the fw_priv is deleted from + * the pending list in all abort/done paths. + */ + list_del_init(&fw_priv->pending_list); +#endif complete_all(&fw_st->completion); + } } static inline void fw_state_aborted(struct fw_priv *fw_priv) diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c index 4fdb8219cd08..68c549d71230 100644 --- a/drivers/base/firmware_loader/main.c +++ b/drivers/base/firmware_loader/main.c @@ -783,8 +783,10 @@ static void fw_abort_batch_reqs(struct firmware *fw) return; fw_priv = fw->priv; + mutex_lock(&fw_lock); if (!fw_state_is_aborted(fw_priv)) fw_state_aborted(fw_priv); + mutex_unlock(&fw_lock); } /* called from request_firmware() and request_firmware_work_func() */ diff --git a/drivers/block/loop.c b/drivers/block/loop.c index f37b9e3d833c..f0cdff0c5fbf 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -88,6 +88,47 @@ static DEFINE_IDR(loop_index_idr); static DEFINE_MUTEX(loop_ctl_mutex); +static DEFINE_MUTEX(loop_validate_mutex); + +/** + * loop_global_lock_killable() - take locks for safe loop_validate_file() test + * + * @lo: struct loop_device + * @global: true if @lo is about to bind another "struct loop_device", false otherwise + * + * Returns 0 on success, -EINTR otherwise. + * + * Since loop_validate_file() traverses on other "struct loop_device" if + * is_loop_device() is true, we need a global lock for serializing concurrent + * loop_configure()/loop_change_fd()/__loop_clr_fd() calls. 
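The kernel-doc above encodes a strict ordering rule: the global loop_validate_mutex is always taken before the per-device lo_mutex and released after it, so two callers can never acquire the pair in opposite orders. A userspace pthreads analogue of that rule (names are hypothetical, and the killable semantics are reduced to plain locking):

#include <pthread.h>

static pthread_mutex_t demo_validate_mutex = PTHREAD_MUTEX_INITIALIZER;

struct demo_loop_device {
	pthread_mutex_t lo_mutex;
};

/* Global lock first, device lock second; unwind the global lock on failure. */
static int demo_global_lock(struct demo_loop_device *lo, int global)
{
	int err;

	if (global) {
		err = pthread_mutex_lock(&demo_validate_mutex);
		if (err)
			return err;
	}
	err = pthread_mutex_lock(&lo->lo_mutex);
	if (err && global)
		pthread_mutex_unlock(&demo_validate_mutex);
	return err;
}

/* Release in the opposite order of acquisition. */
static void demo_global_unlock(struct demo_loop_device *lo, int global)
{
	pthread_mutex_unlock(&lo->lo_mutex);
	if (global)
		pthread_mutex_unlock(&demo_validate_mutex);
}

int main(void)
{
	struct demo_loop_device lo = { .lo_mutex = PTHREAD_MUTEX_INITIALIZER };

	if (demo_global_lock(&lo, 1) == 0)
		demo_global_unlock(&lo, 1);
	return 0;
}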
+ */ +static int loop_global_lock_killable(struct loop_device *lo, bool global) +{ + int err; + + if (global) { + err = mutex_lock_killable(&loop_validate_mutex); + if (err) + return err; + } + err = mutex_lock_killable(&lo->lo_mutex); + if (err && global) + mutex_unlock(&loop_validate_mutex); + return err; +} + +/** + * loop_global_unlock() - release locks taken by loop_global_lock_killable() + * + * @lo: struct loop_device + * @global: true if @lo was about to bind another "struct loop_device", false otherwise + */ +static void loop_global_unlock(struct loop_device *lo, bool global) +{ + mutex_unlock(&lo->lo_mutex); + if (global) + mutex_unlock(&loop_validate_mutex); +} static int max_part; static int part_shift; @@ -672,13 +713,15 @@ static int loop_validate_file(struct file *file, struct block_device *bdev) while (is_loop_device(f)) { struct loop_device *l; + lockdep_assert_held(&loop_validate_mutex); if (f->f_mapping->host->i_rdev == bdev->bd_dev) return -EBADF; l = I_BDEV(f->f_mapping->host)->bd_disk->private_data; - if (l->lo_state != Lo_bound) { + if (l->lo_state != Lo_bound) return -EINVAL; - } + /* Order wrt setting lo->lo_backing_file in loop_configure(). */ + rmb(); f = l->lo_backing_file; } if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode)) @@ -697,13 +740,18 @@ static int loop_validate_file(struct file *file, struct block_device *bdev) static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, unsigned int arg) { - struct file *file = NULL, *old_file; - int error; - bool partscan; + struct file *file = fget(arg); + struct file *old_file; + int error; + bool partscan; + bool is_loop; - error = mutex_lock_killable(&lo->lo_mutex); + if (!file) + return -EBADF; + is_loop = is_loop_device(file); + error = loop_global_lock_killable(lo, is_loop); if (error) - return error; + goto out_putf; error = -ENXIO; if (lo->lo_state != Lo_bound) goto out_err; @@ -713,11 +761,6 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, if (!(lo->lo_flags & LO_FLAGS_READ_ONLY)) goto out_err; - error = -EBADF; - file = fget(arg); - if (!file) - goto out_err; - error = loop_validate_file(file, bdev); if (error) goto out_err; @@ -740,7 +783,16 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, loop_update_dio(lo); blk_mq_unfreeze_queue(lo->lo_queue); partscan = lo->lo_flags & LO_FLAGS_PARTSCAN; - mutex_unlock(&lo->lo_mutex); + loop_global_unlock(lo, is_loop); + + /* + * Flush loop_validate_file() before fput(), for l->lo_backing_file + * might be pointing at old_file which might be the last reference. 
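The comment above relies on a classic idiom that also appears in __loop_clr_fd() below: an empty lock/unlock pair on loop_validate_mutex acts as a flush, because the mutex cannot be acquired until every loop_validate_file() walker that entered earlier has dropped it. A self-contained sketch of the idiom (names hypothetical):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t gate = PTHREAD_MUTEX_INITIALIZER;

/* Walkers touch shared state only while holding the gate... */
static void walker(void)
{
	pthread_mutex_lock(&gate);
	/* ... e.g. follow a chain of backing files ... */
	pthread_mutex_unlock(&gate);
}

/*
 * ...so taking and immediately dropping the same gate waits out every
 * walker that started before this point; nothing else is needed.
 */
static void flush_walkers(void)
{
	pthread_mutex_lock(&gate);
	pthread_mutex_unlock(&gate);
	printf("all earlier walkers have finished\n");
}

int main(void)
{
	walker();
	flush_walkers();
	return 0;
}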
+ */ + if (!is_loop) { + mutex_lock(&loop_validate_mutex); + mutex_unlock(&loop_validate_mutex); + } /* * We must drop file reference outside of lo_mutex as dropping * the file ref can take open_mutex which creates circular locking @@ -752,9 +804,9 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, return 0; out_err: - mutex_unlock(&lo->lo_mutex); - if (file) - fput(file); + loop_global_unlock(lo, is_loop); +out_putf: + fput(file); return error; } @@ -1136,22 +1188,22 @@ static int loop_configure(struct loop_device *lo, fmode_t mode, struct block_device *bdev, const struct loop_config *config) { - struct file *file; - struct inode *inode; + struct file *file = fget(config->fd); + struct inode *inode; struct address_space *mapping; - int error; - loff_t size; - bool partscan; - unsigned short bsize; + int error; + loff_t size; + bool partscan; + unsigned short bsize; + bool is_loop; + + if (!file) + return -EBADF; + is_loop = is_loop_device(file); /* This is safe, since we have a reference from open(). */ __module_get(THIS_MODULE); - error = -EBADF; - file = fget(config->fd); - if (!file) - goto out; - /* * If we don't hold exclusive handle for the device, upgrade to it * here to avoid changing device under exclusive owner. @@ -1162,7 +1214,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode, goto out_putf; } - error = mutex_lock_killable(&lo->lo_mutex); + error = loop_global_lock_killable(lo, is_loop); if (error) goto out_bdev; @@ -1242,6 +1294,9 @@ static int loop_configure(struct loop_device *lo, fmode_t mode, size = get_loop_size(lo, file); loop_set_size(lo, size); + /* Order wrt reading lo_state in loop_validate_file(). */ + wmb(); + lo->lo_state = Lo_bound; if (part_shift) lo->lo_flags |= LO_FLAGS_PARTSCAN; @@ -1253,7 +1308,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode, * put /dev/loopXX inode. Later in __loop_clr_fd() we bdput(bdev). */ bdgrab(bdev); - mutex_unlock(&lo->lo_mutex); + loop_global_unlock(lo, is_loop); if (partscan) loop_reread_partitions(lo); if (!(mode & FMODE_EXCL)) @@ -1261,13 +1316,12 @@ static int loop_configure(struct loop_device *lo, fmode_t mode, return 0; out_unlock: - mutex_unlock(&lo->lo_mutex); + loop_global_unlock(lo, is_loop); out_bdev: if (!(mode & FMODE_EXCL)) bd_abort_claiming(bdev, loop_configure); out_putf: fput(file); -out: /* This is safe: open() is still holding a reference. */ module_put(THIS_MODULE); return error; @@ -1283,6 +1337,18 @@ static int __loop_clr_fd(struct loop_device *lo, bool release) int lo_number; struct loop_worker *pos, *worker; + /* + * Flush loop_configure() and loop_change_fd(). It is acceptable for + * loop_validate_file() to succeed, for actual clear operation has not + * started yet. + */ + mutex_lock(&loop_validate_mutex); + mutex_unlock(&loop_validate_mutex); + /* + * loop_validate_file() now fails because l->lo_state != Lo_bound + * became visible. 
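The wmb() added to loop_configure() pairs with the rmb() added to loop_validate_file(): the backing file must be globally visible before lo_state reads Lo_bound, and a reader that observes Lo_bound must not then read a stale lo_backing_file. A userspace analogue using C11 release/acquire atomics in place of the explicit barriers (names hypothetical):

#include <stdatomic.h>
#include <stdio.h>

struct demo_dev {
	const char *backing_file;
	_Atomic int state;	/* 0 = unbound, 1 = bound */
};

static void demo_configure(struct demo_dev *d, const char *file)
{
	d->backing_file = file;
	/* Release store: publishes backing_file before state (the wmb()). */
	atomic_store_explicit(&d->state, 1, memory_order_release);
}

static const char *demo_validate(struct demo_dev *d)
{
	/* Acquire load: orders the state check before the pointer read (the rmb()). */
	if (atomic_load_explicit(&d->state, memory_order_acquire) != 1)
		return NULL;
	return d->backing_file;
}

int main(void)
{
	struct demo_dev d = { 0 };

	demo_configure(&d, "backing.img");
	printf("%s\n", demo_validate(&d));
	return 0;
}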
+ */ + mutex_lock(&lo->lo_mutex); if (WARN_ON_ONCE(lo->lo_state != Lo_rundown)) { err = -ENXIO; diff --git a/drivers/block/n64cart.c b/drivers/block/n64cart.c index 7b4dd10af9ec..c84be0028f63 100644 --- a/drivers/block/n64cart.c +++ b/drivers/block/n64cart.c @@ -74,7 +74,7 @@ static bool n64cart_do_bvec(struct device *dev, struct bio_vec *bv, u32 pos) n64cart_wait_dma(); - n64cart_write_reg(PI_DRAM_REG, dma_addr + bv->bv_offset); + n64cart_write_reg(PI_DRAM_REG, dma_addr); n64cart_write_reg(PI_CART_REG, (bstart | CART_DOMAIN) & CART_MAX); n64cart_write_reg(PI_WRITE_REG, bv->bv_len - 1); diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index b7d663736d35..19f5d5a8b16a 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -239,8 +239,8 @@ static void nbd_dev_remove(struct nbd_device *nbd) if (disk) { del_gendisk(disk); - blk_mq_free_tag_set(&nbd->tag_set); blk_cleanup_disk(disk); + blk_mq_free_tag_set(&nbd->tag_set); } /* @@ -818,6 +818,10 @@ static bool nbd_clear_req(struct request *req, void *data, bool reserved) { struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req); + /* don't abort a completed request */ + if (blk_mq_request_completed(req)) + return true; + mutex_lock(&cmd->lock); cmd->status = BLK_STS_IOERR; mutex_unlock(&cmd->lock); @@ -2004,15 +2008,19 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd) { mutex_lock(&nbd->config_lock); nbd_disconnect(nbd); - nbd_clear_sock(nbd); - mutex_unlock(&nbd->config_lock); + sock_shutdown(nbd); /* * Make sure recv thread has finished, so it does not drop the last * config ref and try to destroy the workqueue from inside the work - * queue. + * queue. And this also ensures that we can safely call nbd_clear_que() + * to cancel the inflight I/Os. */ if (nbd->recv_workq) flush_workqueue(nbd->recv_workq); + nbd_clear_que(nbd); + nbd->task_setup = NULL; + mutex_unlock(&nbd->config_lock); + if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF, &nbd->config->runtime_flags)) nbd_config_put(nbd); diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c index 3b2b8e872beb..9b3298926356 100644 --- a/drivers/block/paride/pd.c +++ b/drivers/block/paride/pd.c @@ -1014,8 +1014,8 @@ static void __exit pd_exit(void) if (p) { disk->gd = NULL; del_gendisk(p); - blk_mq_free_tag_set(&disk->tag_set); blk_cleanup_disk(p); + blk_mq_free_tag_set(&disk->tag_set); pi_release(disk->pi); } } diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 531d390902dd..90b947c96402 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -4100,8 +4100,6 @@ again: static bool rbd_quiesce_lock(struct rbd_device *rbd_dev) { - bool need_wait; - dout("%s rbd_dev %p\n", __func__, rbd_dev); lockdep_assert_held_write(&rbd_dev->lock_rwsem); @@ -4113,11 +4111,11 @@ static bool rbd_quiesce_lock(struct rbd_device *rbd_dev) */ rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING; rbd_assert(!completion_done(&rbd_dev->releasing_wait)); - need_wait = !list_empty(&rbd_dev->running_list); - downgrade_write(&rbd_dev->lock_rwsem); - if (need_wait) - wait_for_completion(&rbd_dev->releasing_wait); - up_read(&rbd_dev->lock_rwsem); + if (list_empty(&rbd_dev->running_list)) + return true; + + up_write(&rbd_dev->lock_rwsem); + wait_for_completion(&rbd_dev->releasing_wait); down_write(&rbd_dev->lock_rwsem); if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING) @@ -4203,15 +4201,11 @@ static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v, if (!rbd_cid_equal(&cid, &rbd_empty_cid)) { down_write(&rbd_dev->lock_rwsem); if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) { -
/* - * we already know that the remote client is - * the owner - */ - up_write(&rbd_dev->lock_rwsem); - return; + dout("%s rbd_dev %p cid %llu-%llu == owner_cid\n", + __func__, rbd_dev, cid.gid, cid.handle); + } else { + rbd_set_owner_cid(rbd_dev, &cid); } - - rbd_set_owner_cid(rbd_dev, &cid); downgrade_write(&rbd_dev->lock_rwsem); } else { down_read(&rbd_dev->lock_rwsem); @@ -4236,14 +4230,12 @@ static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v, if (!rbd_cid_equal(&cid, &rbd_empty_cid)) { down_write(&rbd_dev->lock_rwsem); if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) { - dout("%s rbd_dev %p unexpected owner, cid %llu-%llu != owner_cid %llu-%llu\n", + dout("%s rbd_dev %p cid %llu-%llu != owner_cid %llu-%llu\n", __func__, rbd_dev, cid.gid, cid.handle, rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle); - up_write(&rbd_dev->lock_rwsem); - return; + } else { + rbd_set_owner_cid(rbd_dev, &rbd_empty_cid); } - - rbd_set_owner_cid(rbd_dev, &rbd_empty_cid); downgrade_write(&rbd_dev->lock_rwsem); } else { down_read(&rbd_dev->lock_rwsem); @@ -4951,6 +4943,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) disk->minors = RBD_MINORS_PER_MAJOR; } disk->fops = &rbd_bd_ops; + disk->private_data = rbd_dev; blk_queue_flag_set(QUEUE_FLAG_NONROT, q); /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */ diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 4b49df2dfd23..afb37aac09e8 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -692,6 +692,28 @@ static const struct blk_mq_ops virtio_mq_ops = { static unsigned int virtblk_queue_depth; module_param_named(queue_depth, virtblk_queue_depth, uint, 0444); +static int virtblk_validate(struct virtio_device *vdev) +{ + u32 blk_size; + + if (!vdev->config->get) { + dev_err(&vdev->dev, "%s failure: config access disabled\n", + __func__); + return -EINVAL; + } + + if (!virtio_has_feature(vdev, VIRTIO_BLK_F_BLK_SIZE)) + return 0; + + blk_size = virtio_cread32(vdev, + offsetof(struct virtio_blk_config, blk_size)); + + if (blk_size < SECTOR_SIZE || blk_size > PAGE_SIZE) + __virtio_clear_bit(vdev, VIRTIO_BLK_F_BLK_SIZE); + + return 0; +} + static int virtblk_probe(struct virtio_device *vdev) { struct virtio_blk *vblk; @@ -703,12 +725,6 @@ static int virtblk_probe(struct virtio_device *vdev) u8 physical_block_exp, alignment_offset; unsigned int queue_depth; - if (!vdev->config->get) { - dev_err(&vdev->dev, "%s failure: config access disabled\n", - __func__); - return -EINVAL; - } - err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS), GFP_KERNEL); if (err < 0) @@ -823,6 +839,14 @@ static int virtblk_probe(struct virtio_device *vdev) else blk_size = queue_logical_block_size(q); + if (unlikely(blk_size < SECTOR_SIZE || blk_size > PAGE_SIZE)) { + dev_err(&vdev->dev, + "block size is changed unexpectedly, now is %u\n", + blk_size); + err = -EINVAL; + goto err_cleanup_disk; + } + /* Use topology information if available */ err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY, struct virtio_blk_config, physical_block_exp, @@ -881,6 +905,8 @@ static int virtblk_probe(struct virtio_device *vdev) device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups); return 0; +err_cleanup_disk: + blk_cleanup_disk(vblk->disk); out_free_tags: blk_mq_free_tag_set(&vblk->tag_set); out_free_vq: @@ -983,6 +1009,7 @@ static struct virtio_driver virtio_blk = { .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, + .validate = virtblk_validate, .probe = virtblk_probe, 
.remove = virtblk_remove, .config_changed = virtblk_config_changed, diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 8d49f8fa98bb..d83fee21f6c5 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -502,34 +502,21 @@ static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg) static int blkif_ioctl(struct block_device *bdev, fmode_t mode, unsigned command, unsigned long argument) { - struct blkfront_info *info = bdev->bd_disk->private_data; int i; - dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n", - command, (long)argument); - switch (command) { case CDROMMULTISESSION: - dev_dbg(&info->xbdev->dev, "FIXME: support multisession CDs later\n"); for (i = 0; i < sizeof(struct cdrom_multisession); i++) if (put_user(0, (char __user *)(argument + i))) return -EFAULT; return 0; - - case CDROM_GET_CAPABILITY: { - struct gendisk *gd = info->gd; - if (gd->flags & GENHD_FL_CD) + case CDROM_GET_CAPABILITY: + if (bdev->bd_disk->flags & GENHD_FL_CD) return 0; return -EINVAL; - } - default: - /*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n", - command);*/ - return -EINVAL; /* same return as native Linux */ + return -EINVAL; } - - return 0; } static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo, @@ -1177,36 +1164,6 @@ out_release_minors: return err; } -static void xlvbd_release_gendisk(struct blkfront_info *info) -{ - unsigned int minor, nr_minors, i; - struct blkfront_ring_info *rinfo; - - if (info->rq == NULL) - return; - - /* No more blkif_request(). */ - blk_mq_stop_hw_queues(info->rq); - - for_each_rinfo(info, rinfo, i) { - /* No more gnttab callback work. */ - gnttab_cancel_free_callback(&rinfo->callback); - - /* Flush gnttab callback work. Must be done with no locks held. */ - flush_work(&rinfo->work); - } - - del_gendisk(info->gd); - - minor = info->gd->first_minor; - nr_minors = info->gd->minors; - xlbd_release_minors(minor, nr_minors); - - blk_cleanup_disk(info->gd); - info->gd = NULL; - blk_mq_free_tag_set(&info->tag_set); -} - /* Already hold rinfo->ring_lock. */ static inline void kick_pending_request_queues_locked(struct blkfront_ring_info *rinfo) { @@ -1756,12 +1713,6 @@ abort_transaction: return err; } -static void free_info(struct blkfront_info *info) -{ - list_del(&info->info_list); - kfree(info); -} - /* Common code used when first setting up, and when resuming. */ static int talk_to_blkback(struct xenbus_device *dev, struct blkfront_info *info) @@ -1880,13 +1831,6 @@ again: xenbus_dev_fatal(dev, err, "%s", message); destroy_blkring: blkif_free(info, 0); - - mutex_lock(&blkfront_mutex); - free_info(info); - mutex_unlock(&blkfront_mutex); - - dev_set_drvdata(&dev->dev, NULL); - return err; } @@ -2126,38 +2070,26 @@ static int blkfront_resume(struct xenbus_device *dev) static void blkfront_closing(struct blkfront_info *info) { struct xenbus_device *xbdev = info->xbdev; - struct block_device *bdev = NULL; - - mutex_lock(&info->mutex); + struct blkfront_ring_info *rinfo; + unsigned int i; - if (xbdev->state == XenbusStateClosing) { - mutex_unlock(&info->mutex); + if (xbdev->state == XenbusStateClosing) return; - } - if (info->gd) - bdev = bdgrab(info->gd->part0); - - mutex_unlock(&info->mutex); - - if (!bdev) { - xenbus_frontend_closed(xbdev); - return; - } + /* No more blkif_request(). 
*/ + blk_mq_stop_hw_queues(info->rq); + blk_set_queue_dying(info->rq); + set_capacity(info->gd, 0); - mutex_lock(&bdev->bd_disk->open_mutex); + for_each_rinfo(info, rinfo, i) { + /* No more gnttab callback work. */ + gnttab_cancel_free_callback(&rinfo->callback); - if (bdev->bd_openers) { - xenbus_dev_error(xbdev, -EBUSY, - "Device in use; refusing to close"); - xenbus_switch_state(xbdev, XenbusStateClosing); - } else { - xlvbd_release_gendisk(info); - xenbus_frontend_closed(xbdev); + /* Flush gnttab callback work. Must be done with no locks held. */ + flush_work(&rinfo->work); } - mutex_unlock(&bdev->bd_disk->open_mutex); - bdput(bdev); + xenbus_frontend_closed(xbdev); } static void blkfront_setup_discard(struct blkfront_info *info) @@ -2472,8 +2404,7 @@ static void blkback_changed(struct xenbus_device *dev, break; fallthrough; case XenbusStateClosing: - if (info) - blkfront_closing(info); + blkfront_closing(info); break; } } @@ -2481,56 +2412,21 @@ static void blkback_changed(struct xenbus_device *dev, static int blkfront_remove(struct xenbus_device *xbdev) { struct blkfront_info *info = dev_get_drvdata(&xbdev->dev); - struct block_device *bdev = NULL; - struct gendisk *disk; dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename); - if (!info) - return 0; - - blkif_free(info, 0); - - mutex_lock(&info->mutex); - - disk = info->gd; - if (disk) - bdev = bdgrab(disk->part0); - - info->xbdev = NULL; - mutex_unlock(&info->mutex); - - if (!bdev) { - mutex_lock(&blkfront_mutex); - free_info(info); - mutex_unlock(&blkfront_mutex); - return 0; - } - - /* - * The xbdev was removed before we reached the Closed - * state. See if it's safe to remove the disk. If the bdev - * isn't closed yet, we let release take care of it. - */ - - mutex_lock(&disk->open_mutex); - info = disk->private_data; - - dev_warn(disk_to_dev(disk), - "%s was hot-unplugged, %d stale handles\n", - xbdev->nodename, bdev->bd_openers); + del_gendisk(info->gd); - if (info && !bdev->bd_openers) { - xlvbd_release_gendisk(info); - disk->private_data = NULL; - mutex_lock(&blkfront_mutex); - free_info(info); - mutex_unlock(&blkfront_mutex); - } + mutex_lock(&blkfront_mutex); + list_del(&info->info_list); + mutex_unlock(&blkfront_mutex); - mutex_unlock(&disk->open_mutex); - bdput(bdev); + blkif_free(info, 0); + xlbd_release_minors(info->gd->first_minor, info->gd->minors); + blk_cleanup_disk(info->gd); + blk_mq_free_tag_set(&info->tag_set); + kfree(info); return 0; } @@ -2541,77 +2437,9 @@ static int blkfront_is_ready(struct xenbus_device *dev) return info->is_ready && info->xbdev; } -static int blkif_open(struct block_device *bdev, fmode_t mode) -{ - struct gendisk *disk = bdev->bd_disk; - struct blkfront_info *info; - int err = 0; - - mutex_lock(&blkfront_mutex); - - info = disk->private_data; - if (!info) { - /* xbdev gone */ - err = -ERESTARTSYS; - goto out; - } - - mutex_lock(&info->mutex); - - if (!info->gd) - /* xbdev is closed */ - err = -ERESTARTSYS; - - mutex_unlock(&info->mutex); - -out: - mutex_unlock(&blkfront_mutex); - return err; -} - -static void blkif_release(struct gendisk *disk, fmode_t mode) -{ - struct blkfront_info *info = disk->private_data; - struct xenbus_device *xbdev; - - mutex_lock(&blkfront_mutex); - if (disk->part0->bd_openers) - goto out_mutex; - - /* - * Check if we have been instructed to close. We will have - * deferred this request, because the bdev was still open. 
- */ - - mutex_lock(&info->mutex); - xbdev = info->xbdev; - - if (xbdev && xbdev->state == XenbusStateClosing) { - /* pending switch to state closed */ - dev_info(disk_to_dev(disk), "releasing disk\n"); - xlvbd_release_gendisk(info); - xenbus_frontend_closed(info->xbdev); - } - - mutex_unlock(&info->mutex); - - if (!xbdev) { - /* sudden device removal */ - dev_info(disk_to_dev(disk), "releasing disk\n"); - xlvbd_release_gendisk(info); - disk->private_data = NULL; - free_info(info); - } - -out_mutex: - mutex_unlock(&blkfront_mutex); -} - static const struct block_device_operations xlvbd_block_fops = { .owner = THIS_MODULE, - .open = blkif_open, - .release = blkif_release, .getgeo = blkif_getgeo, .ioctl = blkif_ioctl, .compat_ioctl = blkdev_compat_ptr_ioctl, diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c index 09c8ab5e0959..6273f782d0f2 100644 --- a/drivers/bus/fsl-mc/fsl-mc-bus.c +++ b/drivers/bus/fsl-mc/fsl-mc-bus.c @@ -63,11 +63,14 @@ struct fsl_mc_addr_translation_range { #define FSL_MC_GCR1 0x0 #define GCR1_P1_STOP BIT(31) +#define GCR1_P2_STOP BIT(30) #define FSL_MC_FAPR 0x28 #define MC_FAPR_PL BIT(18) #define MC_FAPR_BMT BIT(17) +static phys_addr_t mc_portal_base_phys_addr; + /** * fsl_mc_bus_match - device to driver matching callback * @dev: the fsl-mc device to match against @@ -220,7 +223,7 @@ static int scan_fsl_mc_bus(struct device *dev, void *data) root_mc_dev = to_fsl_mc_device(dev); root_mc_bus = to_fsl_mc_bus(root_mc_dev); mutex_lock(&root_mc_bus->scan_mutex); - dprc_scan_objects(root_mc_dev, NULL); + dprc_scan_objects(root_mc_dev, false); mutex_unlock(&root_mc_bus->scan_mutex); exit: @@ -703,14 +706,30 @@ static int fsl_mc_device_get_mmio_regions(struct fsl_mc_device *mc_dev, * If base address is in the region_desc use it otherwise * revert to old mechanism */ - if (region_desc.base_address) + if (region_desc.base_address) { regions[i].start = region_desc.base_address + region_desc.base_offset; - else + } else { error = translate_mc_addr(mc_dev, mc_region_type, region_desc.base_offset, ®ions[i].start); + /* + * Some versions of the MC firmware wrongly report + * 0 for register base address of the DPMCP associated + * with child DPRC objects thus rendering them unusable. + * This is particularly troublesome in ACPI boot + * scenarios where the legacy way of extracting this + * base address from the device tree does not apply. + * Given that DPMCPs share the same base address, + * workaround this by using the base address extracted + * from the root DPRC container. + */ + if (is_fsl_mc_bus_dprc(mc_dev) && + regions[i].start == region_desc.base_offset) + regions[i].start += mc_portal_base_phys_addr; + } + if (error < 0) { dev_err(parent_dev, "Invalid MC offset: %#x (for %s.%d\'s region %d)\n", @@ -895,6 +914,8 @@ error_cleanup_dev: } EXPORT_SYMBOL_GPL(fsl_mc_device_add); +static struct notifier_block fsl_mc_nb; + /** * fsl_mc_device_remove - Remove an fsl-mc device from being visible to * Linux @@ -947,10 +968,28 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev) * We know that the device has an endpoint because we verified by * interrogating the firmware. This is the case when the device was not * yet discovered by the fsl-mc bus, thus the lookup returned NULL. - * Differentiate this case by returning EPROBE_DEFER. + * Force a rescan of the devices in this container and retry the lookup. 
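The reworked fsl_mc_get_endpoint() flow is: look up the endpoint, trigger one rescan on a miss, retry the lookup, and only then give up with -EPERM instead of the old -EPROBE_DEFER. A compressed userspace sketch of that control flow, with hypothetical stand-ins for fsl_mc_device_lookup() and dprc_scan_objects():

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define N_DEVS 4

static const char *known[N_DEVS] = { "dprc.1" };

static const char *lookup(const char *name)
{
	for (int i = 0; i < N_DEVS; i++)
		if (known[i] && strcmp(known[i], name) == 0)
			return known[i];
	return NULL;
}

/* Stand-in for dprc_scan_objects(): discovery may add new devices. */
static void rescan(void)
{
	known[1] = "dpmac.1";
}

static int get_endpoint(const char *name)
{
	if (!lookup(name)) {
		rescan();
		/* Still missing after a rescan: likely another container. */
		if (!lookup(name))
			return -EPERM;
	}
	return 0;
}

int main(void)
{
	printf("dpmac.1 -> %d\n", get_endpoint("dpmac.1"));	/* 0 */
	printf("dpni.9  -> %d\n", get_endpoint("dpni.9"));	/* -EPERM */
	return 0;
}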
+ */ + if (!endpoint) { + struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev); + + if (mutex_trylock(&mc_bus->scan_mutex)) { + err = dprc_scan_objects(mc_bus_dev, true); + mutex_unlock(&mc_bus->scan_mutex); + } + + if (err < 0) + return ERR_PTR(err); + } + + endpoint = fsl_mc_device_lookup(&endpoint_desc, mc_bus_dev); + /* + * This means that the endpoint might reside in a different isolation + * context (DPRC/container). Not much to do, so return a permission + * error. */ if (!endpoint) - return ERR_PTR(-EPROBE_DEFER); + return ERR_PTR(-EPERM); return endpoint; } @@ -1089,17 +1128,6 @@ static int fsl_mc_bus_probe(struct platform_device *pdev) } if (mc->fsl_mc_regs) { - /* - * Some bootloaders pause the MC firmware before booting the - * kernel so that MC will not cause faults as soon as the - * SMMU probes due to the fact that there's no configuration - * in place for MC. - * At this point MC should have all its SMMU setup done so make - * sure it is resumed. - */ - writel(readl(mc->fsl_mc_regs + FSL_MC_GCR1) & (~GCR1_P1_STOP), - mc->fsl_mc_regs + FSL_MC_GCR1); - if (IS_ENABLED(CONFIG_ACPI) && !dev_of_node(&pdev->dev)) { mc_stream_id = readl(mc->fsl_mc_regs + FSL_MC_FAPR); /* @@ -1113,11 +1141,25 @@ static int fsl_mc_bus_probe(struct platform_device *pdev) error = acpi_dma_configure_id(&pdev->dev, DEV_DMA_COHERENT, &mc_stream_id); + if (error == -EPROBE_DEFER) + return error; if (error) dev_warn(&pdev->dev, "failed to configure dma: %d.\n", error); } + + /* + * Some bootloaders pause the MC firmware before booting the + * kernel so that MC will not cause faults as soon as the + * SMMU probes due to the fact that there's no configuration + * in place for MC. + * At this point MC should have all its SMMU setup done so make + * sure it is resumed. + */ + writel(readl(mc->fsl_mc_regs + FSL_MC_GCR1) & + (~(GCR1_P1_STOP | GCR1_P2_STOP)), + mc->fsl_mc_regs + FSL_MC_GCR1); } /* @@ -1126,6 +1168,8 @@ static int fsl_mc_bus_probe(struct platform_device *pdev) plat_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); mc_portal_phys_addr = plat_res->start; mc_portal_size = resource_size(plat_res); + mc_portal_base_phys_addr = mc_portal_phys_addr & ~0x3ffffff; + error = fsl_create_mc_io(&pdev->dev, mc_portal_phys_addr, mc_portal_size, NULL, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, &mc_io); @@ -1199,9 +1243,26 @@ static int fsl_mc_bus_remove(struct platform_device *pdev) fsl_destroy_mc_io(mc->root_mc_bus_dev->mc_io); mc->root_mc_bus_dev->mc_io = NULL; + bus_unregister_notifier(&fsl_mc_bus_type, &fsl_mc_nb); + + if (mc->fsl_mc_regs) { + /* + * Pause the MC firmware so that it doesn't crash in certain + * scenarios, such as kexec.
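Both the pause paths (bus notifier, shutdown, remove) and the resume path (probe) boil down to a read-modify-write of the same GCR1 register: set or clear GCR1_P1_STOP and GCR1_P2_STOP while leaving the remaining bits untouched. A sketch of the bit manipulation against plain memory; the driver itself does the equivalent through readl()/writel() on the ioremapped register:

#include <stdint.h>
#include <stdio.h>

#define GCR1_P1_STOP (1u << 31)
#define GCR1_P2_STOP (1u << 30)

/* Ordinary memory standing in for the mapped GCR1 register. */
static volatile uint32_t fake_gcr1 = 0x0000beef;

static void mc_pause(volatile uint32_t *gcr1)
{
	*gcr1 = *gcr1 | (GCR1_P1_STOP | GCR1_P2_STOP);	/* set stop bits */
}

static void mc_resume(volatile uint32_t *gcr1)
{
	*gcr1 = *gcr1 & ~(GCR1_P1_STOP | GCR1_P2_STOP);	/* clear stop bits */
}

int main(void)
{
	mc_pause(&fake_gcr1);
	printf("paused:  0x%08x\n", (unsigned int)fake_gcr1);
	mc_resume(&fake_gcr1);
	printf("resumed: 0x%08x\n", (unsigned int)fake_gcr1);
	return 0;
}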
+ */ + writel(readl(mc->fsl_mc_regs + FSL_MC_GCR1) | + (GCR1_P1_STOP | GCR1_P2_STOP), + mc->fsl_mc_regs + FSL_MC_GCR1); + } + return 0; } +static void fsl_mc_bus_shutdown(struct platform_device *pdev) +{ + fsl_mc_bus_remove(pdev); +} + static const struct of_device_id fsl_mc_bus_match_table[] = { {.compatible = "fsl,qoriq-mc",}, {}, @@ -1224,6 +1285,45 @@ static struct platform_driver fsl_mc_bus_driver = { }, .probe = fsl_mc_bus_probe, .remove = fsl_mc_bus_remove, + .shutdown = fsl_mc_bus_shutdown, +}; + +static int fsl_mc_bus_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct device *dev = data; + struct resource *res; + void __iomem *fsl_mc_regs; + + if (action != BUS_NOTIFY_ADD_DEVICE) + return 0; + + if (!of_match_device(fsl_mc_bus_match_table, dev) && + !acpi_match_device(fsl_mc_bus_acpi_match_table, dev)) + return 0; + + res = platform_get_resource(to_platform_device(dev), IORESOURCE_MEM, 1); + if (!res) + return 0; + + fsl_mc_regs = ioremap(res->start, resource_size(res)); + if (!fsl_mc_regs) + return 0; + + /* + * Make sure that the MC firmware is paused before the IOMMU setup for + * it is done or otherwise the firmware will crash right after the SMMU + * gets probed and enabled. + */ + writel(readl(fsl_mc_regs + FSL_MC_GCR1) | (GCR1_P1_STOP | GCR1_P2_STOP), + fsl_mc_regs + FSL_MC_GCR1); + iounmap(fsl_mc_regs); + + return 0; +} + +static struct notifier_block fsl_mc_nb = { + .notifier_call = fsl_mc_bus_notifier, }; static int __init fsl_mc_bus_driver_init(void) @@ -1250,7 +1350,7 @@ static int __init fsl_mc_bus_driver_init(void) if (error < 0) goto error_cleanup_dprc_driver; - return 0; + return bus_register_notifier(&platform_bus_type, &fsl_mc_nb); error_cleanup_dprc_driver: dprc_driver_exit(); diff --git a/drivers/bus/mhi/core/boot.c b/drivers/bus/mhi/core/boot.c index 8100cf51cd09..0a972620a403 100644 --- a/drivers/bus/mhi/core/boot.c +++ b/drivers/bus/mhi/core/boot.c @@ -302,8 +302,8 @@ void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl, struct mhi_buf *mhi_buf = image_info->mhi_buf; for (i = 0; i < image_info->entries; i++, mhi_buf++) - mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf, - mhi_buf->dma_addr); + dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len, + mhi_buf->buf, mhi_buf->dma_addr); kfree(image_info->mhi_buf); kfree(image_info); @@ -339,8 +339,8 @@ int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl, vec_size = sizeof(struct bhi_vec_entry) * i; mhi_buf->len = vec_size; - mhi_buf->buf = mhi_alloc_coherent(mhi_cntrl, vec_size, - &mhi_buf->dma_addr, + mhi_buf->buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, + vec_size, &mhi_buf->dma_addr, GFP_KERNEL); if (!mhi_buf->buf) goto error_alloc_segment; @@ -354,8 +354,8 @@ int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl, error_alloc_segment: for (--i, --mhi_buf; i >= 0; i--, mhi_buf--) - mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf, - mhi_buf->dma_addr); + dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len, + mhi_buf->buf, mhi_buf->dma_addr); error_alloc_mhi_buf: kfree(img_info); @@ -442,7 +442,8 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl) if (size > firmware->size) size = firmware->size; - buf = mhi_alloc_coherent(mhi_cntrl, size, &dma_addr, GFP_KERNEL); + buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, size, &dma_addr, + GFP_KERNEL); if (!buf) { release_firmware(firmware); goto error_fw_load; @@ -451,7 +452,7 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl) /* Download image using BHI */ memcpy(buf, firmware->data, 
size); ret = mhi_fw_load_bhi(mhi_cntrl, dma_addr, size); - mhi_free_coherent(mhi_cntrl, size, buf, dma_addr); + dma_free_coherent(mhi_cntrl->cntrl_dev, size, buf, dma_addr); /* Error or in EDL mode, we're done */ if (ret) { diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c index c81b377fca8f..5aaca6d0f52b 100644 --- a/drivers/bus/mhi/core/init.c +++ b/drivers/bus/mhi/core/init.c @@ -129,7 +129,7 @@ static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl, u64 len) { ring->alloc_size = len + (len - 1); - ring->pre_aligned = mhi_alloc_coherent(mhi_cntrl, ring->alloc_size, + ring->pre_aligned = dma_alloc_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, &ring->dma_handle, GFP_KERNEL); if (!ring->pre_aligned) return -ENOMEM; @@ -221,13 +221,13 @@ void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl) mhi_cmd = mhi_cntrl->mhi_cmd; for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) { ring = &mhi_cmd->ring; - mhi_free_coherent(mhi_cntrl, ring->alloc_size, + dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, ring->pre_aligned, ring->dma_handle); ring->base = NULL; ring->iommu_base = 0; } - mhi_free_coherent(mhi_cntrl, + dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS, mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr); @@ -237,17 +237,17 @@ void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl) continue; ring = &mhi_event->ring; - mhi_free_coherent(mhi_cntrl, ring->alloc_size, + dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, ring->pre_aligned, ring->dma_handle); ring->base = NULL; ring->iommu_base = 0; } - mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) * + dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) * mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt, mhi_ctxt->er_ctxt_addr); - mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) * + dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) * mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt, mhi_ctxt->chan_ctxt_addr); @@ -275,7 +275,7 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl) return -ENOMEM; /* Setup channel ctxt */ - mhi_ctxt->chan_ctxt = mhi_alloc_coherent(mhi_cntrl, + mhi_ctxt->chan_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) * mhi_cntrl->max_chan, &mhi_ctxt->chan_ctxt_addr, @@ -307,7 +307,7 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl) } /* Setup event context */ - mhi_ctxt->er_ctxt = mhi_alloc_coherent(mhi_cntrl, + mhi_ctxt->er_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) * mhi_cntrl->total_ev_rings, &mhi_ctxt->er_ctxt_addr, @@ -354,7 +354,7 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl) /* Setup cmd context */ ret = -ENOMEM; - mhi_ctxt->cmd_ctxt = mhi_alloc_coherent(mhi_cntrl, + mhi_ctxt->cmd_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS, &mhi_ctxt->cmd_ctxt_addr, @@ -389,10 +389,10 @@ error_alloc_cmd: for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) { struct mhi_ring *ring = &mhi_cmd->ring; - mhi_free_coherent(mhi_cntrl, ring->alloc_size, + dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, ring->pre_aligned, ring->dma_handle); } - mhi_free_coherent(mhi_cntrl, + dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS, mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr); i = mhi_cntrl->total_ev_rings; @@ -405,15 +405,15 @@ error_alloc_er: if (mhi_event->offload_ev) continue; - mhi_free_coherent(mhi_cntrl, ring->alloc_size, + 
dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, ring->pre_aligned, ring->dma_handle); } - mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) * + dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) * mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt, mhi_ctxt->er_ctxt_addr); error_alloc_er_ctxt: - mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) * + dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) * mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt, mhi_ctxt->chan_ctxt_addr); @@ -567,7 +567,7 @@ void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl, if (!chan_ctxt->rbase) /* Already uninitialized */ return; - mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size, + dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size, tre_ring->pre_aligned, tre_ring->dma_handle); vfree(buf_ring->base); @@ -610,7 +610,7 @@ int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl, buf_ring->base = vzalloc(buf_ring->len); if (!buf_ring->base) { - mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size, + dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size, tre_ring->pre_aligned, tre_ring->dma_handle); return -ENOMEM; } @@ -885,7 +885,8 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl, if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->regs || !mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put || !mhi_cntrl->status_cb || !mhi_cntrl->read_reg || - !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs || !mhi_cntrl->irq) + !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs || + !mhi_cntrl->irq || !mhi_cntrl->reg_len) return -EINVAL; ret = parse_config(mhi_cntrl, config); @@ -1063,7 +1064,7 @@ EXPORT_SYMBOL_GPL(mhi_free_controller); int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl) { struct device *dev = &mhi_cntrl->mhi_dev->dev; - u32 bhie_off; + u32 bhi_off, bhie_off; int ret; mutex_lock(&mhi_cntrl->pm_mutex); @@ -1072,29 +1073,51 @@ int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl) if (ret) goto error_dev_ctxt; - /* - * Allocate RDDM table if specified, this table is for debugging purpose - */ - if (mhi_cntrl->rddm_size) { - mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image, - mhi_cntrl->rddm_size); + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &bhi_off); + if (ret) { + dev_err(dev, "Error getting BHI offset\n"); + goto error_reg_offset; + } - /* - * This controller supports RDDM, so we need to manually clear - * BHIE RX registers since POR values are undefined. - */ + if (bhi_off >= mhi_cntrl->reg_len) { + dev_err(dev, "BHI offset: 0x%x is out of range: 0x%zx\n", + bhi_off, mhi_cntrl->reg_len); + ret = -EINVAL; + goto error_reg_offset; + } + mhi_cntrl->bhi = mhi_cntrl->regs + bhi_off; + + if (mhi_cntrl->fbc_download || mhi_cntrl->rddm_size) { ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF, &bhie_off); if (ret) { dev_err(dev, "Error getting BHIE offset\n"); - goto bhie_error; + goto error_reg_offset; } + if (bhie_off >= mhi_cntrl->reg_len) { + dev_err(dev, + "BHIe offset: 0x%x is out of range: 0x%zx\n", + bhie_off, mhi_cntrl->reg_len); + ret = -EINVAL; + goto error_reg_offset; + } mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off; + } + + if (mhi_cntrl->rddm_size) { + /* + * This controller supports RDDM, so we need to manually clear + * BHIE RX registers since POR values are undefined. 
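The larger change in this mhi hunk is offset validation before pointer derivation: the BHIOFF/BHIEOFF values read back from the device are only trusted when they fall inside the mapped register window (reg_len); otherwise power-up preparation fails with -EINVAL rather than computing an out-of-range base pointer. A minimal sketch of that check, with hypothetical names:

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Derive a sub-region base only after bounds-checking the reported offset. */
static int map_sub_region(uint8_t *regs, size_t reg_len, uint32_t off,
			  uint8_t **base)
{
	if (off >= reg_len)
		return -EINVAL;	/* device reported a bogus offset */
	*base = regs + off;
	return 0;
}

int main(void)
{
	uint8_t bar[0x1000];
	uint8_t *bhi;

	if (map_sub_region(bar, sizeof(bar), 0x2000, &bhi) == -EINVAL)
		printf("rejected out-of-range offset 0x2000\n");
	if (map_sub_region(bar, sizeof(bar), 0x0100, &bhi) == 0)
		printf("bhi mapped at window offset 0x100\n");
	return 0;
}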
+ */ memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS, 0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS + 4); - + /* + * Allocate RDDM table for debugging purpose if specified + */ + mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image, + mhi_cntrl->rddm_size); if (mhi_cntrl->rddm_image) mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image); } @@ -1103,11 +1126,8 @@ int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl) return 0; -bhie_error: - if (mhi_cntrl->rddm_image) { - mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image); - mhi_cntrl->rddm_image = NULL; - } +error_reg_offset: + mhi_deinit_dev_ctxt(mhi_cntrl); error_dev_ctxt: mutex_unlock(&mhi_cntrl->pm_mutex); @@ -1128,6 +1148,9 @@ void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl) mhi_cntrl->rddm_image = NULL; } + mhi_cntrl->bhi = NULL; + mhi_cntrl->bhie = NULL; + mhi_deinit_dev_ctxt(mhi_cntrl); } EXPORT_SYMBOL_GPL(mhi_unprepare_after_power_down); diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h index 5b9ea66b92dc..721739c5e0d5 100644 --- a/drivers/bus/mhi/core/internal.h +++ b/drivers/bus/mhi/core/internal.h @@ -682,7 +682,7 @@ void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, struct image_info *img_info); void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl); int mhi_prepare_channel(struct mhi_controller *mhi_cntrl, - struct mhi_chan *mhi_chan); + struct mhi_chan *mhi_chan, unsigned int flags); int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan); void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl, @@ -690,26 +690,6 @@ void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl, void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan); -/* Memory allocation methods */ -static inline void *mhi_alloc_coherent(struct mhi_controller *mhi_cntrl, - size_t size, - dma_addr_t *dma_handle, - gfp_t gfp) -{ - void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, size, dma_handle, - gfp); - - return buf; -} - -static inline void mhi_free_coherent(struct mhi_controller *mhi_cntrl, - size_t size, - void *vaddr, - dma_addr_t dma_handle) -{ - dma_free_coherent(mhi_cntrl->cntrl_dev, size, vaddr, dma_handle); -} - /* Event processing methods */ void mhi_ctrl_ev_task(unsigned long data); void mhi_ev_task(unsigned long data); diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c index 22acde118bc3..c01ec2fef02c 100644 --- a/drivers/bus/mhi/core/main.c +++ b/drivers/bus/mhi/core/main.c @@ -193,7 +193,7 @@ int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl, int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl, struct mhi_buf_info *buf_info) { - void *buf = mhi_alloc_coherent(mhi_cntrl, buf_info->len, + void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, buf_info->len, &buf_info->p_addr, GFP_ATOMIC); if (!buf) @@ -220,8 +220,8 @@ void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl, if (buf_info->dir == DMA_FROM_DEVICE) memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len); - mhi_free_coherent(mhi_cntrl, buf_info->len, buf_info->bb_addr, - buf_info->p_addr); + dma_free_coherent(mhi_cntrl->cntrl_dev, buf_info->len, + buf_info->bb_addr, buf_info->p_addr); } static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl, @@ -773,11 +773,18 @@ static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl, cmd_pkt = mhi_to_virtual(mhi_ring, ptr); chan = MHI_TRE_GET_CMD_CHID(cmd_pkt); - mhi_chan = &mhi_cntrl->mhi_chan[chan]; - write_lock_bh(&mhi_chan->lock); - 
mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre); - complete(&mhi_chan->completion); - write_unlock_bh(&mhi_chan->lock); + + if (chan < mhi_cntrl->max_chan && + mhi_cntrl->mhi_chan[chan].configured) { + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + write_lock_bh(&mhi_chan->lock); + mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre); + complete(&mhi_chan->completion); + write_unlock_bh(&mhi_chan->lock); + } else { + dev_err(&mhi_cntrl->mhi_dev->dev, + "Completion packet for invalid channel ID: %d\n", chan); + } mhi_del_ring_element(mhi_cntrl, mhi_ring); } @@ -1423,7 +1430,7 @@ exit_unprepare_channel: } int mhi_prepare_channel(struct mhi_controller *mhi_cntrl, - struct mhi_chan *mhi_chan) + struct mhi_chan *mhi_chan, unsigned int flags) { int ret = 0; struct device *dev = &mhi_chan->mhi_dev->dev; @@ -1448,6 +1455,9 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl, if (ret) goto error_pm_state; + if (mhi_chan->dir == DMA_FROM_DEVICE) + mhi_chan->pre_alloc = !!(flags & MHI_CH_INBOUND_ALLOC_BUFS); + /* Pre-allocate buffer for xfer ring */ if (mhi_chan->pre_alloc) { int nr_el = get_nr_avail_ring_elements(mhi_cntrl, @@ -1603,7 +1613,7 @@ void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan) } /* Move channel to start state */ -int mhi_prepare_for_transfer(struct mhi_device *mhi_dev) +int mhi_prepare_for_transfer(struct mhi_device *mhi_dev, unsigned int flags) { int ret, dir; struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; @@ -1614,7 +1624,7 @@ int mhi_prepare_for_transfer(struct mhi_device *mhi_dev) if (!mhi_chan) continue; - ret = mhi_prepare_channel(mhi_cntrl, mhi_chan); + ret = mhi_prepare_channel(mhi_cntrl, mhi_chan, flags); if (ret) goto error_open_chan; } diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c index bbf6cd04861e..fb99e3727155 100644 --- a/drivers/bus/mhi/core/pm.c +++ b/drivers/bus/mhi/core/pm.c @@ -1059,28 +1059,8 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl) if (ret) goto error_setup_irq; - /* Setup BHI offset & INTVEC */ + /* Setup BHI INTVEC */ write_lock_irq(&mhi_cntrl->pm_lock); - ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &val); - if (ret) { - write_unlock_irq(&mhi_cntrl->pm_lock); - goto error_bhi_offset; - } - - mhi_cntrl->bhi = mhi_cntrl->regs + val; - - /* Setup BHIE offset */ - if (mhi_cntrl->fbc_download) { - ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF, &val); - if (ret) { - write_unlock_irq(&mhi_cntrl->pm_lock); - dev_err(dev, "Error reading BHIE offset\n"); - goto error_bhi_offset; - } - - mhi_cntrl->bhie = mhi_cntrl->regs + val; - } - mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); mhi_cntrl->pm_state = MHI_PM_POR; mhi_cntrl->ee = MHI_EE_MAX; @@ -1089,12 +1069,16 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl) /* Confirm that the device is in valid exec env */ if (!MHI_IN_PBL(current_ee) && current_ee != MHI_EE_AMSS) { - dev_err(dev, "Not a valid EE for power on\n"); + dev_err(dev, "%s is not a valid EE for power on\n", + TO_MHI_EXEC_STR(current_ee)); ret = -EIO; - goto error_bhi_offset; + goto error_async_power_up; } state = mhi_get_mhi_state(mhi_cntrl); + dev_dbg(dev, "Attempting power on with EE: %s, state: %s\n", + TO_MHI_EXEC_STR(current_ee), TO_MHI_STATE_STR(state)); + if (state == MHI_STATE_SYS_ERR) { mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET); ret = wait_event_timeout(mhi_cntrl->state_event, @@ -1110,7 +1094,7 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl) if (!ret) { ret = -EIO; dev_info(dev, "Failed to reset MHI due to syserr state\n"); - goto 
error_bhi_offset; + goto error_async_power_up; } /* @@ -1132,7 +1116,7 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl) return 0; -error_bhi_offset: +error_async_power_up: mhi_deinit_free_irq(mhi_cntrl); error_setup_irq: diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/pci_generic.c index ca3bc40427f8..4026d5c2ee79 100644 --- a/drivers/bus/mhi/pci_generic.c +++ b/drivers/bus/mhi/pci_generic.c @@ -32,6 +32,8 @@ * @edl: emergency download mode firmware path (if any) * @bar_num: PCI base address register to use for MHI MMIO register space * @dma_data_width: DMA transfer word size (32 or 64 bits) + * @sideband_wake: Devices using dedicated sideband GPIO for wakeup instead + * of inband wake support (such as sdx24) */ struct mhi_pci_dev_info { const struct mhi_controller_config *config; @@ -40,6 +42,7 @@ struct mhi_pci_dev_info { const char *edl; unsigned int bar_num; unsigned int dma_data_width; + bool sideband_wake; }; #define MHI_CHANNEL_CONFIG_UL(ch_num, ch_name, el_count, ev_ring) \ @@ -72,6 +75,22 @@ struct mhi_pci_dev_info { .doorbell_mode_switch = false, \ } +#define MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(ch_num, ch_name, el_count, ev_ring) \ + { \ + .num = ch_num, \ + .name = ch_name, \ + .num_elements = el_count, \ + .event_ring = ev_ring, \ + .dir = DMA_FROM_DEVICE, \ + .ee_mask = BIT(MHI_EE_AMSS), \ + .pollcfg = 0, \ + .doorbell = MHI_DB_BRST_DISABLE, \ + .lpm_notify = false, \ + .offload_channel = false, \ + .doorbell_mode_switch = false, \ + .auto_queue = true, \ + } + #define MHI_EVENT_CONFIG_CTRL(ev_ring, el_count) \ { \ .num_elements = el_count, \ @@ -210,7 +229,7 @@ static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = { MHI_CHANNEL_CONFIG_UL(14, "QMI", 4, 0), MHI_CHANNEL_CONFIG_DL(15, "QMI", 4, 0), MHI_CHANNEL_CONFIG_UL(20, "IPCR", 8, 0), - MHI_CHANNEL_CONFIG_DL(21, "IPCR", 8, 0), + MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 8, 0), MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0), MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0), MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 2), @@ -242,7 +261,8 @@ static const struct mhi_pci_dev_info mhi_qcom_sdx65_info = { .edl = "qcom/sdx65m/edl.mbn", .config = &modem_qcom_v1_mhiv_config, .bar_num = MHI_PCI_DEFAULT_BAR_NUM, - .dma_data_width = 32 + .dma_data_width = 32, + .sideband_wake = false, }; static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = { @@ -251,7 +271,8 @@ static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = { .edl = "qcom/sdx55m/edl.mbn", .config = &modem_qcom_v1_mhiv_config, .bar_num = MHI_PCI_DEFAULT_BAR_NUM, - .dma_data_width = 32 + .dma_data_width = 32, + .sideband_wake = false, }; static const struct mhi_pci_dev_info mhi_qcom_sdx24_info = { @@ -259,7 +280,8 @@ static const struct mhi_pci_dev_info mhi_qcom_sdx24_info = { .edl = "qcom/prog_firehose_sdx24.mbn", .config = &modem_qcom_v1_mhiv_config, .bar_num = MHI_PCI_DEFAULT_BAR_NUM, - .dma_data_width = 32 + .dma_data_width = 32, + .sideband_wake = true, }; static const struct mhi_channel_config mhi_quectel_em1xx_channels[] = { @@ -301,7 +323,8 @@ static const struct mhi_pci_dev_info mhi_quectel_em1xx_info = { .edl = "qcom/prog_firehose_sdx24.mbn", .config = &modem_quectel_em1xx_config, .bar_num = MHI_PCI_DEFAULT_BAR_NUM, - .dma_data_width = 32 + .dma_data_width = 32, + .sideband_wake = true, }; static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = { @@ -339,7 +362,42 @@ static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = { .edl = "qcom/sdx55m/edl.mbn", .config = &modem_foxconn_sdx55_config, .bar_num = 
MHI_PCI_DEFAULT_BAR_NUM, - .dma_data_width = 32 + .dma_data_width = 32, + .sideband_wake = false, +}; + +static const struct mhi_channel_config mhi_mv31_channels[] = { + MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 64, 0), + MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 64, 0), + /* MBIM Control Channel */ + MHI_CHANNEL_CONFIG_UL(12, "MBIM", 64, 0), + MHI_CHANNEL_CONFIG_DL(13, "MBIM", 64, 0), + /* MBIM Data Channel */ + MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 512, 2), + MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 512, 3), +}; + +static struct mhi_event_config mhi_mv31_events[] = { + MHI_EVENT_CONFIG_CTRL(0, 256), + MHI_EVENT_CONFIG_DATA(1, 256), + MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100), + MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101), +}; + +static const struct mhi_controller_config modem_mv31_config = { + .max_channels = 128, + .timeout_ms = 20000, + .num_channels = ARRAY_SIZE(mhi_mv31_channels), + .ch_cfg = mhi_mv31_channels, + .num_events = ARRAY_SIZE(mhi_mv31_events), + .event_cfg = mhi_mv31_events, +}; + +static const struct mhi_pci_dev_info mhi_mv31_info = { + .name = "cinterion-mv31", + .config = &modem_mv31_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, }; static const struct pci_device_id mhi_pci_id_table[] = { @@ -362,6 +420,9 @@ static const struct pci_device_id mhi_pci_id_table[] = { /* DW5930e (sdx55), Non-eSIM, It's also T99W175 */ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b1), .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info }, + /* MV31-W (Cinterion) */ + { PCI_DEVICE(0x1269, 0x00b3), + .driver_data = (kernel_ulong_t) &mhi_mv31_info }, { } }; MODULE_DEVICE_TABLE(pci, mhi_pci_id_table); @@ -463,6 +524,7 @@ static int mhi_pci_claim(struct mhi_controller *mhi_cntrl, return err; } mhi_cntrl->regs = pcim_iomap_table(pdev)[bar_num]; + mhi_cntrl->reg_len = pci_resource_len(pdev, bar_num); err = pci_set_dma_mask(pdev, dma_mask); if (err) { @@ -640,9 +702,12 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) mhi_cntrl->status_cb = mhi_pci_status_cb; mhi_cntrl->runtime_get = mhi_pci_runtime_get; mhi_cntrl->runtime_put = mhi_pci_runtime_put; - mhi_cntrl->wake_get = mhi_pci_wake_get_nop; - mhi_cntrl->wake_put = mhi_pci_wake_put_nop; - mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop; + + if (info->sideband_wake) { + mhi_cntrl->wake_get = mhi_pci_wake_get_nop; + mhi_cntrl->wake_put = mhi_pci_wake_put_nop; + mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop; + } err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width)); if (err) diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 38cb116ed433..148a4dd8cb9a 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -100,6 +100,7 @@ static const char * const clock_names[SYSC_MAX_CLOCKS] = { * @cookie: data used by legacy platform callbacks * @name: name if available * @revision: interconnect target module revision + * @reserved: target module is reserved and already in use * @enabled: sysc runtime enabled status * @needs_resume: runtime resume needed on resume from suspend * @child_needs_resume: runtime resume needed for child on resume from suspend @@ -130,6 +131,7 @@ struct sysc { struct ti_sysc_cookie cookie; const char *name; u32 revision; + unsigned int reserved:1; unsigned int enabled:1; unsigned int needs_resume:1; unsigned int child_needs_resume:1; @@ -2951,6 +2953,8 @@ static int sysc_init_soc(struct sysc *ddata) case SOC_3430 ... 
SOC_3630: sysc_add_disabled(0x48304000); /* timer12 */ break; + case SOC_AM3: + sysc_add_disabled(0x48310000); /* rng */ default: break; } @@ -3093,7 +3097,9 @@ static int sysc_probe(struct platform_device *pdev) return error; error = sysc_check_active_timer(ddata); - if (error) + if (error == -ENXIO) + ddata->reserved = true; + else if (error) return error; error = sysc_get_clocks(ddata); @@ -3130,11 +3136,15 @@ static int sysc_probe(struct platform_device *pdev) sysc_show_registers(ddata); ddata->dev->type = &sysc_device_type; - error = of_platform_populate(ddata->dev->of_node, sysc_match_table, - pdata ? pdata->auxdata : NULL, - ddata->dev); - if (error) - goto err; + + if (!ddata->reserved) { + error = of_platform_populate(ddata->dev->of_node, + sysc_match_table, + pdata ? pdata->auxdata : NULL, + ddata->dev); + if (error) + goto err; + } INIT_DELAYED_WORK(&ddata->idle_work, ti_sysc_idle); diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index ea3ead00f30f..740811893c57 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -427,8 +427,6 @@ config ADI and SSM (Silicon Secured Memory). Intended consumers of this driver include crash and makedumpfile. -endmenu - config RANDOM_TRUST_CPU bool "Trust the CPU manufacturer to initialize Linux's CRNG" depends on ARCH_RANDOM @@ -452,3 +450,5 @@ config RANDOM_TRUST_BOOTLOADER booloader is trustworthy so it will be added to the kernel's entropy pool. Otherwise, say N here so it will be regarded as device input that only mixes the entropy pool. + +endmenu diff --git a/drivers/char/powernv-op-panel.c b/drivers/char/powernv-op-panel.c index 027484ecfb0d..3c99696b145e 100644 --- a/drivers/char/powernv-op-panel.c +++ b/drivers/char/powernv-op-panel.c @@ -75,6 +75,7 @@ static int __op_panel_update_display(void) rc); break; } + break; case OPAL_SUCCESS: break; default: diff --git a/drivers/char/tpm/tpm_ftpm_tee.c b/drivers/char/tpm/tpm_ftpm_tee.c index 2ccdf8ac6994..6e3235565a4d 100644 --- a/drivers/char/tpm/tpm_ftpm_tee.c +++ b/drivers/char/tpm/tpm_ftpm_tee.c @@ -254,11 +254,11 @@ static int ftpm_tee_probe(struct device *dev) pvt_data->session = sess_arg.session; /* Allocate dynamic shared memory with fTPM TA */ - pvt_data->shm = tee_shm_alloc(pvt_data->ctx, - MAX_COMMAND_SIZE + MAX_RESPONSE_SIZE, - TEE_SHM_MAPPED | TEE_SHM_DMA_BUF); + pvt_data->shm = tee_shm_alloc_kernel_buf(pvt_data->ctx, + MAX_COMMAND_SIZE + + MAX_RESPONSE_SIZE); if (IS_ERR(pvt_data->shm)) { - dev_err(dev, "%s: tee_shm_alloc failed\n", __func__); + dev_err(dev, "%s: tee_shm_alloc_kernel_buf failed\n", __func__); rc = -ENOMEM; goto out_shm_alloc; } diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c index be160764911b..f9d5b7334341 100644 --- a/drivers/clk/clk-devres.c +++ b/drivers/clk/clk-devres.c @@ -92,13 +92,20 @@ int __must_check devm_clk_bulk_get_optional(struct device *dev, int num_clks, } EXPORT_SYMBOL_GPL(devm_clk_bulk_get_optional); +static void devm_clk_bulk_release_all(struct device *dev, void *res) +{ + struct clk_bulk_devres *devres = res; + + clk_bulk_put_all(devres->num_clks, devres->clks); +} + int __must_check devm_clk_bulk_get_all(struct device *dev, struct clk_bulk_data **clks) { struct clk_bulk_devres *devres; int ret; - devres = devres_alloc(devm_clk_bulk_release, + devres = devres_alloc(devm_clk_bulk_release_all, sizeof(*devres), GFP_KERNEL); if (!devres) return -ENOMEM; diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c index 18117ce5ff85..5c75e3d906c2 100644 --- a/drivers/clk/clk-stm32f4.c +++ 
b/drivers/clk/clk-stm32f4.c @@ -526,7 +526,7 @@ struct stm32f4_pll { struct stm32f4_pll_post_div_data { int idx; - u8 pll_num; + int pll_idx; const char *name; const char *parent; u8 flag; @@ -557,13 +557,13 @@ static const struct clk_div_table post_divr_table[] = { #define MAX_POST_DIV 3 static const struct stm32f4_pll_post_div_data post_div_data[MAX_POST_DIV] = { - { CLK_I2SQ_PDIV, PLL_I2S, "plli2s-q-div", "plli2s-q", + { CLK_I2SQ_PDIV, PLL_VCO_I2S, "plli2s-q-div", "plli2s-q", CLK_SET_RATE_PARENT, STM32F4_RCC_DCKCFGR, 0, 5, 0, NULL}, - { CLK_SAIQ_PDIV, PLL_SAI, "pllsai-q-div", "pllsai-q", + { CLK_SAIQ_PDIV, PLL_VCO_SAI, "pllsai-q-div", "pllsai-q", CLK_SET_RATE_PARENT, STM32F4_RCC_DCKCFGR, 8, 5, 0, NULL }, - { NO_IDX, PLL_SAI, "pllsai-r-div", "pllsai-r", CLK_SET_RATE_PARENT, + { NO_IDX, PLL_VCO_SAI, "pllsai-r-div", "pllsai-r", CLK_SET_RATE_PARENT, STM32F4_RCC_DCKCFGR, 16, 2, 0, post_divr_table }, }; @@ -1774,7 +1774,7 @@ static void __init stm32f4_rcc_init(struct device_node *np) post_div->width, post_div->flag_div, post_div->div_table, - clks[post_div->pll_num], + clks[post_div->pll_idx], &stm32f4_clk_lock); if (post_div->idx != NO_IDX) diff --git a/drivers/clk/hisilicon/Kconfig b/drivers/clk/hisilicon/Kconfig index 5ecc37aaa118..c1ec75aa4ccd 100644 --- a/drivers/clk/hisilicon/Kconfig +++ b/drivers/clk/hisilicon/Kconfig @@ -18,6 +18,7 @@ config COMMON_CLK_HI3519 config COMMON_CLK_HI3559A bool "Hi3559A Clock Driver" depends on ARCH_HISI || COMPILE_TEST + select RESET_HISI default ARCH_HISI help Build the clock driver for hi3559a. diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c index 496900de0b0b..de36f58d551c 100644 --- a/drivers/clk/imx/clk-imx6q.c +++ b/drivers/clk/imx/clk-imx6q.c @@ -974,6 +974,6 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) hws[IMX6QDL_CLK_PLL3_USB_OTG]->clk); } - imx_register_uart_clocks(1); + imx_register_uart_clocks(2); } CLK_OF_DECLARE(imx6q, "fsl,imx6q-ccm", imx6q_clocks_init); diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c index 800b2fef1887..b2c142f3a649 100644 --- a/drivers/clk/qcom/clk-smd-rpm.c +++ b/drivers/clk/qcom/clk-smd-rpm.c @@ -467,7 +467,7 @@ DEFINE_CLK_SMD_RPM(msm8936, sysmmnoc_clk, sysmmnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, static struct clk_smd_rpm *msm8936_clks[] = { [RPM_SMD_PCNOC_CLK] = &msm8916_pcnoc_clk, - [RPM_SMD_PCNOC_A_CLK] = &msm8916_pcnoc_clk, + [RPM_SMD_PCNOC_A_CLK] = &msm8916_pcnoc_a_clk, [RPM_SMD_SNOC_CLK] = &msm8916_snoc_clk, [RPM_SMD_SNOC_A_CLK] = &msm8916_snoc_a_clk, [RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk, diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c index 51ed640e527b..4ece326ea233 100644 --- a/drivers/clk/qcom/gdsc.c +++ b/drivers/clk/qcom/gdsc.c @@ -357,27 +357,43 @@ static int gdsc_init(struct gdsc *sc) if (on < 0) return on; - /* - * Votable GDSCs can be ON due to Vote from other masters. - * If a Votable GDSC is ON, make sure we have a Vote. - */ - if ((sc->flags & VOTABLE) && on) - gdsc_enable(&sc->pd); + if (on) { + /* The regulator must be on, sync the kernel state */ + if (sc->rsupply) { + ret = regulator_enable(sc->rsupply); + if (ret < 0) + return ret; + } - /* - * Make sure the retain bit is set if the GDSC is already on, otherwise - * we end up turning off the GDSC and destroying all the register - * contents that we thought we were saving. - */ - if ((sc->flags & RETAIN_FF_ENABLE) && on) - gdsc_retain_ff_on(sc); + /* + * Votable GDSCs can be ON due to Vote from other masters. 
+ * If a Votable GDSC is ON, make sure we have a Vote. + */ + if (sc->flags & VOTABLE) { + ret = regmap_update_bits(sc->regmap, sc->gdscr, + SW_COLLAPSE_MASK, val); + if (ret) + return ret; + } + + /* Turn on HW trigger mode if supported */ + if (sc->flags & HW_CTRL) { + ret = gdsc_hwctrl(sc, true); + if (ret < 0) + return ret; + } - /* If ALWAYS_ON GDSCs are not ON, turn them ON */ - if (sc->flags & ALWAYS_ON) { - if (!on) - gdsc_enable(&sc->pd); + /* + * Make sure the retain bit is set if the GDSC is already on, + * otherwise we end up turning off the GDSC and destroying all + * the register contents that we thought we were saving. + */ + if (sc->flags & RETAIN_FF_ENABLE) + gdsc_retain_ff_on(sc); + } else if (sc->flags & ALWAYS_ON) { + /* If ALWAYS_ON GDSCs are not ON, turn them ON */ + gdsc_enable(&sc->pd); on = true; - sc->pd.flags |= GENPD_FLAG_ALWAYS_ON; } if (on || (sc->pwrsts & PWRSTS_RET)) @@ -385,6 +401,8 @@ static int gdsc_init(struct gdsc *sc) else gdsc_clear_mem_on(sc); + if (sc->flags & ALWAYS_ON) + sc->pd.flags |= GENPD_FLAG_ALWAYS_ON; if (!sc->pd.power_off) sc->pd.power_off = gdsc_disable; if (!sc->pd.power_on) diff --git a/drivers/clk/renesas/r9a07g044-cpg.c b/drivers/clk/renesas/r9a07g044-cpg.c index 50b5269586a4..ae24e0397d3c 100644 --- a/drivers/clk/renesas/r9a07g044-cpg.c +++ b/drivers/clk/renesas/r9a07g044-cpg.c @@ -30,8 +30,9 @@ enum clk_ids { CLK_PLL2_DIV20, CLK_PLL3, CLK_PLL3_DIV2, + CLK_PLL3_DIV2_4, + CLK_PLL3_DIV2_4_2, CLK_PLL3_DIV4, - CLK_PLL3_DIV8, CLK_PLL4, CLK_PLL5, CLK_PLL5_DIV2, @@ -42,12 +43,13 @@ enum clk_ids { }; /* Divider tables */ -static const struct clk_div_table dtable_3b[] = { +static const struct clk_div_table dtable_1_32[] = { {0, 1}, {1, 2}, {2, 4}, {3, 8}, {4, 32}, + {0, 0}, }; static const struct cpg_core_clk r9a07g044_core_clks[] __initconst = { @@ -66,47 +68,56 @@ static const struct cpg_core_clk r9a07g044_core_clks[] __initconst = { DEF_FIXED(".pll2_div20", CLK_PLL2_DIV20, CLK_PLL2, 1, 20), DEF_FIXED(".pll3_div2", CLK_PLL3_DIV2, CLK_PLL3, 1, 2), + DEF_FIXED(".pll3_div2_4", CLK_PLL3_DIV2_4, CLK_PLL3_DIV2, 1, 4), + DEF_FIXED(".pll3_div2_4_2", CLK_PLL3_DIV2_4_2, CLK_PLL3_DIV2_4, 1, 2), DEF_FIXED(".pll3_div4", CLK_PLL3_DIV4, CLK_PLL3, 1, 4), - DEF_FIXED(".pll3_div8", CLK_PLL3_DIV8, CLK_PLL3, 1, 8), /* Core output clk */ DEF_FIXED("I", R9A07G044_CLK_I, CLK_PLL1, 1, 1), DEF_DIV("P0", R9A07G044_CLK_P0, CLK_PLL2_DIV16, DIVPL2A, - dtable_3b, CLK_DIVIDER_HIWORD_MASK), + dtable_1_32, CLK_DIVIDER_HIWORD_MASK), DEF_FIXED("TSU", R9A07G044_CLK_TSU, CLK_PLL2_DIV20, 1, 1), - DEF_DIV("P1", R9A07G044_CLK_P1, CLK_PLL3_DIV8, - DIVPL3B, dtable_3b, CLK_DIVIDER_HIWORD_MASK), + DEF_DIV("P1", R9A07G044_CLK_P1, CLK_PLL3_DIV2_4, + DIVPL3B, dtable_1_32, CLK_DIVIDER_HIWORD_MASK), + DEF_DIV("P2", R9A07G044_CLK_P2, CLK_PLL3_DIV2_4_2, + DIVPL3A, dtable_1_32, CLK_DIVIDER_HIWORD_MASK), }; static struct rzg2l_mod_clk r9a07g044_mod_clks[] = { - DEF_MOD("gic", R9A07G044_CLK_GIC600, - R9A07G044_CLK_P1, - 0x514, BIT(0), (BIT(0) | BIT(1))), - DEF_MOD("ia55", R9A07G044_CLK_IA55, - R9A07G044_CLK_P1, - 0x518, (BIT(0) | BIT(1)), BIT(0)), - DEF_MOD("scif0", R9A07G044_CLK_SCIF0, - R9A07G044_CLK_P0, - 0x584, BIT(0), BIT(0)), - DEF_MOD("scif1", R9A07G044_CLK_SCIF1, - R9A07G044_CLK_P0, - 0x584, BIT(1), BIT(1)), - DEF_MOD("scif2", R9A07G044_CLK_SCIF2, - R9A07G044_CLK_P0, - 0x584, BIT(2), BIT(2)), - DEF_MOD("scif3", R9A07G044_CLK_SCIF3, - R9A07G044_CLK_P0, - 0x584, BIT(3), BIT(3)), - DEF_MOD("scif4", R9A07G044_CLK_SCIF4, - R9A07G044_CLK_P0, - 0x584, BIT(4), BIT(4)), - DEF_MOD("sci0", 
R9A07G044_CLK_SCI0, - R9A07G044_CLK_P0, - 0x588, BIT(0), BIT(0)), + DEF_MOD("gic", R9A07G044_GIC600_GICCLK, R9A07G044_CLK_P1, + 0x514, 0), + DEF_MOD("ia55_pclk", R9A07G044_IA55_PCLK, R9A07G044_CLK_P2, + 0x518, 0), + DEF_MOD("ia55_clk", R9A07G044_IA55_CLK, R9A07G044_CLK_P1, + 0x518, 1), + DEF_MOD("scif0", R9A07G044_SCIF0_CLK_PCK, R9A07G044_CLK_P0, + 0x584, 0), + DEF_MOD("scif1", R9A07G044_SCIF1_CLK_PCK, R9A07G044_CLK_P0, + 0x584, 1), + DEF_MOD("scif2", R9A07G044_SCIF2_CLK_PCK, R9A07G044_CLK_P0, + 0x584, 2), + DEF_MOD("scif3", R9A07G044_SCIF3_CLK_PCK, R9A07G044_CLK_P0, + 0x584, 3), + DEF_MOD("scif4", R9A07G044_SCIF4_CLK_PCK, R9A07G044_CLK_P0, + 0x584, 4), + DEF_MOD("sci0", R9A07G044_SCI0_CLKP, R9A07G044_CLK_P0, + 0x588, 0), +}; + +static struct rzg2l_reset r9a07g044_resets[] = { + DEF_RST(R9A07G044_GIC600_GICRESET_N, 0x814, 0), + DEF_RST(R9A07G044_GIC600_DBG_GICRESET_N, 0x814, 1), + DEF_RST(R9A07G044_IA55_RESETN, 0x818, 0), + DEF_RST(R9A07G044_SCIF0_RST_SYSTEM_N, 0x884, 0), + DEF_RST(R9A07G044_SCIF1_RST_SYSTEM_N, 0x884, 1), + DEF_RST(R9A07G044_SCIF2_RST_SYSTEM_N, 0x884, 2), + DEF_RST(R9A07G044_SCIF3_RST_SYSTEM_N, 0x884, 3), + DEF_RST(R9A07G044_SCIF4_RST_SYSTEM_N, 0x884, 4), + DEF_RST(R9A07G044_SCI0_RST, 0x888, 0), }; static const unsigned int r9a07g044_crit_mod_clks[] __initconst = { - MOD_CLK_BASE + R9A07G044_CLK_GIC600, + MOD_CLK_BASE + R9A07G044_GIC600_GICCLK, }; const struct rzg2l_cpg_info r9a07g044_cpg_info = { @@ -123,5 +134,9 @@ const struct rzg2l_cpg_info r9a07g044_cpg_info = { /* Module Clocks */ .mod_clks = r9a07g044_mod_clks, .num_mod_clks = ARRAY_SIZE(r9a07g044_mod_clks), - .num_hw_mod_clks = R9A07G044_CLK_MIPI_DSI_PIN + 1, + .num_hw_mod_clks = R9A07G044_TSU_PCLK + 1, + + /* Resets */ + .resets = r9a07g044_resets, + .num_resets = ARRAY_SIZE(r9a07g044_resets), }; diff --git a/drivers/clk/renesas/renesas-rzg2l-cpg.c b/drivers/clk/renesas/renesas-rzg2l-cpg.c index 5009b9e48b13..e7c59af2a1d8 100644 --- a/drivers/clk/renesas/renesas-rzg2l-cpg.c +++ b/drivers/clk/renesas/renesas-rzg2l-cpg.c @@ -47,9 +47,9 @@ #define SDIV(val) DIV_RSMASK(val, 0, 0x7) #define CLK_ON_R(reg) (reg) -#define CLK_MON_R(reg) (0x680 - 0x500 + (reg)) -#define CLK_RST_R(reg) (0x800 - 0x500 + (reg)) -#define CLK_MRST_R(reg) (0x980 - 0x500 + (reg)) +#define CLK_MON_R(reg) (0x180 + (reg)) +#define CLK_RST_R(reg) (reg) +#define CLK_MRST_R(reg) (0x180 + (reg)) #define GET_REG_OFFSET(val) ((val >> 20) & 0xfff) #define GET_REG_SAMPLL_CLK1(val) ((val >> 22) & 0xfff) @@ -78,6 +78,7 @@ struct rzg2l_cpg_priv { struct clk **clks; unsigned int num_core_clks; unsigned int num_mod_clks; + unsigned int num_resets; unsigned int last_dt_core_clk; struct raw_notifier_head notifiers; @@ -315,15 +316,13 @@ fail: * * @hw: handle between common and hardware-specific interfaces * @off: register offset - * @onoff: ON/MON bits - * @reset: reset bits + * @bit: ON/MON bit * @priv: CPG/MSTP private data */ struct mstp_clock { struct clk_hw hw; u16 off; - u8 onoff; - u8 reset; + u8 bit; struct rzg2l_cpg_priv *priv; }; @@ -337,6 +336,7 @@ static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable) struct device *dev = priv->dev; unsigned long flags; unsigned int i; + u32 bitmask = BIT(clock->bit); u32 value; if (!clock->off) { @@ -349,9 +349,9 @@ static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable) spin_lock_irqsave(&priv->rmw_lock, flags); if (enable) - value = (clock->onoff << 16) | clock->onoff; + value = (bitmask << 16) | bitmask; else - value = clock->onoff << 16; + value = bitmask << 16; writel(value, priv->base 
+ CLK_ON_R(reg)); spin_unlock_irqrestore(&priv->rmw_lock, flags); @@ -360,7 +360,7 @@ static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable) return 0; for (i = 1000; i > 0; --i) { - if (((readl(priv->base + CLK_MON_R(reg))) & clock->onoff)) + if (((readl(priv->base + CLK_MON_R(reg))) & bitmask)) break; cpu_relax(); } @@ -388,6 +388,7 @@ static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw) { struct mstp_clock *clock = to_mod_clock(hw); struct rzg2l_cpg_priv *priv = clock->priv; + u32 bitmask = BIT(clock->bit); u32 value; if (!clock->off) { @@ -397,7 +398,7 @@ static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw) value = readl(priv->base + CLK_MON_R(clock->off)); - return !(value & clock->onoff); + return !(value & bitmask); } static const struct clk_ops rzg2l_mod_clock_ops = { @@ -457,8 +458,7 @@ rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod, init.num_parents = 1; clock->off = mod->off; - clock->onoff = mod->onoff; - clock->reset = mod->reset; + clock->bit = mod->bit; clock->priv = priv; clock->hw.init = &init; @@ -483,12 +483,11 @@ static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev, { struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev); const struct rzg2l_cpg_info *info = priv->info; - unsigned int reg = info->mod_clks[id].off; - u32 dis = info->mod_clks[id].reset; + unsigned int reg = info->resets[id].off; + u32 dis = BIT(info->resets[id].bit); u32 we = dis << 16; - dev_dbg(rcdev->dev, "reset name:%s id:%ld offset:0x%x\n", - info->mod_clks[id].name, id, CLK_RST_R(reg)); + dev_dbg(rcdev->dev, "reset id:%ld offset:0x%x\n", id, CLK_RST_R(reg)); /* Reset module */ writel(we, priv->base + CLK_RST_R(reg)); @@ -507,11 +506,10 @@ static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev, { struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev); const struct rzg2l_cpg_info *info = priv->info; - unsigned int reg = info->mod_clks[id].off; - u32 value = info->mod_clks[id].reset << 16; + unsigned int reg = info->resets[id].off; + u32 value = BIT(info->resets[id].bit) << 16; - dev_dbg(rcdev->dev, "assert name:%s id:%ld offset:0x%x\n", - info->mod_clks[id].name, id, CLK_RST_R(reg)); + dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, CLK_RST_R(reg)); writel(value, priv->base + CLK_RST_R(reg)); return 0; @@ -522,12 +520,12 @@ static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev, { struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev); const struct rzg2l_cpg_info *info = priv->info; - unsigned int reg = info->mod_clks[id].off; - u32 dis = info->mod_clks[id].reset; + unsigned int reg = info->resets[id].off; + u32 dis = BIT(info->resets[id].bit); u32 value = (dis << 16) | dis; - dev_dbg(rcdev->dev, "deassert name:%s id:%ld offset:0x%x\n", - info->mod_clks[id].name, id, CLK_RST_R(reg)); + dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id, + CLK_RST_R(reg)); writel(value, priv->base + CLK_RST_R(reg)); return 0; @@ -538,8 +536,8 @@ static int rzg2l_cpg_status(struct reset_controller_dev *rcdev, { struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev); const struct rzg2l_cpg_info *info = priv->info; - unsigned int reg = info->mod_clks[id].off; - u32 bitmask = info->mod_clks[id].reset; + unsigned int reg = info->resets[id].off; + u32 bitmask = BIT(info->resets[id].bit); return !(readl(priv->base + CLK_MRST_R(reg)) & bitmask); } @@ -554,9 +552,11 @@ static const struct reset_control_ops rzg2l_cpg_reset_ops = { static int rzg2l_cpg_reset_xlate(struct reset_controller_dev *rcdev, const struct of_phandle_args *reset_spec) { + struct rzg2l_cpg_priv *priv = 
rcdev_to_priv(rcdev); + const struct rzg2l_cpg_info *info = priv->info; unsigned int id = reset_spec->args[0]; - if (id >= rcdev->nr_resets) { + if (id >= rcdev->nr_resets || !info->resets[id].off) { dev_err(rcdev->dev, "Invalid reset index %u\n", id); return -EINVAL; } @@ -571,7 +571,7 @@ static int rzg2l_cpg_reset_controller_register(struct rzg2l_cpg_priv *priv) priv->rcdev.dev = priv->dev; priv->rcdev.of_reset_n_cells = 1; priv->rcdev.of_xlate = rzg2l_cpg_reset_xlate; - priv->rcdev.nr_resets = priv->num_mod_clks; + priv->rcdev.nr_resets = priv->num_resets; return devm_reset_controller_register(priv->dev, &priv->rcdev); } @@ -594,42 +594,49 @@ static int rzg2l_cpg_attach_dev(struct generic_pm_domain *unused, struct device { struct device_node *np = dev->of_node; struct of_phandle_args clkspec; + bool once = true; struct clk *clk; int error; int i = 0; while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i, &clkspec)) { - if (rzg2l_cpg_is_pm_clk(&clkspec)) - goto found; - - of_node_put(clkspec.np); + if (rzg2l_cpg_is_pm_clk(&clkspec)) { + if (once) { + once = false; + error = pm_clk_create(dev); + if (error) { + of_node_put(clkspec.np); + goto err; + } + } + clk = of_clk_get_from_provider(&clkspec); + of_node_put(clkspec.np); + if (IS_ERR(clk)) { + error = PTR_ERR(clk); + goto fail_destroy; + } + + error = pm_clk_add_clk(dev, clk); + if (error) { + dev_err(dev, "pm_clk_add_clk failed %d\n", + error); + goto fail_put; + } + } else { + of_node_put(clkspec.np); + } i++; } return 0; -found: - clk = of_clk_get_from_provider(&clkspec); - of_node_put(clkspec.np); - - if (IS_ERR(clk)) - return PTR_ERR(clk); - - error = pm_clk_create(dev); - if (error) - goto fail_put; - - error = pm_clk_add_clk(dev, clk); - if (error) - goto fail_destroy; - - return 0; +fail_put: + clk_put(clk); fail_destroy: pm_clk_destroy(dev); -fail_put: - clk_put(clk); +err: return error; } @@ -692,6 +699,7 @@ static int __init rzg2l_cpg_probe(struct platform_device *pdev) priv->clks = clks; priv->num_core_clks = info->num_total_core_clks; priv->num_mod_clks = info->num_hw_mod_clks; + priv->num_resets = info->num_resets; priv->last_dt_core_clk = info->last_dt_core_clk; for (i = 0; i < nclks; i++) diff --git a/drivers/clk/renesas/renesas-rzg2l-cpg.h b/drivers/clk/renesas/renesas-rzg2l-cpg.h index 3948bdd8afc9..63695280ce8b 100644 --- a/drivers/clk/renesas/renesas-rzg2l-cpg.h +++ b/drivers/clk/renesas/renesas-rzg2l-cpg.h @@ -21,6 +21,7 @@ #define DDIV_PACK(offset, bitpos, size) \ (((offset) << 20) | ((bitpos) << 12) | ((size) << 8)) #define DIVPL2A DDIV_PACK(CPG_PL2_DDIV, 0, 3) +#define DIVPL3A DDIV_PACK(CPG_PL3A_DDIV, 0, 3) #define DIVPL3B DDIV_PACK(CPG_PL3A_DDIV, 4, 3) /** @@ -76,26 +77,40 @@ enum clk_types { * @id: clock index in array containing all Core and Module Clocks * @parent: id of parent clock * @off: register offset - * @onoff: ON/MON bits - * @reset: reset bits + * @bit: ON/MON bit */ struct rzg2l_mod_clk { const char *name; unsigned int id; unsigned int parent; u16 off; - u8 onoff; - u8 reset; + u8 bit; }; -#define DEF_MOD(_name, _id, _parent, _off, _onoff, _reset) \ - [_id] = { \ +#define DEF_MOD(_name, _id, _parent, _off, _bit) \ + { \ .name = _name, \ - .id = MOD_CLK_BASE + _id, \ + .id = MOD_CLK_BASE + (_id), \ .parent = (_parent), \ .off = (_off), \ - .onoff = (_onoff), \ - .reset = (_reset) \ + .bit = (_bit), \ + } + +/** + * struct rzg2l_reset - Reset definitions + * + * @off: register offset + * @bit: reset bit + */ +struct rzg2l_reset { + u16 off; + u8 bit; +}; + +#define DEF_RST(_id, _off, 
_bit) \ + [_id] = { \ + .off = (_off), \ + .bit = (_bit) \ } /** @@ -126,6 +141,10 @@ struct rzg2l_cpg_info { unsigned int num_mod_clks; unsigned int num_hw_mod_clks; + /* Resets */ + const struct rzg2l_reset *resets; + unsigned int num_resets; + /* Critical Module Clocks that should not be disabled */ const unsigned int *crit_mod_clks; unsigned int num_crit_mod_clks; diff --git a/drivers/clk/tegra/clk-sdmmc-mux.c b/drivers/clk/tegra/clk-sdmmc-mux.c index 316912d3b1a4..4f2c3309eea4 100644 --- a/drivers/clk/tegra/clk-sdmmc-mux.c +++ b/drivers/clk/tegra/clk-sdmmc-mux.c @@ -194,6 +194,15 @@ static void clk_sdmmc_mux_disable(struct clk_hw *hw) gate_ops->disable(gate_hw); } +static void clk_sdmmc_mux_disable_unused(struct clk_hw *hw) +{ + struct tegra_sdmmc_mux *sdmmc_mux = to_clk_sdmmc_mux(hw); + const struct clk_ops *gate_ops = sdmmc_mux->gate_ops; + struct clk_hw *gate_hw = &sdmmc_mux->gate.hw; + + gate_ops->disable_unused(gate_hw); +} + static void clk_sdmmc_mux_restore_context(struct clk_hw *hw) { struct clk_hw *parent = clk_hw_get_parent(hw); @@ -218,6 +227,7 @@ static const struct clk_ops tegra_clk_sdmmc_mux_ops = { .is_enabled = clk_sdmmc_mux_is_enabled, .enable = clk_sdmmc_mux_enable, .disable = clk_sdmmc_mux_disable, + .disable_unused = clk_sdmmc_mux_disable_unused, .restore_context = clk_sdmmc_mux_restore_context, }; diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c index 3fc98a3ffd91..c10fc33b29b1 100644 --- a/drivers/cpufreq/armada-37xx-cpufreq.c +++ b/drivers/cpufreq/armada-37xx-cpufreq.c @@ -104,7 +104,11 @@ struct armada_37xx_dvfs { }; static struct armada_37xx_dvfs armada_37xx_dvfs[] = { - {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} }, + /* + * The cpufreq scaling for 1.2 GHz variant of the SOC is currently + * unstable because we do not know how to configure it properly. 
+ */ + /* {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} }, */ {.cpu_freq_max = 1000*1000*1000, .divider = {1, 2, 4, 5} }, {.cpu_freq_max = 800*1000*1000, .divider = {1, 2, 3, 4} }, {.cpu_freq_max = 600*1000*1000, .divider = {2, 4, 5, 6} }, diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index bef7528aecd3..231e585f6ba2 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -139,7 +139,9 @@ static const struct of_device_id blocklist[] __initconst = { { .compatible = "qcom,qcs404", }, { .compatible = "qcom,sc7180", }, { .compatible = "qcom,sc7280", }, + { .compatible = "qcom,sc8180x", }, { .compatible = "qcom,sdm845", }, + { .compatible = "qcom,sm8150", }, { .compatible = "st,stih407", }, { .compatible = "st,stih410", }, diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c index 182a4dbca095..c538a153ee82 100644 --- a/drivers/cpufreq/longhaul.c +++ b/drivers/cpufreq/longhaul.c @@ -942,8 +942,6 @@ static int __init longhaul_init(void) return cpufreq_register_driver(&longhaul_driver); case 10: pr_err("Use acpi-cpufreq driver for VIA C7\n"); - default: - ; } return -ENODEV; diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c index ec9a87ca2dbb..75f818d04b48 100644 --- a/drivers/cpufreq/scmi-cpufreq.c +++ b/drivers/cpufreq/scmi-cpufreq.c @@ -134,7 +134,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy) } if (!zalloc_cpumask_var(&opp_shared_cpus, GFP_KERNEL)) - ret = -ENOMEM; + return -ENOMEM; /* Obtain CPUs that share SCMI performance controls */ ret = scmi_get_sharing_cpus(cpu_dev, policy->cpus); diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c index 7b91060e82f6..d9262db79cae 100644 --- a/drivers/cpuidle/governors/teo.c +++ b/drivers/cpuidle/governors/teo.c @@ -382,8 +382,8 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, alt_intercepts = 2 * idx_intercept_sum > cpu_data->total - idx_hit_sum; alt_recent = idx_recent_sum > NR_RECENT / 2; if (alt_recent || alt_intercepts) { - s64 last_enabled_span_ns = duration_ns; - int last_enabled_idx = idx; + s64 first_suitable_span_ns = duration_ns; + int first_suitable_idx = idx; /* * Look for the deepest idle state whose target residency had @@ -397,37 +397,51 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, intercept_sum = 0; recent_sum = 0; - for (i = idx - 1; i >= idx0; i--) { + for (i = idx - 1; i >= 0; i--) { struct teo_bin *bin = &cpu_data->state_bins[i]; s64 span_ns; intercept_sum += bin->intercepts; recent_sum += bin->recent; + span_ns = teo_middle_of_bin(i, drv); + + if ((!alt_recent || 2 * recent_sum > idx_recent_sum) && + (!alt_intercepts || + 2 * intercept_sum > idx_intercept_sum)) { + if (teo_time_ok(span_ns) && + !dev->states_usage[i].disable) { + idx = i; + duration_ns = span_ns; + } else { + /* + * The current state is too shallow or + * disabled, so take the first enabled + * deeper state with suitable time span. + */ + idx = first_suitable_idx; + duration_ns = first_suitable_span_ns; + } + break; + } + if (dev->states_usage[i].disable) continue; - span_ns = teo_middle_of_bin(i, drv); if (!teo_time_ok(span_ns)) { /* - * The current state is too shallow, so select - * the first enabled deeper state. + * The current state is too shallow, but if an + * alternative candidate state has been found, + * it may still turn out to be a better choice. 
*/ - duration_ns = last_enabled_span_ns; - idx = last_enabled_idx; - break; - } + if (first_suitable_idx != idx) + continue; - if ((!alt_recent || 2 * recent_sum > idx_recent_sum) && - (!alt_intercepts || - 2 * intercept_sum > idx_intercept_sum)) { - idx = i; - duration_ns = span_ns; break; } - last_enabled_span_ns = span_ns; - last_enabled_idx = i; + first_suitable_span_ns = span_ns; + first_suitable_idx = i; } } diff --git a/drivers/dax/super.c b/drivers/dax/super.c index 5fa6ae9dbc8b..44736cbd446e 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c @@ -313,7 +313,7 @@ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages, return -ENXIO; if (nr_pages < 0) - return nr_pages; + return -EINVAL; avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn); diff --git a/drivers/dio/dio.c b/drivers/dio/dio.c index 193b40e7aec0..4c06c93c93d3 100644 --- a/drivers/dio/dio.c +++ b/drivers/dio/dio.c @@ -219,7 +219,7 @@ static int __init dio_init(void) /* Found a board, allocate it an entry in the list */ dev = kzalloc(sizeof(struct dio_dev), GFP_KERNEL); if (!dev) - return 0; + return -ENOMEM; dev->bus = &dio_bus; dev->dev.parent = &dio_bus.dev; diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index 20d9bddbb985..394e6e1e9686 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c @@ -211,8 +211,8 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a, struct sync_file *b) { struct sync_file *sync_file; - struct dma_fence **fences, **nfences, **a_fences, **b_fences; - int i, i_a, i_b, num_fences, a_num_fences, b_num_fences; + struct dma_fence **fences = NULL, **nfences, **a_fences, **b_fences; + int i = 0, i_a, i_b, num_fences, a_num_fences, b_num_fences; sync_file = sync_file_alloc(); if (!sync_file) @@ -236,7 +236,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a, * If a sync_file can only be created with sync_file_merge * and sync_file_create, this is a reasonable assumption. */ - for (i = i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) { + for (i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) { struct dma_fence *pt_a = a_fences[i_a]; struct dma_fence *pt_b = b_fences[i_b]; @@ -277,15 +277,16 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a, fences = nfences; } - if (sync_file_set_fence(sync_file, fences, i) < 0) { - kfree(fences); + if (sync_file_set_fence(sync_file, fences, i) < 0) goto err; - } strlcpy(sync_file->user_name, name, sizeof(sync_file->user_name)); return sync_file; err: + while (i) + dma_fence_put(fences[--i]); + kfree(fences); fput(sync_file->file); return NULL; diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h index 26482c7d4c3a..fc708be7ad9a 100644 --- a/drivers/dma/idxd/idxd.h +++ b/drivers/dma/idxd/idxd.h @@ -294,6 +294,14 @@ struct idxd_desc { struct idxd_wq *wq; }; +/* + * This is a software-defined error for the completion status. We overload an error code + * that will never appear in the completion status and only in the SWERR register. 
+ */ +enum idxd_completion_status { + IDXD_COMP_DESC_ABORT = 0xff, +}; + #define confdev_to_idxd(dev) container_of(dev, struct idxd_device, conf_dev) #define confdev_to_wq(dev) container_of(dev, struct idxd_wq, conf_dev) @@ -482,4 +490,10 @@ static inline void perfmon_init(void) {} static inline void perfmon_exit(void) {} #endif +static inline void complete_desc(struct idxd_desc *desc, enum idxd_complete_type reason) +{ + idxd_dma_complete_txd(desc, reason); + idxd_free_desc(desc->wq, desc); +} + #endif diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c index c8ae41d36040..c0f4c0422f32 100644 --- a/drivers/dma/idxd/init.c +++ b/drivers/dma/idxd/init.c @@ -102,6 +102,8 @@ static int idxd_setup_interrupts(struct idxd_device *idxd) spin_lock_init(&idxd->irq_entries[i].list_lock); } + idxd_msix_perm_setup(idxd); + irq_entry = &idxd->irq_entries[0]; rc = request_threaded_irq(irq_entry->vector, NULL, idxd_misc_thread, 0, "idxd-misc", irq_entry); @@ -148,7 +150,6 @@ static int idxd_setup_interrupts(struct idxd_device *idxd) } idxd_unmask_error_interrupts(idxd); - idxd_msix_perm_setup(idxd); return 0; err_wq_irqs: @@ -162,6 +163,7 @@ static int idxd_setup_interrupts(struct idxd_device *idxd) err_misc_irq: /* Disable error interrupt generation */ idxd_mask_error_interrupts(idxd); + idxd_msix_perm_clear(idxd); err_irq_entries: pci_free_irq_vectors(pdev); dev_err(dev, "No usable interrupts\n"); @@ -758,32 +760,40 @@ static void idxd_shutdown(struct pci_dev *pdev) for (i = 0; i < msixcnt; i++) { irq_entry = &idxd->irq_entries[i]; synchronize_irq(irq_entry->vector); - free_irq(irq_entry->vector, irq_entry); if (i == 0) continue; idxd_flush_pending_llist(irq_entry); idxd_flush_work_list(irq_entry); } - - idxd_msix_perm_clear(idxd); - idxd_release_int_handles(idxd); - pci_free_irq_vectors(pdev); - pci_iounmap(pdev, idxd->reg_base); - pci_disable_device(pdev); - destroy_workqueue(idxd->wq); + flush_workqueue(idxd->wq); } static void idxd_remove(struct pci_dev *pdev) { struct idxd_device *idxd = pci_get_drvdata(pdev); + struct idxd_irq_entry *irq_entry; + int msixcnt = pci_msix_vec_count(pdev); + int i; dev_dbg(&pdev->dev, "%s called\n", __func__); idxd_shutdown(pdev); if (device_pasid_enabled(idxd)) idxd_disable_system_pasid(idxd); idxd_unregister_devices(idxd); - perfmon_pmu_remove(idxd); + + for (i = 0; i < msixcnt; i++) { + irq_entry = &idxd->irq_entries[i]; + free_irq(irq_entry->vector, irq_entry); + } + idxd_msix_perm_clear(idxd); + idxd_release_int_handles(idxd); + pci_free_irq_vectors(pdev); + pci_iounmap(pdev, idxd->reg_base); iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA); + pci_disable_device(pdev); + destroy_workqueue(idxd->wq); + perfmon_pmu_remove(idxd); + device_unregister(&idxd->conf_dev); } static struct pci_driver idxd_pci_driver = { diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c index ae68e1e5487a..4e3a7198c0ca 100644 --- a/drivers/dma/idxd/irq.c +++ b/drivers/dma/idxd/irq.c @@ -245,12 +245,6 @@ static inline bool match_fault(struct idxd_desc *desc, u64 fault_addr) return false; } -static inline void complete_desc(struct idxd_desc *desc, enum idxd_complete_type reason) -{ - idxd_dma_complete_txd(desc, reason); - idxd_free_desc(desc->wq, desc); -} - static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry, enum irq_work_type wtype, int *processed, u64 data) @@ -272,8 +266,16 @@ static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry, reason = IDXD_COMPLETE_DEV_FAIL; llist_for_each_entry_safe(desc, t, head, llnode) { - if 
(desc->completion->status) { - if ((desc->completion->status & DSA_COMP_STATUS_MASK) != DSA_COMP_SUCCESS + u8 status = desc->completion->status & DSA_COMP_STATUS_MASK; + + if (status) { + if (unlikely(status == IDXD_COMP_DESC_ABORT)) { + complete_desc(desc, IDXD_COMPLETE_ABORT); + (*processed)++; + continue; + } + + if (unlikely(status != DSA_COMP_SUCCESS)) match_fault(desc, data); complete_desc(desc, reason); (*processed)++; @@ -329,7 +331,14 @@ static int irq_process_work_list(struct idxd_irq_entry *irq_entry, spin_unlock_irqrestore(&irq_entry->list_lock, flags); list_for_each_entry(desc, &flist, list) { - if ((desc->completion->status & DSA_COMP_STATUS_MASK) != DSA_COMP_SUCCESS + u8 status = desc->completion->status & DSA_COMP_STATUS_MASK; + + if (unlikely(status == IDXD_COMP_DESC_ABORT)) { + complete_desc(desc, IDXD_COMPLETE_ABORT); + continue; + } + + if (unlikely(status != DSA_COMP_SUCCESS)) match_fault(desc, data); complete_desc(desc, reason); } diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c index 19afb62abaff..36c9c1a89b7e 100644 --- a/drivers/dma/idxd/submit.c +++ b/drivers/dma/idxd/submit.c @@ -25,11 +25,10 @@ static struct idxd_desc *__get_desc(struct idxd_wq *wq, int idx, int cpu) * Descriptor completion vectors are 1...N for MSIX. We will round * robin through the N vectors. */ - wq->vec_ptr = (wq->vec_ptr % idxd->num_wq_irqs) + 1; + wq->vec_ptr = desc->vector = (wq->vec_ptr % idxd->num_wq_irqs) + 1; if (!idxd->int_handles) { desc->hw->int_handle = wq->vec_ptr; } else { - desc->vector = wq->vec_ptr; /* * int_handles are only for descriptor completion. However for device * MSIX enumeration, vec 0 is used for misc interrupts. Therefore even @@ -88,9 +87,64 @@ void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc) sbitmap_queue_clear(&wq->sbq, desc->id, cpu); } +static struct idxd_desc *list_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie, + struct idxd_desc *desc) +{ + struct idxd_desc *d, *n; + + lockdep_assert_held(&ie->list_lock); + list_for_each_entry_safe(d, n, &ie->work_list, list) { + if (d == desc) { + list_del(&d->list); + return d; + } + } + + /* + * At this point, the desc that needs to be aborted is held by the completion + * handler, which has taken it off the pending list but has not yet added it to the + * work list. It will be cleaned up by the interrupt handler when it sees the + * IDXD_COMP_DESC_ABORT for completion status. + */ + return NULL; +} + +static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie, + struct idxd_desc *desc) +{ + struct idxd_desc *d, *t, *found = NULL; + struct llist_node *head; + unsigned long flags; + + desc->completion->status = IDXD_COMP_DESC_ABORT; + /* + * Grab the list lock so it will block the irq thread handler. This allows the abort code to locate the descriptor that needs to be aborted. */ + spin_lock_irqsave(&ie->list_lock, flags); + head = llist_del_all(&ie->pending_llist); + if (head) { + llist_for_each_entry_safe(d, t, head, llnode) { + if (d == desc) { + found = desc; + continue; + } + list_add_tail(&desc->list, &ie->work_list); + } + } + + if (!found) + found = list_abort_desc(wq, ie, desc); + spin_unlock_irqrestore(&ie->list_lock, flags); + + if (found) + complete_desc(found, IDXD_COMPLETE_ABORT); +} + int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc) { struct idxd_device *idxd = wq->idxd; + struct idxd_irq_entry *ie = NULL; void __iomem *portal; int rc; @@ -108,6 +162,16 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc) * even on UP because the recipient is a device. */ wmb(); + + /* + * Pending the descriptor to the lockless list for the irq_entry + * that we designated the descriptor to. + */ + if (desc->hw->flags & IDXD_OP_FLAG_RCI) { + ie = &idxd->irq_entries[desc->vector]; + llist_add(&desc->llnode, &ie->pending_llist); + } + if (wq_dedicated(wq)) { iosubmit_cmds512(portal, desc->hw, 1); } else { @@ -118,29 +182,13 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc) * device is not accepting descriptor at all. */ rc = enqcmds(portal, desc->hw); - if (rc < 0) + if (rc < 0) { + if (ie) + llist_abort_desc(wq, ie, desc); return rc; + } } percpu_ref_put(&wq->wq_active); - - /* - * Pending the descriptor to the lockless list for the irq_entry - * that we designated the descriptor to. - */ - if (desc->hw->flags & IDXD_OP_FLAG_RCI) { - int vec; - - /* - * If the driver is on host kernel, it would be the value - * assigned to interrupt handle, which is index for MSIX - * vector. If it's guest then can't use the int_handle since - * that is the index to IMS for the entire device. The guest - * device local index will be used. - */ - vec = !idxd->int_handles ? 
desc->hw->int_handle : desc->vector; - llist_add(&desc->llnode, &idxd->irq_entries[vec].pending_llist); - } - return 0; } diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c index 0460d58e3941..bb4df63906a7 100644 --- a/drivers/dma/idxd/sysfs.c +++ b/drivers/dma/idxd/sysfs.c @@ -1744,8 +1744,6 @@ void idxd_unregister_devices(struct idxd_device *idxd) device_unregister(&group->conf_dev); } - - device_unregister(&idxd->conf_dev); } int idxd_register_bus_type(void) diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 7f116bbcfad2..2ddc31e64db0 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -812,6 +812,8 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg( dma_length += sg_dma_len(sg); } + imxdma_config_write(chan, &imxdmac->config, direction); + switch (imxdmac->word_size) { case DMA_SLAVE_BUSWIDTH_4_BYTES: if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3) diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index 104ad420abbe..baab1ca9f621 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c @@ -618,6 +618,7 @@ static int ipu_enable_channel(struct idmac *idmac, struct idmac_channel *ichan) case IDMAC_SDC_1: case IDMAC_IC_7: ipu_channel_set_priority(ipu, channel, true); + break; default: break; } @@ -978,6 +979,7 @@ static int ipu_init_channel(struct idmac *idmac, struct idmac_channel *ichan) case IDMAC_SDC_0: case IDMAC_SDC_1: n_desc = 4; + break; default: break; } diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c index c1a69149c8bf..4a51fdbf5aa9 100644 --- a/drivers/dma/mpc512x_dma.c +++ b/drivers/dma/mpc512x_dma.c @@ -813,6 +813,7 @@ inline bool is_buswidth_valid(u8 buswidth, bool is_mpc8308) case 16: if (is_mpc8308) return false; + break; case 1: case 2: case 4: diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c index ec00b20ae8e4..ac61ecda2926 100644 --- a/drivers/dma/of-dma.c +++ b/drivers/dma/of-dma.c @@ -67,8 +67,12 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec, return NULL; ofdma_target = of_dma_find_controller(&dma_spec_target); - if (!ofdma_target) - return NULL; + if (!ofdma_target) { + ofdma->dma_router->route_free(ofdma->dma_router->dev, + route_data); + chan = ERR_PTR(-EPROBE_DEFER); + goto err; + } chan = ofdma_target->of_dma_xlate(&dma_spec_target, ofdma_target); if (IS_ERR_OR_NULL(chan)) { @@ -89,6 +93,7 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec, } } +err: /* * Need to put the node back since the ofdma->of_dma_route_allocate * has taken it for generating the new, translated dma_spec diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c index 8f7ceb698226..1cc06900153e 100644 --- a/drivers/dma/sh/usb-dmac.c +++ b/drivers/dma/sh/usb-dmac.c @@ -855,8 +855,8 @@ static int usb_dmac_probe(struct platform_device *pdev) error: of_dma_controller_free(pdev->dev.of_node); - pm_runtime_put(&pdev->dev); error_pm: + pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); return ret; } diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c index f54ecb123a52..7dd1d3d0bf06 100644 --- a/drivers/dma/stm32-dma.c +++ b/drivers/dma/stm32-dma.c @@ -1200,7 +1200,7 @@ static int stm32_dma_alloc_chan_resources(struct dma_chan *c) chan->config_init = false; - ret = pm_runtime_get_sync(dmadev->ddev.dev); + ret = pm_runtime_resume_and_get(dmadev->ddev.dev); if (ret < 0) return ret; @@ -1470,7 +1470,7 @@ static int stm32_dma_suspend(struct device *dev) struct stm32_dma_device *dmadev = 
dev_get_drvdata(dev); int id, ret, scr; - ret = pm_runtime_get_sync(dev); + ret = pm_runtime_resume_and_get(dev); if (ret < 0) return ret; diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c index ef0d0555103d..a42164389ebc 100644 --- a/drivers/dma/stm32-dmamux.c +++ b/drivers/dma/stm32-dmamux.c @@ -137,7 +137,7 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec, /* Set dma request */ spin_lock_irqsave(&dmamux->lock, flags); - ret = pm_runtime_get_sync(&pdev->dev); + ret = pm_runtime_resume_and_get(&pdev->dev); if (ret < 0) { spin_unlock_irqrestore(&dmamux->lock, flags); goto error; @@ -336,7 +336,7 @@ static int stm32_dmamux_suspend(struct device *dev) struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev); int i, ret; - ret = pm_runtime_get_sync(dev); + ret = pm_runtime_resume_and_get(dev); if (ret < 0) return ret; @@ -361,7 +361,7 @@ static int stm32_dmamux_resume(struct device *dev) if (ret < 0) return ret; - ret = pm_runtime_get_sync(dev); + ret = pm_runtime_resume_and_get(dev); if (ret < 0) return ret; diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c index 96ad21869ba7..a35858610780 100644 --- a/drivers/dma/ti/k3-udma.c +++ b/drivers/dma/ti/k3-udma.c @@ -4948,6 +4948,7 @@ static int setup_resources(struct udma_dev *ud) ud->tchan_cnt), ud->rchan_cnt - bitmap_weight(ud->rchan_map, ud->rchan_cnt)); + break; default: break; } diff --git a/drivers/dma/uniphier-xdmac.c b/drivers/dma/uniphier-xdmac.c index 16b19654873d..d6b8a202474f 100644 --- a/drivers/dma/uniphier-xdmac.c +++ b/drivers/dma/uniphier-xdmac.c @@ -209,8 +209,8 @@ static int uniphier_xdmac_chan_stop(struct uniphier_xdmac_chan *xc) writel(0, xc->reg_ch_base + XDMAC_TSS); /* wait until transfer is stopped */ - return readl_poll_timeout(xc->reg_ch_base + XDMAC_STAT, val, - !(val & XDMAC_STAT_TENF), 100, 1000); + return readl_poll_timeout_atomic(xc->reg_ch_base + XDMAC_STAT, val, + !(val & XDMAC_STAT_TENF), 100, 1000); } /* xc->vc.lock must be held by caller */ diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index 75c0b8e904e5..4b9530a7bf65 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -394,6 +394,7 @@ struct xilinx_dma_tx_descriptor { * @genlock: Support genlock mode * @err: Channel has errors * @idle: Check for channel idle + * @terminating: Check for channel being synchronized by user * @tasklet: Cleanup work after irq * @config: Device configuration info * @flush_on_fsync: Flush on Frame sync @@ -431,6 +432,7 @@ struct xilinx_dma_chan { bool genlock; bool err; bool idle; + bool terminating; struct tasklet_struct tasklet; struct xilinx_vdma_config config; bool flush_on_fsync; @@ -1049,6 +1051,13 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan) /* Run any dependencies, then free the descriptor */ dma_run_dependencies(&desc->async_tx); xilinx_dma_free_tx_descriptor(chan, desc); + + /* + * While we ran a callback the user called a terminate function, + * which takes care of cleaning up any remaining descriptors + */ + if (chan->terminating) + break; } spin_unlock_irqrestore(&chan->lock, flags); @@ -1965,6 +1974,8 @@ static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx) if (desc->cyclic) chan->cyclic = true; + chan->terminating = false; + spin_unlock_irqrestore(&chan->lock, flags); return cookie; @@ -2436,6 +2447,7 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan) xilinx_dma_chan_reset(chan); /* Remove and free all of the 
descriptors in the lists */ + chan->terminating = true; xilinx_dma_free_descriptors(chan); chan->idle = true; diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index 91164c5f0757..2fc4c3f91fd5 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -271,7 +271,7 @@ config EDAC_PND2 config EDAC_IGEN6 tristate "Intel client SoC Integrated MC" depends on PCI && PCI_MMCONFIG && ARCH_HAVE_NMI_SAFE_CMPXCHG - depends on X64_64 && X86_MCE_INTEL + depends on X86_64 && X86_MCE_INTEL help Support for error detection and correction on the Intel client SoC Integrated Memory Controller using In-Band ECC IP. diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c index 83166e02b191..00fe595a5bc8 100644 --- a/drivers/firmware/arm_ffa/bus.c +++ b/drivers/firmware/arm_ffa/bus.c @@ -46,9 +46,6 @@ static int ffa_device_probe(struct device *dev) struct ffa_driver *ffa_drv = to_ffa_driver(dev->driver); struct ffa_device *ffa_dev = to_ffa_dev(dev); - if (!ffa_device_match(dev, dev->driver)) - return -ENODEV; - return ffa_drv->probe(ffa_dev); } @@ -99,6 +96,9 @@ int ffa_driver_register(struct ffa_driver *driver, struct module *owner, { int ret; + if (!driver->probe) + return -EINVAL; + driver->driver.bus = &ffa_bus_type; driver->driver.name = driver->name; driver->driver.owner = owner; diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c index b1edb4b2e94a..c9fb56afbcb4 100644 --- a/drivers/firmware/arm_ffa/driver.c +++ b/drivers/firmware/arm_ffa/driver.c @@ -120,7 +120,7 @@ #define PACK_TARGET_INFO(s, r) \ (FIELD_PREP(SENDER_ID_MASK, (s)) | FIELD_PREP(RECEIVER_ID_MASK, (r))) -/** +/* * FF-A specification mentions explicitly about '4K pages'. This should * not be confused with the kernel PAGE_SIZE, which is the translation * granule kernel is configured and may be one among 4K, 16K and 64K. 
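
The FF-A error-translation hunk that follows, and the matching SCMI hunk further below, apply the same hardening pattern: negate the firmware error code into a table index and bounds-check that index against the lookup table itself before dereferencing, rather than comparing against a sentinel value. A minimal standalone sketch of the pattern, using an illustrative table and function name rather than either driver's actual definitions::

	static const int errmap[] = {
		0,		/* success */
		-EOPNOTSUPP,	/* operation not supported */
		-EINVAL,	/* invalid parameters */
		-EACCES,	/* access denied */
	};

	static int fw_to_linux_errno(int errno)
	{
		int err_idx = -errno;	/* firmware codes are zero or negative */

		/* rejects positive inputs and indices past the table end */
		if (err_idx >= 0 && err_idx < ARRAY_SIZE(errmap))
			return errmap[err_idx];
		return -EINVAL;
	}

Checking err_idx >= 0 first also keeps the signed/unsigned comparison against ARRAY_SIZE() well defined, which is why both fixes below introduce the intermediate index variable instead of indexing with -errno directly.
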
@@ -149,8 +149,10 @@ static const int ffa_linux_errmap[] = { static inline int ffa_to_linux_errno(int errno) { - if (errno < FFA_RET_SUCCESS && errno >= -ARRAY_SIZE(ffa_linux_errmap)) - return ffa_linux_errmap[-errno]; + int err_idx = -errno; + + if (err_idx >= 0 && err_idx < ARRAY_SIZE(ffa_linux_errmap)) + return ffa_linux_errmap[err_idx]; return -EINVAL; } diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c index 784cf0027da3..6c7e24935eca 100644 --- a/drivers/firmware/arm_scmi/bus.c +++ b/drivers/firmware/arm_scmi/bus.c @@ -104,11 +104,6 @@ static int scmi_dev_probe(struct device *dev) { struct scmi_driver *scmi_drv = to_scmi_driver(dev->driver); struct scmi_device *scmi_dev = to_scmi_dev(dev); - const struct scmi_device_id *id; - - id = scmi_dev_match_id(scmi_dev, scmi_drv); - if (!id) - return -ENODEV; if (!scmi_dev->handle) return -EPROBE_DEFER; @@ -139,6 +134,9 @@ int scmi_driver_register(struct scmi_driver *driver, struct module *owner, { int retval; + if (!driver->probe) + return -EINVAL; + retval = scmi_protocol_device_request(driver->id_table); if (retval) return retval; diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 66e5e694be7d..9b2e8d42a992 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -47,7 +47,6 @@ enum scmi_error_codes { SCMI_ERR_GENERIC = -8, /* Generic Error */ SCMI_ERR_HARDWARE = -9, /* Hardware Error */ SCMI_ERR_PROTOCOL = -10,/* Protocol Error */ - SCMI_ERR_MAX }; /* List of all SCMI devices active in system */ @@ -166,8 +165,10 @@ static const int scmi_linux_errmap[] = { static inline int scmi_to_linux_errno(int errno) { - if (errno < SCMI_SUCCESS && errno > SCMI_ERR_MAX) - return scmi_linux_errmap[-errno]; + int err_idx = -errno; + + if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap)) + return scmi_linux_errmap[err_idx]; return -EIO; } @@ -1025,8 +1026,9 @@ static int __scmi_xfer_info_init(struct scmi_info *sinfo, const struct scmi_desc *desc = sinfo->desc; /* Pre-allocated messages, no more than what hdr.seq can support */ - if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) { - dev_err(dev, "Maximum message of %d exceeds supported %ld\n", + if (WARN_ON(!desc->max_msg || desc->max_msg > MSG_TOKEN_MAX)) { + dev_err(dev, + "Invalid maximum messages %d, not in range [1 - %lu]\n", desc->max_msg, MSG_TOKEN_MAX); return -EINVAL; } @@ -1137,6 +1139,8 @@ scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id) * @proto_id and @name: if device was still not existent it is created as a * child of the specified SCMI instance @info and its transport properly * initialized as usual. + * + * Return: A properly initialized scmi device, NULL otherwise. */ static inline struct scmi_device * scmi_get_protocol_device(struct device_node *np, struct scmi_info *info, diff --git a/drivers/firmware/arm_scmi/notify.c b/drivers/firmware/arm_scmi/notify.c index d860bebd984a..0efd20cd9d69 100644 --- a/drivers/firmware/arm_scmi/notify.c +++ b/drivers/firmware/arm_scmi/notify.c @@ -1457,6 +1457,8 @@ static void scmi_devm_release_notifier(struct device *dev, void *res) * * Generic devres managed helper to register a notifier_block against a * protocol event. 
+ * + * Return: 0 on Success */ static int scmi_devm_notifier_register(struct scmi_device *sdev, u8 proto_id, u8 evt_id, @@ -1523,6 +1525,8 @@ static int scmi_devm_notifier_match(struct device *dev, void *res, void *data) * Generic devres managed helper to explicitly un-register a notifier_block * against a protocol event, which was previously registered using the above * @scmi_devm_notifier_register. + * + * Return: 0 on Success */ static int scmi_devm_notifier_unregister(struct scmi_device *sdev, u8 proto_id, u8 evt_id, diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c index 2c88aa221559..308471586381 100644 --- a/drivers/firmware/arm_scmi/sensors.c +++ b/drivers/firmware/arm_scmi/sensors.c @@ -166,7 +166,8 @@ struct scmi_msg_sensor_reading_get { struct scmi_resp_sensor_reading_complete { __le32 id; - __le64 readings; + __le32 readings_low; + __le32 readings_high; }; struct scmi_sensor_reading_resp { @@ -717,7 +718,8 @@ static int scmi_sensor_reading_get(const struct scmi_protocol_handle *ph, resp = t->rx.buf; if (le32_to_cpu(resp->id) == sensor_id) - *value = get_unaligned_le64(&resp->readings); + *value = + get_unaligned_le64(&resp->readings_low); else ret = -EPROTO; } diff --git a/drivers/firmware/broadcom/tee_bnxt_fw.c b/drivers/firmware/broadcom/tee_bnxt_fw.c index ed10da5313e8..a5bf4c3f6dc7 100644 --- a/drivers/firmware/broadcom/tee_bnxt_fw.c +++ b/drivers/firmware/broadcom/tee_bnxt_fw.c @@ -212,10 +212,9 @@ static int tee_bnxt_fw_probe(struct device *dev) pvt_data.dev = dev; - fw_shm_pool = tee_shm_alloc(pvt_data.ctx, MAX_SHM_MEM_SZ, - TEE_SHM_MAPPED | TEE_SHM_DMA_BUF); + fw_shm_pool = tee_shm_alloc_kernel_buf(pvt_data.ctx, MAX_SHM_MEM_SZ); if (IS_ERR(fw_shm_pool)) { - dev_err(pvt_data.dev, "tee_shm_alloc failed\n"); + dev_err(pvt_data.dev, "tee_shm_alloc_kernel_buf failed\n"); err = PTR_ERR(fw_shm_pool); goto out_sess; } @@ -242,6 +241,14 @@ static int tee_bnxt_fw_remove(struct device *dev) return 0; } +static void tee_bnxt_fw_shutdown(struct device *dev) +{ + tee_shm_free(pvt_data.fw_shm_pool); + tee_client_close_session(pvt_data.ctx, pvt_data.session_id); + tee_client_close_context(pvt_data.ctx); + pvt_data.ctx = NULL; +} + static const struct tee_client_device_id tee_bnxt_fw_id_table[] = { {UUID_INIT(0x6272636D, 0x2019, 0x0716, 0x42, 0x43, 0x4D, 0x5F, 0x53, 0x43, 0x48, 0x49)}, @@ -257,6 +264,7 @@ static struct tee_client_driver tee_bnxt_fw_driver = { .bus = &tee_bus_type, .probe = tee_bnxt_fw_probe, .remove = tee_bnxt_fw_remove, + .shutdown = tee_bnxt_fw_shutdown, }, }; diff --git a/drivers/firmware/efi/dev-path-parser.c b/drivers/firmware/efi/dev-path-parser.c index 10d4457417a4..eb9c65f97841 100644 --- a/drivers/firmware/efi/dev-path-parser.c +++ b/drivers/firmware/efi/dev-path-parser.c @@ -34,7 +34,6 @@ static long __init parse_acpi_path(const struct efi_dev_path *node, break; if (!adev->pnp.unique_id && node->acpi.uid == 0) break; - acpi_dev_put(adev); } if (!adev) return -ENODEV; diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 4b7ee3fa9224..847f33ffc4ae 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -896,6 +896,7 @@ static int __init efi_memreserve_map_root(void) static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size) { struct resource *res, *parent; + int ret; res = kzalloc(sizeof(struct resource), GFP_ATOMIC); if (!res) @@ -908,7 +909,17 @@ static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size) /* we expect a conflict with a 'System RAM' region */ parent = 
request_resource_conflict(&iomem_resource, res); - return parent ? request_resource(parent, res) : 0; + ret = parent ? request_resource(parent, res) : 0; + + /* + * Given that efi_mem_reserve_iomem() can be called at any + * time, only call memblock_reserve() if the architecture + * keeps the infrastructure around. + */ + if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret) + memblock_reserve(addr, size); + + return ret; } int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size) diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c index 7bf0a7acae5e..2363fee9211c 100644 --- a/drivers/firmware/efi/libstub/arm64-stub.c +++ b/drivers/firmware/efi/libstub/arm64-stub.c @@ -35,15 +35,48 @@ efi_status_t check_platform_features(void) } /* - * Although relocatable kernels can fix up the misalignment with respect to - * MIN_KIMG_ALIGN, the resulting virtual text addresses are subtly out of - * sync with those recorded in the vmlinux when kaslr is disabled but the - * image required relocation anyway. Therefore retain 2M alignment unless - * KASLR is in use. + * Distro versions of GRUB may ignore the BSS allocation entirely (i.e., fail + * to provide space, and fail to zero it). Check for this condition by double + * checking that the first and the last byte of the image are covered by the + * same EFI memory map entry. */ -static u64 min_kimg_align(void) +static bool check_image_region(u64 base, u64 size) { - return efi_nokaslr ? MIN_KIMG_ALIGN : EFI_KIMG_ALIGN; + unsigned long map_size, desc_size, buff_size; + efi_memory_desc_t *memory_map; + struct efi_boot_memmap map; + efi_status_t status; + bool ret = false; + int map_offset; + + map.map = &memory_map; + map.map_size = &map_size; + map.desc_size = &desc_size; + map.desc_ver = NULL; + map.key_ptr = NULL; + map.buff_size = &buff_size; + + status = efi_get_memory_map(&map); + if (status != EFI_SUCCESS) + return false; + + for (map_offset = 0; map_offset < map_size; map_offset += desc_size) { + efi_memory_desc_t *md = (void *)memory_map + map_offset; + u64 end = md->phys_addr + md->num_pages * EFI_PAGE_SIZE; + + /* + * Find the region that covers base, and return whether + * it covers base+size bytes. + */ + if (base >= md->phys_addr && base < end) { + ret = (base + size) <= end; + break; + } + } + + efi_bs_call(free_pool, memory_map); + + return ret; } efi_status_t handle_kernel_image(unsigned long *image_addr, @@ -56,6 +89,16 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, unsigned long kernel_size, kernel_memsize = 0; u32 phys_seed = 0; + /* + * Although relocatable kernels can fix up the misalignment with + * respect to MIN_KIMG_ALIGN, the resulting virtual text addresses are + * subtly out of sync with those recorded in the vmlinux when kaslr is + * disabled but the image required relocation anyway. Therefore retain + * 2M alignment if KASLR was explicitly disabled, even if it was not + * going to be activated to begin with. + */ + u64 min_kimg_align = efi_nokaslr ? 
MIN_KIMG_ALIGN : EFI_KIMG_ALIGN; + if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { if (!efi_nokaslr) { status = efi_get_random_bytes(sizeof(phys_seed), @@ -76,6 +119,10 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, if (image->image_base != _text) efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n"); + if (!IS_ALIGNED((u64)_text, EFI_KIMG_ALIGN)) + efi_err("FIRMWARE BUG: kernel image not aligned on %ldk boundary\n", + EFI_KIMG_ALIGN >> 10); + kernel_size = _edata - _text; kernel_memsize = kernel_size + (_end - _edata); *reserve_size = kernel_memsize; @@ -85,14 +132,18 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, * If KASLR is enabled, and we have some randomness available, * locate the kernel at a randomized offset in physical memory. */ - status = efi_random_alloc(*reserve_size, min_kimg_align(), + status = efi_random_alloc(*reserve_size, min_kimg_align, reserve_addr, phys_seed); + if (status != EFI_SUCCESS) + efi_warn("efi_random_alloc() failed: 0x%lx\n", status); } else { status = EFI_OUT_OF_RESOURCES; } if (status != EFI_SUCCESS) { - if (IS_ALIGNED((u64)_text, min_kimg_align())) { + if (!check_image_region((u64)_text, kernel_memsize)) { + efi_err("FIRMWARE BUG: Image BSS overlaps adjacent EFI memory region\n"); + } else if (IS_ALIGNED((u64)_text, min_kimg_align)) { /* * Just execute from wherever we were loaded by the * UEFI PE/COFF loader if the alignment is suitable. @@ -103,7 +154,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, } status = efi_allocate_pages_aligned(*reserve_size, reserve_addr, - ULONG_MAX, min_kimg_align()); + ULONG_MAX, min_kimg_align); if (status != EFI_SUCCESS) { efi_err("Failed to relocate kernel\n"); diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c index aa8da0a49829..ae87dded989d 100644 --- a/drivers/firmware/efi/libstub/efi-stub-helper.c +++ b/drivers/firmware/efi/libstub/efi-stub-helper.c @@ -630,8 +630,8 @@ efi_status_t efi_load_initrd_cmdline(efi_loaded_image_t *image, * @image: EFI loaded image protocol * @load_addr: pointer to loaded initrd * @load_size: size of loaded initrd - * @soft_limit: preferred size of allocated memory for loading the initrd - * @hard_limit: minimum size of allocated memory + * @soft_limit: preferred address for loading the initrd + * @hard_limit: upper limit address for loading the initrd * * Return: status code */ diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c index a408df474d83..724155b9e10d 100644 --- a/drivers/firmware/efi/libstub/randomalloc.c +++ b/drivers/firmware/efi/libstub/randomalloc.c @@ -30,6 +30,8 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md, region_end = min(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - 1, (u64)ULONG_MAX); + if (region_end < size) + return 0; first_slot = round_up(md->phys_addr, align); last_slot = round_down(region_end - size + 1, align); diff --git a/drivers/firmware/efi/mokvar-table.c b/drivers/firmware/efi/mokvar-table.c index d8bc01340686..38722d2009e2 100644 --- a/drivers/firmware/efi/mokvar-table.c +++ b/drivers/firmware/efi/mokvar-table.c @@ -180,7 +180,10 @@ void __init efi_mokvar_table_init(void) pr_err("EFI MOKvar config table is not valid\n"); return; } - efi_mem_reserve(efi.mokvar_table, map_size_needed); + + if (md.type == EFI_BOOT_SERVICES_DATA) + efi_mem_reserve(efi.mokvar_table, map_size_needed); + efi_mokvar_table_size = map_size_needed; } diff --git a/drivers/firmware/efi/tpm.c 
b/drivers/firmware/efi/tpm.c index c1955d320fec..8f665678e9e3 100644 --- a/drivers/firmware/efi/tpm.c +++ b/drivers/firmware/efi/tpm.c @@ -62,9 +62,11 @@ int __init efi_tpm_eventlog_init(void) tbl_size = sizeof(*log_tbl) + log_tbl->size; memblock_reserve(efi.tpm_log, tbl_size); - if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR || - log_tbl->version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) { - pr_warn(FW_BUG "TPM Final Events table missing or invalid\n"); + if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR) { + pr_info("TPM Final Events table not present\n"); + goto out; + } else if (log_tbl->version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) { + pr_warn(FW_BUG "TPM Final Events table invalid\n"); goto out; } diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c index 250e01680742..4b8978b254f9 100644 --- a/drivers/firmware/raspberrypi.c +++ b/drivers/firmware/raspberrypi.c @@ -329,12 +329,18 @@ struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node) fw = platform_get_drvdata(pdev); if (!fw) - return NULL; + goto err_put_device; if (!kref_get_unless_zero(&fw->consumers)) - return NULL; + goto err_put_device; + + put_device(&pdev->dev); return fw; + +err_put_device: + put_device(&pdev->dev); + return NULL; } EXPORT_SYMBOL_GPL(rpi_firmware_get); diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c index 15b138326ecc..a3cadbaf3cba 100644 --- a/drivers/firmware/xilinx/zynqmp.c +++ b/drivers/firmware/xilinx/zynqmp.c @@ -664,7 +664,7 @@ int zynqmp_pm_write_ggs(u32 index, u32 value) EXPORT_SYMBOL_GPL(zynqmp_pm_write_ggs); /** - * zynqmp_pm_write_ggs() - PM API for reading global general storage (ggs) + * zynqmp_pm_read_ggs() - PM API for reading global general storage (ggs) * @index: GGS register index * @value: Register value to be written * @@ -697,7 +697,7 @@ int zynqmp_pm_write_pggs(u32 index, u32 value) EXPORT_SYMBOL_GPL(zynqmp_pm_write_pggs); /** - * zynqmp_pm_write_pggs() - PM API for reading persistent global general + * zynqmp_pm_read_pggs() - PM API for reading persistent global general * storage (pggs) * @index: PGGS register index * @value: Register value to be written @@ -1012,7 +1012,24 @@ int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities, EXPORT_SYMBOL_GPL(zynqmp_pm_set_requirement); /** - * zynqmp_pm_aes - Access AES hardware to encrypt/decrypt the data using + * zynqmp_pm_load_pdi - Load and process PDI + * @src: Source device where PDI is located + * @address: PDI src address + * + * This function provides support to load PDI from Linux + * + * Return: Returns status, either success or error+reason + */ +int zynqmp_pm_load_pdi(const u32 src, const u64 address) +{ + return zynqmp_pm_invoke_fn(PM_LOAD_PDI, src, + lower_32_bits(address), + upper_32_bits(address), 0, NULL); +} +EXPORT_SYMBOL_GPL(zynqmp_pm_load_pdi); + +/** + * zynqmp_pm_aes_engine - Access AES hardware to encrypt/decrypt the data using * AES-GCM core. * @address: Address of the AesParams structure. * @out: Returned output value diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig index 8cd454ee20c0..991b3f361ec9 100644 --- a/drivers/fpga/Kconfig +++ b/drivers/fpga/Kconfig @@ -119,7 +119,7 @@ config XILINX_PR_DECOUPLER depends on HAS_IOMEM help Say Y to enable drivers for Xilinx LogiCORE PR Decoupler - or Xilinx Dynamic Function eXchnage AIX Shutdown Manager. + or Xilinx Dynamic Function eXchange AXI Shutdown Manager.
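An editorial aside on the rpi_firmware_get() hunk above: the device lookup that produced pdev holds a reference, and the fix makes every exit path, success included, drop that reference exactly once. A minimal userspace sketch of the same get/put discipline, using hypothetical lookup()/get_ref()/put_ref() helpers rather than the kernel API::

    #include <stdlib.h>

    struct obj { int refs; };

    static struct obj *get_ref(struct obj *o) { o->refs++; return o; }
    static void put_ref(struct obj *o) { if (--o->refs == 0) free(o); }

    /* lookup() hands back an object with one reference already held. */
    static struct obj *lookup(void)
    {
        struct obj *o = calloc(1, sizeof(*o));
        if (o)
            o->refs = 1;
        return o;
    }

    static struct obj *consumer_get(int usable)
    {
        struct obj *o = lookup();
        if (!o)
            return NULL;
        if (!usable)
            goto err_put;   /* failure: drop only the lookup reference */
        get_ref(o);         /* take the consumer's own reference... */
        put_ref(o);         /* ...then drop the lookup reference, as the fix does */
        return o;
    err_put:
        put_ref(o);
        return NULL;
    }

    int main(void)
    {
        struct obj *o = consumer_get(1);
        if (o)
            put_ref(o);     /* consumer releases its reference when done */
        return 0;
    }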
The PR Decoupler exists in the FPGA fabric to isolate one region of the FPGA from the busses while that region is being reprogrammed during partial reconfig. @@ -234,4 +234,13 @@ config FPGA_MGR_ZYNQMP_FPGA to configure the programmable logic(PL) through PS on ZynqMP SoC. +config FPGA_MGR_VERSAL_FPGA + tristate "Xilinx Versal FPGA" + depends on ARCH_ZYNQMP || COMPILE_TEST + help + Select this option to enable FPGA manager driver support for + Xilinx Versal SoC. This driver uses the firmware interface to + configure the programmable logic(PL). + + To compile this as a module, choose M here. endif # FPGA diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile index 18dc9885883a..0bff783d1b61 100644 --- a/drivers/fpga/Makefile +++ b/drivers/fpga/Makefile @@ -18,6 +18,7 @@ obj-$(CONFIG_FPGA_MGR_TS73XX) += ts73xx-fpga.o obj-$(CONFIG_FPGA_MGR_XILINX_SPI) += xilinx-spi.o obj-$(CONFIG_FPGA_MGR_ZYNQ_FPGA) += zynq-fpga.o obj-$(CONFIG_FPGA_MGR_ZYNQMP_FPGA) += zynqmp-fpga.o +obj-$(CONFIG_FPGA_MGR_VERSAL_FPGA) += versal-fpga.o obj-$(CONFIG_ALTERA_PR_IP_CORE) += altera-pr-ip-core.o obj-$(CONFIG_ALTERA_PR_IP_CORE_PLAT) += altera-pr-ip-core-plat.o diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c index 4e0edb60bfba..ccf4546eff29 100644 --- a/drivers/fpga/altera-cvp.c +++ b/drivers/fpga/altera-cvp.c @@ -346,7 +346,7 @@ static int altera_cvp_write_init(struct fpga_manager *mgr, } if (val & VSE_CVP_STATUS_CFG_RDY) { - dev_warn(&mgr->dev, "CvP already started, teardown first\n"); + dev_warn(&mgr->dev, "CvP already started, tear down first\n"); ret = altera_cvp_teardown(mgr, info); if (ret) return ret; diff --git a/drivers/fpga/altera-freeze-bridge.c b/drivers/fpga/altera-freeze-bridge.c index dd58c4aea92e..7d22a44d652e 100644 --- a/drivers/fpga/altera-freeze-bridge.c +++ b/drivers/fpga/altera-freeze-bridge.c @@ -198,11 +198,13 @@ static const struct fpga_bridge_ops altera_freeze_br_br_ops = { .enable_show = altera_freeze_br_enable_show, }; +#ifdef CONFIG_OF static const struct of_device_id altera_freeze_br_of_match[] = { { .compatible = "altr,freeze-bridge-controller", }, {}, }; MODULE_DEVICE_TABLE(of, altera_freeze_br_of_match); +#endif static int altera_freeze_br_probe(struct platform_device *pdev) { diff --git a/drivers/fpga/dfl-fme-mgr.c b/drivers/fpga/dfl-fme-mgr.c index d5861d13b306..313420405d5e 100644 --- a/drivers/fpga/dfl-fme-mgr.c +++ b/drivers/fpga/dfl-fme-mgr.c @@ -252,11 +252,6 @@ static int fme_mgr_write_complete(struct fpga_manager *mgr, return 0; } -static enum fpga_mgr_states fme_mgr_state(struct fpga_manager *mgr) -{ - return FPGA_MGR_STATE_UNKNOWN; -} - static u64 fme_mgr_status(struct fpga_manager *mgr) { struct fme_mgr_priv *priv = mgr->priv; @@ -268,7 +263,6 @@ static const struct fpga_manager_ops fme_mgr_ops = { .write_init = fme_mgr_write_init, .write = fme_mgr_write, .write_complete = fme_mgr_write_complete, - .state = fme_mgr_state, .status = fme_mgr_status, }; diff --git a/drivers/fpga/dfl-fme-perf.c b/drivers/fpga/dfl-fme-perf.c index 4299145ef347..587c82be12f7 100644 --- a/drivers/fpga/dfl-fme-perf.c +++ b/drivers/fpga/dfl-fme-perf.c @@ -953,6 +953,8 @@ static int fme_perf_offline_cpu(unsigned int cpu, struct hlist_node *node) return 0; priv->cpu = target; + perf_pmu_migrate_context(&priv->pmu, cpu, target); + return 0; } diff --git a/drivers/fpga/dfl-fme-pr.c b/drivers/fpga/dfl-fme-pr.c index 1194c0e850e0..d61ce9a18879 100644 --- a/drivers/fpga/dfl-fme-pr.c +++ b/drivers/fpga/dfl-fme-pr.c @@ -148,7 +148,7 @@ static int fme_pr(struct platform_device *pdev, 
unsigned long arg) /* * it allows userspace to reset the PR region's logic by disabling and - * reenabling the bridge to clear things out between accleration runs. + * reenabling the bridge to clear things out between acceleration runs. * so no need to hold the bridges after partial reconfiguration. */ if (region->get_bridges) diff --git a/drivers/fpga/dfl-n3000-nios.c b/drivers/fpga/dfl-n3000-nios.c index 7a95366f6516..9ddf1d1d392f 100644 --- a/drivers/fpga/dfl-n3000-nios.c +++ b/drivers/fpga/dfl-n3000-nios.c @@ -461,7 +461,7 @@ static int n3000_nios_poll_stat_timeout(void __iomem *base, u64 *v) * We don't use the time based timeout here for performance. * * The regbus read/write is on the critical path of Intel PAC N3000 - * image programing. The time based timeout checking will add too much + * image programming. The time based timeout checking will add too much * overhead on it. Usually the state changes in 1 or 2 loops on the * test server, and we set 10000 times loop here for safety. */ diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c index b44523ea8c91..4d68719e608f 100644 --- a/drivers/fpga/dfl-pci.c +++ b/drivers/fpga/dfl-pci.c @@ -74,6 +74,9 @@ static void cci_pci_free_irq(struct pci_dev *pcidev) #define PCIE_DEVICE_ID_PF_DSC_1_X 0x09C4 #define PCIE_DEVICE_ID_INTEL_PAC_N3000 0x0B30 #define PCIE_DEVICE_ID_INTEL_PAC_D5005 0x0B2B +#define PCIE_DEVICE_ID_SILICOM_PAC_N5010 0x1000 +#define PCIE_DEVICE_ID_SILICOM_PAC_N5011 0x1001 + /* VF Device */ #define PCIE_DEVICE_ID_VF_INT_5_X 0xBCBF #define PCIE_DEVICE_ID_VF_INT_6_X 0xBCC1 @@ -90,6 +93,8 @@ static struct pci_device_id cci_pcie_id_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_N3000),}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005),}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005_VF),}, + {PCI_DEVICE(PCI_VENDOR_ID_SILICOM_DENMARK, PCIE_DEVICE_ID_SILICOM_PAC_N5010),}, + {PCI_DEVICE(PCI_VENDOR_ID_SILICOM_DENMARK, PCIE_DEVICE_ID_SILICOM_PAC_N5011),}, {0,} }; MODULE_DEVICE_TABLE(pci, cci_pcie_id_tbl); diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c index 511b20ff35a3..e73a70053906 100644 --- a/drivers/fpga/dfl.c +++ b/drivers/fpga/dfl.c @@ -381,6 +381,7 @@ dfl_dev_add(struct dfl_feature_platform_data *pdata, ddev->type = feature_dev_id_type(pdev); ddev->feature_id = feature->id; + ddev->revision = feature->revision; ddev->cdev = pdata->dfl_cdev; /* add mmio resource */ @@ -717,6 +718,7 @@ struct build_feature_devs_info { */ struct dfl_feature_info { u16 fid; + u8 revision; struct resource mmio_res; void __iomem *ioaddr; struct list_head node; @@ -796,6 +798,7 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo) /* save resource information for each feature */ feature->dev = fdev; feature->id = finfo->fid; + feature->revision = finfo->revision; /* * the FIU header feature has some fundamental functions (sriov @@ -910,19 +913,17 @@ static void build_info_free(struct build_feature_devs_info *binfo) devm_kfree(binfo->dev, binfo); } -static inline u32 feature_size(void __iomem *start) +static inline u32 feature_size(u64 value) { - u64 v = readq(start + DFH); - u32 ofst = FIELD_GET(DFH_NEXT_HDR_OFST, v); + u32 ofst = FIELD_GET(DFH_NEXT_HDR_OFST, value); /* workaround for private features with invalid size, use 4K instead */ return ofst ? 
ofst : 4096; } -static u16 feature_id(void __iomem *start) +static u16 feature_id(u64 value) { - u64 v = readq(start + DFH); - u16 id = FIELD_GET(DFH_ID, v); - u8 type = FIELD_GET(DFH_TYPE, v); + u16 id = FIELD_GET(DFH_ID, value); + u8 type = FIELD_GET(DFH_TYPE, value); if (type == DFH_TYPE_FIU) return FEATURE_ID_FIU_HEADER; @@ -1021,10 +1022,15 @@ create_feature_instance(struct build_feature_devs_info *binfo, unsigned int irq_base, nr_irqs; struct dfl_feature_info *finfo; int ret; + u8 revision; + u64 v; + + v = readq(binfo->ioaddr + ofst); + revision = FIELD_GET(DFH_REVISION, v); /* read feature size and id if inputs are invalid */ - size = size ? size : feature_size(binfo->ioaddr + ofst); - fid = fid ? fid : feature_id(binfo->ioaddr + ofst); + size = size ? size : feature_size(v); + fid = fid ? fid : feature_id(v); if (binfo->len - ofst < size) return -EINVAL; @@ -1038,6 +1044,7 @@ create_feature_instance(struct build_feature_devs_info *binfo, return -ENOMEM; finfo->fid = fid; + finfo->revision = revision; finfo->mmio_res.start = binfo->start + ofst; finfo->mmio_res.end = finfo->mmio_res.start + size - 1; finfo->mmio_res.flags = IORESOURCE_MEM; @@ -1166,7 +1173,7 @@ static int parse_feature_private(struct build_feature_devs_info *binfo, { if (!is_feature_dev_detected(binfo)) { dev_err(binfo->dev, "the private feature 0x%x does not belong to any AFU.\n", - feature_id(binfo->ioaddr + ofst)); + feature_id(readq(binfo->ioaddr + ofst))); return -EINVAL; } diff --git a/drivers/fpga/dfl.h b/drivers/fpga/dfl.h index 2b82c96ba56c..53572c7aced0 100644 --- a/drivers/fpga/dfl.h +++ b/drivers/fpga/dfl.h @@ -232,7 +232,7 @@ struct dfl_feature_irq_ctx { * @id: sub feature id. * @resource_index: each sub feature has one mmio resource for its registers. * this index is used to find its mmio resource from the - * feature dev (platform device)'s reources. + * feature dev (platform device)'s resources. * @ioaddr: mapped mmio resource address. * @irq_ctx: interrupt context list. * @nr_irqs: number of interrupt contexts. @@ -243,6 +243,7 @@ struct dfl_feature_irq_ctx { struct dfl_feature { struct platform_device *dev; u16 id; + u8 revision; int resource_index; void __iomem *ioaddr; struct dfl_feature_irq_ctx *irq_ctx; diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c index 2bfb2ff86930..798f55670646 100644 --- a/drivers/fpga/fpga-bridge.c +++ b/drivers/fpga/fpga-bridge.c @@ -228,9 +228,9 @@ EXPORT_SYMBOL_GPL(fpga_bridges_put); * @info: fpga image specific information * @bridge_list: list of FPGA bridges * - * Get an exclusive reference to the bridge and and it to the list. + * Get an exclusive reference to the bridge and add it to the list. * - * Return 0 for success, error code from of_fpga_bridge_get() othewise. + * Return 0 for success, error code from of_fpga_bridge_get() otherwise. */ int of_fpga_bridge_get_to_list(struct device_node *np, struct fpga_image_info *info, @@ -258,9 +258,9 @@ EXPORT_SYMBOL_GPL(of_fpga_bridge_get_to_list); * @info: fpga image specific information * @bridge_list: list of FPGA bridges * - * Get an exclusive reference to the bridge and and it to the list. + * Get an exclusive reference to the bridge and add it to the list. * - * Return 0 for success, error code from fpga_bridge_get() othewise. + * Return 0 for success, error code from fpga_bridge_get() otherwise.
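Worth pausing on the dfl.c refactor above: feature_size() and feature_id() now decode a header value their caller read once with readq(), instead of each helper issuing its own MMIO read. A hedged sketch of the read-once-then-decode pattern; the bit positions below are invented for illustration and are not the real DFH layout::

    #include <stdint.h>
    #include <stdio.h>

    #define HDR_ID(v)        ((uint16_t)((v) & 0xfff))            /* bits 11:0  */
    #define HDR_REVISION(v)  ((uint8_t)(((v) >> 12) & 0xf))       /* bits 15:12 */
    #define HDR_NEXT_OFST(v) ((uint32_t)(((v) >> 16) & 0xffffff)) /* bits 39:16 */

    /* One (potentially slow) device access... */
    static uint64_t read_header(const volatile uint64_t *mmio)
    {
        return *mmio;
    }

    int main(void)
    {
        uint64_t fake_reg = (0x40ULL << 16) | (0x2ULL << 12) | 0x123;
        uint64_t v = read_header(&fake_reg);

        /* ...then purely arithmetic decoding of the cached value. */
        printf("id=0x%x rev=%u next=0x%x\n",
               HDR_ID(v), HDR_REVISION(v), HDR_NEXT_OFST(v));
        return 0;
    }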
*/ int fpga_bridge_get_to_list(struct device *dev, struct fpga_image_info *info, diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c index ecb4c3c795fa..aa30889e2320 100644 --- a/drivers/fpga/fpga-mgr.c +++ b/drivers/fpga/fpga-mgr.c @@ -25,6 +25,72 @@ struct fpga_mgr_devres { struct fpga_manager *mgr; }; +static inline void fpga_mgr_fpga_remove(struct fpga_manager *mgr) +{ + if (mgr->mops->fpga_remove) + mgr->mops->fpga_remove(mgr); +} + +static inline enum fpga_mgr_states fpga_mgr_state(struct fpga_manager *mgr) +{ + if (mgr->mops->state) + return mgr->mops->state(mgr); + return FPGA_MGR_STATE_UNKNOWN; +} + +static inline u64 fpga_mgr_status(struct fpga_manager *mgr) +{ + if (mgr->mops->status) + return mgr->mops->status(mgr); + return 0; +} + +static inline int fpga_mgr_write(struct fpga_manager *mgr, const char *buf, size_t count) +{ + if (mgr->mops->write) + return mgr->mops->write(mgr, buf, count); + return -EOPNOTSUPP; +} + +/* + * After all the FPGA image has been written, do the device specific steps to + * finish and set the FPGA into operating mode. + */ +static inline int fpga_mgr_write_complete(struct fpga_manager *mgr, + struct fpga_image_info *info) +{ + int ret = 0; + + mgr->state = FPGA_MGR_STATE_WRITE_COMPLETE; + if (mgr->mops->write_complete) + ret = mgr->mops->write_complete(mgr, info); + if (ret) { + dev_err(&mgr->dev, "Error after writing image data to FPGA\n"); + mgr->state = FPGA_MGR_STATE_WRITE_COMPLETE_ERR; + return ret; + } + mgr->state = FPGA_MGR_STATE_OPERATING; + + return 0; +} + +static inline int fpga_mgr_write_init(struct fpga_manager *mgr, + struct fpga_image_info *info, + const char *buf, size_t count) +{ + if (mgr->mops->write_init) + return mgr->mops->write_init(mgr, info, buf, count); + return 0; +} + +static inline int fpga_mgr_write_sg(struct fpga_manager *mgr, + struct sg_table *sgt) +{ + if (mgr->mops->write_sg) + return mgr->mops->write_sg(mgr, sgt); + return -EOPNOTSUPP; +} + /** * fpga_image_info_alloc - Allocate an FPGA image info struct * @dev: owning device @@ -83,9 +149,9 @@ static int fpga_mgr_write_init_buf(struct fpga_manager *mgr, mgr->state = FPGA_MGR_STATE_WRITE_INIT; if (!mgr->mops->initial_header_size) - ret = mgr->mops->write_init(mgr, info, NULL, 0); + ret = fpga_mgr_write_init(mgr, info, NULL, 0); else - ret = mgr->mops->write_init( + ret = fpga_mgr_write_init( mgr, info, buf, min(mgr->mops->initial_header_size, count)); if (ret) { @@ -137,27 +203,6 @@ static int fpga_mgr_write_init_sg(struct fpga_manager *mgr, return ret; } -/* - * After all the FPGA image has been written, do the device specific steps to - * finish and set the FPGA into operating mode. - */ -static int fpga_mgr_write_complete(struct fpga_manager *mgr, - struct fpga_image_info *info) -{ - int ret; - - mgr->state = FPGA_MGR_STATE_WRITE_COMPLETE; - ret = mgr->mops->write_complete(mgr, info); - if (ret) { - dev_err(&mgr->dev, "Error after writing image data to FPGA\n"); - mgr->state = FPGA_MGR_STATE_WRITE_COMPLETE_ERR; - return ret; - } - mgr->state = FPGA_MGR_STATE_OPERATING; - - return 0; -} - /** * fpga_mgr_buf_load_sg - load fpga from image in buffer from a scatter list * @mgr: fpga manager @@ -188,13 +233,13 @@ static int fpga_mgr_buf_load_sg(struct fpga_manager *mgr, /* Write the FPGA image to the FPGA. 
*/ mgr->state = FPGA_MGR_STATE_WRITE; if (mgr->mops->write_sg) { - ret = mgr->mops->write_sg(mgr, sgt); + ret = fpga_mgr_write_sg(mgr, sgt); } else { struct sg_mapping_iter miter; sg_miter_start(&miter, sgt->sgl, sgt->nents, SG_MITER_FROM_SG); while (sg_miter_next(&miter)) { - ret = mgr->mops->write(mgr, miter.addr, miter.length); + ret = fpga_mgr_write(mgr, miter.addr, miter.length); if (ret) break; } @@ -224,7 +269,7 @@ static int fpga_mgr_buf_load_mapped(struct fpga_manager *mgr, * Write the FPGA image to the FPGA. */ mgr->state = FPGA_MGR_STATE_WRITE; - ret = mgr->mops->write(mgr, buf, count); + ret = fpga_mgr_write(mgr, buf, count); if (ret) { dev_err(&mgr->dev, "Error while writing image data to FPGA\n"); mgr->state = FPGA_MGR_STATE_WRITE_ERR; @@ -417,10 +462,7 @@ static ssize_t status_show(struct device *dev, u64 status; int len = 0; - if (!mgr->mops->status) - return -ENOENT; - - status = mgr->mops->status(mgr); + status = fpga_mgr_status(mgr); if (status & FPGA_MGR_STATUS_OPERATION_ERR) len += sprintf(buf + len, "reconfig operation error\n"); @@ -568,9 +610,7 @@ struct fpga_manager *fpga_mgr_create(struct device *parent, const char *name, struct fpga_manager *mgr; int id, ret; - if (!mops || !mops->write_complete || !mops->state || - !mops->write_init || (!mops->write && !mops->write_sg) || - (mops->write && mops->write_sg)) { + if (!mops) { dev_err(parent, "Attempt to register without fpga_manager_ops\n"); return NULL; } @@ -688,7 +728,7 @@ int fpga_mgr_register(struct fpga_manager *mgr) * from device. FPGA may be in reset mode or may have been programmed * by bootloader or EEPROM. */ - mgr->state = mgr->mops->state(mgr); + mgr->state = fpga_mgr_state(mgr); ret = device_add(&mgr->dev); if (ret) @@ -719,8 +759,7 @@ void fpga_mgr_unregister(struct fpga_manager *mgr) * If the low level driver provides a method for putting fpga into * a desired state upon unregister, do it. 
*/ - if (mgr->mops->fpga_remove) - mgr->mops->fpga_remove(mgr); + fpga_mgr_fpga_remove(mgr); device_unregister(&mgr->dev); } diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c index a2cea500f7cc..047fd7f23706 100644 --- a/drivers/fpga/stratix10-soc.c +++ b/drivers/fpga/stratix10-soc.c @@ -388,13 +388,7 @@ static int s10_ops_write_complete(struct fpga_manager *mgr, return ret; } -static enum fpga_mgr_states s10_ops_state(struct fpga_manager *mgr) -{ - return FPGA_MGR_STATE_UNKNOWN; -} - static const struct fpga_manager_ops s10_ops = { - .state = s10_ops_state, .write_init = s10_ops_write_init, .write = s10_ops_write, .write_complete = s10_ops_write_complete, diff --git a/drivers/fpga/ts73xx-fpga.c b/drivers/fpga/ts73xx-fpga.c index 101f016c6ed8..167abb0b08d4 100644 --- a/drivers/fpga/ts73xx-fpga.c +++ b/drivers/fpga/ts73xx-fpga.c @@ -32,11 +32,6 @@ struct ts73xx_fpga_priv { struct device *dev; }; -static enum fpga_mgr_states ts73xx_fpga_state(struct fpga_manager *mgr) -{ - return FPGA_MGR_STATE_UNKNOWN; -} - static int ts73xx_fpga_write_init(struct fpga_manager *mgr, struct fpga_image_info *info, const char *buf, size_t count) @@ -98,7 +93,6 @@ static int ts73xx_fpga_write_complete(struct fpga_manager *mgr, } static const struct fpga_manager_ops ts73xx_fpga_ops = { - .state = ts73xx_fpga_state, .write_init = ts73xx_fpga_write_init, .write = ts73xx_fpga_write, .write_complete = ts73xx_fpga_write_complete, diff --git a/drivers/fpga/versal-fpga.c b/drivers/fpga/versal-fpga.c new file mode 100644 index 000000000000..5b0dda304bd2 --- /dev/null +++ b/drivers/fpga/versal-fpga.c @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019-2021 Xilinx, Inc. + */ + +#include <linux/dma-mapping.h> +#include <linux/fpga/fpga-mgr.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/string.h> +#include <linux/firmware/xlnx-zynqmp.h> + +static int versal_fpga_ops_write_init(struct fpga_manager *mgr, + struct fpga_image_info *info, + const char *buf, size_t size) +{ + return 0; +} + +static int versal_fpga_ops_write(struct fpga_manager *mgr, + const char *buf, size_t size) +{ + dma_addr_t dma_addr = 0; + char *kbuf; + int ret; + + kbuf = dma_alloc_coherent(mgr->dev.parent, size, &dma_addr, GFP_KERNEL); + if (!kbuf) + return -ENOMEM; + + memcpy(kbuf, buf, size); + ret = zynqmp_pm_load_pdi(PDI_SRC_DDR, dma_addr); + dma_free_coherent(mgr->dev.parent, size, kbuf, dma_addr); + + return ret; +} + +static const struct fpga_manager_ops versal_fpga_ops = { + .write_init = versal_fpga_ops_write_init, + .write = versal_fpga_ops_write, +}; + +static int versal_fpga_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct fpga_manager *mgr; + int ret; + + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (ret < 0) { + dev_err(dev, "no usable DMA configuration\n"); + return ret; + } + + mgr = devm_fpga_mgr_create(dev, "Xilinx Versal FPGA Manager", + &versal_fpga_ops, NULL); + if (!mgr) + return -ENOMEM; + + return devm_fpga_mgr_register(dev, mgr); +} + +static const struct of_device_id versal_fpga_of_match[] = { + { .compatible = "xlnx,versal-fpga", }, + {}, +}; +MODULE_DEVICE_TABLE(of, versal_fpga_of_match); + +static struct platform_driver versal_fpga_driver = { + .probe = versal_fpga_probe, + .driver = { + .name = "versal_fpga_manager", + .of_match_table = of_match_ptr(versal_fpga_of_match), + }, +}; +module_platform_driver(versal_fpga_driver); + 
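The fpga-mgr.c hunk earlier in the series wraps every fpga_manager_ops callback in a NULL-tolerant helper with a sane default, which is what allows the Versal driver above to register only write_init/write and lets other drivers delete their stub state/write_complete callbacks. A standalone sketch of that dispatch pattern, with illustrative names rather than the kernel structs::

    #include <errno.h>
    #include <stddef.h>
    #include <stdio.h>

    enum state { STATE_UNKNOWN, STATE_OPERATING };

    struct ops {
        enum state (*state)(void *priv);                      /* optional */
        int (*write)(void *priv, const char *buf, size_t n);  /* optional */
    };

    static enum state mgr_state(const struct ops *o, void *priv)
    {
        return o->state ? o->state(priv) : STATE_UNKNOWN;
    }

    static int mgr_write(const struct ops *o, void *priv,
                         const char *buf, size_t n)
    {
        return o->write ? o->write(priv, buf, n) : -EOPNOTSUPP;
    }

    int main(void)
    {
        struct ops bare = { 0 };  /* a driver that provides no callbacks */

        /* Defaults kick in instead of a NULL dereference. */
        printf("state=%d write=%d\n",
               mgr_state(&bare, NULL), mgr_write(&bare, NULL, "x", 1));
        return 0;
    }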
+MODULE_AUTHOR("Nava kishore Manne <nava.manne@xilinx.com>"); +MODULE_AUTHOR("Appana Durga Kedareswara rao <appanad.durga.rao@xilinx.com>"); +MODULE_DESCRIPTION("Xilinx Versal FPGA Manager"); +MODULE_LICENSE("GPL"); diff --git a/drivers/fpga/xilinx-pr-decoupler.c b/drivers/fpga/xilinx-pr-decoupler.c index ea2bde6e5bc4..e986ed47c4ed 100644 --- a/drivers/fpga/xilinx-pr-decoupler.c +++ b/drivers/fpga/xilinx-pr-decoupler.c @@ -81,6 +81,7 @@ static const struct fpga_bridge_ops xlnx_pr_decoupler_br_ops = { .enable_show = xlnx_pr_decoupler_enable_show, }; +#ifdef CONFIG_OF static const struct xlnx_config_data decoupler_config = { .name = "Xilinx PR Decoupler", }; @@ -99,6 +100,7 @@ static const struct of_device_id xlnx_pr_decoupler_of_match[] = { {}, }; MODULE_DEVICE_TABLE(of, xlnx_pr_decoupler_of_match); +#endif static int xlnx_pr_decoupler_probe(struct platform_device *pdev) { diff --git a/drivers/fpga/xilinx-spi.c b/drivers/fpga/xilinx-spi.c index fee4d0abf6bf..b6bcf1d9233d 100644 --- a/drivers/fpga/xilinx-spi.c +++ b/drivers/fpga/xilinx-spi.c @@ -256,11 +256,13 @@ static int xilinx_spi_probe(struct spi_device *spi) return devm_fpga_mgr_register(&spi->dev, mgr); } +#ifdef CONFIG_OF static const struct of_device_id xlnx_spi_of_match[] = { { .compatible = "xlnx,fpga-slave-serial", }, {} }; MODULE_DEVICE_TABLE(of, xlnx_spi_of_match); +#endif static struct spi_driver xilinx_slave_spi_driver = { .driver = { diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c index 07fa8d9ec675..9b75bd4f93d8 100644 --- a/drivers/fpga/zynq-fpga.c +++ b/drivers/fpga/zynq-fpga.c @@ -192,7 +192,7 @@ static void zynq_step_dma(struct zynq_fpga_priv *priv) /* Once the first transfer is queued we can turn on the ISR, future * calls to zynq_step_dma will happen from the ISR context. The - * dma_lock spinlock guarentees this handover is done coherently, the + * dma_lock spinlock guarantees this handover is done coherently, the * ISR enable is put at the end to avoid another CPU spinning in the * ISR on this lock. 
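Since the zynq comment above describes handing work from process context to the ISR under dma_lock, here is a rough userspace analog: the setup path queues the first transfer and enables the "interrupt" last, all inside one critical section, so the handler never sees half-initialized state and never spins on a lock the setup path still holds. Threads stand in for CPUs; this is purely illustrative::

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t ready = PTHREAD_COND_INITIALIZER;
    static int queued;       /* protected by lock */
    static int irq_enabled;  /* protected by lock */

    static void *isr_thread(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        while (!irq_enabled)
            pthread_cond_wait(&ready, &lock);  /* sleep, don't spin */
        printf("isr: sees %d queued transfer(s)\n", queued);
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, isr_thread, NULL);

        pthread_mutex_lock(&lock);
        queued = 1;       /* queue the first transfer... */
        irq_enabled = 1;  /* ...and enable the ISR only at the end */
        pthread_cond_signal(&ready);
        pthread_mutex_unlock(&lock);

        pthread_join(t, NULL);
        return 0;
    }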
*/ @@ -267,7 +267,7 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr, ctrl = zynq_fpga_read(priv, CTRL_OFFSET); if (!(ctrl & CTRL_SEC_EN_MASK)) { dev_err(&mgr->dev, - "System not secure, can't use crypted bitstreams\n"); + "System not secure, can't use encrypted bitstreams\n"); err = -EINVAL; goto out_err; } @@ -344,7 +344,7 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr, /* set configuration register with following options: * - enable PCAP interface - * - set throughput for maximum speed (if bistream not crypted) + * - set throughput for maximum speed (if bitstream not encrypted) * - set CPU in user mode */ ctrl = zynq_fpga_read(priv, CTRL_OFFSET); diff --git a/drivers/fpga/zynqmp-fpga.c b/drivers/fpga/zynqmp-fpga.c index 125743c9797f..7d3d5650c322 100644 --- a/drivers/fpga/zynqmp-fpga.c +++ b/drivers/fpga/zynqmp-fpga.c @@ -66,12 +66,6 @@ static int zynqmp_fpga_ops_write(struct fpga_manager *mgr, return ret; } -static int zynqmp_fpga_ops_write_complete(struct fpga_manager *mgr, - struct fpga_image_info *info) -{ - return 0; -} - static enum fpga_mgr_states zynqmp_fpga_ops_state(struct fpga_manager *mgr) { u32 status = 0; @@ -87,7 +81,6 @@ static const struct fpga_manager_ops zynqmp_fpga_ops = { .state = zynqmp_fpga_ops_state, .write_init = zynqmp_fpga_ops_write_init, .write = zynqmp_fpga_ops_write, - .write_complete = zynqmp_fpga_ops_write_complete, }; static int zynqmp_fpga_probe(struct platform_device *pdev) @@ -110,12 +103,13 @@ static int zynqmp_fpga_probe(struct platform_device *pdev) return devm_fpga_mgr_register(dev, mgr); } +#ifdef CONFIG_OF static const struct of_device_id zynqmp_fpga_of_match[] = { { .compatible = "xlnx,zynqmp-pcap-fpga", }, {}, }; - MODULE_DEVICE_TABLE(of, zynqmp_fpga_of_match); +MODULE_DEVICE_TABLE(of, zynqmp_fpga_of_match); +#endif static struct platform_driver zynqmp_fpga_driver = { .probe = zynqmp_fpga_probe, diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c index 4b9157a69fca..50b321a1ab1b 100644 --- a/drivers/gpio/gpio-mpc8xxx.c +++ b/drivers/gpio/gpio-mpc8xxx.c @@ -405,7 +405,7 @@ static int mpc8xxx_probe(struct platform_device *pdev) ret = devm_request_irq(&pdev->dev, mpc8xxx_gc->irqn, mpc8xxx_gpio_irq_cascade, - IRQF_SHARED, "gpio-cascade", + IRQF_NO_THREAD | IRQF_SHARED, "gpio-cascade", mpc8xxx_gc); if (ret) { dev_err(&pdev->dev, diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c index 5022e0ad0fae..0f5d17f343f1 100644 --- a/drivers/gpio/gpio-tqmx86.c +++ b/drivers/gpio/gpio-tqmx86.c @@ -238,8 +238,8 @@ static int tqmx86_gpio_probe(struct platform_device *pdev) struct resource *res; int ret, irq; - irq = platform_get_irq(pdev, 0); - if (irq < 0) + irq = platform_get_irq_optional(pdev, 0); + if (irq < 0 && irq != -ENXIO) return irq; res = platform_get_resource(pdev, IORESOURCE_IO, 0); @@ -278,7 +278,7 @@ static int tqmx86_gpio_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); - if (irq) { + if (irq > 0) { struct irq_chip *irq_chip = &gpio->irq_chip; u8 irq_status; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index c0316eaba547..8ac6eb9f1fdb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -619,6 +619,13 @@ struct amdgpu_video_codec_info { u32 max_level; }; +#define codec_info_build(type, width, height, level) \ + .codec_type = type,\ + .max_width = width,\ + .max_height = height,\ + .max_pixels_per_frame = height * width,\ + .max_level = level, + struct amdgpu_video_codecs { const u32 codec_count; const struct
amdgpu_video_codec_info *codec_array; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 84a1b4bc9bb4..4137e848f6a2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -26,6 +26,7 @@ #include <linux/slab.h> #include <linux/power_supply.h> #include <linux/pm_runtime.h> +#include <linux/suspend.h> #include <acpi/video.h> #include <acpi/actbl.h> @@ -1039,10 +1040,10 @@ void amdgpu_acpi_detect(void) */ bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev) { -#if defined(CONFIG_AMD_PMC) || defined(CONFIG_AMD_PMC_MODULE) +#if IS_ENABLED(CONFIG_AMD_PMC) && IS_ENABLED(CONFIG_PM_SLEEP) if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) { if (adev->flags & AMD_IS_APU) - return true; + return pm_suspend_target_state == PM_SUSPEND_TO_IDLE; } #endif return false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index db16b3e83694..cf62f43a03da 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -269,7 +269,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv, uint64_t *size); int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( - struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv, bool *table_freed); + struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv); int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv); int amdgpu_amdkfd_gpuvm_sync_memory( diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 3b8e1ee8c475..4fb15750b9bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1057,8 +1057,7 @@ static void unmap_bo_from_gpuvm(struct kgd_mem *mem, static int update_gpuvm_pte(struct kgd_mem *mem, struct kfd_mem_attachment *entry, - struct amdgpu_sync *sync, - bool *table_freed) + struct amdgpu_sync *sync) { struct amdgpu_bo_va *bo_va = entry->bo_va; struct amdgpu_device *adev = entry->adev; @@ -1069,7 +1068,7 @@ static int update_gpuvm_pte(struct kgd_mem *mem, return ret; /* Update the page tables */ - ret = amdgpu_vm_bo_update(adev, bo_va, false, table_freed); + ret = amdgpu_vm_bo_update(adev, bo_va, false); if (ret) { pr_err("amdgpu_vm_bo_update failed\n"); return ret; @@ -1081,8 +1080,7 @@ static int update_gpuvm_pte(struct kgd_mem *mem, static int map_bo_to_gpuvm(struct kgd_mem *mem, struct kfd_mem_attachment *entry, struct amdgpu_sync *sync, - bool no_update_pte, - bool *table_freed) + bool no_update_pte) { int ret; @@ -1099,7 +1097,7 @@ static int map_bo_to_gpuvm(struct kgd_mem *mem, if (no_update_pte) return 0; - ret = update_gpuvm_pte(mem, entry, sync, table_freed); + ret = update_gpuvm_pte(mem, entry, sync); if (ret) { pr_err("update_gpuvm_pte() failed\n"); goto update_gpuvm_pte_failed; @@ -1393,8 +1391,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM; alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE; alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ? 
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : - AMDGPU_GEM_CREATE_NO_CPU_ACCESS; + AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0; } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) { domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT; alloc_flags = 0; @@ -1597,8 +1594,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( } int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( - struct kgd_dev *kgd, struct kgd_mem *mem, - void *drm_priv, bool *table_freed) + struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv) { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); @@ -1686,7 +1682,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( entry->va, entry->va + bo_size, entry); ret = map_bo_to_gpuvm(mem, entry, ctx.sync, - is_invalid_userptr, table_freed); + is_invalid_userptr); if (ret) { pr_err("Failed to map bo to gpuvm\n"); goto out_unreserve; @@ -2136,7 +2132,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info) continue; kfd_mem_dmaunmap_attachment(mem, attachment); - ret = update_gpuvm_pte(mem, attachment, &sync, NULL); + ret = update_gpuvm_pte(mem, attachment, &sync); if (ret) { pr_err("%s: update PTE failed\n", __func__); /* make sure this gets validated again */ @@ -2342,7 +2338,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) continue; kfd_mem_dmaunmap_attachment(mem, attachment); - ret = update_gpuvm_pte(mem, attachment, &sync_obj, NULL); + ret = update_gpuvm_pte(mem, attachment, &sync_obj); if (ret) { pr_debug("Memory eviction: update PTE failed. Try again\n"); goto validate_map_fail; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c index 3b5d13189073..8f53837d4d3e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c @@ -468,6 +468,46 @@ bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *ade return (fw_cap & ATOM_FIRMWARE_CAP_DYNAMIC_BOOT_CFG_ENABLE) ? 
true : false; } +/* + * Helper function to query RAS EEPROM address + * + * @adev: amdgpu_device pointer + * + * Return true if vbios supports ras rom address reporting + */ +bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, uint8_t* i2c_address) +{ + struct amdgpu_mode_info *mode_info = &adev->mode_info; + int index; + u16 data_offset, size; + union firmware_info *firmware_info; + u8 frev, crev; + + if (i2c_address == NULL) + return false; + + *i2c_address = 0; + + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, + firmwareinfo); + + if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, + index, &size, &frev, &crev, &data_offset)) { + /* support firmware_info 3.4 + */ + if ((frev == 3 && crev >=4) || (frev > 3)) { + firmware_info = (union firmware_info *) + (mode_info->atom_context->bios + data_offset); + *i2c_address = firmware_info->v34.ras_rom_i2c_slave_addr; + } + } + + if (*i2c_address != 0) + return true; + + return false; +} + + union smu_info { struct atom_smu_info_v3_1 v31; }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h index 1bbbb195015d..751248b253de 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h @@ -36,6 +36,7 @@ int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev); int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev); bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev); bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev); +bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, uint8_t* i2c_address); bool amdgpu_atomfirmware_mem_training_supported(struct amdgpu_device *adev); bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *adev); int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 76fe5b71e35d..30fa1f61e0e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -781,7 +781,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) if (r) return r; - r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false, NULL); + r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false); if (r) return r; @@ -792,7 +792,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) { bo_va = fpriv->csa_va; BUG_ON(!bo_va); - r = amdgpu_vm_bo_update(adev, bo_va, false, NULL); + r = amdgpu_vm_bo_update(adev, bo_va, false); if (r) return r; @@ -811,7 +811,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) if (bo_va == NULL) continue; - r = amdgpu_vm_bo_update(adev, bo_va, false, NULL); + r = amdgpu_vm_bo_update(adev, bo_va, false); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index d303e88e3c23..f3fd5ec710b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3504,13 +3504,13 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = amdgpu_device_get_job_timeout_settings(adev); if (r) { dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n"); - goto failed_unmap; + return r; } /* early init functions */ r = amdgpu_device_ip_early_init(adev); if (r) - goto failed_unmap; + return r; /* doorbell bar mapping and doorbell index init*/ 
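One detail of amdgpu_atomfirmware_ras_rom_addr() above is worth calling out: the RAS ROM address field only exists from firmware_info v3.4 onward, so the gate has to accept both "same major, newer-or-equal minor" and "any newer major". A tiny sketch of that predicate, since the compound comparison is an easy place for off-by-one mistakes::

    #include <stdbool.h>
    #include <stdio.h>

    /* True when (frev, crev) is at least version maj.min. */
    static bool at_least(unsigned frev, unsigned crev,
                         unsigned maj, unsigned min)
    {
        return frev > maj || (frev == maj && crev >= min);
    }

    int main(void)
    {
        /* The field is assumed present from 3.4 on in this sketch. */
        printf("%d %d %d %d\n",
               at_least(3, 4, 3, 4),   /* 1: exactly 3.4 */
               at_least(3, 5, 3, 4),   /* 1: newer minor */
               at_least(4, 0, 3, 4),   /* 1: newer major */
               at_least(3, 3, 3, 4));  /* 0: too old    */
        return 0;
    }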
amdgpu_device_doorbell_init(adev); @@ -3736,10 +3736,6 @@ release_ras_con: failed: amdgpu_vf_error_trans_all(adev); -failed_unmap: - iounmap(adev->rmmio); - adev->rmmio = NULL; - return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 43e7b61d1c5c..ada7bc19118a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -299,6 +299,9 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) ip->major, ip->minor, ip->revision); + if (le16_to_cpu(ip->hw_id) == VCN_HWID) + adev->vcn.num_vcn_inst++; + for (k = 0; k < num_base_address; k++) { /* * convert the endianness of base addresses in place, @@ -385,7 +388,7 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev) { struct binary_header *bhdr; struct harvest_table *harvest_info; - int i; + int i, vcn_harvest_count = 0; bhdr = (struct binary_header *)adev->mman.discovery_bin; harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + @@ -397,8 +400,7 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev) switch (le32_to_cpu(harvest_info->list[i].hw_id)) { case VCN_HWID: - adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK; - adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK; + vcn_harvest_count++; break; case DMU_HWID: adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK; @@ -407,6 +409,10 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev) break; } } + if (vcn_harvest_count == adev->vcn.num_vcn_inst) { + adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK; + adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK; + } } int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 71beb0db0125..971c5b8e75dc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1168,6 +1168,7 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14}, /* Renoir */ + {0x1002, 0x15E7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU}, {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU}, {0x1002, 0x1638, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU}, {0x1002, 0x164C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU}, @@ -1189,6 +1190,10 @@ static const struct pci_device_id pciidlist[] = { /* Van Gogh */ {0x1002, 0x163F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VANGOGH|AMD_IS_APU}, + /* Yellow Carp */ + {0x1002, 0x164D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_YELLOW_CARP|AMD_IS_APU}, + {0x1002, 0x1681, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_YELLOW_CARP|AMD_IS_APU}, + /* Navy_Flounder */ {0x1002, 0x73C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER}, {0x1002, 0x73C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER}, @@ -1208,6 +1213,13 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x740F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT}, {0x1002, 0x7410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT}, + /* BEIGE_GOBY */ + {0x1002, 0x7420, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY}, + {0x1002, 0x7421, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY}, + {0x1002, 0x7422, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY}, + {0x1002, 0x7423, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY}, + {0x1002, 0x743F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY}, + {0, 0, 0} }; @@ -1559,6 +1571,8 @@ static int amdgpu_pmops_runtime_suspend(struct device 
*dev) pci_ignore_hotplug(pdev); pci_set_power_state(pdev, PCI_D3cold); drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF; + } else if (amdgpu_device_supports_boco(drm_dev)) { + /* nothing to do */ } else if (amdgpu_device_supports_baco(drm_dev)) { amdgpu_device_baco_enter(drm_dev); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index b3404c43a911..854fc497844b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -255,6 +255,15 @@ static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_str if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) return -EPERM; + /* Workaround for Thunk bug creating PROT_NONE,MAP_PRIVATE mappings + * for debugger access to invisible VRAM. Should have used MAP_SHARED + * instead. Clearing VM_MAYWRITE prevents the mapping from ever + * becoming writable and makes is_cow_mapping(vm_flags) false. + */ + if (is_cow_mapping(vma->vm_flags) && + !(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))) + vma->vm_flags &= ~VM_MAYWRITE; + return drm_gem_ttm_mmap(obj, vma); } @@ -612,7 +621,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, if (operation == AMDGPU_VA_OP_MAP || operation == AMDGPU_VA_OP_REPLACE) { - r = amdgpu_vm_bo_update(adev, bo_va, false, NULL); + r = amdgpu_vm_bo_update(adev, bo_va, false); if (r) goto error; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 32ce0e679dc7..83af307e97cd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -278,6 +278,21 @@ static bool amdgpu_msi_ok(struct amdgpu_device *adev) return true; } +static void amdgpu_restore_msix(struct amdgpu_device *adev) +{ + u16 ctrl; + + pci_read_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl); + if (!(ctrl & PCI_MSIX_FLAGS_ENABLE)) + return; + + /* VF FLR */ + ctrl &= ~PCI_MSIX_FLAGS_ENABLE; + pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl); + ctrl |= PCI_MSIX_FLAGS_ENABLE; + pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl); +} + /** * amdgpu_irq_init - initialize interrupt handling * @@ -569,6 +584,9 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev) { int i, j, k; + if (amdgpu_sriov_vf(adev)) + amdgpu_restore_msix(adev); + for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) { if (!adev->irq.client[i].sources) continue; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index c13b02caf8c3..fc66aca28594 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -809,7 +809,7 @@ static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev, /* query/inject/cure begin */ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, - struct ras_query_if *info) + struct ras_query_if *info) { struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); struct ras_err_data err_data = {0, 0, 0, NULL}; @@ -1043,17 +1043,32 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev, return ret; } -/* get the total error counts on all IPs */ -void amdgpu_ras_query_error_count(struct amdgpu_device *adev, - unsigned long *ce_count, - unsigned long *ue_count) +/** + * amdgpu_ras_query_error_count -- Get error counts of all IPs + * @adev: pointer to AMD GPU device + * @ce_count: pointer to an integer to be set to the count of correctable errors.
+ * @ue_count: pointer to an integer to be set to the count of uncorrectable + * errors. + * + * If @ce_count or @ue_count is set, count and return the corresponding + * error counts in those integer pointers. Return 0 if the device + * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS. + */ +int amdgpu_ras_query_error_count(struct amdgpu_device *adev, + unsigned long *ce_count, + unsigned long *ue_count) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_manager *obj; unsigned long ce, ue; if (!adev->ras_enabled || !con) - return; + return -EOPNOTSUPP; + + /* Don't count since no reporting. + */ + if (!ce_count && !ue_count) + return 0; ce = 0; ue = 0; @@ -1061,9 +1076,11 @@ void amdgpu_ras_query_error_count(struct amdgpu_device *adev, struct ras_query_if info = { .head = obj->head, }; + int res; - if (amdgpu_ras_query_error_status(adev, &info)) - return; + res = amdgpu_ras_query_error_status(adev, &info); + if (res) + return res; ce += info.ce_count; ue += info.ue_count; @@ -1074,6 +1091,8 @@ void amdgpu_ras_query_error_count(struct amdgpu_device *adev, if (ue_count) *ue_count = ue; + + return 0; } /* query/inject/cure end */ @@ -2137,9 +2156,10 @@ static void amdgpu_ras_counte_dw(struct work_struct *work) /* Cache new values. */ - amdgpu_ras_query_error_count(adev, &ce_count, &ue_count); - atomic_set(&con->ras_ce_count, ce_count); - atomic_set(&con->ras_ue_count, ue_count); + if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) { + atomic_set(&con->ras_ce_count, ce_count); + atomic_set(&con->ras_ue_count, ue_count); + } pm_runtime_mark_last_busy(dev->dev); Out: @@ -2312,9 +2332,10 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev, /* Those are the cached values at init. */ - amdgpu_ras_query_error_count(adev, &ce_count, &ue_count); - atomic_set(&con->ras_ce_count, ce_count); - atomic_set(&con->ras_ue_count, ue_count); + if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) { + atomic_set(&con->ras_ce_count, ce_count); + atomic_set(&con->ras_ue_count, ue_count); + } return 0; cleanup: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index 256cea5d34f2..b504ed8c9b50 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -490,9 +490,9 @@ int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev, void amdgpu_ras_resume(struct amdgpu_device *adev); void amdgpu_ras_suspend(struct amdgpu_device *adev); -void amdgpu_ras_query_error_count(struct amdgpu_device *adev, - unsigned long *ce_count, - unsigned long *ue_count); +int amdgpu_ras_query_error_count(struct amdgpu_device *adev, + unsigned long *ce_count, + unsigned long *ue_count); /* error handling functions */ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c index f40c871da0c6..38222de921d1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c @@ -26,6 +26,7 @@ #include "amdgpu_ras.h" #include <linux/bits.h> #include "atom.h" +#include "amdgpu_atomfirmware.h" #define EEPROM_I2C_TARGET_ADDR_VEGA20 0xA0 #define EEPROM_I2C_TARGET_ADDR_ARCTURUS 0xA8 @@ -96,6 +97,9 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev, if (!i2c_addr) return false; + if (amdgpu_atomfirmware_ras_rom_addr(adev, (uint8_t*)i2c_addr)) + return true; + switch (adev->asic_type) { case CHIP_VEGA20: *i2c_addr =
EEPROM_I2C_TARGET_ADDR_VEGA20; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h index 59e0fefb15aa..acfa207cf970 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h @@ -54,11 +54,12 @@ static inline void amdgpu_res_first(struct ttm_resource *res, { struct drm_mm_node *node; - if (!res) { + if (!res || res->mem_type == TTM_PL_SYSTEM) { cur->start = start; cur->size = size; cur->remaining = size; cur->node = NULL; + WARN_ON(res && start + size > res->num_pages << PAGE_SHIFT); return; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 79cfa2d68487..078c068937fe 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1758,7 +1758,7 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, r = vm->update_funcs->commit(¶ms, fence); if (table_freed) - *table_freed = *table_freed || params.table_freed; + *table_freed = params.table_freed; error_unlock: amdgpu_vm_eviction_unlock(vm); @@ -1816,7 +1816,6 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem, * @adev: amdgpu_device pointer * @bo_va: requested BO and VM object * @clear: if true clear the entries - * @table_freed: return true if page table is freed * * Fill in the page table entries for @bo_va. * @@ -1824,7 +1823,7 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem, * 0 for success, -EINVAL for failure. */ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, - bool clear, bool *table_freed) + bool clear) { struct amdgpu_bo *bo = bo_va->base.bo; struct amdgpu_vm *vm = bo_va->base.vm; @@ -1903,7 +1902,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, resv, mapping->start, mapping->last, update_flags, mapping->offset, mem, - pages_addr, last_update, table_freed); + pages_addr, last_update, NULL); if (r) return r; } @@ -2155,7 +2154,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) { /* Per VM BOs never need to bo cleared in the page tables */ - r = amdgpu_vm_bo_update(adev, bo_va, false, NULL); + r = amdgpu_vm_bo_update(adev, bo_va, false); if (r) return r; } @@ -2174,7 +2173,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, else clear = true; - r = amdgpu_vm_bo_update(adev, bo_va, clear, NULL); + r = amdgpu_vm_bo_update(adev, bo_va, clear); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index ddb85a85cbba..f8fa653d4da7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -406,7 +406,7 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, struct dma_fence **fence, bool *free_table); int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, - bool clear, bool *table_freed); + bool clear); bool amdgpu_vm_evictable(struct amdgpu_bo *bo); void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, struct amdgpu_bo *bo, bool evicted); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index 33324427b555..7e0d8c092c7e 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -766,7 +766,7 @@ static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = { static void dce_virtual_set_irq_funcs(struct amdgpu_device 
*adev) { - adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VBLANK6 + 1; + adev->crtc_irq.num_types = adev->mode_info.num_crtc; adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index f5e9c022960b..a64b2c706090 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -3300,6 +3300,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER7_SELECT, 0xf0f001ff, 0x00000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER8_SELECT, 0xf0f001ff, 0x00000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER9_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSX_DEBUG_1, 0x00010000, 0x00010020), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffbfffff, 0x00a00000) }; @@ -3379,6 +3380,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_vangogh[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0xffffffbf, 0x00000020), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1_Vangogh, 0xffffffff, 0x00070103), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQG_CONFIG, 0x000017ff, 0x00001000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSX_DEBUG_1, 0x00010000, 0x00010020), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00400000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff), @@ -3445,6 +3447,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_4[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER7_SELECT, 0xf0f001ff, 0x00000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER8_SELECT, 0xf0f001ff, 0x00000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER9_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSX_DEBUG_1, 0x00010000, 0x00010020), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x01030000, 0x01030000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x03a00000, 0x00a00000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmLDS_CONFIG, 0x00000020, 0x00000020) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 044076ec1d03..6a23c6826e12 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1295,6 +1295,16 @@ static bool is_raven_kicker(struct amdgpu_device *adev) return false; } +static bool check_if_enlarge_doorbell_range(struct amdgpu_device *adev) +{ + if ((adev->asic_type == CHIP_RENOIR) && + (adev->gfx.me_fw_version >= 0x000000a5) && + (adev->gfx.me_feature_version >= 52)) + return true; + else + return false; +} + static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev) { if (gfx_v9_0_should_disable_gfxoff(adev->pdev)) @@ -3675,7 +3685,16 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring) if (ring->use_doorbell) { WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER, (adev->doorbell_index.kiq * 2) << 2); - WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER, + /* If GC has entered CGPG, ringing doorbell > first page + * doesn't wakeup GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to + * workaround this issue. And this change has to align with firmware + * update. 
+ */ + if (check_if_enlarge_doorbell_range(adev)) + WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER, + (adev->doorbell.size - 4)); + else + WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER, (adev->doorbell_index.userqueue_end * 2) << 2); } diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index 3ee481557fc9..ff2307d7ee0f 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -252,7 +252,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work) * otherwise the mailbox msg will be ruined/reseted by * the VF FLR. */ - if (!down_read_trylock(&adev->reset_sem)) + if (!down_write_trylock(&adev->reset_sem)) return; amdgpu_virt_fini_data_exchange(adev); @@ -268,7 +268,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work) flr_done: atomic_set(&adev->in_gpu_reset, 0); - up_read(&adev->reset_sem); + up_write(&adev->reset_sem); /* Trigger recovery for world switch failure if no TDR */ if (amdgpu_device_should_recover_gpu(adev) diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c index 48e588d3c409..9f7aac435d69 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c @@ -273,7 +273,7 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work) * otherwise the mailbox msg will be ruined/reseted by * the VF FLR. */ - if (!down_read_trylock(&adev->reset_sem)) + if (!down_write_trylock(&adev->reset_sem)) return; amdgpu_virt_fini_data_exchange(adev); @@ -289,7 +289,7 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work) flr_done: atomic_set(&adev->in_gpu_reset, 0); - up_read(&adev->reset_sem); + up_write(&adev->reset_sem); /* Trigger recovery for world switch failure if no TDR */ if (amdgpu_device_should_recover_gpu(adev) diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 94a2c0742ee5..94d029dbf30d 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -64,32 +64,13 @@ #include "smuio_v11_0.h" #include "smuio_v11_0_6.h" -#define codec_info_build(type, width, height, level) \ - .codec_type = type,\ - .max_width = width,\ - .max_height = height,\ - .max_pixels_per_frame = height * width,\ - .max_level = level, - static const struct amd_ip_funcs nv_common_ip_funcs; /* Navi */ static const struct amdgpu_video_codec_info nv_video_codecs_encode_array[] = { - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, - .max_width = 4096, - .max_height = 2304, - .max_pixels_per_frame = 4096 * 2304, - .max_level = 0, - }, - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, - .max_width = 4096, - .max_height = 2304, - .max_pixels_per_frame = 4096 * 2304, - .max_level = 0, - }, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)}, }; static const struct amdgpu_video_codecs nv_video_codecs_encode = @@ -101,55 +82,13 @@ static const struct amdgpu_video_codecs nv_video_codecs_encode = /* Navi1x */ static const struct amdgpu_video_codec_info nv_video_codecs_decode_array[] = { - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, - .max_width = 4096, - .max_height = 4096, - .max_pixels_per_frame = 4096 * 4096, - .max_level = 3, - }, - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, - .max_width = 4096, - .max_height = 4096, - .max_pixels_per_frame = 4096 * 4096, - .max_level = 5, - }, - { - .codec_type = 
AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, - .max_width = 4096, - .max_height = 4096, - .max_pixels_per_frame = 4096 * 4096, - .max_level = 52, - }, - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, - .max_width = 4096, - .max_height = 4096, - .max_pixels_per_frame = 4096 * 4096, - .max_level = 4, - }, - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, - .max_width = 8192, - .max_height = 4352, - .max_pixels_per_frame = 8192 * 4352, - .max_level = 186, - }, - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, - .max_width = 4096, - .max_height = 4096, - .max_pixels_per_frame = 4096 * 4096, - .max_level = 0, - }, - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, - .max_width = 8192, - .max_height = 4352, - .max_pixels_per_frame = 8192 * 4352, - .max_level = 0, - }, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, }; static const struct amdgpu_video_codecs nv_video_codecs_decode = @@ -161,62 +100,14 @@ static const struct amdgpu_video_codecs nv_video_codecs_decode = /* Sienna Cichlid */ static const struct amdgpu_video_codec_info sc_video_codecs_decode_array[] = { - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, - .max_width = 4096, - .max_height = 4096, - .max_pixels_per_frame = 4096 * 4096, - .max_level = 3, - }, - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, - .max_width = 4096, - .max_height = 4096, - .max_pixels_per_frame = 4096 * 4096, - .max_level = 5, - }, - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, - .max_width = 4096, - .max_height = 4096, - .max_pixels_per_frame = 4096 * 4096, - .max_level = 52, - }, - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, - .max_width = 4096, - .max_height = 4096, - .max_pixels_per_frame = 4096 * 4096, - .max_level = 4, - }, - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, - .max_width = 8192, - .max_height = 4352, - .max_pixels_per_frame = 8192 * 4352, - .max_level = 186, - }, - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, - .max_width = 4096, - .max_height = 4096, - .max_pixels_per_frame = 4096 * 4096, - .max_level = 0, - }, - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, - .max_width = 8192, - .max_height = 4352, - .max_pixels_per_frame = 8192 * 4352, - .max_level = 0, - }, - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, - .max_width = 8192, - .max_height = 4352, - .max_pixels_per_frame = 8192 * 4352, - .max_level = 0, - }, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, };
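The mxgpu_ai.c and mxgpu_nv.c hunks above switch the FLR (function-level reset) worker from down_read_trylock() to down_write_trylock() on adev->reset_sem: a reset must exclude every concurrent user of the device rather than share the semaphore with them. A minimal standalone sketch of the rw_semaphore semantics this relies on (the reader/worker bodies are placeholders, not driver code):

#include <linux/rwsem.h>

static DECLARE_RWSEM(reset_sem);

/* Many readers may hold the semaphore concurrently. */
static void reader_path(void)
{
	down_read(&reset_sem);
	/* ... touch state that a reset would invalidate ... */
	up_read(&reset_sem);
}

/* With down_write_trylock() the worker proceeds only when no reader
 * or writer holds the semaphore, so the reset can never overlap a
 * reader the way the old down_read_trylock() allowed.
 */
static void flr_worker(void)
{
	if (!down_write_trylock(&reset_sem))
		return; /* device busy; skip the FLR for now */

	/* ... perform the VF FLR ... */

	up_write(&reset_sem);
}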
static const struct amdgpu_video_codecs sc_video_codecs_decode = @@ -228,80 +119,20 @@ static const struct amdgpu_video_codecs sc_video_codecs_decode = /* SRIOV Sienna Cichlid, not const since data is controlled by host */ static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] = { - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, - .max_width = 4096, - .max_height = 2304, - .max_pixels_per_frame = 4096 * 2304, - .max_level = 0, - }, - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, - .max_width = 4096, - .max_height = 2304, - .max_pixels_per_frame = 4096 * 2304, - .max_level = 0, - }, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)}, }; static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array[] = { - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, - .max_width = 4096, - .max_height = 4096, - .max_pixels_per_frame = 4096 * 4096, - .max_level = 3, - }, - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, - .max_width = 4096, - .max_height = 4096, - .max_pixels_per_frame = 4096 * 4096, - .max_level = 5, - }, - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, - .max_width = 4096, - .max_height = 4096, - .max_pixels_per_frame = 4096 * 4096, - .max_level = 52, - }, - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, - .max_width = 4096, - .max_height = 4096, - .max_pixels_per_frame = 4096 * 4096, - .max_level = 4, - }, - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, - .max_width = 8192, - .max_height = 4352, - .max_pixels_per_frame = 8192 * 4352, - .max_level = 186, - }, - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, - .max_width = 4096, - .max_height = 4096, - .max_pixels_per_frame = 4096 * 4096, - .max_level = 0, - }, - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, - .max_width = 8192, - .max_height = 4352, - .max_pixels_per_frame = 8192 * 4352, - .max_level = 0, - }, - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, - .max_width = 8192, - .max_height = 4352, - .max_pixels_per_frame = 8192 * 4352, - .max_level = 0, - }, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, }; static struct amdgpu_video_codecs sriov_sc_video_codecs_encode = @@ -333,6 +164,19 @@ static const struct amdgpu_video_codecs bg_video_codecs_encode = { .codec_array = NULL, }; +/* Yellow Carp */ +static const struct amdgpu_video_codec_info yc_video_codecs_decode_array[] = { + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, +}; + +static const struct amdgpu_video_codecs yc_video_codecs_decode = { + .codec_count = ARRAY_SIZE(yc_video_codecs_decode_array), + .codec_array = 
yc_video_codecs_decode_array, +}; + static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode, const struct amdgpu_video_codecs **codecs) { @@ -353,12 +197,17 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode, case CHIP_NAVY_FLOUNDER: case CHIP_DIMGREY_CAVEFISH: case CHIP_VANGOGH: - case CHIP_YELLOW_CARP: if (encode) *codecs = &nv_video_codecs_encode; else *codecs = &sc_video_codecs_decode; return 0; + case CHIP_YELLOW_CARP: + if (encode) + *codecs = &nv_video_codecs_encode; + else + *codecs = &yc_video_codecs_decode; + return 0; case CHIP_BEIGE_GOBY: if (encode) *codecs = &bg_video_codecs_encode; @@ -1387,7 +1236,10 @@ static int nv_common_early_init(void *handle) AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG | AMD_PG_SUPPORT_JPEG; - adev->external_rev_id = adev->rev_id + 0x01; + if (adev->pdev->device == 0x1681) + adev->external_rev_id = adev->rev_id + 0x19; + else + adev->external_rev_id = adev->rev_id + 0x01; break; default: /* FIXME: not supported yet */ diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c index 618e5b6b85d9..536d41f327c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c @@ -67,7 +67,7 @@ static int psp_v12_0_init_microcode(struct psp_context *psp) err = psp_init_asd_microcode(psp, chip_name); if (err) - goto out; + return err; snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); @@ -80,7 +80,7 @@ static int psp_v12_0_init_microcode(struct psp_context *psp) } else { err = amdgpu_ucode_validate(adev->psp.ta_fw); if (err) - goto out2; + goto out; ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data; @@ -105,10 +105,9 @@ static int psp_v12_0_init_microcode(struct psp_context *psp) return 0; -out2: +out: release_firmware(adev->psp.ta_fw); adev->psp.ta_fw = NULL; -out: if (err) { dev_err(adev->dev, "psp v12.0: Failed to load firmware \"%s\"\n", diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index b02436401d46..b7d350be8050 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -88,20 +88,8 @@ /* Vega, Raven, Arcturus */ static const struct amdgpu_video_codec_info vega_video_codecs_encode_array[] = { - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, - .max_width = 4096, - .max_height = 2304, - .max_pixels_per_frame = 4096 * 2304, - .max_level = 0, - }, - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, - .max_width = 4096, - .max_height = 2304, - .max_pixels_per_frame = 4096 * 2304, - .max_level = 0, - }, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)}, }; static const struct amdgpu_video_codecs vega_video_codecs_encode = @@ -113,48 +101,12 @@ static const struct amdgpu_video_codecs vega_video_codecs_encode = /* Vega */ static const struct amdgpu_video_codec_info vega_video_codecs_decode_array[] = { - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, - .max_width = 4096, - .max_height = 4096, - .max_pixels_per_frame = 4096 * 4096, - .max_level = 3, - }, - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, - .max_width = 4096, - .max_height = 4096, - .max_pixels_per_frame = 4096 * 4096, - .max_level = 5, - }, - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, - .max_width = 4096, - .max_height = 4096, - .max_pixels_per_frame = 
4096 * 4096, - .max_level = 52, - }, - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, - .max_width = 4096, - .max_height = 4096, - .max_pixels_per_frame = 4096 * 4096, - .max_level = 4, - }, - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, - .max_width = 4096, - .max_height = 4096, - .max_pixels_per_frame = 4096 * 4096, - .max_level = 186, - }, - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, - .max_width = 4096, - .max_height = 4096, - .max_pixels_per_frame = 4096 * 4096, - .max_level = 0, - }, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, }; static const struct amdgpu_video_codecs vega_video_codecs_decode = @@ -166,55 +118,13 @@ static const struct amdgpu_video_codecs vega_video_codecs_decode = /* Raven */ static const struct amdgpu_video_codec_info rv_video_codecs_decode_array[] = { - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, - .max_width = 4096, - .max_height = 4096, - .max_pixels_per_frame = 4096 * 4096, - .max_level = 3, - }, - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, - .max_width = 4096, - .max_height = 4096, - .max_pixels_per_frame = 4096 * 4096, - .max_level = 5, - }, - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, - .max_width = 4096, - .max_height = 4096, - .max_pixels_per_frame = 4096 * 4096, - .max_level = 52, - }, - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, - .max_width = 4096, - .max_height = 4096, - .max_pixels_per_frame = 4096 * 4096, - .max_level = 4, - }, - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, - .max_width = 4096, - .max_height = 4096, - .max_pixels_per_frame = 4096 * 4096, - .max_level = 186, - }, - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, - .max_width = 4096, - .max_height = 4096, - .max_pixels_per_frame = 4096 * 4096, - .max_level = 0, - }, - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, - .max_width = 4096, - .max_height = 4096, - .max_pixels_per_frame = 4096 * 4096, - .max_level = 0, - }, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 4096, 4096, 0)}, };
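The codec tables above and below all rely on the codec_info_build() helper whose local definition is deleted from nv.c earlier in this patch; since the arrays keep using it, the macro is presumably now provided by a shared amdgpu header. As a sketch of what the shorthand expands to, using the macro body visible in the deleted nv.c lines:

/* The deleted nv.c definition, reproduced for illustration: */
#define codec_info_build(type, width, height, level) \
	.codec_type = type,\
	.max_width = width,\
	.max_height = height,\
	.max_pixels_per_frame = height * width,\
	.max_level = level,

/* A braced table entry such as ... */
static const struct amdgpu_video_codec_info hevc_example = {
	codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)
};

/* ... is therefore equivalent to the designated-initializer form it replaces: */
static const struct amdgpu_video_codec_info hevc_example_expanded = {
	.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
	.max_width = 8192,
	.max_height = 4352,
	.max_pixels_per_frame = 4352 * 8192,
	.max_level = 186,
};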
static const struct amdgpu_video_codecs rv_video_codecs_decode = @@ -226,55 +136,13 @@ static const struct amdgpu_video_codecs rv_video_codecs_decode = /* Renoir, Arcturus */ static const struct amdgpu_video_codec_info rn_video_codecs_decode_array[] = { - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, - .max_width = 4096, - .max_height = 4096, - .max_pixels_per_frame = 4096 * 4096, - .max_level = 3, - }, - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, - .max_width = 4096, - .max_height = 4096, - .max_pixels_per_frame = 4096 * 4096, - .max_level = 5, - }, - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, - .max_width = 4096, - .max_height = 4096, - .max_pixels_per_frame = 4096 * 4096, - .max_level = 52, - }, - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, - .max_width = 4096, - .max_height = 4096, - .max_pixels_per_frame = 4096 * 4096, - .max_level = 4, - }, - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, - .max_width = 8192, - .max_height = 4352, - .max_pixels_per_frame = 4096 * 4096, - .max_level = 186, - }, - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, - .max_width = 4096, - .max_height = 4096, - .max_pixels_per_frame = 4096 * 4096, - .max_level = 0, - }, - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, - .max_width = 8192, - .max_height = 4352, - .max_pixels_per_frame = 4096 * 4096, - .max_level = 0, - }, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, }; static const struct amdgpu_video_codecs rn_video_codecs_decode = diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 67541c30327a..e48acdd03c1a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1393,7 +1393,6 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep, long err = 0; int i; uint32_t *devices_arr = NULL; - bool table_freed = false; dev = kfd_device_by_id(GET_GPU_ID(args->handle)); if (!dev) @@ -1451,8 +1450,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep, goto get_mem_obj_from_handle_failed; } err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu( - peer->kgd, (struct kgd_mem *)mem, - peer_pdd->drm_priv, &table_freed); + peer->kgd, (struct kgd_mem *)mem, peer_pdd->drm_priv); if (err) { pr_err("Failed to map to gpu %d/%d\n", i, args->n_devices); @@ -1470,17 +1468,16 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep, } /* Flush TLBs after waiting for the page table updates to complete */ - if (table_freed) { - for (i = 0; i < args->n_devices; i++) { - peer = kfd_device_by_id(devices_arr[i]); - if (WARN_ON_ONCE(!peer)) - continue; - peer_pdd = kfd_get_process_device_data(peer, p); - if (WARN_ON_ONCE(!peer_pdd)) - continue; - kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY); - } + for (i = 0; i < args->n_devices; i++) { + peer = kfd_device_by_id(devices_arr[i]); + if (WARN_ON_ONCE(!peer)) + continue; + peer_pdd = kfd_get_process_device_data(peer, p); + if (WARN_ON_ONCE(!peer_pdd)) + continue; + kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY); } + kfree(devices_arr); return err; @@ -1568,27 +1565,10 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep, } args->n_success = i+1; } - mutex_unlock(&p->mutex); - - err = amdgpu_amdkfd_gpuvm_sync_memory(dev->kgd, (struct kgd_mem *) mem, true); - if (err) { - pr_debug("Sync memory failed, wait interrupted by user signal\n"); - goto sync_memory_failed; - } - - /* Flush TLBs after waiting for the page table updates to complete */ - for (i = 0; i < args->n_devices; i++) { - peer = kfd_device_by_id(devices_arr[i]); - if (WARN_ON_ONCE(!peer)) - continue; - peer_pdd = kfd_get_process_device_data(peer, p); - if (WARN_ON_ONCE(!peer_pdd)) - continue; - kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT); - } - kfree(devices_arr); + mutex_unlock(&p->mutex); + return 0; bind_process_to_device_failed: @@ -1596,7 +1576,6 @@ get_mem_obj_from_handle_failed: unmap_memory_from_gpu_failed: mutex_unlock(&p->mutex); copy_from_user_failed: -sync_memory_failed: kfree(devices_arr); return err; }
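The kfd_chardev.c error paths above (bind_process_to_device_failed, get_mem_obj_from_handle_failed, ...) and the psp_v12_0.c relabeling earlier in this patch both follow the kernel's goto-based unwind idiom: each failure jumps to the label that releases exactly what has been acquired so far, in reverse order, so cleanup code exists only once. A self-contained sketch with hypothetical acquire/release helpers:

#include <linux/device.h>

/* Hypothetical resources, for illustration only. */
static int acquire_a(struct device *dev) { return 0; }
static void release_a(struct device *dev) { }
static int acquire_b(struct device *dev) { return 0; }
static void release_b(struct device *dev) { }
static int validate_b(struct device *dev) { return 0; }

static int example_init(struct device *dev)
{
	int err;

	err = acquire_a(dev);
	if (err)
		return err;		/* nothing held yet: plain return */

	err = acquire_b(dev);
	if (err)
		goto out_release_a;	/* undo A only */

	err = validate_b(dev);
	if (err)
		goto out_release_b;	/* undo B, then A */

	return 0;

out_release_b:
	release_b(dev);
out_release_a:
	release_a(dev);
	return err;
}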
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 21ec8a18cad2..8a2c6fc438c0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -714,8 +714,7 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd, if (err) goto err_alloc_mem; - err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, - pdd->drm_priv, NULL); + err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->drm_priv); if (err) goto err_map_mem; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 9a71d8919bd6..e883731c3f8f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -2375,21 +2375,27 @@ static bool svm_range_skip_recover(struct svm_range *prange) static void svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p, - struct svm_range *prange, int32_t gpuidx) + int32_t gpuidx) { struct kfd_process_device *pdd; - if (gpuidx == MAX_GPU_INSTANCE) - /* fault is on different page of same range - * or fault is skipped to recover later - */ - pdd = svm_range_get_pdd_by_adev(prange, adev); - else - /* fault recovered - * or fault cannot recover because GPU no access on the range - */ - pdd = kfd_process_device_from_gpuidx(p, gpuidx); + /* fault is on a different page of the same range + * or fault is skipped to recover later + * or fault is on an invalid virtual address + */ + if (gpuidx == MAX_GPU_INSTANCE) { + uint32_t gpuid; + int r; + + r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx); + if (r < 0) + return; + } + /* fault is recovered + * or fault cannot recover because the GPU has no access to the range + */ + pdd = kfd_process_device_from_gpuidx(p, gpuidx); if (pdd) WRITE_ONCE(pdd->faults, pdd->faults + 1); } @@ -2525,7 +2531,7 @@ out_unlock_svms: mutex_unlock(&svms->lock); mmap_read_unlock(mm); - svm_range_count_fault(adev, p, prange, gpuidx); + svm_range_count_fault(adev, p, gpuidx); mmput(mm); out: @@ -3020,6 +3026,14 @@ svm_range_get_attr(struct kfd_process *p, uint64_t start, uint64_t size, pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start, start + size - 1, nattr); + /* Flush pending deferred work to avoid racing with deferred actions from + * previous memory map changes (e.g. munmap). Concurrent memory map changes + * can still race with get_attr because we don't hold the mmap lock. But that + * would be a race condition in the application anyway, and undefined + * behaviour is acceptable in that case. 
+ */ + flush_work(&p->svms.deferred_list_work); + mmap_read_lock(mm); if (!svm_range_is_valid(mm, start, size)) { pr_debug("invalid range\n"); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 01e1062dc235..afa96c8f721b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1548,6 +1548,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) } hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data; + adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version); if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id = @@ -1561,7 +1562,6 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) adev->dm.dmcub_fw_version); } - adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version); adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL); dmub_srv = adev->dm.dmub_srv; @@ -2429,9 +2429,9 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll; min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll; - if (caps->ext_caps->bits.oled == 1 || + if (caps->ext_caps->bits.oled == 1 /*|| caps->ext_caps->bits.sdr_aux_backlight_control == 1 || - caps->ext_caps->bits.hdr_aux_backlight_control == 1) + caps->ext_caps->bits.hdr_aux_backlight_control == 1*/) caps->aux_support = true; if (amdgpu_backlight == 0) @@ -9191,7 +9191,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \ defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) /* restore the backlight level */ - if (dm->backlight_dev) + if (dm->backlight_dev && (amdgpu_dm_backlight_get_level(dm) != dm->brightness[0])) amdgpu_dm_backlight_set_level(dm, dm->brightness[0]); #endif /* @@ -9605,7 +9605,12 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, } else if (amdgpu_freesync_vid_mode && aconnector && is_freesync_video_mode(&new_crtc_state->mode, aconnector)) { - set_freesync_fixed_config(dm_new_crtc_state); + struct drm_display_mode *high_mode; + + high_mode = get_highest_refresh_rate_mode(aconnector, false); + if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) { + set_freesync_fixed_config(dm_new_crtc_state); + } } ret = dm_atomic_get_state(state, &dm_state); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index 40f617bbb86f..4aba0e8c84f8 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -584,7 +584,7 @@ static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev, handler_data = container_of(handler_list->next, struct amdgpu_dm_irq_handler_data, list); /*allocate a new amdgpu_dm_irq_handler_data*/ - handler_data_add = kzalloc(sizeof(*handler_data), GFP_KERNEL); + handler_data_add = kzalloc(sizeof(*handler_data), GFP_ATOMIC); if (!handler_data_add) { DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n"); return; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index 6e0c5c664fdc..a5331b96f551 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c @@ -197,7 +197,7 @@ void dcn20_update_clocks_update_dentist(struct 
clk_mgr_internal *clk_mgr, struct REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_wdivider); -// REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 5, 100); + REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 1000); REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_WDIVIDER, dppclk_wdivider); REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_CHG_DONE, 1, 5, 100); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index c6f494f0dcea..6185f9475fa2 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -66,9 +66,11 @@ int rn_get_active_display_cnt_wa( for (i = 0; i < context->stream_count; i++) { const struct dc_stream_state *stream = context->streams[i]; + /* Extend the WA to DP for Linux*/ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A || stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK || - stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK) + stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK || + stream->signal == SIGNAL_TYPE_DISPLAY_PORT) tmds_present = true; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c index 513676a6f52b..af7004b770ae 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c @@ -190,6 +190,10 @@ void dcn3_init_clocks(struct clk_mgr *clk_mgr_base) &clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz, &num_levels); + /* SOCCLK */ + dcn3_init_single_clock(clk_mgr, PPCLK_SOCCLK, + &clk_mgr_base->bw_params->clk_table.entries[0].socclk_mhz, + &num_levels); // DPREFCLK ??? /* DISPCLK */ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c index 7b7d884d58be..4a4894e9d9c9 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c @@ -48,6 +48,21 @@ #include "dc_dmub_srv.h" +#include "yellow_carp_offset.h" + +#define regCLK1_CLK_PLL_REQ 0x0237 +#define regCLK1_CLK_PLL_REQ_BASE_IDX 0 + +#define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x0 +#define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc +#define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10 +#define CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL +#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L +#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L + +#define REG(reg_name) \ + (CLK_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name) + #define TO_CLK_MGR_DCN31(clk_mgr)\ container_of(clk_mgr, struct clk_mgr_dcn31, base) @@ -124,10 +139,10 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base, * also if safe to lower is false, we just go in the higher state */ if (safe_to_lower) { - if (new_clocks->z9_support == DCN_Z9_SUPPORT_ALLOW && - new_clocks->z9_support != clk_mgr_base->clks.z9_support) { + if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_ALLOW && + new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) { dcn31_smu_set_Z9_support(clk_mgr, true); - clk_mgr_base->clks.z9_support = new_clocks->z9_support; + clk_mgr_base->clks.zstate_support = new_clocks->zstate_support; } if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) { @@ -148,10 +163,10 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base, } } } else { - if (new_clocks->z9_support == 
DCN_Z9_SUPPORT_DISALLOW && - new_clocks->z9_support != clk_mgr_base->clks.z9_support) { + if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_DISALLOW && + new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) { dcn31_smu_set_Z9_support(clk_mgr, false); - clk_mgr_base->clks.z9_support = new_clocks->z9_support; + clk_mgr_base->clks.zstate_support = new_clocks->zstate_support; } if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) { @@ -229,7 +244,32 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base, static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr) { - return 0; + /* get FbMult value */ + struct fixed31_32 pll_req; + unsigned int fbmult_frac_val = 0; + unsigned int fbmult_int_val = 0; + + /* + * The register value of fbmult is in 8.16 format; we convert it to 31.32 + * to leverage the fixed-point operations available in the driver + */ + + REG_GET(CLK1_CLK_PLL_REQ, FbMult_frac, &fbmult_frac_val); /* 16 bit fractional part */ + REG_GET(CLK1_CLK_PLL_REQ, FbMult_int, &fbmult_int_val); /* 8 bit integer part */ + + pll_req = dc_fixpt_from_int(fbmult_int_val); + + /* + * since the fractional part is only 16 bits in the register definition but 32 bits + * in our fixed-point definition, we need to shift left by 16 to obtain the correct value + */ + pll_req.value |= fbmult_frac_val << 16; + + /* multiply by REFCLK frequency */ + pll_req = dc_fixpt_mul_int(pll_req, clk_mgr->dfs_ref_freq_khz); + + /* integer part is now VCO frequency in kHz */ + return dc_fixpt_floor(pll_req); } static void dcn31_enable_pme_wa(struct clk_mgr *clk_mgr_base) @@ -246,7 +286,7 @@ static void dcn31_init_clocks(struct clk_mgr *clk_mgr) clk_mgr->clks.p_state_change_support = true; clk_mgr->clks.prev_p_state_change_support = true; clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN; - clk_mgr->clks.z9_support = DCN_Z9_SUPPORT_UNKNOWN; + clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN; } static bool dcn31_are_clock_states_equal(struct dc_clocks *a, @@ -260,7 +300,7 @@ static bool dcn31_are_clock_states_equal(struct dc_clocks *a, return false; else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz) return false; - else if (a->z9_support != b->z9_support) + else if (a->zstate_support != b->zstate_support) return false; else if (a->dtbclk_en != b->dtbclk_en) return false; @@ -592,6 +632,7 @@ void dcn31_clk_mgr_construct( clk_mgr->base.dprefclk_ss_percentage = 0; clk_mgr->base.dprefclk_ss_divider = 1000; clk_mgr->base.ss_on_dprefclk = false; + clk_mgr->base.dfs_ref_freq_khz = 48000; clk_mgr->smu_wm_set.wm_set = (struct dcn31_watermarks *)dm_helpers_allocate_gpu_mem( clk_mgr->base.base.ctx, diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h index cc21cf75eafd..f8f100535526 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h @@ -27,60 +27,6 @@ #define __DCN31_CLK_MGR_H__ #include "clk_mgr_internal.h" -//CLK1_CLK_PLL_REQ -#ifndef CLK11_CLK1_CLK_PLL_REQ__FbMult_int__SHIFT -#define CLK11_CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x0 -#define CLK11_CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc -#define CLK11_CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10 -#define CLK11_CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL -#define CLK11_CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L -#define CLK11_CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L -//CLK1_CLK0_DFS_CNTL -#define CLK11_CLK1_CLK0_DFS_CNTL__CLK0_DIVIDER__SHIFT 0x0 -#define 
CLK11_CLK1_CLK0_DFS_CNTL__CLK0_DIVIDER_MASK 0x0000007FL -/*DPREF clock related*/ -#define CLK0_CLK3_DFS_CNTL__CLK3_DIVIDER__SHIFT 0x0 -#define CLK0_CLK3_DFS_CNTL__CLK3_DIVIDER_MASK 0x0000007FL -#define CLK1_CLK3_DFS_CNTL__CLK3_DIVIDER__SHIFT 0x0 -#define CLK1_CLK3_DFS_CNTL__CLK3_DIVIDER_MASK 0x0000007FL -#define CLK2_CLK3_DFS_CNTL__CLK3_DIVIDER__SHIFT 0x0 -#define CLK2_CLK3_DFS_CNTL__CLK3_DIVIDER_MASK 0x0000007FL -#define CLK3_CLK3_DFS_CNTL__CLK3_DIVIDER__SHIFT 0x0 -#define CLK3_CLK3_DFS_CNTL__CLK3_DIVIDER_MASK 0x0000007FL - -//CLK3_0_CLK3_CLK_PLL_REQ -#define CLK3_0_CLK3_CLK_PLL_REQ__FbMult_int__SHIFT 0x0 -#define CLK3_0_CLK3_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc -#define CLK3_0_CLK3_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10 -#define CLK3_0_CLK3_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL -#define CLK3_0_CLK3_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L -#define CLK3_0_CLK3_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L - -#define mmCLK0_CLK3_DFS_CNTL 0x16C60 -#define mmCLK00_CLK0_CLK3_DFS_CNTL 0x16C60 -#define mmCLK01_CLK0_CLK3_DFS_CNTL 0x16E60 -#define mmCLK02_CLK0_CLK3_DFS_CNTL 0x17060 -#define mmCLK03_CLK0_CLK3_DFS_CNTL 0x17260 - -#define mmCLK0_CLK_PLL_REQ 0x16C10 -#define mmCLK00_CLK0_CLK_PLL_REQ 0x16C10 -#define mmCLK01_CLK0_CLK_PLL_REQ 0x16E10 -#define mmCLK02_CLK0_CLK_PLL_REQ 0x17010 -#define mmCLK03_CLK0_CLK_PLL_REQ 0x17210 - -#define mmCLK1_CLK_PLL_REQ 0x1B00D -#define mmCLK10_CLK1_CLK_PLL_REQ 0x1B00D -#define mmCLK11_CLK1_CLK_PLL_REQ 0x1B20D -#define mmCLK12_CLK1_CLK_PLL_REQ 0x1B40D -#define mmCLK13_CLK1_CLK_PLL_REQ 0x1B60D - -#define mmCLK2_CLK_PLL_REQ 0x17E0D - -/*AMCLK*/ -#define mmCLK11_CLK1_CLK0_DFS_CNTL 0x1B23F -#define mmCLK11_CLK1_CLK_PLL_REQ 0x1B20D -#endif - struct dcn31_watermarks; struct dcn31_smu_watermark_set { diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c index 66db5e988bc1..dad4a4c18bcf 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c @@ -31,8 +31,8 @@ #include "dcn31_smu.h" #include "yellow_carp_offset.h" -#include "mp/mp_13_0_1_offset.h" -#include "mp/mp_13_0_1_sh_mask.h" +#include "mp/mp_13_0_2_offset.h" +#include "mp/mp_13_0_2_sh_mask.h" #define REG(reg_name) \ (MP0_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 605e297b7a59..a30283fa5173 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1530,6 +1530,12 @@ void dc_z10_restore(struct dc *dc) if (dc->hwss.z10_restore) dc->hwss.z10_restore(dc); } + +void dc_z10_save_init(struct dc *dc) +{ + if (dc->hwss.z10_save_init) + dc->hwss.z10_save_init(dc); +} #endif /* * Applies given context to HW and copy it into current context. 
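The new get_vco_frequency_from_reg() in the dcn31_clk_mgr.c hunk above reads the 8.16 fixed-point feedback multiplier out of CLK1_CLK_PLL_REQ, widens it to the driver's 31.32 fixed31_32 format, and multiplies by the DFS reference clock (48000 kHz, per the dfs_ref_freq_khz default set in the same hunk). The same arithmetic in a standalone sketch using plain 64-bit math instead of the dc fixed-point helpers:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the get_vco_frequency_from_reg() arithmetic. FbMult is
 * 8.16 fixed point: an integer part (9 bits per the 0x1FF mask) and a
 * 16-bit fraction. VCO kHz = FbMult * dfs_ref_freq_khz.
 */
static uint32_t vco_khz(uint32_t fbmult_int, uint32_t fbmult_frac,
			uint32_t dfs_ref_freq_khz)
{
	/* Assemble the multiplier in .16 fixed point ... */
	uint64_t fbmult_q16 = ((uint64_t)fbmult_int << 16) | fbmult_frac;

	/* ... scale by the reference clock and drop the fraction (the
	 * driver uses dc_fixpt_mul_int() followed by dc_fixpt_floor()).
	 */
	return (uint32_t)((fbmult_q16 * dfs_ref_freq_khz) >> 16);
}

int main(void)
{
	/* Example: FbMult_int = 0x40 (64), FbMult_frac = 0x8000 (0.5),
	 * 48 MHz reference -> 64.5 * 48000 kHz = 3096000 kHz.
	 */
	printf("%u kHz\n", vco_khz(0x40, 0x8000, 48000));
	return 0;
}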
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index b8832bdde2bc..a6d0fd24fd02 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -1620,11 +1620,12 @@ enum dc_status dpcd_configure_lttpr_mode(struct dc_link *link, struct link_train { enum dc_status status = DC_OK; - if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) - status = configure_lttpr_mode_non_transparent(link, lt_settings); - else + if (lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) status = configure_lttpr_mode_transparent(link); + else if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) + status = configure_lttpr_mode_non_transparent(link, lt_settings); + return status; } @@ -1784,7 +1785,6 @@ bool perform_link_training_with_retries( link_enc = stream->link_enc; else link_enc = link->link_enc; - ASSERT(link_enc); /* We need to do this before the link training to ensure the idle pattern in SST * mode will be sent right after the link training @@ -1820,8 +1820,7 @@ bool perform_link_training_with_retries( */ panel_mode = DP_PANEL_MODE_DEFAULT; } - } else - panel_mode = DP_PANEL_MODE_DEFAULT; + } } #endif @@ -3603,29 +3602,12 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link) bool dp_retrieve_lttpr_cap(struct dc_link *link) { uint8_t lttpr_dpcd_data[6]; - bool vbios_lttpr_enable = false; - bool vbios_lttpr_interop = false; - struct dc_bios *bios = link->dc->ctx->dc_bios; + bool vbios_lttpr_enable = link->dc->caps.vbios_lttpr_enable; + bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware; enum dc_status status = DC_ERROR_UNEXPECTED; bool is_lttpr_present = false; memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data)); - /* Query BIOS to determine if LTTPR functionality is forced on by system */ - if (bios->funcs->get_lttpr_caps) { - enum bp_result bp_query_result; - uint8_t is_vbios_lttpr_enable = 0; - - bp_query_result = bios->funcs->get_lttpr_caps(bios, &is_vbios_lttpr_enable); - vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; - } - - if (bios->funcs->get_lttpr_interop) { - enum bp_result bp_query_result; - uint8_t is_vbios_interop_enabled = 0; - - bp_query_result = bios->funcs->get_lttpr_interop(bios, &is_vbios_interop_enabled); - vbios_lttpr_interop = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled; - } /* * Logic to determine LTTPR mode @@ -4650,7 +4632,10 @@ enum dp_panel_mode dp_get_panel_mode(struct dc_link *link) } } - if (link->dpcd_caps.panel_mode_edp) { + if (link->dpcd_caps.panel_mode_edp && + (link->connector_signal == SIGNAL_TYPE_EDP || + (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT && + link->is_internal_display))) { return DP_PANEL_MODE_EDP; } @@ -4914,9 +4899,7 @@ bool dc_link_set_default_brightness_aux(struct dc_link *link) { uint32_t default_backlight; - if (link && - (link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 || - link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1)) { + if (link && link->dpcd_sink_ext_caps.bits.oled == 1) { if (!dc_link_read_default_bl_aux(link, &default_backlight)) default_backlight = 150000; // if < 5 nits or > 5000, it might be wrong readback diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index a6a67244a322..1596f6b7fed7 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1062,7 +1062,7 @@ bool 
resource_build_scaling_params(struct pipe_ctx *pipe_ctx) * so use only 30 bpp on DCE_VERSION_11_0. Testing with DCE 11.2 and 8.3 * did not show such problems, so this seems to be the exception. */ - if (plane_state->ctx->dce_version != DCE_VERSION_11_0) + if (plane_state->ctx->dce_version > DCE_VERSION_11_0) pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP; else pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c index f2b39ec35c89..cde8ed2560b3 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c @@ -47,6 +47,9 @@ int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_c */ memcpy(&dc->vm_pa_config, pa_config, sizeof(struct dc_phy_addr_space_config)); dc->vm_pa_config.valid = true; +#if defined(CONFIG_DRM_AMD_DC_DCN) + dc_z10_save_init(dc); +#endif } return num_vmids; diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 45640f1c26c4..21d78289b048 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -183,6 +183,8 @@ struct dc_caps { unsigned int cursor_cache_size; struct dc_plane_cap planes[MAX_PLANES]; struct dc_color_caps color; + bool vbios_lttpr_aware; + bool vbios_lttpr_enable; }; struct dc_bug_wa { @@ -354,10 +356,10 @@ enum dcn_pwr_state { }; #if defined(CONFIG_DRM_AMD_DC_DCN) -enum dcn_z9_support_state { - DCN_Z9_SUPPORT_UNKNOWN, - DCN_Z9_SUPPORT_ALLOW, - DCN_Z9_SUPPORT_DISALLOW, +enum dcn_zstate_support_state { + DCN_ZSTATE_SUPPORT_UNKNOWN, + DCN_ZSTATE_SUPPORT_ALLOW, + DCN_ZSTATE_SUPPORT_DISALLOW, }; #endif /* @@ -378,7 +380,7 @@ struct dc_clocks { int dramclk_khz; bool p_state_change_support; #if defined(CONFIG_DRM_AMD_DC_DCN) - enum dcn_z9_support_state z9_support; + enum dcn_zstate_support_state zstate_support; bool dtbclk_en; #endif enum dcn_pwr_state pwr_state; @@ -1336,6 +1338,7 @@ void dc_hardware_release(struct dc *dc); bool dc_set_psr_allow_active(struct dc *dc, bool enable); #if defined(CONFIG_DRM_AMD_DC_DCN) void dc_z10_restore(struct dc *dc); +void dc_z10_save_init(struct dc *dc); #endif bool dc_enable_dmub_notifications(struct dc *dc); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h index df6539e4c730..0464a8f3db3c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h @@ -636,6 +636,7 @@ struct dce_hwseq_registers { uint32_t ODM_MEM_PWR_CTRL3; uint32_t DMU_MEM_PWR_CNTL; uint32_t MMHUBBUB_MEM_PWR_CNTL; + uint32_t DCHUBBUB_ARB_HOSTVM_CNTL; }; /* set field name */ #define HWS_SF(blk_name, reg_name, field_name, post_fix)\ @@ -1110,7 +1111,8 @@ struct dce_hwseq_registers { type DOMAIN_POWER_FORCEON;\ type DOMAIN_POWER_GATE;\ type DOMAIN_PGFSM_PWR_STATUS;\ - type HPO_HDMISTREAMCLK_G_GATE_DIS; + type HPO_HDMISTREAMCLK_G_GATE_DIS;\ + type DISABLE_HOSTVM_FORCE_ALLOW_PSTATE; struct dce_hwseq_shift { HWSEQ_REG_FIELD_LIST(uint8_t) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c index 673b93f4fea5..cb9767ddf93d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c @@ -217,6 +217,8 @@ static void dpp1_dscl_set_lb( const struct line_buffer_params *lb_params, enum lb_memory_config mem_size_config) { + 
uint32_t max_partitions = 63; /* Currently hardcoded on all ASICs before DCN 3.2 */ + /* LB */ if (dpp->base.caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT) { /* DSCL caps: pixel data processed in fixed format */ @@ -239,9 +241,12 @@ static void dpp1_dscl_set_lb( LB_DATA_FORMAT__ALPHA_EN, lb_params->alpha_en); /* Alpha enable */ } + if (dpp->base.caps->max_lb_partitions == 31) + max_partitions = 31; + REG_SET_2(LB_MEMORY_CTRL, 0, MEMORY_CONFIG, mem_size_config, - LB_MAX_PARTITIONS, 63); + LB_MAX_PARTITIONS, max_partitions); } static const uint16_t *dpp1_dscl_get_filter_coeffs_64p(int taps, struct fixed31_32 ratio) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c index 7fa9fc656b0c..f6e747f25ebe 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c @@ -464,7 +464,7 @@ void optc2_lock_doublebuffer_enable(struct timing_generator *optc) REG_UPDATE_2(OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_X, - h_blank_start - 200 - 1, + (h_blank_start - 200 - 1) / optc1->opp_count, MASTER_UPDATE_LOCK_DB_Y, v_blank_start - 1); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 1b05a37b674d..b173fa3653b5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -2093,8 +2093,10 @@ int dcn20_populate_dml_pipes_from_context( - timing->v_border_bottom; pipes[pipe_cnt].pipe.dest.htotal = timing->h_total; pipes[pipe_cnt].pipe.dest.vtotal = v_total; - pipes[pipe_cnt].pipe.dest.hactive = timing->h_addressable; - pipes[pipe_cnt].pipe.dest.vactive = timing->v_addressable; + pipes[pipe_cnt].pipe.dest.hactive = + timing->h_addressable + timing->h_border_left + timing->h_border_right; + pipes[pipe_cnt].pipe.dest.vactive = + timing->v_addressable + timing->v_border_top + timing->v_border_bottom; pipes[pipe_cnt].pipe.dest.interlaced = timing->flags.INTERLACE; pipes[pipe_cnt].pipe.dest.pixel_rate_mhz = timing->pix_clk_100hz/10000.0; if (timing->timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) @@ -3079,6 +3081,37 @@ static bool is_dtbclk_required(struct dc *dc, struct dc_state *context) return false; } +static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struct dc_state *context) +{ + int plane_count; + int i; + + plane_count = 0; + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (context->res_ctx.pipe_ctx[i].plane_state) + plane_count++; + } + + /* + * Zstate is allowed in following scenarios: + * 1. Single eDP with PSR enabled + * 2. 0 planes (No memory requests) + * 3. 
Single eDP without PSR but > 5ms stutter period + */ + if (plane_count == 0) + return DCN_ZSTATE_SUPPORT_ALLOW; + else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) { + struct dc_link *link = context->streams[0]->sink->link; + + if ((link->link_index == 0 && link->psr_settings.psr_feature_enabled) + || context->bw_ctx.dml.vba.StutterPeriod > 5000.0) + return DCN_ZSTATE_SUPPORT_ALLOW; + else + return DCN_ZSTATE_SUPPORT_DISALLOW; + } else + return DCN_ZSTATE_SUPPORT_DISALLOW; +} + void dcn20_calculate_dlg_params( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, @@ -3086,7 +3119,6 @@ void dcn20_calculate_dlg_params( int vlevel) { int i, pipe_idx; - int plane_count; /* Writeback MCIF_WB arbitration parameters */ dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt); @@ -3102,17 +3134,7 @@ void dcn20_calculate_dlg_params( != dm_dram_clock_change_unsupported; context->bw_ctx.bw.dcn.clk.dppclk_khz = 0; - context->bw_ctx.bw.dcn.clk.z9_support = (context->bw_ctx.dml.vba.StutterPeriod > 5000.0) ? - DCN_Z9_SUPPORT_ALLOW : DCN_Z9_SUPPORT_DISALLOW; - - plane_count = 0; - for (i = 0; i < dc->res_pool->pipe_count; i++) { - if (context->res_ctx.pipe_ctx[i].plane_state) - plane_count++; - } - - if (plane_count == 0) - context->bw_ctx.bw.dcn.clk.z9_support = DCN_Z9_SUPPORT_ALLOW; + context->bw_ctx.bw.dcn.clk.zstate_support = decide_zstate_support(dc, context); context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(dc, context); diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index f3d98e3ba624..bf0a198eae15 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -109,6 +109,7 @@ struct _vcs_dpi_ip_params_st dcn2_1_ip = { .max_page_table_levels = 4, .pte_chunk_size_kbytes = 2, .meta_chunk_size_kbytes = 2, + .min_meta_chunk_size_bytes = 256, .writeback_chunk_size_kbytes = 2, .line_buffer_size_bits = 789504, .is_line_buffer_bpp_fixed = 0, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c index 2140b75540cf..23a52d47e61c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c @@ -383,13 +383,6 @@ bool dpp3_get_optimal_number_of_taps( int min_taps_y, min_taps_c; enum lb_memory_config lb_config; - /* Some ASICs does not support FP16 scaling, so we reject modes require this*/ - if (scl_data->viewport.width != scl_data->h_active && - scl_data->viewport.height != scl_data->v_active && - dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT && - scl_data->format == PIXEL_FORMAT_FP16) - return false; - if (scl_data->viewport.width > scl_data->h_active && dpp->ctx->dc->debug.max_downscale_src_width != 0 && scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width) @@ -1440,15 +1433,6 @@ bool dpp3_construct( dpp->tf_shift = tf_shift; dpp->tf_mask = tf_mask; - dpp->lb_pixel_depth_supported = - LB_PIXEL_DEPTH_18BPP | - LB_PIXEL_DEPTH_24BPP | - LB_PIXEL_DEPTH_30BPP | - LB_PIXEL_DEPTH_36BPP; - - dpp->lb_bits_per_entry = LB_BITS_PER_ENTRY; - dpp->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x1404*/ - return true; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h index 3fa86cd090a0..ac644ae6b9f2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h +++ 
b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h @@ -154,6 +154,7 @@ SRI(COLOR_KEYER_BLUE, CNVC_CFG, id), \ SRI(CURSOR_CONTROL, CURSOR0_, id),\ SRI(OBUF_MEM_PWR_CTRL, DSCL, id),\ + SRI(DSCL_MEM_PWR_STATUS, DSCL, id), \ SRI(DSCL_MEM_PWR_CTRL, DSCL, id) #define DPP_REG_LIST_DCN30(id)\ @@ -163,8 +164,6 @@ SRI(CM_SHAPER_LUT_DATA, CM, id),\ SRI(CM_MEM_PWR_CTRL2, CM, id), \ SRI(CM_MEM_PWR_STATUS2, CM, id), \ - SRI(DSCL_MEM_PWR_STATUS, DSCL, id), \ - SRI(DSCL_MEM_PWR_CTRL, DSCL, id), \ SRI(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_B, CM, id),\ SRI(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_G, CM, id),\ SRI(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_R, CM, id),\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c index 596c97dce67e..28e15ebf2f43 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c @@ -1788,7 +1788,6 @@ static bool dcn30_split_stream_for_mpc_or_odm( } pri_pipe->next_odm_pipe = sec_pipe; sec_pipe->prev_odm_pipe = pri_pipe; - ASSERT(sec_pipe->top_pipe == NULL); if (!sec_pipe->top_pipe) sec_pipe->stream_res.opp = pool->opps[pipe_idx]; @@ -2617,6 +2616,26 @@ static bool dcn30_resource_construct( dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + /* read VBIOS LTTPR caps */ + { + if (ctx->dc_bios->funcs->get_lttpr_caps) { + enum bp_result bp_query_result; + uint8_t is_vbios_lttpr_enable = 0; + + bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); + dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; + } + + if (ctx->dc_bios->funcs->get_lttpr_interop) { + enum bp_result bp_query_result; + uint8_t is_vbios_interop_enabled = 0; + + bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, + &is_vbios_interop_enabled); + dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled; + } + } + if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c index 9776d1737818..912285fdce18 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c @@ -1622,106 +1622,12 @@ static void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30); } -static void calculate_wm_set_for_vlevel( - int vlevel, - struct wm_range_table_entry *table_entry, - struct dcn_watermarks *wm_set, - struct display_mode_lib *dml, - display_e2e_pipe_params_st *pipes, - int pipe_cnt) -{ - double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us; - - ASSERT(vlevel < dml->soc.num_states); - /* only pipe 0 is read for voltage and dcf/soc clocks */ - pipes[0].clks_cfg.voltage = vlevel; - pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz; - pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz; - - dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us; - dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us; - dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us; - - wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000; - wm_set->cstate_pstate.cstate_enter_plus_exit_ns = 
get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000; - wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000; - wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000; - wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000; - wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000; - wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000; - wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000; - dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached; - -} - -static void dcn301_calculate_wm_and_dlg( - struct dc *dc, struct dc_state *context, - display_e2e_pipe_params_st *pipes, - int pipe_cnt, - int vlevel_req) -{ - int i, pipe_idx; - int vlevel, vlevel_max; - struct wm_range_table_entry *table_entry; - struct clk_bw_params *bw_params = dc->clk_mgr->bw_params; - - ASSERT(bw_params); - - vlevel_max = bw_params->clk_table.num_entries - 1; - - /* WM Set D */ - table_entry = &bw_params->wm_table.entries[WM_D]; - if (table_entry->wm_type == WM_TYPE_RETRAINING) - vlevel = 0; - else - vlevel = vlevel_max; - calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.d, - &context->bw_ctx.dml, pipes, pipe_cnt); - /* WM Set C */ - table_entry = &bw_params->wm_table.entries[WM_C]; - vlevel = min(max(vlevel_req, 2), vlevel_max); - calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c, - &context->bw_ctx.dml, pipes, pipe_cnt); - /* WM Set B */ - table_entry = &bw_params->wm_table.entries[WM_B]; - vlevel = min(max(vlevel_req, 1), vlevel_max); - calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b, - &context->bw_ctx.dml, pipes, pipe_cnt); - - /* WM Set A */ - table_entry = &bw_params->wm_table.entries[WM_A]; - vlevel = min(vlevel_req, vlevel_max); - calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a, - &context->bw_ctx.dml, pipes, pipe_cnt); - - for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { - if (!context->res_ctx.pipe_ctx[i].stream) - continue; - - pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt); - pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); - - if (dc->config.forced_clocks) { - pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz; - pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz; - } - if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000) - pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0; - if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000) - pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0; - - pipe_idx++; - } - - dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); -} - static struct resource_funcs dcn301_res_pool_funcs = { .destroy = dcn301_destroy_resource_pool, .link_enc_create = dcn301_link_encoder_create, .panel_cntl_create = dcn301_panel_cntl_create, .validate_bandwidth = dcn30_validate_bandwidth, - .calculate_wm_and_dlg = dcn301_calculate_wm_and_dlg, + .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg, .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, .populate_dml_pipes = dcn30_populate_dml_pipes_from_context, 
.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c index 16a75ba0ca82..7d3ff5d44402 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c @@ -1398,11 +1398,18 @@ void dcn302_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param dcn3_02_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz; dcn3_02_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz; dcn3_02_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz; - dcn3_02_soc.clock_limits[i].dtbclk_mhz = dcn3_02_soc.clock_limits[0].dtbclk_mhz; + /* Populate from bw_params for DTBCLK, SOCCLK */ + if (!bw_params->clk_table.entries[i].dtbclk_mhz && i > 0) + dcn3_02_soc.clock_limits[i].dtbclk_mhz = dcn3_02_soc.clock_limits[i-1].dtbclk_mhz; + else + dcn3_02_soc.clock_limits[i].dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz; + if (!bw_params->clk_table.entries[i].socclk_mhz && i > 0) + dcn3_02_soc.clock_limits[i].socclk_mhz = dcn3_02_soc.clock_limits[i-1].socclk_mhz; + else + dcn3_02_soc.clock_limits[i].socclk_mhz = bw_params->clk_table.entries[i].socclk_mhz; /* These clocks cannot come from bw_params, always fill from dcn3_02_soc[1] */ - /* FCLK, PHYCLK_D18, SOCCLK, DSCCLK */ + /* FCLK, PHYCLK_D18, DSCCLK */ dcn3_02_soc.clock_limits[i].phyclk_d18_mhz = dcn3_02_soc.clock_limits[0].phyclk_d18_mhz; - dcn3_02_soc.clock_limits[i].socclk_mhz = dcn3_02_soc.clock_limits[0].socclk_mhz; dcn3_02_soc.clock_limits[i].dscclk_mhz = dcn3_02_soc.clock_limits[0].dscclk_mhz; } /* re-init DML with updated bb */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c index 34b89464ae02..dc7823d23ba8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c @@ -146,8 +146,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_03_soc = { .min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */ .num_states = 1, - .sr_exit_time_us = 26.5, - .sr_enter_plus_exit_time_us = 31, + .sr_exit_time_us = 35.5, + .sr_enter_plus_exit_time_us = 40, .urgent_latency_us = 4.0, .urgent_latency_pixel_data_only_us = 4.0, .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, @@ -1326,11 +1326,18 @@ void dcn303_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param dcn3_03_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz; dcn3_03_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz; dcn3_03_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz; - dcn3_03_soc.clock_limits[i].dtbclk_mhz = dcn3_03_soc.clock_limits[0].dtbclk_mhz; + /* Populate from bw_params for DTBCLK, SOCCLK */ + if (!bw_params->clk_table.entries[i].dtbclk_mhz && i > 0) + dcn3_03_soc.clock_limits[i].dtbclk_mhz = dcn3_03_soc.clock_limits[i-1].dtbclk_mhz; + else + dcn3_03_soc.clock_limits[i].dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz; + if (!bw_params->clk_table.entries[i].socclk_mhz && i > 0) + dcn3_03_soc.clock_limits[i].socclk_mhz = dcn3_03_soc.clock_limits[i-1].socclk_mhz; + else + dcn3_03_soc.clock_limits[i].socclk_mhz = bw_params->clk_table.entries[i].socclk_mhz; /* These clocks cannot come from bw_params, always fill from dcn3_03_soc[1] */ - /* FCLK, PHYCLK_D18, SOCCLK, DSCCLK */ + /* FCLK, PHYCLK_D18, DSCCLK */ dcn3_03_soc.clock_limits[i].phyclk_d18_mhz = dcn3_03_soc.clock_limits[0].phyclk_d18_mhz; - dcn3_03_soc.clock_limits[i].socclk_mhz = 
dcn3_03_soc.clock_limits[0].socclk_mhz; dcn3_03_soc.clock_limits[i].dscclk_mhz = dcn3_03_soc.clock_limits[0].dscclk_mhz; } /* re-init DML with updated bb */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c index fc1fc1a4bf8b..8a2119d8ca0d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c @@ -47,6 +47,7 @@ #include "dce/dmub_outbox.h" #include "dc_link_dp.h" #include "inc/link_dpcd.h" +#include "dcn10/dcn10_hw_sequencer.h" #define DC_LOGGER_INIT(logger) @@ -390,7 +391,7 @@ void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx) is_hdmi_tmds = dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal); is_dp = dc_is_dp_signal(pipe_ctx->stream->signal); - if (!is_hdmi_tmds) + if (!is_hdmi_tmds && !is_dp) return; if (is_hdmi_tmds) @@ -403,6 +404,18 @@ void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx) &pipe_ctx->stream_res.encoder_info_frame); } } +void dcn31_z10_save_init(struct dc *dc) +{ + union dmub_rb_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT; + cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT; + + dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); + dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); + dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); +} void dcn31_z10_restore(struct dc *dc) { @@ -594,3 +607,20 @@ bool dcn31_is_abm_supported(struct dc *dc, } return false; } + +static void apply_riommu_invalidation_wa(struct dc *dc) +{ + struct dce_hwseq *hws = dc->hwseq; + + if (!hws->wa.early_riommu_invalidation) + return; + + REG_UPDATE(DCHUBBUB_ARB_HOSTVM_CNTL, DISABLE_HOSTVM_FORCE_ALLOW_PSTATE, 0); +} + +void dcn31_init_pipes(struct dc *dc, struct dc_state *context) +{ + dcn10_init_pipes(dc, context); + apply_riommu_invalidation_wa(dc); + +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h index ff72f0fdd5be..140435e4f7ff 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h @@ -44,6 +44,7 @@ void dcn31_enable_power_gating_plane( void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx); void dcn31_z10_restore(struct dc *dc); +void dcn31_z10_save_init(struct dc *dc); void dcn31_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on); int dcn31_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config); @@ -52,5 +53,6 @@ void dcn31_reset_hw_ctx_wrap( struct dc_state *context); bool dcn31_is_abm_supported(struct dc *dc, struct dc_state *context, struct dc_stream_state *stream); +void dcn31_init_pipes(struct dc *dc, struct dc_state *context); #endif /* __DC_HWSS_DCN31_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c index e3048f8827d2..b30d923471cb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c @@ -93,18 +93,18 @@ static const struct hw_sequencer_funcs dcn31_funcs = { .set_flip_control_gsl = dcn20_set_flip_control_gsl, .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, .calc_vupdate_position = dcn10_calc_vupdate_position, - .apply_idle_power_optimizations = dcn30_apply_idle_power_optimizations, .set_backlight_level = dcn21_set_backlight_level, .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, .set_pipe = dcn21_set_pipe, .z10_restore = 
dcn31_z10_restore, + .z10_save_init = dcn31_z10_save_init, .is_abm_supported = dcn31_is_abm_supported, .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, .update_visual_confirm_color = dcn20_update_visual_confirm_color, }; static const struct hwseq_private_funcs dcn31_private_funcs = { - .init_pipes = dcn10_init_pipes, + .init_pipes = dcn31_init_pipes, .update_plane_addr = dcn20_update_plane_addr, .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, .update_mpcc = dcn20_update_mpcc, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c index c67bc9544f5d..cd3248dc31d8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c @@ -220,6 +220,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc = { .sr_exit_z8_time_us = 402.0, .sr_enter_plus_exit_z8_time_us = 520.0, .writeback_latency_us = 12.0, + .dram_channel_width_bytes = 4, .round_trip_ping_latency_dcfclk_cycles = 106, .urgent_latency_pixel_data_only_us = 4.0, .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, @@ -741,6 +742,7 @@ static const struct dccg_mask dccg_mask = { #define HWSEQ_DCN31_REG_LIST()\ SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \ + SR(DCHUBBUB_ARB_HOSTVM_CNTL), \ SR(DIO_MEM_PWR_CTRL), \ SR(ODM_MEM_PWR_CTRL3), \ SR(DMU_MEM_PWR_CNTL), \ @@ -801,6 +803,7 @@ static const struct dce_hwseq_registers hwseq_reg = { #define HWSEQ_DCN31_MASK_SH_LIST(mask_sh)\ HWSEQ_DCN_MASK_SH_LIST(mask_sh), \ HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ + HWS_SF(, DCHUBBUB_ARB_HOSTVM_CNTL, DISABLE_HOSTVM_FORCE_ALLOW_PSTATE, mask_sh), \ HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ @@ -1299,6 +1302,7 @@ static struct dce_hwseq *dcn31_hwseq_create( hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; + hws->wa.early_riommu_invalidation = true; } return hws; } @@ -1964,6 +1968,22 @@ static bool dcn31_resource_construct( dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + /* read VBIOS LTTPR caps */ + { + if (ctx->dc_bios->funcs->get_lttpr_caps) { + enum bp_result bp_query_result; + uint8_t is_vbios_lttpr_enable = 0; + + bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); + dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; + } + + /* interop bit is implicit */ + { + dc->caps.vbios_lttpr_aware = true; + } + } + if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c index c26e742e8137..6655bb99fdfd 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c @@ -841,6 +841,9 @@ static bool CalculatePrefetchSchedule( else *DestinationLinesForPrefetch = dst_y_prefetch_equ; + // Limit to prevent overflow in DST_Y_PREFETCH register + *DestinationLinesForPrefetch = dml_min(*DestinationLinesForPrefetch, 63.75); + dml_print("DML: VStartup: %d\n", VStartup); dml_print("DML: TCalc: %f\n", TCalc); dml_print("DML: TWait: %f\n", TWait); @@ -4889,7 +4892,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct 
display_mode_lib *mode_l } } while ((locals->PrefetchSupported[i][j] != true || locals->VRatioInPrefetchSupported[i][j] != true) && (mode_lib->vba.NextMaxVStartup != mode_lib->vba.MaxMaxVStartup[0][0] - || mode_lib->vba.NextPrefetchMode < mode_lib->vba.MaxPrefetchMode)); + || mode_lib->vba.NextPrefetchMode <= mode_lib->vba.MaxPrefetchMode)); if (locals->PrefetchSupported[i][j] == true && locals->VRatioInPrefetchSupported[i][j] == true) { mode_lib->vba.BandwidthAvailableForImmediateFlip = locals->ReturnBWPerState[i][0]; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h index 2a0db2b03047..9ac9d5e8df8b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h @@ -289,6 +289,9 @@ struct dpp_caps { /* DSCL processing pixel data in fixed or float format */ enum dscl_data_processing_format dscl_data_proc_format; + /* max LB partitions */ + unsigned int max_lb_partitions; + /* Calculates the number of partitions in the line buffer. * The implementation of this function is overloaded for * different versions of DSCL LB. diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index 5ab008e62b82..ad5f2adcc40d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -237,6 +237,7 @@ struct hw_sequencer_funcs { int width, int height, int offset); void (*z10_restore)(struct dc *dc); + void (*z10_save_init)(struct dc *dc); void (*update_visual_confirm_color)(struct dc *dc, struct pipe_ctx *pipe_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h index f7f7e4fff0c2..082549f75978 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h @@ -41,6 +41,7 @@ struct dce_hwseq_wa { bool DEGVIDCN10_254; bool DEGVIDCN21; bool disallow_self_refresh_during_multi_plane_transition; + bool early_riommu_invalidation; }; struct hwseq_wa_state { diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 7c4734f905d9..7fafb8d6c1da 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -856,6 +856,11 @@ enum dmub_cmd_idle_opt_type { * DCN hardware restore. */ DMUB_CMD__IDLE_OPT_DCN_RESTORE = 0, + + /** + * DCN hardware save. 
+ */ + DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT = 1 }; /** diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c index 973de346410d..27c7fa3110c8 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c @@ -267,11 +267,13 @@ void dmub_dcn31_set_outbox1_rptr(struct dmub_srv *dmub, uint32_t rptr_offset) bool dmub_dcn31_is_hw_init(struct dmub_srv *dmub) { - uint32_t is_hw_init; + union dmub_fw_boot_status status; + uint32_t is_enable; - REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_hw_init); + status.all = REG_READ(DMCUB_SCRATCH0); + REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enable); - return is_hw_init != 0; + return is_enable != 0 && status.bits.dal_fw; } bool dmub_dcn31_is_supported(struct dmub_srv *dmub) diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_offset.h deleted file mode 100644 index dfacc6b5d89d..000000000000 --- a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_offset.h +++ /dev/null @@ -1,355 +0,0 @@ -/* - * Copyright 2020 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * - */ -#ifndef _mp_13_0_1_OFFSET_HEADER -#define _mp_13_0_1_OFFSET_HEADER - - - -// addressBlock: mp_SmuMp0_SmnDec -// base address: 0x0 -#define regMP0_SMN_C2PMSG_32 0x0060 -#define regMP0_SMN_C2PMSG_32_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_33 0x0061 -#define regMP0_SMN_C2PMSG_33_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_34 0x0062 -#define regMP0_SMN_C2PMSG_34_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_35 0x0063 -#define regMP0_SMN_C2PMSG_35_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_36 0x0064 -#define regMP0_SMN_C2PMSG_36_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_37 0x0065 -#define regMP0_SMN_C2PMSG_37_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_38 0x0066 -#define regMP0_SMN_C2PMSG_38_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_39 0x0067 -#define regMP0_SMN_C2PMSG_39_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_40 0x0068 -#define regMP0_SMN_C2PMSG_40_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_41 0x0069 -#define regMP0_SMN_C2PMSG_41_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_42 0x006a -#define regMP0_SMN_C2PMSG_42_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_43 0x006b -#define regMP0_SMN_C2PMSG_43_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_44 0x006c -#define regMP0_SMN_C2PMSG_44_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_45 0x006d -#define regMP0_SMN_C2PMSG_45_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_46 0x006e -#define regMP0_SMN_C2PMSG_46_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_47 0x006f -#define regMP0_SMN_C2PMSG_47_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_48 0x0070 -#define regMP0_SMN_C2PMSG_48_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_49 0x0071 -#define regMP0_SMN_C2PMSG_49_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_50 0x0072 -#define regMP0_SMN_C2PMSG_50_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_51 0x0073 -#define regMP0_SMN_C2PMSG_51_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_52 0x0074 -#define regMP0_SMN_C2PMSG_52_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_53 0x0075 -#define regMP0_SMN_C2PMSG_53_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_54 0x0076 -#define regMP0_SMN_C2PMSG_54_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_55 0x0077 -#define regMP0_SMN_C2PMSG_55_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_56 0x0078 -#define regMP0_SMN_C2PMSG_56_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_57 0x0079 -#define regMP0_SMN_C2PMSG_57_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_58 0x007a -#define regMP0_SMN_C2PMSG_58_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_59 0x007b -#define regMP0_SMN_C2PMSG_59_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_60 0x007c -#define regMP0_SMN_C2PMSG_60_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_61 0x007d -#define regMP0_SMN_C2PMSG_61_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_62 0x007e -#define regMP0_SMN_C2PMSG_62_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_63 0x007f -#define regMP0_SMN_C2PMSG_63_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_64 0x0080 -#define regMP0_SMN_C2PMSG_64_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_65 0x0081 -#define regMP0_SMN_C2PMSG_65_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_66 0x0082 -#define regMP0_SMN_C2PMSG_66_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_67 0x0083 -#define regMP0_SMN_C2PMSG_67_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_68 0x0084 -#define regMP0_SMN_C2PMSG_68_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_69 0x0085 -#define regMP0_SMN_C2PMSG_69_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_70 0x0086 -#define regMP0_SMN_C2PMSG_70_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_71 0x0087 -#define regMP0_SMN_C2PMSG_71_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_72 0x0088 -#define regMP0_SMN_C2PMSG_72_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_73 0x0089 -#define regMP0_SMN_C2PMSG_73_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_74 0x008a -#define regMP0_SMN_C2PMSG_74_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_75 0x008b -#define 
regMP0_SMN_C2PMSG_75_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_76 0x008c -#define regMP0_SMN_C2PMSG_76_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_77 0x008d -#define regMP0_SMN_C2PMSG_77_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_78 0x008e -#define regMP0_SMN_C2PMSG_78_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_79 0x008f -#define regMP0_SMN_C2PMSG_79_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_80 0x0090 -#define regMP0_SMN_C2PMSG_80_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_81 0x0091 -#define regMP0_SMN_C2PMSG_81_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_82 0x0092 -#define regMP0_SMN_C2PMSG_82_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_83 0x0093 -#define regMP0_SMN_C2PMSG_83_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_84 0x0094 -#define regMP0_SMN_C2PMSG_84_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_85 0x0095 -#define regMP0_SMN_C2PMSG_85_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_86 0x0096 -#define regMP0_SMN_C2PMSG_86_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_87 0x0097 -#define regMP0_SMN_C2PMSG_87_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_88 0x0098 -#define regMP0_SMN_C2PMSG_88_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_89 0x0099 -#define regMP0_SMN_C2PMSG_89_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_90 0x009a -#define regMP0_SMN_C2PMSG_90_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_91 0x009b -#define regMP0_SMN_C2PMSG_91_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_92 0x009c -#define regMP0_SMN_C2PMSG_92_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_93 0x009d -#define regMP0_SMN_C2PMSG_93_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_94 0x009e -#define regMP0_SMN_C2PMSG_94_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_95 0x009f -#define regMP0_SMN_C2PMSG_95_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_96 0x00a0 -#define regMP0_SMN_C2PMSG_96_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_97 0x00a1 -#define regMP0_SMN_C2PMSG_97_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_98 0x00a2 -#define regMP0_SMN_C2PMSG_98_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_99 0x00a3 -#define regMP0_SMN_C2PMSG_99_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_100 0x00a4 -#define regMP0_SMN_C2PMSG_100_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_101 0x00a5 -#define regMP0_SMN_C2PMSG_101_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_102 0x00a6 -#define regMP0_SMN_C2PMSG_102_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_103 0x00a7 -#define regMP0_SMN_C2PMSG_103_BASE_IDX 0 -#define regMP0_SMN_IH_CREDIT 0x00c1 -#define regMP0_SMN_IH_CREDIT_BASE_IDX 0 -#define regMP0_SMN_IH_SW_INT 0x00c2 -#define regMP0_SMN_IH_SW_INT_BASE_IDX 0 -#define regMP0_SMN_IH_SW_INT_CTRL 0x00c3 -#define regMP0_SMN_IH_SW_INT_CTRL_BASE_IDX 0 - - -// addressBlock: mp_SmuMp1_SmnDec -// base address: 0x0 -#define regMP1_SMN_C2PMSG_32 0x0260 -#define regMP1_SMN_C2PMSG_32_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_33 0x0261 -#define regMP1_SMN_C2PMSG_33_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_34 0x0262 -#define regMP1_SMN_C2PMSG_34_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_35 0x0263 -#define regMP1_SMN_C2PMSG_35_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_36 0x0264 -#define regMP1_SMN_C2PMSG_36_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_37 0x0265 -#define regMP1_SMN_C2PMSG_37_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_38 0x0266 -#define regMP1_SMN_C2PMSG_38_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_39 0x0267 -#define regMP1_SMN_C2PMSG_39_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_40 0x0268 -#define regMP1_SMN_C2PMSG_40_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_41 0x0269 -#define regMP1_SMN_C2PMSG_41_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_42 0x026a -#define regMP1_SMN_C2PMSG_42_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_43 0x026b -#define regMP1_SMN_C2PMSG_43_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_44 0x026c -#define regMP1_SMN_C2PMSG_44_BASE_IDX 0 -#define 
regMP1_SMN_C2PMSG_45 0x026d -#define regMP1_SMN_C2PMSG_45_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_46 0x026e -#define regMP1_SMN_C2PMSG_46_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_47 0x026f -#define regMP1_SMN_C2PMSG_47_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_48 0x0270 -#define regMP1_SMN_C2PMSG_48_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_49 0x0271 -#define regMP1_SMN_C2PMSG_49_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_50 0x0272 -#define regMP1_SMN_C2PMSG_50_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_51 0x0273 -#define regMP1_SMN_C2PMSG_51_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_52 0x0274 -#define regMP1_SMN_C2PMSG_52_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_53 0x0275 -#define regMP1_SMN_C2PMSG_53_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_54 0x0276 -#define regMP1_SMN_C2PMSG_54_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_55 0x0277 -#define regMP1_SMN_C2PMSG_55_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_56 0x0278 -#define regMP1_SMN_C2PMSG_56_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_57 0x0279 -#define regMP1_SMN_C2PMSG_57_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_58 0x027a -#define regMP1_SMN_C2PMSG_58_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_59 0x027b -#define regMP1_SMN_C2PMSG_59_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_60 0x027c -#define regMP1_SMN_C2PMSG_60_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_61 0x027d -#define regMP1_SMN_C2PMSG_61_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_62 0x027e -#define regMP1_SMN_C2PMSG_62_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_63 0x027f -#define regMP1_SMN_C2PMSG_63_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_64 0x0280 -#define regMP1_SMN_C2PMSG_64_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_65 0x0281 -#define regMP1_SMN_C2PMSG_65_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_66 0x0282 -#define regMP1_SMN_C2PMSG_66_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_67 0x0283 -#define regMP1_SMN_C2PMSG_67_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_68 0x0284 -#define regMP1_SMN_C2PMSG_68_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_69 0x0285 -#define regMP1_SMN_C2PMSG_69_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_70 0x0286 -#define regMP1_SMN_C2PMSG_70_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_71 0x0287 -#define regMP1_SMN_C2PMSG_71_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_72 0x0288 -#define regMP1_SMN_C2PMSG_72_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_73 0x0289 -#define regMP1_SMN_C2PMSG_73_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_74 0x028a -#define regMP1_SMN_C2PMSG_74_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_75 0x028b -#define regMP1_SMN_C2PMSG_75_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_76 0x028c -#define regMP1_SMN_C2PMSG_76_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_77 0x028d -#define regMP1_SMN_C2PMSG_77_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_78 0x028e -#define regMP1_SMN_C2PMSG_78_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_79 0x028f -#define regMP1_SMN_C2PMSG_79_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_80 0x0290 -#define regMP1_SMN_C2PMSG_80_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_81 0x0291 -#define regMP1_SMN_C2PMSG_81_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_82 0x0292 -#define regMP1_SMN_C2PMSG_82_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_83 0x0293 -#define regMP1_SMN_C2PMSG_83_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_84 0x0294 -#define regMP1_SMN_C2PMSG_84_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_85 0x0295 -#define regMP1_SMN_C2PMSG_85_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_86 0x0296 -#define regMP1_SMN_C2PMSG_86_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_87 0x0297 -#define regMP1_SMN_C2PMSG_87_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_88 0x0298 -#define regMP1_SMN_C2PMSG_88_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_89 0x0299 -#define regMP1_SMN_C2PMSG_89_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_90 0x029a -#define 
regMP1_SMN_C2PMSG_90_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_91 0x029b -#define regMP1_SMN_C2PMSG_91_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_92 0x029c -#define regMP1_SMN_C2PMSG_92_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_93 0x029d -#define regMP1_SMN_C2PMSG_93_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_94 0x029e -#define regMP1_SMN_C2PMSG_94_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_95 0x029f -#define regMP1_SMN_C2PMSG_95_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_96 0x02a0 -#define regMP1_SMN_C2PMSG_96_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_97 0x02a1 -#define regMP1_SMN_C2PMSG_97_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_98 0x02a2 -#define regMP1_SMN_C2PMSG_98_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_99 0x02a3 -#define regMP1_SMN_C2PMSG_99_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_100 0x02a4 -#define regMP1_SMN_C2PMSG_100_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_101 0x02a5 -#define regMP1_SMN_C2PMSG_101_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_102 0x02a6 -#define regMP1_SMN_C2PMSG_102_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_103 0x02a7 -#define regMP1_SMN_C2PMSG_103_BASE_IDX 0 -#define regMP1_SMN_IH_CREDIT 0x02c1 -#define regMP1_SMN_IH_CREDIT_BASE_IDX 0 -#define regMP1_SMN_IH_SW_INT 0x02c2 -#define regMP1_SMN_IH_SW_INT_BASE_IDX 0 -#define regMP1_SMN_IH_SW_INT_CTRL 0x02c3 -#define regMP1_SMN_IH_SW_INT_CTRL_BASE_IDX 0 -#define regMP1_SMN_FPS_CNT 0x02c4 -#define regMP1_SMN_FPS_CNT_BASE_IDX 0 -#define regMP1_SMN_EXT_SCRATCH0 0x0340 -#define regMP1_SMN_EXT_SCRATCH0_BASE_IDX 0 -#define regMP1_SMN_EXT_SCRATCH1 0x0341 -#define regMP1_SMN_EXT_SCRATCH1_BASE_IDX 0 -#define regMP1_SMN_EXT_SCRATCH2 0x0342 -#define regMP1_SMN_EXT_SCRATCH2_BASE_IDX 0 -#define regMP1_SMN_EXT_SCRATCH3 0x0343 -#define regMP1_SMN_EXT_SCRATCH3_BASE_IDX 0 -#define regMP1_SMN_EXT_SCRATCH4 0x0344 -#define regMP1_SMN_EXT_SCRATCH4_BASE_IDX 0 -#define regMP1_SMN_EXT_SCRATCH5 0x0345 -#define regMP1_SMN_EXT_SCRATCH5_BASE_IDX 0 -#define regMP1_SMN_EXT_SCRATCH6 0x0346 -#define regMP1_SMN_EXT_SCRATCH6_BASE_IDX 0 -#define regMP1_SMN_EXT_SCRATCH7 0x0347 -#define regMP1_SMN_EXT_SCRATCH7_BASE_IDX 0 - - -#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_sh_mask.h deleted file mode 100644 index 2d5e8b58e693..000000000000 --- a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_sh_mask.h +++ /dev/null @@ -1,531 +0,0 @@ -/* - * Copyright 2020 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * - */ -#ifndef _mp_13_0_1_SH_MASK_HEADER -#define _mp_13_0_1_SH_MASK_HEADER - - -// addressBlock: mp_SmuMp0_SmnDec -//MP0_SMN_C2PMSG_32 -#define MP0_SMN_C2PMSG_32__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_33 -#define MP0_SMN_C2PMSG_33__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_34 -#define MP0_SMN_C2PMSG_34__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_35 -#define MP0_SMN_C2PMSG_35__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_36 -#define MP0_SMN_C2PMSG_36__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_37 -#define MP0_SMN_C2PMSG_37__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_38 -#define MP0_SMN_C2PMSG_38__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_39 -#define MP0_SMN_C2PMSG_39__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_40 -#define MP0_SMN_C2PMSG_40__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_41 -#define MP0_SMN_C2PMSG_41__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_42 -#define MP0_SMN_C2PMSG_42__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_43 -#define MP0_SMN_C2PMSG_43__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_44 -#define MP0_SMN_C2PMSG_44__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_45 -#define MP0_SMN_C2PMSG_45__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_46 -#define MP0_SMN_C2PMSG_46__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_47 -#define MP0_SMN_C2PMSG_47__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_48 -#define MP0_SMN_C2PMSG_48__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_49 -#define MP0_SMN_C2PMSG_49__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_50 -#define MP0_SMN_C2PMSG_50__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_51 -#define MP0_SMN_C2PMSG_51__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_52 -#define MP0_SMN_C2PMSG_52__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_53 -#define MP0_SMN_C2PMSG_53__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_54 -#define MP0_SMN_C2PMSG_54__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_55 -#define MP0_SMN_C2PMSG_55__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_56 -#define MP0_SMN_C2PMSG_56__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_57 -#define MP0_SMN_C2PMSG_57__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_58 -#define MP0_SMN_C2PMSG_58__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_59 -#define MP0_SMN_C2PMSG_59__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_60 -#define 
MP0_SMN_C2PMSG_60__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_61 -#define MP0_SMN_C2PMSG_61__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_62 -#define MP0_SMN_C2PMSG_62__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_63 -#define MP0_SMN_C2PMSG_63__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_64 -#define MP0_SMN_C2PMSG_64__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_65 -#define MP0_SMN_C2PMSG_65__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_66 -#define MP0_SMN_C2PMSG_66__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_67 -#define MP0_SMN_C2PMSG_67__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_68 -#define MP0_SMN_C2PMSG_68__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_69 -#define MP0_SMN_C2PMSG_69__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_70 -#define MP0_SMN_C2PMSG_70__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_71 -#define MP0_SMN_C2PMSG_71__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_72 -#define MP0_SMN_C2PMSG_72__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_73 -#define MP0_SMN_C2PMSG_73__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_74 -#define MP0_SMN_C2PMSG_74__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_75 -#define MP0_SMN_C2PMSG_75__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_76 -#define MP0_SMN_C2PMSG_76__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_77 -#define MP0_SMN_C2PMSG_77__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_78 -#define MP0_SMN_C2PMSG_78__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_79 -#define MP0_SMN_C2PMSG_79__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_80 -#define MP0_SMN_C2PMSG_80__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_81 -#define MP0_SMN_C2PMSG_81__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_82 -#define MP0_SMN_C2PMSG_82__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_83 -#define MP0_SMN_C2PMSG_83__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_84 -#define MP0_SMN_C2PMSG_84__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_85 -#define MP0_SMN_C2PMSG_85__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_86 -#define MP0_SMN_C2PMSG_86__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_87 -#define MP0_SMN_C2PMSG_87__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_88 -#define MP0_SMN_C2PMSG_88__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_89 -#define MP0_SMN_C2PMSG_89__CONTENT__SHIFT 0x0 
-#define MP0_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_90 -#define MP0_SMN_C2PMSG_90__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_91 -#define MP0_SMN_C2PMSG_91__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_92 -#define MP0_SMN_C2PMSG_92__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_93 -#define MP0_SMN_C2PMSG_93__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_94 -#define MP0_SMN_C2PMSG_94__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_95 -#define MP0_SMN_C2PMSG_95__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_96 -#define MP0_SMN_C2PMSG_96__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_97 -#define MP0_SMN_C2PMSG_97__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_98 -#define MP0_SMN_C2PMSG_98__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_99 -#define MP0_SMN_C2PMSG_99__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_100 -#define MP0_SMN_C2PMSG_100__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_101 -#define MP0_SMN_C2PMSG_101__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_102 -#define MP0_SMN_C2PMSG_102__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_103 -#define MP0_SMN_C2PMSG_103__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_IH_CREDIT -#define MP0_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0 -#define MP0_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10 -#define MP0_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L -#define MP0_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L -//MP0_SMN_IH_SW_INT -#define MP0_SMN_IH_SW_INT__ID__SHIFT 0x0 -#define MP0_SMN_IH_SW_INT__VALID__SHIFT 0x8 -#define MP0_SMN_IH_SW_INT__ID_MASK 0x000000FFL -#define MP0_SMN_IH_SW_INT__VALID_MASK 0x00000100L -//MP0_SMN_IH_SW_INT_CTRL -#define MP0_SMN_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0 -#define MP0_SMN_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8 -#define MP0_SMN_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L -#define MP0_SMN_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L - - -// addressBlock: mp_SmuMp1Pub_CruDec -//MP1_FIRMWARE_FLAGS -#define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT 0x0 -#define MP1_FIRMWARE_FLAGS__RESERVED__SHIFT 0x1 -#define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK 0x00000001L -#define MP1_FIRMWARE_FLAGS__RESERVED_MASK 0xFFFFFFFEL - - -// addressBlock: mp_SmuMp1_SmnDec -//MP1_SMN_C2PMSG_32 -#define MP1_SMN_C2PMSG_32__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_33 -#define MP1_SMN_C2PMSG_33__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_34 -#define MP1_SMN_C2PMSG_34__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_35 -#define MP1_SMN_C2PMSG_35__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_36 -#define MP1_SMN_C2PMSG_36__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_37 -#define MP1_SMN_C2PMSG_37__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_38 -#define 
MP1_SMN_C2PMSG_38__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_39 -#define MP1_SMN_C2PMSG_39__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_40 -#define MP1_SMN_C2PMSG_40__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_41 -#define MP1_SMN_C2PMSG_41__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_42 -#define MP1_SMN_C2PMSG_42__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_43 -#define MP1_SMN_C2PMSG_43__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_44 -#define MP1_SMN_C2PMSG_44__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_45 -#define MP1_SMN_C2PMSG_45__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_46 -#define MP1_SMN_C2PMSG_46__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_47 -#define MP1_SMN_C2PMSG_47__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_48 -#define MP1_SMN_C2PMSG_48__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_49 -#define MP1_SMN_C2PMSG_49__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_50 -#define MP1_SMN_C2PMSG_50__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_51 -#define MP1_SMN_C2PMSG_51__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_52 -#define MP1_SMN_C2PMSG_52__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_53 -#define MP1_SMN_C2PMSG_53__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_54 -#define MP1_SMN_C2PMSG_54__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_55 -#define MP1_SMN_C2PMSG_55__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_56 -#define MP1_SMN_C2PMSG_56__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_57 -#define MP1_SMN_C2PMSG_57__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_58 -#define MP1_SMN_C2PMSG_58__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_59 -#define MP1_SMN_C2PMSG_59__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_60 -#define MP1_SMN_C2PMSG_60__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_61 -#define MP1_SMN_C2PMSG_61__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_62 -#define MP1_SMN_C2PMSG_62__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_63 -#define MP1_SMN_C2PMSG_63__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_64 -#define MP1_SMN_C2PMSG_64__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_65 -#define MP1_SMN_C2PMSG_65__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_66 -#define MP1_SMN_C2PMSG_66__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_67 -#define MP1_SMN_C2PMSG_67__CONTENT__SHIFT 0x0 
-#define MP1_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_68 -#define MP1_SMN_C2PMSG_68__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_69 -#define MP1_SMN_C2PMSG_69__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_70 -#define MP1_SMN_C2PMSG_70__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_71 -#define MP1_SMN_C2PMSG_71__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_72 -#define MP1_SMN_C2PMSG_72__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_73 -#define MP1_SMN_C2PMSG_73__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_74 -#define MP1_SMN_C2PMSG_74__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_75 -#define MP1_SMN_C2PMSG_75__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_76 -#define MP1_SMN_C2PMSG_76__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_77 -#define MP1_SMN_C2PMSG_77__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_78 -#define MP1_SMN_C2PMSG_78__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_79 -#define MP1_SMN_C2PMSG_79__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_80 -#define MP1_SMN_C2PMSG_80__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_81 -#define MP1_SMN_C2PMSG_81__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_82 -#define MP1_SMN_C2PMSG_82__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_83 -#define MP1_SMN_C2PMSG_83__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_84 -#define MP1_SMN_C2PMSG_84__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_85 -#define MP1_SMN_C2PMSG_85__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_86 -#define MP1_SMN_C2PMSG_86__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_87 -#define MP1_SMN_C2PMSG_87__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_88 -#define MP1_SMN_C2PMSG_88__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_89 -#define MP1_SMN_C2PMSG_89__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_90 -#define MP1_SMN_C2PMSG_90__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_91 -#define MP1_SMN_C2PMSG_91__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_92 -#define MP1_SMN_C2PMSG_92__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_93 -#define MP1_SMN_C2PMSG_93__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_94 -#define MP1_SMN_C2PMSG_94__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_95 -#define MP1_SMN_C2PMSG_95__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_96 -#define MP1_SMN_C2PMSG_96__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_96__CONTENT_MASK 
0xFFFFFFFFL -//MP1_SMN_C2PMSG_97 -#define MP1_SMN_C2PMSG_97__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_98 -#define MP1_SMN_C2PMSG_98__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_99 -#define MP1_SMN_C2PMSG_99__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_100 -#define MP1_SMN_C2PMSG_100__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_101 -#define MP1_SMN_C2PMSG_101__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_102 -#define MP1_SMN_C2PMSG_102__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_103 -#define MP1_SMN_C2PMSG_103__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_IH_CREDIT -#define MP1_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0 -#define MP1_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10 -#define MP1_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L -#define MP1_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L -//MP1_SMN_IH_SW_INT -#define MP1_SMN_IH_SW_INT__ID__SHIFT 0x0 -#define MP1_SMN_IH_SW_INT__VALID__SHIFT 0x8 -#define MP1_SMN_IH_SW_INT__ID_MASK 0x000000FFL -#define MP1_SMN_IH_SW_INT__VALID_MASK 0x00000100L -//MP1_SMN_IH_SW_INT_CTRL -#define MP1_SMN_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0 -#define MP1_SMN_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8 -#define MP1_SMN_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L -#define MP1_SMN_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L -//MP1_SMN_FPS_CNT -#define MP1_SMN_FPS_CNT__COUNT__SHIFT 0x0 -#define MP1_SMN_FPS_CNT__COUNT_MASK 0xFFFFFFFFL -//MP1_SMN_EXT_SCRATCH0 -#define MP1_SMN_EXT_SCRATCH0__DATA__SHIFT 0x0 -#define MP1_SMN_EXT_SCRATCH0__DATA_MASK 0xFFFFFFFFL -//MP1_SMN_EXT_SCRATCH1 -#define MP1_SMN_EXT_SCRATCH1__DATA__SHIFT 0x0 -#define MP1_SMN_EXT_SCRATCH1__DATA_MASK 0xFFFFFFFFL -//MP1_SMN_EXT_SCRATCH2 -#define MP1_SMN_EXT_SCRATCH2__DATA__SHIFT 0x0 -#define MP1_SMN_EXT_SCRATCH2__DATA_MASK 0xFFFFFFFFL -//MP1_SMN_EXT_SCRATCH3 -#define MP1_SMN_EXT_SCRATCH3__DATA__SHIFT 0x0 -#define MP1_SMN_EXT_SCRATCH3__DATA_MASK 0xFFFFFFFFL -//MP1_SMN_EXT_SCRATCH4 -#define MP1_SMN_EXT_SCRATCH4__DATA__SHIFT 0x0 -#define MP1_SMN_EXT_SCRATCH4__DATA_MASK 0xFFFFFFFFL -//MP1_SMN_EXT_SCRATCH5 -#define MP1_SMN_EXT_SCRATCH5__DATA__SHIFT 0x0 -#define MP1_SMN_EXT_SCRATCH5__DATA_MASK 0xFFFFFFFFL -//MP1_SMN_EXT_SCRATCH6 -#define MP1_SMN_EXT_SCRATCH6__DATA__SHIFT 0x0 -#define MP1_SMN_EXT_SCRATCH6__DATA_MASK 0xFFFFFFFFL -//MP1_SMN_EXT_SCRATCH7 -#define MP1_SMN_EXT_SCRATCH7__DATA__SHIFT 0x0 -#define MP1_SMN_EXT_SCRATCH7__DATA_MASK 0xFFFFFFFFL - - -#endif diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index 3811e58dd857..44955458fe38 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h @@ -590,7 +590,7 @@ struct atom_firmware_info_v3_4 { uint8_t board_i2c_feature_id; // enum of atom_board_i2c_feature_id_def uint8_t board_i2c_feature_gpio_id; // i2c id find in gpio_lut data table gpio_id uint8_t board_i2c_feature_slave_addr; - uint8_t reserved3; + uint8_t ras_rom_i2c_slave_addr; uint16_t bootup_mvddq_mv; uint16_t bootup_mvpp_mv; uint32_t zfbstartaddrin16mb; diff --git a/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h index 610266088ff1..35fa0d8e92dd 100644 --- a/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h +++ b/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h @@ -101,7 
+101,8 @@ #define PPSMC_MSG_SetSystemVirtualSTBtoDramAddrLow 0x41 #define PPSMC_MSG_GfxDriverResetRecovery 0x42 -#define PPSMC_Message_Count 0x43 +#define PPSMC_MSG_BoardPowerCalibration 0x43 +#define PPSMC_Message_Count 0x44 //PPSMC Reset Types #define PPSMC_RESET_TYPE_WARM_RESET 0x00 diff --git a/drivers/gpu/drm/amd/pm/inc/smu_types.h b/drivers/gpu/drm/amd/pm/inc/smu_types.h index 89a16dcd0fff..1d3765b873df 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu_types.h +++ b/drivers/gpu/drm/amd/pm/inc/smu_types.h @@ -225,7 +225,8 @@ __SMU_DUMMY_MAP(DisableDeterminism), \ __SMU_DUMMY_MAP(SetUclkDpmMode), \ __SMU_DUMMY_MAP(LightSBR), \ - __SMU_DUMMY_MAP(GfxDriverResetRecovery), + __SMU_DUMMY_MAP(GfxDriverResetRecovery), \ + __SMU_DUMMY_MAP(BoardPowerCalibration), #undef __SMU_DUMMY_MAP #define __SMU_DUMMY_MAP(type) SMU_MSG_##type diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h index 1962a5877191..f61b5c914a3d 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h +++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h @@ -34,7 +34,7 @@ #define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0xE #define SMU11_DRIVER_IF_VERSION_VANGOGH 0x03 #define SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish 0xF -#define SMU11_DRIVER_IF_VERSION_Beige_Goby 0x9 +#define SMU11_DRIVER_IF_VERSION_Beige_Goby 0xD /* MP Apertures */ #define MP0_Public 0x03800000 diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h index 6119a36b2cba..dc91eb608791 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h +++ b/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h @@ -26,6 +26,7 @@ #include "amdgpu_smu.h" #define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF +#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04 #define SMU13_DRIVER_IF_VERSION_ALDE 0x07 /* MP Apertures */ diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1.h b/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1.h deleted file mode 100644 index b6c976a4d578..000000000000 --- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1.h +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2020 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ -#ifndef __SMU_V13_0_1_H__ -#define __SMU_V13_0_1_H__ - -#include "amdgpu_smu.h" - -#define SMU13_0_1_DRIVER_IF_VERSION_INV 0xFFFFFFFF -#define SMU13_0_1_DRIVER_IF_VERSION_YELLOW_CARP 0x3 - -/* MP Apertures */ -#define MP0_Public 0x03800000 -#define MP0_SRAM 0x03900000 -#define MP1_Public 0x03b00000 -#define MP1_SRAM 0x03c00004 - -/* address block */ -#define smnMP1_FIRMWARE_FLAGS 0x3010024 - - -#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3) - -int smu_v13_0_1_check_fw_status(struct smu_context *smu); - -int smu_v13_0_1_check_fw_version(struct smu_context *smu); - -int smu_v13_0_1_fini_smc_tables(struct smu_context *smu); - -int smu_v13_0_1_get_vbios_bootup_values(struct smu_context *smu); - -int smu_v13_0_1_set_default_dpm_tables(struct smu_context *smu); - -int smu_v13_0_1_set_driver_table_location(struct smu_context *smu); - -int smu_v13_0_1_gfx_off_control(struct smu_context *smu, bool enable); -#endif -#endif diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_pmfw.h b/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_pmfw.h index 5627de734246..c5e26d619bf0 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_pmfw.h +++ b/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_pmfw.h @@ -111,7 +111,9 @@ typedef struct { uint32_t InWhisperMode : 1; uint32_t spare0 : 1; uint32_t ZstateStatus : 4; - uint32_t spare1 :12; + uint32_t spare1 : 4; + uint32_t DstateFun : 4; + uint32_t DstateDev : 4; // MP1_EXT_SCRATCH2 uint32_t P2JobHandler :24; uint32_t RsmuPmiP2FinishedCnt : 8; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c index 25979106fd25..02e8c6e5448d 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c @@ -5127,6 +5127,13 @@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) return size; } +static bool vega10_get_power_profile_mode_quirks(struct pp_hwmgr *hwmgr) +{ + struct amdgpu_device *adev = hwmgr->adev; + + return (adev->pdev->device == 0x6860); +} + static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size) { struct vega10_hwmgr *data = hwmgr->backend; @@ -5163,9 +5170,15 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui } out: - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask, + if (vega10_get_power_profile_mode_quirks(hwmgr)) + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask, + 1 << power_profile_mode, + NULL); + else + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask, (!power_profile_mode) ? 0 : 1 << (power_profile_mode - 1), NULL); + hwmgr->power_profile_mode = power_profile_mode; return 0; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index c751f717a0da..d92dd2c7448e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -353,8 +353,7 @@ static void sienna_cichlid_check_bxco_support(struct smu_context *smu) struct amdgpu_device *adev = smu->adev; uint32_t val; - if (powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_BACO || - powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_MACO) { + if (powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_BACO) { val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0); smu_baco->platform_support = (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? 
true : diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index 388c5cb5c647..0a5d46ac9ccd 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -1528,6 +1528,7 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) case CHIP_SIENNA_CICHLID: case CHIP_NAVY_FLOUNDER: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: if (amdgpu_runtime_pm == 2) ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index 18681dc458da..bcaaa086fc2f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -256,7 +256,7 @@ static int vangogh_tables_init(struct smu_context *smu) return 0; err3_out: - kfree(smu_table->clocks_table); + kfree(smu_table->watermarks_table); err2_out: kfree(smu_table->gpu_metrics_table); err1_out: diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile b/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile index 9b3a8503f5cd..d4c4c495762c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile @@ -23,7 +23,7 @@ # Makefile for the 'smu manager' sub-component of powerplay. # It provides the smu management services for the driver. -SMU13_MGR = smu_v13_0.o aldebaran_ppt.o smu_v13_0_1.o yellow_carp_ppt.o +SMU13_MGR = smu_v13_0.o aldebaran_ppt.o yellow_carp_ppt.o AMD_SWSMU_SMU13MGR = $(addprefix $(AMD_SWSMU_PATH)/smu13/,$(SMU13_MGR)) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index 9316a726195c..cb5485cf243f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -134,6 +134,7 @@ static const struct cmn2asic_msg_mapping aldebaran_message_map[SMU_MSG_MAX_COUNT MSG_MAP(DisableDeterminism, PPSMC_MSG_DisableDeterminism, 0), MSG_MAP(SetUclkDpmMode, PPSMC_MSG_SetUclkDpmMode, 0), MSG_MAP(GfxDriverResetRecovery, PPSMC_MSG_GfxDriverResetRecovery, 0), + MSG_MAP(BoardPowerCalibration, PPSMC_MSG_BoardPowerCalibration, 0), }; static const struct cmn2asic_mapping aldebaran_clk_map[SMU_CLK_COUNT] = { @@ -440,6 +441,39 @@ static int aldebaran_setup_pptable(struct smu_context *smu) return ret; } +static bool aldebaran_is_primary(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + + if (adev->smuio.funcs && adev->smuio.funcs->get_die_id) + return adev->smuio.funcs->get_die_id(adev) == 0; + + return true; +} + +static int aldebaran_run_board_btc(struct smu_context *smu) +{ + u32 smu_version; + int ret; + + if (!aldebaran_is_primary(smu)) + return 0; + + ret = smu_cmn_get_smc_version(smu, NULL, &smu_version); + if (ret) { + dev_err(smu->adev->dev, "Failed to get smu version!\n"); + return ret; + } + if (smu_version <= 0x00441d00) + return 0; + + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_BoardPowerCalibration, NULL); + if (ret) + dev_err(smu->adev->dev, "Board power calibration failed!\n"); + + return ret; +} + static int aldebaran_run_btc(struct smu_context *smu) { int ret; @@ -447,6 +481,8 @@ static int aldebaran_run_btc(struct smu_context *smu) ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL); if (ret) dev_err(smu->adev->dev, "RunDcBtc failed!\n"); + else + ret = aldebaran_run_board_btc(smu); return ret; } @@ -524,16 +560,6 @@ static int aldebaran_freqs_in_same_level(int32_t 
frequency1, return (abs(frequency1 - frequency2) <= EPSILON); } -static bool aldebaran_is_primary(struct smu_context *smu) -{ - struct amdgpu_device *adev = smu->adev; - - if (adev->smuio.funcs && adev->smuio.funcs->get_die_id) - return adev->smuio.funcs->get_die_id(adev) == 0; - - return true; -} - static int aldebaran_get_smu_metrics_data(struct smu_context *smu, MetricsMember_t member, uint32_t *value) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index a3dc7194aaf8..a421ba85bd6d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -210,6 +210,9 @@ int smu_v13_0_check_fw_version(struct smu_context *smu) case CHIP_ALDEBARAN: smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_ALDE; break; + case CHIP_YELLOW_CARP: + smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_YELLOW_CARP; + break; default: dev_err(smu->adev->dev, "smu unsupported asic type:%d.\n", smu->adev->asic_type); smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_INV; @@ -694,6 +697,27 @@ failed: return ret; } +int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable) +{ + int ret = 0; + struct amdgpu_device *adev = smu->adev; + + switch (adev->asic_type) { + case CHIP_YELLOW_CARP: + if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) + return 0; + if (enable) + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL); + else + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL); + break; + default: + break; + } + + return ret; +} + int smu_v13_0_system_features_control(struct smu_context *smu, bool en) { diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_1.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_1.c deleted file mode 100644 index 61917b49f2bf..000000000000 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_1.c +++ /dev/null @@ -1,311 +0,0 @@ -/* - * Copyright 2020 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -//#include <linux/reboot.h> - -#define SWSMU_CODE_LAYER_L3 - -#include "amdgpu.h" -#include "amdgpu_smu.h" -#include "smu_v13_0_1.h" -#include "soc15_common.h" -#include "smu_cmn.h" -#include "atomfirmware.h" -#include "amdgpu_atomfirmware.h" -#include "amdgpu_atombios.h" -#include "atom.h" - -#include "asic_reg/mp/mp_13_0_1_offset.h" -#include "asic_reg/mp/mp_13_0_1_sh_mask.h" - -/* - * DO NOT use these for err/warn/info/debug messages. - * Use dev_err, dev_warn, dev_info and dev_dbg instead. 
- * They are more MGPU friendly. - */ -#undef pr_err -#undef pr_warn -#undef pr_info -#undef pr_debug - -int smu_v13_0_1_check_fw_status(struct smu_context *smu) -{ - struct amdgpu_device *adev = smu->adev; - uint32_t mp1_fw_flags; - - mp1_fw_flags = RREG32_PCIE(MP1_Public | - (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); - - if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> - MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT) - return 0; - - return -EIO; -} - -int smu_v13_0_1_check_fw_version(struct smu_context *smu) -{ - uint32_t if_version = 0xff, smu_version = 0xff; - uint16_t smu_major; - uint8_t smu_minor, smu_debug; - int ret = 0; - - ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version); - if (ret) - return ret; - - smu_major = (smu_version >> 16) & 0xffff; - smu_minor = (smu_version >> 8) & 0xff; - smu_debug = (smu_version >> 0) & 0xff; - - switch (smu->adev->asic_type) { - case CHIP_YELLOW_CARP: - smu->smc_driver_if_version = SMU13_0_1_DRIVER_IF_VERSION_YELLOW_CARP; - break; - - default: - dev_err(smu->adev->dev, "smu unsupported asic type:%d.\n", smu->adev->asic_type); - smu->smc_driver_if_version = SMU13_0_1_DRIVER_IF_VERSION_INV; - break; - } - - dev_info(smu->adev->dev, "smu fw reported version = 0x%08x (%d.%d.%d)\n", - smu_version, smu_major, smu_minor, smu_debug); - - /* - * 1. if_version mismatch is not critical as our fw is designed - * to be backward compatible. - * 2. New fw usually brings some optimizations. But that's visible - * only on the paired driver. - * Considering above, we just leave user a warning message instead - * of halt driver loading. - */ - if (if_version != smu->smc_driver_if_version) { - dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, " - "smu fw version = 0x%08x (%d.%d.%d)\n", - smu->smc_driver_if_version, if_version, - smu_version, smu_major, smu_minor, smu_debug); - dev_warn(smu->adev->dev, "SMU driver if version not matched\n"); - } - - return ret; -} - -int smu_v13_0_1_fini_smc_tables(struct smu_context *smu) -{ - struct smu_table_context *smu_table = &smu->smu_table; - - kfree(smu_table->clocks_table); - smu_table->clocks_table = NULL; - - kfree(smu_table->metrics_table); - smu_table->metrics_table = NULL; - - kfree(smu_table->watermarks_table); - smu_table->watermarks_table = NULL; - - return 0; -} - -static int smu_v13_0_1_atom_get_smu_clockinfo(struct amdgpu_device *adev, - uint8_t clk_id, - uint8_t syspll_id, - uint32_t *clk_freq) -{ - struct atom_get_smu_clock_info_parameters_v3_1 input = {0}; - struct atom_get_smu_clock_info_output_parameters_v3_1 *output; - int ret, index; - - input.clk_id = clk_id; - input.syspll_id = syspll_id; - input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ; - index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1, - getsmuclockinfo); - - ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index, - (uint32_t *)&input); - if (ret) - return -EINVAL; - - output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input; - *clk_freq = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000; - - return 0; -} - -int smu_v13_0_1_get_vbios_bootup_values(struct smu_context *smu) -{ - int ret, index; - uint16_t size; - uint8_t frev, crev; - struct atom_common_table_header *header; - struct atom_firmware_info_v3_4 *v_3_4; - struct atom_firmware_info_v3_3 *v_3_3; - struct atom_firmware_info_v3_1 *v_3_1; - - index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, - firmwareinfo); - - ret = 
amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev, - (uint8_t **)&header); - if (ret) - return ret; - - if (header->format_revision != 3) { - dev_err(smu->adev->dev, "unknown atom_firmware_info version! for smu13\n"); - return -EINVAL; - } - - switch (header->content_revision) { - case 0: - case 1: - case 2: - v_3_1 = (struct atom_firmware_info_v3_1 *)header; - smu->smu_table.boot_values.revision = v_3_1->firmware_revision; - smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz; - smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz; - smu->smu_table.boot_values.socclk = 0; - smu->smu_table.boot_values.dcefclk = 0; - smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv; - smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv; - smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv; - smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv; - smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id; - break; - case 3: - v_3_3 = (struct atom_firmware_info_v3_3 *)header; - smu->smu_table.boot_values.revision = v_3_3->firmware_revision; - smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz; - smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz; - smu->smu_table.boot_values.socclk = 0; - smu->smu_table.boot_values.dcefclk = 0; - smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv; - smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv; - smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv; - smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv; - smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id; - break; - case 4: - default: - v_3_4 = (struct atom_firmware_info_v3_4 *)header; - smu->smu_table.boot_values.revision = v_3_4->firmware_revision; - smu->smu_table.boot_values.gfxclk = v_3_4->bootup_sclk_in10khz; - smu->smu_table.boot_values.uclk = v_3_4->bootup_mclk_in10khz; - smu->smu_table.boot_values.socclk = 0; - smu->smu_table.boot_values.dcefclk = 0; - smu->smu_table.boot_values.vddc = v_3_4->bootup_vddc_mv; - smu->smu_table.boot_values.vddci = v_3_4->bootup_vddci_mv; - smu->smu_table.boot_values.mvddc = v_3_4->bootup_mvddc_mv; - smu->smu_table.boot_values.vdd_gfx = v_3_4->bootup_vddgfx_mv; - smu->smu_table.boot_values.cooling_id = v_3_4->coolingsolution_id; - break; - } - - smu->smu_table.boot_values.format_revision = header->format_revision; - smu->smu_table.boot_values.content_revision = header->content_revision; - - smu_v13_0_1_atom_get_smu_clockinfo(smu->adev, - (uint8_t)SMU11_SYSPLL0_SOCCLK_ID, - (uint8_t)0, - &smu->smu_table.boot_values.socclk); - - smu_v13_0_1_atom_get_smu_clockinfo(smu->adev, - (uint8_t)SMU11_SYSPLL0_DCEFCLK_ID, - (uint8_t)0, - &smu->smu_table.boot_values.dcefclk); - - smu_v13_0_1_atom_get_smu_clockinfo(smu->adev, - (uint8_t)SMU11_SYSPLL0_ECLK_ID, - (uint8_t)0, - &smu->smu_table.boot_values.eclk); - - smu_v13_0_1_atom_get_smu_clockinfo(smu->adev, - (uint8_t)SMU11_SYSPLL0_VCLK_ID, - (uint8_t)0, - &smu->smu_table.boot_values.vclk); - - smu_v13_0_1_atom_get_smu_clockinfo(smu->adev, - (uint8_t)SMU11_SYSPLL0_DCLK_ID, - (uint8_t)0, - &smu->smu_table.boot_values.dclk); - - if ((smu->smu_table.boot_values.format_revision == 3) && - (smu->smu_table.boot_values.content_revision >= 2)) - smu_v13_0_1_atom_get_smu_clockinfo(smu->adev, - (uint8_t)SMU11_SYSPLL1_0_FCLK_ID, - (uint8_t)SMU11_SYSPLL1_2_ID, - &smu->smu_table.boot_values.fclk); - - return 0; -} - -int smu_v13_0_1_set_default_dpm_tables(struct smu_context *smu) -{ - struct smu_table_context *smu_table = 
&smu->smu_table; - - return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false); -} - -int smu_v13_0_1_set_driver_table_location(struct smu_context *smu) -{ - struct smu_table *driver_table = &smu->smu_table.driver_table; - int ret = 0; - - if (!driver_table->mc_address) - return 0; - - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_SetDriverDramAddrHigh, - upper_32_bits(driver_table->mc_address), - NULL); - - if (ret) - return ret; - - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_SetDriverDramAddrLow, - lower_32_bits(driver_table->mc_address), - NULL); - - return ret; -} - -int smu_v13_0_1_gfx_off_control(struct smu_context *smu, bool enable) -{ - int ret = 0; - struct amdgpu_device *adev = smu->adev; - - switch (adev->asic_type) { - case CHIP_YELLOW_CARP: - if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) - return 0; - if (enable) - ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL); - else - ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL); - break; - default: - break; - } - - return ret; -} diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c index 18a1ffdca227..0cfeb9fc7c03 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c @@ -25,7 +25,7 @@ #include "amdgpu.h" #include "amdgpu_smu.h" -#include "smu_v13_0_1.h" +#include "smu_v13_0.h" #include "smu13_driver_if_yellow_carp.h" #include "yellow_carp_ppt.h" #include "smu_v13_0_1_ppsmc.h" @@ -186,6 +186,22 @@ err0_out: return -ENOMEM; } +static int yellow_carp_fini_smc_tables(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + + kfree(smu_table->clocks_table); + smu_table->clocks_table = NULL; + + kfree(smu_table->metrics_table); + smu_table->metrics_table = NULL; + + kfree(smu_table->watermarks_table); + smu_table->watermarks_table = NULL; + + return 0; +} + static int yellow_carp_system_features_control(struct smu_context *smu, bool en) { struct smu_feature *feature = &smu->smu_feature; @@ -282,13 +298,9 @@ static int yellow_carp_mode_reset(struct smu_context *smu, int type) if (index < 0) return index == -EACCES ? 
0 : index; - mutex_lock(&smu->message_lock); - - ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, type); - - mutex_unlock(&smu->message_lock); - - mdelay(10); + ret = smu_cmn_send_smc_msg_with_param(smu, (uint16_t)index, type, NULL); + if (ret) + dev_err(smu->adev->dev, "Failed to mode reset!\n"); return ret; } @@ -659,6 +671,13 @@ static ssize_t yellow_carp_get_gpu_metrics(struct smu_context *smu, return sizeof(struct gpu_metrics_v2_1); } +static int yellow_carp_set_default_dpm_tables(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + + return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false); +} + static int yellow_carp_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, long input[], uint32_t size) { @@ -1203,17 +1222,17 @@ static int yellow_carp_set_fine_grain_gfx_freq_parameters(struct smu_context *sm } static const struct pptable_funcs yellow_carp_ppt_funcs = { - .check_fw_status = smu_v13_0_1_check_fw_status, - .check_fw_version = smu_v13_0_1_check_fw_version, + .check_fw_status = smu_v13_0_check_fw_status, + .check_fw_version = smu_v13_0_check_fw_version, .init_smc_tables = yellow_carp_init_smc_tables, - .fini_smc_tables = smu_v13_0_1_fini_smc_tables, - .get_vbios_bootup_values = smu_v13_0_1_get_vbios_bootup_values, + .fini_smc_tables = yellow_carp_fini_smc_tables, + .get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values, .system_features_control = yellow_carp_system_features_control, .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param, .send_smc_msg = smu_cmn_send_smc_msg, .dpm_set_vcn_enable = yellow_carp_dpm_set_vcn_enable, .dpm_set_jpeg_enable = yellow_carp_dpm_set_jpeg_enable, - .set_default_dpm_table = smu_v13_0_1_set_default_dpm_tables, + .set_default_dpm_table = yellow_carp_set_default_dpm_tables, .read_sensor = yellow_carp_read_sensor, .is_dpm_running = yellow_carp_is_dpm_running, .set_watermarks_table = yellow_carp_set_watermarks_table, @@ -1222,8 +1241,8 @@ static const struct pptable_funcs yellow_carp_ppt_funcs = { .get_gpu_metrics = yellow_carp_get_gpu_metrics, .get_enabled_mask = smu_cmn_get_enabled_32_bits_mask, .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, - .set_driver_table_location = smu_v13_0_1_set_driver_table_location, - .gfx_off_control = smu_v13_0_1_gfx_off_control, + .set_driver_table_location = smu_v13_0_set_driver_table_location, + .gfx_off_control = smu_v13_0_gfx_off_control, .post_init = yellow_carp_post_smu_init, .mode2_reset = yellow_carp_mode2_reset, .get_dpm_ultimate_freq = yellow_carp_get_dpm_ultimate_freq, diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c index d29907955ff7..5d82891c3222 100644 --- a/drivers/gpu/drm/drm_ioc32.c +++ b/drivers/gpu/drm/drm_ioc32.c @@ -855,8 +855,6 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd, req.request.sequence = req32.request.sequence; req.request.signal = req32.request.signal; err = drm_ioctl_kernel(file, drm_wait_vblank_ioctl, &req, DRM_UNLOCKED); - if (err) - return err; req32.reply.type = req.reply.type; req32.reply.sequence = req.reply.sequence; @@ -865,7 +863,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd, if (copy_to_user(argp, &req32, sizeof(req32))) return -EFAULT; - return 0; + return err; } #if defined(CONFIG_X86) diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 98ae00661656..f454e0424086 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -834,6 
+834,9 @@ long drm_ioctl(struct file *filp, if (drm_dev_is_unplugged(dev)) return -ENODEV; + if (DRM_IOCTL_TYPE(cmd) != DRM_IOCTL_BASE) + return -ENOTTY; + is_driver_ioctl = nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END; if (is_driver_ioctl) { diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 5b6922e28ef2..aa667fa71158 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -2166,7 +2166,8 @@ static void init_vbt_missing_defaults(struct drm_i915_private *i915) { enum port port; - int ports = PORT_A | PORT_B | PORT_C | PORT_D | PORT_E | PORT_F; + int ports = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | + BIT(PORT_D) | BIT(PORT_E) | BIT(PORT_F); if (!HAS_DDI(i915) && !IS_CHERRYVIEW(i915)) return; diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index be716b56e8e0..00dade49665b 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -2463,6 +2463,15 @@ static void intel_ddi_power_up_lanes(struct intel_encoder *encoder, } } +/* Splitter enable for eDP MSO is limited to certain pipes. */ +static u8 intel_ddi_splitter_pipe_mask(struct drm_i915_private *i915) +{ + if (IS_ALDERLAKE_P(i915)) + return BIT(PIPE_A) | BIT(PIPE_B); + else + return BIT(PIPE_A); +} + static void intel_ddi_mso_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { @@ -2480,8 +2489,7 @@ static void intel_ddi_mso_get_config(struct intel_encoder *encoder, if (!pipe_config->splitter.enable) return; - /* Splitter enable is supported for pipe A only. */ - if (drm_WARN_ON(&i915->drm, pipe != PIPE_A)) { + if (drm_WARN_ON(&i915->drm, !(intel_ddi_splitter_pipe_mask(i915) & BIT(pipe)))) { pipe_config->splitter.enable = false; return; } @@ -2513,10 +2521,6 @@ static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state) return; if (crtc_state->splitter.enable) { - /* Splitter enable is supported for pipe A only. */ - if (drm_WARN_ON(&i915->drm, pipe != PIPE_A)) - return; - dss1 |= SPLITTER_ENABLE; dss1 |= OVERLAP_PIXELS(crtc_state->splitter.pixel_overlap); if (crtc_state->splitter.link_count == 2) @@ -4743,12 +4747,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) dig_port->hpd_pulse = intel_dp_hpd_pulse; - /* Splitter enable for eDP MSO is limited to certain pipes. 
*/ - if (dig_port->dp.mso_link_count) { - encoder->pipe_mask = BIT(PIPE_A); - if (IS_ALDERLAKE_P(dev_priv)) - encoder->pipe_mask |= BIT(PIPE_B); - } + if (dig_port->dp.mso_link_count) + encoder->pipe_mask = intel_ddi_splitter_pipe_mask(dev_priv); } /* In theory we don't need the encoder->type check, but leave it just in diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 3bad4e00f7be..0a8a2395c8ac 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -5746,16 +5746,18 @@ static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state) switch (crtc_state->pipe_bpp) { case 18: - val |= PIPEMISC_DITHER_6_BPC; + val |= PIPEMISC_6_BPC; break; case 24: - val |= PIPEMISC_DITHER_8_BPC; + val |= PIPEMISC_8_BPC; break; case 30: - val |= PIPEMISC_DITHER_10_BPC; + val |= PIPEMISC_10_BPC; break; case 36: - val |= PIPEMISC_DITHER_12_BPC; + /* Port output 12BPC defined for ADLP+ */ + if (DISPLAY_VER(dev_priv) > 12) + val |= PIPEMISC_12_BPC_ADLP; break; default: MISSING_CASE(crtc_state->pipe_bpp); @@ -5808,15 +5810,27 @@ int bdw_get_pipemisc_bpp(struct intel_crtc *crtc) tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe)); - switch (tmp & PIPEMISC_DITHER_BPC_MASK) { - case PIPEMISC_DITHER_6_BPC: + switch (tmp & PIPEMISC_BPC_MASK) { + case PIPEMISC_6_BPC: return 18; - case PIPEMISC_DITHER_8_BPC: + case PIPEMISC_8_BPC: return 24; - case PIPEMISC_DITHER_10_BPC: + case PIPEMISC_10_BPC: return 30; - case PIPEMISC_DITHER_12_BPC: - return 36; + /* + * PORT OUTPUT 12 BPC defined for ADLP+. + * + * TODO: + * For previous platforms with DSI interface, bits 5:7 + * are used for storing pipe_bpp irrespective of dithering. + * Since the value of 12 BPC is not defined for these bits + * on older platforms, need to find a workaround for 12 BPC + * MIPI DSI HW readout. 
+ */ + case PIPEMISC_12_BPC_ADLP: + if (DISPLAY_VER(dev_priv) > 12) + return 36; + fallthrough; default: MISSING_CASE(tmp); return 0; @@ -11361,13 +11375,19 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) intel_ddi_init(dev_priv, PORT_B); intel_ddi_init(dev_priv, PORT_C); vlv_dsi_init(dev_priv); - } else if (DISPLAY_VER(dev_priv) >= 9) { + } else if (DISPLAY_VER(dev_priv) == 10) { intel_ddi_init(dev_priv, PORT_A); intel_ddi_init(dev_priv, PORT_B); intel_ddi_init(dev_priv, PORT_C); intel_ddi_init(dev_priv, PORT_D); intel_ddi_init(dev_priv, PORT_E); intel_ddi_init(dev_priv, PORT_F); + } else if (DISPLAY_VER(dev_priv) >= 9) { + intel_ddi_init(dev_priv, PORT_A); + intel_ddi_init(dev_priv, PORT_B); + intel_ddi_init(dev_priv, PORT_C); + intel_ddi_init(dev_priv, PORT_D); + intel_ddi_init(dev_priv, PORT_E); } else if (HAS_DDI(dev_priv)) { u32 found; diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 4298ae684d7d..86b7ac7b65ec 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -6387,13 +6387,13 @@ void intel_display_power_suspend_late(struct drm_i915_private *i915) if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) || IS_BROXTON(i915)) { bxt_enable_dc9(i915); - /* Tweaked Wa_14010685332:icp,jsp,mcc */ - if (INTEL_PCH_TYPE(i915) >= PCH_ICP && INTEL_PCH_TYPE(i915) <= PCH_MCC) - intel_de_rmw(i915, SOUTH_CHICKEN1, - SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS); } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { hsw_enable_pc8(i915); } + + /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */ + if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1) + intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS); } void intel_display_power_resume_early(struct drm_i915_private *i915) @@ -6402,13 +6402,13 @@ void intel_display_power_resume_early(struct drm_i915_private *i915) IS_BROXTON(i915)) { gen9_sanitize_dc_state(i915); bxt_disable_dc9(i915); - /* Tweaked Wa_14010685332:icp,jsp,mcc */ - if (INTEL_PCH_TYPE(i915) >= PCH_ICP && INTEL_PCH_TYPE(i915) <= PCH_MCC) - intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0); - } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { hsw_disable_pc8(i915); } + + /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */ + if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1) + intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0); } void intel_display_power_suspend(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index 08bceae40aa8..053a3c2f7267 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -206,7 +206,6 @@ int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp) return lttpr_count; } -EXPORT_SYMBOL(intel_dp_init_lttpr_and_dprx_caps); static u8 dp_voltage_max(u8 preemph) { diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index a8abc9af5ff4..4a6419d7be93 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -25,10 +25,8 @@ #include "i915_gem_clflush.h" #include "i915_gem_context.h" #include "i915_gem_ioctls.h" -#include "i915_sw_fence_work.h" #include "i915_trace.h" #include "i915_user_extensions.h" -#include "i915_memcpy.h" struct 
eb_vma { struct i915_vma *vma; @@ -1456,6 +1454,10 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb, int err; struct intel_engine_cs *engine = eb->engine; + /* If we need to copy for the cmdparser, we will stall anyway */ + if (eb_use_cmdparser(eb)) + return ERR_PTR(-EWOULDBLOCK); + if (!reloc_can_use_engine(engine)) { engine = engine->gt->engine_class[COPY_ENGINE_CLASS][0]; if (!engine) @@ -2372,217 +2374,6 @@ shadow_batch_pin(struct i915_execbuffer *eb, return vma; } -struct eb_parse_work { - struct dma_fence_work base; - struct intel_engine_cs *engine; - struct i915_vma *batch; - struct i915_vma *shadow; - struct i915_vma *trampoline; - unsigned long batch_offset; - unsigned long batch_length; - unsigned long *jump_whitelist; - const void *batch_map; - void *shadow_map; -}; - -static int __eb_parse(struct dma_fence_work *work) -{ - struct eb_parse_work *pw = container_of(work, typeof(*pw), base); - int ret; - bool cookie; - - cookie = dma_fence_begin_signalling(); - ret = intel_engine_cmd_parser(pw->engine, - pw->batch, - pw->batch_offset, - pw->batch_length, - pw->shadow, - pw->jump_whitelist, - pw->shadow_map, - pw->batch_map); - dma_fence_end_signalling(cookie); - - return ret; -} - -static void __eb_parse_release(struct dma_fence_work *work) -{ - struct eb_parse_work *pw = container_of(work, typeof(*pw), base); - - if (!IS_ERR_OR_NULL(pw->jump_whitelist)) - kfree(pw->jump_whitelist); - - if (pw->batch_map) - i915_gem_object_unpin_map(pw->batch->obj); - else - i915_gem_object_unpin_pages(pw->batch->obj); - - i915_gem_object_unpin_map(pw->shadow->obj); - - if (pw->trampoline) - i915_active_release(&pw->trampoline->active); - i915_active_release(&pw->shadow->active); - i915_active_release(&pw->batch->active); -} - -static const struct dma_fence_work_ops eb_parse_ops = { - .name = "eb_parse", - .work = __eb_parse, - .release = __eb_parse_release, -}; - -static inline int -__parser_mark_active(struct i915_vma *vma, - struct intel_timeline *tl, - struct dma_fence *fence) -{ - struct intel_gt_buffer_pool_node *node = vma->private; - - return i915_active_ref(&node->active, tl->fence_context, fence); -} - -static int -parser_mark_active(struct eb_parse_work *pw, struct intel_timeline *tl) -{ - int err; - - mutex_lock(&tl->mutex); - - err = __parser_mark_active(pw->shadow, tl, &pw->base.dma); - if (err) - goto unlock; - - if (pw->trampoline) { - err = __parser_mark_active(pw->trampoline, tl, &pw->base.dma); - if (err) - goto unlock; - } - -unlock: - mutex_unlock(&tl->mutex); - return err; -} - -static int eb_parse_pipeline(struct i915_execbuffer *eb, - struct i915_vma *shadow, - struct i915_vma *trampoline) -{ - struct eb_parse_work *pw; - struct drm_i915_gem_object *batch = eb->batch->vma->obj; - bool needs_clflush; - int err; - - GEM_BUG_ON(overflows_type(eb->batch_start_offset, pw->batch_offset)); - GEM_BUG_ON(overflows_type(eb->batch_len, pw->batch_length)); - - pw = kzalloc(sizeof(*pw), GFP_KERNEL); - if (!pw) - return -ENOMEM; - - err = i915_active_acquire(&eb->batch->vma->active); - if (err) - goto err_free; - - err = i915_active_acquire(&shadow->active); - if (err) - goto err_batch; - - if (trampoline) { - err = i915_active_acquire(&trampoline->active); - if (err) - goto err_shadow; - } - - pw->shadow_map = i915_gem_object_pin_map(shadow->obj, I915_MAP_WB); - if (IS_ERR(pw->shadow_map)) { - err = PTR_ERR(pw->shadow_map); - goto err_trampoline; - } - - needs_clflush = - !(batch->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ); - - pw->batch_map = ERR_PTR(-ENODEV); - if 
(needs_clflush && i915_has_memcpy_from_wc()) - pw->batch_map = i915_gem_object_pin_map(batch, I915_MAP_WC); - - if (IS_ERR(pw->batch_map)) { - err = i915_gem_object_pin_pages(batch); - if (err) - goto err_unmap_shadow; - pw->batch_map = NULL; - } - - pw->jump_whitelist = - intel_engine_cmd_parser_alloc_jump_whitelist(eb->batch_len, - trampoline); - if (IS_ERR(pw->jump_whitelist)) { - err = PTR_ERR(pw->jump_whitelist); - goto err_unmap_batch; - } - - dma_fence_work_init(&pw->base, &eb_parse_ops); - - pw->engine = eb->engine; - pw->batch = eb->batch->vma; - pw->batch_offset = eb->batch_start_offset; - pw->batch_length = eb->batch_len; - pw->shadow = shadow; - pw->trampoline = trampoline; - - /* Mark active refs early for this worker, in case we get interrupted */ - err = parser_mark_active(pw, eb->context->timeline); - if (err) - goto err_commit; - - err = dma_resv_reserve_shared(pw->batch->resv, 1); - if (err) - goto err_commit; - - err = dma_resv_reserve_shared(shadow->resv, 1); - if (err) - goto err_commit; - - /* Wait for all writes (and relocs) into the batch to complete */ - err = i915_sw_fence_await_reservation(&pw->base.chain, - pw->batch->resv, NULL, false, - 0, I915_FENCE_GFP); - if (err < 0) - goto err_commit; - - /* Keep the batch alive and unwritten as we parse */ - dma_resv_add_shared_fence(pw->batch->resv, &pw->base.dma); - - /* Force execution to wait for completion of the parser */ - dma_resv_add_excl_fence(shadow->resv, &pw->base.dma); - - dma_fence_work_commit_imm(&pw->base); - return 0; - -err_commit: - i915_sw_fence_set_error_once(&pw->base.chain, err); - dma_fence_work_commit_imm(&pw->base); - return err; - -err_unmap_batch: - if (pw->batch_map) - i915_gem_object_unpin_map(batch); - else - i915_gem_object_unpin_pages(batch); -err_unmap_shadow: - i915_gem_object_unpin_map(shadow->obj); -err_trampoline: - if (trampoline) - i915_active_release(&trampoline->active); -err_shadow: - i915_active_release(&shadow->active); -err_batch: - i915_active_release(&eb->batch->vma->active); -err_free: - kfree(pw); - return err; -} - static struct i915_vma *eb_dispatch_secure(struct i915_execbuffer *eb, struct i915_vma *vma) { /* @@ -2672,7 +2463,15 @@ static int eb_parse(struct i915_execbuffer *eb) goto err_trampoline; } - err = eb_parse_pipeline(eb, shadow, trampoline); + err = dma_resv_reserve_shared(shadow->resv, 1); + if (err) + goto err_trampoline; + + err = intel_engine_cmd_parser(eb->engine, + eb->batch->vma, + eb->batch_start_offset, + eb->batch_len, + shadow, trampoline); if (err) goto err_unpin_batch; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index f4fb68e8955a..e382b7f2353b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -62,6 +62,7 @@ static void try_to_writeback(struct drm_i915_gem_object *obj, switch (obj->mm.madv) { case I915_MADV_DONTNEED: i915_gem_object_truncate(obj); + return; case __I915_MADV_PURGED: return; } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c index 4df505e4c53a..16162fc2782d 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c @@ -125,6 +125,10 @@ static int igt_gpu_reloc(void *arg) intel_gt_pm_get(&eb.i915->gt); for_each_uabi_engine(eb.engine, eb.i915) { + if (intel_engine_requires_cmd_parser(eb.engine) || + intel_engine_using_cmd_parser(eb.engine)) + continue; + 
reloc_cache_init(&eb.reloc_cache, eb.i915); memset(map, POISON_INUSE, 4096); diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c index 21c8b7350b7a..da4f5eb43ac2 100644 --- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c @@ -303,10 +303,7 @@ static void __gen8_ppgtt_alloc(struct i915_address_space * const vm, __i915_gem_object_pin_pages(pt->base); i915_gem_object_make_unshrinkable(pt->base); - if (lvl || - gen8_pt_count(*start, end) < I915_PDES || - intel_vgpu_active(vm->i915)) - fill_px(pt, vm->scratch[lvl]->encode); + fill_px(pt, vm->scratch[lvl]->encode); spin_lock(&pd->lock); if (likely(!pd->entry[idx])) { diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c index cac7f3f44642..f8948de72036 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c @@ -348,7 +348,7 @@ static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt) if (intel_has_pending_fb_unpin(ggtt->vm.i915)) return ERR_PTR(-EAGAIN); - return ERR_PTR(-EDEADLK); + return ERR_PTR(-ENOBUFS); } int __i915_vma_pin_fence(struct i915_vma *vma) diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 98eb48c24c46..cde0a477fb49 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -1977,6 +1977,21 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, if (drm_WARN_ON(&i915->drm, !engine)) return -EINVAL; + /* + * Due to d3_entered is used to indicate skipping PPGTT invalidation on + * vGPU reset, it's set on D0->D3 on PCI config write, and cleared after + * vGPU reset if in resuming. + * In S0ix exit, the device power state also transite from D3 to D0 as + * S3 resume, but no vGPU reset (triggered by QEMU devic model). After + * S0ix exit, all engines continue to work. However the d3_entered + * remains set which will break next vGPU reset logic (miss the expected + * PPGTT invalidation). + * Engines can only work in D0. Thus the 1st elsp write gives GVT a + * chance to clear d3_entered. 
+ */ + if (vgpu->d3_entered) + vgpu->d3_entered = false; + execlist = &vgpu->submission.execlist[engine->id]; execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data; @@ -3134,6 +3149,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt) MMIO_DFH(_MMIO(0xb100), D_BDW, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0xb10c), D_BDW, F_CMD_ACCESS, NULL, NULL); MMIO_D(_MMIO(0xb110), D_BDW); + MMIO_D(GEN9_SCRATCH_LNCF1, D_BDW_PLUS); MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS | F_CMD_WRITE_PATCH, 0, 0, D_BDW_PLUS, NULL, force_nonpriv_write); diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index b8ac80765461..f776c470914d 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c @@ -105,6 +105,8 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = { {RCS0, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */ {RCS0, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */ {RCS0, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */ + {RCS0, GEN9_SCRATCH1, 0, false}, /* 0xb11c */ + {RCS0, GEN9_SCRATCH_LNCF1, 0, false}, /* 0xb008 */ {RCS0, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */ {RCS0, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */ {RCS0, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */ diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index 3992c25a191d..a3b4d99d64b9 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -1145,19 +1145,41 @@ find_reg(const struct intel_engine_cs *engine, u32 addr) static u32 *copy_batch(struct drm_i915_gem_object *dst_obj, struct drm_i915_gem_object *src_obj, unsigned long offset, unsigned long length, - void *dst, const void *src) + bool *needs_clflush_after) { - bool needs_clflush = - !(src_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ); - - if (src) { - GEM_BUG_ON(!needs_clflush); - i915_unaligned_memcpy_from_wc(dst, src + offset, length); - } else { - struct scatterlist *sg; + unsigned int src_needs_clflush; + unsigned int dst_needs_clflush; + void *dst, *src; + int ret; + + ret = i915_gem_object_prepare_write(dst_obj, &dst_needs_clflush); + if (ret) + return ERR_PTR(ret); + + dst = i915_gem_object_pin_map(dst_obj, I915_MAP_WB); + i915_gem_object_finish_access(dst_obj); + if (IS_ERR(dst)) + return dst; + + ret = i915_gem_object_prepare_read(src_obj, &src_needs_clflush); + if (ret) { + i915_gem_object_unpin_map(dst_obj); + return ERR_PTR(ret); + } + + src = ERR_PTR(-ENODEV); + if (src_needs_clflush && i915_has_memcpy_from_wc()) { + src = i915_gem_object_pin_map(src_obj, I915_MAP_WC); + if (!IS_ERR(src)) { + i915_unaligned_memcpy_from_wc(dst, + src + offset, + length); + i915_gem_object_unpin_map(src_obj); + } + } + if (IS_ERR(src)) { + unsigned long x, n, remain; void *ptr; - unsigned int x, sg_ofs; - unsigned long remain; /* * We can avoid clflushing partial cachelines before the write @@ -1168,40 +1190,34 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj, * validate up to the end of the batch. 
*/ remain = length; - if (!(dst_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)) + if (dst_needs_clflush & CLFLUSH_BEFORE) remain = round_up(remain, boot_cpu_data.x86_clflush_size); ptr = dst; x = offset_in_page(offset); - sg = i915_gem_object_get_sg(src_obj, offset >> PAGE_SHIFT, &sg_ofs, false); - - while (remain) { - unsigned long sg_max = sg->length >> PAGE_SHIFT; - - for (; remain && sg_ofs < sg_max; sg_ofs++) { - unsigned long len = min(remain, PAGE_SIZE - x); - void *map; - - map = kmap_atomic(nth_page(sg_page(sg), sg_ofs)); - if (needs_clflush) - drm_clflush_virt_range(map + x, len); - memcpy(ptr, map + x, len); - kunmap_atomic(map); - - ptr += len; - remain -= len; - x = 0; - } - - sg_ofs = 0; - sg = sg_next(sg); + for (n = offset >> PAGE_SHIFT; remain; n++) { + int len = min(remain, PAGE_SIZE - x); + + src = kmap_atomic(i915_gem_object_get_page(src_obj, n)); + if (src_needs_clflush) + drm_clflush_virt_range(src + x, len); + memcpy(ptr, src + x, len); + kunmap_atomic(src); + + ptr += len; + remain -= len; + x = 0; } } + i915_gem_object_finish_access(src_obj); + memset32(dst + length, 0, (dst_obj->base.size - length) / sizeof(u32)); /* dst_obj is returned with vmap pinned */ + *needs_clflush_after = dst_needs_clflush & CLFLUSH_AFTER; + return dst; } @@ -1360,6 +1376,9 @@ static int check_bbstart(u32 *cmd, u32 offset, u32 length, if (target_cmd_index == offset) return 0; + if (IS_ERR(jump_whitelist)) + return PTR_ERR(jump_whitelist); + if (!test_bit(target_cmd_index, jump_whitelist)) { DRM_DEBUG("CMD: BB_START to 0x%llx not a previously executed cmd\n", jump_target); @@ -1369,28 +1388,10 @@ static int check_bbstart(u32 *cmd, u32 offset, u32 length, return 0; } -/** - * intel_engine_cmd_parser_alloc_jump_whitelist() - preallocate jump whitelist for intel_engine_cmd_parser() - * @batch_length: length of the commands in batch_obj - * @trampoline: Whether jump trampolines are used. - * - * Preallocates a jump whitelist for parsing the cmd buffer in intel_engine_cmd_parser(). - * This has to be preallocated, because the command parser runs in signaling context, - * and may not allocate any memory. - * - * Return: NULL or pointer to a jump whitelist, or ERR_PTR() on failure. Use - * IS_ERR() to check for errors. Must bre freed() with kfree(). - * - * NULL is a valid value, meaning no allocation was required. - */ -unsigned long *intel_engine_cmd_parser_alloc_jump_whitelist(u32 batch_length, - bool trampoline) +static unsigned long *alloc_whitelist(u32 batch_length) { unsigned long *jmp; - if (trampoline) - return NULL; - /* * We expect batch_length to be less than 256KiB for known users, * i.e. we need at most an 8KiB bitmap allocation which should be @@ -1415,9 +1416,7 @@ unsigned long *intel_engine_cmd_parser_alloc_jump_whitelist(u32 batch_length, * @batch_offset: byte offset in the batch at which execution starts * @batch_length: length of the commands in batch_obj * @shadow: validated copy of the batch buffer in question - * @jump_whitelist: buffer preallocated with intel_engine_cmd_parser_alloc_jump_whitelist() - * @shadow_map: mapping to @shadow vma - * @batch_map: mapping to @batch vma + * @trampoline: true if we need to trampoline into privileged execution * * Parses the specified batch buffer looking for privilege violations as * described in the overview. 
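The command-parser rework above drops the asynchronous eb_parse_work pipeline in favor of a direct call, and with it adopts a "defer allocation failure until first use" pattern: alloc_whitelist() may fail, but the error is only surfaced if a batch actually contains a jump that needs the whitelist. A minimal, self-contained C sketch of that pattern follows; all names and types here are illustrative stand-ins, not kernel API, and plain NULL stands in for the kernel's ERR_PTR() encoding.

    #include <errno.h>
    #include <limits.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    /*
     * May fail; the caller keeps the (possibly failed) result around and
     * only reports the failure if a jump instruction is encountered.
     */
    static unsigned long *alloc_whitelist_bitmap(uint32_t batch_length)
    {
        size_t bits = batch_length / sizeof(uint32_t);
        size_t words = (bits + BITS_PER_LONG - 1) / BITS_PER_LONG;

        return calloc(words, sizeof(unsigned long)); /* NULL on failure */
    }

    /* Only a BB_START-style jump actually consults the whitelist. */
    static int check_jump(const unsigned long *whitelist, size_t target_bit)
    {
        if (!whitelist)
            return -ENOMEM; /* surface the deferred allocation failure */

        if (!(whitelist[target_bit / BITS_PER_LONG] &
              (1UL << (target_bit % BITS_PER_LONG))))
            return -EINVAL; /* jump target was never parsed */

        return 0;
    }

In the kernel version the failure is encoded with ERR_PTR()/PTR_ERR() rather than NULL, since a NULL whitelist legitimately means trampoline mode (no allocation required), as the removed kernel-doc above noted.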
@@ -1425,21 +1424,21 @@ unsigned long *intel_engine_cmd_parser_alloc_jump_whitelist(u32 batch_length, * Return: non-zero if the parser finds violations or otherwise fails; -EACCES * if the batch appears legal but should use hardware parsing */ + int intel_engine_cmd_parser(struct intel_engine_cs *engine, struct i915_vma *batch, unsigned long batch_offset, unsigned long batch_length, struct i915_vma *shadow, - unsigned long *jump_whitelist, - void *shadow_map, - const void *batch_map) + bool trampoline) { u32 *cmd, *batch_end, offset = 0; struct drm_i915_cmd_descriptor default_desc = noop_desc; const struct drm_i915_cmd_descriptor *desc = &default_desc; + bool needs_clflush_after = false; + unsigned long *jump_whitelist; u64 batch_addr, shadow_addr; int ret = 0; - bool trampoline = !jump_whitelist; GEM_BUG_ON(!IS_ALIGNED(batch_offset, sizeof(*cmd))); GEM_BUG_ON(!IS_ALIGNED(batch_length, sizeof(*cmd))); @@ -1447,8 +1446,18 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine, batch->size)); GEM_BUG_ON(!batch_length); - cmd = copy_batch(shadow->obj, batch->obj, batch_offset, batch_length, - shadow_map, batch_map); + cmd = copy_batch(shadow->obj, batch->obj, + batch_offset, batch_length, + &needs_clflush_after); + if (IS_ERR(cmd)) { + DRM_DEBUG("CMD: Failed to copy batch\n"); + return PTR_ERR(cmd); + } + + jump_whitelist = NULL; + if (!trampoline) + /* Defer failure until attempted use */ + jump_whitelist = alloc_whitelist(batch_length); shadow_addr = gen8_canonical_addr(shadow->node.start); batch_addr = gen8_canonical_addr(batch->node.start + batch_offset); @@ -1549,6 +1558,9 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine, i915_gem_object_flush_map(shadow->obj); + if (!IS_ERR_OR_NULL(jump_whitelist)) + kfree(jump_whitelist); + i915_gem_object_unpin_map(shadow->obj); return ret; } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 38ff2fb89744..b30397b04529 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1906,17 +1906,12 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type); int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv); int intel_engine_init_cmd_parser(struct intel_engine_cs *engine); void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine); -unsigned long *intel_engine_cmd_parser_alloc_jump_whitelist(u32 batch_length, - bool trampoline); - int intel_engine_cmd_parser(struct intel_engine_cs *engine, struct i915_vma *batch, unsigned long batch_offset, unsigned long batch_length, struct i915_vma *shadow, - unsigned long *jump_whitelist, - void *shadow_map, - const void *batch_map); + bool trampoline); #define I915_CMD_PARSER_TRAMPOLINE_SIZE 8 /* intel_device_info.c */ diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c index 77f1911c463b..3acb0b6be284 100644 --- a/drivers/gpu/drm/i915/i915_globals.c +++ b/drivers/gpu/drm/i915/i915_globals.c @@ -138,7 +138,7 @@ void i915_globals_unpark(void) atomic_inc(&active); } -static void __exit __i915_globals_flush(void) +static void __i915_globals_flush(void) { atomic_inc(&active); /* skip shrinking */ @@ -148,7 +148,7 @@ static void __exit __i915_globals_flush(void) atomic_dec(&active); } -void __exit i915_globals_exit(void) +void i915_globals_exit(void) { GEM_BUG_ON(atomic_read(&active)); diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 35c97c39f125..966664610c8c 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ 
b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -727,9 +727,18 @@ static void err_print_gt(struct drm_i915_error_state_buf *m, if (GRAPHICS_VER(m->i915) >= 12) { int i; - for (i = 0; i < GEN12_SFC_DONE_MAX; i++) + for (i = 0; i < GEN12_SFC_DONE_MAX; i++) { + /* + * SFC_DONE resides in the VD forcewake domain, so it + * only exists if the corresponding VCS engine is + * present. + */ + if (!HAS_ENGINE(gt->_gt, _VCS(i * 2))) + continue; + err_printf(m, " SFC_DONE[%d]: 0x%08x\n", i, gt->sfc_done[i]); + } err_printf(m, " GAM_DONE: 0x%08x\n", gt->gam_done); } @@ -1581,6 +1590,14 @@ static void gt_record_regs(struct intel_gt_coredump *gt) if (GRAPHICS_VER(i915) >= 12) { for (i = 0; i < GEN12_SFC_DONE_MAX; i++) { + /* + * SFC_DONE resides in the VD forcewake domain, so it + * only exists if the corresponding VCS engine is + * present. + */ + if (!HAS_ENGINE(gt->_gt, _VCS(i * 2))) + continue; + gt->sfc_done[i] = intel_uncore_read(uncore, GEN12_SFC_DONE(i)); } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index c03943198089..c3816f5c6900 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -3064,24 +3064,6 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv) spin_unlock_irq(&dev_priv->irq_lock); } -static void cnp_display_clock_wa(struct drm_i915_private *dev_priv) -{ - struct intel_uncore *uncore = &dev_priv->uncore; - - /* - * Wa_14010685332:cnp/cmp,tgp,adp - * TODO: Clarify which platforms this applies to - * TODO: Figure out if this workaround can be applied in the s0ix suspend/resume handlers as - * on earlier platforms and whether the workaround is also needed for runtime suspend/resume - */ - if (INTEL_PCH_TYPE(dev_priv) == PCH_CNP || - (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP && INTEL_PCH_TYPE(dev_priv) < PCH_DG1)) { - intel_uncore_rmw(uncore, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, - SBCLK_RUN_REFCLK_DIS); - intel_uncore_rmw(uncore, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0); - } -} - static void gen8_display_irq_reset(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; @@ -3115,7 +3097,6 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv) if (HAS_PCH_SPLIT(dev_priv)) ibx_irq_reset(dev_priv); - cnp_display_clock_wa(dev_priv); } static void gen11_display_irq_reset(struct drm_i915_private *dev_priv) @@ -3159,8 +3140,6 @@ static void gen11_display_irq_reset(struct drm_i915_private *dev_priv) if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) GEN3_IRQ_RESET(uncore, SDE); - - cnp_display_clock_wa(dev_priv); } static void gen11_irq_reset(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 83b500bb170c..2880ec57c97d 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -1195,6 +1195,7 @@ static int __init i915_init(void) err = pci_register_driver(&i915_pci_driver); if (err) { i915_pmu_exit(); + i915_globals_exit(); return err; } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 94fde5ca26ae..476bb3b9ad11 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -422,7 +422,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN12_HCP_SFC_LOCK_ACK_BIT REG_BIT(1) #define GEN12_HCP_SFC_USAGE_BIT REG_BIT(0) -#define GEN12_SFC_DONE(n) _MMIO(0x1cc00 + (n) * 0x100) +#define GEN12_SFC_DONE(n) _MMIO(0x1cc000 + (n) * 0x1000) #define GEN12_SFC_DONE_MAX 4 #define RING_PP_DIR_BASE(base) _MMIO((base) + 0x228) @@ -6163,11 
+6163,17 @@ enum { #define PIPEMISC_HDR_MODE_PRECISION (1 << 23) /* icl+ */ #define PIPEMISC_OUTPUT_COLORSPACE_YUV (1 << 11) #define PIPEMISC_PIXEL_ROUNDING_TRUNC REG_BIT(8) /* tgl+ */ -#define PIPEMISC_DITHER_BPC_MASK (7 << 5) -#define PIPEMISC_DITHER_8_BPC (0 << 5) -#define PIPEMISC_DITHER_10_BPC (1 << 5) -#define PIPEMISC_DITHER_6_BPC (2 << 5) -#define PIPEMISC_DITHER_12_BPC (3 << 5) +/* + * For Display < 13, Bits 5-7 of PIPE MISC represent DITHER BPC with + * valid values of: 6, 8, 10 BPC. + * ADLP+, the bits 5-7 represent PORT OUTPUT BPC with valid values of: + * 6, 8, 10, 12 BPC. + */ +#define PIPEMISC_BPC_MASK (7 << 5) +#define PIPEMISC_8_BPC (0 << 5) +#define PIPEMISC_10_BPC (1 << 5) +#define PIPEMISC_6_BPC (2 << 5) +#define PIPEMISC_12_BPC_ADLP (4 << 5) /* adlp+ */ #define PIPEMISC_DITHER_ENABLE (1 << 4) #define PIPEMISC_DITHER_TYPE_MASK (3 << 2) #define PIPEMISC_DITHER_TYPE_SP (0 << 2) diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 1014c71cf7f5..37aef1308573 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -1426,10 +1426,8 @@ i915_request_await_execution(struct i915_request *rq, do { fence = *child++; - if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { - i915_sw_fence_set_error_once(&rq->submit, fence->error); + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) continue; - } if (fence->context == rq->fence.context) continue; @@ -1527,10 +1525,8 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence) do { fence = *child++; - if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { - i915_sw_fence_set_error_once(&rq->submit, fence->error); + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) continue; - } /* * Requests on the same timeline are explicitly ordered, along diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 7eaa92fee421..e0a10f36acc1 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -325,7 +325,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) info->pipe_mask &= ~BIT(PIPE_C); info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C); } - } else if (HAS_DISPLAY(dev_priv) && GRAPHICS_VER(dev_priv) >= 9) { + } else if (HAS_DISPLAY(dev_priv) && DISPLAY_VER(dev_priv) >= 9) { u32 dfsm = intel_de_read(dev_priv, SKL_DFSM); if (dfsm & SKL_DFSM_PIPE_A_DISABLE) { @@ -340,7 +340,8 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) info->pipe_mask &= ~BIT(PIPE_C); info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C); } - if (GRAPHICS_VER(dev_priv) >= 12 && + + if (DISPLAY_VER(dev_priv) >= 12 && (dfsm & TGL_DFSM_PIPE_D_DISABLE)) { info->pipe_mask &= ~BIT(PIPE_D); info->cpu_transcoder_mask &= ~BIT(TRANSCODER_D); @@ -352,10 +353,10 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE) info->display.has_fbc = 0; - if (GRAPHICS_VER(dev_priv) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE)) + if (DISPLAY_VER(dev_priv) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE)) info->display.has_dmc = 0; - if (GRAPHICS_VER(dev_priv) >= 10 && + if (DISPLAY_VER(dev_priv) >= 10 && (dfsm & CNL_DFSM_DISPLAY_DSC_DISABLE)) info->display.has_dsc = 0; } diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c index 96ea1a2c11dd..f54392ec4fab 100644 --- a/drivers/gpu/drm/kmb/kmb_drv.c +++ b/drivers/gpu/drm/kmb/kmb_drv.c @@ -203,6 +203,7 @@ static irqreturn_t 
handle_lcd_irq(struct drm_device *dev) unsigned long status, val, val1; int plane_id, dma0_state, dma1_state; struct kmb_drm_private *kmb = to_kmb(dev); + u32 ctrl = 0; status = kmb_read_lcd(kmb, LCD_INT_STATUS); @@ -227,6 +228,19 @@ static irqreturn_t handle_lcd_irq(struct drm_device *dev) kmb_clr_bitmask_lcd(kmb, LCD_CONTROL, kmb->plane_status[plane_id].ctrl); + ctrl = kmb_read_lcd(kmb, LCD_CONTROL); + if (!(ctrl & (LCD_CTRL_VL1_ENABLE | + LCD_CTRL_VL2_ENABLE | + LCD_CTRL_GL1_ENABLE | + LCD_CTRL_GL2_ENABLE))) { + /* If no LCD layers are using DMA, + * then disable DMA pipelined AXI read + * transactions. + */ + kmb_clr_bitmask_lcd(kmb, LCD_CONTROL, + LCD_CTRL_PIPELINE_DMA); + } + kmb->plane_status[plane_id].disable = false; } } @@ -411,10 +425,10 @@ static const struct drm_driver kmb_driver = { .fops = &fops, DRM_GEM_CMA_DRIVER_OPS_VMAP, .name = "kmb-drm", - .desc = "KEEMBAY DISPLAY DRIVER ", - .date = "20201008", - .major = 1, - .minor = 0, + .desc = "KEEMBAY DISPLAY DRIVER", + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, }; static int kmb_remove(struct platform_device *pdev) diff --git a/drivers/gpu/drm/kmb/kmb_drv.h b/drivers/gpu/drm/kmb/kmb_drv.h index 02e806712a64..ebbaa5f422d5 100644 --- a/drivers/gpu/drm/kmb/kmb_drv.h +++ b/drivers/gpu/drm/kmb/kmb_drv.h @@ -15,6 +15,11 @@ #define KMB_MAX_HEIGHT 1080 /*Max height in pixels */ #define KMB_MIN_WIDTH 1920 /*Max width in pixels */ #define KMB_MIN_HEIGHT 1080 /*Max height in pixels */ + +#define DRIVER_DATE "20210223" +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 1 + #define KMB_LCD_DEFAULT_CLK 200000000 #define KMB_SYS_CLK_MHZ 500 diff --git a/drivers/gpu/drm/kmb/kmb_plane.c b/drivers/gpu/drm/kmb/kmb_plane.c index d5b6195856d1..ecee6782612d 100644 --- a/drivers/gpu/drm/kmb/kmb_plane.c +++ b/drivers/gpu/drm/kmb/kmb_plane.c @@ -427,8 +427,14 @@ static void kmb_plane_atomic_update(struct drm_plane *plane, kmb_set_bitmask_lcd(kmb, LCD_CONTROL, ctrl); - /* FIXME no doc on how to set output format,these values are - * taken from the Myriadx tests + /* Enable pipeline AXI read transactions for the DMA + * after setting graphics layers. This must be done + * in a separate write cycle. 
+ */ + kmb_set_bitmask_lcd(kmb, LCD_CONTROL, LCD_CTRL_PIPELINE_DMA); + + /* FIXME no doc on how to set output format, these values are taken + * from the Myriadx tests */ out_format |= LCD_OUTF_FORMAT_RGB888; @@ -526,6 +532,11 @@ struct kmb_plane *kmb_plane_init(struct drm_device *drm) plane->id = i; } + /* Disable pipeline AXI read transactions for the DMA + * prior to setting graphics layers + */ + kmb_clr_bitmask_lcd(kmb, LCD_CONTROL, LCD_CTRL_PIPELINE_DMA); + return primary; cleanup: drmm_kfree(drm, plane); diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c index 6f4c80bbc0eb..473f5bb5cbad 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_color.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c @@ -133,6 +133,8 @@ static int mtk_disp_color_probe(struct platform_device *pdev) static int mtk_disp_color_remove(struct platform_device *pdev) { + component_del(&pdev->dev, &mtk_disp_color_component_ops); + return 0; } diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c index fa9d79963cd3..5326989d5206 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c @@ -423,6 +423,8 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev) static int mtk_disp_ovl_remove(struct platform_device *pdev) { + component_del(&pdev->dev, &mtk_disp_ovl_component_ops); + return 0; } diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c index bced555648b0..e94738fe4db8 100644 --- a/drivers/gpu/drm/mediatek/mtk_dpi.c +++ b/drivers/gpu/drm/mediatek/mtk_dpi.c @@ -605,11 +605,15 @@ static int mtk_dpi_bridge_atomic_check(struct drm_bridge *bridge, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { - struct mtk_dpi *dpi = bridge->driver_private; + struct mtk_dpi *dpi = bridge_to_dpi(bridge); unsigned int out_bus_format; out_bus_format = bridge_state->output_bus_cfg.format; + if (out_bus_format == MEDIA_BUS_FMT_FIXED) + if (dpi->conf->num_output_fmts) + out_bus_format = dpi->conf->output_fmts[0]; + dev_dbg(dpi->dev, "input format 0x%04x, output format 0x%04x\n", bridge_state->input_bus_cfg.format, bridge_state->output_bus_cfg.format); diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index 474efb844249..735efe79f075 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -532,13 +532,10 @@ void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane, struct drm_atomic_state *state) { struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); - const struct drm_plane_helper_funcs *plane_helper_funcs = - plane->helper_private; if (!mtk_crtc->enabled) return; - plane_helper_funcs->atomic_update(plane, state); mtk_drm_crtc_update_config(mtk_crtc, false); } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c index 75bc00e17fc4..50d20562e612 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c @@ -34,6 +34,7 @@ #define DISP_AAL_EN 0x0000 #define DISP_AAL_SIZE 0x0030 +#define DISP_AAL_OUTPUT_SIZE 0x04d8 #define DISP_DITHER_EN 0x0000 #define DITHER_EN BIT(0) @@ -197,6 +198,7 @@ static void mtk_aal_config(struct device *dev, unsigned int w, struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_AAL_SIZE); + mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, 
DISP_AAL_OUTPUT_SIZE); } static void mtk_aal_gamma_set(struct device *dev, struct drm_crtc_state *state) diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c index b5582dcf564c..e6dcb34d3052 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c @@ -110,6 +110,35 @@ static int mtk_plane_atomic_async_check(struct drm_plane *plane, true, true); } +static void mtk_plane_update_new_state(struct drm_plane_state *new_state, + struct mtk_plane_state *mtk_plane_state) +{ + struct drm_framebuffer *fb = new_state->fb; + struct drm_gem_object *gem; + struct mtk_drm_gem_obj *mtk_gem; + unsigned int pitch, format; + dma_addr_t addr; + + gem = fb->obj[0]; + mtk_gem = to_mtk_gem_obj(gem); + addr = mtk_gem->dma_addr; + pitch = fb->pitches[0]; + format = fb->format->format; + + addr += (new_state->src.x1 >> 16) * fb->format->cpp[0]; + addr += (new_state->src.y1 >> 16) * pitch; + + mtk_plane_state->pending.enable = true; + mtk_plane_state->pending.pitch = pitch; + mtk_plane_state->pending.format = format; + mtk_plane_state->pending.addr = addr; + mtk_plane_state->pending.x = new_state->dst.x1; + mtk_plane_state->pending.y = new_state->dst.y1; + mtk_plane_state->pending.width = drm_rect_width(&new_state->dst); + mtk_plane_state->pending.height = drm_rect_height(&new_state->dst); + mtk_plane_state->pending.rotation = new_state->rotation; +} + static void mtk_plane_atomic_async_update(struct drm_plane *plane, struct drm_atomic_state *state) { @@ -126,8 +155,10 @@ static void mtk_plane_atomic_async_update(struct drm_plane *plane, plane->state->src_h = new_state->src_h; plane->state->src_w = new_state->src_w; swap(plane->state->fb, new_state->fb); - new_plane_state->pending.async_dirty = true; + mtk_plane_update_new_state(new_state, new_plane_state); + wmb(); /* Make sure the above parameters are set before update */ + new_plane_state->pending.async_dirty = true; mtk_drm_crtc_async_update(new_state->crtc, plane, state); } @@ -189,14 +220,8 @@ static void mtk_plane_atomic_update(struct drm_plane *plane, struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(new_state); - struct drm_crtc *crtc = new_state->crtc; - struct drm_framebuffer *fb = new_state->fb; - struct drm_gem_object *gem; - struct mtk_drm_gem_obj *mtk_gem; - unsigned int pitch, format; - dma_addr_t addr; - if (!crtc || WARN_ON(!fb)) + if (!new_state->crtc || WARN_ON(!new_state->fb)) return; if (!new_state->visible) { @@ -204,24 +229,7 @@ static void mtk_plane_atomic_update(struct drm_plane *plane, return; } - gem = fb->obj[0]; - mtk_gem = to_mtk_gem_obj(gem); - addr = mtk_gem->dma_addr; - pitch = fb->pitches[0]; - format = fb->format->format; - - addr += (new_state->src.x1 >> 16) * fb->format->cpp[0]; - addr += (new_state->src.y1 >> 16) * pitch; - - mtk_plane_state->pending.enable = true; - mtk_plane_state->pending.pitch = pitch; - mtk_plane_state->pending.format = format; - mtk_plane_state->pending.addr = addr; - mtk_plane_state->pending.x = new_state->dst.x1; - mtk_plane_state->pending.y = new_state->dst.y1; - mtk_plane_state->pending.width = drm_rect_width(&new_state->dst); - mtk_plane_state->pending.height = drm_rect_height(&new_state->dst); - mtk_plane_state->pending.rotation = new_state->rotation; + mtk_plane_update_new_state(new_state, mtk_plane_state); wmb(); /* Make sure the above parameters are set before update */ mtk_plane_state->pending.dirty = true; } diff 
--git a/drivers/gpu/drm/meson/meson_registers.h b/drivers/gpu/drm/meson/meson_registers.h index 446e7961da48..0f3cafab8860 100644 --- a/drivers/gpu/drm/meson/meson_registers.h +++ b/drivers/gpu/drm/meson/meson_registers.h @@ -634,6 +634,11 @@ #define VPP_WRAP_OSD3_MATRIX_PRE_OFFSET2 0x3dbc #define VPP_WRAP_OSD3_MATRIX_EN_CTRL 0x3dbd +/* osd1 HDR */ +#define OSD1_HDR2_CTRL 0x38a0 +#define OSD1_HDR2_CTRL_VDIN0_HDR2_TOP_EN BIT(13) +#define OSD1_HDR2_CTRL_REG_ONLY_MAT BIT(16) + /* osd2 scaler */ #define OSD2_VSC_PHASE_STEP 0x3d00 #define OSD2_VSC_INI_PHASE 0x3d01 diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c index aede0c67a57f..259f3e6bec90 100644 --- a/drivers/gpu/drm/meson/meson_viu.c +++ b/drivers/gpu/drm/meson/meson_viu.c @@ -425,9 +425,14 @@ void meson_viu_init(struct meson_drm *priv) if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) || meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL)) meson_viu_load_matrix(priv); - else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) + else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) { meson_viu_set_g12a_osd1_matrix(priv, RGB709_to_YUV709l_coeff, true); + /* fix green/pink color distortion from vendor u-boot */ + writel_bits_relaxed(OSD1_HDR2_CTRL_REG_ONLY_MAT | + OSD1_HDR2_CTRL_VDIN0_HDR2_TOP_EN, 0, + priv->io_base + _REG(OSD1_HDR2_CTRL)); + } /* Initialize OSD1 fifo control register */ reg = VIU_OSD_DDR_PRIORITY_URGENT | diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c index d01c4c919504..704dace895cb 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c @@ -296,7 +296,7 @@ static const struct dpu_mdp_cfg sc7180_mdp[] = { static const struct dpu_mdp_cfg sm8250_mdp[] = { { .name = "top_0", .id = MDP_TOP, - .base = 0x0, .len = 0x45C, + .base = 0x0, .len = 0x494, .features = 0, .highest_bank_bit = 0x3, /* TODO: 2 for LP_DDR4 */ .clk_ctrls[DPU_CLK_CTRL_VIG0] = { diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c index ca96e3514790..c0423e76eed7 100644 --- a/drivers/gpu/drm/msm/dp/dp_catalog.c +++ b/drivers/gpu/drm/msm/dp/dp_catalog.c @@ -771,6 +771,7 @@ int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog) dp_write_link(catalog, REG_DP_HSYNC_VSYNC_WIDTH_POLARITY, dp_catalog->width_blanking); dp_write_link(catalog, REG_DP_ACTIVE_HOR_VER, dp_catalog->dp_active); + dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, 0); return 0; } diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c index ee221d835fa0..eaddfd739885 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.c +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c @@ -1526,7 +1526,7 @@ static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl) * running. Add the global reset just before disabling the * link clocks and core clocks. 
*/ - ret = dp_ctrl_off(&ctrl->dp_ctrl); + ret = dp_ctrl_off_link_stream(&ctrl->dp_ctrl); if (ret) { DRM_ERROR("failed to disable DP controller\n"); return ret; } diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index 051c1be1de7e..867388a399ad 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -219,6 +219,7 @@ static int dp_display_bind(struct device *dev, struct device *master, goto end; } + dp->aux->drm_dev = drm; rc = dp_aux_register(dp->aux); if (rc) { DRM_ERROR("DRM DP AUX register failed\n"); @@ -1311,6 +1312,10 @@ static int dp_pm_resume(struct device *dev) else dp->dp_display.is_connected = false; + dp_display_handle_plugged_change(g_dp_display, + dp->dp_display.is_connected); + + mutex_unlock(&dp->event_mutex); return 0; diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 141178754231..1e8a971a86f2 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -1169,7 +1169,7 @@ static int msm_gem_new_impl(struct drm_device *dev, case MSM_BO_CACHED_COHERENT: if (priv->has_cached_coherent) break; - /* fallthrough */ + fallthrough; default: DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n", (flags & MSM_BO_CACHE_MASK)); diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index eed2a762e9dd..bcaddbba564d 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ -142,6 +142,9 @@ static const struct iommu_flush_ops null_tlb_ops = { .tlb_add_page = msm_iommu_tlb_add_page, }; +static int msm_fault_handler(struct iommu_domain *domain, struct device *dev, + unsigned long iova, int flags, void *arg); + struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent) { struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev); @@ -157,6 +160,13 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent) if (!ttbr1_cfg) return ERR_PTR(-ENODEV); + /* + * Defer setting the fault handler until we have a valid adreno_smmu + * to avoid accidentally installing a GPU-specific fault handler for + * the display's iommu + */ + iommu_set_fault_handler(iommu->domain, msm_fault_handler, iommu); + pagetable = kzalloc(sizeof(*pagetable), GFP_KERNEL); if (!pagetable) return ERR_PTR(-ENOMEM); @@ -300,7 +310,6 @@ struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain) iommu->domain = domain; msm_mmu_init(&iommu->base, dev, &funcs, MSM_MMU_IOMMU); - iommu_set_fault_handler(domain, msm_fault_handler, iommu); atomic_set(&iommu->pagetables, 0); diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index f949767698fc..bcb0310a41b6 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -2237,6 +2237,33 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) interlock[NV50_DISP_INTERLOCK_CORE] = 0; } + /* Finish updating head(s)... + * + * NVD is rather picky about both where window assignments can change, + * *and* about certain core and window channel states matching. + * + * The EFI GOP driver on newer GPUs configures window channels with a + * different output format to what we do, and the core channel update + * in the assign_windows case above would result in a state mismatch. + * + * Delay some of the head update until after that point to work around + * the issue. This only affects the initial modeset.
+ * + * TODO: handle this better when adding flexible window mapping + */ + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state); + struct nv50_head *head = nv50_head(crtc); + + NV_ATOMIC(drm, "%s: set %04x (clr %04x)\n", crtc->name, + asyh->set.mask, asyh->clr.mask); + + if (asyh->set.mask) { + nv50_head_flush_set_wndw(head, asyh); + interlock[NV50_DISP_INTERLOCK_CORE] = 1; + } + } + /* Update plane(s). */ for_each_new_plane_in_state(state, plane, new_plane_state, i) { struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state); diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c index ec361d17e900..d66f97280282 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head.c +++ b/drivers/gpu/drm/nouveau/dispnv50/head.c @@ -50,11 +50,8 @@ nv50_head_flush_clr(struct nv50_head *head, } void -nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh) +nv50_head_flush_set_wndw(struct nv50_head *head, struct nv50_head_atom *asyh) { - if (asyh->set.view ) head->func->view (head, asyh); - if (asyh->set.mode ) head->func->mode (head, asyh); - if (asyh->set.core ) head->func->core_set(head, asyh); if (asyh->set.olut ) { asyh->olut.offset = nv50_lut_load(&head->olut, asyh->olut.buffer, @@ -62,6 +59,14 @@ nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh) asyh->olut.load); head->func->olut_set(head, asyh); } +} + +void +nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh) +{ + if (asyh->set.view ) head->func->view (head, asyh); + if (asyh->set.mode ) head->func->mode (head, asyh); + if (asyh->set.core ) head->func->core_set(head, asyh); if (asyh->set.curs ) head->func->curs_set(head, asyh); if (asyh->set.base ) head->func->base (head, asyh); if (asyh->set.ovly ) head->func->ovly (head, asyh); diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.h b/drivers/gpu/drm/nouveau/dispnv50/head.h index dae841dc05fd..0bac6be9ba34 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head.h +++ b/drivers/gpu/drm/nouveau/dispnv50/head.h @@ -21,6 +21,7 @@ struct nv50_head { struct nv50_head *nv50_head_create(struct drm_device *, int index); void nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh); +void nv50_head_flush_set_wndw(struct nv50_head *head, struct nv50_head_atom *asyh); void nv50_head_flush_clr(struct nv50_head *head, struct nv50_head_atom *asyh, bool flush); diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h index 0b86c44878e0..59759c4fb62e 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h +++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h @@ -4,7 +4,8 @@ struct nv_device_v0 { __u8 version; - __u8 pad01[7]; + __u8 priv; + __u8 pad02[6]; __u64 device; /* device identifier, ~0 for client default */ }; diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h index ba2c28ea43d2..c68cc957248e 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/class.h +++ b/drivers/gpu/drm/nouveau/include/nvif/class.h @@ -61,8 +61,6 @@ #define NV10_CHANNEL_DMA /* cl506b.h */ 0x0000006e #define NV17_CHANNEL_DMA /* cl506b.h */ 0x0000176e #define NV40_CHANNEL_DMA /* cl506b.h */ 0x0000406e -#define NV50_CHANNEL_DMA /* cl506e.h */ 0x0000506e -#define G82_CHANNEL_DMA /* cl826e.h */ 0x0000826e #define NV50_CHANNEL_GPFIFO /* cl506f.h */ 0x0000506f #define G82_CHANNEL_GPFIFO /* cl826f.h */ 0x0000826f diff --git 
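The head.c split above carves the window-sensitive OLUT programming out of nv50_head_flush_set() so that commit_tail can run it only after the core channel's window assignments have settled. The shape of that split, with illustrative types standing in for nouveau's (head_funcs, set_mode and set_olut are placeholders, not the driver's fields):

struct head;

struct head_funcs {
	void (*mode)(struct head *);		/* safe before window assignment */
	void (*olut_set)(struct head *);	/* must wait for it */
};

struct head {
	const struct head_funcs *func;
	unsigned int set_mode:1;
	unsigned int set_olut:1;
};

/* Phase 1: flushed with the main head update. */
static void head_flush_set(struct head *h)
{
	if (h->set_mode)
		h->func->mode(h);
}

/* Phase 2: flushed after the core channel has committed its window
 * assignments; the caller raises the core interlock when anything
 * was programmed here. */
static void head_flush_set_wndw(struct head *h)
{
	if (h->set_olut)
		h->func->olut_set(h);
}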
a/drivers/gpu/drm/nouveau/include/nvif/client.h b/drivers/gpu/drm/nouveau/include/nvif/client.h index 347d2c020bd1..5d9395e651b6 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/client.h +++ b/drivers/gpu/drm/nouveau/include/nvif/client.h @@ -9,7 +9,6 @@ struct nvif_client { const struct nvif_driver *driver; u64 version; u8 route; - bool super; }; int nvif_client_ctor(struct nvif_client *parent, const char *name, u64 device, diff --git a/drivers/gpu/drm/nouveau/include/nvif/driver.h b/drivers/gpu/drm/nouveau/include/nvif/driver.h index 8e85b936eaa0..7a3af05f7f98 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/driver.h +++ b/drivers/gpu/drm/nouveau/include/nvif/driver.h @@ -11,7 +11,7 @@ struct nvif_driver { void (*fini)(void *priv); int (*suspend)(void *priv); int (*resume)(void *priv); - int (*ioctl)(void *priv, bool super, void *data, u32 size, void **hack); + int (*ioctl)(void *priv, void *data, u32 size, void **hack); void __iomem *(*map)(void *priv, u64 handle, u32 size); void (*unmap)(void *priv, void __iomem *ptr, u32 size); bool keep; diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h index 5d7017fe5039..2f86606e708c 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h @@ -13,7 +13,6 @@ struct nvkm_client { struct nvkm_client_notify *notify[32]; struct rb_root objroot; - bool super; void *data; int (*ntfy)(const void *, u32, const void *, u32); diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h b/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h index 71ed147ad077..f52918a43246 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h @@ -4,5 +4,5 @@ #include <core/os.h> struct nvkm_client; -int nvkm_ioctl(struct nvkm_client *, bool, void *, u32, void **); +int nvkm_ioctl(struct nvkm_client *, void *, u32, void **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h index 0911e73f7424..70e7887ef4b4 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h @@ -15,7 +15,6 @@ struct nvkm_vma { u8 refd:3; /* Current page type (index, or NONE for unreferenced). */ bool used:1; /* Region allocated. */ bool part:1; /* Region was split from an allocated region by map(). */ - bool user:1; /* Region user-allocated. */ bool busy:1; /* Region busy (for temporarily preventing user access). */ bool mapped:1; /* Region contains valid pages. */ struct nvkm_memory *memory; /* Memory currently mapped into VMA. 
*/ diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index b45ec3086285..4107b7006539 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c @@ -570,11 +570,9 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS) } client->route = NVDRM_OBJECT_ABI16; - client->super = true; ret = nvif_object_ctor(&chan->chan->user, "abi16Ntfy", info->handle, NV_DMA_IN_MEMORY, &args, sizeof(args), &ntfy->object); - client->super = false; client->route = NVDRM_OBJECT_NVIF; if (ret) goto done; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 4f3a5357dd56..6d07e653f82d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -149,6 +149,8 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo) */ if (bo->base.dev) drm_gem_object_release(&bo->base); + else + dma_resv_fini(&bo->base._resv); kfree(nvbo); } @@ -330,6 +332,10 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align, if (IS_ERR(nvbo)) return PTR_ERR(nvbo); + nvbo->bo.base.size = size; + dma_resv_init(&nvbo->bo.base._resv); + drm_vma_node_reset(&nvbo->bo.base.vma_node); + ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c index 40362600eed2..80099ef75702 100644 --- a/drivers/gpu/drm/nouveau/nouveau_chan.c +++ b/drivers/gpu/drm/nouveau/nouveau_chan.c @@ -86,12 +86,6 @@ nouveau_channel_del(struct nouveau_channel **pchan) struct nouveau_channel *chan = *pchan; if (chan) { struct nouveau_cli *cli = (void *)chan->user.client; - bool super; - - if (cli) { - super = cli->base.super; - cli->base.super = true; - } if (chan->fence) nouveau_fence(chan->drm)->context_del(chan); @@ -111,9 +105,6 @@ nouveau_channel_del(struct nouveau_channel **pchan) nouveau_bo_unpin(chan->push.buffer); nouveau_bo_ref(NULL, &chan->push.buffer); kfree(chan); - - if (cli) - cli->base.super = super; } *pchan = NULL; } @@ -512,20 +503,16 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device, struct nouveau_channel **pchan) { struct nouveau_cli *cli = (void *)device->object.client; - bool super; int ret; /* hack until fencenv50 is fixed, and agp access relaxed */ - super = cli->base.super; - cli->base.super = true; - ret = nouveau_channel_ind(drm, device, arg0, priv, pchan); if (ret) { NV_PRINTK(dbg, cli, "ib channel create, %d\n", ret); ret = nouveau_channel_dma(drm, device, pchan); if (ret) { NV_PRINTK(dbg, cli, "dma channel create, %d\n", ret); - goto done; + return ret; } } @@ -533,15 +520,13 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device, if (ret) { NV_PRINTK(err, cli, "channel failed to initialise, %d\n", ret); nouveau_channel_del(pchan); - goto done; + return ret; } ret = nouveau_svmm_join((*pchan)->vmm->svmm, (*pchan)->inst); if (ret) nouveau_channel_del(pchan); -done: - cli->base.super = super; return ret; } diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index a616cf4573b8..ba4cd5f83725 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -244,6 +244,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname, ret = nvif_device_ctor(&cli->base.object, "drmDevice", 0, NV_DEVICE, &(struct nv_device_v0) { .device = ~0, + .priv = true, }, sizeof(struct nv_device_v0), &cli->device); if (ret) { @@ -1086,8 +1087,6 @@ nouveau_drm_open(struct 
drm_device *dev, struct drm_file *fpriv) if (ret) goto done; - cli->base.super = false; - fpriv->driver_priv = cli; mutex_lock(&drm->client.mutex); diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 0de6549fb875..2ca3207c13fc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -41,8 +41,6 @@ nouveau_mem_map(struct nouveau_mem *mem, struct gf100_vmm_map_v0 gf100; } args; u32 argc = 0; - bool super; - int ret; switch (vmm->object.oclass) { case NVIF_CLASS_VMM_NV04: @@ -73,12 +71,7 @@ nouveau_mem_map(struct nouveau_mem *mem, return -ENOSYS; } - super = vmm->object.client->super; - vmm->object.client->super = true; - ret = nvif_vmm_map(vmm, vma->addr, mem->mem.size, &args, argc, - &mem->mem, 0); - vmm->object.client->super = super; - return ret; + return nvif_vmm_map(vmm, vma->addr, mem->mem.size, &args, argc, &mem->mem, 0); } void @@ -99,7 +92,6 @@ nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt) struct nouveau_drm *drm = cli->drm; struct nvif_mmu *mmu = &cli->mmu; struct nvif_mem_ram_v0 args = {}; - bool super = cli->base.super; u8 type; int ret; @@ -122,11 +114,9 @@ nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt) args.dma = tt->dma_address; mutex_lock(&drm->master.lock); - cli->base.super = true; ret = nvif_mem_ctor_type(mmu, "ttmHostMem", cli->mem->oclass, type, PAGE_SHIFT, reg->num_pages << PAGE_SHIFT, &args, sizeof(args), &mem->mem); - cli->base.super = super; mutex_unlock(&drm->master.lock); return ret; } @@ -138,12 +128,10 @@ nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page) struct nouveau_cli *cli = mem->cli; struct nouveau_drm *drm = cli->drm; struct nvif_mmu *mmu = &cli->mmu; - bool super = cli->base.super; u64 size = ALIGN(reg->num_pages << PAGE_SHIFT, 1 << page); int ret; mutex_lock(&drm->master.lock); - cli->base.super = true; switch (cli->mem->oclass) { case NVIF_CLASS_MEM_GF100: ret = nvif_mem_ctor_type(mmu, "ttmVram", cli->mem->oclass, @@ -167,7 +155,6 @@ nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page) WARN_ON(1); break; } - cli->base.super = super; mutex_unlock(&drm->master.lock); reg->start = mem->mem.addr >> PAGE_SHIFT; diff --git a/drivers/gpu/drm/nouveau/nouveau_nvif.c b/drivers/gpu/drm/nouveau/nouveau_nvif.c index b3f29b1ce9ea..52f5793b7274 100644 --- a/drivers/gpu/drm/nouveau/nouveau_nvif.c +++ b/drivers/gpu/drm/nouveau/nouveau_nvif.c @@ -52,9 +52,9 @@ nvkm_client_map(void *priv, u64 handle, u32 size) } static int -nvkm_client_ioctl(void *priv, bool super, void *data, u32 size, void **hack) +nvkm_client_ioctl(void *priv, void *data, u32 size, void **hack) { - return nvkm_ioctl(priv, super, data, size, hack); + return nvkm_ioctl(priv, data, size, hack); } static int diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index 82b583f5fca8..b0c3422cb01f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c @@ -237,14 +237,11 @@ void nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit) { if (limit > start) { - bool super = svmm->vmm->vmm.object.client->super; - svmm->vmm->vmm.object.client->super = true; nvif_object_mthd(&svmm->vmm->vmm.object, NVIF_VMM_V0_PFNCLR, &(struct nvif_vmm_pfnclr_v0) { .addr = start, .size = limit - start, }, sizeof(struct nvif_vmm_pfnclr_v0)); - svmm->vmm->vmm.object.client->super = super; } } @@ -634,9 +631,7 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm, NVIF_VMM_PFNMAP_V0_A | 
NVIF_VMM_PFNMAP_V0_HOST; - svmm->vmm->vmm.object.client->super = true; ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL); - svmm->vmm->vmm.object.client->super = false; mutex_unlock(&svmm->mutex); unlock_page(page); @@ -702,9 +697,7 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm, nouveau_hmm_convert_pfn(drm, &range, args); - svmm->vmm->vmm.object.client->super = true; ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL); - svmm->vmm->vmm.object.client->super = false; mutex_unlock(&svmm->mutex); out: @@ -928,10 +921,8 @@ nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm, mutex_lock(&svmm->mutex); - svmm->vmm->vmm.object.client->super = true; ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, sizeof(*args) + npages * sizeof(args->p.phys[0]), NULL); - svmm->vmm->vmm.object.client->super = false; mutex_unlock(&svmm->mutex); } diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c index 9dc10b17ad34..5da1f4d223d7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_usif.c +++ b/drivers/gpu/drm/nouveau/nouveau_usif.c @@ -32,6 +32,9 @@ #include <nvif/event.h> #include <nvif/ioctl.h> +#include <nvif/class.h> +#include <nvif/cl0080.h> + struct usif_notify_p { struct drm_pending_event base; struct { @@ -261,7 +264,7 @@ usif_object_dtor(struct usif_object *object) } static int -usif_object_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc) +usif_object_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc, bool parent_abi16) { struct nouveau_cli *cli = nouveau_cli(f); struct nvif_client *client = &cli->base; @@ -271,23 +274,48 @@ usif_object_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc) struct usif_object *object; int ret = -ENOSYS; + if ((ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) + return ret; + + switch (args->v0.oclass) { + case NV_DMA_FROM_MEMORY: + case NV_DMA_TO_MEMORY: + case NV_DMA_IN_MEMORY: + return -EINVAL; + case NV_DEVICE: { + union { + struct nv_device_v0 v0; + } *args = data; + + if ((ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) + return ret; + + args->v0.priv = false; + break; + } + default: + if (!parent_abi16) + return -EINVAL; + break; + } + if (!(object = kmalloc(sizeof(*object), GFP_KERNEL))) return -ENOMEM; list_add(&object->head, &cli->objects); - if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) { - object->route = args->v0.route; - object->token = args->v0.token; - args->v0.route = NVDRM_OBJECT_USIF; - args->v0.token = (unsigned long)(void *)object; - ret = nvif_client_ioctl(client, argv, argc); - args->v0.token = object->token; - args->v0.route = object->route; + object->route = args->v0.route; + object->token = args->v0.token; + args->v0.route = NVDRM_OBJECT_USIF; + args->v0.token = (unsigned long)(void *)object; + ret = nvif_client_ioctl(client, argv, argc); + if (ret) { + usif_object_dtor(object); + return ret; } - if (ret) - usif_object_dtor(object); - return ret; + args->v0.token = object->token; + args->v0.route = object->route; + return 0; } int @@ -301,6 +329,7 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc) struct nvif_ioctl_v0 v0; } *argv = data; struct usif_object *object; + bool abi16 = false; u8 owner; int ret; @@ -331,11 +360,13 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc) mutex_unlock(&cli->mutex); goto done; } + + abi16 = true; } switch (argv->v0.type) { case NVIF_IOCTL_V0_NEW: - ret = usif_object_new(filp, data, size, argv, 
argc); + ret = usif_object_new(filp, data, size, argv, argc, abi16); break; case NVIF_IOCTL_V0_NTFY_NEW: ret = usif_notify_new(filp, data, size, argv, argc); diff --git a/drivers/gpu/drm/nouveau/nvif/client.c b/drivers/gpu/drm/nouveau/nvif/client.c index 12644f811b3e..a3264a0e933a 100644 --- a/drivers/gpu/drm/nouveau/nvif/client.c +++ b/drivers/gpu/drm/nouveau/nvif/client.c @@ -32,7 +32,7 @@ int nvif_client_ioctl(struct nvif_client *client, void *data, u32 size) { - return client->driver->ioctl(client->object.priv, client->super, data, size, NULL); + return client->driver->ioctl(client->object.priv, data, size, NULL); } int @@ -80,7 +80,6 @@ nvif_client_ctor(struct nvif_client *parent, const char *name, u64 device, client->object.client = client; client->object.handle = ~0; client->route = NVIF_IOCTL_V0_ROUTE_NVIF; - client->super = true; client->driver = parent->driver; if (ret == 0) { diff --git a/drivers/gpu/drm/nouveau/nvif/object.c b/drivers/gpu/drm/nouveau/nvif/object.c index 671a5c0199e0..dce1ecee2af5 100644 --- a/drivers/gpu/drm/nouveau/nvif/object.c +++ b/drivers/gpu/drm/nouveau/nvif/object.c @@ -44,8 +44,7 @@ nvif_object_ioctl(struct nvif_object *object, void *data, u32 size, void **hack) } else return -ENOSYS; - return client->driver->ioctl(client->object.priv, client->super, - data, size, hack); + return client->driver->ioctl(client->object.priv, data, size, hack); } void diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c index d777df5a64e6..735cb6816f10 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c @@ -426,8 +426,7 @@ nvkm_ioctl_path(struct nvkm_client *client, u64 handle, u32 type, } int -nvkm_ioctl(struct nvkm_client *client, bool supervisor, - void *data, u32 size, void **hack) +nvkm_ioctl(struct nvkm_client *client, void *data, u32 size, void **hack) { struct nvkm_object *object = &client->object; union { @@ -435,7 +434,6 @@ nvkm_ioctl(struct nvkm_client *client, bool supervisor, } *args = data; int ret = -ENOSYS; - client->super = supervisor; nvif_ioctl(object, "size %d\n", size); if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) { diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index b930f539feec..93ddf63d1114 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c @@ -2624,6 +2624,26 @@ nv174_chipset = { .dma = { 0x00000001, gv100_dma_new }, }; +static const struct nvkm_device_chip +nv177_chipset = { + .name = "GA107", + .bar = { 0x00000001, tu102_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .devinit = { 0x00000001, ga100_devinit_new }, + .fb = { 0x00000001, ga102_fb_new }, + .gpio = { 0x00000001, ga102_gpio_new }, + .i2c = { 0x00000001, gm200_i2c_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .mc = { 0x00000001, ga100_mc_new }, + .mmu = { 0x00000001, tu102_mmu_new }, + .pci = { 0x00000001, gp100_pci_new }, + .privring = { 0x00000001, gm200_privring_new }, + .timer = { 0x00000001, gk20a_timer_new }, + .top = { 0x00000001, ga100_top_new }, + .disp = { 0x00000001, ga102_disp_new }, + .dma = { 0x00000001, gv100_dma_new }, +}; + static int nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size, struct nvkm_notify *notify) @@ -3049,6 +3069,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func, case 0x168: device->chip = &nv168_chipset; break; case 0x172: device->chip = &nv172_chipset; break; 
case 0x174: device->chip = &nv174_chipset; break; + case 0x177: device->chip = &nv177_chipset; break; default: if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) { switch (device->chipset) { diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c index fea9d8f2b10c..f28894fdede9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c @@ -397,7 +397,7 @@ nvkm_udevice_new(const struct nvkm_oclass *oclass, void *data, u32 size, return ret; /* give priviledged clients register access */ - if (client->super) + if (args->v0.priv) func = &nvkm_udevice_super; else func = &nvkm_udevice; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c index 55fbfe28c6dc..9669472a2749 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c @@ -440,7 +440,7 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps) return ret; } -static void +void nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior) { struct nvkm_dp *dp = nvkm_dp(outp); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h index 428b3f488f03..e484d0c3b0d4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h @@ -32,6 +32,7 @@ struct nvkm_dp { int nvkm_dp_new(struct nvkm_disp *, int index, struct dcb_output *, struct nvkm_outp **); +void nvkm_dp_disable(struct nvkm_outp *, struct nvkm_ior *); /* DPCD Receiver Capabilities */ #define DPCD_RC00_DPCD_REV 0x00000 diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c index dffcac249211..129982fef7ef 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c @@ -22,6 +22,7 @@ * Authors: Ben Skeggs */ #include "outp.h" +#include "dp.h" #include "ior.h" #include <subdev/bios.h> @@ -257,6 +258,14 @@ nvkm_outp_init_route(struct nvkm_outp *outp) if (!ior->arm.head || ior->arm.proto != proto) { OUTP_DBG(outp, "no heads (%x %d %d)", ior->arm.head, ior->arm.proto, proto); + + /* The EFI GOP driver on Ampere can leave unused DP links routed, + * which we don't expect. The DisableLT IED script *should* get + * us back to where we need to be. 
+ */ + if (ior->func->route.get && !ior->arm.head && outp->info.type == DCB_OUTPUT_DP) + nvkm_dp_disable(outp, ior); + return; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c index d20cc0681a88..797131ed7d67 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c @@ -26,7 +26,6 @@ #include <core/client.h> #include <core/gpuobj.h> #include <subdev/fb.h> -#include <subdev/instmem.h> #include <nvif/cl0002.h> #include <nvif/unpack.h> @@ -72,11 +71,7 @@ nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *func, struct nvkm_dma *dma, union { struct nv_dma_v0 v0; } *args = *pdata; - struct nvkm_device *device = dma->engine.subdev.device; - struct nvkm_client *client = oclass->client; struct nvkm_object *parent = oclass->parent; - struct nvkm_instmem *instmem = device->imem; - struct nvkm_fb *fb = device->fb; void *data = *pdata; u32 size = *psize; int ret = -ENOSYS; @@ -109,23 +104,13 @@ nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *func, struct nvkm_dma *dma, dmaobj->target = NV_MEM_TARGET_VM; break; case NV_DMA_V0_TARGET_VRAM: - if (!client->super) { - if (dmaobj->limit >= fb->ram->size - instmem->reserved) - return -EACCES; - if (device->card_type >= NV_50) - return -EACCES; - } dmaobj->target = NV_MEM_TARGET_VRAM; break; case NV_DMA_V0_TARGET_PCI: - if (!client->super) - return -EACCES; dmaobj->target = NV_MEM_TARGET_PCI; break; case NV_DMA_V0_TARGET_PCI_US: case NV_DMA_V0_TARGET_AGP: - if (!client->super) - return -EACCES; dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP; break; default: diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild index 90e9a0972a44..3209eb7af65f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild @@ -27,8 +27,6 @@ nvkm-y += nvkm/engine/fifo/dmanv04.o nvkm-y += nvkm/engine/fifo/dmanv10.o nvkm-y += nvkm/engine/fifo/dmanv17.o nvkm-y += nvkm/engine/fifo/dmanv40.o -nvkm-y += nvkm/engine/fifo/dmanv50.o -nvkm-y += nvkm/engine/fifo/dmag84.o nvkm-y += nvkm/engine/fifo/gpfifonv50.o nvkm-y += nvkm/engine/fifo/gpfifog84.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h index af8bdf275552..3a95730d7ff5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h @@ -48,8 +48,6 @@ void nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *, int); int g84_fifo_chan_ctor(struct nv50_fifo *, u64 vmm, u64 push, const struct nvkm_oclass *, struct nv50_fifo_chan *); -extern const struct nvkm_fifo_chan_oclass nv50_fifo_dma_oclass; extern const struct nvkm_fifo_chan_oclass nv50_fifo_gpfifo_oclass; -extern const struct nvkm_fifo_chan_oclass g84_fifo_dma_oclass; extern const struct nvkm_fifo_chan_oclass g84_fifo_gpfifo_oclass; #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c deleted file mode 100644 index fc34cddcd2f5..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Ben Skeggs - */ -#include "channv50.h" - -#include <core/client.h> -#include <core/ramht.h> - -#include <nvif/class.h> -#include <nvif/cl826e.h> -#include <nvif/unpack.h> - -static int -g84_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass, - void *data, u32 size, struct nvkm_object **pobject) -{ - struct nvkm_object *parent = oclass->parent; - union { - struct g82_channel_dma_v0 v0; - } *args = data; - struct nv50_fifo *fifo = nv50_fifo(base); - struct nv50_fifo_chan *chan; - int ret = -ENOSYS; - - nvif_ioctl(parent, "create channel dma size %d\n", size); - if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) { - nvif_ioctl(parent, "create channel dma vers %d vmm %llx " - "pushbuf %llx offset %016llx\n", - args->v0.version, args->v0.vmm, args->v0.pushbuf, - args->v0.offset); - if (!args->v0.pushbuf) - return -EINVAL; - } else - return ret; - - if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL))) - return -ENOMEM; - *pobject = &chan->base.object; - - ret = g84_fifo_chan_ctor(fifo, args->v0.vmm, args->v0.pushbuf, - oclass, chan); - if (ret) - return ret; - - args->v0.chid = chan->base.chid; - - nvkm_kmap(chan->ramfc); - nvkm_wo32(chan->ramfc, 0x08, lower_32_bits(args->v0.offset)); - nvkm_wo32(chan->ramfc, 0x0c, upper_32_bits(args->v0.offset)); - nvkm_wo32(chan->ramfc, 0x10, lower_32_bits(args->v0.offset)); - nvkm_wo32(chan->ramfc, 0x14, upper_32_bits(args->v0.offset)); - nvkm_wo32(chan->ramfc, 0x3c, 0x003f6078); - nvkm_wo32(chan->ramfc, 0x44, 0x01003fff); - nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4); - nvkm_wo32(chan->ramfc, 0x4c, 0xffffffff); - nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff); - nvkm_wo32(chan->ramfc, 0x78, 0x00000000); - nvkm_wo32(chan->ramfc, 0x7c, 0x30000001); - nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) | - (4 << 24) /* SEARCH_FULL */ | - (chan->ramht->gpuobj->node->offset >> 4)); - nvkm_wo32(chan->ramfc, 0x88, chan->cache->addr >> 10); - nvkm_wo32(chan->ramfc, 0x98, chan->base.inst->addr >> 12); - nvkm_done(chan->ramfc); - return 0; -} - -const struct nvkm_fifo_chan_oclass -g84_fifo_dma_oclass = { - .base.oclass = G82_CHANNEL_DMA, - .base.minver = 0, - .base.maxver = 0, - .ctor = g84_fifo_dma_new, -}; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c deleted file mode 100644 index 8043718ad150..000000000000 --- 
a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Ben Skeggs - */ -#include "channv50.h" - -#include <core/client.h> -#include <core/ramht.h> - -#include <nvif/class.h> -#include <nvif/cl506e.h> -#include <nvif/unpack.h> - -static int -nv50_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass, - void *data, u32 size, struct nvkm_object **pobject) -{ - struct nvkm_object *parent = oclass->parent; - union { - struct nv50_channel_dma_v0 v0; - } *args = data; - struct nv50_fifo *fifo = nv50_fifo(base); - struct nv50_fifo_chan *chan; - int ret = -ENOSYS; - - nvif_ioctl(parent, "create channel dma size %d\n", size); - if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) { - nvif_ioctl(parent, "create channel dma vers %d vmm %llx " - "pushbuf %llx offset %016llx\n", - args->v0.version, args->v0.vmm, args->v0.pushbuf, - args->v0.offset); - if (!args->v0.pushbuf) - return -EINVAL; - } else - return ret; - - if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL))) - return -ENOMEM; - *pobject = &chan->base.object; - - ret = nv50_fifo_chan_ctor(fifo, args->v0.vmm, args->v0.pushbuf, - oclass, chan); - if (ret) - return ret; - - args->v0.chid = chan->base.chid; - - nvkm_kmap(chan->ramfc); - nvkm_wo32(chan->ramfc, 0x08, lower_32_bits(args->v0.offset)); - nvkm_wo32(chan->ramfc, 0x0c, upper_32_bits(args->v0.offset)); - nvkm_wo32(chan->ramfc, 0x10, lower_32_bits(args->v0.offset)); - nvkm_wo32(chan->ramfc, 0x14, upper_32_bits(args->v0.offset)); - nvkm_wo32(chan->ramfc, 0x3c, 0x003f6078); - nvkm_wo32(chan->ramfc, 0x44, 0x01003fff); - nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4); - nvkm_wo32(chan->ramfc, 0x4c, 0xffffffff); - nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff); - nvkm_wo32(chan->ramfc, 0x78, 0x00000000); - nvkm_wo32(chan->ramfc, 0x7c, 0x30000001); - nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) | - (4 << 24) /* SEARCH_FULL */ | - (chan->ramht->gpuobj->node->offset >> 4)); - nvkm_done(chan->ramfc); - return 0; -} - -const struct nvkm_fifo_chan_oclass -nv50_fifo_dma_oclass = { - .base.oclass = NV50_CHANNEL_DMA, - .base.minver = 0, - .base.maxver = 0, - .ctor = nv50_fifo_dma_new, -}; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c index c0a7d0f21dac..3885c3830b94 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c +++ 
b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c @@ -119,7 +119,6 @@ g84_fifo = { .uevent_init = g84_fifo_uevent_init, .uevent_fini = g84_fifo_uevent_fini, .chan = { - &g84_fifo_dma_oclass, &g84_fifo_gpfifo_oclass, NULL }, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c index b6900a52bcce..ae6c4d846eb5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c @@ -341,8 +341,6 @@ gk104_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass, "runlist %016llx priv %d\n", args->v0.version, args->v0.vmm, args->v0.ioffset, args->v0.ilength, args->v0.runlist, args->v0.priv); - if (args->v0.priv && !oclass->client->super) - return -EINVAL; return gk104_fifo_gpfifo_new_(fifo, &args->v0.runlist, &args->v0.chid, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c index ee4967b706a7..743791c514fe 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c @@ -226,8 +226,6 @@ gv100_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass, "runlist %016llx priv %d\n", args->v0.version, args->v0.vmm, args->v0.ioffset, args->v0.ilength, args->v0.runlist, args->v0.priv); - if (args->v0.priv && !oclass->client->super) - return -EINVAL; return gv100_fifo_gpfifo_new_(&gv100_fifo_gpfifo, fifo, &args->v0.runlist, &args->v0.chid, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu102.c index abef7fb6e2d3..99aafa103a31 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu102.c @@ -65,8 +65,6 @@ tu102_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass, "runlist %016llx priv %d\n", args->v0.version, args->v0.vmm, args->v0.ioffset, args->v0.ilength, args->v0.runlist, args->v0.priv); - if (args->v0.priv && !oclass->client->super) - return -EINVAL; return gv100_fifo_gpfifo_new_(&tu102_fifo_gpfifo, fifo, &args->v0.runlist, &args->v0.chid, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c index be94156ea248..a08742cf425a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c @@ -136,7 +136,6 @@ nv50_fifo = { .pause = nv04_fifo_pause, .start = nv04_fifo_start, .chan = { - &nv50_fifo_dma_oclass, &nv50_fifo_gpfifo_oclass, NULL }, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c index fac2f9a45ea6..e530bb8b3b17 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c @@ -41,7 +41,7 @@ nvkm_umem_search(struct nvkm_client *client, u64 handle) object = nvkm_object_search(client, handle, &nvkm_umem); if (IS_ERR(object)) { - if (client->super && client != master) { + if (client != master) { spin_lock(&master->lock); list_for_each_entry(umem, &master->umem, head) { if (umem->object.object == handle) { @@ -53,8 +53,7 @@ nvkm_umem_search(struct nvkm_client *client, u64 handle) } } else { umem = nvkm_umem(object); - if (!umem->priv || client->super) - memory = nvkm_memory_ref(umem->memory); + memory = nvkm_memory_ref(umem->memory); } return memory ? 
memory : ERR_PTR(-ENOENT); @@ -167,7 +166,6 @@ nvkm_umem_new(const struct nvkm_oclass *oclass, void *argv, u32 argc, nvkm_object_ctor(&nvkm_umem, oclass, &umem->object); umem->mmu = mmu; umem->type = mmu->type[type].type; - umem->priv = oclass->client->super; INIT_LIST_HEAD(&umem->head); *pobject = &umem->object; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h index 85cf692d620a..d56a594016cc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h @@ -8,7 +8,6 @@ struct nvkm_umem { struct nvkm_object object; struct nvkm_mmu *mmu; u8 type:8; - bool priv:1; bool mappable:1; bool io:1; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c index 0e4b8941da37..6870fda4b188 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c @@ -34,7 +34,7 @@ nvkm_ummu_sclass(struct nvkm_object *object, int index, { struct nvkm_mmu *mmu = nvkm_ummu(object)->mmu; - if (mmu->func->mem.user.oclass && oclass->client->super) { + if (mmu->func->mem.user.oclass) { if (index-- == 0) { oclass->base = mmu->func->mem.user; oclass->ctor = nvkm_umem_new; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c index c43b8248c682..d6a1f8d04c09 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c @@ -45,7 +45,6 @@ nvkm_uvmm_search(struct nvkm_client *client, u64 handle) static int nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc) { - struct nvkm_client *client = uvmm->object.client; union { struct nvif_vmm_pfnclr_v0 v0; } *args = argv; @@ -59,9 +58,6 @@ nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc) } else return ret; - if (!client->super) - return -ENOENT; - if (size) { mutex_lock(&vmm->mutex); ret = nvkm_vmm_pfn_unmap(vmm, addr, size); @@ -74,7 +70,6 @@ nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc) static int nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc) { - struct nvkm_client *client = uvmm->object.client; union { struct nvif_vmm_pfnmap_v0 v0; } *args = argv; @@ -93,9 +88,6 @@ nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc) } else return ret; - if (!client->super) - return -ENOENT; - if (size) { mutex_lock(&vmm->mutex); ret = nvkm_vmm_pfn_map(vmm, page, addr, size, phys); @@ -108,7 +100,6 @@ nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc) static int nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc) { - struct nvkm_client *client = uvmm->object.client; union { struct nvif_vmm_unmap_v0 v0; } *args = argv; @@ -130,9 +121,8 @@ nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc) goto done; } - if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) { - VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr, - vma->user, !client->super, vma->busy); + if (ret = -ENOENT, vma->busy) { + VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy); goto done; } @@ -181,9 +171,8 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc) goto fail; } - if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) { - VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr, - vma->user, !client->super, vma->busy); + if (ret = -ENOENT, vma->busy) { + VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy); goto fail; } @@ -230,7 +219,6 @@ fail: static 
int nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc) { - struct nvkm_client *client = uvmm->object.client; union { struct nvif_vmm_put_v0 v0; } *args = argv; @@ -252,9 +240,8 @@ nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc) goto done; } - if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) { - VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr, - vma->user, !client->super, vma->busy); + if (ret = -ENOENT, vma->busy) { + VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy); goto done; } @@ -268,7 +255,6 @@ done: static int nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc) { - struct nvkm_client *client = uvmm->object.client; union { struct nvif_vmm_get_v0 v0; } *args = argv; @@ -297,7 +283,6 @@ nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc) return ret; args->v0.addr = vma->addr; - vma->user = !client->super; return ret; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c index 710f3f8dc7c9..8bf00b396ec1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c @@ -774,7 +774,6 @@ nvkm_vma_tail(struct nvkm_vma *vma, u64 tail) new->refd = vma->refd; new->used = vma->used; new->part = vma->part; - new->user = vma->user; new->busy = vma->busy; new->mapped = vma->mapped; list_add(&new->head, &vma->head); @@ -951,7 +950,7 @@ nvkm_vmm_node_split(struct nvkm_vmm *vmm, static void nvkm_vma_dump(struct nvkm_vma *vma) { - printk(KERN_ERR "%016llx %016llx %c%c%c%c%c%c%c%c%c %p\n", + printk(KERN_ERR "%016llx %016llx %c%c%c%c%c%c%c%c %p\n", vma->addr, (u64)vma->size, vma->used ? '-' : 'F', vma->mapref ? 'R' : '-', @@ -959,7 +958,6 @@ nvkm_vma_dump(struct nvkm_vma *vma) vma->page != NVKM_VMA_PAGE_NONE ? '0' + vma->page : '-', vma->refd != NVKM_VMA_PAGE_NONE ? '0' + vma->refd : '-', vma->part ? 'P' : '-', - vma->user ? 'U' : '-', vma->busy ? 'B' : '-', vma->mapped ? 
'M' : '-', vma->memory); @@ -1024,7 +1022,6 @@ nvkm_vmm_ctor_managed(struct nvkm_vmm *vmm, u64 addr, u64 size) vma->mapref = true; vma->sparse = false; vma->used = true; - vma->user = true; nvkm_vmm_node_insert(vmm, vma); list_add_tail(&vma->head, &vmm->list); return 0; @@ -1615,7 +1612,6 @@ nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma) vma->page = NVKM_VMA_PAGE_NONE; vma->refd = NVKM_VMA_PAGE_NONE; vma->used = false; - vma->user = false; nvkm_vmm_put_region(vmm, vma); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c index f02abd9cb4dd..b5e733783b5b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c @@ -534,15 +534,13 @@ int gp100_vmm_mthd(struct nvkm_vmm *vmm, struct nvkm_client *client, u32 mthd, void *argv, u32 argc) { - if (client->super) { - switch (mthd) { - case GP100_VMM_VN_FAULT_REPLAY: - return gp100_vmm_fault_replay(vmm, argv, argc); - case GP100_VMM_VN_FAULT_CANCEL: - return gp100_vmm_fault_cancel(vmm, argv, argc); - default: - break; - } + switch (mthd) { + case GP100_VMM_VN_FAULT_REPLAY: + return gp100_vmm_fault_replay(vmm, argv, argc); + case GP100_VMM_VN_FAULT_CANCEL: + return gp100_vmm_fault_cancel(vmm, argv, argc); + default: + break; } return -EINVAL; } diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c index ef70140c5b09..873cbd38e6d3 100644 --- a/drivers/gpu/drm/panel/panel-novatek-nt35510.c +++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c @@ -706,9 +706,7 @@ static int nt35510_power_on(struct nt35510 *nt) if (ret) return ret; - ret = nt35510_read_id(nt); - if (ret) - return ret; + nt35510_read_id(nt); /* Set up stuff in manufacturer control, page 1 */ ret = nt35510_send_long(nt, dsi, MCS_CMD_MAUCCTR, diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c index 2229f1af2ca8..46029c5610c8 100644 --- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c +++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c @@ -447,7 +447,6 @@ static int rpi_touchscreen_remove(struct i2c_client *i2c) drm_panel_remove(&ts->base); mipi_dsi_device_unregister(ts->dsi); - kfree(ts->dsi); return 0; } diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 21939d4352cf..1b80290c2b53 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -4166,7 +4166,7 @@ static const struct drm_display_mode yes_optoelectronics_ytc700tlag_05_201c_mode static const struct panel_desc yes_optoelectronics_ytc700tlag_05_201c = { .modes = &yes_optoelectronics_ytc700tlag_05_201c_mode, .num_modes = 1, - .bpc = 6, + .bpc = 8, .size = { .width = 154, .height = 90, diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 19fd39d9a00c..37a1b6a6ad6d 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -127,7 +127,7 @@ static void qxl_bo_move_notify(struct ttm_buffer_object *bo, struct qxl_bo *qbo; struct qxl_device *qdev; - if (!qxl_ttm_bo_is_qxl_bo(bo)) + if (!qxl_ttm_bo_is_qxl_bo(bo) || !bo->resource) return; qbo = to_qxl_bo(bo); qdev = to_qxl(qbo->tbo.base.dev); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 1b950b45cf4b..8d7fd65ccced 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -102,6 +102,9 @@ void ttm_bo_move_to_lru_tail(struct 
ttm_buffer_object *bo, return; } + if (!mem) + return; + man = ttm_manager_type(bdev, mem->mem_type); list_move_tail(&bo->lru, &man->lru[bo->priority]); diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 2f57f824e6db..763fa6f4e07d 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -63,6 +63,9 @@ int ttm_mem_io_reserve(struct ttm_device *bdev, void ttm_mem_io_free(struct ttm_device *bdev, struct ttm_resource *mem) { + if (!mem) + return; + if (!mem->bus.offset && !mem->bus.addr) return; diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c index 5f31acec3ad7..2df59b3c2ea1 100644 --- a/drivers/gpu/drm/ttm/ttm_device.c +++ b/drivers/gpu/drm/ttm/ttm_device.c @@ -44,6 +44,8 @@ static unsigned ttm_glob_use_count; struct ttm_global ttm_glob; EXPORT_SYMBOL(ttm_glob); +struct dentry *ttm_debugfs_root; + static void ttm_global_release(void) { struct ttm_global *glob = &ttm_glob; @@ -53,6 +55,7 @@ static void ttm_global_release(void) goto out; ttm_pool_mgr_fini(); + debugfs_remove(ttm_debugfs_root); __free_page(glob->dummy_read_page); memset(glob, 0, sizeof(*glob)); @@ -73,6 +76,11 @@ static int ttm_global_init(void) si_meminfo(&si); + ttm_debugfs_root = debugfs_create_dir("ttm", NULL); + if (IS_ERR(ttm_debugfs_root)) { + ttm_debugfs_root = NULL; + } + /* Limit the number of pages in the pool to about 50% of the total * system memory. */ @@ -100,6 +108,10 @@ static int ttm_global_init(void) debugfs_create_atomic_t("buffer_objects", 0444, ttm_debugfs_root, &glob->bo_count); out: + if (ret && ttm_debugfs_root) + debugfs_remove(ttm_debugfs_root); + if (ret) + --ttm_glob_use_count; mutex_unlock(&ttm_global_mutex); return ret; } diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c index 997c458f68a9..7fcdef278c74 100644 --- a/drivers/gpu/drm/ttm/ttm_module.c +++ b/drivers/gpu/drm/ttm/ttm_module.c @@ -72,22 +72,6 @@ pgprot_t ttm_prot_from_caching(enum ttm_caching caching, pgprot_t tmp) return tmp; } -struct dentry *ttm_debugfs_root; - -static int __init ttm_init(void) -{ - ttm_debugfs_root = debugfs_create_dir("ttm", NULL); - return 0; -} - -static void __exit ttm_exit(void) -{ - debugfs_remove(ttm_debugfs_root); -} - -module_init(ttm_init); -module_exit(ttm_exit); - MODULE_AUTHOR("Thomas Hellstrom, Jerome Glisse"); MODULE_DESCRIPTION("TTM memory manager subsystem (for DRM device)"); MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c index 03395386e8a7..f4b08a8705b3 100644 --- a/drivers/gpu/drm/ttm/ttm_range_manager.c +++ b/drivers/gpu/drm/ttm/ttm_range_manager.c @@ -181,6 +181,9 @@ int ttm_range_man_fini(struct ttm_device *bdev, struct drm_mm *mm = &rman->mm; int ret; + if (!man) + return 0; + ttm_resource_manager_set_used(man, false); ret = ttm_resource_manager_evict_all(bdev, man); diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index aab1b36ceb3c..c2876731ee2d 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -1857,38 +1857,46 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi) vc4_hdmi_cec_update_clk_div(vc4_hdmi); if (vc4_hdmi->variant->external_irq_controller) { - ret = devm_request_threaded_irq(&pdev->dev, - platform_get_irq_byname(pdev, "cec-rx"), - vc4_cec_irq_handler_rx_bare, - vc4_cec_irq_handler_rx_thread, 0, - "vc4 hdmi cec rx", vc4_hdmi); + ret = request_threaded_irq(platform_get_irq_byname(pdev, 
"cec-rx"), + vc4_cec_irq_handler_rx_bare, + vc4_cec_irq_handler_rx_thread, 0, + "vc4 hdmi cec rx", vc4_hdmi); if (ret) goto err_delete_cec_adap; - ret = devm_request_threaded_irq(&pdev->dev, - platform_get_irq_byname(pdev, "cec-tx"), - vc4_cec_irq_handler_tx_bare, - vc4_cec_irq_handler_tx_thread, 0, - "vc4 hdmi cec tx", vc4_hdmi); + ret = request_threaded_irq(platform_get_irq_byname(pdev, "cec-tx"), + vc4_cec_irq_handler_tx_bare, + vc4_cec_irq_handler_tx_thread, 0, + "vc4 hdmi cec tx", vc4_hdmi); if (ret) - goto err_delete_cec_adap; + goto err_remove_cec_rx_handler; } else { HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, 0xffffffff); - ret = devm_request_threaded_irq(&pdev->dev, platform_get_irq(pdev, 0), - vc4_cec_irq_handler, - vc4_cec_irq_handler_thread, 0, - "vc4 hdmi cec", vc4_hdmi); + ret = request_threaded_irq(platform_get_irq(pdev, 0), + vc4_cec_irq_handler, + vc4_cec_irq_handler_thread, 0, + "vc4 hdmi cec", vc4_hdmi); if (ret) goto err_delete_cec_adap; } ret = cec_register_adapter(vc4_hdmi->cec_adap, &pdev->dev); if (ret < 0) - goto err_delete_cec_adap; + goto err_remove_handlers; return 0; +err_remove_handlers: + if (vc4_hdmi->variant->external_irq_controller) + free_irq(platform_get_irq_byname(pdev, "cec-tx"), vc4_hdmi); + else + free_irq(platform_get_irq(pdev, 0), vc4_hdmi); + +err_remove_cec_rx_handler: + if (vc4_hdmi->variant->external_irq_controller) + free_irq(platform_get_irq_byname(pdev, "cec-rx"), vc4_hdmi); + err_delete_cec_adap: cec_delete_adapter(vc4_hdmi->cec_adap); @@ -1897,6 +1905,15 @@ err_delete_cec_adap: static void vc4_hdmi_cec_exit(struct vc4_hdmi *vc4_hdmi) { + struct platform_device *pdev = vc4_hdmi->pdev; + + if (vc4_hdmi->variant->external_irq_controller) { + free_irq(platform_get_irq_byname(pdev, "cec-rx"), vc4_hdmi); + free_irq(platform_get_irq_byname(pdev, "cec-tx"), vc4_hdmi); + } else { + free_irq(platform_get_irq(pdev, 0), vc4_hdmi); + } + cec_unregister_adapter(vc4_hdmi->cec_adap); } #else diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 6f5ea00973e0..45aeeca9b8f6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -36,6 +36,7 @@ #include <drm/drm_ioctl.h> #include <drm/drm_sysfs.h> #include <drm/ttm/ttm_bo_driver.h> +#include <drm/ttm/ttm_range_manager.h> #include <drm/ttm/ttm_placement.h> #include <generated/utsrelease.h> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index d1cef3b69e9d..5652d982b1ce 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -492,7 +492,7 @@ struct vmw_private { resource_size_t vram_start; resource_size_t vram_size; resource_size_t prim_bb_mem; - void __iomem *rmmio; + u32 __iomem *rmmio; u32 *fifo_mem; resource_size_t fifo_mem_size; uint32_t fb_max_width; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c index 5648664f71bc..f2d625415458 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c @@ -354,7 +354,6 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv, ttm_bo_unpin(bo); ttm_bo_unreserve(bo); - ttm_bo_unpin(batch->otable_bo); ttm_bo_put(batch->otable_bo); batch->otable_bo = NULL; } diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 160554903ef9..76937f716fbe 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -576,7 +576,7 @@ config HID_LOGITECH_HIDPP depends on HID_LOGITECH select POWER_SUPPLY help - Support for Logitech devices relyingon the HID++ 
Logitech specification + Support for Logitech devices relying on the HID++ Logitech specification Say Y if you want support for Logitech devices relying on the HID++ specification. Such devices are the various Logitech Touchpads (T650, diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c index 96e2577fa37e..8d68796aa905 100644 --- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c +++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c @@ -58,7 +58,7 @@ static void amd_stop_sensor_v2(struct amd_mp2_dev *privdata, u16 sensor_idx) cmd_base.cmd_v2.sensor_id = sensor_idx; cmd_base.cmd_v2.length = 16; - writeq(0x0, privdata->mmio + AMD_C2P_MSG2); + writeq(0x0, privdata->mmio + AMD_C2P_MSG1); writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG0); } diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c index 6b8f0d004d34..dc6bd4299c54 100644 --- a/drivers/hid/hid-apple.c +++ b/drivers/hid/hid-apple.c @@ -501,6 +501,8 @@ static const struct hid_device_id apple_devices[] = { APPLE_RDESC_JIS }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI), .driver_data = APPLE_HAS_FN }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI), + .driver_data = APPLE_HAS_FN }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO), .driver_data = APPLE_HAS_FN }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO), diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c index fca8fc78a78a..fb807c8e989b 100644 --- a/drivers/hid/hid-asus.c +++ b/drivers/hid/hid-asus.c @@ -485,9 +485,6 @@ static void asus_kbd_backlight_set(struct led_classdev *led_cdev, { struct asus_kbd_leds *led = container_of(led_cdev, struct asus_kbd_leds, cdev); - if (led->brightness == brightness) - return; - led->brightness = brightness; schedule_work(&led->work); } diff --git a/drivers/hid/hid-ft260.c b/drivers/hid/hid-ft260.c index f43a8406cb9a..4ef1c3b8094e 100644 --- a/drivers/hid/hid-ft260.c +++ b/drivers/hid/hid-ft260.c @@ -742,7 +742,7 @@ static int ft260_is_interface_enabled(struct hid_device *hdev) int ret; ret = ft260_get_system_config(hdev, &cfg); - if (ret) + if (ret < 0) return ret; ft260_dbg("interface: 0x%02x\n", interface); @@ -754,23 +754,16 @@ static int ft260_is_interface_enabled(struct hid_device *hdev) switch (cfg.chip_mode) { case FT260_MODE_ALL: case FT260_MODE_BOTH: - if (interface == 1) { + if (interface == 1) hid_info(hdev, "uart interface is not supported\n"); - return 0; - } - ret = 1; + else + ret = 1; break; case FT260_MODE_UART: - if (interface == 0) { - hid_info(hdev, "uart is unsupported on interface 0\n"); - ret = 0; - } + hid_info(hdev, "uart interface is not supported\n"); break; case FT260_MODE_I2C: - if (interface == 1) { - hid_info(hdev, "i2c is unsupported on interface 1\n"); - ret = 0; - } + ret = 1; break; } return ret; @@ -785,7 +778,7 @@ static int ft260_byte_show(struct hid_device *hdev, int id, u8 *cfg, int len, if (ret < 0) return ret; - return scnprintf(buf, PAGE_SIZE, "%hi\n", *field); + return scnprintf(buf, PAGE_SIZE, "%d\n", *field); } static int ft260_word_show(struct hid_device *hdev, int id, u8 *cfg, int len, @@ -797,7 +790,7 @@ static int ft260_word_show(struct hid_device *hdev, int id, u8 *cfg, int len, if (ret < 0) return ret; - return scnprintf(buf, PAGE_SIZE, "%hi\n", le16_to_cpu(*field)); + return scnprintf(buf, PAGE_SIZE, "%d\n", le16_to_cpu(*field)); } #define FT260_ATTR_SHOW(name, reptype, id, type, func) \ @@ -1004,11 +997,9 @@ err_hid_stop: static void 
ft260_remove(struct hid_device *hdev) { - int ret; struct ft260_device *dev = hid_get_drvdata(hdev); - ret = ft260_is_interface_enabled(hdev); - if (ret <= 0) + if (!dev) return; sysfs_remove_group(&hdev->dev.kobj, &ft260_attr_group); diff --git a/drivers/hid/intel-ish-hid/ishtp-hid-client.c b/drivers/hid/intel-ish-hid/ishtp-hid-client.c index 6b1fa971b33e..91bf4d01e91a 100644 --- a/drivers/hid/intel-ish-hid/ishtp-hid-client.c +++ b/drivers/hid/intel-ish-hid/ishtp-hid-client.c @@ -784,6 +784,17 @@ static void hid_ishtp_cl_reset_handler(struct work_struct *work) } } +static void hid_ishtp_cl_resume_handler(struct work_struct *work) +{ + struct ishtp_cl_data *client_data = container_of(work, struct ishtp_cl_data, resume_work); + struct ishtp_cl *hid_ishtp_cl = client_data->hid_ishtp_cl; + + if (ishtp_wait_resume(ishtp_get_ishtp_device(hid_ishtp_cl))) { + client_data->suspended = false; + wake_up_interruptible(&client_data->ishtp_resume_wait); + } +} + ishtp_print_log ishtp_hid_print_trace; /** @@ -822,6 +833,8 @@ static int hid_ishtp_cl_probe(struct ishtp_cl_device *cl_device) init_waitqueue_head(&client_data->ishtp_resume_wait); INIT_WORK(&client_data->work, hid_ishtp_cl_reset_handler); + INIT_WORK(&client_data->resume_work, hid_ishtp_cl_resume_handler); + ishtp_hid_print_trace = ishtp_trace_callback(cl_device); @@ -921,7 +934,7 @@ static int hid_ishtp_cl_resume(struct device *device) hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__, hid_ishtp_cl); - client_data->suspended = false; + schedule_work(&client_data->resume_work); return 0; } diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.h b/drivers/hid/intel-ish-hid/ishtp-hid.h index f88443a7d935..6a5cc11aefd8 100644 --- a/drivers/hid/intel-ish-hid/ishtp-hid.h +++ b/drivers/hid/intel-ish-hid/ishtp-hid.h @@ -135,6 +135,7 @@ struct ishtp_cl_data { int multi_packet_cnt; struct work_struct work; + struct work_struct resume_work; struct ishtp_cl_device *cl_device; }; diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c index f0802b047ed8..aa2c51624012 100644 --- a/drivers/hid/intel-ish-hid/ishtp/bus.c +++ b/drivers/hid/intel-ish-hid/ishtp/bus.c @@ -314,13 +314,6 @@ static int ishtp_cl_device_resume(struct device *dev) if (!device) return 0; - /* - * When ISH needs hard reset, it is done asynchrnously, hence bus - * resume will be called before full ISH resume - */ - if (device->ishtp_dev->resume_flag) - return 0; - driver = to_ishtp_cl_driver(dev->driver); if (driver && driver->driver.pm) { if (driver->driver.pm->resume) @@ -850,6 +843,28 @@ struct device *ishtp_device(struct ishtp_cl_device *device) EXPORT_SYMBOL(ishtp_device); /** + * ishtp_wait_resume() - Wait for IPC resume + * + * Wait for IPC resume + * + * Return: resume complete or not + */ +bool ishtp_wait_resume(struct ishtp_device *dev) +{ + /* 50ms to get resume response */ + #define WAIT_FOR_RESUME_ACK_MS 50 + + /* Waiting to get resume response */ + if (dev->resume_flag) + wait_event_interruptible_timeout(dev->resume_wait, + !dev->resume_flag, + msecs_to_jiffies(WAIT_FOR_RESUME_ACK_MS)); + + return (!dev->resume_flag); +} +EXPORT_SYMBOL_GPL(ishtp_wait_resume); + +/** * ishtp_get_pci_device() - Return PCI device dev pointer * This interface is used to return PCI device pointer * from ishtp_cl_device instance. 
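The intel-ish-hid hunks above split client resume into two steps: the PM resume callback only schedules a work item, and that work item blocks on the new ishtp_wait_resume() helper until the firmware acknowledges the IPC resume or the 50 ms timeout expires. A minimal sketch of the same pattern for a hypothetical client follows; the my_* names are illustrative, ishtp_get_ishtp_device() is the existing client API used in the hunk above, and ishtp_wait_resume() is the helper added in bus.c (assumed here to be exposed through the client interface header):

    #include <linux/device.h>
    #include <linux/intel-ish-client-if.h>  /* ISHTP client API */
    #include <linux/types.h>
    #include <linux/workqueue.h>

    struct my_cl_data {
            struct ishtp_cl *cl;
            bool suspended;
            struct work_struct resume_work;
    };

    /* Clear the suspended state only once the firmware has answered,
     * mirroring hid_ishtp_cl_resume_handler() in the hunk above. */
    static void my_cl_resume_handler(struct work_struct *work)
    {
            struct my_cl_data *data = container_of(work, struct my_cl_data,
                                                   resume_work);

            /* Waits up to 50 ms; true only if the resume flag cleared. */
            if (ishtp_wait_resume(ishtp_get_ishtp_device(data->cl)))
                    data->suspended = false;
    }

    static int my_cl_resume(struct device *device)
    {
            struct my_cl_data *data = dev_get_drvdata(device);

            /* Must not block here: with the early return removed from
             * ishtp_cl_device_resume(), this callback can now run before
             * ISH has fully resumed, so waiting is deferred to the work. */
            schedule_work(&data->resume_work);
            return 0;
    }

The point of the split is visible in the bus.c hunk: the resume_flag early return is dropped from ishtp_cl_device_resume(), so client resume callbacks always run and each client does its own deferred wait.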
diff --git a/drivers/hid/usbhid/Kconfig b/drivers/hid/usbhid/Kconfig index dcf3a235870f..7c2032f7f44d 100644 --- a/drivers/hid/usbhid/Kconfig +++ b/drivers/hid/usbhid/Kconfig @@ -38,7 +38,7 @@ config USB_HIDDEV help Say Y here if you want to support HID devices (from the USB specification standpoint) that aren't strictly user interface - devices, like monitor controls and Uninterruptable Power Supplies. + devices, like monitor controls and Uninterruptible Power Supplies. This module supports these devices separately using a separate event interface on /dev/usb/hiddevX (char 180:96 to 180:111). diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 81d7d12bcf34..81ba642adcb7 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -2548,6 +2548,9 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac, int slot; slot = input_mt_get_slot_by_key(input, hid_data->id); + if (slot < 0) + return; + input_mt_slot(input, slot); input_mt_report_slot_state(input, MT_TOOL_FINGER, prox); } @@ -3831,7 +3834,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev, wacom_wac->shared->touch->product == 0xF6) { input_dev->evbit[0] |= BIT_MASK(EV_SW); __set_bit(SW_MUTE_DEVICE, input_dev->swbit); - wacom_wac->shared->has_mute_touch_switch = true; + wacom_wac->has_mute_touch_switch = true; } fallthrough; diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index caf6d0c4bc1b..142308526ec6 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -605,6 +605,17 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) */ mutex_lock(&vmbus_connection.channel_mutex); + list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) { + if (guid_equal(&channel->offermsg.offer.if_type, + &newchannel->offermsg.offer.if_type) && + guid_equal(&channel->offermsg.offer.if_instance, + &newchannel->offermsg.offer.if_instance)) { + fnew = false; + newchannel->primary_channel = channel; + break; + } + } + init_vp_index(newchannel); /* Remember the channels that should be cleaned up upon suspend. */ @@ -617,16 +628,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) */ atomic_dec(&vmbus_connection.offer_in_progress); - list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) { - if (guid_equal(&channel->offermsg.offer.if_type, - &newchannel->offermsg.offer.if_type) && - guid_equal(&channel->offermsg.offer.if_instance, - &newchannel->offermsg.offer.if_instance)) { - fnew = false; - break; - } - } - if (fnew) { list_add_tail(&newchannel->listentry, &vmbus_connection.chn_list); @@ -647,7 +648,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) /* * Process the sub-channel. */ - newchannel->primary_channel = channel; list_add_tail(&newchannel->sc_list, &channel->sc_list); } @@ -684,6 +684,30 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) } /* + * Check if CPUs used by other channels of the same device. + * It should only be called by init_vp_index(). + */ +static bool hv_cpuself_used(u32 cpu, struct vmbus_channel *chn) +{ + struct vmbus_channel *primary = chn->primary_channel; + struct vmbus_channel *sc; + + lockdep_assert_held(&vmbus_connection.channel_mutex); + + if (!primary) + return false; + + if (primary->target_cpu == cpu) + return true; + + list_for_each_entry(sc, &primary->sc_list, sc_list) + if (sc != chn && sc->target_cpu == cpu) + return true; + + return false; +} + +/* * We use this state to statically distribute the channel interrupt load. 
*/ static int next_numa_node_id; @@ -702,6 +726,7 @@ static int next_numa_node_id; static void init_vp_index(struct vmbus_channel *channel) { bool perf_chn = hv_is_perf_channel(channel); + u32 i, ncpu = num_online_cpus(); cpumask_var_t available_mask; struct cpumask *alloced_mask; u32 target_cpu; @@ -724,31 +749,38 @@ static void init_vp_index(struct vmbus_channel *channel) return; } - while (true) { - numa_node = next_numa_node_id++; - if (numa_node == nr_node_ids) { - next_numa_node_id = 0; - continue; + for (i = 1; i <= ncpu + 1; i++) { + while (true) { + numa_node = next_numa_node_id++; + if (numa_node == nr_node_ids) { + next_numa_node_id = 0; + continue; + } + if (cpumask_empty(cpumask_of_node(numa_node))) + continue; + break; + } + alloced_mask = &hv_context.hv_numa_map[numa_node]; + + if (cpumask_weight(alloced_mask) == + cpumask_weight(cpumask_of_node(numa_node))) { + /* + * We have cycled through all the CPUs in the node; + * reset the alloced map. + */ + cpumask_clear(alloced_mask); } - if (cpumask_empty(cpumask_of_node(numa_node))) - continue; - break; - } - alloced_mask = &hv_context.hv_numa_map[numa_node]; - if (cpumask_weight(alloced_mask) == - cpumask_weight(cpumask_of_node(numa_node))) { - /* - * We have cycled through all the CPUs in the node; - * reset the alloced map. - */ - cpumask_clear(alloced_mask); - } + cpumask_xor(available_mask, alloced_mask, + cpumask_of_node(numa_node)); - cpumask_xor(available_mask, alloced_mask, cpumask_of_node(numa_node)); + target_cpu = cpumask_first(available_mask); + cpumask_set_cpu(target_cpu, alloced_mask); - target_cpu = cpumask_first(available_mask); - cpumask_set_cpu(target_cpu, alloced_mask); + if (channel->offermsg.offer.sub_channel_index >= ncpu || + i > ncpu || !hv_cpuself_used(target_cpu, channel)) + break; + } channel->target_cpu = target_cpu; diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig index 84530fd80998..f026e5c0e777 100644 --- a/drivers/hwtracing/coresight/Kconfig +++ b/drivers/hwtracing/coresight/Kconfig @@ -8,6 +8,7 @@ menuconfig CORESIGHT depends on OF || ACPI select ARM_AMBA select PERF_EVENTS + select CONFIGFS_FS help This framework provides a kernel interface for the CoreSight debug and trace drivers to register themselves with. 
It's intended to build diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile index d60816509755..b6c4a48140ec 100644 --- a/drivers/hwtracing/coresight/Makefile +++ b/drivers/hwtracing/coresight/Makefile @@ -4,7 +4,9 @@ # obj-$(CONFIG_CORESIGHT) += coresight.o coresight-y := coresight-core.o coresight-etm-perf.o coresight-platform.o \ - coresight-sysfs.o + coresight-sysfs.o coresight-syscfg.o coresight-config.o \ + coresight-cfg-preload.o coresight-cfg-afdo.o \ + coresight-syscfg-configfs.o obj-$(CONFIG_CORESIGHT_LINK_AND_SINK_TMC) += coresight-tmc.o coresight-tmc-y := coresight-tmc-core.o coresight-tmc-etf.o \ coresight-tmc-etr.o @@ -16,7 +18,8 @@ obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o coresight-etm3x-y := coresight-etm3x-core.o coresight-etm-cp14.o \ coresight-etm3x-sysfs.o obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o -coresight-etm4x-y := coresight-etm4x-core.o coresight-etm4x-sysfs.o +coresight-etm4x-y := coresight-etm4x-core.o coresight-etm4x-sysfs.o \ + coresight-etm4x-cfg.o obj-$(CONFIG_CORESIGHT_STM) += coresight-stm.o obj-$(CONFIG_CORESIGHT_CPU_DEBUG) += coresight-cpu-debug.o obj-$(CONFIG_CORESIGHT_CATU) += coresight-catu.o diff --git a/drivers/hwtracing/coresight/coresight-cfg-afdo.c b/drivers/hwtracing/coresight/coresight-cfg-afdo.c new file mode 100644 index 000000000000..84b31184252b --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-cfg-afdo.c @@ -0,0 +1,153 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright(C) 2020 Linaro Limited. All rights reserved. + * Author: Mike Leach <mike.leach@linaro.org> + */ + +#include "coresight-config.h" + +/* ETMv4 includes and features */ +#if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM4X) +#include "coresight-etm4x-cfg.h" + +/* preload configurations and features */ + +/* preload in features for ETMv4 */ + +/* strobe feature */ +static struct cscfg_parameter_desc strobe_params[] = { + { + .name = "window", + .value = 5000, + }, + { + .name = "period", + .value = 10000, + }, +}; + +static struct cscfg_regval_desc strobe_regs[] = { + /* resource selectors */ + { + .type = CS_CFG_REG_TYPE_RESOURCE, + .offset = TRCRSCTLRn(2), + .hw_info = ETM4_CFG_RES_SEL, + .val32 = 0x20001, + }, + { + .type = CS_CFG_REG_TYPE_RESOURCE, + .offset = TRCRSCTLRn(3), + .hw_info = ETM4_CFG_RES_SEQ, + .val32 = 0x20002, + }, + /* strobe window counter 0 - reload from param 0 */ + { + .type = CS_CFG_REG_TYPE_RESOURCE | CS_CFG_REG_TYPE_VAL_SAVE, + .offset = TRCCNTVRn(0), + .hw_info = ETM4_CFG_RES_CTR, + }, + { + .type = CS_CFG_REG_TYPE_RESOURCE | CS_CFG_REG_TYPE_VAL_PARAM, + .offset = TRCCNTRLDVRn(0), + .hw_info = ETM4_CFG_RES_CTR, + .val32 = 0, + }, + { + .type = CS_CFG_REG_TYPE_RESOURCE, + .offset = TRCCNTCTLRn(0), + .hw_info = ETM4_CFG_RES_CTR, + .val32 = 0x10001, + }, + /* strobe period counter 1 - reload from param 1 */ + { + .type = CS_CFG_REG_TYPE_RESOURCE | CS_CFG_REG_TYPE_VAL_SAVE, + .offset = TRCCNTVRn(1), + .hw_info = ETM4_CFG_RES_CTR, + }, + { + .type = CS_CFG_REG_TYPE_RESOURCE | CS_CFG_REG_TYPE_VAL_PARAM, + .offset = TRCCNTRLDVRn(1), + .hw_info = ETM4_CFG_RES_CTR, + .val32 = 1, + }, + { + .type = CS_CFG_REG_TYPE_RESOURCE, + .offset = TRCCNTCTLRn(1), + .hw_info = ETM4_CFG_RES_CTR, + .val32 = 0x8102, + }, + /* sequencer */ + { + .type = CS_CFG_REG_TYPE_RESOURCE, + .offset = TRCSEQEVRn(0), + .hw_info = ETM4_CFG_RES_SEQ, + .val32 = 0x0081, + }, + { + .type = CS_CFG_REG_TYPE_RESOURCE, + .offset = TRCSEQEVRn(1), + .hw_info = ETM4_CFG_RES_SEQ, + .val32 = 0x0000, + }, + /* view-inst */ + { + 
.type = CS_CFG_REG_TYPE_STD | CS_CFG_REG_TYPE_VAL_MASK, + .offset = TRCVICTLR, + .val32 = 0x0003, + .mask32 = 0x0003, + }, + /* end of regs */ +}; + +struct cscfg_feature_desc strobe_etm4x = { + .name = "strobing", + .description = "Generate periodic trace capture windows.\n" + "parameter \'window\': a number of CPU cycles (W)\n" + "parameter \'period\': trace enabled for W cycles every period x W cycles\n", + .match_flags = CS_CFG_MATCH_CLASS_SRC_ETM4, + .nr_params = ARRAY_SIZE(strobe_params), + .params_desc = strobe_params, + .nr_regs = ARRAY_SIZE(strobe_regs), + .regs_desc = strobe_regs, +}; + +/* create an autofdo configuration */ + +/* we will provide 9 sets of preset parameter values */ +#define AFDO_NR_PRESETS 9 +/* the total number of parameters in used features */ +#define AFDO_NR_PARAMS ARRAY_SIZE(strobe_params) + +static const char *afdo_ref_names[] = { + "strobing", +}; + +/* + * set of presets leaves strobing window constant while varying period to allow + * experimentation with mark / space ratios for various workloads + */ +static u64 afdo_presets[AFDO_NR_PRESETS][AFDO_NR_PARAMS] = { + { 5000, 2 }, + { 5000, 4 }, + { 5000, 8 }, + { 5000, 16 }, + { 5000, 64 }, + { 5000, 128 }, + { 5000, 512 }, + { 5000, 1024 }, + { 5000, 4096 }, +}; + +struct cscfg_config_desc afdo_etm4x = { + .name = "autofdo", + .description = "Setup ETMs with strobing for autofdo\n" + "Supplied presets allow experimentation with mark-space ratio for various loads\n", + .nr_feat_refs = ARRAY_SIZE(afdo_ref_names), + .feat_ref_names = afdo_ref_names, + .nr_presets = AFDO_NR_PRESETS, + .nr_total_params = AFDO_NR_PARAMS, + .presets = &afdo_presets[0][0], +}; + +/* end of ETM4x configurations */ +#endif /* IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM4X) */ diff --git a/drivers/hwtracing/coresight/coresight-cfg-preload.c b/drivers/hwtracing/coresight/coresight-cfg-preload.c new file mode 100644 index 000000000000..751af3710d56 --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-cfg-preload.c @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright(C) 2020 Linaro Limited. All rights reserved. + * Author: Mike Leach <mike.leach@linaro.org> + */ + +#include "coresight-cfg-preload.h" +#include "coresight-config.h" +#include "coresight-syscfg.h" + +/* Basic features and configurations pre-loaded on initialisation */ + +static struct cscfg_feature_desc *preload_feats[] = { +#if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM4X) + &strobe_etm4x, +#endif + NULL +}; + +static struct cscfg_config_desc *preload_cfgs[] = { +#if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM4X) + &afdo_etm4x, +#endif + NULL +}; + +/* preload called on initialisation */ +int cscfg_preload(void) +{ + return cscfg_load_config_sets(preload_cfgs, preload_feats); +} diff --git a/drivers/hwtracing/coresight/coresight-cfg-preload.h b/drivers/hwtracing/coresight/coresight-cfg-preload.h new file mode 100644 index 000000000000..21299e175477 --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-cfg-preload.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright(C) 2020 Linaro Limited. All rights reserved. 
+ * Author: Mike Leach <mike.leach@linaro.org> + */ + +/* declare preloaded configurations and features */ + +/* from coresight-cfg-afdo.c - etm 4x features */ +#if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM4X) +extern struct cscfg_feature_desc strobe_etm4x; +extern struct cscfg_config_desc afdo_etm4x; +#endif diff --git a/drivers/hwtracing/coresight/coresight-config.c b/drivers/hwtracing/coresight/coresight-config.c new file mode 100644 index 000000000000..4723bf7402a2 --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-config.c @@ -0,0 +1,272 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright(C) 2020 Linaro Limited. All rights reserved. + * Author: Mike Leach <mike.leach@linaro.org> + */ + +#include <linux/sysfs.h> +#include "coresight-config.h" +#include "coresight-priv.h" + +/* + * This provides a set of generic functions that operate on configurations + * and features to manage the handling of parameters, the programming and + * saving of registers used by features on devices. + */ + +/* + * Write the value held in the register structure into the driver internal memory + * location. + */ +static void cscfg_set_reg(struct cscfg_regval_csdev *reg_csdev) +{ + u32 *p_val32 = (u32 *)reg_csdev->driver_regval; + u32 tmp32 = reg_csdev->reg_desc.val32; + + if (reg_csdev->reg_desc.type & CS_CFG_REG_TYPE_VAL_64BIT) { + *((u64 *)reg_csdev->driver_regval) = reg_csdev->reg_desc.val64; + return; + } + + if (reg_csdev->reg_desc.type & CS_CFG_REG_TYPE_VAL_MASK) { + tmp32 = *p_val32; + tmp32 &= ~reg_csdev->reg_desc.mask32; + tmp32 |= reg_csdev->reg_desc.val32 & reg_csdev->reg_desc.mask32; + } + *p_val32 = tmp32; +} + +/* + * Read the driver value into the reg if this is marked as one we want to save. + */ +static void cscfg_save_reg(struct cscfg_regval_csdev *reg_csdev) +{ + if (!(reg_csdev->reg_desc.type & CS_CFG_REG_TYPE_VAL_SAVE)) + return; + if (reg_csdev->reg_desc.type & CS_CFG_REG_TYPE_VAL_64BIT) + reg_csdev->reg_desc.val64 = *(u64 *)(reg_csdev->driver_regval); + else + reg_csdev->reg_desc.val32 = *(u32 *)(reg_csdev->driver_regval); +} + +/* + * Some register values are set from parameters. Initialise these registers + * from the current parameter values. 
+ */ +static void cscfg_init_reg_param(struct cscfg_feature_csdev *feat_csdev, + struct cscfg_regval_desc *reg_desc, + struct cscfg_regval_csdev *reg_csdev) +{ + struct cscfg_parameter_csdev *param_csdev; + + /* for param, load routines have validated the index */ + param_csdev = &feat_csdev->params_csdev[reg_desc->param_idx]; + param_csdev->reg_csdev = reg_csdev; + param_csdev->val64 = reg_csdev->reg_desc.type & CS_CFG_REG_TYPE_VAL_64BIT; + + if (param_csdev->val64) + reg_csdev->reg_desc.val64 = param_csdev->current_value; + else + reg_csdev->reg_desc.val32 = (u32)param_csdev->current_value; +} + +/* set values into the driver locations referenced in cscfg_reg_csdev */ +static int cscfg_set_on_enable(struct cscfg_feature_csdev *feat_csdev) +{ + unsigned long flags; + int i; + + spin_lock_irqsave(feat_csdev->drv_spinlock, flags); + for (i = 0; i < feat_csdev->nr_regs; i++) + cscfg_set_reg(&feat_csdev->regs_csdev[i]); + spin_unlock_irqrestore(feat_csdev->drv_spinlock, flags); + dev_dbg(&feat_csdev->csdev->dev, "Feature %s: %s", + feat_csdev->feat_desc->name, "set on enable"); + return 0; +} + +/* copy back values from the driver locations referenced in cscfg_reg_csdev */ +static void cscfg_save_on_disable(struct cscfg_feature_csdev *feat_csdev) +{ + unsigned long flags; + int i; + + spin_lock_irqsave(feat_csdev->drv_spinlock, flags); + for (i = 0; i < feat_csdev->nr_regs; i++) + cscfg_save_reg(&feat_csdev->regs_csdev[i]); + spin_unlock_irqrestore(feat_csdev->drv_spinlock, flags); + dev_dbg(&feat_csdev->csdev->dev, "Feature %s: %s", + feat_csdev->feat_desc->name, "save on disable"); +} + +/* default reset - restore default values */ +void cscfg_reset_feat(struct cscfg_feature_csdev *feat_csdev) +{ + struct cscfg_regval_desc *reg_desc; + struct cscfg_regval_csdev *reg_csdev; + int i; + + /* + * set the default values for all parameters and regs from the + * relevant static descriptors. + */ + for (i = 0; i < feat_csdev->nr_params; i++) + feat_csdev->params_csdev[i].current_value = + feat_csdev->feat_desc->params_desc[i].value; + + for (i = 0; i < feat_csdev->nr_regs; i++) { + reg_desc = &feat_csdev->feat_desc->regs_desc[i]; + reg_csdev = &feat_csdev->regs_csdev[i]; + reg_csdev->reg_desc.type = reg_desc->type; + + /* check if reg set from a parameter otherwise desc default */ + if (reg_desc->type & CS_CFG_REG_TYPE_VAL_PARAM) + cscfg_init_reg_param(feat_csdev, reg_desc, reg_csdev); + else + /* + * for normal values the union between val64 & val32 + mask32 + * allows us to init using the 64 bit value + */ + reg_csdev->reg_desc.val64 = reg_desc->val64; + } +} + +/* + * For the selected presets, we set the register associated with the parameter, to + * the value of the preset index associated with the parameter. + */ +static int cscfg_update_presets(struct cscfg_config_csdev *config_csdev, int preset) +{ + int i, j, val_idx = 0, nr_cfg_params; + struct cscfg_parameter_csdev *param_csdev; + struct cscfg_feature_csdev *feat_csdev; + const struct cscfg_config_desc *config_desc = config_csdev->config_desc; + const char *name; + const u64 *preset_base; + u64 val; + + /* preset in range 1 to nr_presets */ + if (preset < 1 || preset > config_desc->nr_presets) + return -EINVAL; + /* + * Go through the array of features, assigning preset values to + * feature parameters in the order they appear. + * There should be precisely the same number of preset values as the + * sum of number of parameters over all the features - but we will + * ensure there is no overrun. 
+ */ + nr_cfg_params = config_desc->nr_total_params; + preset_base = &config_desc->presets[(preset - 1) * nr_cfg_params]; + for (i = 0; i < config_csdev->nr_feat; i++) { + feat_csdev = config_csdev->feats_csdev[i]; + if (!feat_csdev->nr_params) + continue; + + for (j = 0; j < feat_csdev->nr_params; j++) { + param_csdev = &feat_csdev->params_csdev[j]; + name = feat_csdev->feat_desc->params_desc[j].name; + val = preset_base[val_idx++]; + if (param_csdev->val64) { + dev_dbg(&config_csdev->csdev->dev, + "set param %s (%lld)", name, val); + param_csdev->reg_csdev->reg_desc.val64 = val; + } else { + param_csdev->reg_csdev->reg_desc.val32 = (u32)val; + dev_dbg(&config_csdev->csdev->dev, + "set param %s (%d)", name, (u32)val); + } + } + + /* exit early if all params filled */ + if (val_idx >= nr_cfg_params) + break; + } + return 0; +} + +/* + * if we are not using a preset, then need to update the feature params + * with current values. This sets the register associated with the parameter + * with the current value of that parameter. + */ +static int cscfg_update_curr_params(struct cscfg_config_csdev *config_csdev) +{ + int i, j; + struct cscfg_feature_csdev *feat_csdev; + struct cscfg_parameter_csdev *param_csdev; + const char *name; + u64 val; + + for (i = 0; i < config_csdev->nr_feat; i++) { + feat_csdev = config_csdev->feats_csdev[i]; + if (!feat_csdev->nr_params) + continue; + for (j = 0; j < feat_csdev->nr_params; j++) { + param_csdev = &feat_csdev->params_csdev[j]; + name = feat_csdev->feat_desc->params_desc[j].name; + val = param_csdev->current_value; + if (param_csdev->val64) { + dev_dbg(&config_csdev->csdev->dev, + "set param %s (%lld)", name, val); + param_csdev->reg_csdev->reg_desc.val64 = val; + } else { + param_csdev->reg_csdev->reg_desc.val32 = (u32)val; + dev_dbg(&config_csdev->csdev->dev, + "set param %s (%d)", name, (u32)val); + } + } + } + return 0; +} + +/* + * Configuration values will be programmed into the driver locations if enabling, or read + * from relevant locations on disable. + */ +static int cscfg_prog_config(struct cscfg_config_csdev *config_csdev, bool enable) +{ + int i, err = 0; + struct cscfg_feature_csdev *feat_csdev; + struct coresight_device *csdev; + + for (i = 0; i < config_csdev->nr_feat; i++) { + feat_csdev = config_csdev->feats_csdev[i]; + csdev = feat_csdev->csdev; + dev_dbg(&csdev->dev, "cfg %s; %s feature:%s", config_csdev->config_desc->name, + enable ? "enable" : "disable", feat_csdev->feat_desc->name); + + if (enable) + err = cscfg_set_on_enable(feat_csdev); + else + cscfg_save_on_disable(feat_csdev); + + if (err) + break; + } + return err; +} + +/* + * Enable configuration for the device. Will result in the internal driver data + * being updated ready for programming into the device. + * + * @config_csdev: config_csdev to set. + * @preset: preset values to use - 0 for default. 
+ */ +int cscfg_csdev_enable_config(struct cscfg_config_csdev *config_csdev, int preset) +{ + int err = 0; + + if (preset) + err = cscfg_update_presets(config_csdev, preset); + else + err = cscfg_update_curr_params(config_csdev); + if (!err) + err = cscfg_prog_config(config_csdev, true); + return err; +} + +void cscfg_csdev_disable_config(struct cscfg_config_csdev *config_csdev) +{ + cscfg_prog_config(config_csdev, false); +} diff --git a/drivers/hwtracing/coresight/coresight-config.h b/drivers/hwtracing/coresight/coresight-config.h new file mode 100644 index 000000000000..25eb6c632692 --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-config.h @@ -0,0 +1,253 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2020 Linaro Limited, All rights reserved. + * Author: Mike Leach <mike.leach@linaro.org> + */ + +#ifndef _CORESIGHT_CORESIGHT_CONFIG_H +#define _CORESIGHT_CORESIGHT_CONFIG_H + +#include <linux/coresight.h> +#include <linux/types.h> + +/* CoreSight Configuration Management - component and system wide configuration */ + +/* + * Register type flags for register value descriptor: + * describe how the value is interpreted, and handled. + */ +#define CS_CFG_REG_TYPE_STD 0x80 /* reg is standard reg */ +#define CS_CFG_REG_TYPE_RESOURCE 0x40 /* reg is a resource */ +#define CS_CFG_REG_TYPE_VAL_PARAM 0x08 /* reg value uses param */ +#define CS_CFG_REG_TYPE_VAL_MASK 0x04 /* reg value bit masked */ +#define CS_CFG_REG_TYPE_VAL_64BIT 0x02 /* reg value 64 bit */ +#define CS_CFG_REG_TYPE_VAL_SAVE 0x01 /* reg value save on disable */ + +/* + * flags defining what device class a feature will match to when processing a + * system configuration - used by config data and devices. + */ +#define CS_CFG_MATCH_CLASS_SRC_ALL 0x0001 /* match any source */ +#define CS_CFG_MATCH_CLASS_SRC_ETM4 0x0002 /* match any ETMv4 device */ + +/* flags defining device instance matching - used in config match desc data. */ +#define CS_CFG_MATCH_INST_ANY 0x80000000 /* any instance of a class */ + +/* + * Limit number of presets in a configuration + * This is related to the number of bits (4) we use to select the preset on + * the perf command line. Preset 0 is always none selected. + * See PMU_FORMAT_ATTR(preset, "config:0-3") in coresight-etm-perf.c + */ +#define CS_CFG_CONFIG_PRESET_MAX 15 + +/** + * Parameter descriptor for a device feature. + * + * @name: Name of parameter. + * @value: Initial or default value. + */ +struct cscfg_parameter_desc { + const char *name; + u64 value; +}; + +/** + * Representation of register value and a descriptor of register usage. + * + * Used as a descriptor in the feature descriptors. + * Used as a value in when in a feature loading into a csdev. + * + * Supports full 64 bit register value, or 32 bit value with optional mask + * value. + * + * @type: define register usage and interpretation. + * @offset: the address offset for register in the hardware device (per device specification). + * @hw_info: optional hardware device type specific information. (ETM / CTI specific etc) + * @val64: 64 bit value. + * @val32: 32 bit value. + * @mask32: 32 bit mask when using 32 bit value to access device register - if mask type. + * @param_idx: parameter index value into parameter array if param type. 
+ */ +struct cscfg_regval_desc { + struct { + u32 type:8; + u32 offset:12; + u32 hw_info:12; + }; + union { + u64 val64; + struct { + u32 val32; + u32 mask32; + }; + u32 param_idx; + }; +}; + +/** + * Device feature descriptor - combination of registers and parameters to + * program a device to implement a specific complex function. + * + * @name: feature name. + * @description: brief description of the feature. + * @item: List entry. + * @match_flags: matching information if loading into a device + * @nr_params: number of parameters used. + * @params_desc: array of parameters used. + * @nr_regs: number of registers used. + * @regs_desc: array of registers used. + */ +struct cscfg_feature_desc { + const char *name; + const char *description; + struct list_head item; + u32 match_flags; + int nr_params; + struct cscfg_parameter_desc *params_desc; + int nr_regs; + struct cscfg_regval_desc *regs_desc; +}; + +/** + * Configuration descriptor - describes selectable system configuration. + * + * A configuration describes device features in use, and may provide preset + * values for the parameters in those features. + * + * A single set of presets is the sum of the parameters declared by + * all the features in use - this value is @nr_total_params. + * + * @name: name of the configuration - used for selection. + * @description: description of the purpose of the configuration. + * @item: list entry. + * @nr_feat_refs: Number of features used in this configuration. + * @feat_ref_names: references to features used in this configuration. + * @nr_presets: Number of sets of presets supplied by this configuration. + * @nr_total_params: Sum of all parameters declared by used features + * @presets: Array of preset values. + * @event_ea: Extended attribute for perf event value + * @active_cnt: ref count for activate on this configuration. + * + */ +struct cscfg_config_desc { + const char *name; + const char *description; + struct list_head item; + int nr_feat_refs; + const char **feat_ref_names; + int nr_presets; + int nr_total_params; + const u64 *presets; /* nr_presets * nr_total_params */ + struct dev_ext_attribute *event_ea; + atomic_t active_cnt; +}; + +/** + * config register instance - part of a loaded feature. + * maps register values to csdev driver structures + * + * @reg_desc: value to use when setting feature on device / store for + * readback of volatile values. + * @driver_regval: pointer to internal driver element used to set the value + * in hardware. + */ +struct cscfg_regval_csdev { + struct cscfg_regval_desc reg_desc; + void *driver_regval; +}; + +/** + * config parameter instance - part of a loaded feature. + * + * @feat_csdev: parent feature + * @reg_csdev: register value updated by this parameter. + * @current_value: current value of parameter - may be set by user via + * sysfs, or modified during device operation. + * @val64: true if 64 bit value + */ +struct cscfg_parameter_csdev { + struct cscfg_feature_csdev *feat_csdev; + struct cscfg_regval_csdev *reg_csdev; + u64 current_value; + bool val64; +}; + +/** + * Feature instance loaded into a CoreSight device. + * + * When a feature is loaded into a specific device, then this structure holds + * the connections between the register / parameter values used and the + * internal data structures that are written when the feature is enabled. + * + * Since applying a feature modifies internal data structures in the device, + * then we have a reference to the device spinlock to protect access to these + * structures (@drv_spinlock). 
+ * + * @feat_desc: pointer to the static descriptor for this feature. + * @csdev: parent CoreSight device instance. + * @node: list entry into feature list for this device. + * @drv_spinlock: device spinlock for access to driver register data. + * @nr_params: number of parameters. + * @params_csdev: current parameter values on this device + * @nr_regs: number of registers to be programmed. + * @regs_csdev: Programming details for the registers + */ +struct cscfg_feature_csdev { + const struct cscfg_feature_desc *feat_desc; + struct coresight_device *csdev; + struct list_head node; + spinlock_t *drv_spinlock; + int nr_params; + struct cscfg_parameter_csdev *params_csdev; + int nr_regs; + struct cscfg_regval_csdev *regs_csdev; +}; + +/** + * Configuration instance when loaded into a CoreSight device. + * + * The instance contains references to loaded features on this device that are + * used by the configuration. + * + * @config_desc:reference to the descriptor for this configuration + * @csdev: parent coresight device for this configuration instance. + * @enabled: true if configuration is enabled on this device. + * @node: list entry within the coresight device + * @nr_feat: Number of features on this device that are used in the + * configuration. + * @feats_csdev:references to the device features to enable. + */ +struct cscfg_config_csdev { + const struct cscfg_config_desc *config_desc; + struct coresight_device *csdev; + bool enabled; + struct list_head node; + int nr_feat; + struct cscfg_feature_csdev *feats_csdev[0]; +}; + +/** + * Coresight device operations. + * + * Registered coresight devices provide these operations to manage feature + * instances compatible with the device hardware and drivers + * + * @load_feat: Pass a feature descriptor into the device and create the + * loaded feature instance (struct cscfg_feature_csdev). 
+ */ +struct cscfg_csdev_feat_ops { + int (*load_feat)(struct coresight_device *csdev, + struct cscfg_feature_csdev *feat_csdev); +}; + +/* coresight config helper functions*/ + +/* enable / disable config on a device - called with appropriate locks set.*/ +int cscfg_csdev_enable_config(struct cscfg_config_csdev *config_csdev, int preset); +void cscfg_csdev_disable_config(struct cscfg_config_csdev *config_csdev); + +/* reset a feature to default values */ +void cscfg_reset_feat(struct cscfg_feature_csdev *feat_csdev); + +#endif /* _CORESIGHT_CORESIGHT_CONFIG_H */ diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c index 1002605db8ba..8a18c71df37a 100644 --- a/drivers/hwtracing/coresight/coresight-core.c +++ b/drivers/hwtracing/coresight/coresight-core.c @@ -21,6 +21,7 @@ #include "coresight-etm-perf.h" #include "coresight-priv.h" +#include "coresight-syscfg.h" static DEFINE_MUTEX(coresight_mutex); static DEFINE_PER_CPU(struct coresight_device *, csdev_sink); @@ -1763,13 +1764,22 @@ static int __init coresight_init(void) ret = etm_perf_init(); if (ret) - bus_unregister(&coresight_bustype); + goto exit_bus_unregister; + /* initialise the coresight syscfg API */ + ret = cscfg_init(); + if (!ret) + return 0; + + etm_perf_exit(); +exit_bus_unregister: + bus_unregister(&coresight_bustype); return ret; } static void __exit coresight_exit(void) { + cscfg_exit(); etm_perf_exit(); bus_unregister(&coresight_bustype); } diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c index 9731d3a96073..00de46565bc4 100644 --- a/drivers/hwtracing/coresight/coresight-cpu-debug.c +++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c @@ -588,11 +588,11 @@ static int debug_probe(struct amba_device *adev, const struct amba_id *id) drvdata->base = base; - get_online_cpus(); + cpus_read_lock(); per_cpu(debug_drvdata, drvdata->cpu) = drvdata; ret = smp_call_function_single(drvdata->cpu, debug_init_arch_data, drvdata, 1); - put_online_cpus(); + cpus_read_unlock(); if (ret) { dev_err(dev, "CPU%d debug arch init failed\n", drvdata->cpu); diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c index 6f398377fec9..8ebd728d3a80 100644 --- a/drivers/hwtracing/coresight/coresight-etm-perf.c +++ b/drivers/hwtracing/coresight/coresight-etm-perf.c @@ -18,8 +18,10 @@ #include <linux/types.h> #include <linux/workqueue.h> +#include "coresight-config.h" #include "coresight-etm-perf.h" #include "coresight-priv.h" +#include "coresight-syscfg.h" static struct pmu etm_pmu; static bool etm_perf_up; @@ -57,8 +59,13 @@ PMU_FORMAT_ATTR(contextid1, "config:" __stringify(ETM_OPT_CTXTID)); PMU_FORMAT_ATTR(contextid2, "config:" __stringify(ETM_OPT_CTXTID2)); PMU_FORMAT_ATTR(timestamp, "config:" __stringify(ETM_OPT_TS)); PMU_FORMAT_ATTR(retstack, "config:" __stringify(ETM_OPT_RETSTK)); +/* preset - if sink ID is used as a configuration selector */ +PMU_FORMAT_ATTR(preset, "config:0-3"); /* Sink ID - same for all ETMs */ PMU_FORMAT_ATTR(sinkid, "config2:0-31"); +/* config ID - set if a system configuration is selected */ +PMU_FORMAT_ATTR(configid, "config2:32-63"); + /* * contextid always traces the "PID". 
The PID is in CONTEXTIDR_EL1 @@ -88,6 +95,8 @@ static struct attribute *etm_config_formats_attr[] = { &format_attr_timestamp.attr, &format_attr_retstack.attr, &format_attr_sinkid.attr, + &format_attr_preset.attr, + &format_attr_configid.attr, NULL, }; @@ -105,9 +114,19 @@ static const struct attribute_group etm_pmu_sinks_group = { .attrs = etm_config_sinks_attr, }; +static struct attribute *etm_config_events_attr[] = { + NULL, +}; + +static const struct attribute_group etm_pmu_events_group = { + .name = "events", + .attrs = etm_config_events_attr, +}; + static const struct attribute_group *etm_pmu_attr_groups[] = { &etm_pmu_format_group, &etm_pmu_sinks_group, + &etm_pmu_events_group, NULL, }; @@ -196,6 +215,10 @@ static void free_event_data(struct work_struct *work) /* Free the sink buffers, if there are any */ free_sink_buffer(event_data); + /* clear any configuration we were using */ + if (event_data->cfg_hash) + cscfg_deactivate_config(event_data->cfg_hash); + for_each_cpu(cpu, mask) { struct list_head **ppath; @@ -273,7 +296,7 @@ static bool sinks_compatible(struct coresight_device *a, static void *etm_setup_aux(struct perf_event *event, void **pages, int nr_pages, bool overwrite) { - u32 id; + u32 id, cfg_hash; int cpu = event->cpu; cpumask_t *mask; struct coresight_device *sink = NULL; @@ -286,11 +309,19 @@ static void *etm_setup_aux(struct perf_event *event, void **pages, INIT_WORK(&event_data->work, free_event_data); /* First get the selected sink from user space. */ - if (event->attr.config2) { + if (event->attr.config2 & GENMASK_ULL(31, 0)) { id = (u32)event->attr.config2; sink = user_sink = coresight_get_sink_by_id(id); } + /* check if user wants a coresight configuration selected */ + cfg_hash = (u32)((event->attr.config2 & GENMASK_ULL(63, 32)) >> 32); + if (cfg_hash) { + if (cscfg_activate_config(cfg_hash)) + goto err; + event_data->cfg_hash = cfg_hash; + } + mask = &event_data->mask; /* @@ -658,68 +689,127 @@ static ssize_t etm_perf_sink_name_show(struct device *dev, return scnprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)(ea->var)); } -int etm_perf_add_symlink_sink(struct coresight_device *csdev) +static struct dev_ext_attribute * +etm_perf_add_symlink_group(struct device *dev, const char *name, const char *group_name) { - int ret; + struct dev_ext_attribute *ea; unsigned long hash; - const char *name; + int ret; struct device *pmu_dev = etm_pmu.dev; - struct device *dev = &csdev->dev; - struct dev_ext_attribute *ea; - - if (csdev->type != CORESIGHT_DEV_TYPE_SINK && - csdev->type != CORESIGHT_DEV_TYPE_LINKSINK) - return -EINVAL; - - if (csdev->ea != NULL) - return -EINVAL; if (!etm_perf_up) - return -EPROBE_DEFER; + return ERR_PTR(-EPROBE_DEFER); ea = devm_kzalloc(dev, sizeof(*ea), GFP_KERNEL); if (!ea) - return -ENOMEM; + return ERR_PTR(-ENOMEM); - name = dev_name(dev); - /* See function coresight_get_sink_by_id() to know where this is used */ + /* + * If this function is called adding a sink then the hash is used for + * sink selection - see function coresight_get_sink_by_id(). 
+ * If adding a configuration then the hash is used for selection in + * cscfg_activate_config() + */ hash = hashlen_hash(hashlen_string(NULL, name)); sysfs_attr_init(&ea->attr.attr); ea->attr.attr.name = devm_kstrdup(dev, name, GFP_KERNEL); if (!ea->attr.attr.name) - return -ENOMEM; + return ERR_PTR(-ENOMEM); ea->attr.attr.mode = 0444; - ea->attr.show = etm_perf_sink_name_show; ea->var = (unsigned long *)hash; ret = sysfs_add_file_to_group(&pmu_dev->kobj, - &ea->attr.attr, "sinks"); + &ea->attr.attr, group_name); - if (!ret) - csdev->ea = ea; + return ret ? ERR_PTR(ret) : ea; +} - return ret; +int etm_perf_add_symlink_sink(struct coresight_device *csdev) +{ + const char *name; + struct device *dev = &csdev->dev; + int err = 0; + + if (csdev->type != CORESIGHT_DEV_TYPE_SINK && + csdev->type != CORESIGHT_DEV_TYPE_LINKSINK) + return -EINVAL; + + if (csdev->ea != NULL) + return -EINVAL; + + name = dev_name(dev); + csdev->ea = etm_perf_add_symlink_group(dev, name, "sinks"); + if (IS_ERR(csdev->ea)) { + err = PTR_ERR(csdev->ea); + csdev->ea = NULL; + } else + csdev->ea->attr.show = etm_perf_sink_name_show; + + return err; } -void etm_perf_del_symlink_sink(struct coresight_device *csdev) +static void etm_perf_del_symlink_group(struct dev_ext_attribute *ea, const char *group_name) { struct device *pmu_dev = etm_pmu.dev; - struct dev_ext_attribute *ea = csdev->ea; + sysfs_remove_file_from_group(&pmu_dev->kobj, + &ea->attr.attr, group_name); +} + +void etm_perf_del_symlink_sink(struct coresight_device *csdev) +{ if (csdev->type != CORESIGHT_DEV_TYPE_SINK && csdev->type != CORESIGHT_DEV_TYPE_LINKSINK) return; - if (!ea) + if (!csdev->ea) return; - sysfs_remove_file_from_group(&pmu_dev->kobj, - &ea->attr.attr, "sinks"); + etm_perf_del_symlink_group(csdev->ea, "sinks"); csdev->ea = NULL; } +static ssize_t etm_perf_cscfg_event_show(struct device *dev, + struct device_attribute *dattr, + char *buf) +{ + struct dev_ext_attribute *ea; + + ea = container_of(dattr, struct dev_ext_attribute, attr); + return scnprintf(buf, PAGE_SIZE, "configid=0x%lx\n", (unsigned long)(ea->var)); +} + +int etm_perf_add_symlink_cscfg(struct device *dev, struct cscfg_config_desc *config_desc) +{ + int err = 0; + + if (config_desc->event_ea != NULL) + return 0; + + config_desc->event_ea = etm_perf_add_symlink_group(dev, config_desc->name, "events"); + + /* set the show function to the custom cscfg event */ + if (!IS_ERR(config_desc->event_ea)) + config_desc->event_ea->attr.show = etm_perf_cscfg_event_show; + else { + err = PTR_ERR(config_desc->event_ea); + config_desc->event_ea = NULL; + } + + return err; +} + +void etm_perf_del_symlink_cscfg(struct cscfg_config_desc *config_desc) +{ + if (!config_desc->event_ea) + return; + + etm_perf_del_symlink_group(config_desc->event_ea, "events"); + config_desc->event_ea = NULL; +} + int __init etm_perf_init(void) { int ret; @@ -748,7 +838,7 @@ int __init etm_perf_init(void) return ret; } -void __exit etm_perf_exit(void) +void etm_perf_exit(void) { perf_pmu_unregister(&etm_pmu); } diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.h b/drivers/hwtracing/coresight/coresight-etm-perf.h index 3e4f2ad5e193..468f7799ab4f 100644 --- a/drivers/hwtracing/coresight/coresight-etm-perf.h +++ b/drivers/hwtracing/coresight/coresight-etm-perf.h @@ -11,6 +11,7 @@ #include "coresight-priv.h" struct coresight_device; +struct cscfg_config_desc; /* * In both ETMv3 and v4 the maximum number of address comparator implentable @@ -48,12 +49,14 @@ struct etm_filters { * @work: Handle to free allocated 
memory outside IRQ context. * @mask: Hold the CPU(s) this event was set for. * @snk_config: The sink configuration. + * @cfg_hash: The hash id of any coresight config selected. * @path: An array of path, each slot for one CPU. */ struct etm_event_data { struct work_struct work; cpumask_t mask; void *snk_config; + u32 cfg_hash; struct list_head * __percpu *path; }; @@ -69,6 +72,9 @@ static inline void *etm_perf_sink_config(struct perf_output_handle *handle) return data->snk_config; return NULL; } +int etm_perf_add_symlink_cscfg(struct device *dev, + struct cscfg_config_desc *config_desc); +void etm_perf_del_symlink_cscfg(struct cscfg_config_desc *config_desc); #else static inline int etm_perf_symlink(struct coresight_device *csdev, bool link) { return -EINVAL; } @@ -79,10 +85,14 @@ static inline void *etm_perf_sink_config(struct perf_output_handle *handle) { return NULL; } +int etm_perf_add_symlink_cscfg(struct device *dev, + struct cscfg_config_desc *config_desc) +{ return -EINVAL; } +void etm_perf_del_symlink_cscfg(struct cscfg_config_desc *config_desc) {} #endif /* CONFIG_CORESIGHT */ int __init etm_perf_init(void); -void __exit etm_perf_exit(void); +void etm_perf_exit(void); #endif diff --git a/drivers/hwtracing/coresight/coresight-etm4x-cfg.c b/drivers/hwtracing/coresight/coresight-etm4x-cfg.c new file mode 100644 index 000000000000..d2ea903231b2 --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-etm4x-cfg.c @@ -0,0 +1,182 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright(C) 2020 Linaro Limited. All rights reserved. + * Author: Mike Leach <mike.leach@linaro.org> + */ + +#include "coresight-etm4x.h" +#include "coresight-etm4x-cfg.h" +#include "coresight-priv.h" +#include "coresight-syscfg.h" + +/* defines to associate register IDs with driver data locations */ +#define CHECKREG(cval, elem) \ + { \ + if (offset == cval) { \ + reg_csdev->driver_regval = &drvcfg->elem; \ + err = 0; \ + break; \ + } \ + } + +#define CHECKREGIDX(cval, elem, off_idx, mask) \ + { \ + if (mask == cval) { \ + reg_csdev->driver_regval = &drvcfg->elem[off_idx]; \ + err = 0; \ + break; \ + } \ + } + +/** + * etm4_cfg_map_reg_offset - validate and map the register offset into a + * location in the driver config struct. + * + * Limits the number of registers that can be accessed and programmed in + * features, to those which are used to control the trace capture parameters. + * + * Omits or limits access to those which the driver must use exclusively. + * + * Invalid offsets will result in fail code return and feature load failure. + * + * @drvdata: driver data to map into. + * @reg: register to map. 
+ * @offset: device offset for the register + */ +static int etm4_cfg_map_reg_offset(struct etmv4_drvdata *drvdata, + struct cscfg_regval_csdev *reg_csdev, u32 offset) +{ + int err = -EINVAL, idx; + struct etmv4_config *drvcfg = &drvdata->config; + u32 off_mask; + + if (((offset >= TRCEVENTCTL0R) && (offset <= TRCVIPCSSCTLR)) || + ((offset >= TRCSEQRSTEVR) && (offset <= TRCEXTINSELR)) || + ((offset >= TRCCIDCCTLR0) && (offset <= TRCVMIDCCTLR1))) { + do { + CHECKREG(TRCEVENTCTL0R, eventctrl0); + CHECKREG(TRCEVENTCTL1R, eventctrl1); + CHECKREG(TRCSTALLCTLR, stall_ctrl); + CHECKREG(TRCTSCTLR, ts_ctrl); + CHECKREG(TRCSYNCPR, syncfreq); + CHECKREG(TRCCCCTLR, ccctlr); + CHECKREG(TRCBBCTLR, bb_ctrl); + CHECKREG(TRCVICTLR, vinst_ctrl); + CHECKREG(TRCVIIECTLR, viiectlr); + CHECKREG(TRCVISSCTLR, vissctlr); + CHECKREG(TRCVIPCSSCTLR, vipcssctlr); + CHECKREG(TRCSEQRSTEVR, seq_rst); + CHECKREG(TRCSEQSTR, seq_state); + CHECKREG(TRCEXTINSELR, ext_inp); + CHECKREG(TRCCIDCCTLR0, ctxid_mask0); + CHECKREG(TRCCIDCCTLR1, ctxid_mask1); + CHECKREG(TRCVMIDCCTLR0, vmid_mask0); + CHECKREG(TRCVMIDCCTLR1, vmid_mask1); + } while (0); + } else if ((offset & GENMASK(11, 4)) == TRCSEQEVRn(0)) { + /* sequencer state control registers */ + idx = (offset & GENMASK(3, 0)) / 4; + if (idx < ETM_MAX_SEQ_STATES) { + reg_csdev->driver_regval = &drvcfg->seq_ctrl[idx]; + err = 0; + } + } else if ((offset >= TRCSSCCRn(0)) && (offset <= TRCSSPCICRn(7))) { + /* 32 bit, 8 off indexed register sets */ + idx = (offset & GENMASK(4, 0)) / 4; + off_mask = (offset & GENMASK(11, 5)); + do { + CHECKREGIDX(TRCSSCCRn(0), ss_ctrl, idx, off_mask); + CHECKREGIDX(TRCSSCSRn(0), ss_status, idx, off_mask); + CHECKREGIDX(TRCSSPCICRn(0), ss_pe_cmp, idx, off_mask); + } while (0); + } else if ((offset >= TRCCIDCVRn(0)) && (offset <= TRCVMIDCVRn(7))) { + /* 64 bit, 8 off indexed register sets */ + idx = (offset & GENMASK(5, 0)) / 8; + off_mask = (offset & GENMASK(11, 6)); + do { + CHECKREGIDX(TRCCIDCVRn(0), ctxid_pid, idx, off_mask); + CHECKREGIDX(TRCVMIDCVRn(0), vmid_val, idx, off_mask); + } while (0); + } else if ((offset >= TRCRSCTLRn(2)) && + (offset <= TRCRSCTLRn((ETM_MAX_RES_SEL - 1)))) { + /* 32 bit resource selection regs, 32 off, skip fixed 0,1 */ + idx = (offset & GENMASK(6, 0)) / 4; + if (idx < ETM_MAX_RES_SEL) { + reg_csdev->driver_regval = &drvcfg->res_ctrl[idx]; + err = 0; + } + } else if ((offset >= TRCACVRn(0)) && + (offset <= TRCACATRn((ETM_MAX_SINGLE_ADDR_CMP - 1)))) { + /* 64 bit addr cmp regs, 16 off */ + idx = (offset & GENMASK(6, 0)) / 8; + off_mask = offset & GENMASK(11, 7); + do { + CHECKREGIDX(TRCACVRn(0), addr_val, idx, off_mask); + CHECKREGIDX(TRCACATRn(0), addr_acc, idx, off_mask); + } while (0); + } else if ((offset >= TRCCNTRLDVRn(0)) && + (offset <= TRCCNTVRn((ETMv4_MAX_CNTR - 1)))) { + /* 32 bit counter regs, 4 off (ETMv4_MAX_CNTR - 1) */ + idx = (offset & GENMASK(3, 0)) / 4; + off_mask = offset & GENMASK(11, 4); + do { + CHECKREGIDX(TRCCNTRLDVRn(0), cntrldvr, idx, off_mask); + CHECKREGIDX(TRCCNTCTLRn(0), cntr_ctrl, idx, off_mask); + CHECKREGIDX(TRCCNTVRn(0), cntr_val, idx, off_mask); + } while (0); + } + return err; +} + +/** + * etm4_cfg_load_feature - load a feature into a device instance. + * + * @csdev: An ETMv4 CoreSight device. + * @feat: The feature to be loaded. + * + * The function will load a feature instance into the device, checking that + * the register definitions are valid for the device. 
+ * + * Parameter and register definitions will be converted into internal + * structures that are used to set the values in the driver when the + * feature is enabled for the device. + * + * The feature spinlock pointer is initialised to the same spinlock + * that the driver uses to protect the internal register values. + */ +static int etm4_cfg_load_feature(struct coresight_device *csdev, + struct cscfg_feature_csdev *feat_csdev) +{ + struct device *dev = csdev->dev.parent; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev); + const struct cscfg_feature_desc *feat_desc = feat_csdev->feat_desc; + u32 offset; + int i = 0, err = 0; + + /* + * essential we set the device spinlock - this is used in the generic + * programming routines when copying values into the drvdata structures + * via the pointers setup in etm4_cfg_map_reg_offset(). + */ + feat_csdev->drv_spinlock = &drvdata->spinlock; + + /* process the register descriptions */ + for (i = 0; i < feat_csdev->nr_regs && !err; i++) { + offset = feat_desc->regs_desc[i].offset; + err = etm4_cfg_map_reg_offset(drvdata, &feat_csdev->regs_csdev[i], offset); + } + return err; +} + +/* match information when loading configurations */ +#define CS_CFG_ETM4_MATCH_FLAGS (CS_CFG_MATCH_CLASS_SRC_ALL | \ + CS_CFG_MATCH_CLASS_SRC_ETM4) + +int etm4_cscfg_register(struct coresight_device *csdev) +{ + struct cscfg_csdev_feat_ops ops; + + ops.load_feat = &etm4_cfg_load_feature; + + return cscfg_register_csdev(csdev, CS_CFG_ETM4_MATCH_FLAGS, &ops); +} diff --git a/drivers/hwtracing/coresight/coresight-etm4x-cfg.h b/drivers/hwtracing/coresight/coresight-etm4x-cfg.h new file mode 100644 index 000000000000..32dab34c1dac --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-etm4x-cfg.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2014-2020, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _CORESIGHT_ETM4X_CFG_H +#define _CORESIGHT_ETM4X_CFG_H + +#include "coresight-config.h" +#include "coresight-etm4x.h" + +/* ETMv4 specific config defines */ + +/* resource IDs */ + +#define ETM4_CFG_RES_CTR 0x001 +#define ETM4_CFG_RES_CMP 0x002 +#define ETM4_CFG_RES_CMP_PAIR0 0x003 +#define ETM4_CFG_RES_CMP_PAIR1 0x004 +#define ETM4_CFG_RES_SEL 0x005 +#define ETM4_CFG_RES_SEL_PAIR0 0x006 +#define ETM4_CFG_RES_SEL_PAIR1 0x007 +#define ETM4_CFG_RES_SEQ 0x008 +#define ETM4_CFG_RES_TS 0x009 +#define ETM4_CFG_RES_MASK 0x00F + +/* ETMv4 specific config functions */ +int etm4_cscfg_register(struct coresight_device *csdev); + +#endif /* CORESIGHT_ETM4X_CFG_H */ diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c index da27cd4a3c38..e24252eaf8e4 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-core.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c @@ -39,6 +39,8 @@ #include "coresight-etm4x.h" #include "coresight-etm-perf.h" +#include "coresight-etm4x-cfg.h" +#include "coresight-syscfg.h" static int boot_enable; module_param(boot_enable, int, 0444); @@ -561,12 +563,15 @@ out: return ret; } -static int etm4_parse_event_config(struct etmv4_drvdata *drvdata, +static int etm4_parse_event_config(struct coresight_device *csdev, struct perf_event *event) { int ret = 0; + struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); struct etmv4_config *config = &drvdata->config; struct perf_event_attr *attr = &event->attr; + unsigned long cfg_hash; + int preset; /* Clear configuration from previous run */ memset(config, 0, sizeof(struct etmv4_config)); @@ -632,6 +637,20 @@ static int etm4_parse_event_config(struct etmv4_drvdata *drvdata, /* bit[12], Return stack enable bit */ config->cfg |= BIT(12); + /* + * Set any selected configuration and preset. + * + * This extracts the values of PMU_FORMAT_ATTR(configid) and PMU_FORMAT_ATTR(preset) + * in the perf attributes defined in coresight-etm-perf.c. + * configid uses bits 63:32 of attr->config2, preset uses bits 3:0 of attr->config. + * A zero configid means no configuration active, preset = 0 means no preset selected. + */ + if (attr->config2 & GENMASK_ULL(63, 32)) { + cfg_hash = (u32)(attr->config2 >> 32); + preset = attr->config & 0xF; + ret = cscfg_csdev_enable_active_config(csdev, cfg_hash, preset); + } + out: return ret; } @@ -648,7 +667,7 @@ static int etm4_enable_perf(struct coresight_device *csdev, } /* Configure the tracer based on the session's specifics */ - ret = etm4_parse_event_config(drvdata, event); + ret = etm4_parse_event_config(csdev, event); if (ret) goto out; /* And enable it */ @@ -794,11 +813,18 @@ static int etm4_disable_perf(struct coresight_device *csdev, u32 control; struct etm_filters *filters = event->hw.addr_filters; struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + struct perf_event_attr *attr = &event->attr; if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id())) return -EINVAL; etm4_disable_hw(drvdata); + /* + * The config_id occupies bits 63:32 of the config2 perf event attr + * field. If this is non-zero then we will have enabled a config. + */ + if (attr->config2 & GENMASK_ULL(63, 32)) + cscfg_csdev_disable_active_config(csdev); /* * Check if the start/stop logic was active when the unit was stopped. 
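Together with the etm-perf changes earlier, the two hunks above fix the user-visible event ABI for configurations: a configuration is selected by a hash in bits 63:32 of attr->config2 (PMU_FORMAT_ATTR(configid)) and a preset by bits 3:0 of attr->config (PMU_FORMAT_ATTR(preset)). A compact sketch of the decode both the enable and disable paths perform; the helper name is illustrative, the bit layout is taken from the comments in the patch:

    #include <linux/perf_event.h>

    /* Illustrative decode of the cs_etm config fields used above. */
    static void my_decode_cscfg_event(const struct perf_event_attr *attr,
                                      u32 *cfg_hash, int *preset)
    {
            /* configid: config2[63:32]; zero means no configuration. */
            *cfg_hash = (u32)(attr->config2 >> 32);
            /* preset: config[3:0]; zero means current parameter values. */
            *preset = attr->config & 0xF;
    }

From user space the hash is normally supplied via the alias published in the perf "events" group, so a session would look something like "perf record -e cs_etm/autofdo,preset=1/u ...", where the autofdo alias expands to the configid value printed by etm_perf_cscfg_event_show().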
@@ -1939,6 +1965,13 @@ static int etm4_probe(struct device *dev, void __iomem *base, u32 etm_pid) return ret; } + /* register with config infrastructure & load any current features */ + ret = etm4_cscfg_register(drvdata->csdev); + if (ret) { + coresight_unregister(drvdata->csdev); + return ret; + } + etmdrvdata[drvdata->cpu] = drvdata; dev_info(&drvdata->csdev->dev, "CPU%d: %s v%d.%d initialized\n", @@ -2025,6 +2058,7 @@ static int __exit etm4_remove_dev(struct etmv4_drvdata *drvdata) cpus_read_unlock(); + cscfg_unregister_csdev(drvdata->csdev); coresight_unregister(drvdata->csdev); return 0; diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c index 007bad9e7ad8..a0640fa5c55b 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c @@ -9,6 +9,7 @@ #include <linux/sysfs.h> #include "coresight-etm4x.h" #include "coresight-priv.h" +#include "coresight-syscfg.h" static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude) { @@ -269,6 +270,8 @@ static ssize_t reset_store(struct device *dev, spin_unlock(&drvdata->spinlock); + cscfg_csdev_reset_feats(to_coresight_device(dev)); + return size; } static DEVICE_ATTR_WO(reset); diff --git a/drivers/hwtracing/coresight/coresight-syscfg-configfs.c b/drivers/hwtracing/coresight/coresight-syscfg-configfs.c new file mode 100644 index 000000000000..c547816b9000 --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-syscfg-configfs.c @@ -0,0 +1,396 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2020 Linaro Limited, All rights reserved. + * Author: Mike Leach <mike.leach@linaro.org> + */ + +#include <linux/configfs.h> + +#include "coresight-syscfg-configfs.h" + +/* create a default ci_type. 
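+ * Only the owner is set - used for groups which have no attributes of
+ * their own, e.g. the "params" directory group created further down.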
*/ +static inline struct config_item_type *cscfg_create_ci_type(void) +{ + struct config_item_type *ci_type; + + ci_type = devm_kzalloc(cscfg_device(), sizeof(*ci_type), GFP_KERNEL); + if (ci_type) + ci_type->ct_owner = THIS_MODULE; + + return ci_type; +} + +/* configurations sub-group */ + +/* attributes for the config view group */ +static ssize_t cscfg_cfg_description_show(struct config_item *item, char *page) +{ + struct cscfg_fs_config *fs_config = container_of(to_config_group(item), + struct cscfg_fs_config, group); + + return scnprintf(page, PAGE_SIZE, "%s", fs_config->config_desc->description); +} +CONFIGFS_ATTR_RO(cscfg_cfg_, description); + +static ssize_t cscfg_cfg_feature_refs_show(struct config_item *item, char *page) +{ + struct cscfg_fs_config *fs_config = container_of(to_config_group(item), + struct cscfg_fs_config, group); + const struct cscfg_config_desc *config_desc = fs_config->config_desc; + ssize_t ch_used = 0; + int i; + + for (i = 0; i < config_desc->nr_feat_refs; i++) + ch_used += scnprintf(page + ch_used, PAGE_SIZE - ch_used, + "%s\n", config_desc->feat_ref_names[i]); + return ch_used; +} +CONFIGFS_ATTR_RO(cscfg_cfg_, feature_refs); + +/* list preset values in order of features and params */ +static ssize_t cscfg_cfg_values_show(struct config_item *item, char *page) +{ + const struct cscfg_feature_desc *feat_desc; + const struct cscfg_config_desc *config_desc; + struct cscfg_fs_preset *fs_preset; + int i, j, val_idx, preset_idx; + ssize_t used = 0; + + fs_preset = container_of(to_config_group(item), struct cscfg_fs_preset, group); + config_desc = fs_preset->config_desc; + + if (!config_desc->nr_presets) + return 0; + + preset_idx = fs_preset->preset_num - 1; + + /* start index on the correct array line */ + val_idx = config_desc->nr_total_params * preset_idx; + + /* + * A set of presets is the sum of all params in used features, + * in order of declaration of features and params in the features + */ + for (i = 0; i < config_desc->nr_feat_refs; i++) { + feat_desc = cscfg_get_named_feat_desc(config_desc->feat_ref_names[i]); + for (j = 0; j < feat_desc->nr_params; j++) { + used += scnprintf(page + used, PAGE_SIZE - used, + "%s.%s = 0x%llx ", + feat_desc->name, + feat_desc->params_desc[j].name, + config_desc->presets[val_idx++]); + } + } + used += scnprintf(page + used, PAGE_SIZE - used, "\n"); + + return used; +} +CONFIGFS_ATTR_RO(cscfg_cfg_, values); + +static struct configfs_attribute *cscfg_config_view_attrs[] = { + &cscfg_cfg_attr_description, + &cscfg_cfg_attr_feature_refs, + NULL, +}; + +static struct config_item_type cscfg_config_view_type = { + .ct_owner = THIS_MODULE, + .ct_attrs = cscfg_config_view_attrs, +}; + +static struct configfs_attribute *cscfg_config_preset_attrs[] = { + &cscfg_cfg_attr_values, + NULL, +}; + +static struct config_item_type cscfg_config_preset_type = { + .ct_owner = THIS_MODULE, + .ct_attrs = cscfg_config_preset_attrs, +}; + +static int cscfg_add_preset_groups(struct cscfg_fs_config *cfg_view) +{ + int preset_num; + struct cscfg_fs_preset *cfg_fs_preset; + struct cscfg_config_desc *config_desc = cfg_view->config_desc; + char name[CONFIGFS_ITEM_NAME_LEN]; + + if (!config_desc->nr_presets) + return 0; + + for (preset_num = 1; preset_num <= config_desc->nr_presets; preset_num++) { + cfg_fs_preset = devm_kzalloc(cscfg_device(), + sizeof(struct cscfg_fs_preset), GFP_KERNEL); + + if (!cfg_fs_preset) + return -ENOMEM; + + snprintf(name, CONFIGFS_ITEM_NAME_LEN, "preset%d", preset_num); + cfg_fs_preset->preset_num = preset_num; + 
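+ /*
+  * Presets are numbered from 1; preset 0 is reserved at enable time to
+  * mean "use current parameter values" - see
+  * cscfg_csdev_enable_active_config() later in this patch.
+  */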
cfg_fs_preset->config_desc = cfg_view->config_desc; + config_group_init_type_name(&cfg_fs_preset->group, name, + &cscfg_config_preset_type); + configfs_add_default_group(&cfg_fs_preset->group, &cfg_view->group); + } + return 0; +} + +static struct config_group *cscfg_create_config_group(struct cscfg_config_desc *config_desc) +{ + struct cscfg_fs_config *cfg_view; + struct device *dev = cscfg_device(); + int err; + + if (!dev) + return ERR_PTR(-EINVAL); + + cfg_view = devm_kzalloc(dev, sizeof(struct cscfg_fs_config), GFP_KERNEL); + if (!cfg_view) + return ERR_PTR(-ENOMEM); + + cfg_view->config_desc = config_desc; + config_group_init_type_name(&cfg_view->group, config_desc->name, &cscfg_config_view_type); + + /* add in a preset<n> dir for each preset */ + err = cscfg_add_preset_groups(cfg_view); + if (err) + return ERR_PTR(err); + + return &cfg_view->group; +} + +/* attributes for features view */ + +static ssize_t cscfg_feat_description_show(struct config_item *item, char *page) +{ + struct cscfg_fs_feature *fs_feat = container_of(to_config_group(item), + struct cscfg_fs_feature, group); + + return scnprintf(page, PAGE_SIZE, "%s", fs_feat->feat_desc->description); +} +CONFIGFS_ATTR_RO(cscfg_feat_, description); + +static ssize_t cscfg_feat_matches_show(struct config_item *item, char *page) +{ + struct cscfg_fs_feature *fs_feat = container_of(to_config_group(item), + struct cscfg_fs_feature, group); + u32 match_flags = fs_feat->feat_desc->match_flags; + int used = 0; + + if (match_flags & CS_CFG_MATCH_CLASS_SRC_ALL) + used = scnprintf(page, PAGE_SIZE, "SRC_ALL "); + + if (match_flags & CS_CFG_MATCH_CLASS_SRC_ETM4) + used += scnprintf(page + used, PAGE_SIZE - used, "SRC_ETMV4 "); + + used += scnprintf(page + used, PAGE_SIZE - used, "\n"); + return used; +} +CONFIGFS_ATTR_RO(cscfg_feat_, matches); + +static ssize_t cscfg_feat_nr_params_show(struct config_item *item, char *page) +{ + struct cscfg_fs_feature *fs_feat = container_of(to_config_group(item), + struct cscfg_fs_feature, group); + + return scnprintf(page, PAGE_SIZE, "%d\n", fs_feat->feat_desc->nr_params); +} +CONFIGFS_ATTR_RO(cscfg_feat_, nr_params); + +/* base feature desc attrib structures */ +static struct configfs_attribute *cscfg_feature_view_attrs[] = { + &cscfg_feat_attr_description, + &cscfg_feat_attr_matches, + &cscfg_feat_attr_nr_params, + NULL, +}; + +static struct config_item_type cscfg_feature_view_type = { + .ct_owner = THIS_MODULE, + .ct_attrs = cscfg_feature_view_attrs, +}; + +static ssize_t cscfg_param_value_show(struct config_item *item, char *page) +{ + struct cscfg_fs_param *param_item = container_of(to_config_group(item), + struct cscfg_fs_param, group); + u64 value = param_item->feat_desc->params_desc[param_item->param_idx].value; + + return scnprintf(page, PAGE_SIZE, "0x%llx\n", value); +} + +static ssize_t cscfg_param_value_store(struct config_item *item, + const char *page, size_t size) +{ + struct cscfg_fs_param *param_item = container_of(to_config_group(item), + struct cscfg_fs_param, group); + struct cscfg_feature_desc *feat_desc = param_item->feat_desc; + int param_idx = param_item->param_idx; + u64 value; + int err; + + err = kstrtoull(page, 0, &value); + if (!err) + err = cscfg_update_feat_param_val(feat_desc, param_idx, value); + + return err ? 
err : size; +} +CONFIGFS_ATTR(cscfg_param_, value); + +static struct configfs_attribute *cscfg_param_view_attrs[] = { + &cscfg_param_attr_value, + NULL, +}; + +static struct config_item_type cscfg_param_view_type = { + .ct_owner = THIS_MODULE, + .ct_attrs = cscfg_param_view_attrs, +}; + +/* + * configfs has far less functionality provided to add attributes dynamically than sysfs, + * and the show and store fns pass the enclosing config_item so the actual attribute cannot + * be determined. Therefore we add each item as a group directory, with a value attribute. + */ +static int cscfg_create_params_group_items(struct cscfg_feature_desc *feat_desc, + struct config_group *params_group) +{ + struct device *dev = cscfg_device(); + struct cscfg_fs_param *param_item; + int i; + + /* parameter items - as groups with default_value attribute */ + for (i = 0; i < feat_desc->nr_params; i++) { + param_item = devm_kzalloc(dev, sizeof(struct cscfg_fs_param), GFP_KERNEL); + if (!param_item) + return -ENOMEM; + param_item->feat_desc = feat_desc; + param_item->param_idx = i; + config_group_init_type_name(¶m_item->group, + feat_desc->params_desc[i].name, + &cscfg_param_view_type); + configfs_add_default_group(¶m_item->group, params_group); + } + return 0; +} + +static struct config_group *cscfg_create_feature_group(struct cscfg_feature_desc *feat_desc) +{ + struct cscfg_fs_feature *feat_view; + struct config_item_type *params_group_type; + struct config_group *params_group = NULL; + struct device *dev = cscfg_device(); + int item_err; + + if (!dev) + return ERR_PTR(-EINVAL); + + feat_view = devm_kzalloc(dev, sizeof(struct cscfg_fs_feature), GFP_KERNEL); + if (!feat_view) + return ERR_PTR(-ENOMEM); + + if (feat_desc->nr_params) { + params_group = devm_kzalloc(dev, sizeof(struct config_group), GFP_KERNEL); + if (!params_group) + return ERR_PTR(-ENOMEM); + + params_group_type = cscfg_create_ci_type(); + if (!params_group_type) + return ERR_PTR(-ENOMEM); + } + + feat_view->feat_desc = feat_desc; + config_group_init_type_name(&feat_view->group, + feat_desc->name, + &cscfg_feature_view_type); + if (params_group) { + config_group_init_type_name(params_group, "params", params_group_type); + configfs_add_default_group(params_group, &feat_view->group); + item_err = cscfg_create_params_group_items(feat_desc, params_group); + if (item_err) + return ERR_PTR(item_err); + } + return &feat_view->group; +} + +static struct config_item_type cscfg_configs_type = { + .ct_owner = THIS_MODULE, +}; + +static struct config_group cscfg_configs_grp = { + .cg_item = { + .ci_namebuf = "configurations", + .ci_type = &cscfg_configs_type, + }, +}; + +/* add configuration to configurations group */ +int cscfg_configfs_add_config(struct cscfg_config_desc *config_desc) +{ + struct config_group *new_group; + int err; + + new_group = cscfg_create_config_group(config_desc); + if (IS_ERR(new_group)) + return PTR_ERR(new_group); + err = configfs_register_group(&cscfg_configs_grp, new_group); + return err; +} + +static struct config_item_type cscfg_features_type = { + .ct_owner = THIS_MODULE, +}; + +static struct config_group cscfg_features_grp = { + .cg_item = { + .ci_namebuf = "features", + .ci_type = &cscfg_features_type, + }, +}; + +/* add feature to features group */ +int cscfg_configfs_add_feature(struct cscfg_feature_desc *feat_desc) +{ + struct config_group *new_group; + int err; + + new_group = cscfg_create_feature_group(feat_desc); + if (IS_ERR(new_group)) + return PTR_ERR(new_group); + err = configfs_register_group(&cscfg_features_grp, 
new_group); + return err; +} + +int cscfg_configfs_init(struct cscfg_manager *cscfg_mgr) +{ + struct configfs_subsystem *subsys; + struct config_item_type *ci_type; + + if (!cscfg_mgr) + return -EINVAL; + + ci_type = cscfg_create_ci_type(); + if (!ci_type) + return -ENOMEM; + + subsys = &cscfg_mgr->cfgfs_subsys; + config_item_set_name(&subsys->su_group.cg_item, CSCFG_FS_SUBSYS_NAME); + subsys->su_group.cg_item.ci_type = ci_type; + + config_group_init(&subsys->su_group); + mutex_init(&subsys->su_mutex); + + /* Add default groups to subsystem */ + config_group_init(&cscfg_configs_grp); + configfs_add_default_group(&cscfg_configs_grp, &subsys->su_group); + + config_group_init(&cscfg_features_grp); + configfs_add_default_group(&cscfg_features_grp, &subsys->su_group); + + return configfs_register_subsystem(subsys); +} + +void cscfg_configfs_release(struct cscfg_manager *cscfg_mgr) +{ + configfs_unregister_subsystem(&cscfg_mgr->cfgfs_subsys); +} diff --git a/drivers/hwtracing/coresight/coresight-syscfg-configfs.h b/drivers/hwtracing/coresight/coresight-syscfg-configfs.h new file mode 100644 index 000000000000..7d6ffe35ca4c --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-syscfg-configfs.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Coresight system configuration driver - support for configfs. + */ + +#ifndef CORESIGHT_SYSCFG_CONFIGFS_H +#define CORESIGHT_SYSCFG_CONFIGFS_H + +#include <linux/configfs.h> +#include "coresight-syscfg.h" + +#define CSCFG_FS_SUBSYS_NAME "cs-syscfg" + +/* container for configuration view */ +struct cscfg_fs_config { + struct cscfg_config_desc *config_desc; + struct config_group group; +}; + +/* container for feature view */ +struct cscfg_fs_feature { + struct cscfg_feature_desc *feat_desc; + struct config_group group; +}; + +/* container for parameter view */ +struct cscfg_fs_param { + int param_idx; + struct cscfg_feature_desc *feat_desc; + struct config_group group; +}; + +/* container for preset view */ +struct cscfg_fs_preset { + int preset_num; + struct cscfg_config_desc *config_desc; + struct config_group group; +}; + +int cscfg_configfs_init(struct cscfg_manager *cscfg_mgr); +void cscfg_configfs_release(struct cscfg_manager *cscfg_mgr); +int cscfg_configfs_add_config(struct cscfg_config_desc *config_desc); +int cscfg_configfs_add_feature(struct cscfg_feature_desc *feat_desc); + +#endif /* CORESIGHT_SYSCFG_CONFIGFS_H */ diff --git a/drivers/hwtracing/coresight/coresight-syscfg.c b/drivers/hwtracing/coresight/coresight-syscfg.c new file mode 100644 index 000000000000..fc0760f55c53 --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-syscfg.c @@ -0,0 +1,847 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2020 Linaro Limited, All rights reserved. + * Author: Mike Leach <mike.leach@linaro.org> + */ + +#include <linux/platform_device.h> + +#include "coresight-config.h" +#include "coresight-etm-perf.h" +#include "coresight-syscfg.h" +#include "coresight-syscfg-configfs.h" + +/* + * cscfg_ API manages configurations and features for the entire coresight + * infrastructure. + * + * It allows the loading of configurations and features, and loads these into + * coresight devices as appropriate. 
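+ *
+ * Once loaded and activated, a configuration can be selected by name for
+ * a trace session; e.g. from perf, in a sketch where "autofdo" stands in
+ * for a loaded configuration name:
+ *
+ *   perf record -e cs_etm/autofdo,preset=1/u -- <command>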
+ */
+
+/* protect the cscfg data and device lists */
+static DEFINE_MUTEX(cscfg_mutex);
+
+/* only one of these */
+static struct cscfg_manager *cscfg_mgr;
+
+/* load features and configurations into the lists */
+
+/* get a named feature instance from a coresight device's list of features */
+static struct cscfg_feature_csdev *
+cscfg_get_feat_csdev(struct coresight_device *csdev, const char *name)
+{
+ struct cscfg_feature_csdev *feat_csdev = NULL;
+
+ list_for_each_entry(feat_csdev, &csdev->feature_csdev_list, node) {
+ if (strcmp(feat_csdev->feat_desc->name, name) == 0)
+ return feat_csdev;
+ }
+ return NULL;
+}
+
+/* allocate the device config instance - with max number of used features */
+static struct cscfg_config_csdev *
+cscfg_alloc_csdev_cfg(struct coresight_device *csdev, int nr_feats)
+{
+ struct cscfg_config_csdev *config_csdev = NULL;
+ struct device *dev = csdev->dev.parent;
+
+ /* this is allocated using devm against the coresight device */
+ config_csdev = devm_kzalloc(dev,
+ offsetof(struct cscfg_config_csdev, feats_csdev[nr_feats]),
+ GFP_KERNEL);
+ if (!config_csdev)
+ return NULL;
+
+ config_csdev->csdev = csdev;
+ return config_csdev;
+}
+
+/* Load a config into a device if there are any feature matches between config and device */
+static int cscfg_add_csdev_cfg(struct coresight_device *csdev,
+ struct cscfg_config_desc *config_desc)
+{
+ struct cscfg_config_csdev *config_csdev = NULL;
+ struct cscfg_feature_csdev *feat_csdev;
+ unsigned long flags;
+ int i;
+
+ /* look at each required feature and see if it matches any feature on the device */
+ for (i = 0; i < config_desc->nr_feat_refs; i++) {
+ /* look for a matching name */
+ feat_csdev = cscfg_get_feat_csdev(csdev, config_desc->feat_ref_names[i]);
+ if (feat_csdev) {
+ /*
+ * At least one feature on this device matches the config -
+ * add a config instance to the device and a reference to the feature.
+ */
+ if (!config_csdev) {
+ config_csdev = cscfg_alloc_csdev_cfg(csdev,
+ config_desc->nr_feat_refs);
+ if (!config_csdev)
+ return -ENOMEM;
+ config_csdev->config_desc = config_desc;
+ }
+ config_csdev->feats_csdev[config_csdev->nr_feat++] = feat_csdev;
+ }
+ }
+ /* if features matched, add the config to the device. */
+ if (config_csdev) {
+ spin_lock_irqsave(&csdev->cscfg_csdev_lock, flags);
+ list_add(&config_csdev->node, &csdev->config_csdev_list);
+ spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags);
+ }
+
+ return 0;
+}
+
+/*
+ * Add the config to the set of registered devices - call with mutex locked.
+ * Iterates through devices - any device that matches one or more of the
+ * configuration features will load it, the others will ignore it.
+ */
+static int cscfg_add_cfg_to_csdevs(struct cscfg_config_desc *config_desc)
+{
+ struct cscfg_registered_csdev *csdev_item;
+ int err;
+
+ list_for_each_entry(csdev_item, &cscfg_mgr->csdev_desc_list, item) {
+ err = cscfg_add_csdev_cfg(csdev_item->csdev, config_desc);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+/*
+ * Allocate a feature object for load into a csdev.
+ * Memory is allocated via the devm managed allocator against the csdev's parent device.
+ */ +static struct cscfg_feature_csdev * +cscfg_alloc_csdev_feat(struct coresight_device *csdev, struct cscfg_feature_desc *feat_desc) +{ + struct cscfg_feature_csdev *feat_csdev = NULL; + struct device *dev = csdev->dev.parent; + int i; + + feat_csdev = devm_kzalloc(dev, sizeof(struct cscfg_feature_csdev), GFP_KERNEL); + if (!feat_csdev) + return NULL; + + /* parameters are optional - could be 0 */ + feat_csdev->nr_params = feat_desc->nr_params; + + /* + * if we need parameters, zero alloc the space here, the load routine in + * the csdev device driver will fill out some information according to + * feature descriptor. + */ + if (feat_csdev->nr_params) { + feat_csdev->params_csdev = devm_kcalloc(dev, feat_csdev->nr_params, + sizeof(struct cscfg_parameter_csdev), + GFP_KERNEL); + if (!feat_csdev->params_csdev) + return NULL; + + /* + * fill in the feature reference in the param - other fields + * handled by loader in csdev. + */ + for (i = 0; i < feat_csdev->nr_params; i++) + feat_csdev->params_csdev[i].feat_csdev = feat_csdev; + } + + /* + * Always have registers to program - again the load routine in csdev device + * will fill out according to feature descriptor and device requirements. + */ + feat_csdev->nr_regs = feat_desc->nr_regs; + feat_csdev->regs_csdev = devm_kcalloc(dev, feat_csdev->nr_regs, + sizeof(struct cscfg_regval_csdev), + GFP_KERNEL); + if (!feat_csdev->regs_csdev) + return NULL; + + /* load the feature default values */ + feat_csdev->feat_desc = feat_desc; + feat_csdev->csdev = csdev; + + return feat_csdev; +} + +/* load one feature into one coresight device */ +static int cscfg_load_feat_csdev(struct coresight_device *csdev, + struct cscfg_feature_desc *feat_desc, + struct cscfg_csdev_feat_ops *ops) +{ + struct cscfg_feature_csdev *feat_csdev; + unsigned long flags; + int err; + + if (!ops->load_feat) + return -EINVAL; + + feat_csdev = cscfg_alloc_csdev_feat(csdev, feat_desc); + if (!feat_csdev) + return -ENOMEM; + + /* load the feature into the device */ + err = ops->load_feat(csdev, feat_csdev); + if (err) + return err; + + /* add to internal csdev feature list & initialise using reset call */ + cscfg_reset_feat(feat_csdev); + spin_lock_irqsave(&csdev->cscfg_csdev_lock, flags); + list_add(&feat_csdev->node, &csdev->feature_csdev_list); + spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags); + + return 0; +} + +/* + * Add feature to any matching devices - call with mutex locked. + * Iterates through devices - any device that matches the feature will be + * called to load it. + */ +static int cscfg_add_feat_to_csdevs(struct cscfg_feature_desc *feat_desc) +{ + struct cscfg_registered_csdev *csdev_item; + int err; + + list_for_each_entry(csdev_item, &cscfg_mgr->csdev_desc_list, item) { + if (csdev_item->match_flags & feat_desc->match_flags) { + err = cscfg_load_feat_csdev(csdev_item->csdev, feat_desc, &csdev_item->ops); + if (err) + return err; + } + } + return 0; +} + +/* check feature list for a named feature - call with mutex locked. */ +static bool cscfg_match_list_feat(const char *name) +{ + struct cscfg_feature_desc *feat_desc; + + list_for_each_entry(feat_desc, &cscfg_mgr->feat_desc_list, item) { + if (strcmp(feat_desc->name, name) == 0) + return true; + } + return false; +} + +/* check all feat needed for cfg are in the list - call with mutex locked. 
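+ * A config referencing a feature that has not been loaded is rejected
+ * with -EINVAL - hence cscfg_load_config_sets() loads features before
+ * configurations.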
*/ +static int cscfg_check_feat_for_cfg(struct cscfg_config_desc *config_desc) +{ + int i; + + for (i = 0; i < config_desc->nr_feat_refs; i++) + if (!cscfg_match_list_feat(config_desc->feat_ref_names[i])) + return -EINVAL; + return 0; +} + +/* + * load feature - add to feature list. + */ +static int cscfg_load_feat(struct cscfg_feature_desc *feat_desc) +{ + int err; + + /* add feature to any matching registered devices */ + err = cscfg_add_feat_to_csdevs(feat_desc); + if (err) + return err; + + list_add(&feat_desc->item, &cscfg_mgr->feat_desc_list); + return 0; +} + +/* + * load config into the system - validate used features exist then add to + * config list. + */ +static int cscfg_load_config(struct cscfg_config_desc *config_desc) +{ + int err; + + /* validate features are present */ + err = cscfg_check_feat_for_cfg(config_desc); + if (err) + return err; + + /* add config to any matching registered device */ + err = cscfg_add_cfg_to_csdevs(config_desc); + if (err) + return err; + + /* add config to perf fs to allow selection */ + err = etm_perf_add_symlink_cscfg(cscfg_device(), config_desc); + if (err) + return err; + + list_add(&config_desc->item, &cscfg_mgr->config_desc_list); + atomic_set(&config_desc->active_cnt, 0); + return 0; +} + +/* get a feature descriptor by name */ +const struct cscfg_feature_desc *cscfg_get_named_feat_desc(const char *name) +{ + const struct cscfg_feature_desc *feat_desc = NULL, *feat_desc_item; + + mutex_lock(&cscfg_mutex); + + list_for_each_entry(feat_desc_item, &cscfg_mgr->feat_desc_list, item) { + if (strcmp(feat_desc_item->name, name) == 0) { + feat_desc = feat_desc_item; + break; + } + } + + mutex_unlock(&cscfg_mutex); + return feat_desc; +} + +/* called with cscfg_mutex held */ +static struct cscfg_feature_csdev * +cscfg_csdev_get_feat_from_desc(struct coresight_device *csdev, + struct cscfg_feature_desc *feat_desc) +{ + struct cscfg_feature_csdev *feat_csdev; + + list_for_each_entry(feat_csdev, &csdev->feature_csdev_list, node) { + if (feat_csdev->feat_desc == feat_desc) + return feat_csdev; + } + return NULL; +} + +int cscfg_update_feat_param_val(struct cscfg_feature_desc *feat_desc, + int param_idx, u64 value) +{ + int err = 0; + struct cscfg_feature_csdev *feat_csdev; + struct cscfg_registered_csdev *csdev_item; + + mutex_lock(&cscfg_mutex); + + /* check if any config active & return busy */ + if (atomic_read(&cscfg_mgr->sys_active_cnt)) { + err = -EBUSY; + goto unlock_exit; + } + + /* set the value */ + if ((param_idx < 0) || (param_idx >= feat_desc->nr_params)) { + err = -EINVAL; + goto unlock_exit; + } + feat_desc->params_desc[param_idx].value = value; + + /* update loaded instances.*/ + list_for_each_entry(csdev_item, &cscfg_mgr->csdev_desc_list, item) { + feat_csdev = cscfg_csdev_get_feat_from_desc(csdev_item->csdev, feat_desc); + if (feat_csdev) + feat_csdev->params_csdev[param_idx].current_value = value; + } + +unlock_exit: + mutex_unlock(&cscfg_mutex); + return err; +} + +/** + * cscfg_load_config_sets - API function to load feature and config sets. + * + * Take a 0 terminated array of feature descriptors and/or configuration + * descriptors and load into the system. + * Features are loaded first to ensure configuration dependencies can be met. + * + * @config_descs: 0 terminated array of configuration descriptors. + * @feat_descs: 0 terminated array of feature descriptors. 
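+ *
+ * A minimal call sketch (descriptor names invented for illustration):
+ *
+ *	static struct cscfg_feature_desc *sys_feats[] = { &a_feat_desc, NULL };
+ *	static struct cscfg_config_desc *sys_cfgs[] = { &a_cfg_desc, NULL };
+ *
+ *	err = cscfg_load_config_sets(sys_cfgs, sys_feats);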
+ */ +int cscfg_load_config_sets(struct cscfg_config_desc **config_descs, + struct cscfg_feature_desc **feat_descs) +{ + int err, i = 0; + + mutex_lock(&cscfg_mutex); + + /* load features first */ + if (feat_descs) { + while (feat_descs[i]) { + err = cscfg_load_feat(feat_descs[i]); + if (!err) + err = cscfg_configfs_add_feature(feat_descs[i]); + if (err) { + pr_err("coresight-syscfg: Failed to load feature %s\n", + feat_descs[i]->name); + goto exit_unlock; + } + i++; + } + } + + /* next any configurations to check feature dependencies */ + i = 0; + if (config_descs) { + while (config_descs[i]) { + err = cscfg_load_config(config_descs[i]); + if (!err) + err = cscfg_configfs_add_config(config_descs[i]); + if (err) { + pr_err("coresight-syscfg: Failed to load configuration %s\n", + config_descs[i]->name); + goto exit_unlock; + } + i++; + } + } + +exit_unlock: + mutex_unlock(&cscfg_mutex); + return err; +} +EXPORT_SYMBOL_GPL(cscfg_load_config_sets); + +/* Handle coresight device registration and add configs and features to devices */ + +/* iterate through config lists and load matching configs to device */ +static int cscfg_add_cfgs_csdev(struct coresight_device *csdev) +{ + struct cscfg_config_desc *config_desc; + int err = 0; + + list_for_each_entry(config_desc, &cscfg_mgr->config_desc_list, item) { + err = cscfg_add_csdev_cfg(csdev, config_desc); + if (err) + break; + } + return err; +} + +/* iterate through feature lists and load matching features to device */ +static int cscfg_add_feats_csdev(struct coresight_device *csdev, + u32 match_flags, + struct cscfg_csdev_feat_ops *ops) +{ + struct cscfg_feature_desc *feat_desc; + int err = 0; + + if (!ops->load_feat) + return -EINVAL; + + list_for_each_entry(feat_desc, &cscfg_mgr->feat_desc_list, item) { + if (feat_desc->match_flags & match_flags) { + err = cscfg_load_feat_csdev(csdev, feat_desc, ops); + if (err) + break; + } + } + return err; +} + +/* Add coresight device to list and copy its matching info */ +static int cscfg_list_add_csdev(struct coresight_device *csdev, + u32 match_flags, + struct cscfg_csdev_feat_ops *ops) +{ + struct cscfg_registered_csdev *csdev_item; + + /* allocate the list entry structure */ + csdev_item = kzalloc(sizeof(struct cscfg_registered_csdev), GFP_KERNEL); + if (!csdev_item) + return -ENOMEM; + + csdev_item->csdev = csdev; + csdev_item->match_flags = match_flags; + csdev_item->ops.load_feat = ops->load_feat; + list_add(&csdev_item->item, &cscfg_mgr->csdev_desc_list); + + INIT_LIST_HEAD(&csdev->feature_csdev_list); + INIT_LIST_HEAD(&csdev->config_csdev_list); + spin_lock_init(&csdev->cscfg_csdev_lock); + + return 0; +} + +/* remove a coresight device from the list and free data */ +static void cscfg_list_remove_csdev(struct coresight_device *csdev) +{ + struct cscfg_registered_csdev *csdev_item, *tmp; + + list_for_each_entry_safe(csdev_item, tmp, &cscfg_mgr->csdev_desc_list, item) { + if (csdev_item->csdev == csdev) { + list_del(&csdev_item->item); + kfree(csdev_item); + break; + } + } +} + +/** + * cscfg_register_csdev - register a coresight device with the syscfg manager. + * + * Registers the coresight device with the system. @match_flags used to check + * if the device is a match for registered features. Any currently registered + * configurations and features that match the device will be loaded onto it. + * + * @csdev: The coresight device to register. + * @match_flags: Matching information to load features. + * @ops: Standard operations supported by the device. 
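+ *
+ * Typical driver usage is a thin wrapper that sets the load callback, as
+ * the ETMv4 driver does earlier in this series (sketch):
+ *
+ *	struct cscfg_csdev_feat_ops ops;
+ *
+ *	ops.load_feat = &etm4_cfg_load_feature;
+ *	return cscfg_register_csdev(csdev, CS_CFG_ETM4_MATCH_FLAGS, &ops);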
+ */
+int cscfg_register_csdev(struct coresight_device *csdev,
+ u32 match_flags,
+ struct cscfg_csdev_feat_ops *ops)
+{
+ int ret = 0;
+
+ mutex_lock(&cscfg_mutex);
+
+ /* add device to list of registered devices */
+ ret = cscfg_list_add_csdev(csdev, match_flags, ops);
+ if (ret)
+ goto reg_csdev_unlock;
+
+ /* now load any registered features and configs matching the device. */
+ ret = cscfg_add_feats_csdev(csdev, match_flags, ops);
+ if (ret) {
+ cscfg_list_remove_csdev(csdev);
+ goto reg_csdev_unlock;
+ }
+
+ ret = cscfg_add_cfgs_csdev(csdev);
+ if (ret) {
+ cscfg_list_remove_csdev(csdev);
+ goto reg_csdev_unlock;
+ }
+
+ pr_info("CSCFG registered %s\n", dev_name(&csdev->dev));
+
+reg_csdev_unlock:
+ mutex_unlock(&cscfg_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cscfg_register_csdev);
+
+/**
+ * cscfg_unregister_csdev - remove coresight device from syscfg manager.
+ *
+ * @csdev: Device to remove.
+ */
+void cscfg_unregister_csdev(struct coresight_device *csdev)
+{
+ mutex_lock(&cscfg_mutex);
+ cscfg_list_remove_csdev(csdev);
+ mutex_unlock(&cscfg_mutex);
+}
+EXPORT_SYMBOL_GPL(cscfg_unregister_csdev);
+
+/**
+ * cscfg_csdev_reset_feats - reset features for a CoreSight device.
+ *
+ * Resets all parameters and register values for any features loaded
+ * into @csdev to their default values.
+ *
+ * @csdev: The CoreSight device.
+ */
+void cscfg_csdev_reset_feats(struct coresight_device *csdev)
+{
+ struct cscfg_feature_csdev *feat_csdev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&csdev->cscfg_csdev_lock, flags);
+ if (list_empty(&csdev->feature_csdev_list))
+ goto unlock_exit;
+
+ list_for_each_entry(feat_csdev, &csdev->feature_csdev_list, node)
+ cscfg_reset_feat(feat_csdev);
+
+unlock_exit:
+ spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags);
+}
+EXPORT_SYMBOL_GPL(cscfg_csdev_reset_feats);
+
+/**
+ * cscfg_activate_config - Mark a configuration descriptor as active.
+ *
+ * This will be seen when csdev devices are enabled in the system.
+ * Only activated configurations can be enabled on individual devices.
+ * Activation protects the configuration from alteration or removal while
+ * active.
+ *
+ * Selection by hash value - generated from the configuration name when it
+ * was loaded and added to the cs_etm/configurations file system for selection
+ * by perf.
+ *
+ * Increments the configuration descriptor active count and the global active
+ * count.
+ *
+ * @cfg_hash: Hash value of the selected configuration name.
+ */
+int cscfg_activate_config(unsigned long cfg_hash)
+{
+ struct cscfg_config_desc *config_desc;
+ int err = -EINVAL;
+
+ mutex_lock(&cscfg_mutex);
+
+ list_for_each_entry(config_desc, &cscfg_mgr->config_desc_list, item) {
+ if ((unsigned long)config_desc->event_ea->var == cfg_hash) {
+ /*
+ * increment the global active count - control changes to
+ * active configurations
+ */
+ atomic_inc(&cscfg_mgr->sys_active_cnt);
+
+ /*
+ * mark the descriptor as active so enable config on a
+ * device instance will use it
+ */
+ atomic_inc(&config_desc->active_cnt);
+
+ err = 0;
+ dev_dbg(cscfg_device(), "Activate config %s.\n", config_desc->name);
+ break;
+ }
+ }
+ mutex_unlock(&cscfg_mutex);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(cscfg_activate_config);
+
+/**
+ * cscfg_deactivate_config - Mark a config descriptor as inactive.
+ *
+ * Decrement the configuration and global active counts.
+ *
+ * @cfg_hash: Hash value of the selected configuration name.
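+ *
+ * As with cscfg_activate_config(), the hash is the value generated from
+ * the configuration name at load time; a sketch of the derivation,
+ * assuming the <linux/stringhash.h> helpers used for the perf symlinks:
+ *
+ *	cfg_hash = (unsigned long)(u32)hashlen_hash(hashlen_string(NULL, name));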
+ */ +void cscfg_deactivate_config(unsigned long cfg_hash) +{ + struct cscfg_config_desc *config_desc; + + mutex_lock(&cscfg_mutex); + + list_for_each_entry(config_desc, &cscfg_mgr->config_desc_list, item) { + if ((unsigned long)config_desc->event_ea->var == cfg_hash) { + atomic_dec(&config_desc->active_cnt); + atomic_dec(&cscfg_mgr->sys_active_cnt); + dev_dbg(cscfg_device(), "Deactivate config %s.\n", config_desc->name); + break; + } + } + mutex_unlock(&cscfg_mutex); +} +EXPORT_SYMBOL_GPL(cscfg_deactivate_config); + +/** + * cscfg_csdev_enable_active_config - Enable matching active configuration for device. + * + * Enables the configuration selected by @cfg_hash if the configuration is supported + * on the device and has been activated. + * + * If active and supported the CoreSight device @csdev will be programmed with the + * configuration, using @preset parameters. + * + * Should be called before driver hardware enable for the requested device, prior to + * programming and enabling the physical hardware. + * + * @csdev: CoreSight device to program. + * @cfg_hash: Selector for the configuration. + * @preset: Preset parameter values to use, 0 for current / default values. + */ +int cscfg_csdev_enable_active_config(struct coresight_device *csdev, + unsigned long cfg_hash, int preset) +{ + struct cscfg_config_csdev *config_csdev_active = NULL, *config_csdev_item; + const struct cscfg_config_desc *config_desc; + unsigned long flags; + int err = 0; + + /* quickly check global count */ + if (!atomic_read(&cscfg_mgr->sys_active_cnt)) + return 0; + + /* + * Look for matching configuration - set the active configuration + * context if found. + */ + spin_lock_irqsave(&csdev->cscfg_csdev_lock, flags); + list_for_each_entry(config_csdev_item, &csdev->config_csdev_list, node) { + config_desc = config_csdev_item->config_desc; + if ((atomic_read(&config_desc->active_cnt)) && + ((unsigned long)config_desc->event_ea->var == cfg_hash)) { + config_csdev_active = config_csdev_item; + csdev->active_cscfg_ctxt = (void *)config_csdev_active; + break; + } + } + spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags); + + /* + * If found, attempt to enable + */ + if (config_csdev_active) { + /* + * Call the generic routine that will program up the internal + * driver structures prior to programming up the hardware. + * This routine takes the driver spinlock saved in the configs. + */ + err = cscfg_csdev_enable_config(config_csdev_active, preset); + if (!err) { + /* + * Successful programming. Check the active_cscfg_ctxt + * pointer to ensure no pre-emption disabled it via + * cscfg_csdev_disable_active_config() before + * we could start. + * + * Set enabled if OK, err if not. + */ + spin_lock_irqsave(&csdev->cscfg_csdev_lock, flags); + if (csdev->active_cscfg_ctxt) + config_csdev_active->enabled = true; + else + err = -EBUSY; + spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags); + } + } + return err; +} +EXPORT_SYMBOL_GPL(cscfg_csdev_enable_active_config); + +/** + * cscfg_csdev_disable_active_config - disable an active config on the device. + * + * Disables the active configuration on the CoreSight device @csdev. + * Disable will save the values of any registers marked in the configurations + * as save on disable. + * + * Should be called after driver hardware disable for the requested device, + * after disabling the physical hardware and reading back registers. + * + * @csdev: The CoreSight device. 
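+ *
+ * For example, the ETMv4 driver calls this from its perf disable path
+ * once the hardware is stopped (see the etm4_disable_perf() hunk earlier
+ * in this series):
+ *
+ *	etm4_disable_hw(drvdata);
+ *	if (attr->config2 & GENMASK_ULL(63, 32))
+ *		cscfg_csdev_disable_active_config(csdev);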
+ */
+void cscfg_csdev_disable_active_config(struct coresight_device *csdev)
+{
+ struct cscfg_config_csdev *config_csdev;
+ unsigned long flags;
+
+ /*
+ * Check if we have an active config, and that it was successfully enabled.
+ * If it was not enabled, we have no work to do, otherwise mark as disabled.
+ * Clear the active config pointer.
+ */
+ spin_lock_irqsave(&csdev->cscfg_csdev_lock, flags);
+ config_csdev = (struct cscfg_config_csdev *)csdev->active_cscfg_ctxt;
+ if (config_csdev) {
+ if (!config_csdev->enabled)
+ config_csdev = NULL;
+ else
+ config_csdev->enabled = false;
+ }
+ csdev->active_cscfg_ctxt = NULL;
+ spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags);
+
+ /* true if there was an enabled active config */
+ if (config_csdev)
+ cscfg_csdev_disable_config(config_csdev);
+}
+EXPORT_SYMBOL_GPL(cscfg_csdev_disable_active_config);
+
+/* Initialise system configuration management device. */
+
+struct device *cscfg_device(void)
+{
+ return cscfg_mgr ? &cscfg_mgr->dev : NULL;
+}
+
+/* Must have a release function or the kernel will complain on module unload */
+static void cscfg_dev_release(struct device *dev)
+{
+ kfree(cscfg_mgr);
+ cscfg_mgr = NULL;
+}
+
+/* a device is needed to "own" some kernel elements such as sysfs entries. */
+static int cscfg_create_device(void)
+{
+ struct device *dev;
+ int err = -ENOMEM;
+
+ mutex_lock(&cscfg_mutex);
+ if (cscfg_mgr) {
+ err = -EINVAL;
+ goto create_dev_exit_unlock;
+ }
+
+ cscfg_mgr = kzalloc(sizeof(struct cscfg_manager), GFP_KERNEL);
+ if (!cscfg_mgr)
+ goto create_dev_exit_unlock;
+
+ /* setup the device */
+ dev = cscfg_device();
+ dev->release = cscfg_dev_release;
+ dev->init_name = "cs_system_cfg";
+
+ err = device_register(dev);
+ if (err)
+ cscfg_dev_release(dev);
+
+create_dev_exit_unlock:
+ mutex_unlock(&cscfg_mutex);
+ return err;
+}
+
+static void cscfg_clear_device(void)
+{
+ struct cscfg_config_desc *cfg_desc;
+
+ mutex_lock(&cscfg_mutex);
+ list_for_each_entry(cfg_desc, &cscfg_mgr->config_desc_list, item) {
+ etm_perf_del_symlink_cscfg(cfg_desc);
+ }
+ cscfg_configfs_release(cscfg_mgr);
+ device_unregister(cscfg_device());
+ mutex_unlock(&cscfg_mutex);
+}
+
+/* Initialise system config management API device */
+int __init cscfg_init(void)
+{
+ int err = 0;
+
+ err = cscfg_create_device();
+ if (err)
+ return err;
+
+ err = cscfg_configfs_init(cscfg_mgr);
+ if (err)
+ goto exit_err;
+
+ INIT_LIST_HEAD(&cscfg_mgr->csdev_desc_list);
+ INIT_LIST_HEAD(&cscfg_mgr->feat_desc_list);
+ INIT_LIST_HEAD(&cscfg_mgr->config_desc_list);
+ atomic_set(&cscfg_mgr->sys_active_cnt, 0);
+
+ /* preload built-in configurations */
+ err = cscfg_preload();
+ if (err)
+ goto exit_err;
+
+ dev_info(cscfg_device(), "CoreSight Configuration manager initialised\n");
+ return 0;
+
+exit_err:
+ cscfg_clear_device();
+ return err;
+}
+
+void cscfg_exit(void)
+{
+ cscfg_clear_device();
+}
diff --git a/drivers/hwtracing/coresight/coresight-syscfg.h b/drivers/hwtracing/coresight/coresight-syscfg.h
new file mode 100644
index 000000000000..8d018efd6ead
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-syscfg.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Coresight system configuration driver.
+ */
+
+#ifndef CORESIGHT_SYSCFG_H
+#define CORESIGHT_SYSCFG_H
+
+#include <linux/configfs.h>
+#include <linux/coresight.h>
+#include <linux/device.h>
+
+#include "coresight-config.h"
+
+/**
+ * System configuration manager device.
+ * + * Contains lists of the loaded configurations and features, plus a list of CoreSight devices + * registered with the system as supporting configuration management. + * + * Need a device to 'own' some coresight system wide sysfs entries in + * perf events, configfs etc. + * + * @dev: The device. + * @csdev_desc_list: List of coresight devices registered with the configuration manager. + * @feat_desc_list: List of feature descriptors to load into registered devices. + * @config_desc_list: List of system configuration descriptors to load into registered devices. + * @sys_active_cnt: Total number of active config descriptor references. + * @cfgfs_subsys: configfs subsystem used to manage configurations. + */ +struct cscfg_manager { + struct device dev; + struct list_head csdev_desc_list; + struct list_head feat_desc_list; + struct list_head config_desc_list; + atomic_t sys_active_cnt; + struct configfs_subsystem cfgfs_subsys; +}; + +/* get reference to dev in cscfg_manager */ +struct device *cscfg_device(void); + +/** + * List entry for Coresight devices that are registered as supporting complex + * config operations. + * + * @csdev: The registered device. + * @match_flags: The matching type information for adding features. + * @ops: Operations supported by the registered device. + * @item: list entry. + */ +struct cscfg_registered_csdev { + struct coresight_device *csdev; + u32 match_flags; + struct cscfg_csdev_feat_ops ops; + struct list_head item; +}; + +/* internal core operations for cscfg */ +int __init cscfg_init(void); +void cscfg_exit(void); +int cscfg_preload(void); +const struct cscfg_feature_desc *cscfg_get_named_feat_desc(const char *name); +int cscfg_update_feat_param_val(struct cscfg_feature_desc *feat_desc, + int param_idx, u64 value); + + +/* syscfg manager external API */ +int cscfg_load_config_sets(struct cscfg_config_desc **cfg_descs, + struct cscfg_feature_desc **feat_descs); +int cscfg_register_csdev(struct coresight_device *csdev, u32 match_flags, + struct cscfg_csdev_feat_ops *ops); +void cscfg_unregister_csdev(struct coresight_device *csdev); +int cscfg_activate_config(unsigned long cfg_hash); +void cscfg_deactivate_config(unsigned long cfg_hash); +void cscfg_csdev_reset_feats(struct coresight_device *csdev); +int cscfg_csdev_enable_active_config(struct coresight_device *csdev, + unsigned long cfg_hash, int preset); +void cscfg_csdev_disable_active_config(struct coresight_device *csdev); + +#endif /* CORESIGHT_SYSCFG_H */ diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c index cceaf69279a9..6304d1dd2dd6 100644 --- a/drivers/i2c/busses/i2c-bcm-iproc.c +++ b/drivers/i2c/busses/i2c-bcm-iproc.c @@ -1224,14 +1224,14 @@ static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave) disable_irq(iproc_i2c->irq); + tasklet_kill(&iproc_i2c->slave_rx_tasklet); + /* disable all slave interrupts */ tmp = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET); tmp &= ~(IE_S_ALL_INTERRUPT_MASK << IE_S_ALL_INTERRUPT_SHIFT); iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, tmp); - tasklet_kill(&iproc_i2c->slave_rx_tasklet); - /* Erase the slave address programmed */ tmp = iproc_i2c_rd_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET); tmp &= ~BIT(S_CFG_EN_NIC_SMB_ADDR3_SHIFT); diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c index 6d5014ebaab5..a6ea1eb1394e 100644 --- a/drivers/i2c/busses/i2c-mpc.c +++ b/drivers/i2c/busses/i2c-mpc.c @@ -635,8 +635,8 @@ static irqreturn_t mpc_i2c_isr(int irq, void *dev_id) status = readb(i2c->base + MPC_I2C_SR); if (status & 
CSR_MIF) { - /* Read again to allow register to stabilise */ - status = readb(i2c->base + MPC_I2C_SR); + /* Wait up to 100us for transfer to properly complete */ + readb_poll_timeout(i2c->base + MPC_I2C_SR, status, !(status & CSR_MCF), 0, 100); writeb(0, i2c->base + MPC_I2C_SR); mpc_i2c_do_intr(i2c, status); return IRQ_HANDLED; diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c index cb64fe649390..77f576e51652 100644 --- a/drivers/i2c/i2c-dev.c +++ b/drivers/i2c/i2c-dev.c @@ -141,7 +141,7 @@ static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count, if (count > 8192) count = 8192; - tmp = kmalloc(count, GFP_KERNEL); + tmp = kzalloc(count, GFP_KERNEL); if (tmp == NULL) return -ENOMEM; @@ -150,7 +150,8 @@ static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count, ret = i2c_master_recv(client, tmp, count); if (ret >= 0) - ret = copy_to_user(buf, tmp, count) ? -EFAULT : ret; + if (copy_to_user(buf, tmp, ret)) + ret = -EFAULT; kfree(tmp); return ret; } diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig index 0e56ace61103..8d8b1ba42ff8 100644 --- a/drivers/iio/accel/Kconfig +++ b/drivers/iio/accel/Kconfig @@ -231,6 +231,7 @@ config DMARD10 config FXLS8962AF tristate + depends on I2C || !I2C # cannot be built-in for modular I2C config FXLS8962AF_I2C tristate "NXP FXLS8962AF/FXLS8964AF Accelerometer I2C Driver" @@ -247,6 +248,7 @@ config FXLS8962AF_I2C config FXLS8962AF_SPI tristate "NXP FXLS8962AF/FXLS8964AF Accelerometer SPI Driver" depends on SPI + depends on I2C || !I2C select FXLS8962AF select REGMAP_SPI help diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c index 078d87865fde..0019f1ea7df2 100644 --- a/drivers/iio/accel/fxls8962af-core.c +++ b/drivers/iio/accel/fxls8962af-core.c @@ -637,7 +637,7 @@ static int fxls8962af_i2c_raw_read_errata3(struct fxls8962af_data *data, return ret; } - return ret; + return 0; } static int fxls8962af_fifo_transfer(struct fxls8962af_data *data, diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c index 6ef09609be9f..f9c8385c72d3 100644 --- a/drivers/iio/adc/palmas_gpadc.c +++ b/drivers/iio/adc/palmas_gpadc.c @@ -664,8 +664,8 @@ static int palmas_adc_wakeup_configure(struct palmas_gpadc *adc) adc_period = adc->auto_conversion_period; for (i = 0; i < 16; ++i) { - if (((1000 * (1 << i)) / 32) < adc_period) - continue; + if (((1000 * (1 << i)) / 32) >= adc_period) + break; } if (i > 0) i--; diff --git a/drivers/iio/adc/ti-ads7950.c b/drivers/iio/adc/ti-ads7950.c index 2383eacada87..a2b83f0bd526 100644 --- a/drivers/iio/adc/ti-ads7950.c +++ b/drivers/iio/adc/ti-ads7950.c @@ -568,7 +568,6 @@ static int ti_ads7950_probe(struct spi_device *spi) st->ring_xfer.tx_buf = &st->tx_buf[0]; st->ring_xfer.rx_buf = &st->rx_buf[0]; /* len will be set later */ - st->ring_xfer.cs_change = true; spi_message_add_tail(&st->ring_xfer, &st->ring_msg); diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c index 2a957f19048e..9e0fce917ce4 100644 --- a/drivers/iio/humidity/hdc100x.c +++ b/drivers/iio/humidity/hdc100x.c @@ -25,6 +25,8 @@ #include <linux/iio/trigger_consumer.h> #include <linux/iio/triggered_buffer.h> +#include <linux/time.h> + #define HDC100X_REG_TEMP 0x00 #define HDC100X_REG_HUMIDITY 0x01 @@ -166,7 +168,7 @@ static int hdc100x_get_measurement(struct hdc100x_data *data, struct iio_chan_spec const *chan) { struct i2c_client *client = data->client; - int delay = data->adc_int_us[chan->address]; + int delay = 
data->adc_int_us[chan->address] + 1*USEC_PER_MSEC; int ret; __be16 val; @@ -316,7 +318,7 @@ static irqreturn_t hdc100x_trigger_handler(int irq, void *p) struct iio_dev *indio_dev = pf->indio_dev; struct hdc100x_data *data = iio_priv(indio_dev); struct i2c_client *client = data->client; - int delay = data->adc_int_us[0] + data->adc_int_us[1]; + int delay = data->adc_int_us[0] + data->adc_int_us[1] + 2*USEC_PER_MSEC; int ret; /* dual read starts at temp register */ diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c index a5b421f42287..b9a06ca29bee 100644 --- a/drivers/iio/imu/adis.c +++ b/drivers/iio/imu/adis.c @@ -411,12 +411,11 @@ int __adis_initial_startup(struct adis *adis) int ret; /* check if the device has rst pin low */ - gpio = devm_gpiod_get_optional(&adis->spi->dev, "reset", GPIOD_ASIS); + gpio = devm_gpiod_get_optional(&adis->spi->dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(gpio)) return PTR_ERR(gpio); if (gpio) { - gpiod_set_value_cansleep(gpio, 1); msleep(10); /* bring device out of reset */ gpiod_set_value_cansleep(gpio, 0); diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 515a7e95a421..5d3b8b8d163d 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -926,12 +926,25 @@ static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) return ret; } +static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) +{ + struct ib_qp_attr qp_attr; + int qp_attr_mask, ret; + + qp_attr.qp_state = IB_QPS_INIT; + ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); + if (ret) + return ret; + + return ib_modify_qp(qp, &qp_attr, qp_attr_mask); +} + int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd, struct ib_qp_init_attr *qp_init_attr) { struct rdma_id_private *id_priv; struct ib_qp *qp; - int ret = 0; + int ret; id_priv = container_of(id, struct rdma_id_private, id); if (id->device != pd->device) { @@ -948,6 +961,8 @@ int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd, if (id->qp_type == IB_QPT_UD) ret = cma_init_ud_qp(id_priv, qp); + else + ret = cma_init_conn_qp(id_priv, qp); if (ret) goto out_destroy; diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index d5674026512a..a8688a92c760 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -120,6 +120,7 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode) if (!chip_ctx) return -ENOMEM; chip_ctx->chip_num = bp->chip_num; + chip_ctx->hw_stats_size = bp->hw_ring_stats_size; rdev->chip_ctx = chip_ctx; /* rest members to follow eventually */ @@ -550,6 +551,7 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev, dma_addr_t dma_map, u32 *fw_stats_ctx_id) { + struct bnxt_qplib_chip_ctx *chip_ctx = rdev->chip_ctx; struct hwrm_stat_ctx_alloc_output resp = {0}; struct hwrm_stat_ctx_alloc_input req = {0}; struct bnxt_en_dev *en_dev = rdev->en_dev; @@ -566,7 +568,7 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev, bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1); req.update_period_ms = cpu_to_le32(1000); req.stats_dma_addr = cpu_to_le64(dma_map); - req.stats_dma_length = cpu_to_le16(sizeof(struct ctx_hw_stats_ext)); + req.stats_dma_length = cpu_to_le16(chip_ctx->hw_stats_size); req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE; bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp, sizeof(resp), DFLT_HWRM_CMD_TIMEOUT); diff --git 
a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c index 17f0701b3cee..44282a8cdd4f 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c @@ -56,6 +56,7 @@ static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev, struct bnxt_qplib_stats *stats); static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev, + struct bnxt_qplib_chip_ctx *cctx, struct bnxt_qplib_stats *stats); /* PBL */ @@ -559,7 +560,7 @@ int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res, goto fail; stats_alloc: /* Stats */ - rc = bnxt_qplib_alloc_stats_ctx(res->pdev, &ctx->stats); + rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &ctx->stats); if (rc) goto fail; @@ -889,15 +890,12 @@ static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev, } static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev, + struct bnxt_qplib_chip_ctx *cctx, struct bnxt_qplib_stats *stats) { memset(stats, 0, sizeof(*stats)); stats->fw_id = -1; - /* 128 byte aligned context memory is required only for 57500. - * However making this unconditional, it does not harm previous - * generation. - */ - stats->size = ALIGN(sizeof(struct ctx_hw_stats), 128); + stats->size = cctx->hw_stats_size; stats->dma = dma_alloc_coherent(&pdev->dev, stats->size, &stats->dma_map, GFP_KERNEL); if (!stats->dma) { diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h index c291f495ae91..91031502e8f5 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h @@ -54,6 +54,7 @@ struct bnxt_qplib_chip_ctx { u16 chip_num; u8 chip_rev; u8 chip_metal; + u16 hw_stats_size; struct bnxt_qplib_drv_modes modes; }; diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index 6c8c910f4e86..c7e8d7b3baa1 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c @@ -967,6 +967,12 @@ int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) return !err || err == -ENODATA ? 
npolled : err; } +void c4iw_cq_rem_ref(struct c4iw_cq *chp) +{ + if (refcount_dec_and_test(&chp->refcnt)) + complete(&chp->cq_rel_comp); +} + int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) { struct c4iw_cq *chp; @@ -976,8 +982,8 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) chp = to_c4iw_cq(ib_cq); xa_erase_irq(&chp->rhp->cqs, chp->cq.cqid); - refcount_dec(&chp->refcnt); - wait_event(chp->wait, !refcount_read(&chp->refcnt)); + c4iw_cq_rem_ref(chp); + wait_for_completion(&chp->cq_rel_comp); ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext, ibucontext); @@ -1081,7 +1087,7 @@ int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, spin_lock_init(&chp->lock); spin_lock_init(&chp->comp_handler_lock); refcount_set(&chp->refcnt, 1); - init_waitqueue_head(&chp->wait); + init_completion(&chp->cq_rel_comp); ret = xa_insert_irq(&rhp->cqs, chp->cq.cqid, chp, GFP_KERNEL); if (ret) goto err_destroy_cq; diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c index 7798d090888b..34211a533d5c 100644 --- a/drivers/infiniband/hw/cxgb4/ev.c +++ b/drivers/infiniband/hw/cxgb4/ev.c @@ -213,8 +213,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe) break; } done: - if (refcount_dec_and_test(&chp->refcnt)) - wake_up(&chp->wait); + c4iw_cq_rem_ref(chp); c4iw_qp_rem_ref(&qhp->ibqp); out: return; @@ -234,8 +233,7 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid) spin_lock_irqsave(&chp->comp_handler_lock, flag); (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); spin_unlock_irqrestore(&chp->comp_handler_lock, flag); - if (refcount_dec_and_test(&chp->refcnt)) - wake_up(&chp->wait); + c4iw_cq_rem_ref(chp); } else { pr_debug("unknown cqid 0x%x\n", qid); xa_unlock_irqrestore(&dev->cqs, flag); diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 3883af3d2312..ac5f581aff4c 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -428,7 +428,7 @@ struct c4iw_cq { spinlock_t lock; spinlock_t comp_handler_lock; refcount_t refcnt; - wait_queue_head_t wait; + struct completion cq_rel_comp; struct c4iw_wr_wait *wr_waitp; }; @@ -979,6 +979,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc); int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata); int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata); +void c4iw_cq_rem_ref(struct c4iw_cq *chp); int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, struct ib_udata *udata); int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.c b/drivers/infiniband/hw/hns/hns_roce_cmd.c index 8f68cc3ff193..84f3f2b5f097 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cmd.c +++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c @@ -213,8 +213,10 @@ int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev) hr_cmd->context = kcalloc(hr_cmd->max_cmds, sizeof(*hr_cmd->context), GFP_KERNEL); - if (!hr_cmd->context) + if (!hr_cmd->context) { + hr_dev->cmd_mod = 0; return -ENOMEM; + } for (i = 0; i < hr_cmd->max_cmds; ++i) { hr_cmd->context[i].token = i; @@ -228,7 +230,6 @@ int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev) spin_lock_init(&hr_cmd->context_lock); hr_cmd->use_events = 1; - down(&hr_cmd->poll_sem); return 0; } @@ -239,8 +240,6 @@ void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev) 
kfree(hr_cmd->context); hr_cmd->use_events = 0; - - up(&hr_cmd->poll_sem); } struct hns_roce_cmd_mailbox * diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index 078a97193f0e..cc6eab14a222 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -873,11 +873,9 @@ int hns_roce_init(struct hns_roce_dev *hr_dev) if (hr_dev->cmd_mod) { ret = hns_roce_cmd_use_events(hr_dev); - if (ret) { + if (ret) dev_warn(dev, "Cmd event mode failed, set back to poll!\n"); - hns_roce_cmd_use_polling(hr_dev); - } } ret = hns_roce_init_hem(hr_dev); diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c index b1023a7d0bd1..f1e5515256e0 100644 --- a/drivers/infiniband/hw/irdma/ctrl.c +++ b/drivers/infiniband/hw/irdma/ctrl.c @@ -2845,7 +2845,7 @@ static u64 irdma_sc_decode_fpm_commit(struct irdma_sc_dev *dev, __le64 *buf, * parses fpm commit info and copy base value * of hmc objects in hmc_info */ -static enum irdma_status_code +static void irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 *buf, struct irdma_hmc_obj_info *info, u32 *sd) { @@ -2915,7 +2915,6 @@ irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 *buf, else *sd = (u32)(size >> 21); - return 0; } /** @@ -4187,11 +4186,9 @@ enum irdma_status_code irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq, * @dev: sc device struct * @count: allocate count */ -enum irdma_status_code irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count) +void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count) { writel(count, dev->hw_regs[IRDMA_AEQALLOC]); - - return 0; } /** @@ -4434,9 +4431,9 @@ static enum irdma_status_code irdma_sc_cfg_iw_fpm(struct irdma_sc_dev *dev, ret_code = irdma_sc_commit_fpm_val(dev->cqp, 0, hmc_info->hmc_fn_id, &commit_fpm_mem, true, wait_type); if (!ret_code) - ret_code = irdma_sc_parse_fpm_commit_buf(dev, dev->fpm_commit_buf, - hmc_info->hmc_obj, - &hmc_info->sd_table.sd_cnt); + irdma_sc_parse_fpm_commit_buf(dev, dev->fpm_commit_buf, + hmc_info->hmc_obj, + &hmc_info->sd_table.sd_cnt); print_hex_dump_debug("HMC: COMMIT FPM BUFFER", DUMP_PREFIX_OFFSET, 16, 8, commit_fpm_mem.va, IRDMA_COMMIT_FPM_BUF_SIZE, false); diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c index 7afb8a6a0526..00de5ee9a260 100644 --- a/drivers/infiniband/hw/irdma/hw.c +++ b/drivers/infiniband/hw/irdma/hw.c @@ -1920,7 +1920,7 @@ enum irdma_status_code irdma_ctrl_init_hw(struct irdma_pci_f *rf) * irdma_set_hw_rsrc - set hw memory resources. 
* @rf: RDMA PCI function */ -static u32 irdma_set_hw_rsrc(struct irdma_pci_f *rf) +static void irdma_set_hw_rsrc(struct irdma_pci_f *rf) { rf->allocated_qps = (void *)(rf->mem_rsrc + (sizeof(struct irdma_arp_entry) * rf->arp_table_size)); @@ -1937,8 +1937,6 @@ static u32 irdma_set_hw_rsrc(struct irdma_pci_f *rf) spin_lock_init(&rf->arp_lock); spin_lock_init(&rf->qptable_lock); spin_lock_init(&rf->qh_list_lock); - - return 0; } /** @@ -2000,9 +1998,7 @@ u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf) rf->arp_table = (struct irdma_arp_entry *)rf->mem_rsrc; - ret = irdma_set_hw_rsrc(rf); - if (ret) - goto set_hw_rsrc_fail; + irdma_set_hw_rsrc(rf); set_bit(0, rf->allocated_mrs); set_bit(0, rf->allocated_qps); @@ -2025,9 +2021,6 @@ u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf) return 0; -set_hw_rsrc_fail: - kfree(rf->mem_rsrc); - rf->mem_rsrc = NULL; mem_rsrc_kzalloc_fail: kfree(rf->allocated_ws_nodes); rf->allocated_ws_nodes = NULL; diff --git a/drivers/infiniband/hw/irdma/main.c b/drivers/infiniband/hw/irdma/main.c index ea59432351fb..51a41359e0b4 100644 --- a/drivers/infiniband/hw/irdma/main.c +++ b/drivers/infiniband/hw/irdma/main.c @@ -215,10 +215,10 @@ static void irdma_remove(struct auxiliary_device *aux_dev) pr_debug("INIT: Gen2 PF[%d] device remove success\n", PCI_FUNC(pf->pdev->devfn)); } -static void irdma_fill_device_info(struct irdma_device *iwdev, struct ice_pf *pf) +static void irdma_fill_device_info(struct irdma_device *iwdev, struct ice_pf *pf, + struct ice_vsi *vsi) { struct irdma_pci_f *rf = iwdev->rf; - struct ice_vsi *vsi = ice_get_main_vsi(pf); rf->cdev = pf; rf->gen_ops.register_qset = irdma_lan_register_qset; @@ -253,12 +253,15 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_ struct iidc_auxiliary_dev, adev); struct ice_pf *pf = iidc_adev->pf; + struct ice_vsi *vsi = ice_get_main_vsi(pf); struct iidc_qos_params qos_info = {}; struct irdma_device *iwdev; struct irdma_pci_f *rf; struct irdma_l2params l2params = {}; int err; + if (!vsi) + return -EIO; iwdev = ib_alloc_device(irdma_device, ibdev); if (!iwdev) return -ENOMEM; @@ -268,7 +271,7 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_ return -ENOMEM; } - irdma_fill_device_info(iwdev, pf); + irdma_fill_device_info(iwdev, pf, vsi); rf = iwdev->rf; if (irdma_ctrl_init_hw(rf)) { diff --git a/drivers/infiniband/hw/irdma/type.h b/drivers/infiniband/hw/irdma/type.h index 7387b83e826d..874bc25a938b 100644 --- a/drivers/infiniband/hw/irdma/type.h +++ b/drivers/infiniband/hw/irdma/type.h @@ -1222,8 +1222,7 @@ enum irdma_status_code irdma_sc_aeq_init(struct irdma_sc_aeq *aeq, struct irdma_aeq_init_info *info); enum irdma_status_code irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq, struct irdma_aeqe_info *info); -enum irdma_status_code irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, - u32 count); +void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count); void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id, int abi_ver); diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c index a6d52c20091c..5fb92de1f015 100644 --- a/drivers/infiniband/hw/irdma/uk.c +++ b/drivers/infiniband/hw/irdma/uk.c @@ -931,7 +931,7 @@ enum irdma_status_code irdma_uk_mw_bind(struct irdma_qp_uk *qp, enum irdma_status_code irdma_uk_post_receive(struct irdma_qp_uk *qp, struct irdma_post_rq_info *info) { - u32 total_size = 0, wqe_idx, i, byte_off; + u32 wqe_idx, i, byte_off; u32 addl_frag_cnt; __le64 *wqe; 
u64 hdr; @@ -939,9 +939,6 @@ enum irdma_status_code irdma_uk_post_receive(struct irdma_qp_uk *qp, if (qp->max_rq_frag_cnt < info->num_sges) return IRDMA_ERR_INVALID_FRAG_COUNT; - for (i = 0; i < info->num_sges; i++) - total_size += info->sg_list[i].len; - wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx); if (!wqe) return IRDMA_ERR_QP_TOOMANY_WRS_POSTED; diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c index 9712f6902ba8..717147ed0519 100644 --- a/drivers/infiniband/hw/irdma/verbs.c +++ b/drivers/infiniband/hw/irdma/verbs.c @@ -557,7 +557,7 @@ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) * @iwqp: qp ptr * @init_info: initialize info to return */ -static int irdma_setup_virt_qp(struct irdma_device *iwdev, +static void irdma_setup_virt_qp(struct irdma_device *iwdev, struct irdma_qp *iwqp, struct irdma_qp_init_info *init_info) { @@ -574,8 +574,6 @@ static int irdma_setup_virt_qp(struct irdma_device *iwdev, init_info->sq_pa = qpmr->sq_pbl.addr; init_info->rq_pa = qpmr->rq_pbl.addr; } - - return 0; } /** @@ -914,7 +912,7 @@ static struct ib_qp *irdma_create_qp(struct ib_pd *ibpd, } } init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver; - err_code = irdma_setup_virt_qp(iwdev, iwqp, &init_info); + irdma_setup_virt_qp(iwdev, iwqp, &init_info); } else { init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER; err_code = irdma_setup_kmode_qp(iwdev, iwqp, &init_info, init_attr); diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 7abeb576b3c5..b8e5e371bb19 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -945,7 +945,6 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, u32 *cqb = NULL; void *cqc; int cqe_size; - unsigned int irqn; int eqn; int err; @@ -984,7 +983,7 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, INIT_WORK(&cq->notify_work, notify_soft_wc_handler); } - err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn); + err = mlx5_vector2eqn(dev->mdev, vector, &eqn); if (err) goto err_cqb; @@ -1007,7 +1006,6 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, goto err_cqb; mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn); - cq->mcq.irqn = irqn; if (udata) cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp; else diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index eb9b0a2707f8..c869b2a91a28 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c @@ -975,7 +975,6 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)( struct mlx5_ib_dev *dev; int user_vector; int dev_eqn; - unsigned int irqn; int err; if (uverbs_copy_from(&user_vector, attrs, @@ -987,7 +986,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)( return PTR_ERR(c); dev = to_mdev(c->ibucontext.device); - err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn, &irqn); + err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn); if (err < 0) return err; diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 3263851ea574..3f1c5a4f158b 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -531,8 +531,8 @@ static void __cache_work_func(struct mlx5_cache_ent *ent) */ spin_unlock_irq(&ent->lock); need_delay = need_resched() || someone_adding(cache) || - time_after(jiffies, - READ_ONCE(cache->last_add) + 300 * HZ); + !time_after(jiffies, + READ_ONCE(cache->last_add) + 300 * HZ); 
spin_lock_irq(&ent->lock); if (ent->disabled) goto out; diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c index 6aabcb4de235..be4bcb420fab 100644 --- a/drivers/infiniband/sw/rxe/rxe_mr.c +++ b/drivers/infiniband/sw/rxe/rxe_mr.c @@ -113,13 +113,14 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova, int num_buf; void *vaddr; int err; + int i; umem = ib_umem_get(pd->ibpd.device, start, length, access); if (IS_ERR(umem)) { - pr_warn("err %d from rxe_umem_get\n", - (int)PTR_ERR(umem)); + pr_warn("%s: Unable to pin memory region err = %d\n", + __func__, (int)PTR_ERR(umem)); err = PTR_ERR(umem); - goto err1; + goto err_out; } mr->umem = umem; @@ -129,9 +130,9 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova, err = rxe_mr_alloc(mr, num_buf); if (err) { - pr_warn("err %d from rxe_mr_alloc\n", err); - ib_umem_release(umem); - goto err1; + pr_warn("%s: Unable to allocate memory for map\n", + __func__); + goto err_release_umem; } mr->page_shift = PAGE_SHIFT; @@ -151,10 +152,10 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova, vaddr = page_address(sg_page_iter_page(&sg_iter)); if (!vaddr) { - pr_warn("null vaddr\n"); - ib_umem_release(umem); + pr_warn("%s: Unable to get virtual address\n", + __func__); err = -ENOMEM; - goto err1; + goto err_cleanup_map; } buf->addr = (uintptr_t)vaddr; @@ -177,7 +178,13 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova, return 0; -err1: +err_cleanup_map: + for (i = 0; i < mr->num_map; i++) + kfree(mr->map[i]); + kfree(mr->map); +err_release_umem: + ib_umem_release(umem); +err_out: return err; } diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c index dec92928a1cd..5ac27f28ace1 100644 --- a/drivers/infiniband/sw/rxe/rxe_net.c +++ b/drivers/infiniband/sw/rxe/rxe_net.c @@ -259,6 +259,7 @@ static void prepare_ipv4_hdr(struct dst_entry *dst, struct sk_buff *skb, iph->version = IPVERSION; iph->ihl = sizeof(struct iphdr) >> 2; + iph->tot_len = htons(skb->len); iph->frag_off = df; iph->protocol = proto; iph->tos = tos; diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c index 3743dc39b60c..360ec67cb9e1 100644 --- a/drivers/infiniband/sw/rxe/rxe_resp.c +++ b/drivers/infiniband/sw/rxe/rxe_resp.c @@ -318,7 +318,7 @@ static enum resp_states get_srq_wqe(struct rxe_qp *qp) pr_warn("%s: invalid num_sge in SRQ entry\n", __func__); return RESPST_ERR_MALFORMED_WQE; } - size = sizeof(wqe) + wqe->dma.num_sge*sizeof(struct rxe_sge); + size = sizeof(*wqe) + wqe->dma.num_sge*sizeof(struct rxe_sge); memcpy(&qp->resp.srq_wqe, wqe, size); qp->resp.wqe = &qp->resp.srq_wqe.wqe; diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c index 8a1e70e00876..9050ca1f4285 100644 --- a/drivers/interconnect/core.c +++ b/drivers/interconnect/core.c @@ -403,7 +403,7 @@ struct icc_path *devm_of_icc_get(struct device *dev, const char *name) { struct icc_path **ptr, *path; - ptr = devres_alloc(devm_icc_release, sizeof(**ptr), GFP_KERNEL); + ptr = devres_alloc(devm_icc_release, sizeof(*ptr), GFP_KERNEL); if (!ptr) return ERR_PTR(-ENOMEM); @@ -959,6 +959,9 @@ EXPORT_SYMBOL_GPL(icc_link_destroy); */ void icc_node_add(struct icc_node *node, struct icc_provider *provider) { + if (WARN_ON(node->provider)) + return; + mutex_lock(&icc_lock); node->provider = provider; @@ -973,9 +976,14 @@ void icc_node_add(struct icc_node *node, struct icc_provider *provider) } node->avg_bw = node->init_avg; 
node->peak_bw = node->init_peak; + + if (provider->pre_aggregate) + provider->pre_aggregate(node); + if (provider->aggregate) provider->aggregate(node, 0, node->init_avg, node->init_peak, &node->avg_bw, &node->peak_bw); + provider->set(node, node); node->avg_bw = 0; node->peak_bw = 0; @@ -1106,6 +1114,8 @@ void icc_sync_state(struct device *dev) dev_dbg(p->dev, "interconnect provider is in synced state\n"); list_for_each_entry(n, &p->nodes, node_list) { if (n->init_avg || n->init_peak) { + n->init_avg = 0; + n->init_peak = 0; aggregate_requests(n); p->set(n, n); } diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig index 0d7a2500d0b8..daf1e25f6042 100644 --- a/drivers/interconnect/qcom/Kconfig +++ b/drivers/interconnect/qcom/Kconfig @@ -83,6 +83,15 @@ config INTERCONNECT_QCOM_SC7280 This is a driver for the Qualcomm Network-on-Chip on sc7280-based platforms. +config INTERCONNECT_QCOM_SC8180X + tristate "Qualcomm SC8180X interconnect driver" + depends on INTERCONNECT_QCOM_RPMH_POSSIBLE + select INTERCONNECT_QCOM_RPMH + select INTERCONNECT_QCOM_BCM_VOTER + help + This is a driver for the Qualcomm Network-on-Chip on sc8180x-based + platforms. + config INTERCONNECT_QCOM_SDM660 tristate "Qualcomm SDM660 interconnect driver" depends on INTERCONNECT_QCOM diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile index 2880129a6fe4..69300b1d48ef 100644 --- a/drivers/interconnect/qcom/Makefile +++ b/drivers/interconnect/qcom/Makefile @@ -9,6 +9,7 @@ qnoc-qcs404-objs := qcs404.o icc-rpmh-obj := icc-rpmh.o qnoc-sc7180-objs := sc7180.o qnoc-sc7280-objs := sc7280.o +qnoc-sc8180x-objs := sc8180x.o qnoc-sdm660-objs := sdm660.o qnoc-sdm845-objs := sdm845.o qnoc-sdx55-objs := sdx55.o @@ -26,6 +27,7 @@ obj-$(CONFIG_INTERCONNECT_QCOM_QCS404) += qnoc-qcs404.o obj-$(CONFIG_INTERCONNECT_QCOM_RPMH) += icc-rpmh.o obj-$(CONFIG_INTERCONNECT_QCOM_SC7180) += qnoc-sc7180.o obj-$(CONFIG_INTERCONNECT_QCOM_SC7280) += qnoc-sc7280.o +obj-$(CONFIG_INTERCONNECT_QCOM_SC8180X) += qnoc-sc8180x.o obj-$(CONFIG_INTERCONNECT_QCOM_SDM660) += qnoc-sdm660.o obj-$(CONFIG_INTERCONNECT_QCOM_SDM845) += qnoc-sdm845.o obj-$(CONFIG_INTERCONNECT_QCOM_SDX55) += qnoc-sdx55.o diff --git a/drivers/interconnect/qcom/icc-rpmh.c b/drivers/interconnect/qcom/icc-rpmh.c index bf01d09dba6c..3eb7936d2cf6 100644 --- a/drivers/interconnect/qcom/icc-rpmh.c +++ b/drivers/interconnect/qcom/icc-rpmh.c @@ -7,6 +7,7 @@ #include <linux/interconnect-provider.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/of_device.h> #include <linux/slab.h> #include "bcm-voter.h" @@ -57,6 +58,11 @@ int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw, qn->sum_avg[i] += avg_bw; qn->max_peak[i] = max_t(u32, qn->max_peak[i], peak_bw); } + + if (node->init_avg || node->init_peak) { + qn->sum_avg[i] = max_t(u64, qn->sum_avg[i], node->init_avg); + qn->max_peak[i] = max_t(u64, qn->max_peak[i], node->init_peak); + } } *agg_avg += avg_bw; @@ -79,7 +85,6 @@ EXPORT_SYMBOL_GPL(qcom_icc_aggregate); int qcom_icc_set(struct icc_node *src, struct icc_node *dst) { struct qcom_icc_provider *qp; - struct qcom_icc_node *qn; struct icc_node *node; if (!src) @@ -88,12 +93,6 @@ int qcom_icc_set(struct icc_node *src, struct icc_node *dst) node = src; qp = to_qcom_provider(node->provider); - qn = node->data; - - qn->sum_avg[QCOM_ICC_BUCKET_AMC] = max_t(u64, qn->sum_avg[QCOM_ICC_BUCKET_AMC], - node->avg_bw); - qn->max_peak[QCOM_ICC_BUCKET_AMC] = max_t(u64, qn->max_peak[QCOM_ICC_BUCKET_AMC], - node->peak_bw); 
qcom_icc_bcm_voter_commit(qp->voter); @@ -184,4 +183,96 @@ int qcom_icc_bcm_init(struct qcom_icc_bcm *bcm, struct device *dev) } EXPORT_SYMBOL_GPL(qcom_icc_bcm_init); +int qcom_icc_rpmh_probe(struct platform_device *pdev) +{ + const struct qcom_icc_desc *desc; + struct device *dev = &pdev->dev; + struct icc_onecell_data *data; + struct icc_provider *provider; + struct qcom_icc_node **qnodes, *qn; + struct qcom_icc_provider *qp; + struct icc_node *node; + size_t num_nodes, i, j; + int ret; + + desc = of_device_get_match_data(dev); + if (!desc) + return -EINVAL; + + qnodes = desc->nodes; + num_nodes = desc->num_nodes; + + qp = devm_kzalloc(dev, sizeof(*qp), GFP_KERNEL); + if (!qp) + return -ENOMEM; + + data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes), GFP_KERNEL); + if (!data) + return -ENOMEM; + + provider = &qp->provider; + provider->dev = dev; + provider->set = qcom_icc_set; + provider->pre_aggregate = qcom_icc_pre_aggregate; + provider->aggregate = qcom_icc_aggregate; + provider->xlate_extended = qcom_icc_xlate_extended; + INIT_LIST_HEAD(&provider->nodes); + provider->data = data; + + qp->dev = dev; + qp->bcms = desc->bcms; + qp->num_bcms = desc->num_bcms; + + qp->voter = of_bcm_voter_get(qp->dev, NULL); + if (IS_ERR(qp->voter)) + return PTR_ERR(qp->voter); + + ret = icc_provider_add(provider); + if (ret) + return ret; + + for (i = 0; i < qp->num_bcms; i++) + qcom_icc_bcm_init(qp->bcms[i], dev); + + for (i = 0; i < num_nodes; i++) { + qn = qnodes[i]; + if (!qn) + continue; + + node = icc_node_create(qn->id); + if (IS_ERR(node)) { + ret = PTR_ERR(node); + goto err; + } + + node->name = qn->name; + node->data = qn; + icc_node_add(node, provider); + + for (j = 0; j < qn->num_links; j++) + icc_link_create(node, qn->links[j]); + + data->nodes[i] = node; + } + + data->num_nodes = num_nodes; + platform_set_drvdata(pdev, qp); + + return 0; +err: + icc_nodes_remove(provider); + icc_provider_del(provider); + return ret; +} +EXPORT_SYMBOL_GPL(qcom_icc_rpmh_probe); + +int qcom_icc_rpmh_remove(struct platform_device *pdev) +{ + struct qcom_icc_provider *qp = platform_get_drvdata(pdev); + + icc_nodes_remove(&qp->provider); + return icc_provider_del(&qp->provider); +} +EXPORT_SYMBOL_GPL(qcom_icc_rpmh_remove); + MODULE_LICENSE("GPL v2"); diff --git a/drivers/interconnect/qcom/icc-rpmh.h b/drivers/interconnect/qcom/icc-rpmh.h index e5f61ab989e7..4bfc060529ba 100644 --- a/drivers/interconnect/qcom/icc-rpmh.h +++ b/drivers/interconnect/qcom/icc-rpmh.h @@ -134,5 +134,7 @@ int qcom_icc_set(struct icc_node *src, struct icc_node *dst); struct icc_node_data *qcom_icc_xlate_extended(struct of_phandle_args *spec, void *data); int qcom_icc_bcm_init(struct qcom_icc_bcm *bcm, struct device *dev); void qcom_icc_pre_aggregate(struct icc_node *node); +int qcom_icc_rpmh_probe(struct platform_device *pdev); +int qcom_icc_rpmh_remove(struct platform_device *pdev); #endif diff --git a/drivers/interconnect/qcom/osm-l3.c b/drivers/interconnect/qcom/osm-l3.c index 695f28789e98..c7af143980de 100644 --- a/drivers/interconnect/qcom/osm-l3.c +++ b/drivers/interconnect/qcom/osm-l3.c @@ -15,6 +15,7 @@ #include <dt-bindings/interconnect/qcom,osm-l3.h> #include "sc7180.h" +#include "sc8180x.h" #include "sdm845.h" #include "sm8150.h" #include "sm8250.h" @@ -37,7 +38,7 @@ #define OSM_L3_MAX_LINKS 1 -#define to_qcom_provider(_provider) \ +#define to_osm_l3_provider(_provider) \ container_of(_provider, struct qcom_osm_l3_icc_provider, provider) struct qcom_osm_l3_icc_provider { @@ -49,14 +50,14 @@ struct 
qcom_osm_l3_icc_provider { }; /** - * struct qcom_icc_node - Qualcomm specific interconnect nodes + * struct qcom_osm_l3_node - Qualcomm specific interconnect nodes * @name: the node name used in debugfs * @links: an array of nodes where we can go next while traversing * @id: a unique node identifier * @num_links: the total number of @links * @buswidth: width of the interconnect between a node and the bus */ -struct qcom_icc_node { +struct qcom_osm_l3_node { const char *name; u16 links[OSM_L3_MAX_LINKS]; u16 id; @@ -64,8 +65,8 @@ struct qcom_icc_node { u16 buswidth; }; -struct qcom_icc_desc { - const struct qcom_icc_node **nodes; +struct qcom_osm_l3_desc { + const struct qcom_osm_l3_node **nodes; size_t num_nodes; unsigned int lut_row_size; unsigned int reg_freq_lut; @@ -73,7 +74,7 @@ struct qcom_icc_desc { }; #define DEFINE_QNODE(_name, _id, _buswidth, ...) \ - static const struct qcom_icc_node _name = { \ + static const struct qcom_osm_l3_node _name = { \ .name = #_name, \ .id = _id, \ .buswidth = _buswidth, \ @@ -84,12 +85,12 @@ struct qcom_icc_desc { DEFINE_QNODE(sdm845_osm_apps_l3, SDM845_MASTER_OSM_L3_APPS, 16, SDM845_SLAVE_OSM_L3); DEFINE_QNODE(sdm845_osm_l3, SDM845_SLAVE_OSM_L3, 16); -static const struct qcom_icc_node *sdm845_osm_l3_nodes[] = { +static const struct qcom_osm_l3_node *sdm845_osm_l3_nodes[] = { [MASTER_OSM_L3_APPS] = &sdm845_osm_apps_l3, [SLAVE_OSM_L3] = &sdm845_osm_l3, }; -static const struct qcom_icc_desc sdm845_icc_osm_l3 = { +static const struct qcom_osm_l3_desc sdm845_icc_osm_l3 = { .nodes = sdm845_osm_l3_nodes, .num_nodes = ARRAY_SIZE(sdm845_osm_l3_nodes), .lut_row_size = OSM_LUT_ROW_SIZE, @@ -100,12 +101,12 @@ static const struct qcom_icc_desc sdm845_icc_osm_l3 = { DEFINE_QNODE(sc7180_osm_apps_l3, SC7180_MASTER_OSM_L3_APPS, 16, SC7180_SLAVE_OSM_L3); DEFINE_QNODE(sc7180_osm_l3, SC7180_SLAVE_OSM_L3, 16); -static const struct qcom_icc_node *sc7180_osm_l3_nodes[] = { +static const struct qcom_osm_l3_node *sc7180_osm_l3_nodes[] = { [MASTER_OSM_L3_APPS] = &sc7180_osm_apps_l3, [SLAVE_OSM_L3] = &sc7180_osm_l3, }; -static const struct qcom_icc_desc sc7180_icc_osm_l3 = { +static const struct qcom_osm_l3_desc sc7180_icc_osm_l3 = { .nodes = sc7180_osm_l3_nodes, .num_nodes = ARRAY_SIZE(sc7180_osm_l3_nodes), .lut_row_size = OSM_LUT_ROW_SIZE, @@ -113,15 +114,31 @@ static const struct qcom_icc_desc sc7180_icc_osm_l3 = { .reg_perf_state = OSM_REG_PERF_STATE, }; +DEFINE_QNODE(sc8180x_osm_apps_l3, SC8180X_MASTER_OSM_L3_APPS, 32, SC8180X_SLAVE_OSM_L3); +DEFINE_QNODE(sc8180x_osm_l3, SC8180X_SLAVE_OSM_L3, 32); + +static const struct qcom_osm_l3_node *sc8180x_osm_l3_nodes[] = { + [MASTER_OSM_L3_APPS] = &sc8180x_osm_apps_l3, + [SLAVE_OSM_L3] = &sc8180x_osm_l3, +}; + +static const struct qcom_osm_l3_desc sc8180x_icc_osm_l3 = { + .nodes = sc8180x_osm_l3_nodes, + .num_nodes = ARRAY_SIZE(sc8180x_osm_l3_nodes), + .lut_row_size = OSM_LUT_ROW_SIZE, + .reg_freq_lut = OSM_REG_FREQ_LUT, + .reg_perf_state = OSM_REG_PERF_STATE, +}; + DEFINE_QNODE(sm8150_osm_apps_l3, SM8150_MASTER_OSM_L3_APPS, 32, SM8150_SLAVE_OSM_L3); DEFINE_QNODE(sm8150_osm_l3, SM8150_SLAVE_OSM_L3, 32); -static const struct qcom_icc_node *sm8150_osm_l3_nodes[] = { +static const struct qcom_osm_l3_node *sm8150_osm_l3_nodes[] = { [MASTER_OSM_L3_APPS] = &sm8150_osm_apps_l3, [SLAVE_OSM_L3] = &sm8150_osm_l3, }; -static const struct qcom_icc_desc sm8150_icc_osm_l3 = { +static const struct qcom_osm_l3_desc sm8150_icc_osm_l3 = { .nodes = sm8150_osm_l3_nodes, .num_nodes = ARRAY_SIZE(sm8150_osm_l3_nodes), .lut_row_size = OSM_LUT_ROW_SIZE, 
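For readers unfamiliar with the DEFINE_QNODE() helper used throughout this file, a minimal sketch of what one invocation expands to follows. The .name, .id and .buswidth initializers come straight from the macro body visible in the hunk above; the num_links/links initializers come from the tail of the macro, which lies outside the hunk, so their exact form here is an assumption inferred from the variadic arguments.

	/* Illustrative expansion only, not part of the patch:
	 * DEFINE_QNODE(sc8180x_osm_apps_l3, SC8180X_MASTER_OSM_L3_APPS, 32,
	 *              SC8180X_SLAVE_OSM_L3);
	 * expands to roughly:
	 */
	static const struct qcom_osm_l3_node sc8180x_osm_apps_l3 = {
		.name = "sc8180x_osm_apps_l3",    /* #_name stringification */
		.id = SC8180X_MASTER_OSM_L3_APPS, /* _id */
		.buswidth = 32,                   /* _buswidth */
		.num_links = 1,                   /* assumed: count of __VA_ARGS__ */
		.links = { SC8180X_SLAVE_OSM_L3 },/* assumed: { __VA_ARGS__ } */
	};

Each node is thus a static description of one endpoint on the L3 interconnect; the sc8180x_osm_l3_nodes[] array and sc8180x_icc_osm_l3 descriptor added above simply collect these nodes into the per-SoC table that the probe path selects through its of_device_id match data.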
@@ -132,12 +149,12 @@ static const struct qcom_icc_desc sm8150_icc_osm_l3 = { DEFINE_QNODE(sm8250_epss_apps_l3, SM8250_MASTER_EPSS_L3_APPS, 32, SM8250_SLAVE_EPSS_L3); DEFINE_QNODE(sm8250_epss_l3, SM8250_SLAVE_EPSS_L3, 32); -static const struct qcom_icc_node *sm8250_epss_l3_nodes[] = { +static const struct qcom_osm_l3_node *sm8250_epss_l3_nodes[] = { [MASTER_EPSS_L3_APPS] = &sm8250_epss_apps_l3, [SLAVE_EPSS_L3_SHARED] = &sm8250_epss_l3, }; -static const struct qcom_icc_desc sm8250_icc_epss_l3 = { +static const struct qcom_osm_l3_desc sm8250_icc_epss_l3 = { .nodes = sm8250_epss_l3_nodes, .num_nodes = ARRAY_SIZE(sm8250_epss_l3_nodes), .lut_row_size = EPSS_LUT_ROW_SIZE, @@ -145,11 +162,11 @@ static const struct qcom_icc_desc sm8250_icc_epss_l3 = { .reg_perf_state = EPSS_REG_PERF_STATE, }; -static int qcom_icc_set(struct icc_node *src, struct icc_node *dst) +static int qcom_osm_l3_set(struct icc_node *src, struct icc_node *dst) { struct qcom_osm_l3_icc_provider *qp; struct icc_provider *provider; - const struct qcom_icc_node *qn; + const struct qcom_osm_l3_node *qn; struct icc_node *n; unsigned int index; u32 agg_peak = 0; @@ -158,7 +175,7 @@ static int qcom_icc_set(struct icc_node *src, struct icc_node *dst) qn = src->data; provider = src->provider; - qp = to_qcom_provider(provider); + qp = to_osm_l3_provider(provider); list_for_each_entry(n, &provider->nodes, node_list) provider->aggregate(n, 0, n->avg_bw, n->peak_bw, @@ -191,10 +208,10 @@ static int qcom_osm_l3_probe(struct platform_device *pdev) u32 info, src, lval, i, prev_freq = 0, freq; static unsigned long hw_rate, xo_rate; struct qcom_osm_l3_icc_provider *qp; - const struct qcom_icc_desc *desc; + const struct qcom_osm_l3_desc *desc; struct icc_onecell_data *data; struct icc_provider *provider; - const struct qcom_icc_node **qnodes; + const struct qcom_osm_l3_node **qnodes; struct icc_node *node; size_t num_nodes; struct clk *clk; @@ -264,7 +281,7 @@ static int qcom_osm_l3_probe(struct platform_device *pdev) provider = &qp->provider; provider->dev = &pdev->dev; - provider->set = qcom_icc_set; + provider->set = qcom_osm_l3_set; provider->aggregate = icc_std_aggregate; provider->xlate = of_icc_xlate_onecell; INIT_LIST_HEAD(&provider->nodes); @@ -286,7 +303,7 @@ static int qcom_osm_l3_probe(struct platform_device *pdev) } node->name = qnodes[i]->name; - /* Cast away const and add it back in qcom_icc_set() */ + /* Cast away const and add it back in qcom_osm_l3_set() */ node->data = (void *)qnodes[i]; icc_node_add(node, provider); @@ -311,6 +328,7 @@ static const struct of_device_id osm_l3_of_match[] = { { .compatible = "qcom,sc7180-osm-l3", .data = &sc7180_icc_osm_l3 }, { .compatible = "qcom,sdm845-osm-l3", .data = &sdm845_icc_osm_l3 }, { .compatible = "qcom,sm8150-osm-l3", .data = &sm8150_icc_osm_l3 }, + { .compatible = "qcom,sc8180x-osm-l3", .data = &sc8180x_icc_osm_l3 }, { .compatible = "qcom,sm8250-epss-l3", .data = &sm8250_icc_epss_l3 }, { } }; diff --git a/drivers/interconnect/qcom/sc7180.c b/drivers/interconnect/qcom/sc7180.c index 8d9044ed18ab..12d59c36df53 100644 --- a/drivers/interconnect/qcom/sc7180.c +++ b/drivers/interconnect/qcom/sc7180.c @@ -504,98 +504,6 @@ static struct qcom_icc_desc sc7180_system_noc = { .num_bcms = ARRAY_SIZE(system_noc_bcms), }; -static int qnoc_probe(struct platform_device *pdev) -{ - const struct qcom_icc_desc *desc; - struct icc_onecell_data *data; - struct icc_provider *provider; - struct qcom_icc_node **qnodes; - struct qcom_icc_provider *qp; - struct icc_node *node; - size_t num_nodes, i; - int 
ret; - - desc = device_get_match_data(&pdev->dev); - if (!desc) - return -EINVAL; - - qnodes = desc->nodes; - num_nodes = desc->num_nodes; - - qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL); - if (!qp) - return -ENOMEM; - - data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL); - if (!data) - return -ENOMEM; - - provider = &qp->provider; - provider->dev = &pdev->dev; - provider->set = qcom_icc_set; - provider->pre_aggregate = qcom_icc_pre_aggregate; - provider->aggregate = qcom_icc_aggregate; - provider->xlate_extended = qcom_icc_xlate_extended; - INIT_LIST_HEAD(&provider->nodes); - provider->data = data; - - qp->dev = &pdev->dev; - qp->bcms = desc->bcms; - qp->num_bcms = desc->num_bcms; - - qp->voter = of_bcm_voter_get(qp->dev, NULL); - if (IS_ERR(qp->voter)) - return PTR_ERR(qp->voter); - - ret = icc_provider_add(provider); - if (ret) { - dev_err(&pdev->dev, "error adding interconnect provider\n"); - return ret; - } - - for (i = 0; i < qp->num_bcms; i++) - qcom_icc_bcm_init(qp->bcms[i], &pdev->dev); - - for (i = 0; i < num_nodes; i++) { - size_t j; - - if (!qnodes[i]) - continue; - - node = icc_node_create(qnodes[i]->id); - if (IS_ERR(node)) { - ret = PTR_ERR(node); - goto err; - } - - node->name = qnodes[i]->name; - node->data = qnodes[i]; - icc_node_add(node, provider); - - for (j = 0; j < qnodes[i]->num_links; j++) - icc_link_create(node, qnodes[i]->links[j]); - - data->nodes[i] = node; - } - data->num_nodes = num_nodes; - - platform_set_drvdata(pdev, qp); - - return 0; -err: - icc_nodes_remove(provider); - icc_provider_del(provider); - return ret; -} - -static int qnoc_remove(struct platform_device *pdev) -{ - struct qcom_icc_provider *qp = platform_get_drvdata(pdev); - - icc_nodes_remove(&qp->provider); - return icc_provider_del(&qp->provider); -} - static const struct of_device_id qnoc_of_match[] = { { .compatible = "qcom,sc7180-aggre1-noc", .data = &sc7180_aggre1_noc}, @@ -628,8 +536,8 @@ static const struct of_device_id qnoc_of_match[] = { MODULE_DEVICE_TABLE(of, qnoc_of_match); static struct platform_driver qnoc_driver = { - .probe = qnoc_probe, - .remove = qnoc_remove, + .probe = qcom_icc_rpmh_probe, + .remove = qcom_icc_rpmh_remove, .driver = { .name = "qnoc-sc7180", .of_match_table = qnoc_of_match, diff --git a/drivers/interconnect/qcom/sc7280.c b/drivers/interconnect/qcom/sc7280.c index 8d1b55c3705c..f8b34f6cbb0d 100644 --- a/drivers/interconnect/qcom/sc7280.c +++ b/drivers/interconnect/qcom/sc7280.c @@ -1802,98 +1802,6 @@ static struct qcom_icc_desc sc7280_system_noc = { .num_bcms = ARRAY_SIZE(system_noc_bcms), }; -static int qnoc_probe(struct platform_device *pdev) -{ - const struct qcom_icc_desc *desc; - struct icc_onecell_data *data; - struct icc_provider *provider; - struct qcom_icc_node **qnodes; - struct qcom_icc_provider *qp; - struct icc_node *node; - size_t num_nodes, i; - int ret; - - desc = device_get_match_data(&pdev->dev); - if (!desc) - return -EINVAL; - - qnodes = desc->nodes; - num_nodes = desc->num_nodes; - - qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL); - if (!qp) - return -ENOMEM; - - data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL); - if (!data) - return -ENOMEM; - - provider = &qp->provider; - provider->dev = &pdev->dev; - provider->set = qcom_icc_set; - provider->pre_aggregate = qcom_icc_pre_aggregate; - provider->aggregate = qcom_icc_aggregate; - provider->xlate_extended = qcom_icc_xlate_extended; - INIT_LIST_HEAD(&provider->nodes); - provider->data = data; - - qp->dev = &pdev->dev; - qp->bcms = 
desc->bcms; - qp->num_bcms = desc->num_bcms; - - qp->voter = of_bcm_voter_get(qp->dev, NULL); - if (IS_ERR(qp->voter)) - return PTR_ERR(qp->voter); - - ret = icc_provider_add(provider); - if (ret) { - dev_err(&pdev->dev, "error adding interconnect provider\n"); - return ret; - } - - for (i = 0; i < qp->num_bcms; i++) - qcom_icc_bcm_init(qp->bcms[i], &pdev->dev); - - for (i = 0; i < num_nodes; i++) { - size_t j; - - if (!qnodes[i]) - continue; - - node = icc_node_create(qnodes[i]->id); - if (IS_ERR(node)) { - ret = PTR_ERR(node); - goto err; - } - - node->name = qnodes[i]->name; - node->data = qnodes[i]; - icc_node_add(node, provider); - - for (j = 0; j < qnodes[i]->num_links; j++) - icc_link_create(node, qnodes[i]->links[j]); - - data->nodes[i] = node; - } - data->num_nodes = num_nodes; - - platform_set_drvdata(pdev, qp); - - return 0; -err: - icc_nodes_remove(provider); - icc_provider_del(provider); - return ret; -} - -static int qnoc_remove(struct platform_device *pdev) -{ - struct qcom_icc_provider *qp = platform_get_drvdata(pdev); - - icc_nodes_remove(&qp->provider); - return icc_provider_del(&qp->provider); -} - static const struct of_device_id qnoc_of_match[] = { { .compatible = "qcom,sc7280-aggre1-noc", .data = &sc7280_aggre1_noc}, @@ -1924,8 +1832,8 @@ static const struct of_device_id qnoc_of_match[] = { MODULE_DEVICE_TABLE(of, qnoc_of_match); static struct platform_driver qnoc_driver = { - .probe = qnoc_probe, - .remove = qnoc_remove, + .probe = qcom_icc_rpmh_probe, + .remove = qcom_icc_rpmh_remove, .driver = { .name = "qnoc-sc7280", .of_match_table = qnoc_of_match, diff --git a/drivers/interconnect/qcom/sc8180x.c b/drivers/interconnect/qcom/sc8180x.c new file mode 100644 index 000000000000..e9adf05b9330 --- /dev/null +++ b/drivers/interconnect/qcom/sc8180x.c @@ -0,0 +1,626 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2021, Linaro Ltd. 
+ */ + +#include <linux/device.h> +#include <linux/interconnect-provider.h> +#include <linux/module.h> +#include <linux/of_device.h> + +#include <dt-bindings/interconnect/qcom,sc8180x.h> + +#include "bcm-voter.h" +#include "icc-rpmh.h" +#include "sc8180x.h" + +DEFINE_QNODE(mas_qhm_a1noc_cfg, SC8180X_MASTER_A1NOC_CFG, 1, 4, SC8180X_SLAVE_SERVICE_A1NOC); +DEFINE_QNODE(mas_xm_ufs_card, SC8180X_MASTER_UFS_CARD, 1, 8, SC8180X_A1NOC_SNOC_SLV); +DEFINE_QNODE(mas_xm_ufs_g4, SC8180X_MASTER_UFS_GEN4, 1, 8, SC8180X_A1NOC_SNOC_SLV); +DEFINE_QNODE(mas_xm_ufs_mem, SC8180X_MASTER_UFS_MEM, 1, 8, SC8180X_A1NOC_SNOC_SLV); +DEFINE_QNODE(mas_xm_usb3_0, SC8180X_MASTER_USB3, 1, 8, SC8180X_A1NOC_SNOC_SLV); +DEFINE_QNODE(mas_xm_usb3_1, SC8180X_MASTER_USB3_1, 1, 8, SC8180X_A1NOC_SNOC_SLV); +DEFINE_QNODE(mas_xm_usb3_2, SC8180X_MASTER_USB3_2, 1, 16, SC8180X_A1NOC_SNOC_SLV); +DEFINE_QNODE(mas_qhm_a2noc_cfg, SC8180X_MASTER_A2NOC_CFG, 1, 4, SC8180X_SLAVE_SERVICE_A2NOC); +DEFINE_QNODE(mas_qhm_qdss_bam, SC8180X_MASTER_QDSS_BAM, 1, 4, SC8180X_A2NOC_SNOC_SLV); +DEFINE_QNODE(mas_qhm_qspi, SC8180X_MASTER_QSPI_0, 1, 4, SC8180X_A2NOC_SNOC_SLV); +DEFINE_QNODE(mas_qhm_qspi1, SC8180X_MASTER_QSPI_1, 1, 4, SC8180X_A2NOC_SNOC_SLV); +DEFINE_QNODE(mas_qhm_qup0, SC8180X_MASTER_QUP_0, 1, 4, SC8180X_A2NOC_SNOC_SLV); +DEFINE_QNODE(mas_qhm_qup1, SC8180X_MASTER_QUP_1, 1, 4, SC8180X_A2NOC_SNOC_SLV); +DEFINE_QNODE(mas_qhm_qup2, SC8180X_MASTER_QUP_2, 1, 4, SC8180X_A2NOC_SNOC_SLV); +DEFINE_QNODE(mas_qhm_sensorss_ahb, SC8180X_MASTER_SENSORS_AHB, 1, 4, SC8180X_A2NOC_SNOC_SLV); +DEFINE_QNODE(mas_qxm_crypto, SC8180X_MASTER_CRYPTO_CORE_0, 1, 8, SC8180X_A2NOC_SNOC_SLV); +DEFINE_QNODE(mas_qxm_ipa, SC8180X_MASTER_IPA, 1, 8, SC8180X_A2NOC_SNOC_SLV); +DEFINE_QNODE(mas_xm_emac, SC8180X_MASTER_EMAC, 1, 8, SC8180X_A2NOC_SNOC_SLV); +DEFINE_QNODE(mas_xm_pcie3_0, SC8180X_MASTER_PCIE, 1, 8, SC8180X_SLAVE_ANOC_PCIE_GEM_NOC); +DEFINE_QNODE(mas_xm_pcie3_1, SC8180X_MASTER_PCIE_1, 1, 16, SC8180X_SLAVE_ANOC_PCIE_GEM_NOC); +DEFINE_QNODE(mas_xm_pcie3_2, SC8180X_MASTER_PCIE_2, 1, 8, SC8180X_SLAVE_ANOC_PCIE_GEM_NOC); +DEFINE_QNODE(mas_xm_pcie3_3, SC8180X_MASTER_PCIE_3, 1, 16, SC8180X_SLAVE_ANOC_PCIE_GEM_NOC); +DEFINE_QNODE(mas_xm_qdss_etr, SC8180X_MASTER_QDSS_ETR, 1, 8, SC8180X_A2NOC_SNOC_SLV); +DEFINE_QNODE(mas_xm_sdc2, SC8180X_MASTER_SDCC_2, 1, 8, SC8180X_A2NOC_SNOC_SLV); +DEFINE_QNODE(mas_xm_sdc4, SC8180X_MASTER_SDCC_4, 1, 8, SC8180X_A2NOC_SNOC_SLV); +DEFINE_QNODE(mas_qxm_camnoc_hf0_uncomp, SC8180X_MASTER_CAMNOC_HF0_UNCOMP, 1, 32, SC8180X_SLAVE_CAMNOC_UNCOMP); +DEFINE_QNODE(mas_qxm_camnoc_hf1_uncomp, SC8180X_MASTER_CAMNOC_HF1_UNCOMP, 1, 32, SC8180X_SLAVE_CAMNOC_UNCOMP); +DEFINE_QNODE(mas_qxm_camnoc_sf_uncomp, SC8180X_MASTER_CAMNOC_SF_UNCOMP, 1, 32, SC8180X_SLAVE_CAMNOC_UNCOMP); +DEFINE_QNODE(mas_qnm_npu, SC8180X_MASTER_NPU, 1, 32, SC8180X_SLAVE_CDSP_MEM_NOC); +DEFINE_QNODE(mas_qnm_snoc, SC8180X_SNOC_CNOC_MAS, 1, 8, SC8180X_SLAVE_TLMM_SOUTH, SC8180X_SLAVE_CDSP_CFG, SC8180X_SLAVE_SPSS_CFG, SC8180X_SLAVE_CAMERA_CFG, SC8180X_SLAVE_SDCC_4, SC8180X_SLAVE_AHB2PHY_CENTER, SC8180X_SLAVE_SDCC_2, SC8180X_SLAVE_PCIE_2_CFG, SC8180X_SLAVE_CNOC_MNOC_CFG, SC8180X_SLAVE_EMAC_CFG, SC8180X_SLAVE_QSPI_0, SC8180X_SLAVE_QSPI_1, SC8180X_SLAVE_TLMM_EAST, SC8180X_SLAVE_SNOC_CFG, SC8180X_SLAVE_AHB2PHY_EAST, SC8180X_SLAVE_GLM, SC8180X_SLAVE_PDM, SC8180X_SLAVE_PCIE_1_CFG, SC8180X_SLAVE_A2NOC_CFG, SC8180X_SLAVE_QDSS_CFG, SC8180X_SLAVE_DISPLAY_CFG, SC8180X_SLAVE_TCSR, SC8180X_SLAVE_UFS_MEM_0_CFG, SC8180X_SLAVE_CNOC_DDRSS, SC8180X_SLAVE_PCIE_0_CFG, SC8180X_SLAVE_QUP_1, SC8180X_SLAVE_QUP_2, 
SC8180X_SLAVE_NPU_CFG, SC8180X_SLAVE_CRYPTO_0_CFG, SC8180X_SLAVE_GRAPHICS_3D_CFG, SC8180X_SLAVE_VENUS_CFG, SC8180X_SLAVE_TSIF, SC8180X_SLAVE_IPA_CFG, SC8180X_SLAVE_CLK_CTL, SC8180X_SLAVE_SECURITY, SC8180X_SLAVE_AOP, SC8180X_SLAVE_AHB2PHY_WEST, SC8180X_SLAVE_AHB2PHY_SOUTH, SC8180X_SLAVE_SERVICE_CNOC, SC8180X_SLAVE_UFS_CARD_CFG, SC8180X_SLAVE_USB3_1, SC8180X_SLAVE_USB3_2, SC8180X_SLAVE_PCIE_3_CFG, SC8180X_SLAVE_RBCPR_CX_CFG, SC8180X_SLAVE_TLMM_WEST, SC8180X_SLAVE_A1NOC_CFG, SC8180X_SLAVE_AOSS, SC8180X_SLAVE_PRNG, SC8180X_SLAVE_VSENSE_CTRL_CFG, SC8180X_SLAVE_QUP_0, SC8180X_SLAVE_USB3, SC8180X_SLAVE_RBCPR_MMCX_CFG, SC8180X_SLAVE_PIMEM_CFG, SC8180X_SLAVE_UFS_MEM_1_CFG, SC8180X_SLAVE_RBCPR_MX_CFG, SC8180X_SLAVE_IMEM_CFG); +DEFINE_QNODE(mas_qhm_cnoc_dc_noc, SC8180X_MASTER_CNOC_DC_NOC, 1, 4, SC8180X_SLAVE_LLCC_CFG, SC8180X_SLAVE_GEM_NOC_CFG); +DEFINE_QNODE(mas_acm_apps, SC8180X_MASTER_AMPSS_M0, 4, 64, SC8180X_SLAVE_ECC, SC8180X_SLAVE_LLCC, SC8180X_SLAVE_GEM_NOC_SNOC); +DEFINE_QNODE(mas_acm_gpu_tcu, SC8180X_MASTER_GPU_TCU, 1, 8, SC8180X_SLAVE_LLCC, SC8180X_SLAVE_GEM_NOC_SNOC); +DEFINE_QNODE(mas_acm_sys_tcu, SC8180X_MASTER_SYS_TCU, 1, 8, SC8180X_SLAVE_LLCC, SC8180X_SLAVE_GEM_NOC_SNOC); +DEFINE_QNODE(mas_qhm_gemnoc_cfg, SC8180X_MASTER_GEM_NOC_CFG, 1, 4, SC8180X_SLAVE_SERVICE_GEM_NOC_1, SC8180X_SLAVE_SERVICE_GEM_NOC, SC8180X_SLAVE_MSS_PROC_MS_MPU_CFG); +DEFINE_QNODE(mas_qnm_cmpnoc, SC8180X_MASTER_COMPUTE_NOC, 2, 32, SC8180X_SLAVE_ECC, SC8180X_SLAVE_LLCC, SC8180X_SLAVE_GEM_NOC_SNOC); +DEFINE_QNODE(mas_qnm_gpu, SC8180X_MASTER_GRAPHICS_3D, 4, 32, SC8180X_SLAVE_LLCC, SC8180X_SLAVE_GEM_NOC_SNOC); +DEFINE_QNODE(mas_qnm_mnoc_hf, SC8180X_MASTER_MNOC_HF_MEM_NOC, 2, 32, SC8180X_SLAVE_LLCC); +DEFINE_QNODE(mas_qnm_mnoc_sf, SC8180X_MASTER_MNOC_SF_MEM_NOC, 1, 32, SC8180X_SLAVE_LLCC, SC8180X_SLAVE_GEM_NOC_SNOC); +DEFINE_QNODE(mas_qnm_pcie, SC8180X_MASTER_GEM_NOC_PCIE_SNOC, 1, 32, SC8180X_SLAVE_LLCC, SC8180X_SLAVE_GEM_NOC_SNOC); +DEFINE_QNODE(mas_qnm_snoc_gc, SC8180X_MASTER_SNOC_GC_MEM_NOC, 1, 8, SC8180X_SLAVE_LLCC); +DEFINE_QNODE(mas_qnm_snoc_sf, SC8180X_MASTER_SNOC_SF_MEM_NOC, 1, 32, SC8180X_SLAVE_LLCC); +DEFINE_QNODE(mas_qxm_ecc, SC8180X_MASTER_ECC, 2, 32, SC8180X_SLAVE_LLCC); +DEFINE_QNODE(mas_ipa_core_master, SC8180X_MASTER_IPA_CORE, 1, 8, SC8180X_SLAVE_IPA_CORE); +DEFINE_QNODE(mas_llcc_mc, SC8180X_MASTER_LLCC, 8, 4, SC8180X_SLAVE_EBI_CH0); +DEFINE_QNODE(mas_qhm_mnoc_cfg, SC8180X_MASTER_CNOC_MNOC_CFG, 1, 4, SC8180X_SLAVE_SERVICE_MNOC); +DEFINE_QNODE(mas_qxm_camnoc_hf0, SC8180X_MASTER_CAMNOC_HF0, 1, 32, SC8180X_SLAVE_MNOC_HF_MEM_NOC); +DEFINE_QNODE(mas_qxm_camnoc_hf1, SC8180X_MASTER_CAMNOC_HF1, 1, 32, SC8180X_SLAVE_MNOC_HF_MEM_NOC); +DEFINE_QNODE(mas_qxm_camnoc_sf, SC8180X_MASTER_CAMNOC_SF, 1, 32, SC8180X_SLAVE_MNOC_SF_MEM_NOC); +DEFINE_QNODE(mas_qxm_mdp0, SC8180X_MASTER_MDP_PORT0, 1, 32, SC8180X_SLAVE_MNOC_HF_MEM_NOC); +DEFINE_QNODE(mas_qxm_mdp1, SC8180X_MASTER_MDP_PORT1, 1, 32, SC8180X_SLAVE_MNOC_HF_MEM_NOC); +DEFINE_QNODE(mas_qxm_rot, SC8180X_MASTER_ROTATOR, 1, 32, SC8180X_SLAVE_MNOC_SF_MEM_NOC); +DEFINE_QNODE(mas_qxm_venus0, SC8180X_MASTER_VIDEO_P0, 1, 32, SC8180X_SLAVE_MNOC_SF_MEM_NOC); +DEFINE_QNODE(mas_qxm_venus1, SC8180X_MASTER_VIDEO_P1, 1, 32, SC8180X_SLAVE_MNOC_SF_MEM_NOC); +DEFINE_QNODE(mas_qxm_venus_arm9, SC8180X_MASTER_VIDEO_PROC, 1, 8, SC8180X_SLAVE_MNOC_SF_MEM_NOC); +DEFINE_QNODE(mas_qhm_snoc_cfg, SC8180X_MASTER_SNOC_CFG, 1, 4, SC8180X_SLAVE_SERVICE_SNOC); +DEFINE_QNODE(mas_qnm_aggre1_noc, SC8180X_A1NOC_SNOC_MAS, 1, 32, SC8180X_SLAVE_SNOC_GEM_NOC_SF, SC8180X_SLAVE_PIMEM, SC8180X_SLAVE_OCIMEM, 
SC8180X_SLAVE_APPSS, SC8180X_SNOC_CNOC_SLV, SC8180X_SLAVE_QDSS_STM); +DEFINE_QNODE(mas_qnm_aggre2_noc, SC8180X_A2NOC_SNOC_MAS, 1, 16, SC8180X_SLAVE_SNOC_GEM_NOC_SF, SC8180X_SLAVE_PIMEM, SC8180X_SLAVE_PCIE_3, SC8180X_SLAVE_OCIMEM, SC8180X_SLAVE_APPSS, SC8180X_SLAVE_PCIE_2, SC8180X_SNOC_CNOC_SLV, SC8180X_SLAVE_PCIE_0, SC8180X_SLAVE_PCIE_1, SC8180X_SLAVE_TCU, SC8180X_SLAVE_QDSS_STM); +DEFINE_QNODE(mas_qnm_gemnoc, SC8180X_MASTER_GEM_NOC_SNOC, 1, 8, SC8180X_SLAVE_PIMEM, SC8180X_SLAVE_OCIMEM, SC8180X_SLAVE_APPSS, SC8180X_SNOC_CNOC_SLV, SC8180X_SLAVE_TCU, SC8180X_SLAVE_QDSS_STM); +DEFINE_QNODE(mas_qxm_pimem, SC8180X_MASTER_PIMEM, 1, 8, SC8180X_SLAVE_SNOC_GEM_NOC_GC, SC8180X_SLAVE_OCIMEM); +DEFINE_QNODE(mas_xm_gic, SC8180X_MASTER_GIC, 1, 8, SC8180X_SLAVE_SNOC_GEM_NOC_GC, SC8180X_SLAVE_OCIMEM); +DEFINE_QNODE(slv_qns_a1noc_snoc, SC8180X_A1NOC_SNOC_SLV, 1, 32, SC8180X_A1NOC_SNOC_MAS); +DEFINE_QNODE(slv_srvc_aggre1_noc, SC8180X_SLAVE_SERVICE_A1NOC, 1, 4); +DEFINE_QNODE(slv_qns_a2noc_snoc, SC8180X_A2NOC_SNOC_SLV, 1, 16, SC8180X_A2NOC_SNOC_MAS); +DEFINE_QNODE(slv_qns_pcie_mem_noc, SC8180X_SLAVE_ANOC_PCIE_GEM_NOC, 1, 32, SC8180X_MASTER_GEM_NOC_PCIE_SNOC); +DEFINE_QNODE(slv_srvc_aggre2_noc, SC8180X_SLAVE_SERVICE_A2NOC, 1, 4); +DEFINE_QNODE(slv_qns_camnoc_uncomp, SC8180X_SLAVE_CAMNOC_UNCOMP, 1, 32); +DEFINE_QNODE(slv_qns_cdsp_mem_noc, SC8180X_SLAVE_CDSP_MEM_NOC, 2, 32, SC8180X_MASTER_COMPUTE_NOC); +DEFINE_QNODE(slv_qhs_a1_noc_cfg, SC8180X_SLAVE_A1NOC_CFG, 1, 4, SC8180X_MASTER_A1NOC_CFG); +DEFINE_QNODE(slv_qhs_a2_noc_cfg, SC8180X_SLAVE_A2NOC_CFG, 1, 4, SC8180X_MASTER_A2NOC_CFG); +DEFINE_QNODE(slv_qhs_ahb2phy_refgen_center, SC8180X_SLAVE_AHB2PHY_CENTER, 1, 4); +DEFINE_QNODE(slv_qhs_ahb2phy_refgen_east, SC8180X_SLAVE_AHB2PHY_EAST, 1, 4); +DEFINE_QNODE(slv_qhs_ahb2phy_refgen_west, SC8180X_SLAVE_AHB2PHY_WEST, 1, 4); +DEFINE_QNODE(slv_qhs_ahb2phy_south, SC8180X_SLAVE_AHB2PHY_SOUTH, 1, 4); +DEFINE_QNODE(slv_qhs_aop, SC8180X_SLAVE_AOP, 1, 4); +DEFINE_QNODE(slv_qhs_aoss, SC8180X_SLAVE_AOSS, 1, 4); +DEFINE_QNODE(slv_qhs_camera_cfg, SC8180X_SLAVE_CAMERA_CFG, 1, 4); +DEFINE_QNODE(slv_qhs_clk_ctl, SC8180X_SLAVE_CLK_CTL, 1, 4); +DEFINE_QNODE(slv_qhs_compute_dsp, SC8180X_SLAVE_CDSP_CFG, 1, 4); +DEFINE_QNODE(slv_qhs_cpr_cx, SC8180X_SLAVE_RBCPR_CX_CFG, 1, 4); +DEFINE_QNODE(slv_qhs_cpr_mmcx, SC8180X_SLAVE_RBCPR_MMCX_CFG, 1, 4); +DEFINE_QNODE(slv_qhs_cpr_mx, SC8180X_SLAVE_RBCPR_MX_CFG, 1, 4); +DEFINE_QNODE(slv_qhs_crypto0_cfg, SC8180X_SLAVE_CRYPTO_0_CFG, 1, 4); +DEFINE_QNODE(slv_qhs_ddrss_cfg, SC8180X_SLAVE_CNOC_DDRSS, 1, 4, SC8180X_MASTER_CNOC_DC_NOC); +DEFINE_QNODE(slv_qhs_display_cfg, SC8180X_SLAVE_DISPLAY_CFG, 1, 4); +DEFINE_QNODE(slv_qhs_emac_cfg, SC8180X_SLAVE_EMAC_CFG, 1, 4); +DEFINE_QNODE(slv_qhs_glm, SC8180X_SLAVE_GLM, 1, 4); +DEFINE_QNODE(slv_qhs_gpuss_cfg, SC8180X_SLAVE_GRAPHICS_3D_CFG, 1, 8); +DEFINE_QNODE(slv_qhs_imem_cfg, SC8180X_SLAVE_IMEM_CFG, 1, 4); +DEFINE_QNODE(slv_qhs_ipa, SC8180X_SLAVE_IPA_CFG, 1, 4); +DEFINE_QNODE(slv_qhs_mnoc_cfg, SC8180X_SLAVE_CNOC_MNOC_CFG, 1, 4, SC8180X_MASTER_CNOC_MNOC_CFG); +DEFINE_QNODE(slv_qhs_npu_cfg, SC8180X_SLAVE_NPU_CFG, 1, 4); +DEFINE_QNODE(slv_qhs_pcie0_cfg, SC8180X_SLAVE_PCIE_0_CFG, 1, 4); +DEFINE_QNODE(slv_qhs_pcie1_cfg, SC8180X_SLAVE_PCIE_1_CFG, 1, 4); +DEFINE_QNODE(slv_qhs_pcie2_cfg, SC8180X_SLAVE_PCIE_2_CFG, 1, 4); +DEFINE_QNODE(slv_qhs_pcie3_cfg, SC8180X_SLAVE_PCIE_3_CFG, 1, 4); +DEFINE_QNODE(slv_qhs_pdm, SC8180X_SLAVE_PDM, 1, 4); +DEFINE_QNODE(slv_qhs_pimem_cfg, SC8180X_SLAVE_PIMEM_CFG, 1, 4); +DEFINE_QNODE(slv_qhs_prng, SC8180X_SLAVE_PRNG, 1, 4); 
+DEFINE_QNODE(slv_qhs_qdss_cfg, SC8180X_SLAVE_QDSS_CFG, 1, 4); +DEFINE_QNODE(slv_qhs_qspi_0, SC8180X_SLAVE_QSPI_0, 1, 4); +DEFINE_QNODE(slv_qhs_qspi_1, SC8180X_SLAVE_QSPI_1, 1, 4); +DEFINE_QNODE(slv_qhs_qupv3_east0, SC8180X_SLAVE_QUP_1, 1, 4); +DEFINE_QNODE(slv_qhs_qupv3_east1, SC8180X_SLAVE_QUP_2, 1, 4); +DEFINE_QNODE(slv_qhs_qupv3_west, SC8180X_SLAVE_QUP_0, 1, 4); +DEFINE_QNODE(slv_qhs_sdc2, SC8180X_SLAVE_SDCC_2, 1, 4); +DEFINE_QNODE(slv_qhs_sdc4, SC8180X_SLAVE_SDCC_4, 1, 4); +DEFINE_QNODE(slv_qhs_security, SC8180X_SLAVE_SECURITY, 1, 4); +DEFINE_QNODE(slv_qhs_snoc_cfg, SC8180X_SLAVE_SNOC_CFG, 1, 4, SC8180X_MASTER_SNOC_CFG); +DEFINE_QNODE(slv_qhs_spss_cfg, SC8180X_SLAVE_SPSS_CFG, 1, 4); +DEFINE_QNODE(slv_qhs_tcsr, SC8180X_SLAVE_TCSR, 1, 4); +DEFINE_QNODE(slv_qhs_tlmm_east, SC8180X_SLAVE_TLMM_EAST, 1, 4); +DEFINE_QNODE(slv_qhs_tlmm_south, SC8180X_SLAVE_TLMM_SOUTH, 1, 4); +DEFINE_QNODE(slv_qhs_tlmm_west, SC8180X_SLAVE_TLMM_WEST, 1, 4); +DEFINE_QNODE(slv_qhs_tsif, SC8180X_SLAVE_TSIF, 1, 4); +DEFINE_QNODE(slv_qhs_ufs_card_cfg, SC8180X_SLAVE_UFS_CARD_CFG, 1, 4); +DEFINE_QNODE(slv_qhs_ufs_mem0_cfg, SC8180X_SLAVE_UFS_MEM_0_CFG, 1, 4); +DEFINE_QNODE(slv_qhs_ufs_mem1_cfg, SC8180X_SLAVE_UFS_MEM_1_CFG, 1, 4); +DEFINE_QNODE(slv_qhs_usb3_0, SC8180X_SLAVE_USB3, 1, 4); +DEFINE_QNODE(slv_qhs_usb3_1, SC8180X_SLAVE_USB3_1, 1, 4); +DEFINE_QNODE(slv_qhs_usb3_2, SC8180X_SLAVE_USB3_2, 1, 4); +DEFINE_QNODE(slv_qhs_venus_cfg, SC8180X_SLAVE_VENUS_CFG, 1, 4); +DEFINE_QNODE(slv_qhs_vsense_ctrl_cfg, SC8180X_SLAVE_VSENSE_CTRL_CFG, 1, 4); +DEFINE_QNODE(slv_srvc_cnoc, SC8180X_SLAVE_SERVICE_CNOC, 1, 4); +DEFINE_QNODE(slv_qhs_gemnoc, SC8180X_SLAVE_GEM_NOC_CFG, 1, 4, SC8180X_MASTER_GEM_NOC_CFG); +DEFINE_QNODE(slv_qhs_llcc, SC8180X_SLAVE_LLCC_CFG, 1, 4); +DEFINE_QNODE(slv_qhs_mdsp_ms_mpu_cfg, SC8180X_SLAVE_MSS_PROC_MS_MPU_CFG, 1, 4); +DEFINE_QNODE(slv_qns_ecc, SC8180X_SLAVE_ECC, 1, 32); +DEFINE_QNODE(slv_qns_gem_noc_snoc, SC8180X_SLAVE_GEM_NOC_SNOC, 1, 8, SC8180X_MASTER_GEM_NOC_SNOC); +DEFINE_QNODE(slv_qns_llcc, SC8180X_SLAVE_LLCC, 8, 16, SC8180X_MASTER_LLCC); +DEFINE_QNODE(slv_srvc_gemnoc, SC8180X_SLAVE_SERVICE_GEM_NOC, 1, 4); +DEFINE_QNODE(slv_srvc_gemnoc1, SC8180X_SLAVE_SERVICE_GEM_NOC_1, 1, 4); +DEFINE_QNODE(slv_ipa_core_slave, SC8180X_SLAVE_IPA_CORE, 1, 8); +DEFINE_QNODE(slv_ebi, SC8180X_SLAVE_EBI_CH0, 8, 4); +DEFINE_QNODE(slv_qns2_mem_noc, SC8180X_SLAVE_MNOC_SF_MEM_NOC, 1, 32, SC8180X_MASTER_MNOC_SF_MEM_NOC); +DEFINE_QNODE(slv_qns_mem_noc_hf, SC8180X_SLAVE_MNOC_HF_MEM_NOC, 2, 32, SC8180X_MASTER_MNOC_HF_MEM_NOC); +DEFINE_QNODE(slv_srvc_mnoc, SC8180X_SLAVE_SERVICE_MNOC, 1, 4); +DEFINE_QNODE(slv_qhs_apss, SC8180X_SLAVE_APPSS, 1, 8); +DEFINE_QNODE(slv_qns_cnoc, SC8180X_SNOC_CNOC_SLV, 1, 8, SC8180X_SNOC_CNOC_MAS); +DEFINE_QNODE(slv_qns_gemnoc_gc, SC8180X_SLAVE_SNOC_GEM_NOC_GC, 1, 8, SC8180X_MASTER_SNOC_GC_MEM_NOC); +DEFINE_QNODE(slv_qns_gemnoc_sf, SC8180X_SLAVE_SNOC_GEM_NOC_SF, 1, 32, SC8180X_MASTER_SNOC_SF_MEM_NOC); +DEFINE_QNODE(slv_qxs_imem, SC8180X_SLAVE_OCIMEM, 1, 8); +DEFINE_QNODE(slv_qxs_pimem, SC8180X_SLAVE_PIMEM, 1, 8); +DEFINE_QNODE(slv_srvc_snoc, SC8180X_SLAVE_SERVICE_SNOC, 1, 4); +DEFINE_QNODE(slv_xs_pcie_0, SC8180X_SLAVE_PCIE_0, 1, 8); +DEFINE_QNODE(slv_xs_pcie_1, SC8180X_SLAVE_PCIE_1, 1, 8); +DEFINE_QNODE(slv_xs_pcie_2, SC8180X_SLAVE_PCIE_2, 1, 8); +DEFINE_QNODE(slv_xs_pcie_3, SC8180X_SLAVE_PCIE_3, 1, 8); +DEFINE_QNODE(slv_xs_qdss_stm, SC8180X_SLAVE_QDSS_STM, 1, 4); +DEFINE_QNODE(slv_xs_sys_tcu_cfg, SC8180X_SLAVE_TCU, 1, 8); + +DEFINE_QBCM(bcm_acv, "ACV", false, &slv_ebi); +DEFINE_QBCM(bcm_mc0, "MC0", false, 
&slv_ebi); +DEFINE_QBCM(bcm_sh0, "SH0", false, &slv_qns_llcc); +DEFINE_QBCM(bcm_mm0, "MM0", false, &slv_qns_mem_noc_hf); +DEFINE_QBCM(bcm_co0, "CO0", false, &slv_qns_cdsp_mem_noc); +DEFINE_QBCM(bcm_ce0, "CE0", false, &mas_qxm_crypto); +DEFINE_QBCM(bcm_cn0, "CN0", false, &mas_qnm_snoc, &slv_qhs_a1_noc_cfg, &slv_qhs_a2_noc_cfg, &slv_qhs_ahb2phy_refgen_center, &slv_qhs_ahb2phy_refgen_east, &slv_qhs_ahb2phy_refgen_west, &slv_qhs_ahb2phy_south, &slv_qhs_aop, &slv_qhs_aoss, &slv_qhs_camera_cfg, &slv_qhs_clk_ctl, &slv_qhs_compute_dsp, &slv_qhs_cpr_cx, &slv_qhs_cpr_mmcx, &slv_qhs_cpr_mx, &slv_qhs_crypto0_cfg, &slv_qhs_ddrss_cfg, &slv_qhs_display_cfg, &slv_qhs_emac_cfg, &slv_qhs_glm, &slv_qhs_gpuss_cfg, &slv_qhs_imem_cfg, &slv_qhs_ipa, &slv_qhs_mnoc_cfg, &slv_qhs_npu_cfg, &slv_qhs_pcie0_cfg, &slv_qhs_pcie1_cfg, &slv_qhs_pcie2_cfg, &slv_qhs_pcie3_cfg, &slv_qhs_pdm, &slv_qhs_pimem_cfg, &slv_qhs_prng, &slv_qhs_qdss_cfg, &slv_qhs_qspi_0, &slv_qhs_qspi_1, &slv_qhs_qupv3_east0, &slv_qhs_qupv3_east1, &slv_qhs_qupv3_west, &slv_qhs_sdc2, &slv_qhs_sdc4, &slv_qhs_security, &slv_qhs_snoc_cfg, &slv_qhs_spss_cfg, &slv_qhs_tcsr, &slv_qhs_tlmm_east, &slv_qhs_tlmm_south, &slv_qhs_tlmm_west, &slv_qhs_tsif, &slv_qhs_ufs_card_cfg, &slv_qhs_ufs_mem0_cfg, &slv_qhs_ufs_mem1_cfg, &slv_qhs_usb3_0, &slv_qhs_usb3_1, &slv_qhs_usb3_2, &slv_qhs_venus_cfg, &slv_qhs_vsense_ctrl_cfg, &slv_srvc_cnoc); +DEFINE_QBCM(bcm_mm1, "MM1", false, &mas_qxm_camnoc_hf0_uncomp, &mas_qxm_camnoc_hf1_uncomp, &mas_qxm_camnoc_sf_uncomp, &mas_qxm_camnoc_hf0, &mas_qxm_camnoc_hf1, &mas_qxm_mdp0, &mas_qxm_mdp1); +DEFINE_QBCM(bcm_qup0, "QUP0", false, &mas_qhm_qup0, &mas_qhm_qup1, &mas_qhm_qup2); +DEFINE_QBCM(bcm_sh2, "SH2", false, &slv_qns_gem_noc_snoc); +DEFINE_QBCM(bcm_mm2, "MM2", false, &mas_qxm_camnoc_sf, &mas_qxm_rot, &mas_qxm_venus0, &mas_qxm_venus1, &mas_qxm_venus_arm9, &slv_qns2_mem_noc); +DEFINE_QBCM(bcm_sh3, "SH3", false, &mas_acm_apps); +DEFINE_QBCM(bcm_sn0, "SN0", false, &slv_qns_gemnoc_sf); +DEFINE_QBCM(bcm_sn1, "SN1", false, &slv_qxs_imem); +DEFINE_QBCM(bcm_sn2, "SN2", false, &slv_qns_gemnoc_gc); +DEFINE_QBCM(bcm_co2, "CO2", false, &mas_qnm_npu); +DEFINE_QBCM(bcm_ip0, "IP0", false, &slv_ipa_core_slave); +DEFINE_QBCM(bcm_sn3, "SN3", false, &slv_srvc_aggre1_noc, &slv_qns_cnoc); +DEFINE_QBCM(bcm_sn4, "SN4", false, &slv_qxs_pimem); +DEFINE_QBCM(bcm_sn8, "SN8", false, &slv_xs_pcie_0, &slv_xs_pcie_1, &slv_xs_pcie_2, &slv_xs_pcie_3); +DEFINE_QBCM(bcm_sn9, "SN9", false, &mas_qnm_aggre1_noc); +DEFINE_QBCM(bcm_sn11, "SN11", false, &mas_qnm_aggre2_noc); +DEFINE_QBCM(bcm_sn14, "SN14", false, &slv_qns_pcie_mem_noc); +DEFINE_QBCM(bcm_sn15, "SN15", false, &mas_qnm_gemnoc); + +static struct qcom_icc_bcm *aggre1_noc_bcms[] = { + &bcm_sn3, + &bcm_ce0, + &bcm_qup0, +}; + +static struct qcom_icc_bcm *aggre2_noc_bcms[] = { + &bcm_sn14, + &bcm_ce0, + &bcm_qup0, +}; + +static struct qcom_icc_bcm *camnoc_virt_bcms[] = { + &bcm_mm1, +}; + +static struct qcom_icc_bcm *compute_noc_bcms[] = { + &bcm_co0, + &bcm_co2, +}; + +static struct qcom_icc_bcm *config_noc_bcms[] = { + &bcm_cn0, +}; + +static struct qcom_icc_bcm *gem_noc_bcms[] = { + &bcm_sh0, + &bcm_sh2, + &bcm_sh3, +}; + +static struct qcom_icc_bcm *ipa_virt_bcms[] = { + &bcm_ip0, +}; + +static struct qcom_icc_bcm *mc_virt_bcms[] = { + &bcm_mc0, + &bcm_acv, +}; + +static struct qcom_icc_bcm *mmss_noc_bcms[] = { + &bcm_mm0, + &bcm_mm1, + &bcm_mm2, +}; + +static struct qcom_icc_bcm *system_noc_bcms[] = { + &bcm_sn0, + &bcm_sn1, + &bcm_sn2, + &bcm_sn3, + &bcm_sn4, + &bcm_sn8, + &bcm_sn9, + &bcm_sn11, + &bcm_sn15, 
+}; + +static struct qcom_icc_node *aggre1_noc_nodes[] = { + [MASTER_A1NOC_CFG] = &mas_qhm_a1noc_cfg, + [MASTER_UFS_CARD] = &mas_xm_ufs_card, + [MASTER_UFS_GEN4] = &mas_xm_ufs_g4, + [MASTER_UFS_MEM] = &mas_xm_ufs_mem, + [MASTER_USB3] = &mas_xm_usb3_0, + [MASTER_USB3_1] = &mas_xm_usb3_1, + [MASTER_USB3_2] = &mas_xm_usb3_2, + [A1NOC_SNOC_SLV] = &slv_qns_a1noc_snoc, + [SLAVE_SERVICE_A1NOC] = &slv_srvc_aggre1_noc, +}; + +static struct qcom_icc_node *aggre2_noc_nodes[] = { + [MASTER_A2NOC_CFG] = &mas_qhm_a2noc_cfg, + [MASTER_QDSS_BAM] = &mas_qhm_qdss_bam, + [MASTER_QSPI_0] = &mas_qhm_qspi, + [MASTER_QSPI_1] = &mas_qhm_qspi1, + [MASTER_QUP_0] = &mas_qhm_qup0, + [MASTER_QUP_1] = &mas_qhm_qup1, + [MASTER_QUP_2] = &mas_qhm_qup2, + [MASTER_SENSORS_AHB] = &mas_qhm_sensorss_ahb, + [MASTER_CRYPTO_CORE_0] = &mas_qxm_crypto, + [MASTER_IPA] = &mas_qxm_ipa, + [MASTER_EMAC] = &mas_xm_emac, + [MASTER_PCIE] = &mas_xm_pcie3_0, + [MASTER_PCIE_1] = &mas_xm_pcie3_1, + [MASTER_PCIE_2] = &mas_xm_pcie3_2, + [MASTER_PCIE_3] = &mas_xm_pcie3_3, + [MASTER_QDSS_ETR] = &mas_xm_qdss_etr, + [MASTER_SDCC_2] = &mas_xm_sdc2, + [MASTER_SDCC_4] = &mas_xm_sdc4, + [A2NOC_SNOC_SLV] = &slv_qns_a2noc_snoc, + [SLAVE_ANOC_PCIE_GEM_NOC] = &slv_qns_pcie_mem_noc, + [SLAVE_SERVICE_A2NOC] = &slv_srvc_aggre2_noc, +}; + +static struct qcom_icc_node *camnoc_virt_nodes[] = { + [MASTER_CAMNOC_HF0_UNCOMP] = &mas_qxm_camnoc_hf0_uncomp, + [MASTER_CAMNOC_HF1_UNCOMP] = &mas_qxm_camnoc_hf1_uncomp, + [MASTER_CAMNOC_SF_UNCOMP] = &mas_qxm_camnoc_sf_uncomp, + [SLAVE_CAMNOC_UNCOMP] = &slv_qns_camnoc_uncomp, +}; + +static struct qcom_icc_node *compute_noc_nodes[] = { + [MASTER_NPU] = &mas_qnm_npu, + [SLAVE_CDSP_MEM_NOC] = &slv_qns_cdsp_mem_noc, +}; + +static struct qcom_icc_node *config_noc_nodes[] = { + [SNOC_CNOC_MAS] = &mas_qnm_snoc, + [SLAVE_A1NOC_CFG] = &slv_qhs_a1_noc_cfg, + [SLAVE_A2NOC_CFG] = &slv_qhs_a2_noc_cfg, + [SLAVE_AHB2PHY_CENTER] = &slv_qhs_ahb2phy_refgen_center, + [SLAVE_AHB2PHY_EAST] = &slv_qhs_ahb2phy_refgen_east, + [SLAVE_AHB2PHY_WEST] = &slv_qhs_ahb2phy_refgen_west, + [SLAVE_AHB2PHY_SOUTH] = &slv_qhs_ahb2phy_south, + [SLAVE_AOP] = &slv_qhs_aop, + [SLAVE_AOSS] = &slv_qhs_aoss, + [SLAVE_CAMERA_CFG] = &slv_qhs_camera_cfg, + [SLAVE_CLK_CTL] = &slv_qhs_clk_ctl, + [SLAVE_CDSP_CFG] = &slv_qhs_compute_dsp, + [SLAVE_RBCPR_CX_CFG] = &slv_qhs_cpr_cx, + [SLAVE_RBCPR_MMCX_CFG] = &slv_qhs_cpr_mmcx, + [SLAVE_RBCPR_MX_CFG] = &slv_qhs_cpr_mx, + [SLAVE_CRYPTO_0_CFG] = &slv_qhs_crypto0_cfg, + [SLAVE_CNOC_DDRSS] = &slv_qhs_ddrss_cfg, + [SLAVE_DISPLAY_CFG] = &slv_qhs_display_cfg, + [SLAVE_EMAC_CFG] = &slv_qhs_emac_cfg, + [SLAVE_GLM] = &slv_qhs_glm, + [SLAVE_GRAPHICS_3D_CFG] = &slv_qhs_gpuss_cfg, + [SLAVE_IMEM_CFG] = &slv_qhs_imem_cfg, + [SLAVE_IPA_CFG] = &slv_qhs_ipa, + [SLAVE_CNOC_MNOC_CFG] = &slv_qhs_mnoc_cfg, + [SLAVE_NPU_CFG] = &slv_qhs_npu_cfg, + [SLAVE_PCIE_0_CFG] = &slv_qhs_pcie0_cfg, + [SLAVE_PCIE_1_CFG] = &slv_qhs_pcie1_cfg, + [SLAVE_PCIE_2_CFG] = &slv_qhs_pcie2_cfg, + [SLAVE_PCIE_3_CFG] = &slv_qhs_pcie3_cfg, + [SLAVE_PDM] = &slv_qhs_pdm, + [SLAVE_PIMEM_CFG] = &slv_qhs_pimem_cfg, + [SLAVE_PRNG] = &slv_qhs_prng, + [SLAVE_QDSS_CFG] = &slv_qhs_qdss_cfg, + [SLAVE_QSPI_0] = &slv_qhs_qspi_0, + [SLAVE_QSPI_1] = &slv_qhs_qspi_1, + [SLAVE_QUP_1] = &slv_qhs_qupv3_east0, + [SLAVE_QUP_2] = &slv_qhs_qupv3_east1, + [SLAVE_QUP_0] = &slv_qhs_qupv3_west, + [SLAVE_SDCC_2] = &slv_qhs_sdc2, + [SLAVE_SDCC_4] = &slv_qhs_sdc4, + [SLAVE_SECURITY] = &slv_qhs_security, + [SLAVE_SNOC_CFG] = &slv_qhs_snoc_cfg, + [SLAVE_SPSS_CFG] = &slv_qhs_spss_cfg, + [SLAVE_TCSR] = 
&slv_qhs_tcsr, + [SLAVE_TLMM_EAST] = &slv_qhs_tlmm_east, + [SLAVE_TLMM_SOUTH] = &slv_qhs_tlmm_south, + [SLAVE_TLMM_WEST] = &slv_qhs_tlmm_west, + [SLAVE_TSIF] = &slv_qhs_tsif, + [SLAVE_UFS_CARD_CFG] = &slv_qhs_ufs_card_cfg, + [SLAVE_UFS_MEM_0_CFG] = &slv_qhs_ufs_mem0_cfg, + [SLAVE_UFS_MEM_1_CFG] = &slv_qhs_ufs_mem1_cfg, + [SLAVE_USB3] = &slv_qhs_usb3_0, + [SLAVE_USB3_1] = &slv_qhs_usb3_1, + [SLAVE_USB3_2] = &slv_qhs_usb3_2, + [SLAVE_VENUS_CFG] = &slv_qhs_venus_cfg, + [SLAVE_VSENSE_CTRL_CFG] = &slv_qhs_vsense_ctrl_cfg, + [SLAVE_SERVICE_CNOC] = &slv_srvc_cnoc, +}; + +static struct qcom_icc_node *dc_noc_nodes[] = { + [MASTER_CNOC_DC_NOC] = &mas_qhm_cnoc_dc_noc, + [SLAVE_GEM_NOC_CFG] = &slv_qhs_gemnoc, + [SLAVE_LLCC_CFG] = &slv_qhs_llcc, +}; + +static struct qcom_icc_node *gem_noc_nodes[] = { + [MASTER_AMPSS_M0] = &mas_acm_apps, + [MASTER_GPU_TCU] = &mas_acm_gpu_tcu, + [MASTER_SYS_TCU] = &mas_acm_sys_tcu, + [MASTER_GEM_NOC_CFG] = &mas_qhm_gemnoc_cfg, + [MASTER_COMPUTE_NOC] = &mas_qnm_cmpnoc, + [MASTER_GRAPHICS_3D] = &mas_qnm_gpu, + [MASTER_MNOC_HF_MEM_NOC] = &mas_qnm_mnoc_hf, + [MASTER_MNOC_SF_MEM_NOC] = &mas_qnm_mnoc_sf, + [MASTER_GEM_NOC_PCIE_SNOC] = &mas_qnm_pcie, + [MASTER_SNOC_GC_MEM_NOC] = &mas_qnm_snoc_gc, + [MASTER_SNOC_SF_MEM_NOC] = &mas_qnm_snoc_sf, + [MASTER_ECC] = &mas_qxm_ecc, + [SLAVE_MSS_PROC_MS_MPU_CFG] = &slv_qhs_mdsp_ms_mpu_cfg, + [SLAVE_ECC] = &slv_qns_ecc, + [SLAVE_GEM_NOC_SNOC] = &slv_qns_gem_noc_snoc, + [SLAVE_LLCC] = &slv_qns_llcc, + [SLAVE_SERVICE_GEM_NOC] = &slv_srvc_gemnoc, + [SLAVE_SERVICE_GEM_NOC_1] = &slv_srvc_gemnoc1, +}; + +static struct qcom_icc_node *ipa_virt_nodes[] = { + [MASTER_IPA_CORE] = &mas_ipa_core_master, + [SLAVE_IPA_CORE] = &slv_ipa_core_slave, +}; + +static struct qcom_icc_node *mc_virt_nodes[] = { + [MASTER_LLCC] = &mas_llcc_mc, + [SLAVE_EBI_CH0] = &slv_ebi, +}; + +static struct qcom_icc_node *mmss_noc_nodes[] = { + [MASTER_CNOC_MNOC_CFG] = &mas_qhm_mnoc_cfg, + [MASTER_CAMNOC_HF0] = &mas_qxm_camnoc_hf0, + [MASTER_CAMNOC_HF1] = &mas_qxm_camnoc_hf1, + [MASTER_CAMNOC_SF] = &mas_qxm_camnoc_sf, + [MASTER_MDP_PORT0] = &mas_qxm_mdp0, + [MASTER_MDP_PORT1] = &mas_qxm_mdp1, + [MASTER_ROTATOR] = &mas_qxm_rot, + [MASTER_VIDEO_P0] = &mas_qxm_venus0, + [MASTER_VIDEO_P1] = &mas_qxm_venus1, + [MASTER_VIDEO_PROC] = &mas_qxm_venus_arm9, + [SLAVE_MNOC_SF_MEM_NOC] = &slv_qns2_mem_noc, + [SLAVE_MNOC_HF_MEM_NOC] = &slv_qns_mem_noc_hf, + [SLAVE_SERVICE_MNOC] = &slv_srvc_mnoc, +}; + +static struct qcom_icc_node *system_noc_nodes[] = { + [MASTER_SNOC_CFG] = &mas_qhm_snoc_cfg, + [A1NOC_SNOC_MAS] = &mas_qnm_aggre1_noc, + [A2NOC_SNOC_MAS] = &mas_qnm_aggre2_noc, + [MASTER_GEM_NOC_SNOC] = &mas_qnm_gemnoc, + [MASTER_PIMEM] = &mas_qxm_pimem, + [MASTER_GIC] = &mas_xm_gic, + [SLAVE_APPSS] = &slv_qhs_apss, + [SNOC_CNOC_SLV] = &slv_qns_cnoc, + [SLAVE_SNOC_GEM_NOC_GC] = &slv_qns_gemnoc_gc, + [SLAVE_SNOC_GEM_NOC_SF] = &slv_qns_gemnoc_sf, + [SLAVE_OCIMEM] = &slv_qxs_imem, + [SLAVE_PIMEM] = &slv_qxs_pimem, + [SLAVE_SERVICE_SNOC] = &slv_srvc_snoc, + [SLAVE_QDSS_STM] = &slv_xs_qdss_stm, + [SLAVE_TCU] = &slv_xs_sys_tcu_cfg, +}; + +static const struct qcom_icc_desc sc8180x_aggre1_noc = { + .nodes = aggre1_noc_nodes, + .num_nodes = ARRAY_SIZE(aggre1_noc_nodes), + .bcms = aggre1_noc_bcms, + .num_bcms = ARRAY_SIZE(aggre1_noc_bcms), +}; + +static const struct qcom_icc_desc sc8180x_aggre2_noc = { + .nodes = aggre2_noc_nodes, + .num_nodes = ARRAY_SIZE(aggre2_noc_nodes), + .bcms = aggre2_noc_bcms, + .num_bcms = ARRAY_SIZE(aggre2_noc_bcms), +}; + +static const struct qcom_icc_desc sc8180x_camnoc_virt 
= { + .nodes = camnoc_virt_nodes, + .num_nodes = ARRAY_SIZE(camnoc_virt_nodes), + .bcms = camnoc_virt_bcms, + .num_bcms = ARRAY_SIZE(camnoc_virt_bcms), +}; + +static const struct qcom_icc_desc sc8180x_compute_noc = { + .nodes = compute_noc_nodes, + .num_nodes = ARRAY_SIZE(compute_noc_nodes), + .bcms = compute_noc_bcms, + .num_bcms = ARRAY_SIZE(compute_noc_bcms), +}; + +static const struct qcom_icc_desc sc8180x_config_noc = { + .nodes = config_noc_nodes, + .num_nodes = ARRAY_SIZE(config_noc_nodes), + .bcms = config_noc_bcms, + .num_bcms = ARRAY_SIZE(config_noc_bcms), +}; + +static const struct qcom_icc_desc sc8180x_dc_noc = { + .nodes = dc_noc_nodes, + .num_nodes = ARRAY_SIZE(dc_noc_nodes), +}; + +static const struct qcom_icc_desc sc8180x_gem_noc = { + .nodes = gem_noc_nodes, + .num_nodes = ARRAY_SIZE(gem_noc_nodes), + .bcms = gem_noc_bcms, + .num_bcms = ARRAY_SIZE(gem_noc_bcms), +}; + +static const struct qcom_icc_desc sc8180x_ipa_virt = { + .nodes = ipa_virt_nodes, + .num_nodes = ARRAY_SIZE(ipa_virt_nodes), + .bcms = ipa_virt_bcms, + .num_bcms = ARRAY_SIZE(ipa_virt_bcms), +}; + +static const struct qcom_icc_desc sc8180x_mc_virt = { + .nodes = mc_virt_nodes, + .num_nodes = ARRAY_SIZE(mc_virt_nodes), + .bcms = mc_virt_bcms, + .num_bcms = ARRAY_SIZE(mc_virt_bcms), +}; + +static const struct qcom_icc_desc sc8180x_mmss_noc = { + .nodes = mmss_noc_nodes, + .num_nodes = ARRAY_SIZE(mmss_noc_nodes), + .bcms = mmss_noc_bcms, + .num_bcms = ARRAY_SIZE(mmss_noc_bcms), +}; + +static const struct qcom_icc_desc sc8180x_system_noc = { + .nodes = system_noc_nodes, + .num_nodes = ARRAY_SIZE(system_noc_nodes), + .bcms = system_noc_bcms, + .num_bcms = ARRAY_SIZE(system_noc_bcms), +}; + +static int qnoc_probe(struct platform_device *pdev) +{ + const struct qcom_icc_desc *desc; + struct icc_onecell_data *data; + struct icc_provider *provider; + struct qcom_icc_node **qnodes; + struct qcom_icc_provider *qp; + struct icc_node *node; + size_t num_nodes, i; + int ret; + + desc = device_get_match_data(&pdev->dev); + if (!desc) + return -EINVAL; + + qnodes = desc->nodes; + num_nodes = desc->num_nodes; + + qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL); + if (!qp) + return -ENOMEM; + + data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL); + if (!data) + return -ENOMEM; + + provider = &qp->provider; + provider->dev = &pdev->dev; + provider->set = qcom_icc_set; + provider->pre_aggregate = qcom_icc_pre_aggregate; + provider->aggregate = qcom_icc_aggregate; + provider->xlate = of_icc_xlate_onecell; + INIT_LIST_HEAD(&provider->nodes); + provider->data = data; + + qp->dev = &pdev->dev; + qp->bcms = desc->bcms; + qp->num_bcms = desc->num_bcms; + + qp->voter = of_bcm_voter_get(qp->dev, NULL); + if (IS_ERR(qp->voter)) + return PTR_ERR(qp->voter); + + ret = icc_provider_add(provider); + if (ret) { + dev_err(&pdev->dev, "error adding interconnect provider\n"); + return ret; + } + + for (i = 0; i < qp->num_bcms; i++) + qcom_icc_bcm_init(qp->bcms[i], &pdev->dev); + + for (i = 0; i < num_nodes; i++) { + size_t j; + + if (!qnodes[i]) + continue; + + node = icc_node_create(qnodes[i]->id); + if (IS_ERR(node)) { + ret = PTR_ERR(node); + goto err; + } + + node->name = qnodes[i]->name; + node->data = qnodes[i]; + icc_node_add(node, provider); + + for (j = 0; j < qnodes[i]->num_links; j++) + icc_link_create(node, qnodes[i]->links[j]); + + data->nodes[i] = node; + } + data->num_nodes = num_nodes; + + platform_set_drvdata(pdev, qp); + + return 0; +err: + icc_nodes_remove(provider); + icc_provider_del(provider); + 
return ret; +} + +static int qnoc_remove(struct platform_device *pdev) +{ + struct qcom_icc_provider *qp = platform_get_drvdata(pdev); + + icc_nodes_remove(&qp->provider); + return icc_provider_del(&qp->provider); +} + +static const struct of_device_id qnoc_of_match[] = { + { .compatible = "qcom,sc8180x-aggre1-noc", .data = &sc8180x_aggre1_noc }, + { .compatible = "qcom,sc8180x-aggre2-noc", .data = &sc8180x_aggre2_noc }, + { .compatible = "qcom,sc8180x-camnoc-virt", .data = &sc8180x_camnoc_virt }, + { .compatible = "qcom,sc8180x-compute-noc", .data = &sc8180x_compute_noc, }, + { .compatible = "qcom,sc8180x-config-noc", .data = &sc8180x_config_noc }, + { .compatible = "qcom,sc8180x-dc-noc", .data = &sc8180x_dc_noc }, + { .compatible = "qcom,sc8180x-gem-noc", .data = &sc8180x_gem_noc }, + { .compatible = "qcom,sc8180x-ipa-virt", .data = &sc8180x_ipa_virt }, + { .compatible = "qcom,sc8180x-mc-virt", .data = &sc8180x_mc_virt }, + { .compatible = "qcom,sc8180x-mmss-noc", .data = &sc8180x_mmss_noc }, + { .compatible = "qcom,sc8180x-system-noc", .data = &sc8180x_system_noc }, + { } +}; +MODULE_DEVICE_TABLE(of, qnoc_of_match); + +static struct platform_driver qnoc_driver = { + .probe = qnoc_probe, + .remove = qnoc_remove, + .driver = { + .name = "qnoc-sc8180x", + .of_match_table = qnoc_of_match, + .sync_state = icc_sync_state, + }, +}; +module_platform_driver(qnoc_driver); + +MODULE_DESCRIPTION("Qualcomm sc8180x NoC driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/interconnect/qcom/sc8180x.h b/drivers/interconnect/qcom/sc8180x.h new file mode 100644 index 000000000000..e70cf7032f80 --- /dev/null +++ b/drivers/interconnect/qcom/sc8180x.h @@ -0,0 +1,174 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Qualcomm SC8180X interconnect IDs + * + * Copyright (c) 2020, The Linux Foundation. All rights reserved.
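The qnoc_probe() above registers each sc8180x NoC as an interconnect provider; other drivers then consume the resulting graph through the generic interconnect API. A minimal consumer-side sketch (the "memory" path name and the bandwidth numbers are illustrative, not taken from this series)::

	#include <linux/interconnect.h>

	static int example_request_bandwidth(struct device *dev)
	{
		struct icc_path *path;
		int ret;

		/* resolve a path listed in the consumer's "interconnects" DT property */
		path = of_icc_get(dev, "memory");
		if (IS_ERR(path))
			return PTR_ERR(path);

		/*
		 * Vote average/peak bandwidth in kBps; the framework aggregates
		 * all votes and calls the provider's ->set() to program the BCMs.
		 */
		ret = icc_set_bw(path, 100000, 200000);

		icc_put(path);
		return ret;
	}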
+ */ + +#ifndef __DRIVERS_INTERCONNECT_QCOM_SC8180X_H +#define __DRIVERS_INTERCONNECT_QCOM_SC8180X_H + +#define SC8180X_MASTER_A1NOC_CFG 1 +#define SC8180X_MASTER_UFS_CARD 2 +#define SC8180X_MASTER_UFS_GEN4 3 +#define SC8180X_MASTER_UFS_MEM 4 +#define SC8180X_MASTER_USB3 5 +#define SC8180X_MASTER_USB3_1 6 +#define SC8180X_MASTER_USB3_2 7 +#define SC8180X_MASTER_A2NOC_CFG 8 +#define SC8180X_MASTER_QDSS_BAM 9 +#define SC8180X_MASTER_QSPI_0 10 +#define SC8180X_MASTER_QSPI_1 11 +#define SC8180X_MASTER_QUP_0 12 +#define SC8180X_MASTER_QUP_1 13 +#define SC8180X_MASTER_QUP_2 14 +#define SC8180X_MASTER_SENSORS_AHB 15 +#define SC8180X_MASTER_CRYPTO_CORE_0 16 +#define SC8180X_MASTER_IPA 17 +#define SC8180X_MASTER_EMAC 18 +#define SC8180X_MASTER_PCIE 19 +#define SC8180X_MASTER_PCIE_1 20 +#define SC8180X_MASTER_PCIE_2 21 +#define SC8180X_MASTER_PCIE_3 22 +#define SC8180X_MASTER_QDSS_ETR 23 +#define SC8180X_MASTER_SDCC_2 24 +#define SC8180X_MASTER_SDCC_4 25 +#define SC8180X_MASTER_CAMNOC_HF0_UNCOMP 26 +#define SC8180X_MASTER_CAMNOC_HF1_UNCOMP 27 +#define SC8180X_MASTER_CAMNOC_SF_UNCOMP 28 +#define SC8180X_MASTER_NPU 29 +#define SC8180X_SNOC_CNOC_MAS 30 +#define SC8180X_MASTER_CNOC_DC_NOC 31 +#define SC8180X_MASTER_AMPSS_M0 32 +#define SC8180X_MASTER_GPU_TCU 33 +#define SC8180X_MASTER_SYS_TCU 34 +#define SC8180X_MASTER_GEM_NOC_CFG 35 +#define SC8180X_MASTER_COMPUTE_NOC 36 +#define SC8180X_MASTER_GRAPHICS_3D 37 +#define SC8180X_MASTER_MNOC_HF_MEM_NOC 38 +#define SC8180X_MASTER_MNOC_SF_MEM_NOC 39 +#define SC8180X_MASTER_GEM_NOC_PCIE_SNOC 40 +#define SC8180X_MASTER_SNOC_GC_MEM_NOC 41 +#define SC8180X_MASTER_SNOC_SF_MEM_NOC 42 +#define SC8180X_MASTER_ECC 43 +#define SC8180X_MASTER_IPA_CORE 44 +#define SC8180X_MASTER_LLCC 45 +#define SC8180X_MASTER_CNOC_MNOC_CFG 46 +#define SC8180X_MASTER_CAMNOC_HF0 47 +#define SC8180X_MASTER_CAMNOC_HF1 48 +#define SC8180X_MASTER_CAMNOC_SF 49 +#define SC8180X_MASTER_MDP_PORT0 50 +#define SC8180X_MASTER_MDP_PORT1 51 +#define SC8180X_MASTER_ROTATOR 52 +#define SC8180X_MASTER_VIDEO_P0 53 +#define SC8180X_MASTER_VIDEO_P1 54 +#define SC8180X_MASTER_VIDEO_PROC 55 +#define SC8180X_MASTER_SNOC_CFG 56 +#define SC8180X_A1NOC_SNOC_MAS 57 +#define SC8180X_A2NOC_SNOC_MAS 58 +#define SC8180X_MASTER_GEM_NOC_SNOC 59 +#define SC8180X_MASTER_PIMEM 60 +#define SC8180X_MASTER_GIC 61 +#define SC8180X_MASTER_MNOC_HF_MEM_NOC_DISPLAY 62 +#define SC8180X_MASTER_MNOC_SF_MEM_NOC_DISPLAY 63 +#define SC8180X_MASTER_LLCC_DISPLAY 64 +#define SC8180X_MASTER_MDP_PORT0_DISPLAY 65 +#define SC8180X_MASTER_MDP_PORT1_DISPLAY 66 +#define SC8180X_MASTER_ROTATOR_DISPLAY 67 +#define SC8180X_A1NOC_SNOC_SLV 68 +#define SC8180X_SLAVE_SERVICE_A1NOC 69 +#define SC8180X_A2NOC_SNOC_SLV 70 +#define SC8180X_SLAVE_ANOC_PCIE_GEM_NOC 71 +#define SC8180X_SLAVE_SERVICE_A2NOC 72 +#define SC8180X_SLAVE_CAMNOC_UNCOMP 73 +#define SC8180X_SLAVE_CDSP_MEM_NOC 74 +#define SC8180X_SLAVE_A1NOC_CFG 75 +#define SC8180X_SLAVE_A2NOC_CFG 76 +#define SC8180X_SLAVE_AHB2PHY_CENTER 77 +#define SC8180X_SLAVE_AHB2PHY_EAST 78 +#define SC8180X_SLAVE_AHB2PHY_WEST 79 +#define SC8180X_SLAVE_AHB2PHY_SOUTH 80 +#define SC8180X_SLAVE_AOP 81 +#define SC8180X_SLAVE_AOSS 82 +#define SC8180X_SLAVE_CAMERA_CFG 83 +#define SC8180X_SLAVE_CLK_CTL 84 +#define SC8180X_SLAVE_CDSP_CFG 85 +#define SC8180X_SLAVE_RBCPR_CX_CFG 86 +#define SC8180X_SLAVE_RBCPR_MMCX_CFG 87 +#define SC8180X_SLAVE_RBCPR_MX_CFG 88 +#define SC8180X_SLAVE_CRYPTO_0_CFG 89 +#define SC8180X_SLAVE_CNOC_DDRSS 90 +#define SC8180X_SLAVE_DISPLAY_CFG 91 +#define SC8180X_SLAVE_EMAC_CFG 92 +#define 
SC8180X_SLAVE_GLM 93 +#define SC8180X_SLAVE_GRAPHICS_3D_CFG 94 +#define SC8180X_SLAVE_IMEM_CFG 95 +#define SC8180X_SLAVE_IPA_CFG 96 +#define SC8180X_SLAVE_CNOC_MNOC_CFG 97 +#define SC8180X_SLAVE_NPU_CFG 98 +#define SC8180X_SLAVE_PCIE_0_CFG 99 +#define SC8180X_SLAVE_PCIE_1_CFG 100 +#define SC8180X_SLAVE_PCIE_2_CFG 101 +#define SC8180X_SLAVE_PCIE_3_CFG 102 +#define SC8180X_SLAVE_PDM 103 +#define SC8180X_SLAVE_PIMEM_CFG 104 +#define SC8180X_SLAVE_PRNG 105 +#define SC8180X_SLAVE_QDSS_CFG 106 +#define SC8180X_SLAVE_QSPI_0 107 +#define SC8180X_SLAVE_QSPI_1 108 +#define SC8180X_SLAVE_QUP_1 109 +#define SC8180X_SLAVE_QUP_2 110 +#define SC8180X_SLAVE_QUP_0 111 +#define SC8180X_SLAVE_SDCC_2 112 +#define SC8180X_SLAVE_SDCC_4 113 +#define SC8180X_SLAVE_SECURITY 114 +#define SC8180X_SLAVE_SNOC_CFG 115 +#define SC8180X_SLAVE_SPSS_CFG 116 +#define SC8180X_SLAVE_TCSR 117 +#define SC8180X_SLAVE_TLMM_EAST 118 +#define SC8180X_SLAVE_TLMM_SOUTH 119 +#define SC8180X_SLAVE_TLMM_WEST 120 +#define SC8180X_SLAVE_TSIF 121 +#define SC8180X_SLAVE_UFS_CARD_CFG 122 +#define SC8180X_SLAVE_UFS_MEM_0_CFG 123 +#define SC8180X_SLAVE_UFS_MEM_1_CFG 124 +#define SC8180X_SLAVE_USB3 125 +#define SC8180X_SLAVE_USB3_1 126 +#define SC8180X_SLAVE_USB3_2 127 +#define SC8180X_SLAVE_VENUS_CFG 128 +#define SC8180X_SLAVE_VSENSE_CTRL_CFG 129 +#define SC8180X_SLAVE_SERVICE_CNOC 130 +#define SC8180X_SLAVE_GEM_NOC_CFG 131 +#define SC8180X_SLAVE_LLCC_CFG 132 +#define SC8180X_SLAVE_MSS_PROC_MS_MPU_CFG 133 +#define SC8180X_SLAVE_ECC 134 +#define SC8180X_SLAVE_GEM_NOC_SNOC 135 +#define SC8180X_SLAVE_LLCC 136 +#define SC8180X_SLAVE_SERVICE_GEM_NOC 137 +#define SC8180X_SLAVE_SERVICE_GEM_NOC_1 138 +#define SC8180X_SLAVE_IPA_CORE 139 +#define SC8180X_SLAVE_EBI_CH0 140 +#define SC8180X_SLAVE_MNOC_SF_MEM_NOC 141 +#define SC8180X_SLAVE_MNOC_HF_MEM_NOC 142 +#define SC8180X_SLAVE_SERVICE_MNOC 143 +#define SC8180X_SLAVE_APPSS 144 +#define SC8180X_SNOC_CNOC_SLV 145 +#define SC8180X_SLAVE_SNOC_GEM_NOC_GC 146 +#define SC8180X_SLAVE_SNOC_GEM_NOC_SF 147 +#define SC8180X_SLAVE_OCIMEM 148 +#define SC8180X_SLAVE_PIMEM 149 +#define SC8180X_SLAVE_SERVICE_SNOC 150 +#define SC8180X_SLAVE_PCIE_0 151 +#define SC8180X_SLAVE_PCIE_1 152 +#define SC8180X_SLAVE_PCIE_2 153 +#define SC8180X_SLAVE_PCIE_3 154 +#define SC8180X_SLAVE_QDSS_STM 155 +#define SC8180X_SLAVE_TCU 156 +#define SC8180X_SLAVE_LLCC_DISPLAY 157 +#define SC8180X_SLAVE_EBI_CH0_DISPLAY 158 +#define SC8180X_SLAVE_MNOC_SF_MEM_NOC_DISPLAY 159 +#define SC8180X_SLAVE_MNOC_HF_MEM_NOC_DISPLAY 160 +#define SC8180X_MASTER_OSM_L3_APPS 161 +#define SC8180X_SLAVE_OSM_L3 162 + +#endif diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c index 366870150cbd..d2195079c228 100644 --- a/drivers/interconnect/qcom/sdm845.c +++ b/drivers/interconnect/qcom/sdm845.c @@ -440,101 +440,6 @@ static const struct qcom_icc_desc sdm845_system_noc = { .num_bcms = ARRAY_SIZE(system_noc_bcms), }; -static int qnoc_probe(struct platform_device *pdev) -{ - const struct qcom_icc_desc *desc; - struct icc_onecell_data *data; - struct icc_provider *provider; - struct qcom_icc_node **qnodes; - struct qcom_icc_provider *qp; - struct icc_node *node; - size_t num_nodes, i; - int ret; - - desc = device_get_match_data(&pdev->dev); - if (!desc) - return -EINVAL; - - qnodes = desc->nodes; - num_nodes = desc->num_nodes; - - qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL); - if (!qp) - return -ENOMEM; - - data = devm_kzalloc(&pdev->dev, struct_size(data, nodes, num_nodes), - GFP_KERNEL); - if (!data) - return -ENOMEM; - - 
provider = &qp->provider; - provider->dev = &pdev->dev; - provider->set = qcom_icc_set; - provider->pre_aggregate = qcom_icc_pre_aggregate; - provider->aggregate = qcom_icc_aggregate; - provider->xlate_extended = qcom_icc_xlate_extended; - INIT_LIST_HEAD(&provider->nodes); - provider->data = data; - - qp->dev = &pdev->dev; - qp->bcms = desc->bcms; - qp->num_bcms = desc->num_bcms; - - qp->voter = of_bcm_voter_get(qp->dev, NULL); - if (IS_ERR(qp->voter)) { - dev_err(&pdev->dev, "bcm_voter err:%ld\n", PTR_ERR(qp->voter)); - return PTR_ERR(qp->voter); - } - - ret = icc_provider_add(provider); - if (ret) { - dev_err(&pdev->dev, "error adding interconnect provider\n"); - return ret; - } - - for (i = 0; i < qp->num_bcms; i++) - qcom_icc_bcm_init(qp->bcms[i], &pdev->dev); - - for (i = 0; i < num_nodes; i++) { - size_t j; - - if (!qnodes[i]) - continue; - - node = icc_node_create(qnodes[i]->id); - if (IS_ERR(node)) { - ret = PTR_ERR(node); - goto err; - } - - node->name = qnodes[i]->name; - node->data = qnodes[i]; - icc_node_add(node, provider); - - for (j = 0; j < qnodes[i]->num_links; j++) - icc_link_create(node, qnodes[i]->links[j]); - - data->nodes[i] = node; - } - data->num_nodes = num_nodes; - - platform_set_drvdata(pdev, qp); - - return 0; -err: - icc_nodes_remove(provider); - icc_provider_del(provider); - return ret; -} - -static int qnoc_remove(struct platform_device *pdev) -{ - struct qcom_icc_provider *qp = platform_get_drvdata(pdev); - - icc_nodes_remove(&qp->provider); - return icc_provider_del(&qp->provider); -} - static const struct of_device_id qnoc_of_match[] = { { .compatible = "qcom,sdm845-aggre1-noc", .data = &sdm845_aggre1_noc}, @@ -557,8 +462,8 @@ static const struct of_device_id qnoc_of_match[] = { MODULE_DEVICE_TABLE(of, qnoc_of_match); static struct platform_driver qnoc_driver = { - .probe = qnoc_probe, - .remove = qnoc_remove, + .probe = qcom_icc_rpmh_probe, + .remove = qcom_icc_rpmh_remove, .driver = { .name = "qnoc-sdm845", .of_match_table = qnoc_of_match, diff --git a/drivers/interconnect/qcom/sdx55.c b/drivers/interconnect/qcom/sdx55.c index a5a122ee3d21..03d604f84cc5 100644 --- a/drivers/interconnect/qcom/sdx55.c +++ b/drivers/interconnect/qcom/sdx55.c @@ -235,98 +235,6 @@ static const struct qcom_icc_desc sdx55_ipa_virt = { .num_bcms = ARRAY_SIZE(ipa_virt_bcms), }; -static int qnoc_probe(struct platform_device *pdev) -{ - const struct qcom_icc_desc *desc; - struct icc_onecell_data *data; - struct icc_provider *provider; - struct qcom_icc_node **qnodes; - struct qcom_icc_provider *qp; - struct icc_node *node; - size_t num_nodes, i; - int ret; - - desc = device_get_match_data(&pdev->dev); - if (!desc) - return -EINVAL; - - qnodes = desc->nodes; - num_nodes = desc->num_nodes; - - qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL); - if (!qp) - return -ENOMEM; - - data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL); - if (!data) - return -ENOMEM; - - provider = &qp->provider; - provider->dev = &pdev->dev; - provider->set = qcom_icc_set; - provider->pre_aggregate = qcom_icc_pre_aggregate; - provider->aggregate = qcom_icc_aggregate; - provider->xlate = of_icc_xlate_onecell; - INIT_LIST_HEAD(&provider->nodes); - provider->data = data; - - qp->dev = &pdev->dev; - qp->bcms = desc->bcms; - qp->num_bcms = desc->num_bcms; - - qp->voter = of_bcm_voter_get(qp->dev, NULL); - if (IS_ERR(qp->voter)) - return PTR_ERR(qp->voter); - - ret = icc_provider_add(provider); - if (ret) { - dev_err(&pdev->dev, "error adding interconnect provider\n"); - return ret; - } - - 
for (i = 0; i < qp->num_bcms; i++) - qcom_icc_bcm_init(qp->bcms[i], &pdev->dev); - - for (i = 0; i < num_nodes; i++) { - size_t j; - - if (!qnodes[i]) - continue; - - node = icc_node_create(qnodes[i]->id); - if (IS_ERR(node)) { - ret = PTR_ERR(node); - goto err; - } - - node->name = qnodes[i]->name; - node->data = qnodes[i]; - icc_node_add(node, provider); - - for (j = 0; j < qnodes[i]->num_links; j++) - icc_link_create(node, qnodes[i]->links[j]); - - data->nodes[i] = node; - } - data->num_nodes = num_nodes; - - platform_set_drvdata(pdev, qp); - - return 0; -err: - icc_nodes_remove(provider); - icc_provider_del(provider); - return ret; -} - -static int qnoc_remove(struct platform_device *pdev) -{ - struct qcom_icc_provider *qp = platform_get_drvdata(pdev); - - icc_nodes_remove(&qp->provider); - return icc_provider_del(&qp->provider); -} - static const struct of_device_id qnoc_of_match[] = { { .compatible = "qcom,sdx55-mc-virt", .data = &sdx55_mc_virt}, @@ -341,8 +249,8 @@ static const struct of_device_id qnoc_of_match[] = { MODULE_DEVICE_TABLE(of, qnoc_of_match); static struct platform_driver qnoc_driver = { - .probe = qnoc_probe, - .remove = qnoc_remove, + .probe = qcom_icc_rpmh_probe, + .remove = qcom_icc_rpmh_remove, .driver = { .name = "qnoc-sdx55", .of_match_table = qnoc_of_match, diff --git a/drivers/interconnect/qcom/sm8150.c b/drivers/interconnect/qcom/sm8150.c index c76b2c7f9b10..2a85f53802b5 100644 --- a/drivers/interconnect/qcom/sm8150.c +++ b/drivers/interconnect/qcom/sm8150.c @@ -502,98 +502,6 @@ static struct qcom_icc_desc sm8150_system_noc = { .num_bcms = ARRAY_SIZE(system_noc_bcms), }; -static int qnoc_probe(struct platform_device *pdev) -{ - const struct qcom_icc_desc *desc; - struct icc_onecell_data *data; - struct icc_provider *provider; - struct qcom_icc_node **qnodes; - struct qcom_icc_provider *qp; - struct icc_node *node; - size_t num_nodes, i; - int ret; - - desc = device_get_match_data(&pdev->dev); - if (!desc) - return -EINVAL; - - qnodes = desc->nodes; - num_nodes = desc->num_nodes; - - qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL); - if (!qp) - return -ENOMEM; - - data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL); - if (!data) - return -ENOMEM; - - provider = &qp->provider; - provider->dev = &pdev->dev; - provider->set = qcom_icc_set; - provider->pre_aggregate = qcom_icc_pre_aggregate; - provider->aggregate = qcom_icc_aggregate; - provider->xlate = of_icc_xlate_onecell; - INIT_LIST_HEAD(&provider->nodes); - provider->data = data; - - qp->dev = &pdev->dev; - qp->bcms = desc->bcms; - qp->num_bcms = desc->num_bcms; - - qp->voter = of_bcm_voter_get(qp->dev, NULL); - if (IS_ERR(qp->voter)) - return PTR_ERR(qp->voter); - - ret = icc_provider_add(provider); - if (ret) { - dev_err(&pdev->dev, "error adding interconnect provider\n"); - return ret; - } - - for (i = 0; i < qp->num_bcms; i++) - qcom_icc_bcm_init(qp->bcms[i], &pdev->dev); - - for (i = 0; i < num_nodes; i++) { - size_t j; - - if (!qnodes[i]) - continue; - - node = icc_node_create(qnodes[i]->id); - if (IS_ERR(node)) { - ret = PTR_ERR(node); - goto err; - } - - node->name = qnodes[i]->name; - node->data = qnodes[i]; - icc_node_add(node, provider); - - for (j = 0; j < qnodes[i]->num_links; j++) - icc_link_create(node, qnodes[i]->links[j]); - - data->nodes[i] = node; - } - data->num_nodes = num_nodes; - - platform_set_drvdata(pdev, qp); - - return 0; -err: - icc_nodes_remove(provider); - icc_provider_del(provider); - return ret; -} - -static int qnoc_remove(struct platform_device 
*pdev) -{ - struct qcom_icc_provider *qp = platform_get_drvdata(pdev); - - icc_nodes_remove(&qp->provider); - return icc_provider_del(&qp->provider); -} - static const struct of_device_id qnoc_of_match[] = { { .compatible = "qcom,sm8150-aggre1-noc", .data = &sm8150_aggre1_noc}, @@ -622,8 +530,8 @@ static const struct of_device_id qnoc_of_match[] = { MODULE_DEVICE_TABLE(of, qnoc_of_match); static struct platform_driver qnoc_driver = { - .probe = qnoc_probe, - .remove = qnoc_remove, + .probe = qcom_icc_rpmh_probe, + .remove = qcom_icc_rpmh_remove, .driver = { .name = "qnoc-sm8150", .of_match_table = qnoc_of_match, diff --git a/drivers/interconnect/qcom/sm8250.c b/drivers/interconnect/qcom/sm8250.c index cc558fec74e3..8dfb5dea562a 100644 --- a/drivers/interconnect/qcom/sm8250.c +++ b/drivers/interconnect/qcom/sm8250.c @@ -518,98 +518,6 @@ static struct qcom_icc_desc sm8250_system_noc = { .num_bcms = ARRAY_SIZE(system_noc_bcms), }; -static int qnoc_probe(struct platform_device *pdev) -{ - const struct qcom_icc_desc *desc; - struct icc_onecell_data *data; - struct icc_provider *provider; - struct qcom_icc_node **qnodes; - struct qcom_icc_provider *qp; - struct icc_node *node; - size_t num_nodes, i; - int ret; - - desc = device_get_match_data(&pdev->dev); - if (!desc) - return -EINVAL; - - qnodes = desc->nodes; - num_nodes = desc->num_nodes; - - qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL); - if (!qp) - return -ENOMEM; - - data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL); - if (!data) - return -ENOMEM; - - provider = &qp->provider; - provider->dev = &pdev->dev; - provider->set = qcom_icc_set; - provider->pre_aggregate = qcom_icc_pre_aggregate; - provider->aggregate = qcom_icc_aggregate; - provider->xlate = of_icc_xlate_onecell; - INIT_LIST_HEAD(&provider->nodes); - provider->data = data; - - qp->dev = &pdev->dev; - qp->bcms = desc->bcms; - qp->num_bcms = desc->num_bcms; - - qp->voter = of_bcm_voter_get(qp->dev, NULL); - if (IS_ERR(qp->voter)) - return PTR_ERR(qp->voter); - - ret = icc_provider_add(provider); - if (ret) { - dev_err(&pdev->dev, "error adding interconnect provider\n"); - return ret; - } - - for (i = 0; i < qp->num_bcms; i++) - qcom_icc_bcm_init(qp->bcms[i], &pdev->dev); - - for (i = 0; i < num_nodes; i++) { - size_t j; - - if (!qnodes[i]) - continue; - - node = icc_node_create(qnodes[i]->id); - if (IS_ERR(node)) { - ret = PTR_ERR(node); - goto err; - } - - node->name = qnodes[i]->name; - node->data = qnodes[i]; - icc_node_add(node, provider); - - for (j = 0; j < qnodes[i]->num_links; j++) - icc_link_create(node, qnodes[i]->links[j]); - - data->nodes[i] = node; - } - data->num_nodes = num_nodes; - - platform_set_drvdata(pdev, qp); - - return 0; -err: - icc_nodes_remove(provider); - icc_provider_del(provider); - return ret; -} - -static int qnoc_remove(struct platform_device *pdev) -{ - struct qcom_icc_provider *qp = platform_get_drvdata(pdev); - - icc_nodes_remove(&qp->provider); - return icc_provider_del(&qp->provider); -} - static const struct of_device_id qnoc_of_match[] = { { .compatible = "qcom,sm8250-aggre1-noc", .data = &sm8250_aggre1_noc}, @@ -638,8 +546,8 @@ static const struct of_device_id qnoc_of_match[] = { MODULE_DEVICE_TABLE(of, qnoc_of_match); static struct platform_driver qnoc_driver = { - .probe = qnoc_probe, - .remove = qnoc_remove, + .probe = qcom_icc_rpmh_probe, + .remove = qcom_icc_rpmh_remove, .driver = { .name = "qnoc-sm8250", .of_match_table = qnoc_of_match, diff --git a/drivers/interconnect/qcom/sm8350.c 
b/drivers/interconnect/qcom/sm8350.c index 579b6ce8e046..3e26a2175b28 100644 --- a/drivers/interconnect/qcom/sm8350.c +++ b/drivers/interconnect/qcom/sm8350.c @@ -510,99 +510,6 @@ static struct qcom_icc_desc sm8350_system_noc = { .num_bcms = ARRAY_SIZE(system_noc_bcms), }; -static int qnoc_probe(struct platform_device *pdev) -{ - const struct qcom_icc_desc *desc; - struct icc_onecell_data *data; - struct icc_provider *provider; - struct qcom_icc_node **qnodes; - struct qcom_icc_provider *qp; - struct icc_node *node; - size_t num_nodes, i; - int ret; - - desc = of_device_get_match_data(&pdev->dev); - if (!desc) - return -EINVAL; - - qnodes = desc->nodes; - num_nodes = desc->num_nodes; - - qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL); - if (!qp) - return -ENOMEM; - - data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL); - if (!data) - return -ENOMEM; - - provider = &qp->provider; - provider->dev = &pdev->dev; - provider->set = qcom_icc_set; - provider->pre_aggregate = qcom_icc_pre_aggregate; - provider->aggregate = qcom_icc_aggregate; - provider->xlate = of_icc_xlate_onecell; - INIT_LIST_HEAD(&provider->nodes); - provider->data = data; - - qp->dev = &pdev->dev; - qp->bcms = desc->bcms; - qp->num_bcms = desc->num_bcms; - - qp->voter = of_bcm_voter_get(qp->dev, NULL); - if (IS_ERR(qp->voter)) - return PTR_ERR(qp->voter); - - ret = icc_provider_add(provider); - if (ret) { - dev_err(&pdev->dev, "error adding interconnect provider\n"); - return ret; - } - - for (i = 0; i < qp->num_bcms; i++) - qcom_icc_bcm_init(qp->bcms[i], &pdev->dev); - - for (i = 0; i < num_nodes; i++) { - size_t j; - - if (!qnodes[i]) - continue; - - node = icc_node_create(qnodes[i]->id); - if (IS_ERR(node)) { - ret = PTR_ERR(node); - goto err; - } - - node->name = qnodes[i]->name; - node->data = qnodes[i]; - icc_node_add(node, provider); - - for (j = 0; j < qnodes[i]->num_links; j++) - icc_link_create(node, qnodes[i]->links[j]); - - data->nodes[i] = node; - } - data->num_nodes = num_nodes; - - platform_set_drvdata(pdev, qp); - - return ret; - -err: - icc_nodes_remove(provider); - icc_provider_del(provider); - return ret; -} - -static int qnoc_remove(struct platform_device *pdev) -{ - struct qcom_icc_provider *qp = platform_get_drvdata(pdev); - - icc_nodes_remove(&qp->provider); - return icc_provider_del(&qp->provider); -} - static const struct of_device_id qnoc_of_match[] = { { .compatible = "qcom,sm8350-aggre1-noc", .data = &sm8350_aggre1_noc}, { .compatible = "qcom,sm8350-aggre2-noc", .data = &sm8350_aggre2_noc}, @@ -619,8 +526,8 @@ static const struct of_device_id qnoc_of_match[] = { MODULE_DEVICE_TABLE(of, qnoc_of_match); static struct platform_driver qnoc_driver = { - .probe = qnoc_probe, - .remove = qnoc_remove, + .probe = qcom_icc_rpmh_probe, + .remove = qcom_icc_rpmh_remove, .driver = { .name = "qnoc-sm8350", .of_match_table = qnoc_of_match, diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index dd20b01771c4..235f9bdaeaf2 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -379,6 +379,7 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu) switch (idx) { case CMDQ_ERR_CERROR_ABT_IDX: dev_err(smmu->dev, "retrying command fetch\n"); + return; case CMDQ_ERR_CERROR_NONE_IDX: return; case CMDQ_ERR_CERROR_ATC_INV_IDX: diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c index 25ed444ff94d..021cf8f65ffc 100644 --- 
a/drivers/iommu/arm/arm-smmu/qcom_iommu.c +++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c @@ -849,12 +849,10 @@ static int qcom_iommu_device_probe(struct platform_device *pdev) ret = iommu_device_register(&qcom_iommu->iommu, &qcom_iommu_ops, dev); if (ret) { dev_err(dev, "Failed to register iommu\n"); - goto err_sysfs_remove; + return ret; } - ret = bus_set_iommu(&platform_bus_type, &qcom_iommu_ops); - if (ret) - goto err_unregister_device; + bus_set_iommu(&platform_bus_type, &qcom_iommu_ops); if (qcom_iommu->local_base) { pm_runtime_get_sync(dev); @@ -863,13 +861,6 @@ static int qcom_iommu_device_probe(struct platform_device *pdev) } return 0; - -err_unregister_device: - iommu_device_unregister(&qcom_iommu->iommu); - -err_sysfs_remove: - iommu_device_sysfs_remove(&qcom_iommu->iommu); - return ret; } static int qcom_iommu_device_remove(struct platform_device *pdev) diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 98ba927aee1a..6f0df629353f 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -768,6 +768,7 @@ static void iommu_dma_free_noncontiguous(struct device *dev, size_t size, __iommu_dma_unmap(dev, sgt->sgl->dma_address, size); __iommu_dma_free_pages(sh->pages, PAGE_ALIGN(size) >> PAGE_SHIFT); sg_free_table(&sh->sgt); + kfree(sh); } #endif /* CONFIG_DMA_REMAP */ diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index a6a07d985709..dd22fc7d5176 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -2429,10 +2429,11 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, return 0; } -static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn) +static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8 devfn) { - unsigned long flags; + struct intel_iommu *iommu = info->iommu; struct context_entry *context; + unsigned long flags; u16 did_old; if (!iommu) @@ -2444,7 +2445,16 @@ static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn spin_unlock_irqrestore(&iommu->lock, flags); return; } - did_old = context_domain_id(context); + + if (sm_supported(iommu)) { + if (hw_pass_through && domain_type_is_si(info->domain)) + did_old = FLPT_DEFAULT_DID; + else + did_old = info->domain->iommu_did[iommu->seq_id]; + } else { + did_old = context_domain_id(context); + } + context_clear_entry(context); __iommu_flush_cache(iommu, context, sizeof(*context)); spin_unlock_irqrestore(&iommu->lock, flags); @@ -2462,6 +2472,8 @@ static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn 0, 0, DMA_TLB_DSI_FLUSH); + + __iommu_flush_dev_iotlb(info, 0, MAX_AGAW_PFN_WIDTH); } static inline void unlink_domain_info(struct device_domain_info *info) @@ -4425,9 +4437,9 @@ out_free_dmar: static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque) { - struct intel_iommu *iommu = opaque; + struct device_domain_info *info = opaque; - domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff); + domain_context_clear_one(info, PCI_BUS_NUM(alias), alias & 0xff); return 0; } @@ -4437,12 +4449,13 @@ static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *op * devices, unbinding the driver from any one of them will possibly leave * the others unable to operate. 
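The comment above explains why every DMA alias of a device must have its context entry cleared; domain_context_clear() below does this by walking the aliases with pci_for_each_dma_alias(). A condensed sketch of that helper's callback contract, with a hypothetical callback::

	#include <linux/pci.h>

	/* called once per DMA alias; returning non-zero stops the walk */
	static int dump_dma_alias(struct pci_dev *pdev, u16 alias, void *opaque)
	{
		dev_info(&pdev->dev, "DMA alias %02x:%02x.%d\n",
			 PCI_BUS_NUM(alias), PCI_SLOT(alias & 0xff),
			 PCI_FUNC(alias & 0xff));
		return 0;
	}

	/* usage: pci_for_each_dma_alias(pdev, dump_dma_alias, NULL); */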
*/ -static void domain_context_clear(struct intel_iommu *iommu, struct device *dev) +static void domain_context_clear(struct device_domain_info *info) { - if (!iommu || !dev || !dev_is_pci(dev)) + if (!info->iommu || !info->dev || !dev_is_pci(info->dev)) return; - pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu); + pci_for_each_dma_alias(to_pci_dev(info->dev), + &domain_context_clear_one_cb, info); } static void __dmar_remove_one_dev_info(struct device_domain_info *info) @@ -4459,14 +4472,13 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info) iommu = info->iommu; domain = info->domain; - if (info->dev) { + if (info->dev && !dev_is_real_dma_subdevice(info->dev)) { if (dev_is_pci(info->dev) && sm_supported(iommu)) intel_pasid_tear_down_entry(iommu, info->dev, PASID_RID2PASID, false); iommu_disable_dev_iotlb(info); - if (!dev_is_real_dma_subdevice(info->dev)) - domain_context_clear(iommu, info->dev); + domain_context_clear(info); intel_pasid_free_table(info->dev); } diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c index c6cf44a6c923..9ec374e17469 100644 --- a/drivers/iommu/intel/pasid.c +++ b/drivers/iommu/intel/pasid.c @@ -511,7 +511,7 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev, u32 pasid, bool fault_ignore) { struct pasid_entry *pte; - u16 did; + u16 did, pgtt; pte = intel_pasid_get_entry(dev, pasid); if (WARN_ON(!pte)) @@ -521,13 +521,19 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev, return; did = pasid_get_domain_id(pte); + pgtt = pasid_pte_get_pgtt(pte); + intel_pasid_clear_entry(dev, pasid, fault_ignore); if (!ecap_coherent(iommu->ecap)) clflush_cache_range(pte, sizeof(*pte)); pasid_cache_invalidation_with_pasid(iommu, did, pasid); - qi_flush_piotlb(iommu, did, pasid, 0, -1, 0); + + if (pgtt == PASID_ENTRY_PGTT_PT || pgtt == PASID_ENTRY_PGTT_FL_ONLY) + qi_flush_piotlb(iommu, did, pasid, 0, -1, 0); + else + iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); /* Device IOTLB doesn't need to be flushed in caching mode. */ if (!cap_caching_mode(iommu->cap)) diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h index 5ff61c3d401f..c11bc8b833b8 100644 --- a/drivers/iommu/intel/pasid.h +++ b/drivers/iommu/intel/pasid.h @@ -99,6 +99,12 @@ static inline bool pasid_pte_is_present(struct pasid_entry *pte) return READ_ONCE(pte->val[0]) & PASID_PTE_PRESENT; } +/* Get PGTT field of a PASID table entry */ +static inline u16 pasid_pte_get_pgtt(struct pasid_entry *pte) +{ + return (u16)((READ_ONCE(pte->val[0]) >> 6) & 0x7); +} + extern unsigned int intel_pasid_max_id; int intel_pasid_alloc_table(struct device *dev); void intel_pasid_free_table(struct device *dev); diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c index 9b0f22bc0514..4b9b3f35ba0e 100644 --- a/drivers/iommu/intel/svm.c +++ b/drivers/iommu/intel/svm.c @@ -675,7 +675,6 @@ static int intel_svm_unbind_mm(struct device *dev, u32 pasid) kfree_rcu(sdev, rcu); if (list_empty(&svm->devs)) { - intel_svm_free_pasid(mm); if (svm->notifier.ops) { mmu_notifier_unregister(&svm->notifier, mm); /* Clear mm's pasid. */ @@ -690,6 +689,8 @@ static int intel_svm_unbind_mm(struct device *dev, u32 pasid) kfree(svm); } } + /* Drop a PASID reference and free it if no reference. 
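The new pasid_pte_get_pgtt() above open-codes extraction of the 3-bit PGTT (page-table type) field from bits 8:6 of the first PASID entry word. An equivalent spelling with the generic bitfield helpers, shown only to make the field layout explicit (this is not what the patch adds)::

	#include <linux/bitfield.h>
	#include <linux/bits.h>

	#define PASID_PTE_PGTT	GENMASK_ULL(8, 6)	/* page-table type */

	static inline u16 pasid_pte_get_pgtt_alt(struct pasid_entry *pte)
	{
		return FIELD_GET(PASID_PTE_PGTT, READ_ONCE(pte->val[0]));
	}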
*/ + intel_svm_free_pasid(mm); } out: return ret; diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 5419c4b9f27a..63f0af10c403 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -924,6 +924,9 @@ void iommu_group_remove_device(struct device *dev) struct iommu_group *group = dev->iommu_group; struct group_device *tmp_device, *device = NULL; + if (!group) + return; + dev_info(dev, "Removing from iommu group %d\n", group->id); /* Pre-notify listeners that a device is being removed. */ diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 94b9d8e5b9a4..9febfb7f3025 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -544,12 +544,14 @@ static inline u32 rk_dma_addr_dte(dma_addr_t dt_dma) } #define DT_HI_MASK GENMASK_ULL(39, 32) +#define DTE_BASE_HI_MASK GENMASK(11, 4) #define DT_SHIFT 28 static inline phys_addr_t rk_dte_addr_phys_v2(u32 addr) { - return (phys_addr_t)(addr & RK_DTE_PT_ADDRESS_MASK) | - ((addr & DT_HI_MASK) << DT_SHIFT); + u64 addr64 = addr; + return (phys_addr_t)(addr64 & RK_DTE_PT_ADDRESS_MASK) | + ((addr64 & DTE_BASE_HI_MASK) << DT_SHIFT); } static inline u32 rk_dma_addr_dte_v2(dma_addr_t dt_dma) diff --git a/drivers/ipack/carriers/tpci200.c b/drivers/ipack/carriers/tpci200.c index 3461b0a7dc62..cbfdadecb23b 100644 --- a/drivers/ipack/carriers/tpci200.c +++ b/drivers/ipack/carriers/tpci200.c @@ -89,16 +89,13 @@ static void tpci200_unregister(struct tpci200_board *tpci200) free_irq(tpci200->info->pdev->irq, (void *) tpci200); pci_iounmap(tpci200->info->pdev, tpci200->info->interface_regs); - pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs); pci_release_region(tpci200->info->pdev, TPCI200_IP_INTERFACE_BAR); pci_release_region(tpci200->info->pdev, TPCI200_IO_ID_INT_SPACES_BAR); pci_release_region(tpci200->info->pdev, TPCI200_MEM16_SPACE_BAR); pci_release_region(tpci200->info->pdev, TPCI200_MEM8_SPACE_BAR); - pci_release_region(tpci200->info->pdev, TPCI200_CFG_MEM_BAR); pci_disable_device(tpci200->info->pdev); - pci_dev_put(tpci200->info->pdev); } static void tpci200_enable_irq(struct tpci200_board *tpci200, @@ -257,7 +254,7 @@ static int tpci200_register(struct tpci200_board *tpci200) "(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 2 !", tpci200->info->pdev->bus->number, tpci200->info->pdev->devfn); - goto out_disable_pci; + goto err_disable_device; } /* Request IO ID INT space (Bar 3) */ @@ -269,7 +266,7 @@ static int tpci200_register(struct tpci200_board *tpci200) "(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 3 !", tpci200->info->pdev->bus->number, tpci200->info->pdev->devfn); - goto out_release_ip_space; + goto err_ip_interface_bar; } /* Request MEM8 space (Bar 5) */ @@ -280,7 +277,7 @@ static int tpci200_register(struct tpci200_board *tpci200) "(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 5!", tpci200->info->pdev->bus->number, tpci200->info->pdev->devfn); - goto out_release_ioid_int_space; + goto err_io_id_int_spaces_bar; } /* Request MEM16 space (Bar 4) */ @@ -291,7 +288,7 @@ static int tpci200_register(struct tpci200_board *tpci200) "(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 4!", tpci200->info->pdev->bus->number, tpci200->info->pdev->devfn); - goto out_release_mem8_space; + goto err_mem8_space_bar; } /* Map internal tpci200 driver user space */ @@ -305,7 +302,7 @@ static int tpci200_register(struct tpci200_board *tpci200) tpci200->info->pdev->bus->number, tpci200->info->pdev->devfn); res = -ENOMEM; - goto 
out_release_mem8_space; + goto err_mem16_space_bar; } /* Initialize lock that protects interface_regs */ @@ -344,18 +341,22 @@ static int tpci200_register(struct tpci200_board *tpci200) "(bn 0x%X, sn 0x%X) unable to register IRQ !", tpci200->info->pdev->bus->number, tpci200->info->pdev->devfn); - goto out_release_ioid_int_space; + goto err_interface_regs; } return 0; -out_release_mem8_space: +err_interface_regs: + pci_iounmap(tpci200->info->pdev, tpci200->info->interface_regs); +err_mem16_space_bar: + pci_release_region(tpci200->info->pdev, TPCI200_MEM16_SPACE_BAR); +err_mem8_space_bar: pci_release_region(tpci200->info->pdev, TPCI200_MEM8_SPACE_BAR); -out_release_ioid_int_space: +err_io_id_int_spaces_bar: pci_release_region(tpci200->info->pdev, TPCI200_IO_ID_INT_SPACES_BAR); -out_release_ip_space: +err_ip_interface_bar: pci_release_region(tpci200->info->pdev, TPCI200_IP_INTERFACE_BAR); -out_disable_pci: +err_disable_device: pci_disable_device(tpci200->info->pdev); return res; } @@ -527,7 +528,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev, tpci200->info = kzalloc(sizeof(struct tpci200_infos), GFP_KERNEL); if (!tpci200->info) { ret = -ENOMEM; - goto out_err_info; + goto err_tpci200; } pci_dev_get(pdev); @@ -538,7 +539,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev, if (ret) { dev_err(&pdev->dev, "Failed to allocate PCI Configuration Memory"); ret = -EBUSY; - goto out_err_pci_request; + goto err_tpci200_info; } tpci200->info->cfg_regs = ioremap( pci_resource_start(pdev, TPCI200_CFG_MEM_BAR), @@ -546,7 +547,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev, if (!tpci200->info->cfg_regs) { dev_err(&pdev->dev, "Failed to map PCI Configuration Memory"); ret = -EFAULT; - goto out_err_ioremap; + goto err_request_region; } /* Disable byte swapping for 16 bit IP module access. 
This will ensure @@ -569,7 +570,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev, if (ret) { dev_err(&pdev->dev, "error during tpci200 install\n"); ret = -ENODEV; - goto out_err_install; + goto err_cfg_regs; } /* Register the carrier in the industry pack bus driver */ @@ -581,7 +582,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev, dev_err(&pdev->dev, "error registering the carrier on ipack driver\n"); ret = -EFAULT; - goto out_err_bus_register; + goto err_tpci200_install; } /* save the bus number given by ipack to logging purpose */ @@ -592,19 +593,16 @@ static int tpci200_pci_probe(struct pci_dev *pdev, tpci200_create_device(tpci200, i); return 0; -out_err_bus_register: +err_tpci200_install: tpci200_uninstall(tpci200); - /* tpci200->info->cfg_regs is unmapped in tpci200_uninstall */ - tpci200->info->cfg_regs = NULL; -out_err_install: - if (tpci200->info->cfg_regs) - iounmap(tpci200->info->cfg_regs); -out_err_ioremap: +err_cfg_regs: + pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs); +err_request_region: pci_release_region(pdev, TPCI200_CFG_MEM_BAR); -out_err_pci_request: - pci_dev_put(pdev); +err_tpci200_info: kfree(tpci200->info); -out_err_info: + pci_dev_put(pdev); +err_tpci200: kfree(tpci200); return ret; } @@ -614,6 +612,12 @@ static void __tpci200_pci_remove(struct tpci200_board *tpci200) ipack_bus_unregister(tpci200->info->ipack_bus); tpci200_uninstall(tpci200); + pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs); + + pci_release_region(tpci200->info->pdev, TPCI200_CFG_MEM_BAR); + + pci_dev_put(tpci200->info->pdev); + kfree(tpci200->info); kfree(tpci200); } diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 51f2547c2007..3c44c4bb40fc 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -474,8 +474,6 @@ static void raid1_end_write_request(struct bio *bio) /* * When the device is faulty, it is not necessary to * handle write error. - * For failfast, this is the only remaining device, - * We need to retry the write without FailFast. */ if (!test_bit(Faulty, &rdev->flags)) set_bit(R1BIO_WriteError, &r1_bio->state); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 16977e8e075d..07119d7e0fdf 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -471,12 +471,12 @@ static void raid10_end_write_request(struct bio *bio) /* * When the device is faulty, it is not necessary to * handle write error. - * For failfast, this is the only remaining device, - * We need to retry the write without FailFast. */ if (!test_bit(Faulty, &rdev->flags)) set_bit(R10BIO_WriteError, &r10_bio->state); else { + /* Fail the request */ + set_bit(R10BIO_Degraded, &r10_bio->state); r10_bio->devs[slot].bio = NULL; to_put = bio; dec_rdev = 1; diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c index 02281d13505f..508ac295eb06 100644 --- a/drivers/media/common/videobuf2/videobuf2-core.c +++ b/drivers/media/common/videobuf2/videobuf2-core.c @@ -1573,6 +1573,7 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb, struct media_request *req) { struct vb2_buffer *vb; + enum vb2_buffer_state orig_state; int ret; if (q->error) { @@ -1673,6 +1674,7 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb, * Add to the queued buffers list, a buffer will stay on it until * dequeued in dqbuf. 
*/ + orig_state = vb->state; list_add_tail(&vb->queued_entry, &q->queued_list); q->queued_count++; q->waiting_for_buffers = false; @@ -1703,8 +1705,17 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb, if (q->streaming && !q->start_streaming_called && q->queued_count >= q->min_buffers_needed) { ret = vb2_start_streaming(q); - if (ret) + if (ret) { + /* + * Since vb2_core_qbuf will return with an error, + * we should return it to state DEQUEUED since + * the error indicates that the buffer wasn't queued. + */ + list_del(&vb->queued_entry); + q->queued_count--; + vb->state = orig_state; return ret; + } } dprintk(q, 2, "qbuf of buffer %d succeeded\n", vb->index); diff --git a/drivers/media/pci/intel/ipu3/cio2-bridge.c b/drivers/media/pci/intel/ipu3/cio2-bridge.c index 4657e99df033..59a36f922675 100644 --- a/drivers/media/pci/intel/ipu3/cio2-bridge.c +++ b/drivers/media/pci/intel/ipu3/cio2-bridge.c @@ -173,10 +173,8 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg, int ret; for_each_acpi_dev_match(adev, cfg->hid, NULL, -1) { - if (!adev->status.enabled) { - acpi_dev_put(adev); + if (!adev->status.enabled) continue; - } if (bridge->n_sensors >= CIO2_NUM_PORTS) { acpi_dev_put(adev); @@ -185,7 +183,6 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg, } sensor = &bridge->sensors[bridge->n_sensors]; - sensor->adev = adev; strscpy(sensor->name, cfg->hid, sizeof(sensor->name)); ret = cio2_bridge_read_acpi_buffer(adev, "SSDB", @@ -215,6 +212,7 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg, goto err_free_swnodes; } + sensor->adev = acpi_dev_get(adev); adev->fwnode.secondary = fwnode; dev_info(&cio2->dev, "Found supported sensor %s\n", diff --git a/drivers/media/pci/ngene/ngene-core.c b/drivers/media/pci/ngene/ngene-core.c index 07f342db6701..7481f553f959 100644 --- a/drivers/media/pci/ngene/ngene-core.c +++ b/drivers/media/pci/ngene/ngene-core.c @@ -385,7 +385,7 @@ static int ngene_command_config_free_buf(struct ngene *dev, u8 *config) com.cmd.hdr.Opcode = CMD_CONFIGURE_FREE_BUFFER; com.cmd.hdr.Length = 6; - memcpy(&com.cmd.ConfigureBuffers.config, config, 6); + memcpy(&com.cmd.ConfigureFreeBuffers.config, config, 6); com.in_len = 6; com.out_len = 0; diff --git a/drivers/media/pci/ngene/ngene.h b/drivers/media/pci/ngene/ngene.h index 84f04e0e0cb9..3d296f1998a1 100644 --- a/drivers/media/pci/ngene/ngene.h +++ b/drivers/media/pci/ngene/ngene.h @@ -407,12 +407,14 @@ enum _BUFFER_CONFIGS { struct FW_CONFIGURE_FREE_BUFFERS { struct FW_HEADER hdr; - u8 UVI1_BufferLength; - u8 UVI2_BufferLength; - u8 TVO_BufferLength; - u8 AUD1_BufferLength; - u8 AUD2_BufferLength; - u8 TVA_BufferLength; + struct { + u8 UVI1_BufferLength; + u8 UVI2_BufferLength; + u8 TVO_BufferLength; + u8 AUD1_BufferLength; + u8 AUD2_BufferLength; + u8 TVA_BufferLength; + } __packed config; } __attribute__ ((__packed__)); struct FW_CONFIGURE_UART { diff --git a/drivers/media/platform/atmel/Kconfig b/drivers/media/platform/atmel/Kconfig index 99b51213f871..dda2f27da317 100644 --- a/drivers/media/platform/atmel/Kconfig +++ b/drivers/media/platform/atmel/Kconfig @@ -8,6 +8,7 @@ config VIDEO_ATMEL_ISC select VIDEOBUF2_DMA_CONTIG select REGMAP_MMIO select V4L2_FWNODE + select VIDEO_ATMEL_ISC_BASE help This module makes the ATMEL Image Sensor Controller available as a v4l2 device. 
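The Kconfig hunks here and below split the shared Atmel ISC code into a common module: a hidden VIDEO_ATMEL_ISC_BASE symbol, selected by both front ends, builds atmel-isc-base as its own module, which must then export every entry point the front ends call. A stripped-down sketch of the pattern with a hypothetical helper (the real exports are the isc_* symbols in the following hunks)::

	#include <linux/module.h>

	/* lives in the common module built from the hidden Kconfig symbol */
	int shared_helper(void)
	{
		return 0;
	}
	EXPORT_SYMBOL_GPL(shared_helper);	/* now reachable from both front ends */

	MODULE_LICENSE("GPL v2");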
@@ -19,10 +20,17 @@ config VIDEO_ATMEL_XISC select VIDEOBUF2_DMA_CONTIG select REGMAP_MMIO select V4L2_FWNODE + select VIDEO_ATMEL_ISC_BASE help This module makes the ATMEL eXtended Image Sensor Controller available as a v4l2 device. +config VIDEO_ATMEL_ISC_BASE + tristate + default n + help + ATMEL ISC and XISC common code base. + config VIDEO_ATMEL_ISI tristate "ATMEL Image Sensor Interface (ISI) support" depends on VIDEO_V4L2 && OF diff --git a/drivers/media/platform/atmel/Makefile b/drivers/media/platform/atmel/Makefile index c5c01556c653..46d264ab7948 100644 --- a/drivers/media/platform/atmel/Makefile +++ b/drivers/media/platform/atmel/Makefile @@ -1,7 +1,8 @@ # SPDX-License-Identifier: GPL-2.0-only -atmel-isc-objs = atmel-sama5d2-isc.o atmel-isc-base.o -atmel-xisc-objs = atmel-sama7g5-isc.o atmel-isc-base.o +atmel-isc-objs = atmel-sama5d2-isc.o +atmel-xisc-objs = atmel-sama7g5-isc.o obj-$(CONFIG_VIDEO_ATMEL_ISI) += atmel-isi.o +obj-$(CONFIG_VIDEO_ATMEL_ISC_BASE) += atmel-isc-base.o obj-$(CONFIG_VIDEO_ATMEL_ISC) += atmel-isc.o obj-$(CONFIG_VIDEO_ATMEL_XISC) += atmel-xisc.o diff --git a/drivers/media/platform/atmel/atmel-isc-base.c b/drivers/media/platform/atmel/atmel-isc-base.c index 19daa49bf604..136ab7cf36ed 100644 --- a/drivers/media/platform/atmel/atmel-isc-base.c +++ b/drivers/media/platform/atmel/atmel-isc-base.c @@ -378,6 +378,7 @@ int isc_clk_init(struct isc_device *isc) return 0; } +EXPORT_SYMBOL_GPL(isc_clk_init); void isc_clk_cleanup(struct isc_device *isc) { @@ -392,6 +393,7 @@ void isc_clk_cleanup(struct isc_device *isc) clk_unregister(isc_clk->clk); } } +EXPORT_SYMBOL_GPL(isc_clk_cleanup); static int isc_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, @@ -1578,6 +1580,7 @@ irqreturn_t isc_interrupt(int irq, void *dev_id) return ret; } +EXPORT_SYMBOL_GPL(isc_interrupt); static void isc_hist_count(struct isc_device *isc, u32 *min, u32 *max) { @@ -2212,6 +2215,7 @@ const struct v4l2_async_notifier_operations isc_async_ops = { .unbind = isc_async_unbind, .complete = isc_async_complete, }; +EXPORT_SYMBOL_GPL(isc_async_ops); void isc_subdev_cleanup(struct isc_device *isc) { @@ -2224,6 +2228,7 @@ void isc_subdev_cleanup(struct isc_device *isc) INIT_LIST_HEAD(&isc->subdev_entities); } +EXPORT_SYMBOL_GPL(isc_subdev_cleanup); int isc_pipeline_init(struct isc_device *isc) { @@ -2264,6 +2269,7 @@ int isc_pipeline_init(struct isc_device *isc) return 0; } +EXPORT_SYMBOL_GPL(isc_pipeline_init); /* regmap configuration */ #define ATMEL_ISC_REG_MAX 0xd5c @@ -2273,4 +2279,9 @@ const struct regmap_config isc_regmap_config = { .val_bits = 32, .max_register = ATMEL_ISC_REG_MAX, }; +EXPORT_SYMBOL_GPL(isc_regmap_config); +MODULE_AUTHOR("Songjun Wu"); +MODULE_AUTHOR("Eugen Hristev"); +MODULE_DESCRIPTION("Atmel ISC common code base"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c index 83705730e37e..795a012d4020 100644 --- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c +++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c @@ -37,7 +37,16 @@ static int rtl28xxu_ctrl_msg(struct dvb_usb_device *d, struct rtl28xxu_req *req) } else { /* read */ requesttype = (USB_TYPE_VENDOR | USB_DIR_IN); - pipe = usb_rcvctrlpipe(d->udev, 0); + + /* + * Zero-length transfers must use usb_sndctrlpipe() and + * rtl28xxu_identify_state() uses a zero-length i2c read + * command to determine the chip type. 
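The comment above states the rule behind this fix: a control transfer with no data stage must be submitted on the send pipe, even when the request is logically a read. Reduced to its core as a hypothetical helper::

	#include <linux/usb.h>

	static unsigned int pick_ctrl_pipe(struct usb_device *udev, u16 size)
	{
		/* zero-length "reads" still go out on the send control pipe */
		return size ? usb_rcvctrlpipe(udev, 0) : usb_sndctrlpipe(udev, 0);
	}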
+ */ + if (req->size) + pipe = usb_rcvctrlpipe(d->udev, 0); + else + pipe = usb_sndctrlpipe(d->udev, 0); } ret = usb_control_msg(d->udev, pipe, 0, requesttype, req->value, @@ -612,9 +621,8 @@ static int rtl28xxu_read_config(struct dvb_usb_device *d) static int rtl28xxu_identify_state(struct dvb_usb_device *d, const char **name) { struct rtl28xxu_dev *dev = d_to_priv(d); - u8 buf[1]; int ret; - struct rtl28xxu_req req_demod_i2c = {0x0020, CMD_I2C_DA_RD, 1, buf}; + struct rtl28xxu_req req_demod_i2c = {0x0020, CMD_I2C_DA_RD, 0, NULL}; dev_dbg(&d->intf->dev, "\n"); diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index f4fb5c52b863..a420b59917db 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -208,6 +208,18 @@ config CS5535_CLOCK_EVENT_SRC MFGPTs have a better resolution and max interval than the generic PIT, and are suitable for use as high-res timers. +config GEHC_ACHC + tristate "GEHC ACHC support" + depends on SPI && SYSFS + depends on SOC_IMX53 || COMPILE_TEST + select FW_LOADER + help + Support for the GE ACHC microcontroller, which is part of the GE + PPD device. + + To compile this driver as a module, choose M here: the + module will be called gehc-achc. + config HP_ILO tristate "Channel interface driver for the HP iLO processor" depends on PCI diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index e92a56d4442f..68b7b0736f16 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -24,6 +24,7 @@ obj-$(CONFIG_KGDB_TESTS) += kgdbts.o obj-$(CONFIG_SGI_XP) += sgi-xp/ obj-$(CONFIG_SGI_GRU) += sgi-gru/ obj-$(CONFIG_CS5535_MFGPT) += cs5535-mfgpt.o +obj-$(CONFIG_GEHC_ACHC) += gehc-achc.o obj-$(CONFIG_HP_ILO) += hpilo.o obj-$(CONFIG_APDS9802ALS) += apds9802als.o obj-$(CONFIG_ISL29003) += isl29003.o diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index 7a6f01ace78a..305ffad131a2 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c @@ -714,23 +714,20 @@ static int at24_probe(struct i2c_client *client) } /* - * If the 'label' property is not present for the AT24 EEPROM, - * then nvmem_config.id is initialised to NVMEM_DEVID_AUTO, - * and this will append the 'devid' to the name of the NVMEM - * device. This is purely legacy and the AT24 driver has always - * defaulted to this. However, if the 'label' property is - * present then this means that the name is specified by the - * firmware and this name should be used verbatim and so it is - * not necessary to append the 'devid'. + * We initialize nvmem_config.id to NVMEM_DEVID_AUTO even if the + * label property is set, as some platforms can have multiple EEPROMs + * with the same label and we cannot register each of those under the + * same name. Failing to register those EEPROMs triggers a cascade + * failure on such platforms.
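The rewritten comment above is the heart of the at24 change: with nvmem_config.id set to NVMEM_DEVID_AUTO, the nvmem core appends a unique instance number to the device name, so two EEPROMs sharing the same firmware "label" no longer collide at registration. A minimal illustration of the two fields involved (a standalone, hypothetical config, not the driver's)::

	#include <linux/nvmem-provider.h>

	static struct nvmem_config example_config = {
		.name	= "eeprom",		/* or the firmware-provided label */
		.id	= NVMEM_DEVID_AUTO,	/* core appends a unique instance id */
	};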
*/ + nvmem_config.id = NVMEM_DEVID_AUTO; + if (device_property_present(dev, "label")) { - nvmem_config.id = NVMEM_DEVID_NONE; err = device_property_read_string(dev, "label", &nvmem_config.name); if (err) return err; } else { - nvmem_config.id = NVMEM_DEVID_AUTO; nvmem_config.name = dev_name(dev); } diff --git a/drivers/misc/gehc-achc.c b/drivers/misc/gehc-achc.c new file mode 100644 index 000000000000..02f33bc60c56 --- /dev/null +++ b/drivers/misc/gehc-achc.c @@ -0,0 +1,565 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * datasheet: https://www.nxp.com/docs/en/data-sheet/K20P144M120SF3.pdf + * + * Copyright (C) 2018-2021 Collabora + * Copyright (C) 2018-2021 GE Healthcare + */ + +#include <linux/delay.h> +#include <linux/firmware.h> +#include <linux/gpio/consumer.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/spi/spi.h> + +#define ACHC_MAX_FREQ_HZ 300000 +#define ACHC_FAST_READ_FREQ_HZ 1000000 + +struct achc_data { + struct spi_device *main; + struct spi_device *ezport; + struct gpio_desc *reset; + + struct mutex device_lock; /* avoid concurrent device access */ +}; + +#define EZPORT_RESET_DELAY_MS 100 +#define EZPORT_STARTUP_DELAY_MS 200 +#define EZPORT_WRITE_WAIT_MS 10 +#define EZPORT_TRANSFER_SIZE 2048 + +#define EZPORT_CMD_SP 0x02 /* flash section program */ +#define EZPORT_CMD_RDSR 0x05 /* read status register */ +#define EZPORT_CMD_WREN 0x06 /* write enable */ +#define EZPORT_CMD_FAST_READ 0x0b /* flash read data at high speed */ +#define EZPORT_CMD_RESET 0xb9 /* reset chip */ +#define EZPORT_CMD_BE 0xc7 /* bulk erase */ +#define EZPORT_CMD_SE 0xd8 /* sector erase */ + +#define EZPORT_SECTOR_SIZE 4096 +#define EZPORT_SECTOR_MASK (EZPORT_SECTOR_SIZE - 1) + +#define EZPORT_STATUS_WIP BIT(0) /* write in progress */ +#define EZPORT_STATUS_WEN BIT(1) /* write enable */ +#define EZPORT_STATUS_BEDIS BIT(2) /* bulk erase disable */ +#define EZPORT_STATUS_FLEXRAM BIT(3) /* FlexRAM mode */ +#define EZPORT_STATUS_WEF BIT(6) /* write error flag */ +#define EZPORT_STATUS_FS BIT(7) /* flash security */ + +static void ezport_reset(struct gpio_desc *reset) +{ + gpiod_set_value(reset, 1); + msleep(EZPORT_RESET_DELAY_MS); + gpiod_set_value(reset, 0); + msleep(EZPORT_STARTUP_DELAY_MS); +} + +static int ezport_start_programming(struct spi_device *spi, struct gpio_desc *reset) +{ + struct spi_message msg; + struct spi_transfer assert_cs = { + .cs_change = 1, + }; + struct spi_transfer release_cs = { }; + int ret; + + spi_bus_lock(spi->master); + + /* assert chip select */ + spi_message_init(&msg); + spi_message_add_tail(&assert_cs, &msg); + ret = spi_sync_locked(spi, &msg); + if (ret) + goto fail; + + msleep(EZPORT_STARTUP_DELAY_MS); + + /* reset with asserted chip select to switch into programming mode */ + ezport_reset(reset); + + /* release chip select */ + spi_message_init(&msg); + spi_message_add_tail(&release_cs, &msg); + ret = spi_sync_locked(spi, &msg); + +fail: + spi_bus_unlock(spi->master); + return ret; +} + +static void ezport_stop_programming(struct spi_device *spi, struct gpio_desc *reset) +{ + /* reset without asserted chip select to return into normal mode */ + spi_bus_lock(spi->master); + ezport_reset(reset); + spi_bus_unlock(spi->master); +} + +static int ezport_get_status_register(struct spi_device *spi) +{ + int ret; + + ret = spi_w8r8(spi, EZPORT_CMD_RDSR); + if (ret < 0) + return ret; + if (ret == 0xff) { + dev_err(&spi->dev, "Invalid EzPort status, EzPort is not functional!\n"); + return -EINVAL; + } + + return ret; +} + +static 
int ezport_soft_reset(struct spi_device *spi) +{ + u8 cmd = EZPORT_CMD_RESET; + int ret; + + ret = spi_write(spi, &cmd, 1); + if (ret < 0) + return ret; + + msleep(EZPORT_STARTUP_DELAY_MS); + + return 0; +} + +static int ezport_send_simple(struct spi_device *spi, u8 cmd) +{ + int ret; + + ret = spi_write(spi, &cmd, 1); + if (ret < 0) + return ret; + + return ezport_get_status_register(spi); +} + +static int ezport_wait_write(struct spi_device *spi, u32 retries) +{ + int ret; + u32 i; + + for (i = 0; i < retries; i++) { + ret = ezport_get_status_register(spi); + if (ret >= 0 && !(ret & EZPORT_STATUS_WIP)) + break; + msleep(EZPORT_WRITE_WAIT_MS); + } + + return ret; +} + +static int ezport_write_enable(struct spi_device *spi) +{ + int ret = 0, retries = 3; + + for (retries = 0; retries < 3; retries++) { + ret = ezport_send_simple(spi, EZPORT_CMD_WREN); + if (ret > 0 && ret & EZPORT_STATUS_WEN) + break; + } + + if (!(ret & EZPORT_STATUS_WEN)) { + dev_err(&spi->dev, "EzPort write enable timed out\n"); + return -ETIMEDOUT; + } + return 0; +} + +static int ezport_bulk_erase(struct spi_device *spi) +{ + int ret; + static const u8 cmd = EZPORT_CMD_BE; + + dev_dbg(&spi->dev, "EzPort bulk erase...\n"); + + ret = ezport_write_enable(spi); + if (ret < 0) + return ret; + + ret = spi_write(spi, &cmd, 1); + if (ret < 0) + return ret; + + ret = ezport_wait_write(spi, 1000); + if (ret < 0) + return ret; + + return 0; +} + +static int ezport_section_erase(struct spi_device *spi, u32 address) +{ + u8 query[] = {EZPORT_CMD_SE, (address >> 16) & 0xff, (address >> 8) & 0xff, address & 0xff}; + int ret; + + dev_dbg(&spi->dev, "Ezport section erase @ 0x%06x...\n", address); + + if (address & EZPORT_SECTOR_MASK) + return -EINVAL; + + ret = ezport_write_enable(spi); + if (ret < 0) + return ret; + + ret = spi_write(spi, query, sizeof(query)); + if (ret < 0) + return ret; + + return ezport_wait_write(spi, 200); +} + +static int ezport_flash_transfer(struct spi_device *spi, u32 address, + const u8 *payload, size_t payload_size) +{ + struct spi_transfer xfers[2] = {}; + u8 *command; + int ret; + + dev_dbg(&spi->dev, "EzPort write %zu bytes @ 0x%06x...\n", payload_size, address); + + ret = ezport_write_enable(spi); + if (ret < 0) + return ret; + + command = kmalloc(4, GFP_KERNEL | GFP_DMA); + if (!command) + return -ENOMEM; + + command[0] = EZPORT_CMD_SP; + command[1] = address >> 16; + command[2] = address >> 8; + command[3] = address >> 0; + + xfers[0].tx_buf = command; + xfers[0].len = 4; + + xfers[1].tx_buf = payload; + xfers[1].len = payload_size; + + ret = spi_sync_transfer(spi, xfers, 2); + kfree(command); + if (ret < 0) + return ret; + + return ezport_wait_write(spi, 40); +} + +static int ezport_flash_compare(struct spi_device *spi, u32 address, + const u8 *payload, size_t payload_size) +{ + struct spi_transfer xfers[2] = {}; + u8 *buffer; + int ret; + + buffer = kmalloc(payload_size + 5, GFP_KERNEL | GFP_DMA); + if (!buffer) + return -ENOMEM; + + buffer[0] = EZPORT_CMD_FAST_READ; + buffer[1] = address >> 16; + buffer[2] = address >> 8; + buffer[3] = address >> 0; + + xfers[0].tx_buf = buffer; + xfers[0].len = 4; + xfers[0].speed_hz = ACHC_FAST_READ_FREQ_HZ; + + xfers[1].rx_buf = buffer + 4; + xfers[1].len = payload_size + 1; + xfers[1].speed_hz = ACHC_FAST_READ_FREQ_HZ; + + ret = spi_sync_transfer(spi, xfers, 2); + if (ret) + goto err; + + /* FAST_READ receives one dummy byte before the real data */ + ret = memcmp(payload, buffer + 4 + 1, payload_size); + if (ret) { + ret = -EBADMSG; + dev_dbg(&spi->dev, 
"Verification failure @ %06x", address); + print_hex_dump_bytes("fw: ", DUMP_PREFIX_OFFSET, payload, payload_size); + print_hex_dump_bytes("dev: ", DUMP_PREFIX_OFFSET, buffer + 4, payload_size); + } + +err: + kfree(buffer); + return ret; +} + +static int ezport_firmware_compare_data(struct spi_device *spi, + const u8 *data, size_t size) +{ + int ret; + size_t address = 0; + size_t transfer_size; + + dev_dbg(&spi->dev, "EzPort compare data with %zu bytes...\n", size); + + ret = ezport_get_status_register(spi); + if (ret < 0) + return ret; + + if (ret & EZPORT_STATUS_FS) { + dev_info(&spi->dev, "Device is in secure mode (status=0x%02x)!\n", ret); + dev_info(&spi->dev, "FW verification is not possible\n"); + return -EACCES; + } + + while (size - address > 0) { + transfer_size = min((size_t) EZPORT_TRANSFER_SIZE, size - address); + + ret = ezport_flash_compare(spi, address, data+address, transfer_size); + if (ret) + return ret; + + address += transfer_size; + } + + return 0; +} + +static int ezport_firmware_flash_data(struct spi_device *spi, + const u8 *data, size_t size) +{ + int ret; + size_t address = 0; + size_t transfer_size; + + dev_dbg(&spi->dev, "EzPort flash data with %zu bytes...\n", size); + + ret = ezport_get_status_register(spi); + if (ret < 0) + return ret; + + if (ret & EZPORT_STATUS_FS) { + ret = ezport_bulk_erase(spi); + if (ret < 0) + return ret; + if (ret & EZPORT_STATUS_FS) + return -EINVAL; + } + + while (size - address > 0) { + if (!(address & EZPORT_SECTOR_MASK)) { + ret = ezport_section_erase(spi, address); + if (ret < 0) + return ret; + if (ret & EZPORT_STATUS_WIP || ret & EZPORT_STATUS_WEF) + return -EIO; + } + + transfer_size = min((size_t) EZPORT_TRANSFER_SIZE, size - address); + + ret = ezport_flash_transfer(spi, address, + data+address, transfer_size); + if (ret < 0) + return ret; + else if (ret & EZPORT_STATUS_WIP) + return -ETIMEDOUT; + else if (ret & EZPORT_STATUS_WEF) + return -EIO; + + address += transfer_size; + } + + dev_dbg(&spi->dev, "EzPort verify flashed data...\n"); + ret = ezport_firmware_compare_data(spi, data, size); + + /* allow missing FW verfication in secure mode */ + if (ret == -EACCES) + ret = 0; + + if (ret < 0) + dev_err(&spi->dev, "Failed to verify flashed data: %d\n", ret); + + ret = ezport_soft_reset(spi); + if (ret < 0) + dev_warn(&spi->dev, "EzPort reset failed!\n"); + + return ret; +} + +static int ezport_firmware_load(struct spi_device *spi, const char *fwname) +{ + const struct firmware *fw; + int ret; + + ret = request_firmware(&fw, fwname, &spi->dev); + if (ret) { + dev_err(&spi->dev, "Could not get firmware: %d\n", ret); + return ret; + } + + ret = ezport_firmware_flash_data(spi, fw->data, fw->size); + + release_firmware(fw); + + return ret; +} + +/** + * ezport_flash - flash device firmware + * @spi: SPI device for NXP EzPort interface + * @reset: the gpio connected to the device reset pin + * @fwname: filename of the firmware that should be flashed + * + * Context: can sleep + * + * Return: 0 on success; negative errno on failure + */ +static int ezport_flash(struct spi_device *spi, struct gpio_desc *reset, const char *fwname) +{ + int ret; + + ret = ezport_start_programming(spi, reset); + if (ret) + return ret; + + ret = ezport_firmware_load(spi, fwname); + + ezport_stop_programming(spi, reset); + + if (ret) + dev_err(&spi->dev, "Failed to flash firmware: %d\n", ret); + else + dev_dbg(&spi->dev, "Finished FW flashing!\n"); + + return ret; +} + +static ssize_t update_firmware_store(struct device *dev, struct device_attribute 
*attr, + const char *buf, size_t count) +{ + struct achc_data *achc = dev_get_drvdata(dev); + unsigned long value; + int ret; + + ret = kstrtoul(buf, 0, &value); + if (ret < 0 || value != 1) + return -EINVAL; + + mutex_lock(&achc->device_lock); + ret = ezport_flash(achc->ezport, achc->reset, "achc.bin"); + mutex_unlock(&achc->device_lock); + + if (ret < 0) + return ret; + + return count; +} +static DEVICE_ATTR_WO(update_firmware); + +static ssize_t reset_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct achc_data *achc = dev_get_drvdata(dev); + int ret; + + mutex_lock(&achc->device_lock); + ret = gpiod_get_value(achc->reset); + mutex_unlock(&achc->device_lock); + + if (ret < 0) + return ret; + + return sysfs_emit(buf, "%d\n", ret); +} + +static ssize_t reset_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct achc_data *achc = dev_get_drvdata(dev); + unsigned long value; + int ret; + + ret = kstrtoul(buf, 0, &value); + if (ret < 0 || value > 1) + return -EINVAL; + + mutex_lock(&achc->device_lock); + gpiod_set_value(achc->reset, value); + mutex_unlock(&achc->device_lock); + + return count; +} +static DEVICE_ATTR_RW(reset); + +static struct attribute *gehc_achc_attrs[] = { + &dev_attr_update_firmware.attr, + &dev_attr_reset.attr, + NULL, +}; +ATTRIBUTE_GROUPS(gehc_achc); + +static void unregister_ezport(void *data) +{ + struct spi_device *ezport = data; + + spi_unregister_device(ezport); +} + +static int gehc_achc_probe(struct spi_device *spi) +{ + struct achc_data *achc; + int ezport_reg, ret; + + spi->max_speed_hz = ACHC_MAX_FREQ_HZ; + spi->bits_per_word = 8; + spi->mode = SPI_MODE_0; + + achc = devm_kzalloc(&spi->dev, sizeof(*achc), GFP_KERNEL); + if (!achc) + return -ENOMEM; + spi_set_drvdata(spi, achc); + achc->main = spi; + + mutex_init(&achc->device_lock); + + ret = of_property_read_u32_index(spi->dev.of_node, "reg", 1, &ezport_reg); + if (ret) + return dev_err_probe(&spi->dev, ret, "missing second reg entry!\n"); + + achc->ezport = spi_new_ancillary_device(spi, ezport_reg); + if (IS_ERR(achc->ezport)) + return PTR_ERR(achc->ezport); + + ret = devm_add_action_or_reset(&spi->dev, unregister_ezport, achc->ezport); + if (ret) + return ret; + + achc->reset = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(achc->reset)) + return dev_err_probe(&spi->dev, PTR_ERR(achc->reset), "Could not get reset gpio\n"); + + return 0; +} + +static const struct spi_device_id gehc_achc_id[] = { + { "ge,achc", 0 }, + { } +}; +MODULE_DEVICE_TABLE(spi, gehc_achc_id); + +static const struct of_device_id gehc_achc_of_match[] = { + { .compatible = "ge,achc" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, gehc_achc_of_match); + +static struct spi_driver gehc_achc_spi_driver = { + .driver = { + .name = "gehc-achc", + .of_match_table = gehc_achc_of_match, + .dev_groups = gehc_achc_groups, + }, + .probe = gehc_achc_probe, + .id_table = gehc_achc_id, +}; +module_spi_driver(gehc_achc_spi_driver); + +MODULE_DESCRIPTION("GEHC ACHC driver"); +MODULE_AUTHOR("Sebastian Reichel <sebastian.reichel@collabora.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c index 88c218a9f8b3..4282b625200f 100644 --- a/drivers/misc/lkdtm/bugs.c +++ b/drivers/misc/lkdtm/bugs.c @@ -267,6 +267,7 @@ void lkdtm_ARRAY_BOUNDS(void) kfree(not_checked); kfree(checked); pr_err("FAIL: survived array bounds overflow!\n"); + pr_expected_config(CONFIG_UBSAN_BOUNDS); } void lkdtm_CORRUPT_LIST_ADD(void) @@ 
-506,53 +507,3 @@ noinline void lkdtm_CORRUPT_PAC(void) pr_err("XFAIL: this test is arm64-only\n"); #endif } - -void lkdtm_FORTIFY_OBJECT(void) -{ - struct target { - char a[10]; - } target[2] = {}; - int result; - - /* - * Using volatile prevents the compiler from determining the value of - * 'size' at compile time. Without that, we would get a compile error - * rather than a runtime error. - */ - volatile int size = 11; - - pr_info("trying to read past the end of a struct\n"); - - result = memcmp(&target[0], &target[1], size); - - /* Print result to prevent the code from being eliminated */ - pr_err("FAIL: fortify did not catch an object overread!\n" - "\"%d\" was the memcmp result.\n", result); -} - -void lkdtm_FORTIFY_SUBOBJECT(void) -{ - struct target { - char a[10]; - char b[10]; - } target; - char *src; - - src = kmalloc(20, GFP_KERNEL); - strscpy(src, "over ten bytes", 20); - - pr_info("trying to strcpy past the end of a member of a struct\n"); - - /* - * strncpy(target.a, src, 20); will hit a compile error because the - * compiler knows at build time that target.a < 20 bytes. Use strcpy() - * to force a runtime error. - */ - strcpy(target.a, src); - - /* Use target.a to prevent the code from being eliminated */ - pr_err("FAIL: fortify did not catch an sub-object overrun!\n" - "\"%s\" was copied.\n", target.a); - - kfree(src); -} diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c index 9dda87c6b54a..95b1c6800a22 100644 --- a/drivers/misc/lkdtm/core.c +++ b/drivers/misc/lkdtm/core.c @@ -26,7 +26,6 @@ #include <linux/init.h> #include <linux/slab.h> #include <linux/debugfs.h> -#include <linux/init.h> #define DEFAULT_COUNT 10 @@ -82,8 +81,7 @@ static struct crashpoint crashpoints[] = { CRASHPOINT("FS_DEVRW", "ll_rw_block"), CRASHPOINT("MEM_SWAPOUT", "shrink_inactive_list"), CRASHPOINT("TIMERADD", "hrtimer_start"), - CRASHPOINT("SCSI_DISPATCH_CMD", "scsi_dispatch_cmd"), - CRASHPOINT("IDE_CORE_CP", "generic_ide_ioctl"), + CRASHPOINT("SCSI_QUEUE_RQ", "scsi_queue_rq"), #endif }; @@ -119,8 +117,6 @@ static const struct crashtype crashtypes[] = { CRASHTYPE(UNSET_SMEP), CRASHTYPE(CORRUPT_PAC), CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE), - CRASHTYPE(FORTIFY_OBJECT), - CRASHTYPE(FORTIFY_SUBOBJECT), CRASHTYPE(SLAB_LINEAR_OVERFLOW), CRASHTYPE(VMALLOC_LINEAR_OVERFLOW), CRASHTYPE(WRITE_AFTER_FREE), @@ -180,6 +176,8 @@ static const struct crashtype crashtypes[] = { CRASHTYPE(USERCOPY_KERNEL), CRASHTYPE(STACKLEAK_ERASING), CRASHTYPE(CFI_FORWARD_PROTO), + CRASHTYPE(FORTIFIED_OBJECT), + CRASHTYPE(FORTIFIED_SUBOBJECT), CRASHTYPE(FORTIFIED_STRSCPY), CRASHTYPE(DOUBLE_FAULT), #ifdef CONFIG_PPC_BOOK3S_64 diff --git a/drivers/misc/lkdtm/fortify.c b/drivers/misc/lkdtm/fortify.c index 0f51d31b57ca..d06458a4858e 100644 --- a/drivers/misc/lkdtm/fortify.c +++ b/drivers/misc/lkdtm/fortify.c @@ -8,6 +8,59 @@ #include <linux/string.h> #include <linux/slab.h> +static volatile int fortify_scratch_space; + +void lkdtm_FORTIFIED_OBJECT(void) +{ + struct target { + char a[10]; + } target[2] = {}; + /* + * Using volatile prevents the compiler from determining the value of + * 'size' at compile time. Without that, we would get a compile error + * rather than a runtime error. 
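+ * (Each target[] element is only 10 bytes, so the memcmp() below with + * size == 11 reads past the end of a struct; FORTIFY_SOURCE can only + * catch that overread at run time, which is what this test expects.)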
+ */ + volatile int size = 11; + + pr_info("trying to read past the end of a struct\n"); + + /* Store result to global to prevent the code from being eliminated */ + fortify_scratch_space = memcmp(&target[0], &target[1], size); + + pr_err("FAIL: fortify did not block an object overread!\n"); + pr_expected_config(CONFIG_FORTIFY_SOURCE); +} + +void lkdtm_FORTIFIED_SUBOBJECT(void) +{ + struct target { + char a[10]; + char b[10]; + } target; + volatile int size = 20; + char *src; + + src = kmalloc(size, GFP_KERNEL); + strscpy(src, "over ten bytes", size); + size = strlen(src) + 1; + + pr_info("trying to strcpy past the end of a member of a struct\n"); + + /* + * memcpy(target.a, src, 20); will hit a compile error because the + * compiler knows at build time that target.a < 20 bytes. Use a + * volatile to force a runtime error. + */ + memcpy(target.a, src, size); + + /* Store result to global to prevent the code from being eliminated */ + fortify_scratch_space = target.a[3]; + + pr_err("FAIL: fortify did not block a sub-object overrun!\n"); + pr_expected_config(CONFIG_FORTIFY_SOURCE); + + kfree(src); +} /* * Calls fortified strscpy to test that it returns the same result as vanilla diff --git a/drivers/misc/lkdtm/heap.c b/drivers/misc/lkdtm/heap.c index 3d9aae5821a0..8a92f5a800fa 100644 --- a/drivers/misc/lkdtm/heap.c +++ b/drivers/misc/lkdtm/heap.c @@ -13,6 +13,13 @@ static struct kmem_cache *a_cache; static struct kmem_cache *b_cache; /* + * Using volatile here means the compiler cannot ever make assumptions + * about this value. This means compile-time length checks involving + * this variable cannot be performed; only run-time checks. + */ +static volatile int __offset = 1; + +/* * If there aren't guard pages, it's likely that a consecutive allocation will * let us overflow into the second allocation without overwriting something real. */ @@ -24,7 +31,7 @@ void lkdtm_VMALLOC_LINEAR_OVERFLOW(void) two = vzalloc(PAGE_SIZE); pr_info("Attempting vmalloc linear overflow ...\n"); - memset(one, 0xAA, PAGE_SIZE + 1); + memset(one, 0xAA, PAGE_SIZE + __offset); vfree(two); vfree(one); diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h index 6a30b60519f3..d7d64d9765eb 100644 --- a/drivers/misc/lkdtm/lkdtm.h +++ b/drivers/misc/lkdtm/lkdtm.h @@ -5,13 +5,17 @@ #define pr_fmt(fmt) "lkdtm: " fmt #include <linux/kernel.h> +#include <generated/compile.h> +#include <generated/utsrelease.h> + +#define LKDTM_KERNEL "kernel (" UTS_RELEASE " " UTS_MACHINE ")" #define pr_expected_config(kconfig) \ { \ if (IS_ENABLED(kconfig)) \ - pr_err("Unexpected! This kernel was built with " #kconfig "=y\n"); \ + pr_err("Unexpected! This " LKDTM_KERNEL " was built with " #kconfig "=y\n"); \ else \ - pr_warn("This is probably expected, since this kernel was built *without* " #kconfig "=y\n"); \ + pr_warn("This is probably expected, since this " LKDTM_KERNEL " was built *without* " #kconfig "=y\n"); \ } #ifndef MODULE int lkdtm_check_bool_cmdline(const char *param); @@ -21,24 +25,24 @@ if (IS_ENABLED(kconfig)) { \ switch (lkdtm_check_bool_cmdline(param)) { \ case 0: \ - pr_warn("This is probably expected, since this kernel was built with " #kconfig "=y but booted with '" param "=N'\n"); \ + pr_warn("This is probably expected, since this " LKDTM_KERNEL " was built with " #kconfig "=y but booted with '" param "=N'\n"); \ break; \ case 1: \ - pr_err("Unexpected! This kernel was built with " #kconfig "=y and booted with '" param "=Y'\n"); \ + pr_err("Unexpected!
This " LKDTM_KERNEL " was built with " #kconfig "=y and booted with '" param "=Y'\n"); \ break; \ default: \ - pr_err("Unexpected! This kernel was built with " #kconfig "=y (and booted without '" param "' specified)\n"); \ + pr_err("Unexpected! This " LKDTM_KERNEL " was built with " #kconfig "=y (and booted without '" param "' specified)\n"); \ } \ } else { \ switch (lkdtm_check_bool_cmdline(param)) { \ case 0: \ - pr_warn("This is probably expected, as kernel was built *without* " #kconfig "=y and booted with '" param "=N'\n"); \ + pr_warn("This is probably expected, as this " LKDTM_KERNEL " was built *without* " #kconfig "=y and booted with '" param "=N'\n"); \ break; \ case 1: \ - pr_err("Unexpected! This kernel was built *without* " #kconfig "=y but booted with '" param "=Y'\n"); \ + pr_err("Unexpected! This " LKDTM_KERNEL " was built *without* " #kconfig "=y but booted with '" param "=Y'\n"); \ break; \ default: \ - pr_err("This is probably expected, since this kernel was built *without* " #kconfig "=y (and booted without '" param "' specified)\n"); \ + pr_err("This is probably expected, since this " LKDTM_KERNEL " was built *without* " #kconfig "=y (and booted without '" param "' specified)\n"); \ break; \ } \ } \ @@ -74,8 +78,6 @@ void lkdtm_STACK_GUARD_PAGE_TRAILING(void); void lkdtm_UNSET_SMEP(void); void lkdtm_DOUBLE_FAULT(void); void lkdtm_CORRUPT_PAC(void); -void lkdtm_FORTIFY_OBJECT(void); -void lkdtm_FORTIFY_SUBOBJECT(void); /* heap.c */ void __init lkdtm_heap_init(void); @@ -150,6 +152,8 @@ void lkdtm_STACKLEAK_ERASING(void); void lkdtm_CFI_FORWARD_PROTO(void); /* fortify.c */ +void lkdtm_FORTIFIED_OBJECT(void); +void lkdtm_FORTIFIED_SUBOBJECT(void); void lkdtm_FORTIFIED_STRSCPY(void); /* powerpc.c */ diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index 935acc6bbf3c..09188d9afc06 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c @@ -31,7 +31,7 @@ * * Return: written size bytes or < 0 on error */ -ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, u8 vtag, +ssize_t __mei_cl_send(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag, unsigned int mode) { struct mei_device *bus; @@ -232,8 +232,8 @@ out: * * < 0 on error */ -ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length, - u8 vtag) +ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, const u8 *buf, + size_t length, u8 vtag) { struct mei_cl *cl = cldev->cl; @@ -296,7 +296,7 @@ EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock_vtag); * * written size in bytes * * < 0 on error */ -ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length) +ssize_t mei_cldev_send(struct mei_cl_device *cldev, const u8 *buf, size_t length) { return mei_cldev_send_vtag(cldev, buf, length, 0); } @@ -552,7 +552,7 @@ EXPORT_SYMBOL_GPL(mei_cldev_ver); * * Return: true if me client is initialized and connected */ -bool mei_cldev_enabled(struct mei_cl_device *cldev) +bool mei_cldev_enabled(const struct mei_cl_device *cldev) { return mei_cl_is_connected(cldev->cl); } @@ -771,8 +771,8 @@ EXPORT_SYMBOL_GPL(mei_cldev_disable); * Return: id on success; NULL if no id is matching */ static const -struct mei_cl_device_id *mei_cl_device_find(struct mei_cl_device *cldev, - struct mei_cl_driver *cldrv) +struct mei_cl_device_id *mei_cl_device_find(const struct mei_cl_device *cldev, + const struct mei_cl_driver *cldrv) { const struct mei_cl_device_id *id; const uuid_le *uuid; @@ -815,8 +815,8 @@ struct mei_cl_device_id *mei_cl_device_find(struct mei_cl_device *cldev, */ static 
int mei_cl_device_match(struct device *dev, struct device_driver *drv) { - struct mei_cl_device *cldev = to_mei_cl_device(dev); - struct mei_cl_driver *cldrv = to_mei_cl_driver(drv); + const struct mei_cl_device *cldev = to_mei_cl_device(dev); + const struct mei_cl_driver *cldrv = to_mei_cl_driver(drv); const struct mei_cl_device_id *found_id; if (!cldev) diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h index b12cdcde9436..418056fb1489 100644 --- a/drivers/misc/mei/client.h +++ b/drivers/misc/mei/client.h @@ -160,7 +160,7 @@ int mei_cl_vt_support_check(const struct mei_cl *cl); * * Return: true if the host client is connected */ -static inline bool mei_cl_is_connected(struct mei_cl *cl) +static inline bool mei_cl_is_connected(const struct mei_cl *cl) { return cl->state == MEI_FILE_CONNECTED; } diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h index b7b6ef344e80..694f866f87ef 100644 --- a/drivers/misc/mei/mei_dev.h +++ b/drivers/misc/mei/mei_dev.h @@ -356,7 +356,7 @@ struct mei_hw_ops { /* MEI bus API*/ void mei_cl_bus_rescan_work(struct work_struct *work); void mei_cl_bus_dev_fixup(struct mei_cl_device *dev); -ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, u8 vtag, +ssize_t __mei_cl_send(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag, unsigned int mode); ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, u8 *vtag, unsigned int mode, unsigned long timeout); diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c index 1b2868ca4f2a..d1137a95ad02 100644 --- a/drivers/misc/pci_endpoint_test.c +++ b/drivers/misc/pci_endpoint_test.c @@ -862,6 +862,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev, err = -ENOMEM; goto err_release_irq; } + misc_device->parent = &pdev->dev; misc_device->fops = &pci_endpoint_test_fops, err = misc_register(misc_device); diff --git a/drivers/misc/pvpanic/pvpanic-pci.c b/drivers/misc/pvpanic/pvpanic-pci.c index a43c401017ae..741116b3d995 100644 --- a/drivers/misc/pvpanic/pvpanic-pci.c +++ b/drivers/misc/pvpanic/pvpanic-pci.c @@ -108,4 +108,6 @@ static struct pci_driver pvpanic_pci_driver = { }, }; +MODULE_DEVICE_TABLE(pci, pvpanic_pci_id_tbl); + module_pci_driver(pvpanic_pci_driver); diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c index 40ac59dd018c..9afda47efbf2 100644 --- a/drivers/misc/sgi-gru/grumain.c +++ b/drivers/misc/sgi-gru/grumain.c @@ -282,7 +282,7 @@ static void gru_unload_mm_tracker(struct gru_state *gru, */ void gts_drop(struct gru_thread_state *gts) { - if (gts && atomic_dec_return(&gts->ts_refcnt) == 0) { + if (gts && refcount_dec_and_test(&gts->ts_refcnt)) { if (gts->ts_gms) gru_drop_mmu_notifier(gts->ts_gms); kfree(gts); @@ -323,7 +323,7 @@ struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma, STAT(gts_alloc); memset(gts, 0, sizeof(struct gru_thread_state)); /* zero out header */ - atomic_set(&gts->ts_refcnt, 1); + refcount_set(&gts->ts_refcnt, 1); mutex_init(&gts->ts_ctxlock); gts->ts_cbr_au_count = cbr_au_count; gts->ts_dsr_au_count = dsr_au_count; @@ -888,7 +888,7 @@ again: gts->ts_gru = gru; gts->ts_blade = gru->gs_blade_id; gts->ts_ctxnum = gru_assign_context_number(gru); - atomic_inc(&gts->ts_refcnt); + refcount_inc(&gts->ts_refcnt); gru->gs_gts[gts->ts_ctxnum] = gts; spin_unlock(&gru->gs_lock); diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h index 5ce8f3081e96..e4c067c61251 100644 --- a/drivers/misc/sgi-gru/grutables.h +++ b/drivers/misc/sgi-gru/grutables.h @@ -129,6
+129,7 @@ * */ +#include <linux/refcount.h> #include <linux/rmap.h> #include <linux/interrupt.h> #include <linux/mutex.h> @@ -358,7 +359,7 @@ struct gru_thread_state { enabled */ int ts_ctxnum; /* context number where the context is loaded */ - atomic_t ts_refcnt; /* reference count GTS */ + refcount_t ts_refcnt; /* reference count GTS */ unsigned char ts_dsr_au_count;/* Number of DSR resources required for context */ unsigned char ts_cbr_au_count;/* Number of CBR resources diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c index 7791bde81a36..ba9ae0e2df0f 100644 --- a/drivers/misc/sgi-xp/xpc_uv.c +++ b/drivers/misc/sgi-xp/xpc_uv.c @@ -1742,7 +1742,7 @@ xpc_init_mq_node(int nid) { int cpu; - get_online_cpus(); + cpus_read_lock(); for_each_cpu(cpu, cpumask_of_node(nid)) { xpc_activate_mq_uv = @@ -1753,7 +1753,7 @@ xpc_init_mq_node(int nid) break; } if (IS_ERR(xpc_activate_mq_uv)) { - put_online_cpus(); + cpus_read_unlock(); return PTR_ERR(xpc_activate_mq_uv); } @@ -1767,11 +1767,11 @@ xpc_init_mq_node(int nid) } if (IS_ERR(xpc_notify_mq_uv)) { xpc_destroy_gru_mq_uv(xpc_activate_mq_uv); - put_online_cpus(); + cpus_read_unlock(); return PTR_ERR(xpc_notify_mq_uv); } - put_online_cpus(); + cpus_read_unlock(); return 0; } diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c index 93638ae2753a..4c26b19f5154 100644 --- a/drivers/misc/sram.c +++ b/drivers/misc/sram.c @@ -97,7 +97,24 @@ static int sram_add_partition(struct sram_dev *sram, struct sram_reserve *block, struct sram_partition *part = &sram->partition[sram->partitions]; mutex_init(&part->lock); - part->base = sram->virt_base + block->start; + + if (sram->config && sram->config->map_only_reserved) { + void __iomem *virt_base; + + if (sram->no_memory_wc) + virt_base = devm_ioremap_resource(sram->dev, &block->res); + else + virt_base = devm_ioremap_resource_wc(sram->dev, &block->res); + + if (IS_ERR(virt_base)) { + dev_err(sram->dev, "could not map SRAM at %pr\n", &block->res); + return PTR_ERR(virt_base); + } + + part->base = virt_base; + } else { + part->base = sram->virt_base + block->start; + } if (block->pool) { ret = sram_add_pool(sram, block, start, part); @@ -198,6 +215,7 @@ static int sram_reserve_regions(struct sram_dev *sram, struct resource *res) block->start = child_res.start - res->start; block->size = resource_size(&child_res); + block->res = child_res; list_add_tail(&block->list, &reserve_list); if (of_find_property(child, "export", NULL)) @@ -295,15 +313,17 @@ static int sram_reserve_regions(struct sram_dev *sram, struct resource *res) */ cur_size = block->start - cur_start; - dev_dbg(sram->dev, "adding chunk 0x%lx-0x%lx\n", - cur_start, cur_start + cur_size); + if (sram->pool) { + dev_dbg(sram->dev, "adding chunk 0x%lx-0x%lx\n", + cur_start, cur_start + cur_size); - ret = gen_pool_add_virt(sram->pool, - (unsigned long)sram->virt_base + cur_start, - res->start + cur_start, cur_size, -1); - if (ret < 0) { - sram_free_partitions(sram); - goto err_chunks; + ret = gen_pool_add_virt(sram->pool, + (unsigned long)sram->virt_base + cur_start, + res->start + cur_start, cur_size, -1); + if (ret < 0) { + sram_free_partitions(sram); + goto err_chunks; + } } /* next allocation after this reserved block */ @@ -331,40 +351,63 @@ static int atmel_securam_wait(void) 10000, 500000); } +static const struct sram_config atmel_securam_config = { + .init = atmel_securam_wait, +}; + +/* + * SYSRAM contains areas that are not accessible by the + * kernel, such as the first 256K that is reserved for TZ.
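+ * (Here "TZ" refers to the TrustZone secure world.)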
+ * Accesses to those areas (including speculative accesses) + * trigger SErrors. As such we must map only the areas of + * SYSRAM specified in the device tree. + */ +static const struct sram_config tegra_sysram_config = { + .map_only_reserved = true, +}; + static const struct of_device_id sram_dt_ids[] = { { .compatible = "mmio-sram" }, - { .compatible = "atmel,sama5d2-securam", .data = atmel_securam_wait }, + { .compatible = "atmel,sama5d2-securam", .data = &atmel_securam_config }, + { .compatible = "nvidia,tegra186-sysram", .data = &tegra_sysram_config }, + { .compatible = "nvidia,tegra194-sysram", .data = &tegra_sysram_config }, {} }; static int sram_probe(struct platform_device *pdev) { + const struct sram_config *config; struct sram_dev *sram; int ret; struct resource *res; - int (*init_func)(void); + + config = of_device_get_match_data(&pdev->dev); sram = devm_kzalloc(&pdev->dev, sizeof(*sram), GFP_KERNEL); if (!sram) return -ENOMEM; sram->dev = &pdev->dev; + sram->no_memory_wc = of_property_read_bool(pdev->dev.of_node, "no-memory-wc"); + sram->config = config; + + if (!config || !config->map_only_reserved) { + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (sram->no_memory_wc) + sram->virt_base = devm_ioremap_resource(&pdev->dev, res); + else + sram->virt_base = devm_ioremap_resource_wc(&pdev->dev, res); + if (IS_ERR(sram->virt_base)) { + dev_err(&pdev->dev, "could not map SRAM registers\n"); + return PTR_ERR(sram->virt_base); + } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (of_property_read_bool(pdev->dev.of_node, "no-memory-wc")) - sram->virt_base = devm_ioremap_resource(&pdev->dev, res); - else - sram->virt_base = devm_ioremap_resource_wc(&pdev->dev, res); - if (IS_ERR(sram->virt_base)) { - dev_err(&pdev->dev, "could not map SRAM registers\n"); - return PTR_ERR(sram->virt_base); + sram->pool = devm_gen_pool_create(sram->dev, ilog2(SRAM_GRANULARITY), + NUMA_NO_NODE, NULL); + if (IS_ERR(sram->pool)) + return PTR_ERR(sram->pool); } - sram->pool = devm_gen_pool_create(sram->dev, ilog2(SRAM_GRANULARITY), - NUMA_NO_NODE, NULL); - if (IS_ERR(sram->pool)) - return PTR_ERR(sram->pool); - sram->clk = devm_clk_get(sram->dev, NULL); if (IS_ERR(sram->clk)) sram->clk = NULL; @@ -378,15 +421,15 @@ static int sram_probe(struct platform_device *pdev) platform_set_drvdata(pdev, sram); - init_func = of_device_get_match_data(&pdev->dev); - if (init_func) { - ret = init_func(); + if (config && config->init) { + ret = config->init(); if (ret) goto err_free_partitions; } - dev_dbg(sram->dev, "SRAM pool: %zu KiB @ 0x%p\n", - gen_pool_size(sram->pool) / 1024, sram->virt_base); + if (sram->pool) + dev_dbg(sram->dev, "SRAM pool: %zu KiB @ 0x%p\n", + gen_pool_size(sram->pool) / 1024, sram->virt_base); return 0; @@ -405,7 +448,7 @@ static int sram_remove(struct platform_device *pdev) sram_free_partitions(sram); - if (gen_pool_avail(sram->pool) < gen_pool_size(sram->pool)) + if (sram->pool && gen_pool_avail(sram->pool) < gen_pool_size(sram->pool)) dev_err(sram->dev, "removed while SRAM allocated\n"); if (sram->clk) diff --git a/drivers/misc/sram.h b/drivers/misc/sram.h index 9c1d21ff7347..d2058d8c8f1d 100644 --- a/drivers/misc/sram.h +++ b/drivers/misc/sram.h @@ -5,6 +5,11 @@ #ifndef __SRAM_H #define __SRAM_H +struct sram_config { + int (*init)(void); + bool map_only_reserved; +}; + struct sram_partition { void __iomem *base; @@ -15,8 +20,11 @@ struct sram_partition { }; struct sram_dev { + const struct sram_config *config; + struct device *dev; void __iomem *virt_base; + bool 
no_memory_wc; struct gen_pool *pool; struct clk *clk; @@ -29,6 +37,7 @@ struct sram_reserve { struct list_head list; u32 start; u32 size; + struct resource res; bool export; bool pool; bool protect_exec; diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 9890a1532cb0..ce8aed562929 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -28,6 +28,7 @@ #include <linux/errno.h> #include <linux/hdreg.h> #include <linux/kdev_t.h> +#include <linux/kref.h> #include <linux/blkdev.h> #include <linux/cdev.h> #include <linux/mutex.h> @@ -111,7 +112,7 @@ struct mmc_blk_data { #define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */ #define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */ - unsigned int usage; + struct kref kref; unsigned int read_only; unsigned int part_type; unsigned int reset_done; @@ -181,10 +182,8 @@ static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk) mutex_lock(&open_lock); md = disk->private_data; - if (md && md->usage == 0) + if (md && !kref_get_unless_zero(&md->kref)) md = NULL; - if (md) - md->usage++; mutex_unlock(&open_lock); return md; @@ -196,18 +195,25 @@ static inline int mmc_get_devidx(struct gendisk *disk) return devidx; } -static void mmc_blk_put(struct mmc_blk_data *md) +static void mmc_blk_kref_release(struct kref *ref) { - mutex_lock(&open_lock); - md->usage--; - if (md->usage == 0) { - int devidx = mmc_get_devidx(md->disk); + struct mmc_blk_data *md = container_of(ref, struct mmc_blk_data, kref); + int devidx; - ida_simple_remove(&mmc_blk_ida, devidx); - put_disk(md->disk); - kfree(md); - } + devidx = mmc_get_devidx(md->disk); + ida_simple_remove(&mmc_blk_ida, devidx); + + mutex_lock(&open_lock); + md->disk->private_data = NULL; mutex_unlock(&open_lock); + + put_disk(md->disk); + kfree(md); +} + +static void mmc_blk_put(struct mmc_blk_data *md) +{ + kref_put(&md->kref, mmc_blk_kref_release); } static ssize_t power_ro_lock_show(struct device *dev, @@ -2327,7 +2333,8 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, INIT_LIST_HEAD(&md->part); INIT_LIST_HEAD(&md->rpmbs); - md->usage = 1; + kref_init(&md->kref); + md->queue.blkdata = md; md->disk->major = MMC_BLOCK_MAJOR; diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index eda4a1892c33..0475d96047c4 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -75,7 +75,8 @@ static void mmc_host_classdev_release(struct device *dev) { struct mmc_host *host = cls_dev_to_mmc_host(dev); wakeup_source_unregister(host->ws); - ida_simple_remove(&mmc_host_ida, host->index); + if (of_alias_get_id(host->parent->of_node, "mmc") < 0) + ida_simple_remove(&mmc_host_ida, host->index); kfree(host); } @@ -502,7 +503,7 @@ static int mmc_first_nonreserved_index(void) */ struct mmc_host *mmc_alloc_host(int extra, struct device *dev) { - int err; + int index; struct mmc_host *host; int alias_id, min_idx, max_idx; @@ -515,20 +516,19 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev) alias_id = of_alias_get_id(dev->of_node, "mmc"); if (alias_id >= 0) { - min_idx = alias_id; - max_idx = alias_id + 1; + index = alias_id; } else { min_idx = mmc_first_nonreserved_index(); max_idx = 0; - } - err = ida_simple_get(&mmc_host_ida, min_idx, max_idx, GFP_KERNEL); - if (err < 0) { - kfree(host); - return NULL; + index = ida_simple_get(&mmc_host_ida, min_idx, max_idx, GFP_KERNEL); + if (index < 0) { + kfree(host); + return NULL; + } } - host->index = err; + host->index = index; dev_set_name(&host->class_dev, "mmc%d", 
host->index); host->ws = wakeup_source_register(NULL, dev_name(&host->class_dev)); diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index d333130d1531..c3229d8c7041 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -2018,8 +2018,8 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t) continue; } - dw_mci_stop_dma(host); send_stop_abort(host, data); + dw_mci_stop_dma(host); state = STATE_SENDING_STOP; break; } @@ -2043,10 +2043,10 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t) */ if (test_and_clear_bit(EVENT_DATA_ERROR, &host->pending_events)) { - dw_mci_stop_dma(host); if (!(host->data_status & (SDMMC_INT_DRTO | SDMMC_INT_EBE))) send_stop_abort(host, data); + dw_mci_stop_dma(host); state = STATE_DATA_ERROR; break; } @@ -2079,10 +2079,10 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t) */ if (test_and_clear_bit(EVENT_DATA_ERROR, &host->pending_events)) { - dw_mci_stop_dma(host); if (!(host->data_status & (SDMMC_INT_DRTO | SDMMC_INT_EBE))) send_stop_abort(host, data); + dw_mci_stop_dma(host); state = STATE_DATA_ERROR; break; } diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c index 0db17bcc9c16..cb1a64a5c256 100644 --- a/drivers/mmc/host/jz4740_mmc.c +++ b/drivers/mmc/host/jz4740_mmc.c @@ -789,6 +789,8 @@ static irqreturn_t jz_mmc_irq_worker(int irq, void *devid) break; } } + fallthrough; + case JZ4740_MMC_STATE_DONE: break; } diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c index 51db30acf4dc..fdaa11f92fe6 100644 --- a/drivers/mmc/host/mmci_stm32_sdmmc.c +++ b/drivers/mmc/host/mmci_stm32_sdmmc.c @@ -479,8 +479,9 @@ static int sdmmc_post_sig_volt_switch(struct mmci_host *host, u32 status; int ret = 0; - if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) { - spin_lock_irqsave(&host->lock, flags); + spin_lock_irqsave(&host->lock, flags); + if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180 && + host->pwr_reg & MCI_STM32_VSWITCHEN) { mmci_write_pwrreg(host, host->pwr_reg | MCI_STM32_VSWITCH); spin_unlock_irqrestore(&host->lock, flags); @@ -492,9 +493,11 @@ static int sdmmc_post_sig_volt_switch(struct mmci_host *host, writel_relaxed(MCI_STM32_VSWENDC | MCI_STM32_CKSTOPC, host->base + MMCICLEAR); + spin_lock_irqsave(&host->lock, flags); mmci_write_pwrreg(host, host->pwr_reg & ~(MCI_STM32_VSWITCHEN | MCI_STM32_VSWITCH)); } + spin_unlock_irqrestore(&host->lock, flags); return ret; } diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c index cce390fe9cf3..e7565c671998 100644 --- a/drivers/mmc/host/sdhci-iproc.c +++ b/drivers/mmc/host/sdhci-iproc.c @@ -173,6 +173,23 @@ static unsigned int sdhci_iproc_get_max_clock(struct sdhci_host *host) return pltfm_host->clock; } +/* + * There is a known bug on BCM2711's SDHCI core integration where the + * controller will hang when the difference between the core clock and the bus + * clock is too great. Specifically this can be reproduced under the following + * conditions: + * + * - No SD card plugged in, polling thread is running, probing cards at + * 100 kHz. + * - BCM2711's core clock configured at 500MHz or more + * + * So we set 200kHz as the minimum clock frequency available for that SoC. 
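+ * (For scale: a 500 MHz core clock against a 100 kHz bus clock is a + * 5000:1 gap, while the 200 kHz floor brings the same setup to 2500:1.)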
+ */ +static unsigned int sdhci_iproc_bcm2711_get_min_clock(struct sdhci_host *host) +{ + return 200000; +} + static const struct sdhci_ops sdhci_iproc_ops = { .set_clock = sdhci_set_clock, .get_max_clock = sdhci_iproc_get_max_clock, @@ -271,13 +288,15 @@ static const struct sdhci_ops sdhci_iproc_bcm2711_ops = { .set_clock = sdhci_set_clock, .set_power = sdhci_set_power_and_bus_voltage, .get_max_clock = sdhci_iproc_get_max_clock, + .get_min_clock = sdhci_iproc_bcm2711_get_min_clock, .set_bus_width = sdhci_set_bus_width, .reset = sdhci_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, }; static const struct sdhci_pltfm_data sdhci_bcm2711_pltfm_data = { - .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, + .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 | + SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, .ops = &sdhci_iproc_bcm2711_ops, }; diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index e44b7a66b73c..290a14cdc1cf 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -2089,6 +2089,23 @@ static void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery) sdhci_cqe_disable(mmc, recovery); } +static void sdhci_msm_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) +{ + u32 count, start = 15; + + __sdhci_set_timeout(host, cmd); + count = sdhci_readb(host, SDHCI_TIMEOUT_CONTROL); + /* + * Update the software timeout value if it is less than the hardware + * data timeout value. The Qcom SoC hardware data timeout is calculated + * as 4 * MCLK * 2^(count + 13), where MCLK = 1 / host->clock. + */ + if (cmd && cmd->data && host->clock > 400000 && + host->clock <= 50000000 && + ((1 << (count + start)) > (10 * host->clock))) + host->data_timeout = 22LL * NSEC_PER_SEC; +} + static const struct cqhci_host_ops sdhci_msm_cqhci_ops = { .enable = sdhci_msm_cqe_enable, .disable = sdhci_msm_cqe_disable, @@ -2438,6 +2455,7 @@ static const struct sdhci_ops sdhci_msm_ops = { .irq = sdhci_msm_cqe_irq, .dump_vendor_regs = sdhci_msm_dump_vendor_regs, .set_power = sdhci_set_power_noreg, + .set_timeout = sdhci_msm_set_timeout, }; static const struct sdhci_pltfm_data sdhci_msm_pdata = { diff --git a/drivers/most/most_cdev.c b/drivers/most/most_cdev.c index 8908b9363a96..3722f9abd7b9 100644 --- a/drivers/most/most_cdev.c +++ b/drivers/most/most_cdev.c @@ -486,7 +486,7 @@ static struct cdev_component comp = { }, }; -static int __init mod_init(void) +static int __init most_cdev_init(void) { int err; @@ -518,7 +518,7 @@ dest_ida: return err; } -static void __exit mod_exit(void) +static void __exit most_cdev_exit(void) { struct comp_channel *c, *tmp; @@ -534,8 +534,8 @@ static void __exit mod_exit(void) class_destroy(comp.class); } -module_init(mod_init); -module_exit(mod_exit); +module_init(most_cdev_init); +module_exit(most_cdev_exit); MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("character device component for mostcore"); diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index 3097e93787f7..a761134fd3be 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c @@ -119,7 +119,7 @@ static int cfi_use_status_reg(struct cfi_private *cfi) struct cfi_pri_amdstd *extp = cfi->cmdset_priv; u8 poll_mask = CFI_POLL_STATUS_REG | CFI_POLL_DQ; - return extp->MinorVersion >= '5' && + return extp && extp->MinorVersion >= '5' && (extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG; } diff --git a/drivers/mtd/chips/cfi_util.c
b/drivers/mtd/chips/cfi_util.c index 99b7986002f0..6a6a2a21d2ed 100644 --- a/drivers/mtd/chips/cfi_util.c +++ b/drivers/mtd/chips/cfi_util.c @@ -108,8 +108,8 @@ map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi #if BITS_PER_LONG >= 64 case 8: onecmd |= (onecmd << (chip_mode * 32)); -#endif fallthrough; +#endif case 4: onecmd |= (onecmd << (chip_mode * 16)); fallthrough; @@ -164,8 +164,8 @@ unsigned long cfi_merge_status(map_word val, struct map_info *map, #if BITS_PER_LONG >= 64 case 8: res |= (onestat >> (chip_mode * 32)); -#endif fallthrough; +#endif case 4: res |= (onestat >> (chip_mode * 16)); fallthrough; diff --git a/drivers/mtd/devices/mchp48l640.c b/drivers/mtd/devices/mchp48l640.c index efc2003bd13a..99400d0fb8c1 100644 --- a/drivers/mtd/devices/mchp48l640.c +++ b/drivers/mtd/devices/mchp48l640.c @@ -229,7 +229,7 @@ static int mchp48l640_write(struct mtd_info *mtd, loff_t to, size_t len, woff += ws; } - return ret; + return 0; } static int mchp48l640_read_page(struct mtd_info *mtd, loff_t from, size_t len, @@ -255,6 +255,7 @@ static int mchp48l640_read_page(struct mtd_info *mtd, loff_t from, size_t len, if (!ret) *retlen += len; + kfree(cmd); return ret; fail: @@ -286,7 +287,7 @@ static int mchp48l640_read(struct mtd_info *mtd, loff_t from, size_t len, woff += ws; } - return ret; + return 0; }; static const struct mchp48_caps mchp48l640_caps = { diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index 6ce4bc57f919..44bea3f65060 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c @@ -419,6 +419,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new) if (tr->discard) { blk_queue_flag_set(QUEUE_FLAG_DISCARD, new->rq); blk_queue_max_discard_sectors(new->rq, UINT_MAX); + new->rq->limits.discard_granularity = tr->blksize; } gd->queue = new->rq; @@ -525,14 +526,10 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr) if (!blktrans_notifier.list.next) register_mtd_user(&blktrans_notifier); - - mutex_lock(&mtd_table_mutex); - ret = register_blkdev(tr->major, tr->name); if (ret < 0) { printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n", tr->name, tr->major, ret); - mutex_unlock(&mtd_table_mutex); return ret; } @@ -542,12 +539,12 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr) tr->blkshift = ffs(tr->blksize) - 1; INIT_LIST_HEAD(&tr->devs); - list_add(&tr->list, &blktrans_majors); + mutex_lock(&mtd_table_mutex); + list_add(&tr->list, &blktrans_majors); mtd_for_each_device(mtd) if (mtd->type != MTD_ABSENT) tr->add_mtd(tr, mtd); - mutex_unlock(&mtd_table_mutex); return 0; } @@ -564,8 +561,8 @@ int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr) list_for_each_entry_safe(dev, next, &tr->devs, list) tr->remove_dev(dev); - unregister_blkdev(tr->major, tr->name); mutex_unlock(&mtd_table_mutex); + unregister_blkdev(tr->major, tr->name); BUG_ON(!list_empty(&tr->devs)); return 0; diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index b5ccd3037788..c8fd7f758938 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -806,7 +806,9 @@ static ssize_t mtd_otp_size(struct mtd_info *mtd, bool is_user) err: kfree(info); - return ret; + + /* ENODATA means there is no OTP region. */ + return ret == -ENODATA ? 
0 : ret; } static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd, diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 57a583149cc0..3d6c6e880520 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -5228,12 +5228,18 @@ static bool of_get_nand_on_flash_bbt(struct device_node *np) static int of_get_nand_secure_regions(struct nand_chip *chip) { struct device_node *dn = nand_get_flash_node(chip); + struct property *prop; int nr_elem, i, j; - nr_elem = of_property_count_elems_of_size(dn, "secure-regions", sizeof(u64)); - if (!nr_elem) + /* Only proceed if the "secure-regions" property is present in DT */ + prop = of_find_property(dn, "secure-regions", NULL); + if (!prop) return 0; + nr_elem = of_property_count_elems_of_size(dn, "secure-regions", sizeof(u64)); + if (nr_elem <= 0) + return nr_elem; + chip->nr_secure_regions = nr_elem / 2; chip->secure_regions = kcalloc(chip->nr_secure_regions, sizeof(*chip->secure_regions), GFP_KERNEL); diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c index a7ee0af1af90..54e321a695ce 100644 --- a/drivers/net/bareudp.c +++ b/drivers/net/bareudp.c @@ -71,12 +71,18 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) family = AF_INET6; if (bareudp->ethertype == htons(ETH_P_IP)) { - struct iphdr *iphdr; + __u8 ipversion; - iphdr = (struct iphdr *)(skb->data + BAREUDP_BASE_HLEN); - if (iphdr->version == 4) { - proto = bareudp->ethertype; - } else if (bareudp->multi_proto_mode && (iphdr->version == 6)) { + if (skb_copy_bits(skb, BAREUDP_BASE_HLEN, &ipversion, + sizeof(ipversion))) { + bareudp->dev->stats.rx_dropped++; + goto drop; + } + ipversion >>= 4; + + if (ipversion == 4) { + proto = htons(ETH_P_IP); + } else if (ipversion == 6 && bareudp->multi_proto_mode) { proto = htons(ETH_P_IPV6); } else { bareudp->dev->stats.rx_dropped++; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 0ff7567bd04f..31730efa7538 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -401,24 +401,85 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev, static int bond_ipsec_add_sa(struct xfrm_state *xs) { struct net_device *bond_dev = xs->xso.dev; + struct bond_ipsec *ipsec; struct bonding *bond; struct slave *slave; + int err; if (!bond_dev) return -EINVAL; + rcu_read_lock(); bond = netdev_priv(bond_dev); slave = rcu_dereference(bond->curr_active_slave); - xs->xso.real_dev = slave->dev; - bond->xs = xs; + if (!slave) { + rcu_read_unlock(); + return -ENODEV; + } - if (!(slave->dev->xfrmdev_ops - && slave->dev->xfrmdev_ops->xdo_dev_state_add)) { + if (!slave->dev->xfrmdev_ops || + !slave->dev->xfrmdev_ops->xdo_dev_state_add || + netif_is_bond_master(slave->dev)) { slave_warn(bond_dev, slave->dev, "Slave does not support ipsec offload\n"); + rcu_read_unlock(); return -EINVAL; } - return slave->dev->xfrmdev_ops->xdo_dev_state_add(xs); + ipsec = kmalloc(sizeof(*ipsec), GFP_ATOMIC); + if (!ipsec) { + rcu_read_unlock(); + return -ENOMEM; + } + xs->xso.real_dev = slave->dev; + + err = slave->dev->xfrmdev_ops->xdo_dev_state_add(xs); + if (!err) { + ipsec->xs = xs; + INIT_LIST_HEAD(&ipsec->list); + spin_lock_bh(&bond->ipsec_lock); + list_add(&ipsec->list, &bond->ipsec_list); + spin_unlock_bh(&bond->ipsec_lock); + } else { + kfree(ipsec); + } + rcu_read_unlock(); + return err; +} + +static void bond_ipsec_add_sa_all(struct bonding *bond) +{ + struct net_device *bond_dev = bond->dev; + struct bond_ipsec 
*ipsec; + struct slave *slave; + + rcu_read_lock(); + slave = rcu_dereference(bond->curr_active_slave); + if (!slave) + goto out; + + if (!slave->dev->xfrmdev_ops || + !slave->dev->xfrmdev_ops->xdo_dev_state_add || + netif_is_bond_master(slave->dev)) { + spin_lock_bh(&bond->ipsec_lock); + if (!list_empty(&bond->ipsec_list)) + slave_warn(bond_dev, slave->dev, + "%s: no slave xdo_dev_state_add\n", + __func__); + spin_unlock_bh(&bond->ipsec_lock); + goto out; + } + + spin_lock_bh(&bond->ipsec_lock); + list_for_each_entry(ipsec, &bond->ipsec_list, list) { + ipsec->xs->xso.real_dev = slave->dev; + if (slave->dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs)) { + slave_warn(bond_dev, slave->dev, "%s: failed to add SA\n", __func__); + ipsec->xs->xso.real_dev = NULL; + } + } + spin_unlock_bh(&bond->ipsec_lock); +out: + rcu_read_unlock(); } /** @@ -428,27 +489,77 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs) static void bond_ipsec_del_sa(struct xfrm_state *xs) { struct net_device *bond_dev = xs->xso.dev; + struct bond_ipsec *ipsec; struct bonding *bond; struct slave *slave; if (!bond_dev) return; + rcu_read_lock(); bond = netdev_priv(bond_dev); slave = rcu_dereference(bond->curr_active_slave); if (!slave) - return; + goto out; - xs->xso.real_dev = slave->dev; + if (!xs->xso.real_dev) + goto out; + + WARN_ON(xs->xso.real_dev != slave->dev); - if (!(slave->dev->xfrmdev_ops - && slave->dev->xfrmdev_ops->xdo_dev_state_delete)) { + if (!slave->dev->xfrmdev_ops || + !slave->dev->xfrmdev_ops->xdo_dev_state_delete || + netif_is_bond_master(slave->dev)) { slave_warn(bond_dev, slave->dev, "%s: no slave xdo_dev_state_delete\n", __func__); - return; + goto out; } slave->dev->xfrmdev_ops->xdo_dev_state_delete(xs); +out: + spin_lock_bh(&bond->ipsec_lock); + list_for_each_entry(ipsec, &bond->ipsec_list, list) { + if (ipsec->xs == xs) { + list_del(&ipsec->list); + kfree(ipsec); + break; + } + } + spin_unlock_bh(&bond->ipsec_lock); + rcu_read_unlock(); +} + +static void bond_ipsec_del_sa_all(struct bonding *bond) +{ + struct net_device *bond_dev = bond->dev; + struct bond_ipsec *ipsec; + struct slave *slave; + + rcu_read_lock(); + slave = rcu_dereference(bond->curr_active_slave); + if (!slave) { + rcu_read_unlock(); + return; + } + + spin_lock_bh(&bond->ipsec_lock); + list_for_each_entry(ipsec, &bond->ipsec_list, list) { + if (!ipsec->xs->xso.real_dev) + continue; + + if (!slave->dev->xfrmdev_ops || + !slave->dev->xfrmdev_ops->xdo_dev_state_delete || + netif_is_bond_master(slave->dev)) { + slave_warn(bond_dev, slave->dev, + "%s: no slave xdo_dev_state_delete\n", + __func__); + } else { + slave->dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs); + } + ipsec->xs->xso.real_dev = NULL; + } + spin_unlock_bh(&bond->ipsec_lock); + rcu_read_unlock(); } /** @@ -459,21 +570,37 @@ static void bond_ipsec_del_sa(struct xfrm_state *xs) static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) { struct net_device *bond_dev = xs->xso.dev; - struct bonding *bond = netdev_priv(bond_dev); - struct slave *curr_active = rcu_dereference(bond->curr_active_slave); - struct net_device *slave_dev = curr_active->dev; + struct net_device *real_dev; + struct slave *curr_active; + struct bonding *bond; + int err; - if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) - return true; + bond = netdev_priv(bond_dev); + rcu_read_lock(); + curr_active = rcu_dereference(bond->curr_active_slave); + real_dev = curr_active->dev; - if (!(slave_dev->xfrmdev_ops - && slave_dev->xfrmdev_ops->xdo_dev_offload_ok)) { - slave_warn(bond_dev, 
slave_dev, "%s: no slave xdo_dev_offload_ok\n", __func__); - return false; + if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { + err = false; + goto out; } - xs->xso.real_dev = slave_dev; - return slave_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs); + if (!xs->xso.real_dev) { + err = false; + goto out; + } + + if (!real_dev->xfrmdev_ops || + !real_dev->xfrmdev_ops->xdo_dev_offload_ok || + netif_is_bond_master(real_dev)) { + err = false; + goto out; + } + + err = real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs); +out: + rcu_read_unlock(); + return err; } static const struct xfrmdev_ops bond_xfrmdev_ops = { @@ -990,8 +1117,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) return; #ifdef CONFIG_XFRM_OFFLOAD - if (old_active && bond->xs) - bond_ipsec_del_sa(bond->xs); + bond_ipsec_del_sa_all(bond); #endif /* CONFIG_XFRM_OFFLOAD */ if (new_active) { @@ -1066,10 +1192,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) } #ifdef CONFIG_XFRM_OFFLOAD - if (new_active && bond->xs) { - xfrm_dev_state_flush(dev_net(bond->dev), bond->dev, true); - bond_ipsec_add_sa(bond->xs); - } + bond_ipsec_add_sa_all(bond); #endif /* CONFIG_XFRM_OFFLOAD */ /* resend IGMP joins since active slave has changed or @@ -3327,6 +3450,9 @@ static int bond_master_netdev_event(unsigned long event, return bond_event_changename(event_bond); case NETDEV_UNREGISTER: bond_remove_proc_entry(event_bond); +#ifdef CONFIG_XFRM_OFFLOAD + xfrm_dev_state_flush(dev_net(bond_dev), bond_dev, true); +#endif /* CONFIG_XFRM_OFFLOAD */ break; case NETDEV_REGISTER: bond_create_proc_entry(event_bond); @@ -4894,7 +5020,8 @@ void bond_setup(struct net_device *bond_dev) #ifdef CONFIG_XFRM_OFFLOAD /* set up xfrm device ops (only supported in active-backup right now) */ bond_dev->xfrmdev_ops = &bond_xfrmdev_ops; - bond->xs = NULL; + INIT_LIST_HEAD(&bond->ipsec_list); + spin_lock_init(&bond->ipsec_lock); #endif /* CONFIG_XFRM_OFFLOAD */ /* don't acquire bond device's netif_tx_lock when transmitting */ diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig index a77124bc1f4b..709660cb38f8 100644 --- a/drivers/net/caif/Kconfig +++ b/drivers/net/caif/Kconfig @@ -20,15 +20,6 @@ config CAIF_TTY identified as N_CAIF. When this ldisc is opened from user space it will redirect the TTY's traffic into the CAIF stack. -config CAIF_HSI - tristate "CAIF HSI transport driver" - depends on CAIF - default n - help - The CAIF low level driver for CAIF over HSI. - Be aware that if you enable this then you also need to - enable a low-level HSI driver. 
- config CAIF_VIRTIO tristate "CAIF virtio transport driver" depends on CAIF && HAS_DMA diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile index b1918c8c126c..97f664f8016c 100644 --- a/drivers/net/caif/Makefile +++ b/drivers/net/caif/Makefile @@ -4,8 +4,5 @@ ccflags-$(CONFIG_CAIF_DEBUG) := -DDEBUG # Serial interface obj-$(CONFIG_CAIF_TTY) += caif_serial.o -# HSI interface -obj-$(CONFIG_CAIF_HSI) += caif_hsi.o - # Virtio interface obj-$(CONFIG_CAIF_VIRTIO) += caif_virtio.o diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c deleted file mode 100644 index 3d63b15bbaa1..000000000000 --- a/drivers/net/caif/caif_hsi.c +++ /dev/null @@ -1,1454 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) ST-Ericsson AB 2010 - * Author: Daniel Martensson - * Dmitry.Tarnyagin / dmitry.tarnyagin@lockless.no - */ - -#define pr_fmt(fmt) KBUILD_MODNAME fmt - -#include <linux/init.h> -#include <linux/module.h> -#include <linux/device.h> -#include <linux/netdevice.h> -#include <linux/string.h> -#include <linux/list.h> -#include <linux/interrupt.h> -#include <linux/delay.h> -#include <linux/sched.h> -#include <linux/if_arp.h> -#include <linux/timer.h> -#include <net/rtnetlink.h> -#include <linux/pkt_sched.h> -#include <net/caif/caif_layer.h> -#include <net/caif/caif_hsi.h> - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Daniel Martensson"); -MODULE_DESCRIPTION("CAIF HSI driver"); - -/* Returns the number of padding bytes for alignment. */ -#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\ - (((pow)-((x)&((pow)-1))))) - -static const struct cfhsi_config hsi_default_config = { - - /* Inactivity timeout on HSI, ms */ - .inactivity_timeout = HZ, - - /* Aggregation timeout (ms) of zero means no aggregation is done*/ - .aggregation_timeout = 1, - - /* - * HSI link layer flow-control thresholds. - * Threshold values for the HSI packet queue. Flow-control will be - * asserted when the number of packets exceeds q_high_mark. It will - * not be de-asserted before the number of packets drops below - * q_low_mark. - * Warning: A high threshold value might increase throughput but it - * will at the same time prevent channel prioritization and increase - * the risk of flooding the modem. The high threshold should be above - * the low. - */ - .q_high_mark = 100, - .q_low_mark = 50, - - /* - * HSI padding options. - * Warning: must be a base of 2 (& operation used) and can not be zero ! - */ - .head_align = 4, - .tail_align = 4, -}; - -#define ON 1 -#define OFF 0 - -static LIST_HEAD(cfhsi_list); - -static void cfhsi_inactivity_tout(struct timer_list *t) -{ - struct cfhsi *cfhsi = from_timer(cfhsi, t, inactivity_timer); - - netdev_dbg(cfhsi->ndev, "%s.\n", - __func__); - - /* Schedule power down work queue. 
*/ - if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) - queue_work(cfhsi->wq, &cfhsi->wake_down_work); -} - -static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi, - const struct sk_buff *skb, - int direction) -{ - struct caif_payload_info *info; - int hpad, tpad, len; - - info = (struct caif_payload_info *)&skb->cb; - hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align); - tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align); - len = skb->len + hpad + tpad; - - if (direction > 0) - cfhsi->aggregation_len += len; - else if (direction < 0) - cfhsi->aggregation_len -= len; -} - -static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi) -{ - int i; - - if (cfhsi->cfg.aggregation_timeout == 0) - return true; - - for (i = 0; i < CFHSI_PRIO_BEBK; ++i) { - if (cfhsi->qhead[i].qlen) - return true; - } - - /* TODO: Use aggregation_len instead */ - if (cfhsi->qhead[CFHSI_PRIO_BEBK].qlen >= CFHSI_MAX_PKTS) - return true; - - return false; -} - -static struct sk_buff *cfhsi_dequeue(struct cfhsi *cfhsi) -{ - struct sk_buff *skb; - int i; - - for (i = 0; i < CFHSI_PRIO_LAST; ++i) { - skb = skb_dequeue(&cfhsi->qhead[i]); - if (skb) - break; - } - - return skb; -} - -static int cfhsi_tx_queue_len(struct cfhsi *cfhsi) -{ - int i, len = 0; - for (i = 0; i < CFHSI_PRIO_LAST; ++i) - len += skb_queue_len(&cfhsi->qhead[i]); - return len; -} - -static void cfhsi_abort_tx(struct cfhsi *cfhsi) -{ - struct sk_buff *skb; - - for (;;) { - spin_lock_bh(&cfhsi->lock); - skb = cfhsi_dequeue(cfhsi); - if (!skb) - break; - - cfhsi->ndev->stats.tx_errors++; - cfhsi->ndev->stats.tx_dropped++; - cfhsi_update_aggregation_stats(cfhsi, skb, -1); - spin_unlock_bh(&cfhsi->lock); - kfree_skb(skb); - } - cfhsi->tx_state = CFHSI_TX_STATE_IDLE; - if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) - mod_timer(&cfhsi->inactivity_timer, - jiffies + cfhsi->cfg.inactivity_timeout); - spin_unlock_bh(&cfhsi->lock); -} - -static int cfhsi_flush_fifo(struct cfhsi *cfhsi) -{ - char buffer[32]; /* Any reasonable value */ - size_t fifo_occupancy; - int ret; - - netdev_dbg(cfhsi->ndev, "%s.\n", - __func__); - - do { - ret = cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops, - &fifo_occupancy); - if (ret) { - netdev_warn(cfhsi->ndev, - "%s: can't get FIFO occupancy: %d.\n", - __func__, ret); - break; - } else if (!fifo_occupancy) - /* No more data, exitting normally */ - break; - - fifo_occupancy = min(sizeof(buffer), fifo_occupancy); - set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits); - ret = cfhsi->ops->cfhsi_rx(buffer, fifo_occupancy, - cfhsi->ops); - if (ret) { - clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits); - netdev_warn(cfhsi->ndev, - "%s: can't read data: %d.\n", - __func__, ret); - break; - } - - ret = 5 * HZ; - ret = wait_event_interruptible_timeout(cfhsi->flush_fifo_wait, - !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret); - - if (ret < 0) { - netdev_warn(cfhsi->ndev, - "%s: can't wait for flush complete: %d.\n", - __func__, ret); - break; - } else if (!ret) { - ret = -ETIMEDOUT; - netdev_warn(cfhsi->ndev, - "%s: timeout waiting for flush complete.\n", - __func__); - break; - } - } while (1); - - return ret; -} - -static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi) -{ - int nfrms = 0; - int pld_len = 0; - struct sk_buff *skb; - u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ; - - skb = cfhsi_dequeue(cfhsi); - if (!skb) - return 0; - - /* Clear offset. */ - desc->offset = 0; - - /* Check if we can embed a CAIF frame. 
*/ - if (skb->len < CFHSI_MAX_EMB_FRM_SZ) { - struct caif_payload_info *info; - int hpad; - int tpad; - - /* Calculate needed head alignment and tail alignment. */ - info = (struct caif_payload_info *)&skb->cb; - - hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align); - tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align); - - /* Check if frame still fits with added alignment. */ - if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) { - u8 *pemb = desc->emb_frm; - desc->offset = CFHSI_DESC_SHORT_SZ; - *pemb = (u8)(hpad - 1); - pemb += hpad; - - /* Update network statistics. */ - spin_lock_bh(&cfhsi->lock); - cfhsi->ndev->stats.tx_packets++; - cfhsi->ndev->stats.tx_bytes += skb->len; - cfhsi_update_aggregation_stats(cfhsi, skb, -1); - spin_unlock_bh(&cfhsi->lock); - - /* Copy in embedded CAIF frame. */ - skb_copy_bits(skb, 0, pemb, skb->len); - - /* Consume the SKB */ - consume_skb(skb); - skb = NULL; - } - } - - /* Create payload CAIF frames. */ - while (nfrms < CFHSI_MAX_PKTS) { - struct caif_payload_info *info; - int hpad; - int tpad; - - if (!skb) - skb = cfhsi_dequeue(cfhsi); - - if (!skb) - break; - - /* Calculate needed head alignment and tail alignment. */ - info = (struct caif_payload_info *)&skb->cb; - - hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align); - tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align); - - /* Fill in CAIF frame length in descriptor. */ - desc->cffrm_len[nfrms] = hpad + skb->len + tpad; - - /* Fill head padding information. */ - *pfrm = (u8)(hpad - 1); - pfrm += hpad; - - /* Update network statistics. */ - spin_lock_bh(&cfhsi->lock); - cfhsi->ndev->stats.tx_packets++; - cfhsi->ndev->stats.tx_bytes += skb->len; - cfhsi_update_aggregation_stats(cfhsi, skb, -1); - spin_unlock_bh(&cfhsi->lock); - - /* Copy in CAIF frame. */ - skb_copy_bits(skb, 0, pfrm, skb->len); - - /* Update payload length. */ - pld_len += desc->cffrm_len[nfrms]; - - /* Update frame pointer. */ - pfrm += skb->len + tpad; - - /* Consume the SKB */ - consume_skb(skb); - skb = NULL; - - /* Update number of frames. */ - nfrms++; - } - - /* Unused length fields should be zero-filled (according to SPEC). */ - while (nfrms < CFHSI_MAX_PKTS) { - desc->cffrm_len[nfrms] = 0x0000; - nfrms++; - } - - /* Check if we can piggy-back another descriptor. */ - if (cfhsi_can_send_aggregate(cfhsi)) - desc->header |= CFHSI_PIGGY_DESC; - else - desc->header &= ~CFHSI_PIGGY_DESC; - - return CFHSI_DESC_SZ + pld_len; -} - -static void cfhsi_start_tx(struct cfhsi *cfhsi) -{ - struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf; - int len, res; - - netdev_dbg(cfhsi->ndev, "%s.\n", __func__); - - if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) - return; - - do { - /* Create HSI frame. */ - len = cfhsi_tx_frm(desc, cfhsi); - if (!len) { - spin_lock_bh(&cfhsi->lock); - if (unlikely(cfhsi_tx_queue_len(cfhsi))) { - spin_unlock_bh(&cfhsi->lock); - res = -EAGAIN; - continue; - } - cfhsi->tx_state = CFHSI_TX_STATE_IDLE; - /* Start inactivity timer. */ - mod_timer(&cfhsi->inactivity_timer, - jiffies + cfhsi->cfg.inactivity_timeout); - spin_unlock_bh(&cfhsi->lock); - break; - } - - /* Set up new transfer. 
*/ - res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops); - if (WARN_ON(res < 0)) - netdev_err(cfhsi->ndev, "%s: TX error %d.\n", - __func__, res); - } while (res < 0); -} - -static void cfhsi_tx_done(struct cfhsi *cfhsi) -{ - netdev_dbg(cfhsi->ndev, "%s.\n", __func__); - - if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) - return; - - /* - * Send flow on if flow off has been previously signalled - * and number of packets is below low water mark. - */ - spin_lock_bh(&cfhsi->lock); - if (cfhsi->flow_off_sent && - cfhsi_tx_queue_len(cfhsi) <= cfhsi->cfg.q_low_mark && - cfhsi->cfdev.flowctrl) { - - cfhsi->flow_off_sent = 0; - cfhsi->cfdev.flowctrl(cfhsi->ndev, ON); - } - - if (cfhsi_can_send_aggregate(cfhsi)) { - spin_unlock_bh(&cfhsi->lock); - cfhsi_start_tx(cfhsi); - } else { - mod_timer(&cfhsi->aggregation_timer, - jiffies + cfhsi->cfg.aggregation_timeout); - spin_unlock_bh(&cfhsi->lock); - } - - return; -} - -static void cfhsi_tx_done_cb(struct cfhsi_cb_ops *cb_ops) -{ - struct cfhsi *cfhsi; - - cfhsi = container_of(cb_ops, struct cfhsi, cb_ops); - netdev_dbg(cfhsi->ndev, "%s.\n", - __func__); - - if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) - return; - cfhsi_tx_done(cfhsi); -} - -static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi) -{ - int xfer_sz = 0; - int nfrms = 0; - u16 *plen = NULL; - u8 *pfrm = NULL; - - if ((desc->header & ~CFHSI_PIGGY_DESC) || - (desc->offset > CFHSI_MAX_EMB_FRM_SZ)) { - netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n", - __func__); - return -EPROTO; - } - - /* Check for embedded CAIF frame. */ - if (desc->offset) { - struct sk_buff *skb; - int len = 0; - pfrm = ((u8 *)desc) + desc->offset; - - /* Remove offset padding. */ - pfrm += *pfrm + 1; - - /* Read length of CAIF frame (little endian). */ - len = *pfrm; - len |= ((*(pfrm+1)) << 8) & 0xFF00; - len += 2; /* Add FCS fields. */ - - /* Sanity check length of CAIF frame. */ - if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) { - netdev_err(cfhsi->ndev, "%s: Invalid length.\n", - __func__); - return -EPROTO; - } - - /* Allocate SKB (OK even in IRQ context). */ - skb = alloc_skb(len + 1, GFP_ATOMIC); - if (!skb) { - netdev_err(cfhsi->ndev, "%s: Out of memory !\n", - __func__); - return -ENOMEM; - } - caif_assert(skb != NULL); - - skb_put_data(skb, pfrm, len); - - skb->protocol = htons(ETH_P_CAIF); - skb_reset_mac_header(skb); - skb->dev = cfhsi->ndev; - - netif_rx_any_context(skb); - - /* Update network statistics. */ - cfhsi->ndev->stats.rx_packets++; - cfhsi->ndev->stats.rx_bytes += len; - } - - /* Calculate transfer length. */ - plen = desc->cffrm_len; - while (nfrms < CFHSI_MAX_PKTS && *plen) { - xfer_sz += *plen; - plen++; - nfrms++; - } - - /* Check for piggy-backed descriptor. */ - if (desc->header & CFHSI_PIGGY_DESC) - xfer_sz += CFHSI_DESC_SZ; - - if ((xfer_sz % 4) || (xfer_sz > (CFHSI_BUF_SZ_RX - CFHSI_DESC_SZ))) { - netdev_err(cfhsi->ndev, - "%s: Invalid payload len: %d, ignored.\n", - __func__, xfer_sz); - return -EPROTO; - } - return xfer_sz; -} - -static int cfhsi_rx_desc_len(struct cfhsi_desc *desc) -{ - int xfer_sz = 0; - int nfrms = 0; - u16 *plen; - - if ((desc->header & ~CFHSI_PIGGY_DESC) || - (desc->offset > CFHSI_MAX_EMB_FRM_SZ)) { - - pr_err("Invalid descriptor. %x %x\n", desc->header, - desc->offset); - return -EPROTO; - } - - /* Calculate transfer length. 
*/ - plen = desc->cffrm_len; - while (nfrms < CFHSI_MAX_PKTS && *plen) { - xfer_sz += *plen; - plen++; - nfrms++; - } - - if (xfer_sz % 4) { - pr_err("Invalid payload len: %d, ignored.\n", xfer_sz); - return -EPROTO; - } - return xfer_sz; -} - -static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi) -{ - int rx_sz = 0; - int nfrms = 0; - u16 *plen = NULL; - u8 *pfrm = NULL; - - /* Sanity check header and offset. */ - if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) || - (desc->offset > CFHSI_MAX_EMB_FRM_SZ))) { - netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n", - __func__); - return -EPROTO; - } - - /* Set frame pointer to start of payload. */ - pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ; - plen = desc->cffrm_len; - - /* Skip already processed frames. */ - while (nfrms < cfhsi->rx_state.nfrms) { - pfrm += *plen; - rx_sz += *plen; - plen++; - nfrms++; - } - - /* Parse payload. */ - while (nfrms < CFHSI_MAX_PKTS && *plen) { - struct sk_buff *skb; - u8 *pcffrm = NULL; - int len; - - /* CAIF frame starts after head padding. */ - pcffrm = pfrm + *pfrm + 1; - - /* Read length of CAIF frame (little endian). */ - len = *pcffrm; - len |= ((*(pcffrm + 1)) << 8) & 0xFF00; - len += 2; /* Add FCS fields. */ - - /* Sanity check length of CAIF frames. */ - if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) { - netdev_err(cfhsi->ndev, "%s: Invalid length.\n", - __func__); - return -EPROTO; - } - - /* Allocate SKB (OK even in IRQ context). */ - skb = alloc_skb(len + 1, GFP_ATOMIC); - if (!skb) { - netdev_err(cfhsi->ndev, "%s: Out of memory !\n", - __func__); - cfhsi->rx_state.nfrms = nfrms; - return -ENOMEM; - } - caif_assert(skb != NULL); - - skb_put_data(skb, pcffrm, len); - - skb->protocol = htons(ETH_P_CAIF); - skb_reset_mac_header(skb); - skb->dev = cfhsi->ndev; - - netif_rx_any_context(skb); - - /* Update network statistics. */ - cfhsi->ndev->stats.rx_packets++; - cfhsi->ndev->stats.rx_bytes += len; - - pfrm += *plen; - rx_sz += *plen; - plen++; - nfrms++; - } - - return rx_sz; -} - -static void cfhsi_rx_done(struct cfhsi *cfhsi) -{ - int res; - int desc_pld_len = 0, rx_len, rx_state; - struct cfhsi_desc *desc = NULL; - u8 *rx_ptr, *rx_buf; - struct cfhsi_desc *piggy_desc = NULL; - - desc = (struct cfhsi_desc *)cfhsi->rx_buf; - - netdev_dbg(cfhsi->ndev, "%s\n", __func__); - - if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) - return; - - /* Update inactivity timer if pending. */ - spin_lock_bh(&cfhsi->lock); - mod_timer_pending(&cfhsi->inactivity_timer, - jiffies + cfhsi->cfg.inactivity_timeout); - spin_unlock_bh(&cfhsi->lock); - - if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) { - desc_pld_len = cfhsi_rx_desc_len(desc); - - if (desc_pld_len < 0) - goto out_of_sync; - - rx_buf = cfhsi->rx_buf; - rx_len = desc_pld_len; - if (desc_pld_len > 0 && (desc->header & CFHSI_PIGGY_DESC)) - rx_len += CFHSI_DESC_SZ; - if (desc_pld_len == 0) - rx_buf = cfhsi->rx_flip_buf; - } else { - rx_buf = cfhsi->rx_flip_buf; - - rx_len = CFHSI_DESC_SZ; - if (cfhsi->rx_state.pld_len > 0 && - (desc->header & CFHSI_PIGGY_DESC)) { - - piggy_desc = (struct cfhsi_desc *) - (desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ + - cfhsi->rx_state.pld_len); - - cfhsi->rx_state.piggy_desc = true; - - /* Extract payload len from piggy-backed descriptor. 
*/ - desc_pld_len = cfhsi_rx_desc_len(piggy_desc); - if (desc_pld_len < 0) - goto out_of_sync; - - if (desc_pld_len > 0) { - rx_len = desc_pld_len; - if (piggy_desc->header & CFHSI_PIGGY_DESC) - rx_len += CFHSI_DESC_SZ; - } - - /* - * Copy needed information from the piggy-backed - * descriptor to the descriptor in the start. - */ - memcpy(rx_buf, (u8 *)piggy_desc, - CFHSI_DESC_SHORT_SZ); - } - } - - if (desc_pld_len) { - rx_state = CFHSI_RX_STATE_PAYLOAD; - rx_ptr = rx_buf + CFHSI_DESC_SZ; - } else { - rx_state = CFHSI_RX_STATE_DESC; - rx_ptr = rx_buf; - rx_len = CFHSI_DESC_SZ; - } - - /* Initiate next read */ - if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) { - /* Set up new transfer. */ - netdev_dbg(cfhsi->ndev, "%s: Start RX.\n", - __func__); - - res = cfhsi->ops->cfhsi_rx(rx_ptr, rx_len, - cfhsi->ops); - if (WARN_ON(res < 0)) { - netdev_err(cfhsi->ndev, "%s: RX error %d.\n", - __func__, res); - cfhsi->ndev->stats.rx_errors++; - cfhsi->ndev->stats.rx_dropped++; - } - } - - if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) { - /* Extract payload from descriptor */ - if (cfhsi_rx_desc(desc, cfhsi) < 0) - goto out_of_sync; - } else { - /* Extract payload */ - if (cfhsi_rx_pld(desc, cfhsi) < 0) - goto out_of_sync; - if (piggy_desc) { - /* Extract any payload in piggyback descriptor. */ - if (cfhsi_rx_desc(piggy_desc, cfhsi) < 0) - goto out_of_sync; - /* Mark no embedded frame after extracting it */ - piggy_desc->offset = 0; - } - } - - /* Update state info */ - memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state)); - cfhsi->rx_state.state = rx_state; - cfhsi->rx_ptr = rx_ptr; - cfhsi->rx_len = rx_len; - cfhsi->rx_state.pld_len = desc_pld_len; - cfhsi->rx_state.piggy_desc = desc->header & CFHSI_PIGGY_DESC; - - if (rx_buf != cfhsi->rx_buf) - swap(cfhsi->rx_buf, cfhsi->rx_flip_buf); - return; - -out_of_sync: - netdev_err(cfhsi->ndev, "%s: Out of sync.\n", __func__); - print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE, - cfhsi->rx_buf, CFHSI_DESC_SZ); - schedule_work(&cfhsi->out_of_sync_work); -} - -static void cfhsi_rx_slowpath(struct timer_list *t) -{ - struct cfhsi *cfhsi = from_timer(cfhsi, t, rx_slowpath_timer); - - netdev_dbg(cfhsi->ndev, "%s.\n", - __func__); - - cfhsi_rx_done(cfhsi); -} - -static void cfhsi_rx_done_cb(struct cfhsi_cb_ops *cb_ops) -{ - struct cfhsi *cfhsi; - - cfhsi = container_of(cb_ops, struct cfhsi, cb_ops); - netdev_dbg(cfhsi->ndev, "%s.\n", - __func__); - - if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) - return; - - if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits)) - wake_up_interruptible(&cfhsi->flush_fifo_wait); - else - cfhsi_rx_done(cfhsi); -} - -static void cfhsi_wake_up(struct work_struct *work) -{ - struct cfhsi *cfhsi = NULL; - int res; - int len; - long ret; - - cfhsi = container_of(work, struct cfhsi, wake_up_work); - - if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) - return; - - if (unlikely(test_bit(CFHSI_AWAKE, &cfhsi->bits))) { - /* It happenes when wakeup is requested by - * both ends at the same time. */ - clear_bit(CFHSI_WAKE_UP, &cfhsi->bits); - clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits); - return; - } - - /* Activate wake line. */ - cfhsi->ops->cfhsi_wake_up(cfhsi->ops); - - netdev_dbg(cfhsi->ndev, "%s: Start waiting.\n", - __func__); - - /* Wait for acknowledge. */ - ret = CFHSI_WAKE_TOUT; - ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait, - test_and_clear_bit(CFHSI_WAKE_UP_ACK, - &cfhsi->bits), ret); - if (unlikely(ret < 0)) { - /* Interrupted by signal. 
*/ - netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n", - __func__, ret); - - clear_bit(CFHSI_WAKE_UP, &cfhsi->bits); - cfhsi->ops->cfhsi_wake_down(cfhsi->ops); - return; - } else if (!ret) { - bool ca_wake = false; - size_t fifo_occupancy = 0; - - /* Wakeup timeout */ - netdev_dbg(cfhsi->ndev, "%s: Timeout.\n", - __func__); - - /* Check FIFO to check if modem has sent something. */ - WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops, - &fifo_occupancy)); - - netdev_dbg(cfhsi->ndev, "%s: Bytes in FIFO: %u.\n", - __func__, (unsigned) fifo_occupancy); - - /* Check if we misssed the interrupt. */ - WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops, - &ca_wake)); - - if (ca_wake) { - netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n", - __func__); - - /* Clear the CFHSI_WAKE_UP_ACK bit to prevent race. */ - clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits); - - /* Continue execution. */ - goto wake_ack; - } - - clear_bit(CFHSI_WAKE_UP, &cfhsi->bits); - cfhsi->ops->cfhsi_wake_down(cfhsi->ops); - return; - } -wake_ack: - netdev_dbg(cfhsi->ndev, "%s: Woken.\n", - __func__); - - /* Clear power up bit. */ - set_bit(CFHSI_AWAKE, &cfhsi->bits); - clear_bit(CFHSI_WAKE_UP, &cfhsi->bits); - - /* Resume read operation. */ - netdev_dbg(cfhsi->ndev, "%s: Start RX.\n", __func__); - res = cfhsi->ops->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->ops); - - if (WARN_ON(res < 0)) - netdev_err(cfhsi->ndev, "%s: RX err %d.\n", __func__, res); - - /* Clear power up acknowledment. */ - clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits); - - spin_lock_bh(&cfhsi->lock); - - /* Resume transmit if queues are not empty. */ - if (!cfhsi_tx_queue_len(cfhsi)) { - netdev_dbg(cfhsi->ndev, "%s: Peer wake, start timer.\n", - __func__); - /* Start inactivity timer. */ - mod_timer(&cfhsi->inactivity_timer, - jiffies + cfhsi->cfg.inactivity_timeout); - spin_unlock_bh(&cfhsi->lock); - return; - } - - netdev_dbg(cfhsi->ndev, "%s: Host wake.\n", - __func__); - - spin_unlock_bh(&cfhsi->lock); - - /* Create HSI frame. */ - len = cfhsi_tx_frm((struct cfhsi_desc *)cfhsi->tx_buf, cfhsi); - - if (likely(len > 0)) { - /* Set up new transfer. */ - res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops); - if (WARN_ON(res < 0)) { - netdev_err(cfhsi->ndev, "%s: TX error %d.\n", - __func__, res); - cfhsi_abort_tx(cfhsi); - } - } else { - netdev_err(cfhsi->ndev, - "%s: Failed to create HSI frame: %d.\n", - __func__, len); - } -} - -static void cfhsi_wake_down(struct work_struct *work) -{ - long ret; - struct cfhsi *cfhsi = NULL; - size_t fifo_occupancy = 0; - int retry = CFHSI_WAKE_TOUT; - - cfhsi = container_of(work, struct cfhsi, wake_down_work); - netdev_dbg(cfhsi->ndev, "%s.\n", __func__); - - if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) - return; - - /* Deactivate wake line. */ - cfhsi->ops->cfhsi_wake_down(cfhsi->ops); - - /* Wait for acknowledge. */ - ret = CFHSI_WAKE_TOUT; - ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait, - test_and_clear_bit(CFHSI_WAKE_DOWN_ACK, - &cfhsi->bits), ret); - if (ret < 0) { - /* Interrupted by signal. */ - netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n", - __func__, ret); - return; - } else if (!ret) { - bool ca_wake = true; - - /* Timeout */ - netdev_err(cfhsi->ndev, "%s: Timeout.\n", __func__); - - /* Check if we misssed the interrupt. */ - WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops, - &ca_wake)); - if (!ca_wake) - netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n", - __func__); - } - - /* Check FIFO occupancy. 
*/ - while (retry) { - WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops, - &fifo_occupancy)); - - if (!fifo_occupancy) - break; - - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(1); - retry--; - } - - if (!retry) - netdev_err(cfhsi->ndev, "%s: FIFO Timeout.\n", __func__); - - /* Clear AWAKE condition. */ - clear_bit(CFHSI_AWAKE, &cfhsi->bits); - - /* Cancel pending RX requests. */ - cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops); -} - -static void cfhsi_out_of_sync(struct work_struct *work) -{ - struct cfhsi *cfhsi = NULL; - - cfhsi = container_of(work, struct cfhsi, out_of_sync_work); - - rtnl_lock(); - dev_close(cfhsi->ndev); - rtnl_unlock(); -} - -static void cfhsi_wake_up_cb(struct cfhsi_cb_ops *cb_ops) -{ - struct cfhsi *cfhsi = NULL; - - cfhsi = container_of(cb_ops, struct cfhsi, cb_ops); - netdev_dbg(cfhsi->ndev, "%s.\n", - __func__); - - set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits); - wake_up_interruptible(&cfhsi->wake_up_wait); - - if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) - return; - - /* Schedule wake up work queue if the peer initiates. */ - if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits)) - queue_work(cfhsi->wq, &cfhsi->wake_up_work); -} - -static void cfhsi_wake_down_cb(struct cfhsi_cb_ops *cb_ops) -{ - struct cfhsi *cfhsi = NULL; - - cfhsi = container_of(cb_ops, struct cfhsi, cb_ops); - netdev_dbg(cfhsi->ndev, "%s.\n", - __func__); - - /* Initiating low power is only permitted by the host (us). */ - set_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits); - wake_up_interruptible(&cfhsi->wake_down_wait); -} - -static void cfhsi_aggregation_tout(struct timer_list *t) -{ - struct cfhsi *cfhsi = from_timer(cfhsi, t, aggregation_timer); - - netdev_dbg(cfhsi->ndev, "%s.\n", - __func__); - - cfhsi_start_tx(cfhsi); -} - -static netdev_tx_t cfhsi_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct cfhsi *cfhsi = NULL; - int start_xfer = 0; - int timer_active; - int prio; - - if (!dev) - return -EINVAL; - - cfhsi = netdev_priv(dev); - - switch (skb->priority) { - case TC_PRIO_BESTEFFORT: - case TC_PRIO_FILLER: - case TC_PRIO_BULK: - prio = CFHSI_PRIO_BEBK; - break; - case TC_PRIO_INTERACTIVE_BULK: - prio = CFHSI_PRIO_VI; - break; - case TC_PRIO_INTERACTIVE: - prio = CFHSI_PRIO_VO; - break; - case TC_PRIO_CONTROL: - default: - prio = CFHSI_PRIO_CTL; - break; - } - - spin_lock_bh(&cfhsi->lock); - - /* Update aggregation statistics */ - cfhsi_update_aggregation_stats(cfhsi, skb, 1); - - /* Queue the SKB */ - skb_queue_tail(&cfhsi->qhead[prio], skb); - - /* Sanity check; xmit should not be called after unregister_netdev */ - if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) { - spin_unlock_bh(&cfhsi->lock); - cfhsi_abort_tx(cfhsi); - return -EINVAL; - } - - /* Send flow off if number of packets is above high water mark. */ - if (!cfhsi->flow_off_sent && - cfhsi_tx_queue_len(cfhsi) > cfhsi->cfg.q_high_mark && - cfhsi->cfdev.flowctrl) { - cfhsi->flow_off_sent = 1; - cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF); - } - - if (cfhsi->tx_state == CFHSI_TX_STATE_IDLE) { - cfhsi->tx_state = CFHSI_TX_STATE_XFER; - start_xfer = 1; - } - - if (!start_xfer) { - /* Send aggregate if it is possible */ - bool aggregate_ready = - cfhsi_can_send_aggregate(cfhsi) && - del_timer(&cfhsi->aggregation_timer) > 0; - spin_unlock_bh(&cfhsi->lock); - if (aggregate_ready) - cfhsi_start_tx(cfhsi); - return NETDEV_TX_OK; - } - - /* Delete inactivity timer if started. 
*/ - timer_active = del_timer_sync(&cfhsi->inactivity_timer); - - spin_unlock_bh(&cfhsi->lock); - - if (timer_active) { - struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf; - int len; - int res; - - /* Create HSI frame. */ - len = cfhsi_tx_frm(desc, cfhsi); - WARN_ON(!len); - - /* Set up new transfer. */ - res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops); - if (WARN_ON(res < 0)) { - netdev_err(cfhsi->ndev, "%s: TX error %d.\n", - __func__, res); - cfhsi_abort_tx(cfhsi); - } - } else { - /* Schedule wake up work queue if the we initiate. */ - if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits)) - queue_work(cfhsi->wq, &cfhsi->wake_up_work); - } - - return NETDEV_TX_OK; -} - -static const struct net_device_ops cfhsi_netdevops; - -static void cfhsi_setup(struct net_device *dev) -{ - int i; - struct cfhsi *cfhsi = netdev_priv(dev); - dev->features = 0; - dev->type = ARPHRD_CAIF; - dev->flags = IFF_POINTOPOINT | IFF_NOARP; - dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ; - dev->priv_flags |= IFF_NO_QUEUE; - dev->needs_free_netdev = true; - dev->netdev_ops = &cfhsi_netdevops; - for (i = 0; i < CFHSI_PRIO_LAST; ++i) - skb_queue_head_init(&cfhsi->qhead[i]); - cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW; - cfhsi->cfdev.use_frag = false; - cfhsi->cfdev.use_stx = false; - cfhsi->cfdev.use_fcs = false; - cfhsi->ndev = dev; - cfhsi->cfg = hsi_default_config; -} - -static int cfhsi_open(struct net_device *ndev) -{ - struct cfhsi *cfhsi = netdev_priv(ndev); - int res; - - clear_bit(CFHSI_SHUTDOWN, &cfhsi->bits); - - /* Initialize state vaiables. */ - cfhsi->tx_state = CFHSI_TX_STATE_IDLE; - cfhsi->rx_state.state = CFHSI_RX_STATE_DESC; - - /* Set flow info */ - cfhsi->flow_off_sent = 0; - - /* - * Allocate a TX buffer with the size of a HSI packet descriptors - * and the necessary room for CAIF payload frames. - */ - cfhsi->tx_buf = kzalloc(CFHSI_BUF_SZ_TX, GFP_KERNEL); - if (!cfhsi->tx_buf) { - res = -ENODEV; - goto err_alloc_tx; - } - - /* - * Allocate a RX buffer with the size of two HSI packet descriptors and - * the necessary room for CAIF payload frames. - */ - cfhsi->rx_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL); - if (!cfhsi->rx_buf) { - res = -ENODEV; - goto err_alloc_rx; - } - - cfhsi->rx_flip_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL); - if (!cfhsi->rx_flip_buf) { - res = -ENODEV; - goto err_alloc_rx_flip; - } - - /* Initialize aggregation timeout */ - cfhsi->cfg.aggregation_timeout = hsi_default_config.aggregation_timeout; - - /* Initialize recieve vaiables. */ - cfhsi->rx_ptr = cfhsi->rx_buf; - cfhsi->rx_len = CFHSI_DESC_SZ; - - /* Initialize spin locks. */ - spin_lock_init(&cfhsi->lock); - - /* Set up the driver. */ - cfhsi->cb_ops.tx_done_cb = cfhsi_tx_done_cb; - cfhsi->cb_ops.rx_done_cb = cfhsi_rx_done_cb; - cfhsi->cb_ops.wake_up_cb = cfhsi_wake_up_cb; - cfhsi->cb_ops.wake_down_cb = cfhsi_wake_down_cb; - - /* Initialize the work queues. */ - INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up); - INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down); - INIT_WORK(&cfhsi->out_of_sync_work, cfhsi_out_of_sync); - - /* Clear all bit fields. */ - clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits); - clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits); - clear_bit(CFHSI_WAKE_UP, &cfhsi->bits); - clear_bit(CFHSI_AWAKE, &cfhsi->bits); - - /* Create work thread. */ - cfhsi->wq = alloc_ordered_workqueue(cfhsi->ndev->name, WQ_MEM_RECLAIM); - if (!cfhsi->wq) { - netdev_err(cfhsi->ndev, "%s: Failed to create work queue.\n", - __func__); - res = -ENODEV; - goto err_create_wq; - } - - /* Initialize wait queues. 
*/ - init_waitqueue_head(&cfhsi->wake_up_wait); - init_waitqueue_head(&cfhsi->wake_down_wait); - init_waitqueue_head(&cfhsi->flush_fifo_wait); - - /* Setup the inactivity timer. */ - timer_setup(&cfhsi->inactivity_timer, cfhsi_inactivity_tout, 0); - /* Setup the slowpath RX timer. */ - timer_setup(&cfhsi->rx_slowpath_timer, cfhsi_rx_slowpath, 0); - /* Setup the aggregation timer. */ - timer_setup(&cfhsi->aggregation_timer, cfhsi_aggregation_tout, 0); - - /* Activate HSI interface. */ - res = cfhsi->ops->cfhsi_up(cfhsi->ops); - if (res) { - netdev_err(cfhsi->ndev, - "%s: can't activate HSI interface: %d.\n", - __func__, res); - goto err_activate; - } - - /* Flush FIFO */ - res = cfhsi_flush_fifo(cfhsi); - if (res) { - netdev_err(cfhsi->ndev, "%s: Can't flush FIFO: %d.\n", - __func__, res); - goto err_net_reg; - } - return res; - - err_net_reg: - cfhsi->ops->cfhsi_down(cfhsi->ops); - err_activate: - destroy_workqueue(cfhsi->wq); - err_create_wq: - kfree(cfhsi->rx_flip_buf); - err_alloc_rx_flip: - kfree(cfhsi->rx_buf); - err_alloc_rx: - kfree(cfhsi->tx_buf); - err_alloc_tx: - return res; -} - -static int cfhsi_close(struct net_device *ndev) -{ - struct cfhsi *cfhsi = netdev_priv(ndev); - u8 *tx_buf, *rx_buf, *flip_buf; - - /* going to shutdown driver */ - set_bit(CFHSI_SHUTDOWN, &cfhsi->bits); - - /* Delete timers if pending */ - del_timer_sync(&cfhsi->inactivity_timer); - del_timer_sync(&cfhsi->rx_slowpath_timer); - del_timer_sync(&cfhsi->aggregation_timer); - - /* Cancel pending RX request (if any) */ - cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops); - - /* Destroy workqueue */ - destroy_workqueue(cfhsi->wq); - - /* Store bufferes: will be freed later. */ - tx_buf = cfhsi->tx_buf; - rx_buf = cfhsi->rx_buf; - flip_buf = cfhsi->rx_flip_buf; - /* Flush transmit queues. */ - cfhsi_abort_tx(cfhsi); - - /* Deactivate interface */ - cfhsi->ops->cfhsi_down(cfhsi->ops); - - /* Free buffers. */ - kfree(tx_buf); - kfree(rx_buf); - kfree(flip_buf); - return 0; -} - -static void cfhsi_uninit(struct net_device *dev) -{ - struct cfhsi *cfhsi = netdev_priv(dev); - ASSERT_RTNL(); - symbol_put(cfhsi_get_device); - list_del(&cfhsi->list); -} - -static const struct net_device_ops cfhsi_netdevops = { - .ndo_uninit = cfhsi_uninit, - .ndo_open = cfhsi_open, - .ndo_stop = cfhsi_close, - .ndo_start_xmit = cfhsi_xmit -}; - -static void cfhsi_netlink_parms(struct nlattr *data[], struct cfhsi *cfhsi) -{ - int i; - - if (!data) { - pr_debug("no params data found\n"); - return; - } - - i = __IFLA_CAIF_HSI_INACTIVITY_TOUT; - /* - * Inactivity timeout in millisecs. Lowest possible value is 1, - * and highest possible is NEXT_TIMER_MAX_DELTA. - */ - if (data[i]) { - u32 inactivity_timeout = nla_get_u32(data[i]); - /* Pre-calculate inactivity timeout. 
*/ - cfhsi->cfg.inactivity_timeout = inactivity_timeout * HZ / 1000; - if (cfhsi->cfg.inactivity_timeout == 0) - cfhsi->cfg.inactivity_timeout = 1; - else if (cfhsi->cfg.inactivity_timeout > NEXT_TIMER_MAX_DELTA) - cfhsi->cfg.inactivity_timeout = NEXT_TIMER_MAX_DELTA; - } - - i = __IFLA_CAIF_HSI_AGGREGATION_TOUT; - if (data[i]) - cfhsi->cfg.aggregation_timeout = nla_get_u32(data[i]); - - i = __IFLA_CAIF_HSI_HEAD_ALIGN; - if (data[i]) - cfhsi->cfg.head_align = nla_get_u32(data[i]); - - i = __IFLA_CAIF_HSI_TAIL_ALIGN; - if (data[i]) - cfhsi->cfg.tail_align = nla_get_u32(data[i]); - - i = __IFLA_CAIF_HSI_QHIGH_WATERMARK; - if (data[i]) - cfhsi->cfg.q_high_mark = nla_get_u32(data[i]); - - i = __IFLA_CAIF_HSI_QLOW_WATERMARK; - if (data[i]) - cfhsi->cfg.q_low_mark = nla_get_u32(data[i]); -} - -static int caif_hsi_changelink(struct net_device *dev, struct nlattr *tb[], - struct nlattr *data[], - struct netlink_ext_ack *extack) -{ - cfhsi_netlink_parms(data, netdev_priv(dev)); - netdev_state_change(dev); - return 0; -} - -static const struct nla_policy caif_hsi_policy[__IFLA_CAIF_HSI_MAX + 1] = { - [__IFLA_CAIF_HSI_INACTIVITY_TOUT] = { .type = NLA_U32, .len = 4 }, - [__IFLA_CAIF_HSI_AGGREGATION_TOUT] = { .type = NLA_U32, .len = 4 }, - [__IFLA_CAIF_HSI_HEAD_ALIGN] = { .type = NLA_U32, .len = 4 }, - [__IFLA_CAIF_HSI_TAIL_ALIGN] = { .type = NLA_U32, .len = 4 }, - [__IFLA_CAIF_HSI_QHIGH_WATERMARK] = { .type = NLA_U32, .len = 4 }, - [__IFLA_CAIF_HSI_QLOW_WATERMARK] = { .type = NLA_U32, .len = 4 }, -}; - -static size_t caif_hsi_get_size(const struct net_device *dev) -{ - int i; - size_t s = 0; - for (i = __IFLA_CAIF_HSI_UNSPEC + 1; i < __IFLA_CAIF_HSI_MAX; i++) - s += nla_total_size(caif_hsi_policy[i].len); - return s; -} - -static int caif_hsi_fill_info(struct sk_buff *skb, const struct net_device *dev) -{ - struct cfhsi *cfhsi = netdev_priv(dev); - - if (nla_put_u32(skb, __IFLA_CAIF_HSI_INACTIVITY_TOUT, - cfhsi->cfg.inactivity_timeout) || - nla_put_u32(skb, __IFLA_CAIF_HSI_AGGREGATION_TOUT, - cfhsi->cfg.aggregation_timeout) || - nla_put_u32(skb, __IFLA_CAIF_HSI_HEAD_ALIGN, - cfhsi->cfg.head_align) || - nla_put_u32(skb, __IFLA_CAIF_HSI_TAIL_ALIGN, - cfhsi->cfg.tail_align) || - nla_put_u32(skb, __IFLA_CAIF_HSI_QHIGH_WATERMARK, - cfhsi->cfg.q_high_mark) || - nla_put_u32(skb, __IFLA_CAIF_HSI_QLOW_WATERMARK, - cfhsi->cfg.q_low_mark)) - return -EMSGSIZE; - - return 0; -} - -static int caif_hsi_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], - struct netlink_ext_ack *extack) -{ - struct cfhsi *cfhsi = NULL; - struct cfhsi_ops *(*get_ops)(void); - - ASSERT_RTNL(); - - cfhsi = netdev_priv(dev); - cfhsi_netlink_parms(data, cfhsi); - - get_ops = symbol_get(cfhsi_get_ops); - if (!get_ops) { - pr_err("%s: failed to get the cfhsi_ops\n", __func__); - return -ENODEV; - } - - /* Assign the HSI device. */ - cfhsi->ops = (*get_ops)(); - if (!cfhsi->ops) { - pr_err("%s: failed to get the cfhsi_ops\n", __func__); - goto err; - } - - /* Assign the driver to this HSI device. */ - cfhsi->ops->cb_ops = &cfhsi->cb_ops; - if (register_netdevice(dev)) { - pr_warn("%s: caif_hsi device registration failed\n", __func__); - goto err; - } - /* Add CAIF HSI device to list. 
*/ - list_add_tail(&cfhsi->list, &cfhsi_list); - - return 0; -err: - symbol_put(cfhsi_get_ops); - return -ENODEV; -} - -static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = { - .kind = "cfhsi", - .priv_size = sizeof(struct cfhsi), - .setup = cfhsi_setup, - .maxtype = __IFLA_CAIF_HSI_MAX, - .policy = caif_hsi_policy, - .newlink = caif_hsi_newlink, - .changelink = caif_hsi_changelink, - .get_size = caif_hsi_get_size, - .fill_info = caif_hsi_fill_info, -}; - -static void __exit cfhsi_exit_module(void) -{ - struct list_head *list_node; - struct list_head *n; - struct cfhsi *cfhsi; - - rtnl_link_unregister(&caif_hsi_link_ops); - - rtnl_lock(); - list_for_each_safe(list_node, n, &cfhsi_list) { - cfhsi = list_entry(list_node, struct cfhsi, list); - unregister_netdevice(cfhsi->ndev); - } - rtnl_unlock(); -} - -static int __init cfhsi_init_module(void) -{ - return rtnl_link_register(&caif_hsi_link_ops); -} - -module_init(cfhsi_init_module); -module_exit(cfhsi_exit_module); diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index bba2a449ac70..43bca315a66c 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -1164,10 +1164,10 @@ static int m_can_set_bittiming(struct net_device *dev) FIELD_PREP(TDCR_TDCO_MASK, tdco)); } - reg_btp = FIELD_PREP(NBTP_NBRP_MASK, brp) | - FIELD_PREP(NBTP_NSJW_MASK, sjw) | - FIELD_PREP(NBTP_NTSEG1_MASK, tseg1) | - FIELD_PREP(NBTP_NTSEG2_MASK, tseg2); + reg_btp |= FIELD_PREP(DBTP_DBRP_MASK, brp) | + FIELD_PREP(DBTP_DSJW_MASK, sjw) | + FIELD_PREP(DBTP_DTSEG1_MASK, tseg1) | + FIELD_PREP(DBTP_DTSEG2_MASK, tseg2); m_can_write(cdev, M_CAN_DBTP, reg_btp); } diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c index dd17b8c53e1c..89d9c986a229 100644 --- a/drivers/net/can/spi/hi311x.c +++ b/drivers/net/can/spi/hi311x.c @@ -218,7 +218,7 @@ static int hi3110_spi_trans(struct spi_device *spi, int len) return ret; } -static u8 hi3110_cmd(struct spi_device *spi, u8 command) +static int hi3110_cmd(struct spi_device *spi, u8 command) { struct hi3110_priv *priv = spi_get_drvdata(spi); diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c index 47c3f408a799..9ae48072b6c6 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c @@ -2300,6 +2300,7 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id) err, priv->regs_status.intf); mcp251xfd_dump(priv); mcp251xfd_chip_interrupts_disable(priv); + mcp251xfd_timestamp_stop(priv); return handled; } diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index 0a37af4a3fa4..2b5302e72435 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -255,6 +255,8 @@ struct ems_usb { unsigned int free_slots; /* remember number of available slots */ struct ems_cpc_msg active_params; /* active controller parameters */ + void *rxbuf[MAX_RX_URBS]; + dma_addr_t rxbuf_dma[MAX_RX_URBS]; }; static void ems_usb_read_interrupt_callback(struct urb *urb) @@ -587,6 +589,7 @@ static int ems_usb_start(struct ems_usb *dev) for (i = 0; i < MAX_RX_URBS; i++) { struct urb *urb = NULL; u8 *buf = NULL; + dma_addr_t buf_dma; /* create a URB, and a buffer for it */ urb = usb_alloc_urb(0, GFP_KERNEL); @@ -596,7 +599,7 @@ static int ems_usb_start(struct ems_usb *dev) } buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL, - &urb->transfer_dma); + &buf_dma); if (!buf) { netdev_err(netdev, "No memory left for USB buffer\n"); 
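/* Allocation of the coherent RX buffer failed: free the URB we just created and fall back to running with however many RX URBs were already set up. */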
usb_free_urb(urb); @@ -604,6 +607,8 @@ static int ems_usb_start(struct ems_usb *dev) break; } + urb->transfer_dma = buf_dma; + usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 2), buf, RX_BUFFER_SIZE, ems_usb_read_bulk_callback, dev); @@ -619,6 +624,9 @@ static int ems_usb_start(struct ems_usb *dev) break; } + dev->rxbuf[i] = buf; + dev->rxbuf_dma[i] = buf_dma; + /* Drop reference, USB core will take care of freeing it */ usb_free_urb(urb); } @@ -684,6 +692,10 @@ static void unlink_all_urbs(struct ems_usb *dev) usb_kill_anchored_urbs(&dev->rx_submitted); + for (i = 0; i < MAX_RX_URBS; ++i) + usb_free_coherent(dev->udev, RX_BUFFER_SIZE, + dev->rxbuf[i], dev->rxbuf_dma[i]); + usb_kill_anchored_urbs(&dev->tx_submitted); atomic_set(&dev->active_tx_urbs, 0); diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c index 65b58f8fc328..66fa8b07c2e6 100644 --- a/drivers/net/can/usb/esd_usb2.c +++ b/drivers/net/can/usb/esd_usb2.c @@ -195,6 +195,8 @@ struct esd_usb2 { int net_count; u32 version; int rxinitdone; + void *rxbuf[MAX_RX_URBS]; + dma_addr_t rxbuf_dma[MAX_RX_URBS]; }; struct esd_usb2_net_priv { @@ -545,6 +547,7 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev) for (i = 0; i < MAX_RX_URBS; i++) { struct urb *urb = NULL; u8 *buf = NULL; + dma_addr_t buf_dma; /* create a URB, and a buffer for it */ urb = usb_alloc_urb(0, GFP_KERNEL); @@ -554,7 +557,7 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev) } buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL, - &urb->transfer_dma); + &buf_dma); if (!buf) { dev_warn(dev->udev->dev.parent, "No memory left for USB buffer\n"); @@ -562,6 +565,8 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev) goto freeurb; } + urb->transfer_dma = buf_dma; + usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1), buf, RX_BUFFER_SIZE, @@ -574,8 +579,12 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev) usb_unanchor_urb(urb); usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf, urb->transfer_dma); + goto freeurb; } + dev->rxbuf[i] = buf; + dev->rxbuf_dma[i] = buf_dma; + freeurb: /* Drop reference, USB core will take care of freeing it */ usb_free_urb(urb); @@ -663,6 +672,11 @@ static void unlink_all_urbs(struct esd_usb2 *dev) int i, j; usb_kill_anchored_urbs(&dev->rx_submitted); + + for (i = 0; i < MAX_RX_URBS; ++i) + usb_free_coherent(dev->udev, RX_BUFFER_SIZE, + dev->rxbuf[i], dev->rxbuf_dma[i]); + for (i = 0; i < dev->net_count; i++) { priv = dev->nets[i]; if (priv) { diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c index a45865bd7254..a1a154c08b7f 100644 --- a/drivers/net/can/usb/mcba_usb.c +++ b/drivers/net/can/usb/mcba_usb.c @@ -653,6 +653,8 @@ static int mcba_usb_start(struct mcba_priv *priv) break; } + urb->transfer_dma = buf_dma; + usb_fill_bulk_urb(urb, priv->udev, usb_rcvbulkpipe(priv->udev, MCBA_USB_EP_IN), buf, MCBA_USB_RX_BUFF_SIZE, diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c index 1d6f77252f01..899a3d21b77f 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c @@ -117,7 +117,8 @@ #define PCAN_USB_BERR_MASK (PCAN_USB_ERR_RXERR | PCAN_USB_ERR_TXERR) /* identify bus event packets with rx/tx error counters */ -#define PCAN_USB_ERR_CNT 0x80 +#define PCAN_USB_ERR_CNT_DEC 0x00 /* counters are decreasing */ +#define PCAN_USB_ERR_CNT_INC 0x80 /* counters are increasing */ /* private to PCAN-USB adapter */ struct pcan_usb { @@ -608,11 +609,12 @@ static int 
pcan_usb_handle_bus_evt(struct pcan_usb_msg_context *mc, u8 ir) /* acccording to the content of the packet */ switch (ir) { - case PCAN_USB_ERR_CNT: + case PCAN_USB_ERR_CNT_DEC: + case PCAN_USB_ERR_CNT_INC: /* save rx/tx error counters from in the device context */ - pdev->bec.rxerr = mc->ptr[0]; - pdev->bec.txerr = mc->ptr[1]; + pdev->bec.rxerr = mc->ptr[1]; + pdev->bec.txerr = mc->ptr[2]; break; default: diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c index b6e7ef0d5bc6..d1b83bd1b3cb 100644 --- a/drivers/net/can/usb/usb_8dev.c +++ b/drivers/net/can/usb/usb_8dev.c @@ -137,7 +137,8 @@ struct usb_8dev_priv { u8 *cmd_msg_buffer; struct mutex usb_8dev_cmd_lock; - + void *rxbuf[MAX_RX_URBS]; + dma_addr_t rxbuf_dma[MAX_RX_URBS]; }; /* tx frame */ @@ -733,6 +734,7 @@ static int usb_8dev_start(struct usb_8dev_priv *priv) for (i = 0; i < MAX_RX_URBS; i++) { struct urb *urb = NULL; u8 *buf; + dma_addr_t buf_dma; /* create a URB, and a buffer for it */ urb = usb_alloc_urb(0, GFP_KERNEL); @@ -742,7 +744,7 @@ static int usb_8dev_start(struct usb_8dev_priv *priv) } buf = usb_alloc_coherent(priv->udev, RX_BUFFER_SIZE, GFP_KERNEL, - &urb->transfer_dma); + &buf_dma); if (!buf) { netdev_err(netdev, "No memory left for USB buffer\n"); usb_free_urb(urb); @@ -750,6 +752,8 @@ static int usb_8dev_start(struct usb_8dev_priv *priv) break; } + urb->transfer_dma = buf_dma; + usb_fill_bulk_urb(urb, priv->udev, usb_rcvbulkpipe(priv->udev, USB_8DEV_ENDP_DATA_RX), @@ -767,6 +771,9 @@ static int usb_8dev_start(struct usb_8dev_priv *priv) break; } + priv->rxbuf[i] = buf; + priv->rxbuf_dma[i] = buf_dma; + /* Drop reference, USB core will take care of freeing it */ usb_free_urb(urb); } @@ -836,6 +843,10 @@ static void unlink_all_urbs(struct usb_8dev_priv *priv) usb_kill_anchored_urbs(&priv->rx_submitted); + for (i = 0; i < MAX_RX_URBS; ++i) + usb_free_coherent(priv->udev, RX_BUFFER_SIZE, + priv->rxbuf[i], priv->rxbuf_dma[i]); + usb_kill_anchored_urbs(&priv->tx_submitted); atomic_set(&priv->active_tx_urbs, 0); diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c index 9fdcc4bde480..5c54ae1be62c 100644 --- a/drivers/net/dsa/hirschmann/hellcreek.c +++ b/drivers/net/dsa/hirschmann/hellcreek.c @@ -912,6 +912,7 @@ static int hellcreek_fdb_dump(struct dsa_switch *ds, int port, { struct hellcreek *hellcreek = ds->priv; u16 entries; + int ret = 0; size_t i; mutex_lock(&hellcreek->reg_lock); @@ -943,12 +944,14 @@ static int hellcreek_fdb_dump(struct dsa_switch *ds, int port, if (!(entry.portmask & BIT(port))) continue; - cb(entry.mac, 0, entry.is_static, data); + ret = cb(entry.mac, 0, entry.is_static, data); + if (ret) + break; } mutex_unlock(&hellcreek->reg_lock); - return 0; + return ret; } static int hellcreek_vlan_filtering(struct dsa_switch *ds, int port, diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index 344374025426..d7ce281570b5 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -557,12 +557,12 @@ static int lan9303_alr_make_entry_raw(struct lan9303 *chip, u32 dat0, u32 dat1) return 0; } -typedef void alr_loop_cb_t(struct lan9303 *chip, u32 dat0, u32 dat1, - int portmap, void *ctx); +typedef int alr_loop_cb_t(struct lan9303 *chip, u32 dat0, u32 dat1, + int portmap, void *ctx); -static void lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx) +static int lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx) { - int i; + int ret = 0, i; 
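/* The ALR walk callback now returns int: a nonzero return aborts the walk below and is propagated to the caller, so dsa_fdb_dump_cb_t users can stop a dump early on error instead of silently continuing. */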
mutex_lock(&chip->alr_mutex); lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, @@ -582,13 +582,17 @@ static void lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx) LAN9303_ALR_DAT1_PORT_BITOFFS; portmap = alrport_2_portmap[alrport]; - cb(chip, dat0, dat1, portmap, ctx); + ret = cb(chip, dat0, dat1, portmap, ctx); + if (ret) + break; lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, LAN9303_ALR_CMD_GET_NEXT); lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, 0); } mutex_unlock(&chip->alr_mutex); + + return ret; } static void alr_reg_to_mac(u32 dat0, u32 dat1, u8 mac[6]) @@ -606,18 +610,20 @@ struct del_port_learned_ctx { }; /* Clear learned (non-static) entry on given port */ -static void alr_loop_cb_del_port_learned(struct lan9303 *chip, u32 dat0, - u32 dat1, int portmap, void *ctx) +static int alr_loop_cb_del_port_learned(struct lan9303 *chip, u32 dat0, + u32 dat1, int portmap, void *ctx) { struct del_port_learned_ctx *del_ctx = ctx; int port = del_ctx->port; if (((BIT(port) & portmap) == 0) || (dat1 & LAN9303_ALR_DAT1_STATIC)) - return; + return 0; /* learned entries has only one port, we can just delete */ dat1 &= ~LAN9303_ALR_DAT1_VALID; /* delete entry */ lan9303_alr_make_entry_raw(chip, dat0, dat1); + + return 0; } struct port_fdb_dump_ctx { @@ -626,19 +632,19 @@ struct port_fdb_dump_ctx { dsa_fdb_dump_cb_t *cb; }; -static void alr_loop_cb_fdb_port_dump(struct lan9303 *chip, u32 dat0, - u32 dat1, int portmap, void *ctx) +static int alr_loop_cb_fdb_port_dump(struct lan9303 *chip, u32 dat0, + u32 dat1, int portmap, void *ctx) { struct port_fdb_dump_ctx *dump_ctx = ctx; u8 mac[ETH_ALEN]; bool is_static; if ((BIT(dump_ctx->port) & portmap) == 0) - return; + return 0; alr_reg_to_mac(dat0, dat1, mac); is_static = !!(dat1 & LAN9303_ALR_DAT1_STATIC); - dump_ctx->cb(mac, 0, is_static, dump_ctx->data); + return dump_ctx->cb(mac, 0, is_static, dump_ctx->data); } /* Set a static ALR entry. 
Delete entry if port_map is zero */ @@ -1210,9 +1216,7 @@ static int lan9303_port_fdb_dump(struct dsa_switch *ds, int port, }; dev_dbg(chip->dev, "%s(%d)\n", __func__, port); - lan9303_alr_loop(chip, alr_loop_cb_fdb_port_dump, &dump_ctx); - - return 0; + return lan9303_alr_loop(chip, alr_loop_cb_fdb_port_dump, &dump_ctx); } static int lan9303_port_mdb_prepare(struct dsa_switch *ds, int port, diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c index 314ae78bbdd6..e78026ef6d8c 100644 --- a/drivers/net/dsa/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq_gswip.c @@ -1404,11 +1404,17 @@ static int gswip_port_fdb_dump(struct dsa_switch *ds, int port, addr[1] = mac_bridge.key[2] & 0xff; addr[0] = (mac_bridge.key[2] >> 8) & 0xff; if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC) { - if (mac_bridge.val[0] & BIT(port)) - cb(addr, 0, true, data); + if (mac_bridge.val[0] & BIT(port)) { + err = cb(addr, 0, true, data); + if (err) + return err; + } } else { - if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) == port) - cb(addr, 0, false, data); + if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) == port) { + err = cb(addr, 0, false, data); + if (err) + return err; + } } } return 0; diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c index 560f6843bb65..c5142f86a3c7 100644 --- a/drivers/net/dsa/microchip/ksz8795.c +++ b/drivers/net/dsa/microchip/ksz8795.c @@ -687,8 +687,8 @@ static void ksz8_r_vlan_entries(struct ksz_device *dev, u16 addr) shifts = ksz8->shifts; ksz8_r_table(dev, TABLE_VLAN, addr, &data); - addr *= dev->phy_port_cnt; - for (i = 0; i < dev->phy_port_cnt; i++) { + addr *= 4; + for (i = 0; i < 4; i++) { dev->vlan_cache[addr + i].table[0] = (u16)data; data >>= shifts[VLAN_TABLE]; } @@ -702,7 +702,7 @@ static void ksz8_r_vlan_table(struct ksz_device *dev, u16 vid, u16 *vlan) u64 buf; data = (u16 *)&buf; - addr = vid / dev->phy_port_cnt; + addr = vid / 4; index = vid & 3; ksz8_r_table(dev, TABLE_VLAN, addr, &buf); *vlan = data[index]; @@ -716,7 +716,7 @@ static void ksz8_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan) u64 buf; data = (u16 *)&buf; - addr = vid / dev->phy_port_cnt; + addr = vid / 4; index = vid & 3; ksz8_r_table(dev, TABLE_VLAN, addr, &buf); data[index] = vlan; @@ -1119,24 +1119,67 @@ static int ksz8_port_vlan_filtering(struct dsa_switch *ds, int port, bool flag, if (ksz_is_ksz88x3(dev)) return -ENOTSUPP; + /* Discard packets with VID not enabled on the switch */ ksz_cfg(dev, S_MIRROR_CTRL, SW_VLAN_ENABLE, flag); + /* Discard packets with VID not enabled on the ingress port */ + for (port = 0; port < dev->phy_port_cnt; ++port) + ksz_port_cfg(dev, port, REG_PORT_CTRL_2, PORT_INGRESS_FILTER, + flag); + return 0; } +static void ksz8_port_enable_pvid(struct ksz_device *dev, int port, bool state) +{ + if (ksz_is_ksz88x3(dev)) { + ksz_cfg(dev, REG_SW_INSERT_SRC_PVID, + 0x03 << (4 - 2 * port), state); + } else { + ksz_pwrite8(dev, port, REG_PORT_CTRL_12, state ? 
0x0f : 0x00); + } +} + static int ksz8_port_vlan_add(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan, struct netlink_ext_ack *extack) { bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; struct ksz_device *dev = ds->priv; + struct ksz_port *p = &dev->ports[port]; u16 data, new_pvid = 0; u8 fid, member, valid; if (ksz_is_ksz88x3(dev)) return -ENOTSUPP; - ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged); + /* If a VLAN is added with untagged flag different from the + * port's Remove Tag flag, we need to change the latter. + * Ignore VID 0, which is always untagged. + * Ignore CPU port, which will always be tagged. + */ + if (untagged != p->remove_tag && vlan->vid != 0 && + port != dev->cpu_port) { + unsigned int vid; + + /* Reject attempts to add a VLAN that requires the + * Remove Tag flag to be changed, unless there are no + * other VLANs currently configured. + */ + for (vid = 1; vid < dev->num_vlans; ++vid) { + /* Skip the VID we are going to add or reconfigure */ + if (vid == vlan->vid) + continue; + + ksz8_from_vlan(dev, dev->vlan_cache[vid].table[0], + &fid, &member, &valid); + if (valid && (member & BIT(port))) + return -EINVAL; + } + + ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged); + p->remove_tag = untagged; + } ksz8_r_vlan_table(dev, vlan->vid, &data); ksz8_from_vlan(dev, data, &fid, &member, &valid); @@ -1160,9 +1203,11 @@ static int ksz8_port_vlan_add(struct dsa_switch *ds, int port, u16 vid; ksz_pread16(dev, port, REG_PORT_CTRL_VID, &vid); - vid &= 0xfff; + vid &= ~VLAN_VID_MASK; vid |= new_pvid; ksz_pwrite16(dev, port, REG_PORT_CTRL_VID, vid); + + ksz8_port_enable_pvid(dev, port, true); } return 0; @@ -1171,9 +1216,8 @@ static int ksz8_port_vlan_add(struct dsa_switch *ds, int port, static int ksz8_port_vlan_del(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan) { - bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; struct ksz_device *dev = ds->priv; - u16 data, pvid, new_pvid = 0; + u16 data, pvid; u8 fid, member, valid; if (ksz_is_ksz88x3(dev)) @@ -1182,8 +1226,6 @@ static int ksz8_port_vlan_del(struct dsa_switch *ds, int port, ksz_pread16(dev, port, REG_PORT_CTRL_VID, &pvid); pvid = pvid & 0xFFF; - ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged); - ksz8_r_vlan_table(dev, vlan->vid, &data); ksz8_from_vlan(dev, data, &fid, &member, &valid); @@ -1195,14 +1237,11 @@ static int ksz8_port_vlan_del(struct dsa_switch *ds, int port, valid = 0; } - if (pvid == vlan->vid) - new_pvid = 1; - ksz8_to_vlan(dev, fid, member, valid, &data); ksz8_w_vlan_table(dev, vlan->vid, data); - if (new_pvid != pvid) - ksz_pwrite16(dev, port, REG_PORT_CTRL_VID, pvid); + if (pvid == vlan->vid) + ksz8_port_enable_pvid(dev, port, false); return 0; } @@ -1435,6 +1474,9 @@ static int ksz8_setup(struct dsa_switch *ds) ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false); + if (!ksz_is_ksz88x3(dev)) + ksz_cfg(dev, REG_SW_CTRL_19, SW_INS_TAG_ENABLE, true); + /* set broadcast storm protection 10% rate */ regmap_update_bits(dev->regmap[1], S_REPLACE_VID_CTRL, BROADCAST_STORM_RATE, @@ -1717,6 +1759,16 @@ static int ksz8_switch_init(struct ksz_device *dev) /* set the real number of ports */ dev->ds->num_ports = dev->port_cnt; + /* We rely on software untagging on the CPU port, so that we + * can support both tagged and untagged VLANs + */ + dev->ds->untag_bridge_pvid = true; + + /* VLAN filtering is partly controlled by the global VLAN + * Enable flag + */ + dev->ds->vlan_filtering_is_global = true; + 
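/* Together, untag_bridge_pvid and vlan_filtering_is_global tell DSA to untag bridge-PVID frames in software on the CPU port and to treat VLAN filtering as a switch-wide setting, matching the single global VLAN Enable bit this family provides. */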
return 0; } diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8795_reg.h index a32355624f31..6b40bc25f7ff 100644 --- a/drivers/net/dsa/microchip/ksz8795_reg.h +++ b/drivers/net/dsa/microchip/ksz8795_reg.h @@ -631,6 +631,10 @@ #define REG_PORT_4_OUT_RATE_3 0xEE #define REG_PORT_5_OUT_RATE_3 0xFE +/* 88x3 specific */ + +#define REG_SW_INSERT_SRC_PVID 0xC2 + /* PME */ #define SW_PME_OUTPUT_ENABLE BIT(1) diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index a7e5ac60baef..1542bfb8b5e5 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -419,8 +419,10 @@ int ksz_switch_register(struct ksz_device *dev, if (of_property_read_u32(port, "reg", &port_num)) continue; - if (!(dev->port_mask & BIT(port_num))) + if (!(dev->port_mask & BIT(port_num))) { + of_node_put(port); return -EINVAL; + } of_get_phy_mode(port, &dev->ports[port_num].interface); } diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index 2e6bfd333f50..1597c63988b4 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -27,6 +27,7 @@ struct ksz_port_mib { struct ksz_port { u16 member; u16 vid_member; + bool remove_tag; /* Remove Tag flag set, for ksz8795 only */ int stp_state; struct phy_device phydev; @@ -205,12 +206,8 @@ static inline int ksz_read64(struct ksz_device *dev, u32 reg, u64 *val) int ret; ret = regmap_bulk_read(dev->regmap[2], reg, value, 2); - if (!ret) { - /* Ick! ToDo: Add 64bit R/W to regmap on 32bit systems */ - value[0] = swab32(value[0]); - value[1] = swab32(value[1]); - *val = swab64((u64)*value); - } + if (!ret) + *val = (u64)value[0] << 32 | value[1]; return ret; } diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 93136f7e69f5..632f0fcc5aa7 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -47,6 +47,7 @@ static const struct mt7530_mib_desc mt7530_mib[] = { MIB_DESC(2, 0x48, "TxBytes"), MIB_DESC(1, 0x60, "RxDrop"), MIB_DESC(1, 0x64, "RxFiltering"), + MIB_DESC(1, 0x68, "RxUnicast"), MIB_DESC(1, 0x6c, "RxMulticast"), MIB_DESC(1, 0x70, "RxBroadcast"), MIB_DESC(1, 0x74, "RxAlignErr"), @@ -366,6 +367,8 @@ mt7530_fdb_write(struct mt7530_priv *priv, u16 vid, int i; reg[1] |= vid & CVID_MASK; + if (vid > 1) + reg[1] |= ATA2_IVL; reg[2] |= (aging & AGE_TIMER_MASK) << AGE_TIMER; reg[2] |= (port_mask & PORT_MAP_MASK) << PORT_MAP; /* STATIC_ENT indicate that entry is static wouldn't diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h index 334d610a503d..b19b389ff10a 100644 --- a/drivers/net/dsa/mt7530.h +++ b/drivers/net/dsa/mt7530.h @@ -79,6 +79,7 @@ enum mt753x_bpdu_port_fw { #define STATIC_EMP 0 #define STATIC_ENT 3 #define MT7530_ATA2 0x78 +#define ATA2_IVL BIT(15) /* Register for address table write data */ #define MT7530_ATWD 0x7c diff --git a/drivers/net/dsa/mv88e6xxx/Kconfig b/drivers/net/dsa/mv88e6xxx/Kconfig index 05af632b0f59..634a48e6616b 100644 --- a/drivers/net/dsa/mv88e6xxx/Kconfig +++ b/drivers/net/dsa/mv88e6xxx/Kconfig @@ -12,7 +12,7 @@ config NET_DSA_MV88E6XXX config NET_DSA_MV88E6XXX_PTP bool "PTP support for Marvell 88E6xxx" default n - depends on PTP_1588_CLOCK + depends on NET_DSA_MV88E6XXX && PTP_1588_CLOCK help Say Y to enable PTP hardware timestamping on Marvell 88E6xxx switch chips that support it. 
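The ksz_read64() hunk above drops the swab32()/swab64() sequence in favour of a single shift-or: regmap_bulk_read() hands back the two 32-bit words most-significant first, so the 64-bit value is simply value[0] in the high half and value[1] in the low half. Below is a minimal user-space sketch of that composition, with hypothetical register words chosen only to make the result easy to check; it illustrates the arithmetic and is not kernel code.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical words, ordered as regmap_bulk_read() returns them:
	 * most significant 32 bits first.
	 */
	uint32_t value[2] = { 0x11223344, 0x55667788 };

	/* Same expression as the reworked ksz_read64(). */
	uint64_t val = (uint64_t)value[0] << 32 | value[1];

	printf("0x%016llx\n", (unsigned long long)val); /* 0x1122334455667788 */
	return 0;
}

The shift-or also gives the same result regardless of host endianness, which avoids the byte-swapping that the removed "Ick! ToDo" comment apologized for.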
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 961fa6b75cad..272b0535d946 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2155,7 +2155,7 @@ static int mv88e6xxx_port_vlan_leave(struct mv88e6xxx_chip *chip, int i, err; if (!vid) - return -EOPNOTSUPP; + return 0; err = mv88e6xxx_vtu_get(chip, vid, &vlan); if (err) @@ -3583,6 +3583,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .port_set_speed_duplex = mv88e6341_port_set_speed_duplex, .port_max_speed_mode = mv88e6341_port_max_speed_mode, .port_tag_remap = mv88e6095_port_tag_remap, + .port_set_policy = mv88e6352_port_set_policy, .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_ucast_flood = mv88e6352_port_set_ucast_flood, .port_set_mcast_flood = mv88e6352_port_set_mcast_flood, @@ -3596,7 +3597,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .port_set_cmode = mv88e6341_port_set_cmode, .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6390_g1_stats_snapshot, - .stats_set_histogram = mv88e6095_g1_stats_set_histogram, + .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, .stats_get_strings = mv88e6320_stats_get_strings, .stats_get_stats = mv88e6390_stats_get_stats, @@ -3606,6 +3607,9 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, + .rmu_disable = mv88e6390_g1_rmu_disable, + .atu_get_hash = mv88e6165_g1_atu_get_hash, + .atu_set_hash = mv88e6165_g1_atu_set_hash, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, @@ -3619,6 +3623,11 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .serdes_irq_enable = mv88e6390_serdes_irq_enable, .serdes_irq_status = mv88e6390_serdes_irq_status, .gpio_ops = &mv88e6352_gpio_ops, + .serdes_get_sset_count = mv88e6390_serdes_get_sset_count, + .serdes_get_strings = mv88e6390_serdes_get_strings, + .serdes_get_stats = mv88e6390_serdes_get_stats, + .serdes_get_regs_len = mv88e6390_serdes_get_regs_len, + .serdes_get_regs = mv88e6390_serdes_get_regs, .phylink_validate = mv88e6341_phylink_validate, }; @@ -4383,6 +4392,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .port_set_speed_duplex = mv88e6341_port_set_speed_duplex, .port_max_speed_mode = mv88e6341_port_max_speed_mode, .port_tag_remap = mv88e6095_port_tag_remap, + .port_set_policy = mv88e6352_port_set_policy, .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_ucast_flood = mv88e6352_port_set_ucast_flood, .port_set_mcast_flood = mv88e6352_port_set_mcast_flood, @@ -4396,7 +4406,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .port_set_cmode = mv88e6341_port_set_cmode, .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6390_g1_stats_snapshot, - .stats_set_histogram = mv88e6095_g1_stats_set_histogram, + .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, .stats_get_strings = mv88e6320_stats_get_strings, .stats_get_stats = mv88e6390_stats_get_stats, @@ -4406,6 +4416,9 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, + .rmu_disable = mv88e6390_g1_rmu_disable, + .atu_get_hash = mv88e6165_g1_atu_get_hash, + .atu_set_hash = 
mv88e6165_g1_atu_set_hash, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, @@ -4421,6 +4434,11 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, + .serdes_get_sset_count = mv88e6390_serdes_get_sset_count, + .serdes_get_strings = mv88e6390_serdes_get_strings, + .serdes_get_stats = mv88e6390_serdes_get_stats, + .serdes_get_regs_len = mv88e6390_serdes_get_regs_len, + .serdes_get_regs = mv88e6390_serdes_get_regs, .phylink_validate = mv88e6341_phylink_validate, }; diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c index e4fbef81bc52..b1d46dd8eaab 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.c +++ b/drivers/net/dsa/mv88e6xxx/serdes.c @@ -722,7 +722,7 @@ static struct mv88e6390_serdes_hw_stat mv88e6390_serdes_hw_stats[] = { int mv88e6390_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port) { - if (mv88e6390_serdes_get_lane(chip, port) < 0) + if (mv88e6xxx_serdes_get_lane(chip, port) < 0) return 0; return ARRAY_SIZE(mv88e6390_serdes_hw_stats); @@ -734,7 +734,7 @@ int mv88e6390_serdes_get_strings(struct mv88e6xxx_chip *chip, struct mv88e6390_serdes_hw_stat *stat; int i; - if (mv88e6390_serdes_get_lane(chip, port) < 0) + if (mv88e6xxx_serdes_get_lane(chip, port) < 0) return 0; for (i = 0; i < ARRAY_SIZE(mv88e6390_serdes_hw_stats); i++) { @@ -770,7 +770,7 @@ int mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port, int lane; int i; - lane = mv88e6390_serdes_get_lane(chip, port); + lane = mv88e6xxx_serdes_get_lane(chip, port); if (lane < 0) return 0; diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c index ca2ad77b71f1..563d8a279030 100644 --- a/drivers/net/dsa/qca/ar9331.c +++ b/drivers/net/dsa/qca/ar9331.c @@ -101,6 +101,23 @@ AR9331_SW_PORT_STATUS_RX_FLOW_EN | AR9331_SW_PORT_STATUS_TX_FLOW_EN | \ AR9331_SW_PORT_STATUS_SPEED_M) +#define AR9331_SW_REG_PORT_CTRL(_port) (0x104 + (_port) * 0x100) +#define AR9331_SW_PORT_CTRL_HEAD_EN BIT(11) +#define AR9331_SW_PORT_CTRL_PORT_STATE GENMASK(2, 0) +#define AR9331_SW_PORT_CTRL_PORT_STATE_DISABLED 0 +#define AR9331_SW_PORT_CTRL_PORT_STATE_BLOCKING 1 +#define AR9331_SW_PORT_CTRL_PORT_STATE_LISTENING 2 +#define AR9331_SW_PORT_CTRL_PORT_STATE_LEARNING 3 +#define AR9331_SW_PORT_CTRL_PORT_STATE_FORWARD 4 + +#define AR9331_SW_REG_PORT_VLAN(_port) (0x108 + (_port) * 0x100) +#define AR9331_SW_PORT_VLAN_8021Q_MODE GENMASK(31, 30) +#define AR9331_SW_8021Q_MODE_SECURE 3 +#define AR9331_SW_8021Q_MODE_CHECK 2 +#define AR9331_SW_8021Q_MODE_FALLBACK 1 +#define AR9331_SW_8021Q_MODE_NONE 0 +#define AR9331_SW_PORT_VLAN_PORT_VID_MEMBER GENMASK(25, 16) + /* MIB registers */ #define AR9331_MIB_COUNTER(x) (0x20000 + ((x) * 0x100)) @@ -371,12 +388,60 @@ static int ar9331_sw_mbus_init(struct ar9331_sw_priv *priv) return 0; } -static int ar9331_sw_setup(struct dsa_switch *ds) +static int ar9331_sw_setup_port(struct dsa_switch *ds, int port) { struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv; struct regmap *regmap = priv->regmap; + u32 port_mask, port_ctrl, val; int ret; + /* Generate default port settings */ + port_ctrl = FIELD_PREP(AR9331_SW_PORT_CTRL_PORT_STATE, + AR9331_SW_PORT_CTRL_PORT_STATE_FORWARD); + + if (dsa_is_cpu_port(ds, port)) { + /* CPU port should be allowed to communicate with all user + * ports. + */ + port_mask = dsa_user_ports(ds); + /* Enable Atheros header on CPU port. 
This will allow us + * communicate with each port separately + */ + port_ctrl |= AR9331_SW_PORT_CTRL_HEAD_EN; + } else if (dsa_is_user_port(ds, port)) { + /* User ports should communicate only with the CPU port. + */ + port_mask = BIT(dsa_upstream_port(ds, port)); + } else { + /* Other ports do not need to communicate at all */ + port_mask = 0; + } + + val = FIELD_PREP(AR9331_SW_PORT_VLAN_8021Q_MODE, + AR9331_SW_8021Q_MODE_NONE) | + FIELD_PREP(AR9331_SW_PORT_VLAN_PORT_VID_MEMBER, port_mask); + + ret = regmap_write(regmap, AR9331_SW_REG_PORT_VLAN(port), val); + if (ret) + goto error; + + ret = regmap_write(regmap, AR9331_SW_REG_PORT_CTRL(port), port_ctrl); + if (ret) + goto error; + + return 0; +error: + dev_err(priv->dev, "%s: error: %i\n", __func__, ret); + + return ret; +} + +static int ar9331_sw_setup(struct dsa_switch *ds) +{ + struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv; + struct regmap *regmap = priv->regmap; + int ret, i; + ret = ar9331_sw_reset(priv); if (ret) return ret; @@ -402,6 +467,12 @@ static int ar9331_sw_setup(struct dsa_switch *ds) if (ret) goto error; + for (i = 0; i < ds->num_ports; i++) { + ret = ar9331_sw_setup_port(ds, i); + if (ret) + goto error; + } + ds->configure_vlan_while_not_filtering = false; return 0; @@ -837,16 +908,24 @@ static int ar9331_mdio_write(void *ctx, u32 reg, u32 val) return 0; } - ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_REG, reg, val); + /* In case of this switch we work with 32bit registers on top of 16bit + * bus. Some registers (for example access to forwarding database) have + * trigger bit on the first 16bit half of request, the result and + * configuration of request in the second half. + * To make it work properly, we should do the second part of transfer + * before the first one is done. + */ + ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_REG, reg + 2, + val >> 16); if (ret < 0) goto error; - ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_REG, reg + 2, - val >> 16); + ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_REG, reg, val); if (ret < 0) goto error; return 0; + error: dev_err_ratelimited(&sbus->dev, "Bus error. Failed to write register.\n"); return ret; diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c index 56fead68ea9f..147709131c13 100644 --- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c +++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c @@ -304,6 +304,15 @@ sja1105pqrs_common_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd, hostcmd = SJA1105_HOSTCMD_INVALIDATE; } sja1105_packing(p, &hostcmd, 25, 23, size, op); +} + +static void +sja1105pqrs_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd, + enum packing_op op) +{ + int entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY; + + sja1105pqrs_common_l2_lookup_cmd_packing(buf, cmd, op, entry_size); /* Hack - The hardware takes the 'index' field within * struct sja1105_l2_lookup_entry as the index on which this command @@ -313,26 +322,18 @@ sja1105pqrs_common_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd, * such that our API doesn't need to ask for a full-blown entry * structure when e.g. a delete is requested. 
- sja1105_packing(buf, &cmd->index, 15, 6, - SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY, op); -} - -static void -sja1105pqrs_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd, - enum packing_op op) -{ - int size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY; - - return sja1105pqrs_common_l2_lookup_cmd_packing(buf, cmd, op, size); + sja1105_packing(buf, &cmd->index, 15, 6, entry_size, op); } static void sja1110_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd, enum packing_op op) { - int size = SJA1110_SIZE_L2_LOOKUP_ENTRY; + int entry_size = SJA1110_SIZE_L2_LOOKUP_ENTRY; + + sja1105pqrs_common_l2_lookup_cmd_packing(buf, cmd, op, entry_size); - return sja1105pqrs_common_l2_lookup_cmd_packing(buf, cmd, op, size); + sja1105_packing(buf, &cmd->index, 10, 1, entry_size, op); } /* The switch is so dumb that it makes our command/entry abstraction diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index 4f0545605f6b..49eb0ac41b7d 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -122,14 +122,12 @@ static int sja1105_init_mac_settings(struct sja1105_private *priv) for (i = 0; i < ds->num_ports; i++) { mac[i] = default_mac; - if (i == dsa_upstream_port(priv->ds, i)) { - /* STP doesn't get called for CPU port, so we need to - * set the I/O parameters statically. - */ - mac[i].dyn_learn = true; - mac[i].ingress = true; - mac[i].egress = true; - } + + /* Let sja1105_bridge_stp_state_set() keep address learning + * enabled for the CPU port. + */ + if (dsa_is_cpu_port(ds, i)) + priv->learn_ena |= BIT(i); } return 0; @@ -399,6 +397,12 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv) if (dsa_is_cpu_port(ds, port)) v->pvid = true; list_add(&v->list, &priv->dsa_8021q_vlans); + + v = kmemdup(v, sizeof(*v), GFP_KERNEL); + if (!v) + return -ENOMEM; + + list_add(&v->list, &priv->bridge_vlans); } ((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid; @@ -1314,10 +1318,11 @@ static int sja1105et_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin, int sja1105et_fdb_add(struct dsa_switch *ds, int port, const unsigned char *addr, u16 vid) { - struct sja1105_l2_lookup_entry l2_lookup = {0}; + struct sja1105_l2_lookup_entry l2_lookup = {0}, tmp; struct sja1105_private *priv = ds->priv; struct device *dev = ds->dev; int last_unused = -1; + int start, end, i; int bin, way, rc; bin = sja1105et_fdb_hash(priv, addr, vid); @@ -1329,7 +1334,7 @@ int sja1105et_fdb_add(struct dsa_switch *ds, int port, * mask? If yes, we need to do nothing. If not, we need * to rewrite the entry by adding this port to it. 
*/ - if (l2_lookup.destports & BIT(port)) + if ((l2_lookup.destports & BIT(port)) && l2_lookup.lockeds) return 0; l2_lookup.destports |= BIT(port); } else { @@ -1360,6 +1365,7 @@ int sja1105et_fdb_add(struct dsa_switch *ds, int port, index, NULL, false); } } + l2_lookup.lockeds = true; l2_lookup.index = sja1105et_fdb_index(bin, way); rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, @@ -1368,6 +1374,29 @@ int sja1105et_fdb_add(struct dsa_switch *ds, int port, if (rc < 0) return rc; + /* Invalidate a dynamically learned entry if that exists */ + start = sja1105et_fdb_index(bin, 0); + end = sja1105et_fdb_index(bin, way); + + for (i = start; i < end; i++) { + rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, + i, &tmp); + if (rc == -ENOENT) + continue; + if (rc) + return rc; + + if (tmp.macaddr != ether_addr_to_u64(addr) || tmp.vlanid != vid) + continue; + + rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, + i, NULL, false); + if (rc) + return rc; + + break; + } + return sja1105_static_fdb_change(priv, port, &l2_lookup, true); } @@ -1409,32 +1438,30 @@ int sja1105et_fdb_del(struct dsa_switch *ds, int port, int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port, const unsigned char *addr, u16 vid) { - struct sja1105_l2_lookup_entry l2_lookup = {0}; + struct sja1105_l2_lookup_entry l2_lookup = {0}, tmp; struct sja1105_private *priv = ds->priv; int rc, i; /* Search for an existing entry in the FDB table */ l2_lookup.macaddr = ether_addr_to_u64(addr); l2_lookup.vlanid = vid; - l2_lookup.iotag = SJA1105_S_TAG; l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0); - if (priv->vlan_state != SJA1105_VLAN_UNAWARE) { - l2_lookup.mask_vlanid = VLAN_VID_MASK; - l2_lookup.mask_iotag = BIT(0); - } else { - l2_lookup.mask_vlanid = 0; - l2_lookup.mask_iotag = 0; - } + l2_lookup.mask_vlanid = VLAN_VID_MASK; l2_lookup.destports = BIT(port); + tmp = l2_lookup; + rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, - SJA1105_SEARCH, &l2_lookup); - if (rc == 0) { - /* Found and this port is already in the entry's + SJA1105_SEARCH, &tmp); + if (rc == 0 && tmp.index != SJA1105_MAX_L2_LOOKUP_COUNT - 1) { + /* Found a static entry and this port is already in the entry's * port mask => job done */ - if (l2_lookup.destports & BIT(port)) + if ((tmp.destports & BIT(port)) && tmp.lockeds) return 0; + + l2_lookup = tmp; + /* l2_lookup.index is populated by the switch in case it * found something. */ @@ -1456,16 +1483,46 @@ int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port, dev_err(ds->dev, "FDB is full, cannot add entry.\n"); return -EINVAL; } - l2_lookup.lockeds = true; l2_lookup.index = i; skip_finding_an_index: + l2_lookup.lockeds = true; + rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, l2_lookup.index, &l2_lookup, true); if (rc < 0) return rc; + /* The switch learns dynamic entries and looks up the FDB left to + * right. It is possible that our addition was concurrent with the + * dynamic learning of the same address, so now that the static entry + * has been installed, we are certain that address learning for this + * particular address has been turned off, so the dynamic entry either + * is in the FDB at an index smaller than the static one, or isn't (it + * can also be at a larger index, but in that case it is inactive + * because the static FDB entry will match first, and the dynamic one + * will eventually age out). Search for a dynamically learned address + * prior to our static one and invalidate it. 
+ */ + tmp = l2_lookup; + + rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, + SJA1105_SEARCH, &tmp); + if (rc < 0) { + dev_err(ds->dev, + "port %d failed to read back entry for %pM vid %d: %pe\n", + port, addr, vid, ERR_PTR(rc)); + return rc; + } + + if (tmp.index < l2_lookup.index) { + rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, + tmp.index, NULL, false); + if (rc < 0) + return rc; + } + return sja1105_static_fdb_change(priv, port, &l2_lookup, true); } @@ -1479,15 +1536,8 @@ int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port, l2_lookup.macaddr = ether_addr_to_u64(addr); l2_lookup.vlanid = vid; - l2_lookup.iotag = SJA1105_S_TAG; l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0); - if (priv->vlan_state != SJA1105_VLAN_UNAWARE) { - l2_lookup.mask_vlanid = VLAN_VID_MASK; - l2_lookup.mask_iotag = BIT(0); - } else { - l2_lookup.mask_vlanid = 0; - l2_lookup.mask_iotag = 0; - } + l2_lookup.mask_vlanid = VLAN_VID_MASK; l2_lookup.destports = BIT(port); rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, @@ -1585,7 +1635,9 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port, /* We need to hide the dsa_8021q VLANs from the user. */ if (priv->vlan_state == SJA1105_VLAN_UNAWARE) l2_lookup.vlanid = 0; - cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data); + rc = cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data); + if (rc) + return rc; } return 0; } @@ -3135,6 +3187,7 @@ static void sja1105_teardown(struct dsa_switch *ds) } sja1105_devlink_teardown(ds); + sja1105_mdiobus_unregister(ds); sja1105_flower_teardown(ds); sja1105_tas_teardown(ds); sja1105_ptp_clock_unregister(ds); diff --git a/drivers/net/dsa/sja1105/sja1105_mdio.c b/drivers/net/dsa/sja1105/sja1105_mdio.c index 19aea8fb76f6..705d3900e43a 100644 --- a/drivers/net/dsa/sja1105/sja1105_mdio.c +++ b/drivers/net/dsa/sja1105/sja1105_mdio.c @@ -284,8 +284,7 @@ static int sja1105_mdiobus_base_tx_register(struct sja1105_private *priv, struct mii_bus *bus; int rc = 0; - np = of_find_compatible_node(mdio_node, NULL, - "nxp,sja1110-base-tx-mdio"); + np = of_get_compatible_child(mdio_node, "nxp,sja1110-base-tx-mdio"); if (!np) return 0; @@ -339,8 +338,7 @@ static int sja1105_mdiobus_base_t1_register(struct sja1105_private *priv, struct mii_bus *bus; int rc = 0; - np = of_find_compatible_node(mdio_node, NULL, - "nxp,sja1110-base-t1-mdio"); + np = of_get_compatible_child(mdio_node, "nxp,sja1110-base-t1-mdio"); if (!np) return 0; diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c index 7dff20350865..f19370c33444 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c @@ -594,6 +594,11 @@ int atl1c_phy_init(struct atl1c_hw *hw) int ret_val; u16 mii_bmcr_data = BMCR_RESET; + if (hw->nic_type == athr_mt) { + hw->phy_configured = true; + return 0; + } + if ((atl1c_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id1) != 0) || (atl1c_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id2) != 0)) { dev_err(&pdev->dev, "Error get phy ID\n"); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 1a6ec1a12d53..b5d954cb409a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -2669,7 +2669,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) } /* Allocated memory for FW statistics */ - if (bnx2x_alloc_fw_stats_mem(bp)) + rc = bnx2x_alloc_fw_stats_mem(bp); + if (rc) LOAD_ERROR_EXIT(bp, 
load_error0); /* request pf to initialize status blocks */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index f56245eeef7b..8a97640cdfe7 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -72,7 +72,8 @@ #include "bnxt_debugfs.h" #define BNXT_TX_TIMEOUT (5 * HZ) -#define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW) +#define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \ + NETIF_MSG_TX_ERR) MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Broadcom BCM573xx network driver"); @@ -365,6 +366,33 @@ static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb) return md_dst->u.port_info.port_id; } +static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr, + u16 prod) +{ + bnxt_db_write(bp, &txr->tx_db, prod); + txr->kick_pending = 0; +} + +static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp, + struct bnxt_tx_ring_info *txr, + struct netdev_queue *txq) +{ + netif_tx_stop_queue(txq); + + /* netif_tx_stop_queue() must be done before checking + * tx index in bnxt_tx_avail() below, because in + * bnxt_tx_int(), we update tx index before checking for + * netif_tx_queue_stopped(). + */ + smp_mb(); + if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) { + netif_tx_wake_queue(txq); + return false; + } + + return true; +} + static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct bnxt *bp = netdev_priv(dev); @@ -384,6 +412,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) i = skb_get_queue_mapping(skb); if (unlikely(i >= bp->tx_nr_rings)) { dev_kfree_skb_any(skb); + atomic_long_inc(&dev->tx_dropped); return NETDEV_TX_OK; } @@ -393,8 +422,12 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) free_size = bnxt_tx_avail(bp, txr); if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) { - netif_tx_stop_queue(txq); - return NETDEV_TX_BUSY; + /* We must have raced with NAPI cleanup */ + if (net_ratelimit() && txr->kick_pending) + netif_warn(bp, tx_err, dev, + "bnxt: ring busy w/ flush pending!\n"); + if (bnxt_txr_netif_try_stop_queue(bp, txr, txq)) + return NETDEV_TX_BUSY; } length = skb->len; @@ -426,7 +459,10 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) && atomic_dec_if_positive(&ptp->tx_avail) >= 0) { - if (!bnxt_ptp_parse(skb, &ptp->tx_seqid)) { + if (!bnxt_ptp_parse(skb, &ptp->tx_seqid, + &ptp->tx_hdr_off)) { + if (vlan_tag_flags) + ptp->tx_hdr_off += VLAN_HLEN; lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP); skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; } else { @@ -514,21 +550,16 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) normal_tx: if (length < BNXT_MIN_PKT_SIZE) { pad = BNXT_MIN_PKT_SIZE - length; - if (skb_pad(skb, pad)) { + if (skb_pad(skb, pad)) /* SKB already freed. */
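
The bnxt_txr_netif_try_stop_queue() helper introduced above is the textbook stop/wake protocol for a lockless producer/consumer ring: stop first, full barrier, then re-check occupancy, so that either the xmit path sees the slots freed by the completion path, or the completion path sees the stopped queue and wakes it. A userspace model of the same protocol, with C11 atomics standing in for smp_mb() (all names invented):

	#include <stdatomic.h>
	#include <stdbool.h>

	struct txq_model {
		atomic_uint prod;	/* advanced by the xmit path */
		atomic_uint cons;	/* advanced by the completion path */
		atomic_bool stopped;	/* netif_tx_queue_stopped() stand-in */
		unsigned int size;
	};

	static unsigned int txq_avail(struct txq_model *q)
	{
		return q->size - (atomic_load(&q->prod) - atomic_load(&q->cons));
	}

	/* xmit side, mirroring bnxt_txr_netif_try_stop_queue() */
	static bool txq_try_stop(struct txq_model *q, unsigned int wake_thresh)
	{
		atomic_store(&q->stopped, true);
		/* pairs with the fence in txq_complete(): either we observe
		 * the freed slots, or the completion path observes 'stopped'
		 */
		atomic_thread_fence(memory_order_seq_cst);
		if (txq_avail(q) > wake_thresh) {
			atomic_store(&q->stopped, false);	/* wake */
			return false;
		}
		return true;	/* stay stopped; caller backs off */
	}

	/* completion side, mirroring bnxt_tx_int() */
	static void txq_complete(struct txq_model *q, unsigned int done,
				 unsigned int wake_thresh)
	{
		atomic_fetch_add(&q->cons, done);
		atomic_thread_fence(memory_order_seq_cst);
		if (atomic_load(&q->stopped) && txq_avail(q) > wake_thresh)
			atomic_store(&q->stopped, false);	/* wake queue */
	}

Without the fences, the stopped-flag store and the index re-read could be reordered against the completion path's index update and flag check, and both sides could conclude the other will act, leaving the queue stopped forever.
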
- tx_buf->skb = NULL; - return NETDEV_TX_OK; - } + goto tx_kick_pending; length = BNXT_MIN_PKT_SIZE; } mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(&pdev->dev, mapping))) { - dev_kfree_skb_any(skb); - tx_buf->skb = NULL; - return NETDEV_TX_OK; - } + if (unlikely(dma_mapping_error(&pdev->dev, mapping))) + goto tx_free; dma_unmap_addr_set(tx_buf, mapping, mapping); flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD | @@ -615,24 +646,17 @@ normal_tx: txr->tx_prod = prod; if (!netdev_xmit_more() || netif_xmit_stopped(txq)) - bnxt_db_write(bp, &txr->tx_db, prod); + bnxt_txr_db_kick(bp, txr, prod); + else + txr->kick_pending = 1; tx_done: if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) { if (netdev_xmit_more() && !tx_buf->is_push) - bnxt_db_write(bp, &txr->tx_db, prod); - - netif_tx_stop_queue(txq); + bnxt_txr_db_kick(bp, txr, prod); - /* netif_tx_stop_queue() must be done before checking - * tx index in bnxt_tx_avail() below, because in - * bnxt_tx_int(), we update tx index before checking for - * netif_tx_queue_stopped(). - */ - smp_mb(); - if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) - netif_tx_wake_queue(txq); + bnxt_txr_netif_try_stop_queue(bp, txr, txq); } return NETDEV_TX_OK; @@ -645,7 +669,6 @@ tx_dma_error: /* start back at beginning and unmap skb */ prod = txr->tx_prod; tx_buf = &txr->tx_buf_ring[prod]; - tx_buf->skb = NULL; dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), skb_headlen(skb), PCI_DMA_TODEVICE); prod = NEXT_TX(prod); @@ -659,7 +682,13 @@ tx_dma_error: PCI_DMA_TODEVICE); } +tx_free: dev_kfree_skb_any(skb); +tx_kick_pending: + if (txr->kick_pending) + bnxt_txr_db_kick(bp, txr, txr->tx_prod); + txr->tx_buf_ring[txr->tx_prod].skb = NULL; + atomic_long_inc(&dev->tx_dropped); return NETDEV_TX_OK; } @@ -729,14 +758,9 @@ next_tx_int: smp_mb(); if (unlikely(netif_tx_queue_stopped(txq)) && - (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) { - __netif_tx_lock(txq, smp_processor_id()); - if (netif_tx_queue_stopped(txq) && - bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh && - txr->dev_state != BNXT_DEV_STATE_CLOSING) - netif_tx_wake_queue(txq); - __netif_tx_unlock(txq); - } + bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh && + READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING) + netif_tx_wake_queue(txq); } static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, @@ -1671,11 +1695,16 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) && (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) { - u16 vlan_proto = tpa_info->metadata >> - RX_CMP_FLAGS2_METADATA_TPID_SFT; + __be16 vlan_proto = htons(tpa_info->metadata >> + RX_CMP_FLAGS2_METADATA_TPID_SFT); u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK; - __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); + if (eth_type_vlan(vlan_proto)) { + __vlan_hwaccel_put_tag(skb, vlan_proto, vtag); + } else { + dev_kfree_skb(skb); + return NULL; + } } skb_checksum_none_assert(skb); @@ -1759,6 +1788,10 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) return -EBUSY; + /* The valid test of the entry must be done first before + * reading any further. + */
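
The comment just above, together with the dma_rmb() that opens the next hunk, is the standard valid-bit protocol for DMA completion rings: the device writes the payload first and the valid bit last, so the CPU must order its reads the same way. A self-contained model of the consumer side, with a C11 acquire load standing in for the valid check plus dma_rmb() (descriptor layout invented for illustration):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>

	struct cmpl_desc {
		uint32_t data0;		/* payload, written by the device first */
		uint32_t data1;
		_Atomic uint32_t flags;	/* VALID bit, written by the device last */
	};

	#define CMPL_VALID 0x1u

	/* The valid test must complete before any other field of the
	 * descriptor is read; the acquire load plays the role of dma_rmb().
	 */
	static bool cmpl_read(const struct cmpl_desc *d, uint32_t *out0, uint32_t *out1)
	{
		if (!(atomic_load_explicit(&d->flags, memory_order_acquire) & CMPL_VALID))
			return false;	/* not ours yet; poll again later */

		*out0 = d->data0;	/* cannot be hoisted above the check */
		*out1 = d->data1;
		return true;
	}
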
+ dma_rmb(); prod = rxr->rx_prod; if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) { @@ -1897,9 +1930,15 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) { u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK; - u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT; + __be16 vlan_proto = htons(meta_data >> + RX_CMP_FLAGS2_METADATA_TPID_SFT); - __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); + if (eth_type_vlan(vlan_proto)) { + __vlan_hwaccel_put_tag(skb, vlan_proto, vtag); + } else { + dev_kfree_skb(skb); + goto next_rx; + } } skb_checksum_none_assert(skb); @@ -1975,6 +2014,10 @@ static int bnxt_force_rx_discard(struct bnxt *bp, if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) return -EBUSY; + /* The valid test of the entry must be done first before + * reading any further. + */ + dma_rmb(); cmp_type = RX_CMP_TYPE(rxcmp); if (cmp_type == CMP_TYPE_RX_L2_CMP) { rxcmp1->rx_cmp_cfa_code_errors_v2 |= @@ -2440,6 +2483,10 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) if (!TX_CMP_VALID(txcmp, raw_cons)) break; + /* The valid test of the entry must be done first before + * reading any further. + */ + dma_rmb(); if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { tmp_raw_cons = NEXT_RAW_CMP(raw_cons); cp_cons = RING_CMP(tmp_raw_cons); @@ -7563,8 +7610,12 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) bp->flags &= ~BNXT_FLAG_WOL_CAP; if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED) bp->flags |= BNXT_FLAG_WOL_CAP; - if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) + if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) { __bnxt_hwrm_ptp_qcfg(bp); + } else { + kfree(bp->ptp_cfg); + bp->ptp_cfg = NULL; + } } else { #ifdef CONFIG_BNXT_SRIOV struct bnxt_vf_info *vf = &bp->vf; @@ -9110,10 +9161,9 @@ static void bnxt_disable_napi(struct bnxt *bp) for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; + napi_disable(&bp->bnapi[i]->napi); if (bp->bnapi[i]->rx_ring) cancel_work_sync(&cpr->dim.work); - - napi_disable(&bp->bnapi[i]->napi); } } @@ -9147,9 +9197,11 @@ void bnxt_tx_disable(struct bnxt *bp) if (bp->tx_ring) { for (i = 0; i < bp->tx_nr_rings; i++) { txr = &bp->tx_ring[i]; - txr->dev_state = BNXT_DEV_STATE_CLOSING; + WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING); } } + /* Make sure napi polls see @dev_state change */ + synchronize_net(); /* Drop carrier first to prevent TX timeout */ netif_carrier_off(bp->dev); /* Stop all TX queues */ @@ -9163,8 +9215,10 @@ void bnxt_tx_enable(struct bnxt *bp) for (i = 0; i < bp->tx_nr_rings; i++) { txr = &bp->tx_ring[i]; - txr->dev_state = 0; + WRITE_ONCE(txr->dev_state, 0); } + /* Make sure napi polls see @dev_state change */ + synchronize_net(); netif_tx_wake_all_queues(bp->dev); if (bp->link_info.link_up) netif_carrier_on(bp->dev); @@ -10123,7 +10177,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) } } - bnxt_ptp_start(bp); rc = bnxt_init_nic(bp, irq_re_init); if (rc) { netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); @@ -10197,6 +10250,12 @@ int bnxt_half_open_nic(struct bnxt *bp) { int rc = 0; + if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { + netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n"); + rc = -ENODEV; + goto half_open_err; + } + rc = bnxt_alloc_mem(bp, false); if (rc) { netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); @@ -10256,9 +10315,16 @@ static int 
bnxt_open(struct net_device *dev) rc = bnxt_hwrm_if_change(bp, true); if (rc) return rc; + + if (bnxt_ptp_init(bp)) { + netdev_warn(dev, "PTP initialization failed.\n"); + kfree(bp->ptp_cfg); + bp->ptp_cfg = NULL; + } rc = __bnxt_open_nic(bp, true, true); if (rc) { bnxt_hwrm_if_change(bp, false); + bnxt_ptp_clear(bp); } else { if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) { if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { @@ -10349,6 +10415,7 @@ static int bnxt_close(struct net_device *dev) { struct bnxt *bp = netdev_priv(dev); + bnxt_ptp_clear(bp); bnxt_hwmon_close(bp); bnxt_close_nic(bp, true, true); bnxt_hwrm_shutdown_link(bp); @@ -10737,6 +10804,9 @@ static bool bnxt_rfs_supported(struct bnxt *bp) return true; return false; } + /* 212 firmware is broken for aRFS */ + if (BNXT_FW_MAJ(bp) == 212) + return false; if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) return true; if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) @@ -11335,6 +11405,7 @@ static void bnxt_fw_reset_close(struct bnxt *bp) bnxt_clear_int_mode(bp); pci_disable_device(bp->pdev); } + bnxt_ptp_clear(bp); __bnxt_close_nic(bp, true, false); bnxt_vf_reps_free(bp); bnxt_clear_int_mode(bp); @@ -11959,10 +12030,21 @@ static bool bnxt_fw_reset_timeout(struct bnxt *bp) (bp->fw_reset_max_dsecs * HZ / 10)); } +static void bnxt_fw_reset_abort(struct bnxt *bp, int rc) +{ + clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) { + bnxt_ulp_start(bp, rc); + bnxt_dl_health_status_update(bp, false); + } + bp->fw_reset_state = 0; + dev_close(bp->dev); +} + static void bnxt_fw_reset_task(struct work_struct *work) { struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work); - int rc; + int rc = 0; if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n"); @@ -11992,6 +12074,11 @@ static void bnxt_fw_reset_task(struct work_struct *work) } bp->fw_reset_timestamp = jiffies; rtnl_lock(); + if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { + bnxt_fw_reset_abort(bp, rc); + rtnl_unlock(); + return; + } bnxt_fw_reset_close(bp); if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; @@ -12039,6 +12126,7 @@ static void bnxt_fw_reset_task(struct work_struct *work) if (val == 0xffff) { if (bnxt_fw_reset_timeout(bp)) { netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n"); + rc = -ETIMEDOUT; goto fw_reset_abort; } bnxt_queue_fw_reset_work(bp, HZ / 1000); @@ -12048,6 +12136,7 @@ static void bnxt_fw_reset_task(struct work_struct *work) clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); if (pci_enable_device(bp->pdev)) { netdev_err(bp->dev, "Cannot re-enable PCI device\n"); + rc = -ENODEV; goto fw_reset_abort; } pci_set_master(bp->pdev); @@ -12074,18 +12163,18 @@ static void bnxt_fw_reset_task(struct work_struct *work) } rc = bnxt_open(bp->dev); if (rc) { - netdev_err(bp->dev, "bnxt_open_nic() failed\n"); - clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); - dev_close(bp->dev); + netdev_err(bp->dev, "bnxt_open() failed during FW reset\n"); + bnxt_fw_reset_abort(bp, rc); + rtnl_unlock(); + return; } bp->fw_reset_state = 0; /* Make sure fw_reset_state is 0 before clearing the flag */ smp_mb__before_atomic(); clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); - bnxt_ulp_start(bp, rc); - if (!rc) - bnxt_reenable_sriov(bp); + bnxt_ulp_start(bp, 0); + bnxt_reenable_sriov(bp); bnxt_vf_reps_alloc(bp); bnxt_vf_reps_open(bp); bnxt_dl_health_recovery_done(bp); @@ 
-12103,12 +12192,8 @@ fw_reset_abort_status: netdev_err(bp->dev, "fw_health_status 0x%x\n", sts); } fw_reset_abort: - clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); - if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) - bnxt_dl_health_status_update(bp, false); - bp->fw_reset_state = 0; rtnl_lock(); - dev_close(bp->dev); + bnxt_fw_reset_abort(bp, rc); rtnl_unlock(); } @@ -12662,7 +12747,6 @@ static void bnxt_remove_one(struct pci_dev *pdev) if (BNXT_PF(bp)) devlink_port_type_clear(&bp->dl_port); - bnxt_ptp_clear(bp); pci_disable_pcie_error_reporting(pdev); unregister_netdev(dev); clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); @@ -13246,11 +13330,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) rc); } - if (bnxt_ptp_init(bp)) { - netdev_warn(dev, "PTP initialization failed.\n"); - kfree(bp->ptp_cfg); - bp->ptp_cfg = NULL; - } bnxt_inv_fw_health_reg(bp); bnxt_dl_register(bp); @@ -13436,7 +13515,8 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev, if (netif_running(netdev)) bnxt_close(netdev); - pci_disable_device(pdev); + if (pci_is_enabled(pdev)) + pci_disable_device(pdev); bnxt_free_ctx_mem(bp); kfree(bp->ctx); bp->ctx = NULL; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index bcf8d00b8c80..ba4e0fc38520 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -786,6 +786,7 @@ struct bnxt_tx_ring_info { u16 tx_prod; u16 tx_cons; u16 txq_index; + u8 kick_pending; struct bnxt_db_info tx_db; struct tx_bd *tx_desc_ring[MAX_TX_PAGES]; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c index 8e90224c43a2..8a68df4d9e59 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c @@ -433,6 +433,7 @@ static int bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app, static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc) { int total_ets_bw = 0; + bool zero = false; u8 max_tc = 0; int i; @@ -453,13 +454,20 @@ static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc) break; case IEEE_8021QAZ_TSA_ETS: total_ets_bw += ets->tc_tx_bw[i]; + zero = zero || !ets->tc_tx_bw[i]; break; default: return -ENOTSUPP; } } - if (total_ets_bw > 100) + if (total_ets_bw > 100) { + netdev_warn(bp->dev, "rejecting ETS config exceeding available bandwidth\n"); return -EINVAL; + } + if (zero && total_ets_bw == 100) { + netdev_warn(bp->dev, "rejecting ETS config starving a TC\n"); + return -EINVAL; + } if (max_tc >= bp->max_tc) *tc = bp->max_tc; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index 3fc6781c5b98..94d07a9f7034 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -368,6 +368,7 @@ struct cmd_nums { #define HWRM_FUNC_PTP_TS_QUERY 0x19fUL #define HWRM_FUNC_PTP_EXT_CFG 0x1a0UL #define HWRM_FUNC_PTP_EXT_QCFG 0x1a1UL + #define HWRM_FUNC_KEY_CTX_ALLOC 0x1a2UL #define HWRM_SELFTEST_QLIST 0x200UL #define HWRM_SELFTEST_EXEC 0x201UL #define HWRM_SELFTEST_IRQ 0x202UL @@ -531,8 +532,8 @@ struct hwrm_err_output { #define HWRM_VERSION_MAJOR 1 #define HWRM_VERSION_MINOR 10 #define HWRM_VERSION_UPDATE 2 -#define HWRM_VERSION_RSVD 47 -#define HWRM_VERSION_STR "1.10.2.47" +#define HWRM_VERSION_RSVD 52 +#define HWRM_VERSION_STR "1.10.2.52" /* hwrm_ver_get_input (size:192b/24B) */ struct 
hwrm_ver_get_input { @@ -585,6 +586,7 @@ struct hwrm_ver_get_output { #define VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED 0x1000UL #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TFLIB_SUPPORTED 0x2000UL #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED 0x4000UL + #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_BOOT_CAPABLE 0x8000UL u8 roce_fw_maj_8b; u8 roce_fw_min_8b; u8 roce_fw_bld_8b; @@ -886,7 +888,8 @@ struct hwrm_async_event_cmpl_reset_notify { #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL (0x2UL << 8) #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL (0x3UL << 8) #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FAST_RESET (0x4UL << 8) - #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FAST_RESET + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION (0x5UL << 8) + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_MASK 0xffff0000UL #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT 16 }; @@ -1236,13 +1239,14 @@ struct hwrm_async_event_cmpl_error_report_base { u8 timestamp_lo; __le16 timestamp_hi; __le32 event_data1; - #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL - #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT 0 - #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED 0x0UL - #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL - #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL - #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL - #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED 0x0UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD }; /* hwrm_async_event_cmpl_error_report_pause_storm (size:128b/16B) */ @@ -1446,6 +1450,8 @@ struct hwrm_func_vf_cfg_input { #define FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS 0x200UL #define FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x400UL #define FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x800UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_KEY_CTXS 0x1000UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_KEY_CTXS 0x2000UL __le16 mtu; __le16 guest_vlan; __le16 async_event_cr; @@ -1469,7 +1475,8 @@ struct hwrm_func_vf_cfg_input { __le16 num_vnics; __le16 num_stat_ctxs; __le16 num_hw_ring_grps; - u8 unused_0[4]; + __le16 num_tx_key_ctxs; + __le16 num_rx_key_ctxs; }; /* hwrm_func_vf_cfg_output (size:128b/16B) */ @@ -1493,7 +1500,7 @@ struct hwrm_func_qcaps_input { u8 unused_0[6]; }; -/* 
hwrm_func_qcaps_output (size:704b/88B) */ +/* hwrm_func_qcaps_output (size:768b/96B) */ struct hwrm_func_qcaps_output { __le16 error_code; __le16 req_type; @@ -1587,7 +1594,8 @@ struct hwrm_func_qcaps_output { #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TE_CFA 0x4UL #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RE_CFA 0x8UL #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_PRIMATE 0x10UL - u8 unused_1; + __le16 max_key_ctxs_alloc; + u8 unused_1[7]; u8 valid; }; @@ -1602,7 +1610,7 @@ struct hwrm_func_qcfg_input { u8 unused_0[6]; }; -/* hwrm_func_qcfg_output (size:832b/104B) */ +/* hwrm_func_qcfg_output (size:896b/112B) */ struct hwrm_func_qcfg_output { __le16 error_code; __le16 req_type; @@ -1749,11 +1757,13 @@ struct hwrm_func_qcfg_output { #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 __le16 host_mtu; - u8 unused_3; + __le16 alloc_tx_key_ctxs; + __le16 alloc_rx_key_ctxs; + u8 unused_3[5]; u8 valid; }; -/* hwrm_func_cfg_input (size:832b/104B) */ +/* hwrm_func_cfg_input (size:896b/112B) */ struct hwrm_func_cfg_input { __le16 req_type; __le16 cmpl_ring; @@ -1820,6 +1830,8 @@ struct hwrm_func_cfg_input { #define FUNC_CFG_REQ_ENABLES_PARTITION_MAX_BW 0x8000000UL #define FUNC_CFG_REQ_ENABLES_TPID 0x10000000UL #define FUNC_CFG_REQ_ENABLES_HOST_MTU 0x20000000UL + #define FUNC_CFG_REQ_ENABLES_TX_KEY_CTXS 0x40000000UL + #define FUNC_CFG_REQ_ENABLES_RX_KEY_CTXS 0x80000000UL __le16 admin_mtu; __le16 mru; __le16 num_rsscos_ctxs; @@ -1929,6 +1941,9 @@ struct hwrm_func_cfg_input { #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 __be16 tpid; __le16 host_mtu; + __le16 num_tx_key_ctxs; + __le16 num_rx_key_ctxs; + u8 unused_0[4]; }; /* hwrm_func_cfg_output (size:128b/16B) */ @@ -2099,6 +2114,7 @@ struct hwrm_func_drv_rgtr_input { #define FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT 0x40UL #define FUNC_DRV_RGTR_REQ_FLAGS_FAST_RESET_SUPPORT 0x80UL #define FUNC_DRV_RGTR_REQ_FLAGS_RSS_STRICT_HASH_TYPE_SUPPORT 0x100UL + #define FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT 0x200UL __le32 enables; #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL @@ -2268,7 +2284,7 @@ struct hwrm_func_resource_qcaps_input { u8 unused_0[6]; }; -/* hwrm_func_resource_qcaps_output (size:448b/56B) */ +/* hwrm_func_resource_qcaps_output (size:512b/64B) */ struct hwrm_func_resource_qcaps_output { __le16 error_code; __le16 req_type; @@ -2300,11 +2316,15 @@ struct hwrm_func_resource_qcaps_output { __le16 max_tx_scheduler_inputs; __le16 flags; #define FUNC_RESOURCE_QCAPS_RESP_FLAGS_MIN_GUARANTEED 0x1UL + __le16 min_tx_key_ctxs; + __le16 max_tx_key_ctxs; + __le16 min_rx_key_ctxs; + __le16 max_rx_key_ctxs; u8 unused_0[5]; u8 valid; }; -/* hwrm_func_vf_resource_cfg_input (size:448b/56B) */ +/* hwrm_func_vf_resource_cfg_input (size:512b/64B) */ struct hwrm_func_vf_resource_cfg_input { __le16 req_type; __le16 cmpl_ring; @@ -2331,6 +2351,10 @@ struct hwrm_func_vf_resource_cfg_input { __le16 max_hw_ring_grps; __le16 flags; #define FUNC_VF_RESOURCE_CFG_REQ_FLAGS_MIN_GUARANTEED 0x1UL + __le16 min_tx_key_ctxs; + __le16 max_tx_key_ctxs; + __le16 min_rx_key_ctxs; + __le16 max_rx_key_ctxs; u8 unused_0[2]; }; @@ -2348,7 +2372,9 @@ struct hwrm_func_vf_resource_cfg_output { __le16 reserved_vnics; __le16 reserved_stat_ctx; __le16 reserved_hw_ring_grps; - u8 unused_0[7]; + __le16 reserved_tx_key_ctxs; + __le16 reserved_rx_key_ctxs; + u8 
unused_0[3]; u8 valid; }; @@ -4220,7 +4246,7 @@ struct hwrm_port_lpbk_clr_stats_output { u8 valid; }; -/* hwrm_port_ts_query_input (size:256b/32B) */ +/* hwrm_port_ts_query_input (size:320b/40B) */ struct hwrm_port_ts_query_input { __le16 req_type; __le16 cmpl_ring; @@ -4238,8 +4264,11 @@ struct hwrm_port_ts_query_input { __le16 enables; #define PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT 0x1UL #define PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID 0x2UL + #define PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET 0x4UL __le16 ts_req_timeout; __le32 ptp_seq_id; + __le16 ptp_hdr_offset; + u8 unused_1[6]; }; /* hwrm_port_ts_query_output (size:192b/24B) */ @@ -8172,6 +8201,7 @@ struct hwrm_fw_reset_input { u8 host_idx; u8 flags; #define FW_RESET_REQ_FLAGS_RESET_GRACEFUL 0x1UL + #define FW_RESET_REQ_FLAGS_FW_ACTIVATION 0x2UL u8 unused_0[4]; }; @@ -8952,7 +8982,7 @@ struct hwrm_nvm_get_dir_info_output { u8 valid; }; -/* hwrm_nvm_write_input (size:384b/48B) */ +/* hwrm_nvm_write_input (size:448b/56B) */ struct hwrm_nvm_write_input { __le16 req_type; __le16 cmpl_ring; @@ -8968,7 +8998,11 @@ struct hwrm_nvm_write_input { __le16 option; __le16 flags; #define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG 0x1UL + #define NVM_WRITE_REQ_FLAGS_BATCH_MODE 0x2UL + #define NVM_WRITE_REQ_FLAGS_BATCH_LAST 0x4UL __le32 dir_item_length; + __le32 offset; + __le32 len; __le32 unused_0; }; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c index f698b6bd4ff8..81f40ab748f1 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -20,7 +20,7 @@ #include "bnxt.h" #include "bnxt_ptp.h" -int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id) +int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off) { unsigned int ptp_class; struct ptp_header *hdr; @@ -34,6 +34,7 @@ int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id) if (!hdr) return -EINVAL; + *hdr_off = (u8 *)hdr - skb->data; *seq_id = ntohs(hdr->sequence_id); return 0; default: @@ -91,6 +92,7 @@ static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts) PORT_TS_QUERY_REQ_FLAGS_PATH_TX) { req.enables = cpu_to_le16(BNXT_PTP_QTS_TX_ENABLES); req.ptp_seq_id = cpu_to_le32(bp->ptp_cfg->tx_seqid); + req.ptp_hdr_offset = cpu_to_le16(bp->ptp_cfg->tx_hdr_off); req.ts_req_timeout = cpu_to_le16(BNXT_PTP_QTS_TIMEOUT); } mutex_lock(&bp->hwrm_cmd_lock); @@ -353,6 +355,12 @@ static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info) bnxt_ptp_get_current_time(bp); ptp->next_period = now + HZ; + if (time_after_eq(now, ptp->next_overflow_check)) { + spin_lock_bh(&ptp->ptp_lock); + timecounter_read(&ptp->tc); + spin_unlock_bh(&ptp->ptp_lock); + ptp->next_overflow_check = now + BNXT_PHC_OVERFLOW_PERIOD; + } return HZ; } @@ -385,22 +393,6 @@ int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts) return 0; } -void bnxt_ptp_start(struct bnxt *bp) -{ - struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; - - if (!ptp) - return; - - if (bp->flags & BNXT_FLAG_CHIP_P5) { - spin_lock_bh(&ptp->ptp_lock); - ptp->current_time = bnxt_refclk_read(bp, NULL); - WRITE_ONCE(ptp->old_time, ptp->current_time); - spin_unlock_bh(&ptp->ptp_lock); - ptp_schedule_worker(ptp->ptp_clock, 0); - } -} - static const struct ptp_clock_info bnxt_ptp_caps = { .owner = THIS_MODULE, .name = "bnxt clock", @@ -439,6 +431,7 @@ int bnxt_ptp_init(struct bnxt *bp) ptp->cc.shift = 0; ptp->cc.mult = 1; + ptp->next_overflow_check = jiffies + BNXT_PHC_OVERFLOW_PERIOD; timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));
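
The next_overflow_check logic added to bnxt_ptp_ts_aux_work() above exists because the PHC is a 48-bit counter: timecounter_read() has to run at least once per wrap period (about 78 hours here) so the delta since the previous read stays unambiguous, and the driver samples every 19 hours to leave a wide margin. A userspace model of the accumulation, with invented names and the mult = 1, shift = 0 setup used in bnxt_ptp_init():

	#include <stdint.h>

	#define HW_COUNTER_MASK ((1ULL << 48) - 1)

	struct phc_model {
		uint64_t last_raw;	/* previous 48-bit counter sample */
		uint64_t nsec;		/* accumulated 64-bit nanoseconds */
	};

	/* Fold one counter sample into the 64-bit accumulator. The masked
	 * subtraction copes with at most one wrap, so this must be called
	 * more often than the ~78 h wrap period; the driver uses 19 h.
	 */
	static uint64_t phc_update(struct phc_model *p, uint64_t raw48)
	{
		uint64_t delta = (raw48 - p->last_raw) & HW_COUNTER_MASK;

		p->last_raw = raw48;
		p->nsec += delta;	/* mult = 1, shift = 0 */
		return p->nsec;
	}
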
ptp->ptp_info = bnxt_ptp_caps; @@ -450,7 +443,13 @@ int bnxt_ptp_init(struct bnxt *bp) bnxt_unmap_ptp_regs(bp); return err; } - + if (bp->flags & BNXT_FLAG_CHIP_P5) { + spin_lock_bh(&ptp->ptp_lock); + ptp->current_time = bnxt_refclk_read(bp, NULL); + WRITE_ONCE(ptp->old_time, ptp->current_time); + spin_unlock_bh(&ptp->ptp_lock); + ptp_schedule_worker(ptp->ptp_clock, 0); + } return 0; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h index 6b6245750e20..524f1c272054 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h @@ -10,8 +10,8 @@ #ifndef BNXT_PTP_H #define BNXT_PTP_H -#define BNXT_PTP_GRC_WIN 5 -#define BNXT_PTP_GRC_WIN_BASE 0x5000 +#define BNXT_PTP_GRC_WIN 6 +#define BNXT_PTP_GRC_WIN_BASE 0x6000 #define BNXT_MAX_PHC_DRIFT 31000000 #define BNXT_LO_TIMER_MASK 0x0000ffffffffUL @@ -19,7 +19,8 @@ #define BNXT_PTP_QTS_TIMEOUT 1000 #define BNXT_PTP_QTS_TX_ENABLES (PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID | \ - PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT) + PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT | \ + PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET) struct bnxt_ptp_cfg { struct ptp_clock_info ptp_info; @@ -32,7 +33,12 @@ struct bnxt_ptp_cfg { u64 current_time; u64 old_time; unsigned long next_period; + unsigned long next_overflow_check; + /* 48-bit PHC overflows in 78 hours. Check overflow every 19 hours. */ + #define BNXT_PHC_OVERFLOW_PERIOD (19 * 3600 * HZ) + u16 tx_seqid; + u16 tx_hdr_off; struct bnxt *bp; atomic_t tx_avail; #define BNXT_MAX_TX_TS 1 @@ -70,12 +76,11 @@ do { \ ((dst) = READ_ONCE(src)) #endif -int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id); +int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off); int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr); int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr); int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb); int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts); -void bnxt_ptp_start(struct bnxt *bp); int bnxt_ptp_init(struct bnxt *bp); void bnxt_ptp_clear(struct bnxt *bp); #endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index a918e374f3c5..187ff643ad2a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -479,16 +479,17 @@ struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev) if (!edev) return ERR_PTR(-ENOMEM); edev->en_ops = &bnxt_en_ops_tbl; - if (bp->flags & BNXT_FLAG_ROCEV1_CAP) - edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP; - if (bp->flags & BNXT_FLAG_ROCEV2_CAP) - edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP; edev->net = dev; edev->pdev = bp->pdev; edev->l2_db_size = bp->db_size; edev->l2_db_size_nc = bp->db_size; bp->edev = edev; } + edev->flags &= ~BNXT_EN_FLAG_ROCE_CAP; + if (bp->flags & BNXT_FLAG_ROCEV1_CAP) + edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP; + if (bp->flags & BNXT_FLAG_ROCEV2_CAP) + edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP; return bp->edev; } EXPORT_SYMBOL(bnxt_ulp_probe); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 41f7f078cd27..db74241935ab 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1640,7 +1640,8 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv, switch (mode) { case GENET_POWER_PASSIVE: - reg &= 
~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS | + EXT_ENERGY_DET_MASK); if (GENET_IS_V5(priv)) { reg &= ~(EXT_PWR_DOWN_PHY_EN | EXT_PWR_DOWN_PHY_RD | @@ -3237,15 +3238,21 @@ static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv, /* Returns a reusable dma control register value */ static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv) { + unsigned int i; u32 reg; u32 dma_ctrl; /* disable DMA */ dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN; + for (i = 0; i < priv->hw_params->tx_queues; i++) + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); reg = bcmgenet_tdma_readl(priv, DMA_CTRL); reg &= ~dma_ctrl; bcmgenet_tdma_writel(priv, reg, DMA_CTRL); + dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN; + for (i = 0; i < priv->hw_params->rx_queues; i++) + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); reg = bcmgenet_rdma_readl(priv, DMA_CTRL); reg &= ~dma_ctrl; bcmgenet_rdma_writel(priv, reg, DMA_CTRL); @@ -3292,7 +3299,6 @@ static int bcmgenet_open(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); unsigned long dma_ctrl; - u32 reg; int ret; netif_dbg(priv, ifup, dev, "bcmgenet_open\n"); @@ -3318,12 +3324,6 @@ static int bcmgenet_open(struct net_device *dev) bcmgenet_set_hw_addr(priv, dev->dev_addr); - if (priv->internal_phy) { - reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); - reg |= EXT_ENERGY_DET_MASK; - bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); - } - /* Disable RX/TX DMA and flush TX queues */ dma_ctrl = bcmgenet_dma_disable(priv); @@ -4139,7 +4139,6 @@ static int bcmgenet_resume(struct device *d) struct bcmgenet_priv *priv = netdev_priv(dev); struct bcmgenet_rxnfc_rule *rule; unsigned long dma_ctrl; - u32 reg; int ret; if (!netif_running(dev)) @@ -4176,12 +4175,6 @@ static int bcmgenet_resume(struct device *d) if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) bcmgenet_hfb_create_rxnfc_filter(priv, rule); - if (priv->internal_phy) { - reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); - reg |= EXT_ENERGY_DET_MASK; - bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); - } - /* Disable RX/TX DMA and flush TX queues */ dma_ctrl = bcmgenet_dma_disable(priv); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c index facde824bcaa..e31a5a397f11 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c @@ -186,12 +186,6 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, reg |= CMD_RX_EN; bcmgenet_umac_writel(priv, reg, UMAC_CMD); - if (priv->hw_params->flags & GENET_HAS_EXT) { - reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); - reg &= ~EXT_ENERGY_DET_MASK; - bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); - } - reg = UMAC_IRQ_MPD_R; if (hfb_enable) reg |= UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM; diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c index 4cddd628d41b..9ed3d1ab2ca5 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c @@ -420,7 +420,7 @@ static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct) * bits 32:47 indicate the PVF num. */ for (q_no = 0; q_no < ern; q_no++) { - reg_val = oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS; + reg_val = (u64)oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS; /* for VF assigned queues. 
*/ if (q_no < oct->sriov_info.pf_srn) { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 9a2b166d651e..dbf9a0e6601d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -2643,6 +2643,9 @@ static void detach_ulds(struct adapter *adap) { unsigned int i; + if (!is_uld(adap)) + return; + mutex_lock(&uld_mutex); list_del(&adap->list_node); @@ -7141,10 +7144,13 @@ static void remove_one(struct pci_dev *pdev) */ destroy_workqueue(adapter->workq); - if (is_uld(adapter)) { - detach_ulds(adapter); - t4_uld_clean_up(adapter); - } + detach_ulds(adapter); + + for_each_port(adapter, i) + if (adapter->port[i]->reg_state == NETREG_REGISTERED) + unregister_netdev(adapter->port[i]); + + t4_uld_clean_up(adapter); adap_free_hma_mem(adapter); @@ -7152,10 +7158,6 @@ static void remove_one(struct pci_dev *pdev) cxgb4_free_mps_ref_entries(adapter); - for_each_port(adapter, i) - if (adapter->port[i]->reg_state == NETREG_REGISTERED) - unregister_netdev(adapter->port[i]); - debugfs_remove_recursive(adapter->debugfs_root); if (!is_t4(adapter->params.chip)) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c index 743af9e654aa..17faac715882 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c @@ -581,6 +581,9 @@ void t4_uld_clean_up(struct adapter *adap) { unsigned int i; + if (!is_uld(adap)) + return; + mutex_lock(&uld_mutex); for (i = 0; i < CXGB4_ULD_MAX; i++) { if (!adap->uld[i].handle) diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c index f6ff1f76eacb..1876f15dd827 100644 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c @@ -357,7 +357,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent) int i, option = find_cnt < MAX_UNITS ? 
options[find_cnt] : 0; void __iomem *ioaddr; - i = pci_enable_device(pdev); + i = pcim_enable_device(pdev); if (i) return i; pci_set_master(pdev); @@ -379,7 +379,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent) ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size); if (!ioaddr) - goto err_out_free_res; + goto err_out_netdev; for (i = 0; i < 3; i++) ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i)); @@ -458,8 +458,6 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent) err_out_cleardev: pci_iounmap(pdev, ioaddr); -err_out_free_res: - pci_release_regions(pdev); err_out_netdev: free_netdev (dev); return -ENODEV; @@ -1526,7 +1524,6 @@ static void w840_remove1(struct pci_dev *pdev) if (dev) { struct netdev_private *np = netdev_priv(dev); unregister_netdev(dev); - pci_release_regions(pdev); pci_iounmap(pdev, np->base_addr); free_netdev(dev); } diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c index f3d12d0714fb..98cc0133c343 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c @@ -2770,32 +2770,32 @@ static int dpaa2_switch_ctrl_if_setup(struct ethsw_core *ethsw) if (err) return err; - err = dpaa2_switch_seed_bp(ethsw); - if (err) - goto err_free_dpbp; - err = dpaa2_switch_alloc_rings(ethsw); if (err) - goto err_drain_dpbp; + goto err_free_dpbp; err = dpaa2_switch_setup_dpio(ethsw); if (err) goto err_destroy_rings; + err = dpaa2_switch_seed_bp(ethsw); + if (err) + goto err_deregister_dpio; + err = dpsw_ctrl_if_enable(ethsw->mc_io, 0, ethsw->dpsw_handle); if (err) { dev_err(ethsw->dev, "dpsw_ctrl_if_enable err %d\n", err); - goto err_deregister_dpio; + goto err_drain_dpbp; } return 0; +err_drain_dpbp: + dpaa2_switch_drain_bp(ethsw); err_deregister_dpio: dpaa2_switch_free_dpio(ethsw); err_destroy_rings: dpaa2_switch_destroy_rings(ethsw); -err_drain_dpbp: - dpaa2_switch_drain_bp(ethsw); err_free_dpbp: dpaa2_switch_free_dpbp(ethsw); @@ -3038,26 +3038,30 @@ static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port) return err; } -static void dpaa2_switch_takedown(struct fsl_mc_device *sw_dev) +static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw) +{ + dpsw_ctrl_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle); + dpaa2_switch_free_dpio(ethsw); + dpaa2_switch_destroy_rings(ethsw); + dpaa2_switch_drain_bp(ethsw); + dpaa2_switch_free_dpbp(ethsw); +} + +static void dpaa2_switch_teardown(struct fsl_mc_device *sw_dev) { struct device *dev = &sw_dev->dev; struct ethsw_core *ethsw = dev_get_drvdata(dev); int err; + dpaa2_switch_ctrl_if_teardown(ethsw); + + destroy_workqueue(ethsw->workqueue); + err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle); if (err) dev_warn(dev, "dpsw_close err %d\n", err); } -static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw) -{ - dpsw_ctrl_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle); - dpaa2_switch_free_dpio(ethsw); - dpaa2_switch_destroy_rings(ethsw); - dpaa2_switch_drain_bp(ethsw); - dpaa2_switch_free_dpbp(ethsw); -} - static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev) { struct ethsw_port_priv *port_priv; @@ -3068,8 +3072,6 @@ static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev) dev = &sw_dev->dev; ethsw = dev_get_drvdata(dev); - dpaa2_switch_ctrl_if_teardown(ethsw); - dpaa2_switch_teardown_irqs(sw_dev); dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle); @@ -3084,9 +3086,7 @@ static int 
dpaa2_switch_remove(struct fsl_mc_device *sw_dev) kfree(ethsw->acls); kfree(ethsw->ports); - dpaa2_switch_takedown(sw_dev); - - destroy_workqueue(ethsw->workqueue); + dpaa2_switch_teardown(sw_dev); fsl_mc_portal_free(ethsw->mc_io); @@ -3199,7 +3199,7 @@ static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev) GFP_KERNEL); if (!(ethsw->ports)) { err = -ENOMEM; - goto err_takedown; + goto err_teardown; } ethsw->fdbs = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->fdbs), @@ -3270,8 +3270,8 @@ err_free_fdbs: err_free_ports: kfree(ethsw->ports); -err_takedown: - dpaa2_switch_takedown(sw_dev); +err_teardown: + dpaa2_switch_teardown(sw_dev); err_free_cmdport: fsl_mc_portal_free(ethsw->mc_io); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 8aea707a65a7..7e4c4980ced7 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3843,13 +3843,13 @@ fec_drv_remove(struct platform_device *pdev) if (of_phy_is_fixed_link(np)) of_phy_deregister_fixed_link(np); of_node_put(fep->phy_node); - free_netdev(ndev); clk_disable_unprepare(fep->clk_ahb); clk_disable_unprepare(fep->clk_ipg); pm_runtime_put_noidle(&pdev->dev); pm_runtime_disable(&pdev->dev); + free_netdev(ndev); return 0; } diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index 46ecb42f2ef8..d9fc5c456bf3 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -524,6 +524,7 @@ static void setup_memac(struct mac_device *mac_dev) | SUPPORTED_Autoneg \ | SUPPORTED_Pause \ | SUPPORTED_Asym_Pause \ + | SUPPORTED_FIBRE \ | SUPPORTED_MII) static DEFINE_MUTEX(eth_lock); diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index 867e87af3432..099a2bc5ae67 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -1469,7 +1469,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = pci_enable_device(pdev); if (err) - return -ENXIO; + return err; err = pci_request_regions(pdev, "gvnic-cfg"); if (err) @@ -1477,19 +1477,12 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (err) { dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err); goto abort_with_pci_region; } - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) { - dev_err(&pdev->dev, - "Failed to set consistent dma mask: err=%d\n", err); - goto abort_with_pci_region; - } - reg_bar = pci_iomap(pdev, GVE_REGISTER_BAR, 0); if (!reg_bar) { dev_err(&pdev->dev, "Failed to map pci bar!\n"); @@ -1512,6 +1505,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues); if (!dev) { dev_err(&pdev->dev, "could not allocate netdev\n"); + err = -ENOMEM; goto abort_with_db_bar; } SET_NETDEV_DEV(dev, &pdev->dev); @@ -1565,7 +1559,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = register_netdev(dev); if (err) - goto abort_with_wq; + goto abort_with_gve_init; dev_info(&pdev->dev, "GVE version %s\n", gve_version_str); dev_info(&pdev->dev, "GVE queue format %d\n", (int)priv->queue_format); @@ -1573,6 +1567,9 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id 
*ent) queue_work(priv->gve_wq, &priv->service_task); return 0; +abort_with_gve_init: + gve_teardown_priv_resources(priv); + abort_with_wq: destroy_workqueue(priv->gve_wq); @@ -1590,7 +1587,7 @@ abort_with_pci_region: abort_with_enabled: pci_disable_device(pdev); - return -ENXIO; + return err; } static void gve_remove(struct pci_dev *pdev) diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c index 77bb8227f89b..8500621b2cd4 100644 --- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c +++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c @@ -566,13 +566,6 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx, return 0; } - /* Prefetch the payload header. */ - prefetch((char *)buf_state->addr + buf_state->page_info.page_offset); -#if L1_CACHE_BYTES < 128 - prefetch((char *)buf_state->addr + buf_state->page_info.page_offset + - L1_CACHE_BYTES); -#endif - if (eop && buf_len <= priv->rx_copybreak) { rx->skb_head = gve_rx_copy(priv->dev, napi, &buf_state->page_info, buf_len, 0); diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index 12f6c2442a7a..e53512f6878a 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -131,7 +131,7 @@ /* buf unit size is cache_line_size, which is 64, so the shift is 6 */ #define PPE_BUF_SIZE_SHIFT 6 #define PPE_TX_BUF_HOLD BIT(31) -#define CACHE_LINE_MASK 0x3F +#define SOC_CACHE_LINE_MASK 0x3F #else #define PPE_CFG_QOS_VMID_GRP_SHIFT 8 #define PPE_CFG_RX_CTRL_ALIGN_SHIFT 11 @@ -531,8 +531,8 @@ hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) #if defined(CONFIG_HI13X1_GMAC) desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV | TX_RELEASE_TO_PPE | priv->port << TX_POOL_SHIFT); - desc->data_offset = (__force u32)cpu_to_be32(phys & CACHE_LINE_MASK); - desc->send_addr = (__force u32)cpu_to_be32(phys & ~CACHE_LINE_MASK); + desc->data_offset = (__force u32)cpu_to_be32(phys & SOC_CACHE_LINE_MASK); + desc->send_addr = (__force u32)cpu_to_be32(phys & ~SOC_CACHE_LINE_MASK); #else desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV); desc->send_addr = (__force u32)cpu_to_be32(phys); diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index 0a6cda309b24..aa86a81c8f4a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -98,6 +98,7 @@ struct hclgevf_mbx_resp_status { u32 origin_mbx_msg; bool received_resp; int resp_status; + u16 match_id; u8 additional_info[HCLGE_MBX_MAX_RESP_DATA_SIZE]; }; @@ -143,7 +144,8 @@ struct hclge_mbx_vf_to_pf_cmd { u8 mbx_need_resp; u8 rsv1[1]; u8 msg_len; - u8 rsv2[3]; + u8 rsv2; + u16 match_id; struct hclge_vf_to_pf_msg msg; }; @@ -153,7 +155,8 @@ struct hclge_mbx_pf_to_vf_cmd { u8 dest_vfid; u8 rsv[3]; u8 msg_len; - u8 rsv1[3]; + u8 rsv1; + u16 match_id; struct hclge_pf_to_vf_msg msg; }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index dd3354a57c62..ebeaf12e409b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -9552,13 +9552,17 @@ static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable) if (ret) return ret; - if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) + if 
(test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) { ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id, !enable); - else if (!vport->vport_id) + } else if (!vport->vport_id) { + if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps)) + enable = false; + ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, HCLGE_FILTER_FE_INGRESS, enable, 0); + } return ret; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index e10a2c36b706..c0a478ae9583 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -47,6 +47,7 @@ static int hclge_gen_resp_to_vf(struct hclge_vport *vport, resp_pf_to_vf->dest_vfid = vf_to_pf_req->mbx_src_vfid; resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len; + resp_pf_to_vf->match_id = vf_to_pf_req->match_id; resp_pf_to_vf->msg.code = HCLGE_MBX_PF_VF_RESP; resp_pf_to_vf->msg.vf_mbx_msg_code = vf_to_pf_req->msg.code; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c index 3b1f84502e36..befa9bcc2f2f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c @@ -5,9 +5,27 @@ #include "hclge_main.h" #include "hnae3.h" +static int hclge_ptp_get_cycle(struct hclge_dev *hdev) +{ + struct hclge_ptp *ptp = hdev->ptp; + + ptp->cycle.quo = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_QUO_REG) & + HCLGE_PTP_CYCLE_QUO_MASK; + ptp->cycle.numer = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_NUM_REG); + ptp->cycle.den = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_DEN_REG); + + if (ptp->cycle.den == 0) { + dev_err(&hdev->pdev->dev, "invalid ptp cycle denominator!\n"); + return -EINVAL; + } + + return 0; +} + static int hclge_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) { struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp); + struct hclge_ptp_cycle *cycle = &hdev->ptp->cycle; u64 adj_val, adj_base, diff; unsigned long flags; bool is_neg = false; @@ -18,7 +36,7 @@ static int hclge_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) is_neg = true; } - adj_base = HCLGE_PTP_CYCLE_ADJ_BASE * HCLGE_PTP_CYCLE_ADJ_UNIT; + adj_base = (u64)cycle->quo * (u64)cycle->den + (u64)cycle->numer; adj_val = adj_base * ppb; diff = div_u64(adj_val, 1000000000ULL); @@ -29,16 +47,16 @@ static int hclge_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) /* This clock cycle is defined by three parts: quotient, numerator * and denominator. For example, for 2.5 ns the quotient is 2, - * denominator is fixed to HCLGE_PTP_CYCLE_ADJ_UNIT, and numerator - * is 0.5 * HCLGE_PTP_CYCLE_ADJ_UNIT. + * denominator is fixed to ptp->cycle.den, and numerator + * is 0.5 * ptp->cycle.den. 
*/ - quo = div_u64_rem(adj_val, HCLGE_PTP_CYCLE_ADJ_UNIT, &numerator); + quo = div_u64_rem(adj_val, cycle->den, &numerator); spin_lock_irqsave(&hdev->ptp->lock, flags); - writel(quo, hdev->ptp->io_base + HCLGE_PTP_CYCLE_QUO_REG); + writel(quo & HCLGE_PTP_CYCLE_QUO_MASK, + hdev->ptp->io_base + HCLGE_PTP_CYCLE_QUO_REG); writel(numerator, hdev->ptp->io_base + HCLGE_PTP_CYCLE_NUM_REG); - writel(HCLGE_PTP_CYCLE_ADJ_UNIT, - hdev->ptp->io_base + HCLGE_PTP_CYCLE_DEN_REG); + writel(cycle->den, hdev->ptp->io_base + HCLGE_PTP_CYCLE_DEN_REG); writel(HCLGE_PTP_CYCLE_ADJ_EN, hdev->ptp->io_base + HCLGE_PTP_CYCLE_CFG_REG); spin_unlock_irqrestore(&hdev->ptp->lock, flags); @@ -475,6 +493,10 @@ int hclge_ptp_init(struct hclge_dev *hdev) ret = hclge_ptp_create_clock(hdev); if (ret) return ret; + + ret = hclge_ptp_get_cycle(hdev); + if (ret) + return ret; } ret = hclge_ptp_int_en(hdev, true); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h index 5a202b775471..dbf5f4c08019 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h @@ -29,6 +29,7 @@ #define HCLGE_PTP_TIME_ADJ_REG 0x60 #define HCLGE_PTP_TIME_ADJ_EN BIT(0) #define HCLGE_PTP_CYCLE_QUO_REG 0x64 +#define HCLGE_PTP_CYCLE_QUO_MASK GENMASK(7, 0) #define HCLGE_PTP_CYCLE_DEN_REG 0x68 #define HCLGE_PTP_CYCLE_NUM_REG 0x6C #define HCLGE_PTP_CYCLE_CFG_REG 0x70 @@ -37,9 +38,7 @@ #define HCLGE_PTP_CUR_TIME_SEC_L_REG 0x78 #define HCLGE_PTP_CUR_TIME_NSEC_REG 0x7C -#define HCLGE_PTP_CYCLE_ADJ_BASE 2 #define HCLGE_PTP_CYCLE_ADJ_MAX 500000000 -#define HCLGE_PTP_CYCLE_ADJ_UNIT 100000000 #define HCLGE_PTP_SEC_H_OFFSET 32u #define HCLGE_PTP_SEC_L_MASK GENMASK(31, 0) @@ -47,6 +46,12 @@ #define HCLGE_PTP_FLAG_TX_EN 1 #define HCLGE_PTP_FLAG_RX_EN 2 +struct hclge_ptp_cycle { + u32 quo; + u32 numer; + u32 den; +}; + struct hclge_ptp { struct hclge_dev *hdev; struct ptp_clock *clock; @@ -58,6 +63,7 @@ struct hclge_ptp { spinlock_t lock; /* protects ptp registers */ u32 ptp_cfg; u32 last_tx_seqid; + struct hclge_ptp_cycle cycle; unsigned long tx_start; unsigned long tx_cnt; unsigned long tx_skipped; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 52eaf82b7cd7..8784d61e833f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -2641,6 +2641,16 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) { + struct hnae3_handle *nic = &hdev->nic; + int ret; + + ret = hclgevf_en_hw_strip_rxvtag(nic, true); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to enable rx vlan offload, ret = %d\n", ret); + return ret; + } + return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0, false); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index 9b17735b9f4c..772b2f8acd2e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -13,6 +13,7 @@ static int hclgevf_resp_to_errno(u16 resp_code) return resp_code ? 
-resp_code : 0; } +#define HCLGEVF_MBX_MATCH_ID_START 1 static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev) { /* this function should be called with mbx_resp.mbx_mutex held @@ -21,6 +22,10 @@ static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev) hdev->mbx_resp.received_resp = false; hdev->mbx_resp.origin_mbx_msg = 0; hdev->mbx_resp.resp_status = 0; + hdev->mbx_resp.match_id++; + /* Update match_id and ensure the value of match_id is not zero */ + if (hdev->mbx_resp.match_id == 0) + hdev->mbx_resp.match_id = HCLGEVF_MBX_MATCH_ID_START; memset(hdev->mbx_resp.additional_info, 0, HCLGE_MBX_MAX_RESP_DATA_SIZE); } @@ -115,6 +120,7 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, if (need_resp) { mutex_lock(&hdev->mbx_resp.mbx_mutex); hclgevf_reset_mbx_resp_status(hdev); + req->match_id = hdev->mbx_resp.match_id; status = hclgevf_cmd_send(&hdev->hw, &desc, 1); if (status) { dev_err(&hdev->pdev->dev, @@ -211,6 +217,19 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) resp->additional_info[i] = *temp; temp++; } + + /* If match_id is not zero, it means the PF supports + * match_id. If the match_id is right, the VF got the + * right response; otherwise ignore the response. + * The driver will clear hdev->mbx_resp when sending the + * next message which needs a response. + */ + if (req->match_id) { + if (req->match_id == resp->match_id) + resp->received_resp = true; + } else { + resp->received_resp = true; + } break; case HCLGE_MBX_LINK_STAT_CHANGE: case HCLGE_MBX_ASSERTING_RESET: diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 374a75d4faea..a775c69e4fd7 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1731,7 +1731,6 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_send_failed++; tx_dropped++; ret = NETDEV_TX_OK; - ibmvnic_tx_scrq_flush(adapter, tx_scrq); goto out; } @@ -1753,6 +1752,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) dev_kfree_skb_any(skb); tx_send_failed++; tx_dropped++; + ibmvnic_tx_scrq_flush(adapter, tx_scrq); ret = NETDEV_TX_OK; goto out; } @@ -2420,9 +2420,10 @@ out: static void __ibmvnic_reset(struct work_struct *work) { - struct ibmvnic_rwi *rwi; struct ibmvnic_adapter *adapter; bool saved_state = false; + struct ibmvnic_rwi *tmprwi; + struct ibmvnic_rwi *rwi; unsigned long flags; u32 reset_state; int rc = 0; @@ -2489,7 +2490,7 @@ static void __ibmvnic_reset(struct work_struct *work) } else { rc = do_reset(adapter, rwi, reset_state); } - kfree(rwi); + tmprwi = rwi; adapter->last_reset_time = jiffies; if (rc) @@ -2497,8 +2498,23 @@ static void __ibmvnic_reset(struct work_struct *work) rwi = get_next_rwi(adapter); + /* + * If there is another reset queued, free the previous rwi + * and process the new reset even if previous reset failed + * (the previous reset could have failed because of a fail + * over for instance, so process the fail over). + * + * If there are no resets queued and the previous reset failed, + * the adapter would be in an undefined state. So retry the + * previous reset as a hard reset. 
+ */ + if (rwi) + kfree(tmprwi); + else if (rc) + rwi = tmprwi; + if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER || - rwi->reset_reason == VNIC_RESET_MOBILITY)) + rwi->reset_reason == VNIC_RESET_MOBILITY || rc)) adapter->force_reset_recovery = true; } diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index d150dade06cf..757a54c39eef 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -7664,6 +7664,7 @@ err_flashmap: err_ioremap: free_netdev(netdev); err_alloc_etherdev: + pci_disable_pcie_error_reporting(pdev); pci_release_mem_regions(pdev); err_pci_reg: err_dma: diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index dbcae92bb18d..adfa2768f024 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -2227,6 +2227,7 @@ err_sw_init: err_ioremap: free_netdev(netdev); err_alloc_netdev: + pci_disable_pcie_error_reporting(pdev); pci_release_mem_regions(pdev); err_pci_reg: err_dma: diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 3e822bad4851..2c9e4eeb7270 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -980,7 +980,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, default: /* if we got here and link is up something bad is afoot */ netdev_info(netdev, - "WARNING: Link is up but PHY type 0x%x is not recognized.\n", + "WARNING: Link is up but PHY type 0x%x is not recognized, or incorrect cable is in use\n", hw_link_info->phy_type); } @@ -5294,6 +5294,10 @@ flags_complete: dev_warn(&pf->pdev->dev, "Device configuration forbids SW from starting the LLDP agent.\n"); return -EINVAL; + case I40E_AQ_RC_EAGAIN: + dev_warn(&pf->pdev->dev, + "Stop FW LLDP agent command is still being processed, please try again in a second.\n"); + return -EBUSY; default: dev_warn(&pf->pdev->dev, "Starting FW LLDP agent failed: error: %s, %s\n", diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 861e59a350bd..1d1f52756a93 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -4454,11 +4454,10 @@ int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q, } /** - * i40e_vsi_control_tx - Start or stop a VSI's rings + * i40e_vsi_enable_tx - Start a VSI's rings * @vsi: the VSI being configured - * @enable: start or stop the rings **/ -static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) +static int i40e_vsi_enable_tx(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; int i, pf_q, ret = 0; @@ -4467,7 +4466,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q, - false /*is xdp*/, enable); + false /*is xdp*/, true); if (ret) break; @@ -4476,7 +4475,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q + vsi->alloc_queue_pairs, - true /*is xdp*/, enable); + true /*is xdp*/, true); if (ret) break; } @@ -4574,32 +4573,25 @@ int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable) } /** - * i40e_vsi_control_rx - Start or stop a VSI's rings + * i40e_vsi_enable_rx - Start a VSI's rings * @vsi: the VSI being configured - * @enable: start or stop 
the rings **/ -static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable) +static int i40e_vsi_enable_rx(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; int i, pf_q, ret = 0; pf_q = vsi->base_queue; for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { - ret = i40e_control_wait_rx_q(pf, pf_q, enable); + ret = i40e_control_wait_rx_q(pf, pf_q, true); if (ret) { dev_info(&pf->pdev->dev, - "VSI seid %d Rx ring %d %sable timeout\n", - vsi->seid, pf_q, (enable ? "en" : "dis")); + "VSI seid %d Rx ring %d enable timeout\n", + vsi->seid, pf_q); break; } } - /* Due to HW errata, on Rx disable only, the register can indicate done - * before it really is. Needs 50ms to be sure - */ - if (!enable) - mdelay(50); - return ret; } @@ -4612,29 +4604,47 @@ int i40e_vsi_start_rings(struct i40e_vsi *vsi) int ret = 0; /* do rx first for enable and last for disable */ - ret = i40e_vsi_control_rx(vsi, true); + ret = i40e_vsi_enable_rx(vsi); if (ret) return ret; - ret = i40e_vsi_control_tx(vsi, true); + ret = i40e_vsi_enable_tx(vsi); return ret; } +#define I40E_DISABLE_TX_GAP_MSEC 50 + /** * i40e_vsi_stop_rings - Stop a VSI's rings * @vsi: the VSI being configured **/ void i40e_vsi_stop_rings(struct i40e_vsi *vsi) { + struct i40e_pf *pf = vsi->back; + int pf_q, err, q_end; + /* When port TX is suspended, don't wait */ if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state)) return i40e_vsi_stop_rings_no_wait(vsi); - /* do rx first for enable and last for disable - * Ignore return value, we need to shutdown whatever we can - */ - i40e_vsi_control_tx(vsi, false); - i40e_vsi_control_rx(vsi, false); + q_end = vsi->base_queue + vsi->num_queue_pairs; + for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) + i40e_pre_tx_queue_cfg(&pf->hw, (u32)pf_q, false); + + for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) { + err = i40e_control_wait_rx_q(pf, pf_q, false); + if (err) + dev_info(&pf->pdev->dev, + "VSI seid %d Rx ring %d disable timeout\n", + vsi->seid, pf_q); + } + + msleep(I40E_DISABLE_TX_GAP_MSEC); + pf_q = vsi->base_queue; + for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) + wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0); + + i40e_vsi_wait_queues_disabled(vsi); } /** @@ -7280,6 +7290,8 @@ static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi, } if (vsi->num_queue_pairs < (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) { + dev_err(&vsi->back->pdev->dev, + "Failed to create traffic channel, insufficient number of queues.\n"); return -EINVAL; } if (sum_max_rate > i40e_get_link_speed(vsi)) { @@ -13261,6 +13273,7 @@ static const struct net_device_ops i40e_netdev_ops = { .ndo_poll_controller = i40e_netpoll, #endif .ndo_setup_tc = __i40e_setup_tc, + .ndo_select_queue = i40e_lan_select_queue, .ndo_set_features = i40e_set_features, .ndo_set_vf_mac = i40e_ndo_set_vf_mac, .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan, diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 38eb8151ee9a..10a83e5385c7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -3631,6 +3631,55 @@ dma_error: return -1; } +static u16 i40e_swdcb_skb_tx_hash(struct net_device *dev, + const struct sk_buff *skb, + u16 num_tx_queues) +{ + u32 jhash_initval_salt = 0xd631614b; + u32 hash; + + if (skb->sk && skb->sk->sk_hash) + hash = skb->sk->sk_hash; + else + hash = (__force u16)skb->protocol ^ skb->hash; + + hash = jhash_1word(hash, jhash_initval_salt); + + return (u16)(((u64)hash * num_tx_queues) >> 32); +} + +u16 
i40e_lan_select_queue(struct net_device *netdev, + struct sk_buff *skb, + struct net_device __always_unused *sb_dev) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_hw *hw; + u16 qoffset; + u16 qcount; + u8 tclass; + u16 hash; + u8 prio; + + /* is DCB enabled at all? */ + if (vsi->tc_config.numtc == 1) + return netdev_pick_tx(netdev, skb, sb_dev); + + prio = skb->priority; + hw = &vsi->back->hw; + tclass = hw->local_dcbx_config.etscfg.prioritytable[prio]; + /* sanity check */ + if (unlikely(!(vsi->tc_config.enabled_tc & BIT(tclass)))) + tclass = 0; + + /* select a queue assigned for the given TC */ + qcount = vsi->tc_config.tc_info[tclass].qcount; + hash = i40e_swdcb_skb_tx_hash(netdev, skb, qcount); + + qoffset = vsi->tc_config.tc_info[tclass].qoffset; + return qoffset + hash; +} + /** * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring * @xdpf: data to transmit diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 86fed05b4f19..bfc2845c99d1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -451,6 +451,8 @@ static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring) bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count); netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev); +u16 i40e_lan_select_queue(struct net_device *netdev, struct sk_buff *skb, + struct net_device *sb_dev); void i40e_clean_tx_ring(struct i40e_ring *tx_ring); void i40e_clean_rx_ring(struct i40e_ring *rx_ring); int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring); diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index e8bd04100ecd..90793b36126e 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -136,6 +136,7 @@ struct iavf_q_vector { struct iavf_mac_filter { struct list_head list; u8 macaddr[ETH_ALEN]; + bool is_new_mac; /* filter is new, wait for PF decision */ bool remove; /* filter needs to be removed */ bool add; /* filter needs to be added */ }; diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index e612c24fa384..606a01ce4073 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -751,6 +751,7 @@ struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter, list_add_tail(&f->list, &adapter->mac_filter_list); f->add = true; + f->is_new_mac = true; adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; } else { f->remove = false; @@ -1506,11 +1507,6 @@ static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter) set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); iavf_map_rings_to_vectors(adapter); - - if (RSS_AQ(adapter)) - adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; - else - err = iavf_init_rss(adapter); err: return err; } @@ -2200,6 +2196,14 @@ continue_reset: goto reset_err; } + if (RSS_AQ(adapter)) { + adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; + } else { + err = iavf_init_rss(adapter); + if (err) + goto reset_err; + } + adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG; adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS; @@ -3798,6 +3802,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err_ioremap: free_netdev(netdev); err_alloc_etherdev: + pci_disable_pcie_error_reporting(pdev); pci_release_regions(pdev); err_pci_reg: err_dma: 
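The new .ndo_select_queue callback above first maps the skb's priority to a traffic class and then spreads flows across that class's qcount queues with i40e_swdcb_skb_tx_hash(). The interesting detail is the final step: instead of hash % num_tx_queues, the driver computes ((u64)hash * num_tx_queues) >> 32, a widening multiply that scales the 32-bit hash evenly onto [0, num_tx_queues) without a division. A minimal stand-alone C sketch of just that scaling trick (the queue count and sample hashes below are illustrative values, not taken from the patch):

#include <stdint.h>
#include <stdio.h>

static uint16_t map_hash_to_queue(uint32_t hash, uint16_t num_queues)
{
	/* High 32 bits of hash * num_queues: an even scaling of the
	 * hash into [0, num_queues), with no modulo bias and no
	 * division instruction.
	 */
	return (uint16_t)(((uint64_t)hash * num_queues) >> 32);
}

int main(void)
{
	uint16_t qcount = 8;	/* example queue count of one traffic class */
	uint32_t samples[] = { 0x00000000u, 0x40000000u, 0x80000000u,
			       0xc0000000u, 0xffffffffu };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("hash 0x%08x -> queue %u\n", samples[i],
		       map_hash_to_queue(samples[i], qcount));
	return 0;
}

With 8 queues the sample hashes land on queues 0, 2, 4, 6 and 7; the result is always below num_queues, so no bounds check is needed on the returned index.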
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 0eab3c43bdc5..3c735968e1b8 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -541,6 +541,47 @@ void iavf_del_ether_addrs(struct iavf_adapter *adapter) } /** + * iavf_mac_add_ok + * @adapter: adapter structure + * + * Submit list of filters based on PF response. + **/ +static void iavf_mac_add_ok(struct iavf_adapter *adapter) +{ + struct iavf_mac_filter *f, *ftmp; + + spin_lock_bh(&adapter->mac_vlan_list_lock); + list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { + f->is_new_mac = false; + } + spin_unlock_bh(&adapter->mac_vlan_list_lock); +} + +/** + * iavf_mac_add_reject + * @adapter: adapter structure + * + * Remove filters from list based on PF response. + **/ +static void iavf_mac_add_reject(struct iavf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct iavf_mac_filter *f, *ftmp; + + spin_lock_bh(&adapter->mac_vlan_list_lock); + list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { + if (f->remove && ether_addr_equal(f->macaddr, netdev->dev_addr)) + f->remove = false; + + if (f->is_new_mac) { + list_del(&f->list); + kfree(f); + } + } + spin_unlock_bh(&adapter->mac_vlan_list_lock); +} + +/** * iavf_add_vlans * @adapter: adapter structure * @@ -1492,6 +1533,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, case VIRTCHNL_OP_ADD_ETH_ADDR: dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n", iavf_stat_str(&adapter->hw, v_retval)); + iavf_mac_add_reject(adapter); /* restore administratively set MAC address */ ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); break; @@ -1639,10 +1681,11 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, } } switch (v_opcode) { - case VIRTCHNL_OP_ADD_ETH_ADDR: { + case VIRTCHNL_OP_ADD_ETH_ADDR: + if (!v_retval) + iavf_mac_add_ok(adapter); if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr)) ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); - } break; case VIRTCHNL_OP_GET_STATS: { struct iavf_eth_stats *stats = diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index a450343fbb92..eadcb9958346 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -234,6 +234,7 @@ enum ice_pf_state { ICE_VFLR_EVENT_PENDING, ICE_FLTR_OVERFLOW_PROMISC, ICE_VF_DIS, + ICE_VF_DEINIT_IN_PROGRESS, ICE_CFG_BUSY, ICE_SERVICE_SCHED, ICE_SERVICE_DIS, diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index ef8d1815af56..fe2ded775f25 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -191,6 +191,14 @@ static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr) struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; + /* Under some circumstances, we might receive a request to delete our + * own device address from our uc list. Because we store the device + * address in the VSI's MAC filter list, we need to ignore such + * requests and not delete our device address from this list. 
+ */ + if (ether_addr_equal(addr, netdev->dev_addr)) + return 0; + if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr, ICE_FWD_TO_VSI)) return -EINVAL; @@ -4194,6 +4202,11 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) struct ice_hw *hw; int i, err; + if (pdev->is_virtfn) { + dev_err(dev, "can't probe a virtual function\n"); + return -EINVAL; + } + /* this driver uses devres, see * Documentation/driver-api/driver-model/devres.rst */ @@ -5119,7 +5132,7 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi) return -EADDRNOTAVAIL; if (ether_addr_equal(netdev->dev_addr, mac)) { - netdev_warn(netdev, "already using mac %pM\n", mac); + netdev_dbg(netdev, "already using mac %pM\n", mac); return 0; } @@ -5130,6 +5143,7 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi) return -EBUSY; } + netif_addr_lock_bh(netdev); /* Clean up old MAC filter. Not an error if old filter doesn't exist */ status = ice_fltr_remove_mac(vsi, netdev->dev_addr, ICE_FWD_TO_VSI); if (status && status != ICE_ERR_DOES_NOT_EXIST) { @@ -5139,30 +5153,28 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi) /* Add filter for new MAC. If filter exists, return success */ status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI); - if (status == ICE_ERR_ALREADY_EXISTS) { + if (status == ICE_ERR_ALREADY_EXISTS) /* Although this MAC filter is already present in hardware it's * possible in some cases (e.g. bonding) that dev_addr was * modified outside of the driver and needs to be restored back * to this value. */ - memcpy(netdev->dev_addr, mac, netdev->addr_len); netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac); - return 0; - } - - /* error if the new filter addition failed */ - if (status) + else if (status) + /* error if the new filter addition failed */ err = -EADDRNOTAVAIL; err_update_filters: if (err) { netdev_err(netdev, "can't set MAC %pM. 
filter update failed\n", mac); + netif_addr_unlock_bh(netdev); return err; } /* change the netdev's MAC address */ memcpy(netdev->dev_addr, mac, netdev->addr_len); + netif_addr_unlock_bh(netdev); netdev_dbg(vsi->netdev, "updated MAC address to %pM\n", netdev->dev_addr); diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index 5d5207b56ca9..9e3ddb9b8b51 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -656,7 +656,7 @@ static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan, * maintaining phase */ if (start_time < current_time) - start_time = div64_u64(current_time + NSEC_PER_MSEC - 1, + start_time = div64_u64(current_time + NSEC_PER_SEC - 1, NSEC_PER_SEC) * NSEC_PER_SEC + phase; start_time -= E810_OUT_PROP_DELAY_NS; diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 2826570dab51..e93430ab37f1 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -615,6 +615,8 @@ void ice_free_vfs(struct ice_pf *pf) struct ice_hw *hw = &pf->hw; unsigned int tmp, i; + set_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state); + if (!pf->vf) return; @@ -680,6 +682,7 @@ void ice_free_vfs(struct ice_pf *pf) i); clear_bit(ICE_VF_DIS, pf->state); + clear_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state); clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags); } @@ -4415,6 +4418,10 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) struct device *dev; int err = 0; + /* if de-init is underway, don't process messages from VF */ + if (test_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state)) + return; + dev = ice_pf_to_dev(pf); if (ice_validate_vf_id(pf, vf_id)) { err = -EINVAL; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 7e6435dc7e80..171a7a629b20 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -931,6 +931,7 @@ static void igb_configure_msix(struct igb_adapter *adapter) **/ static int igb_request_msix(struct igb_adapter *adapter) { + unsigned int num_q_vectors = adapter->num_q_vectors; struct net_device *netdev = adapter->netdev; int i, err = 0, vector = 0, free_vector = 0; @@ -939,7 +940,13 @@ static int igb_request_msix(struct igb_adapter *adapter) if (err) goto err_out; - for (i = 0; i < adapter->num_q_vectors; i++) { + if (num_q_vectors > MAX_Q_VECTORS) { + num_q_vectors = MAX_Q_VECTORS; + dev_warn(&adapter->pdev->dev, + "The number of queue vectors (%d) is higher than max allowed (%d)\n", + adapter->num_q_vectors, MAX_Q_VECTORS); + } + for (i = 0; i < num_q_vectors; i++) { struct igb_q_vector *q_vector = adapter->q_vector[i]; vector++; @@ -1678,14 +1685,15 @@ static bool is_any_txtime_enabled(struct igb_adapter *adapter) **/ static void igb_config_tx_modes(struct igb_adapter *adapter, int queue) { - struct igb_ring *ring = adapter->tx_ring[queue]; struct net_device *netdev = adapter->netdev; struct e1000_hw *hw = &adapter->hw; + struct igb_ring *ring; u32 tqavcc, tqavctrl; u16 value; WARN_ON(hw->mac.type != e1000_i210); WARN_ON(queue < 0 || queue > 1); + ring = adapter->tx_ring[queue]; /* If any of the Qav features is enabled, configure queues as SR and * with HIGH PRIO. 
If none is, then configure them with LOW PRIO and @@ -3615,6 +3623,7 @@ err_sw_init: err_ioremap: free_netdev(netdev); err_alloc_etherdev: + pci_disable_pcie_error_reporting(pdev); pci_release_mem_regions(pdev); err_pci_reg: err_dma: @@ -4835,6 +4844,8 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring) DMA_TO_DEVICE); } + tx_buffer->next_to_watch = NULL; + /* move us one more past the eop_desc for start of next pkt */ tx_buffer++; i++; diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 9e0bbb2e55e3..5901ed9fb545 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -578,7 +578,7 @@ static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data) if (hw->phy.ops.read_reg) return hw->phy.ops.read_reg(hw, offset, data); - return 0; + return -EOPNOTSUPP; } void igc_reinit_locked(struct igc_adapter *); diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 95323095094d..e29aadbc6744 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -232,6 +232,8 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring) igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); } + tx_buffer->next_to_watch = NULL; + /* move us one more past the eop_desc for start of next pkt */ tx_buffer++; i++; @@ -6054,6 +6056,7 @@ err_sw_init: err_ioremap: free_netdev(netdev); err_alloc_etherdev: + pci_disable_pcie_error_reporting(pdev); pci_release_mem_regions(pdev); err_pci_reg: err_dma: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index ffff69efd78a..14aea40da50f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1825,7 +1825,8 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, struct sk_buff *skb) { if (ring_uses_build_skb(rx_ring)) { - unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK; + unsigned long mask = (unsigned long)ixgbe_rx_pg_size(rx_ring) - 1; + unsigned long offset = (unsigned long)(skb->data) & mask; dma_sync_single_range_for_cpu(rx_ring->dev, IXGBE_CB(skb)->dma, @@ -11067,6 +11068,7 @@ err_ioremap: disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); free_netdev(netdev); err_alloc_etherdev: + pci_disable_pcie_error_reporting(pdev); pci_release_mem_regions(pdev); err_pci_reg: err_dma: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index 96dd1a4f956a..b1d22e4d5ec9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -52,8 +52,11 @@ static int ixgbe_xsk_pool_enable(struct ixgbe_adapter *adapter, /* Kick start the NAPI context so that receiving will start */ err = ixgbe_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX); - if (err) + if (err) { + clear_bit(qid, adapter->af_xdp_zc_qps); + xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR); return err; + } } return 0; diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c index caaea2c920a6..e3e4676af9e4 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c +++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c @@ -211,7 +211,7 @@ struct xfrm_state *ixgbevf_ipsec_find_rx_state(struct ixgbevf_ipsec *ipsec, static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs, u32 *mykey, u32 *mysalt) { - struct net_device *dev = xs->xso.dev; + struct net_device *dev 
= xs->xso.real_dev; unsigned char *key_data; char *alg_name = NULL; int key_len; @@ -260,12 +260,15 @@ static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs, **/ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs) { - struct net_device *dev = xs->xso.dev; - struct ixgbevf_adapter *adapter = netdev_priv(dev); - struct ixgbevf_ipsec *ipsec = adapter->ipsec; + struct net_device *dev = xs->xso.real_dev; + struct ixgbevf_adapter *adapter; + struct ixgbevf_ipsec *ipsec; u16 sa_idx; int ret; + adapter = netdev_priv(dev); + ipsec = adapter->ipsec; + if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) { netdev_err(dev, "Unsupported protocol 0x%04x for IPsec offload\n", xs->id.proto); @@ -383,11 +386,14 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs) **/ static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs) { - struct net_device *dev = xs->xso.dev; - struct ixgbevf_adapter *adapter = netdev_priv(dev); - struct ixgbevf_ipsec *ipsec = adapter->ipsec; + struct net_device *dev = xs->xso.real_dev; + struct ixgbevf_adapter *adapter; + struct ixgbevf_ipsec *ipsec; u16 sa_idx; + adapter = netdev_priv(dev); + ipsec = adapter->ipsec; + if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 361bc4fbe20b..76a7777c746d 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -2299,19 +2299,19 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp, skb_frag_off_set(frag, pp->rx_offset_correction); skb_frag_size_set(frag, data_len); __skb_frag_set_page(frag, page); - - /* last fragment */ - if (len == *size) { - struct skb_shared_info *sinfo; - - sinfo = xdp_get_shared_info_from_buff(xdp); - sinfo->nr_frags = xdp_sinfo->nr_frags; - memcpy(sinfo->frags, xdp_sinfo->frags, - sinfo->nr_frags * sizeof(skb_frag_t)); - } } else { page_pool_put_full_page(rxq->page_pool, page, true); } + + /* last fragment */ + if (len == *size) { + struct skb_shared_info *sinfo; + + sinfo = xdp_get_shared_info_from_buff(xdp); + sinfo->nr_frags = xdp_sinfo->nr_frags; + memcpy(sinfo->frags, xdp_sinfo->frags, + sinfo->nr_frags * sizeof(skb_frag_t)); + } *size -= len; } diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index b9fbc9f000f2..cf8acabb90ac 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -938,7 +938,7 @@ enum mvpp22_ptp_packet_format { #define MVPP2_BM_COOKIE_POOL_OFFS 8 #define MVPP2_BM_COOKIE_CPU_OFFS 24 -#define MVPP2_BM_SHORT_FRAME_SIZE 704 /* frame size 128 */ +#define MVPP2_BM_SHORT_FRAME_SIZE 736 /* frame size 128 */ #define MVPP2_BM_LONG_FRAME_SIZE 2240 /* frame size 1664 */ #define MVPP2_BM_JUMBO_FRAME_SIZE 10432 /* frame size 9856 */ /* BM short pool packet size diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile index 1a3455620b38..cc8ac36cf687 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile @@ -10,4 +10,4 @@ obj-$(CONFIG_OCTEONTX2_AF) += rvu_af.o rvu_mbox-y := mbox.o rvu_trace.o rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \ rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \ - rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o + rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c 
b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index fac6474ad694..544c96c8fe1d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -86,6 +86,22 @@ bool is_lmac_valid(struct cgx *cgx, int lmac_id) return test_bit(lmac_id, &cgx->lmac_bmap); } +/* Helper function to get sequential index + * given the enabled LMAC of a CGX + */ +static int get_sequence_id_of_lmac(struct cgx *cgx, int lmac_id) +{ + int tmp, id = 0; + + for_each_set_bit(tmp, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) { + if (tmp == lmac_id) + break; + id++; + } + + return id; +} + struct mac_ops *get_mac_ops(void *cgxd) { if (!cgxd) @@ -211,37 +227,257 @@ static u64 mac2u64 (u8 *mac_addr) return mac; } +static void cfg2mac(u64 cfg, u8 *mac_addr) +{ + int i, index = 0; + + for (i = ETH_ALEN - 1; i >= 0; i--, index++) + mac_addr[i] = (cfg >> (8 * index)) & 0xFF; +} + int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr) { struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); struct mac_ops *mac_ops; + int index, id; u64 cfg; + /* access mac_ops to know csr_offset */ mac_ops = cgx_dev->mac_ops; + /* copy 6bytes from macaddr */ /* memcpy(&cfg, mac_addr, 6); */ cfg = mac2u64 (mac_addr); - cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (lmac_id * 0x8)), + id = get_sequence_id_of_lmac(cgx_dev, lmac_id); + + index = id * lmac->mac_to_index_bmap.max; + + cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49)); cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); - cfg |= CGX_DMAC_CTL0_CAM_ENABLE; + cfg |= (CGX_DMAC_CTL0_CAM_ENABLE | CGX_DMAC_BCAST_MODE | + CGX_DMAC_MCAST_MODE); cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); return 0; } +u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id) +{ + struct mac_ops *mac_ops; + struct cgx *cgx = cgxd; + + if (!cgxd || !is_lmac_valid(cgxd, lmac_id)) + return 0; + + cgx = cgxd; + /* Get mac_ops to know csr offset */ + mac_ops = cgx->mac_ops; + + return cgx_read(cgxd, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); +} + +u64 cgx_read_dmac_entry(void *cgxd, int index) +{ + struct mac_ops *mac_ops; + struct cgx *cgx; + + if (!cgxd) + return 0; + + cgx = cgxd; + mac_ops = cgx->mac_ops; + return cgx_read(cgx, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 8))); +} + +int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr) +{ + struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); + struct mac_ops *mac_ops; + int index, idx; + u64 cfg = 0; + int id; + + if (!lmac) + return -ENODEV; + + mac_ops = cgx_dev->mac_ops; + /* Get available index where entry is to be installed */ + idx = rvu_alloc_rsrc(&lmac->mac_to_index_bmap); + if (idx < 0) + return idx; + + id = get_sequence_id_of_lmac(cgx_dev, lmac_id); + + index = id * lmac->mac_to_index_bmap.max + idx; + + cfg = mac2u64 (mac_addr); + cfg |= CGX_DMAC_CAM_ADDR_ENABLE; + cfg |= ((u64)lmac_id << 49); + cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg); + + cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); + cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_CAM_ACCEPT); + + if (is_multicast_ether_addr(mac_addr)) { + cfg &= ~GENMASK_ULL(2, 1); + cfg |= CGX_DMAC_MCAST_MODE_CAM; + lmac->mcast_filters_count++; + } else if (!lmac->mcast_filters_count) { + cfg |= CGX_DMAC_MCAST_MODE; + } + + cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); + + return idx; +} + +int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id) +{ + struct cgx *cgx_dev = 
cgx_get_pdata(cgx_id); + struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); + struct mac_ops *mac_ops; + u8 index = 0, id; + u64 cfg; + + if (!lmac) + return -ENODEV; + + mac_ops = cgx_dev->mac_ops; + /* Restore index 0 to its default init value as done during + * cgx_lmac_init + */ + set_bit(0, lmac->mac_to_index_bmap.bmap); + + id = get_sequence_id_of_lmac(cgx_dev, lmac_id); + + index = id * lmac->mac_to_index_bmap.max + index; + cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0); + + /* Reset CGXX_CMRX_RX_DMAC_CTL0 register to default state */ + cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); + cfg &= ~CGX_DMAC_CAM_ACCEPT; + cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE); + cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); + + return 0; +} + +/* Allows caller to change macaddress associated with index + * in dmac filter table including index 0 reserved for + * interface mac address + */ +int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index) +{ + struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + struct mac_ops *mac_ops; + struct lmac *lmac; + u64 cfg; + int id; + + lmac = lmac_pdata(lmac_id, cgx_dev); + if (!lmac) + return -ENODEV; + + mac_ops = cgx_dev->mac_ops; + /* Validate the index */ + if (index >= lmac->mac_to_index_bmap.max) + return -EINVAL; + + /* ensure index is already set */ + if (!test_bit(index, lmac->mac_to_index_bmap.bmap)) + return -EINVAL; + + id = get_sequence_id_of_lmac(cgx_dev, lmac_id); + + index = id * lmac->mac_to_index_bmap.max + index; + + cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8))); + cfg &= ~CGX_RX_DMAC_ADR_MASK; + cfg |= mac2u64 (mac_addr); + + cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg); + return 0; +} + +int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index) +{ + struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); + struct mac_ops *mac_ops; + u8 mac[ETH_ALEN]; + u64 cfg; + int id; + + if (!lmac) + return -ENODEV; + + mac_ops = cgx_dev->mac_ops; + /* Validate the index */ + if (index >= lmac->mac_to_index_bmap.max) + return -EINVAL; + + /* Skip deletion for reserved index i.e. 
index 0 */ + if (index == 0) + return 0; + + rvu_free_rsrc(&lmac->mac_to_index_bmap, index); + + id = get_sequence_id_of_lmac(cgx_dev, lmac_id); + + index = id * lmac->mac_to_index_bmap.max + index; + + /* Read MAC address to check whether it is ucast or mcast */ + cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8))); + + cfg2mac(cfg, mac); + if (is_multicast_ether_addr(mac)) + lmac->mcast_filters_count--; + + if (!lmac->mcast_filters_count) { + cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); + cfg &= ~GENMASK_ULL(2, 1); + cfg |= CGX_DMAC_MCAST_MODE; + cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); + } + + cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0); + + return 0; +} + +int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id) +{ + struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); + + if (lmac) + return lmac->mac_to_index_bmap.max; + + return 0; +} + u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id) { struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); struct mac_ops *mac_ops; + int index; u64 cfg; + int id; mac_ops = cgx_dev->mac_ops; - cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8); + id = get_sequence_id_of_lmac(cgx_dev, lmac_id); + + index = id * lmac->mac_to_index_bmap.max; + + cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8); return cfg & CGX_RX_DMAC_ADR_MASK; } @@ -297,35 +533,51 @@ int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable) void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable) { struct cgx *cgx = cgx_get_pdata(cgx_id); + struct lmac *lmac = lmac_pdata(lmac_id, cgx); + u16 max_dmac = lmac->mac_to_index_bmap.max; struct mac_ops *mac_ops; + int index, i; u64 cfg = 0; + int id; if (!cgx) return; + id = get_sequence_id_of_lmac(cgx, lmac_id); + mac_ops = cgx->mac_ops; if (enable) { /* Enable promiscuous mode on LMAC */ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); - cfg &= ~(CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE); - cfg |= CGX_DMAC_BCAST_MODE; + cfg &= ~CGX_DMAC_CAM_ACCEPT; + cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE); cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); - cfg = cgx_read(cgx, 0, - (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8)); - cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE; - cgx_write(cgx, 0, - (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg); + for (i = 0; i < max_dmac; i++) { + index = id * max_dmac + i; + cfg = cgx_read(cgx, 0, + (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8)); + cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE; + cgx_write(cgx, 0, + (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8), cfg); + } } else { /* Disable promiscuous mode */ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE; cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); - cfg = cgx_read(cgx, 0, - (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8)); - cfg |= CGX_DMAC_CAM_ADDR_ENABLE; - cgx_write(cgx, 0, - (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg); + for (i = 0; i < max_dmac; i++) { + index = id * max_dmac + i; + cfg = cgx_read(cgx, 0, + (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8)); + if ((cfg & CGX_RX_DMAC_ADR_MASK) != 0) { + cfg |= CGX_DMAC_CAM_ADDR_ENABLE; + cgx_write(cgx, 0, + (CGXX_CMRX_RX_DMAC_CAM0 + + index * 0x8), + cfg); + } + } } } @@ -1234,6 +1486,15 @@ static int cgx_lmac_init(struct cgx *cgx) } lmac->cgx = cgx; + lmac->mac_to_index_bmap.max = + MAX_DMAC_ENTRIES_PER_CGX / cgx->lmac_count; + err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap); + if 
(err) + return err; + + /* Reserve first entry for default MAC address */ + set_bit(0, lmac->mac_to_index_bmap.bmap); + init_waitqueue_head(&lmac->wq_cmd_cmplt); mutex_init(&lmac->cmd_lock); spin_lock_init(&lmac->event_cb_lock); @@ -1243,8 +1504,8 @@ static int cgx_lmac_init(struct cgx *cgx) /* Add reference */ cgx->lmac_idmap[lmac->lmac_id] = lmac; - cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true); set_bit(lmac->lmac_id, &cgx->lmac_bmap); + cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true); } return cgx_lmac_verify_fwi_version(cgx); @@ -1274,6 +1535,7 @@ static int cgx_lmac_exit(struct cgx *cgx) continue; cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, false); cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, true); + kfree(lmac->mac_to_index_bmap.bmap); kfree(lmac->name); kfree(lmac); } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h index 12521262164a..237ba2b56210 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h @@ -23,6 +23,7 @@ #define CGX_ID_MASK 0x7 #define MAX_LMAC_PER_CGX 4 +#define MAX_DMAC_ENTRIES_PER_CGX 32 #define CGX_FIFO_LEN 65536 /* 64K for both Rx & Tx */ #define CGX_OFFSET(x) ((x) * MAX_LMAC_PER_CGX) @@ -46,10 +47,12 @@ #define CGXX_CMRX_RX_DMAC_CTL0 (0x1F8 + mac_ops->csr_offset) #define CGX_DMAC_CTL0_CAM_ENABLE BIT_ULL(3) #define CGX_DMAC_CAM_ACCEPT BIT_ULL(3) +#define CGX_DMAC_MCAST_MODE_CAM BIT_ULL(2) #define CGX_DMAC_MCAST_MODE BIT_ULL(1) #define CGX_DMAC_BCAST_MODE BIT_ULL(0) #define CGXX_CMRX_RX_DMAC_CAM0 (0x200 + mac_ops->csr_offset) #define CGX_DMAC_CAM_ADDR_ENABLE BIT_ULL(48) +#define CGX_DMAC_CAM_ENTRY_LMACID GENMASK_ULL(50, 49) #define CGXX_CMRX_RX_DMAC_CAM1 0x400 #define CGX_RX_DMAC_ADR_MASK GENMASK_ULL(47, 0) #define CGXX_CMRX_TX_STAT0 0x700 @@ -139,7 +142,11 @@ int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat); int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable); int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable); int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr); +int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id); u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id); +int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr); +int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index); +int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id); void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable); void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable); int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable); @@ -165,4 +172,7 @@ u8 cgx_get_lmacid(void *cgxd, u8 lmac_index); unsigned long cgx_get_lmac_bmap(void *cgxd); void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val); u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset); +int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index); +u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id); +u64 cgx_read_dmac_entry(void *cgxd, int index); #endif /* CGX_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h index 45706fd87120..a8b7b1c7a1d5 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h @@ -10,17 +10,19 @@ #include "rvu.h" #include "cgx.h" /** - * struct lmac + * struct lmac - per lmac locks and properties * @wq_cmd_cmplt: waitq to keep the process blocked until cmd completion * @cmd_lock: Lock 
to serialize the command interface * @resp: command response * @link_info: link related information + * @mac_to_index_bmap: Mac address to CGX table index mapping * @event_cb: callback for linkchange events * @event_cb_lock: lock for serializing callback with unregister - * @cmd_pend: flag set before new command is started - * flag cleared after command response is received * @cgx: parent cgx port + * @mcast_filters_count: Number of multicast filters installed * @lmac_id: lmac port id + * @cmd_pend: flag set before new command is started + * flag cleared after command response is received * @name: lmac port name */ struct lmac { @@ -29,12 +31,14 @@ struct lmac { struct mutex cmd_lock; u64 resp; struct cgx_link_user_info link_info; + struct rsrc_bmap mac_to_index_bmap; struct cgx_event_cb event_cb; /* lock for serializing callback with unregister */ spinlock_t event_cb_lock; - bool cmd_pend; struct cgx *cgx; + u8 mcast_filters_count; u8 lmac_id; + bool cmd_pend; char *name; }; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index 770d86262838..f5ec39de026a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -134,6 +134,8 @@ M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \ M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \ M(PTP_OP, 0x007, ptp_op, ptp_req, ptp_rsp) \ M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \ +M(LMTST_TBL_SETUP, 0x00a, lmtst_tbl_setup, lmtst_tbl_setup_req, \ + msg_rsp) \ M(SET_VF_PERM, 0x00b, set_vf_perm, set_vf_perm, msg_rsp) \ /* CGX mbox IDs (range 0x200 - 0x3FF) */ \ M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \ @@ -163,7 +165,15 @@ M(CGX_SET_LINK_MODE, 0x214, cgx_set_link_mode, cgx_set_link_mode_req,\ M(CGX_FEATURES_GET, 0x215, cgx_features_get, msg_req, \ cgx_features_info_msg) \ M(RPM_STATS, 0x216, rpm_stats, msg_req, rpm_stats_rsp) \ - /* NPA mbox IDs (range 0x400 - 0x5FF) */ \ +M(CGX_MAC_ADDR_ADD, 0x217, cgx_mac_addr_add, cgx_mac_addr_add_req, \ + cgx_mac_addr_add_rsp) \ +M(CGX_MAC_ADDR_DEL, 0x218, cgx_mac_addr_del, cgx_mac_addr_del_req, \ + msg_rsp) \ +M(CGX_MAC_MAX_ENTRIES_GET, 0x219, cgx_mac_max_entries_get, msg_req, \ + cgx_max_dmac_entries_get_rsp) \ +M(CGX_MAC_ADDR_RESET, 0x21A, cgx_mac_addr_reset, msg_req, msg_rsp) \ +M(CGX_MAC_ADDR_UPDATE, 0x21B, cgx_mac_addr_update, cgx_mac_addr_update_req, \ + msg_rsp) \ /* NPA mbox IDs (range 0x400 - 0x5FF) */ \ M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \ npa_lf_alloc_req, npa_lf_alloc_rsp) \ @@ -401,6 +411,38 @@ struct cgx_mac_addr_set_or_get { u8 mac_addr[ETH_ALEN]; }; +/* Structure for requesting the operation to + * add DMAC filter entry into CGX interface + */ +struct cgx_mac_addr_add_req { + struct mbox_msghdr hdr; + u8 mac_addr[ETH_ALEN]; +}; + +/* Structure for response against the operation to + * add DMAC filter entry into CGX interface + */ +struct cgx_mac_addr_add_rsp { + struct mbox_msghdr hdr; + u8 index; +}; + +/* Structure for requesting the operation to + * delete DMAC filter entry from CGX interface + */ +struct cgx_mac_addr_del_req { + struct mbox_msghdr hdr; + u8 index; +}; + +/* Structure for response against the operation to + * get maximum supported DMAC filter entries + */ +struct cgx_max_dmac_entries_get_rsp { + struct mbox_msghdr hdr; + u8 max_dmac_filters; +}; + struct cgx_link_user_info { uint64_t link_up:1; uint64_t full_duplex:1; @@ -499,6 +541,12 @@ struct cgx_set_link_mode_rsp { int status; }; +struct cgx_mac_addr_update_req 
{ + struct mbox_msghdr hdr; + u8 mac_addr[ETH_ALEN]; + u8 index; +}; + #define RVU_LMAC_FEAT_FC BIT_ULL(0) /* pause frames */ #define RVU_LMAC_FEAT_PTP BIT_ULL(1) /* precision time protocol */ #define RVU_MAC_VERSION BIT_ULL(2) @@ -1278,6 +1326,14 @@ struct set_vf_perm { u64 flags; }; +struct lmtst_tbl_setup_req { + struct mbox_msghdr hdr; + u16 base_pcifunc; + u8 use_local_lmt_region; + u64 lmt_iova; + u64 rsvd[4]; +}; + /* CPT mailbox error codes * Range 901 - 1000. */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h index 19bad9a59c8f..243cf8070e77 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h @@ -151,7 +151,10 @@ enum npc_kpu_lh_ltype { * Software assigns pkind for each incoming port such as CGX * Ethernet interfaces, LBK interfaces, etc. */ +#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_VLAN_EXDSA_PKIND + enum npc_pkind_type { + NPC_RX_LBK_PKIND = 0ULL, NPC_RX_VLAN_EXDSA_PKIND = 56ULL, NPC_RX_CHLEN24B_PKIND = 57ULL, NPC_RX_CPT_HDR_PKIND, diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index 0b092949d7ac..5fe277e354f7 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -391,8 +391,10 @@ void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf) /* Get numVFs attached to this PF and first HWVF */ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); - *numvfs = (cfg >> 12) & 0xFF; - *hwvf = cfg & 0xFFF; + if (numvfs) + *numvfs = (cfg >> 12) & 0xFF; + if (hwvf) + *hwvf = cfg & 0xFFF; } static int rvu_get_hwvf(struct rvu *rvu, int pcifunc) @@ -1314,7 +1316,7 @@ int rvu_mbox_handler_detach_resources(struct rvu *rvu, return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc); } -static int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc) +int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); int blkaddr = BLKADDR_NIX0, vf; @@ -2333,6 +2335,7 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc) rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW); rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO); rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA); + rvu_reset_lmt_map_tbl(rvu, pcifunc); rvu_detach_rsrcs(rvu, NULL, pcifunc); mutex_unlock(&rvu->flr_lock); } @@ -2858,6 +2861,12 @@ static int rvu_enable_sriov(struct rvu *rvu) if (!vfs) return 0; + /* LBK channel number 63 is used for switching packets between + * CGX mapped VFs. Hence limit LBK pairs till 62 only. + */ + if (vfs > 62) + vfs = 62; + /* Save VFs number for reference in VF interrupts handlers. * Since interrupts might start arriving during SRIOV enablement * ordinary API cannot be used to get number of enabled VFs. 
@@ -3000,6 +3009,8 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id) /* Initialize debugfs */ rvu_dbg_init(rvu); + mutex_init(&rvu->rswitch.switch_lock); + return 0; err_dl: rvu_unregister_dl(rvu); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index 9e5d9ba6f01e..91503fb2762c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -243,6 +243,7 @@ struct rvu_pfvf { u8 nix_blkaddr; /* BLKADDR_NIX0/1 assigned to this PF */ u8 nix_rx_intf; /* NIX0_RX/NIX1_RX interface to NPC */ u8 nix_tx_intf; /* NIX0_TX/NIX1_TX interface to NPC */ + u64 lmt_base_addr; /* Preserving the pcifunc's lmtst base addr */ unsigned long flags; }; @@ -414,6 +415,16 @@ struct npc_kpu_profile_adapter { size_t kpus; }; +#define RVU_SWITCH_LBK_CHAN 63 + +struct rvu_switch { + struct mutex switch_lock; /* Serialize flow installation */ + u32 used_entries; + u16 *entry2pcifunc; + u16 mode; + u16 start_entry; +}; + struct rvu { void __iomem *afreg_base; void __iomem *pfreg_base; @@ -444,6 +455,7 @@ struct rvu { /* CGX */ #define PF_CGXMAP_BASE 1 /* PF 0 is reserved for RVU PF */ + u16 cgx_mapped_vfs; /* maximum CGX mapped VFs */ u8 cgx_mapped_pfs; u8 cgx_cnt_max; /* CGX port count max */ u8 *pf2cgxlmac_map; /* pf to cgx_lmac map */ @@ -476,6 +488,9 @@ struct rvu { struct rvu_debugfs rvu_dbg; #endif struct rvu_devlink *rvu_dl; + + /* RVU switch implementation over NPC with DMAC rules */ + struct rvu_switch rswitch; }; static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val) @@ -656,6 +671,8 @@ void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable); int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start); int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id, int index, int rxtxflag, u64 *stat); +void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc); + /* NPA APIs */ int rvu_npa_init(struct rvu *rvu); void rvu_npa_freemem(struct rvu *rvu); @@ -688,6 +705,7 @@ int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw, struct nix_cn10k_aq_enq_req *aq_req, struct nix_cn10k_aq_enq_rsp *aq_rsp, u16 pcifunc, u8 ctype, u32 qidx); +int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc); /* NPC APIs */ int rvu_npc_init(struct rvu *rvu); @@ -741,6 +759,7 @@ void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature); u32 rvu_cgx_get_fifolen(struct rvu *rvu); void *rvu_first_cgx_pdata(struct rvu *rvu); +int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id); int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf, int type); @@ -754,6 +773,9 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot); int rvu_set_channels_base(struct rvu *rvu); void rvu_program_channels(struct rvu *rvu); +/* CN10K RVU - LMT */ +void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc); + #ifdef CONFIG_DEBUG_FS void rvu_dbg_init(struct rvu *rvu); void rvu_dbg_exit(struct rvu *rvu); @@ -761,4 +783,10 @@ void rvu_dbg_exit(struct rvu *rvu); static inline void rvu_dbg_init(struct rvu *rvu) {} static inline void rvu_dbg_exit(struct rvu *rvu) {} #endif + +/* RVU Switch */ +void rvu_switch_enable(struct rvu *rvu); +void rvu_switch_disable(struct rvu *rvu); +void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc); + #endif /* RVU_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c 
b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index 6e2bf4fcd29c..fe99ac4a4dd8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -63,7 +63,7 @@ static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id) return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id]; } -static int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id) +int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id) { unsigned long pfmap; @@ -126,6 +126,7 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu) unsigned long lmac_bmap; int size, free_pkind; int cgx, lmac, iter; + int numvfs, hwvfs; if (!cgx_cnt_max) return 0; @@ -166,6 +167,8 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu) pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16; rvu_map_cgx_nix_block(rvu, pf, cgx, lmac); rvu->cgx_mapped_pfs++; + rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs); + rvu->cgx_mapped_vfs += numvfs; pf++; } } @@ -454,6 +457,31 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start) return 0; } +void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc) +{ + int pf = rvu_get_pf(pcifunc); + int i = 0, lmac_count = 0; + u8 max_dmac_filters; + u8 cgx_id, lmac_id; + void *cgx_dev; + + if (!is_cgx_config_permitted(rvu, pcifunc)) + return; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + cgx_dev = cgx_get_pdata(cgx_id); + lmac_count = cgx_get_lmac_cnt(cgx_dev); + max_dmac_filters = MAX_DMAC_ENTRIES_PER_CGX / lmac_count; + + for (i = 0; i < max_dmac_filters; i++) + cgx_lmac_addr_del(cgx_id, lmac_id, i); + + /* cgx_lmac_addr_del() does not clear the entry for index 0, + * so it needs to be done explicitly + */ + cgx_lmac_addr_reset(cgx_id, lmac_id); +} + int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { @@ -557,6 +585,63 @@ int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu, return 0; } +int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu, + struct cgx_mac_addr_add_req *req, + struct cgx_mac_addr_add_rsp *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + u8 cgx_id, lmac_id; + int rc = 0; + + if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) + return -EPERM; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + rc = cgx_lmac_addr_add(cgx_id, lmac_id, req->mac_addr); + if (rc >= 0) { + rsp->index = rc; + return 0; + } + + return rc; +} + +int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu, + struct cgx_mac_addr_del_req *req, + struct msg_rsp *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + u8 cgx_id, lmac_id; + + if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) + return -EPERM; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + return cgx_lmac_addr_del(cgx_id, lmac_id, req->index); +} + +int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu, + struct msg_req *req, + struct cgx_max_dmac_entries_get_rsp + *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + u8 cgx_id, lmac_id; + + /* If the msg is received from PFs (which are not mapped to CGX LMACs) + * or from a VF, then no entries are allocated for DMAC filters at the + * CGX level. So return zero. 
+ */ + if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) { + rsp->max_dmac_filters = 0; + return 0; + } + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + rsp->max_dmac_filters = cgx_lmac_addr_max_entries_get(cgx_id, lmac_id); + return 0; +} + int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req, struct cgx_mac_addr_set_or_get *rsp) @@ -953,3 +1038,30 @@ int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu, rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac); return 0; } + +int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + u8 cgx_id, lmac_id; + + if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) + return -EPERM; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + return cgx_lmac_addr_reset(cgx_id, lmac_id); +} + +int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu, + struct cgx_mac_addr_update_req *req, + struct msg_rsp *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + u8 cgx_id, lmac_id; + + if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) + return -EPERM; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index); +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c index 7d9e71c6965f..8d48b64485c6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c @@ -10,6 +10,206 @@ #include "cgx.h" #include "rvu_reg.h" +/* RVU LMTST */ +#define LMT_TBL_OP_READ 0 +#define LMT_TBL_OP_WRITE 1 +#define LMT_MAP_TABLE_SIZE (128 * 1024) +#define LMT_MAPTBL_ENTRY_SIZE 16 + +/* Function to perform operations (read/write) on lmtst map table */ +static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val, + int lmt_tbl_op) +{ + void __iomem *lmt_map_base; + u64 tbl_base; + + tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE); + + lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE); + if (!lmt_map_base) { + dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n"); + return -ENOMEM; + } + + if (lmt_tbl_op == LMT_TBL_OP_READ) { + *val = readq(lmt_map_base + index); + } else { + writeq((*val), (lmt_map_base + index)); + /* Flushing the AP interceptor cache to make APR_LMT_MAP_ENTRY_S + * changes effective. Writing 1 triggers the flush; the read back + * acts as a barrier and sets up a data dependency. Writing 0 + * afterwards completes the flush.
+ */ + rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, BIT_ULL(0)); + rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CTL); + rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, 0x00); + } + + iounmap(lmt_map_base); + return 0; +} + +static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc) +{ + return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) + + (pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE; +} + +static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc, + u64 iova, u64 *lmt_addr) +{ + u64 pa, val, pf; + int err; + + if (!iova) { + dev_err(rvu->dev, "%s Requested NULL address for translation\n", __func__); + return -EINVAL; + } + + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova); + pf = rvu_get_pf(pcifunc) & 0x1F; + val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 | + ((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF); + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val); + + err = rvu_poll_reg(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS, BIT_ULL(0), false); + if (err) { + dev_err(rvu->dev, "%s LMTLINE iova translation failed\n", __func__); + return err; + } + val = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS); + if (val & ~0x1ULL) { + dev_err(rvu->dev, "%s LMTLINE iova translation failed err:%llx\n", __func__, val); + return -EIO; + } + /* PA[51:12] = RVU_AF_SMMU_TLN_FLIT1[60:21] * PA[11:0] = IOVA[11:0] */ + pa = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TLN_FLIT1) >> 21; + pa &= GENMASK_ULL(39, 0); + *lmt_addr = (pa << 12) | (iova & 0xFFF); + + return 0; +} + +static int rvu_update_lmtaddr(struct rvu *rvu, u16 pcifunc, u64 lmt_addr) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + u32 tbl_idx; + int err = 0; + u64 val; + + /* Read the current lmt addr of pcifunc */ + tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc); + err = lmtst_map_table_ops(rvu, tbl_idx, &val, LMT_TBL_OP_READ); + if (err) { + dev_err(rvu->dev, + "Failed to read LMT map table: index 0x%x err %d\n", + tbl_idx, err); + return err; + } + + /* Storing the secondary's lmt base address as this needs to be + * reverted in FLR. Also making sure this default value doesn't + * get overwritten on multiple calls to this mailbox. + */ + if (!pfvf->lmt_base_addr) + pfvf->lmt_base_addr = val; + + /* Update the LMT table with new addr */ + err = lmtst_map_table_ops(rvu, tbl_idx, &lmt_addr, LMT_TBL_OP_WRITE); + if (err) { + dev_err(rvu->dev, + "Failed to update LMT map table: index 0x%x err %d\n", + tbl_idx, err); + return err; + } + return 0; +} + +int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu, + struct lmtst_tbl_setup_req *req, + struct msg_rsp *rsp) +{ + u64 lmt_addr, val; + u32 pri_tbl_idx; + int err = 0; + + /* Check if PF_FUNC wants to use its own local memory as LMTLINE + * region, if so, convert that IOVA to physical address and + * populate LMT table with that address + */ + if (req->use_local_lmt_region) { + err = rvu_get_lmtaddr(rvu, req->hdr.pcifunc, + req->lmt_iova, &lmt_addr); + if (err < 0) + return err; + + /* Update the lmt addr for this PFFUNC in the LMT table */ + err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, lmt_addr); + if (err) + return err; + } + + /* Reconfiguring lmtst map table in lmt region shared mode i.e. making + * multiple PF_FUNCs share an LMTLINE region, so primary/base + * pcifunc (which is passed as an argument to mailbox) is the one + * whose lmt base address will be shared among other secondary + * pcifunc (will be the one who is calling this mailbox).
+ */ + if (req->base_pcifunc) { + /* Calculating the LMT table index equivalent to primary + * pcifunc. + */ + pri_tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->base_pcifunc); + + /* Read the base lmt addr of the primary pcifunc */ + err = lmtst_map_table_ops(rvu, pri_tbl_idx, &val, + LMT_TBL_OP_READ); + if (err) { + dev_err(rvu->dev, + "Failed to read LMT map table: index 0x%x err %d\n", + pri_tbl_idx, err); + return err; + } + + /* Update the base lmt addr of secondary with primary's base + * lmt addr. + */ + err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, val); + if (err) + return err; + } + + return 0; +} + +/* Resetting the lmtst map table to original base addresses */ +void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + u32 tbl_idx; + int err; + + if (is_rvu_otx2(rvu)) + return; + + if (pfvf->lmt_base_addr) { + /* This corresponds to lmt map table index */ + tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc); + /* Reverting back original lmt base addr for respective + * pcifunc. + */ + err = lmtst_map_table_ops(rvu, tbl_idx, &pfvf->lmt_base_addr, + LMT_TBL_OP_WRITE); + if (err) + dev_err(rvu->dev, + "Failed to update LMT map table: index 0x%x err %d\n", + tbl_idx, err); + pfvf->lmt_base_addr = 0; + } +} + int rvu_set_channels_base(struct rvu *rvu) { struct rvu_hwinfo *hw = rvu->hw; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index 3cc3c6fd1d84..9b2dfbf90e51 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -1971,10 +1971,9 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id) return err; } -static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused) +static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id) { struct dentry *current_dir; - int err, lmac_id; char *buf; current_dir = filp->file->f_path.dentry->d_parent; @@ -1982,17 +1981,87 @@ static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused) if (!buf) return -EINVAL; - err = kstrtoint(buf + 1, 10, &lmac_id); - if (!err) { - err = cgx_print_stats(filp, lmac_id); - if (err) - return err; - } + return kstrtoint(buf + 1, 10, lmac_id); +} + +static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused) +{ + int lmac_id, err; + + err = rvu_dbg_derive_lmacid(filp, &lmac_id); + if (!err) + return cgx_print_stats(filp, lmac_id); + return err; } RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL); +static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id) +{ + struct pci_dev *pdev = NULL; + void *cgxd = s->private; + char *bcast, *mcast; + u16 index, domain; + u8 dmac[ETH_ALEN]; + struct rvu *rvu; + u64 cfg, mac; + int pf; + + rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM, + PCI_DEVID_OCTEONTX2_RVU_AF, NULL)); + if (!rvu) + return -ENODEV; + + pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id); + domain = 2; + + pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0); + if (!pdev) + return 0; + + cfg = cgx_read_dmac_ctrl(cgxd, lmac_id); + bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT"; + mcast = cfg & CGX_DMAC_MCAST_MODE ? 
"ACCEPT" : "REJECT"; + + seq_puts(s, + "PCI dev RVUPF BROADCAST MULTICAST FILTER-MODE\n"); + seq_printf(s, "%s PF%d %9s %9s", + dev_name(&pdev->dev), pf, bcast, mcast); + if (cfg & CGX_DMAC_CAM_ACCEPT) + seq_printf(s, "%12s\n\n", "UNICAST"); + else + seq_printf(s, "%16s\n\n", "PROMISCUOUS"); + + seq_puts(s, "\nDMAC-INDEX ADDRESS\n"); + + for (index = 0 ; index < 32 ; index++) { + cfg = cgx_read_dmac_entry(cgxd, index); + /* Display enabled dmac entries associated with current lmac */ + if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) && + FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) { + mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg); + u64_to_ether_addr(mac, dmac); + seq_printf(s, "%7d %pM\n", index, dmac); + } + } + + return 0; +} + +static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused) +{ + int err, lmac_id; + + err = rvu_dbg_derive_lmacid(filp, &lmac_id); + if (!err) + return cgx_print_dmac_flt(filp, lmac_id); + + return err; +} + +RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL); + static void rvu_dbg_cgx_init(struct rvu *rvu) { struct mac_ops *mac_ops; @@ -2029,6 +2098,9 @@ static void rvu_dbg_cgx_init(struct rvu *rvu) debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac, cgx, &rvu_dbg_cgx_stat_fops); + debugfs_create_file("mac_filter", 0600, + rvu->rvu_dbg.lmac, cgx, + &rvu_dbg_cgx_dmac_flt_fops); } } } @@ -2041,9 +2113,6 @@ static void rvu_print_npc_mcam_info(struct seq_file *s, int entry_acnt, entry_ecnt; int cntr_acnt, cntr_ecnt; - /* Skip PF0 */ - if (!pcifunc) - return; rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr, &entry_acnt, &entry_ecnt); rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr, @@ -2226,7 +2295,7 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s, static void rvu_dbg_npc_mcam_show_action(struct seq_file *s, struct rvu_npc_mcam_rule *rule) { - if (rule->intf == NIX_INTF_TX) { + if (is_npc_intf_tx(rule->intf)) { switch (rule->tx_action.op) { case NIX_TX_ACTIONOP_DROP: seq_puts(s, "\taction: Drop\n"); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c index 10a98bcb7c54..2688186066d9 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c @@ -1364,6 +1364,44 @@ static void rvu_health_reporters_destroy(struct rvu *rvu) rvu_nix_health_reporters_destroy(rvu_dl); } +static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) +{ + struct rvu_devlink *rvu_dl = devlink_priv(devlink); + struct rvu *rvu = rvu_dl->rvu; + struct rvu_switch *rswitch; + + rswitch = &rvu->rswitch; + *mode = rswitch->mode; + + return 0; +} + +static int rvu_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack) +{ + struct rvu_devlink *rvu_dl = devlink_priv(devlink); + struct rvu *rvu = rvu_dl->rvu; + struct rvu_switch *rswitch; + + rswitch = &rvu->rswitch; + switch (mode) { + case DEVLINK_ESWITCH_MODE_LEGACY: + case DEVLINK_ESWITCH_MODE_SWITCHDEV: + if (rswitch->mode == mode) + return 0; + rswitch->mode = mode; + if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) + rvu_switch_enable(rvu); + else + rvu_switch_disable(rvu); + break; + default: + return -EINVAL; + } + + return 0; +} + static int rvu_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req, struct netlink_ext_ack *extack) { @@ -1372,6 +1410,8 @@ static int rvu_devlink_info_get(struct devlink *devlink, struct devlink_info_req static const struct devlink_ops 
rvu_devlink_ops = { .info_get = rvu_devlink_info_get, + .eswitch_mode_get = rvu_devlink_eswitch_mode_get, + .eswitch_mode_set = rvu_devlink_eswitch_mode_set, }; int rvu_register_dl(struct rvu *rvu) @@ -1380,14 +1420,9 @@ int rvu_register_dl(struct rvu *rvu) struct devlink *dl; int err; - rvu_dl = kzalloc(sizeof(*rvu_dl), GFP_KERNEL); - if (!rvu_dl) - return -ENOMEM; - dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink)); if (!dl) { dev_warn(rvu->dev, "devlink_alloc failed\n"); - kfree(rvu_dl); return -ENOMEM; } @@ -1395,10 +1430,10 @@ int rvu_register_dl(struct rvu *rvu) if (err) { dev_err(rvu->dev, "devlink register failed with error %d\n", err); devlink_free(dl); - kfree(rvu_dl); return err; } + rvu_dl = devlink_priv(dl); rvu_dl->dl = dl; rvu_dl->rvu = rvu; rvu->rvu_dl = rvu_dl; @@ -1417,5 +1452,4 @@ void rvu_unregister_dl(struct rvu *rvu) rvu_health_reporters_destroy(rvu); devlink_unregister(dl); devlink_free(dl); - kfree(rvu_dl); } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index d6f8210652c5..4bfbbdf38770 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -196,11 +196,22 @@ static void nix_rx_sync(struct rvu *rvu, int blkaddr) { int err; - /*Sync all in flight RX packets to LLC/DRAM */ + /* Sync all in flight RX packets to LLC/DRAM */ rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0)); err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true); if (err) - dev_err(rvu->dev, "NIX RX software sync failed\n"); + dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n"); + + /* SW_SYNC ensures all existing transactions are finished and pkts + * are written to LLC/DRAM; queues should be torn down after + * a successful SW_SYNC. Due to a HW erratum, in some rare scenarios + * an existing transaction might end after the SW_SYNC operation. To + * ensure the operation is fully done, do the SW_SYNC twice.
+ */ + rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0)); + err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true); + if (err) + dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n"); } static bool is_valid_txschq(struct rvu *rvu, int blkaddr, @@ -298,6 +309,7 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf) rvu_nix_chan_lbk(rvu, lbkid, vf + 1); pfvf->rx_chan_cnt = 1; pfvf->tx_chan_cnt = 1; + rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf); rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, pfvf->rx_chan_base, pfvf->rx_chan_cnt); @@ -346,6 +358,9 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf) /* Free and disable any MCAM entries used by this NIX LF */ rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); + + /* Disable DMAC filters used */ + rvu_cgx_disable_dmac_entries(rvu, pcifunc); } int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, @@ -1949,6 +1964,35 @@ static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw, pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE); } +static void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, + u16 pcifunc, struct nix_txsch *txsch) +{ + struct rvu_hwinfo *hw = rvu->hw; + int lbk_link_start, lbk_links; + u8 pf = rvu_get_pf(pcifunc); + int schq; + + if (!is_pf_cgxmapped(rvu, pf)) + return; + + lbk_link_start = hw->cgx_links; + + for (schq = 0; schq < txsch->schq.max; schq++) { + if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) + continue; + /* Enable all LBK links with channel 63 by default so that + * packets can be sent to LBK with a NPC TX MCAM rule + */ + lbk_links = hw->lbk_links; + while (lbk_links--) + rvu_write64(rvu, blkaddr, + NIX_AF_TL3_TL2X_LINKX_CFG(schq, + lbk_link_start + + lbk_links), + BIT_ULL(12) | RVU_SWITCH_LBK_CHAN); + } +} + int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, struct nix_txschq_config *req, struct msg_rsp *rsp) @@ -2037,6 +2081,9 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, rvu_write64(rvu, blkaddr, reg, regval); } + rvu_nix_tx_tl2_cfg(rvu, blkaddr, pcifunc, + &nix_hw->txsch[NIX_TXSCH_LVL_TL2]); + return 0; } @@ -3177,6 +3224,8 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu, if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf) ether_addr_copy(pfvf->default_mac, req->mac_addr); + rvu_switch_update_rules(rvu, pcifunc); + return 0; } @@ -3805,7 +3854,6 @@ static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr, vlan = &nix_hw->txvlan; kfree(vlan->rsrc.bmap); mutex_destroy(&vlan->rsrc_lock); - devm_kfree(rvu->dev, vlan->entry2pfvf_map); mcast = &nix_hw->mcast; qmem_free(rvu->dev, mcast->mce_ctx); @@ -3846,6 +3894,8 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, pfvf = rvu_get_pfvf(rvu, pcifunc); set_bit(NIXLF_INITIALIZED, &pfvf->flags); + rvu_switch_update_rules(rvu, pcifunc); + return rvu_cgx_start_stop_io(rvu, pcifunc, true); } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index 3612e0a2cab3..52b255426c22 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -442,7 +442,8 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam, owner = mcam->entry2pfvf_map[index]; target_func = (entry->action >> 4) & 0xffff; /* do nothing when target is LBK/PF or owner is not PF */ - if (is_afvf(target_func) || (owner & RVU_PFVF_FUNC_MASK) || + if (is_pffunc_af(owner) || is_afvf(target_func) 
|| + (owner & RVU_PFVF_FUNC_MASK) || !(target_func & RVU_PFVF_FUNC_MASK)) return; @@ -468,6 +469,8 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, { int bank = npc_get_bank(mcam, index); int kw = 0, actbank, actindex; + u8 tx_intf_mask = ~intf & 0x3; + u8 tx_intf = intf; u64 cam0, cam1; actbank = bank; /* Save bank id, to set action later on */ @@ -488,12 +491,21 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, */ for (; bank < (actbank + mcam->banks_per_entry); bank++, kw = kw + 2) { /* Interface should be set in all banks */ + if (is_npc_intf_tx(intf)) { + /* Last bit must be set and rest don't care + * for TX interfaces + */ + tx_intf_mask = 0x1; + tx_intf = intf & tx_intf_mask; + tx_intf_mask = ~tx_intf & tx_intf_mask; + } + rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1), - intf); + tx_intf); rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0), - ~intf & 0x3); + tx_intf_mask); /* Set the match key */ npc_get_keyword(entry, kw, &cam0, &cam1); @@ -650,6 +662,7 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc, eth_broadcast_addr((u8 *)&req.mask.dmac); req.features = BIT_ULL(NPC_DMAC); req.channel = chan; + req.chan_mask = 0xFFFU; req.intf = pfvf->nix_rx_intf; req.op = action.op; req.hdr.pcifunc = 0; /* AF is requester */ @@ -799,6 +812,7 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc, eth_broadcast_addr((u8 *)&req.mask.dmac); req.features = BIT_ULL(NPC_DMAC); req.channel = chan; + req.chan_mask = 0xFFFU; req.intf = pfvf->nix_rx_intf; req.entry = index; req.hdr.pcifunc = 0; /* AF is requester */ @@ -1707,7 +1721,6 @@ static void npc_parser_profile_init(struct rvu *rvu, int blkaddr) { struct rvu_hwinfo *hw = rvu->hw; int num_pkinds, num_kpus, idx; - struct npc_pkind *pkind; /* Disable all KPUs and their entries */ for (idx = 0; idx < hw->npc_kpus; idx++) { @@ -1725,9 +1738,8 @@ static void npc_parser_profile_init(struct rvu *rvu, int blkaddr) * Check HW max count to avoid configuring junk or * writing to unsupported CSR addresses. 
*/ - pkind = &hw->pkind; num_pkinds = rvu->kpu.pkinds; - num_pkinds = min_t(int, pkind->rsrc.max, num_pkinds); + num_pkinds = min_t(int, hw->npc_pkinds, num_pkinds); for (idx = 0; idx < num_pkinds; idx++) npc_config_kpuaction(rvu, blkaddr, &rvu->kpu.ikpu[idx], 0, idx, true); @@ -1745,6 +1757,8 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr) int nixlf_count = rvu_get_nixlf_count(rvu); struct npc_mcam *mcam = &rvu->hw->mcam; int rsvd, err; + u16 index; + int cntr; u64 cfg; /* Actual number of MCAM entries vary by entry size */ @@ -1845,6 +1859,14 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr) if (!mcam->entry2target_pffunc) goto free_mem; + for (index = 0; index < mcam->bmap_entries; index++) { + mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP; + mcam->entry2cntr_map[index] = NPC_MCAM_INVALID_MAP; + } + + for (cntr = 0; cntr < mcam->counters.max; cntr++) + mcam->cntr2pfvf_map[cntr] = NPC_MCAM_INVALID_MAP; + mutex_init(&mcam->lock); return 0; @@ -1867,7 +1889,8 @@ static void rvu_npc_hw_init(struct rvu *rvu, int blkaddr) if (npc_const1 & BIT_ULL(63)) npc_const2 = rvu_read64(rvu, blkaddr, NPC_AF_CONST2); - pkind->rsrc.max = (npc_const1 >> 12) & 0xFFULL; + pkind->rsrc.max = NPC_UNRESERVED_PKIND_COUNT; + hw->npc_pkinds = (npc_const1 >> 12) & 0xFFULL; hw->npc_kpu_entries = npc_const1 & 0xFFFULL; hw->npc_kpus = (npc_const >> 8) & 0x1FULL; hw->npc_intfs = npc_const & 0xFULL; @@ -1978,6 +2001,10 @@ int rvu_npc_init(struct rvu *rvu) err = rvu_alloc_bitmap(&pkind->rsrc); if (err) return err; + /* Reserve PKIND#0 for LBKs. Power reset value of LBK_CH_PKIND is '0', + * no need to configure PKIND for all LBKs separately. + */ + rvu_alloc_rsrc(&pkind->rsrc); /* Allocate mem for pkind to PF and channel mapping info */ pkind->pfchan_map = devm_kcalloc(rvu->dev, pkind->rsrc.max, @@ -2562,7 +2589,7 @@ int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu, } /* Alloc request from PFFUNC with no NIXLF attached should be denied */ - if (!is_nixlf_attached(rvu, pcifunc)) + if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc)) return NPC_MCAM_ALLOC_DENIED; return npc_mcam_alloc_entries(mcam, pcifunc, req, rsp); @@ -2582,7 +2609,7 @@ int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu, return NPC_MCAM_INVALID_REQ; /* Free request from PFFUNC with no NIXLF attached, ignore */ - if (!is_nixlf_attached(rvu, pcifunc)) + if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc)) return NPC_MCAM_INVALID_REQ; mutex_lock(&mcam->lock); @@ -2594,7 +2621,7 @@ int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu, if (rc) goto exit; - mcam->entry2pfvf_map[req->entry] = 0; + mcam->entry2pfvf_map[req->entry] = NPC_MCAM_INVALID_MAP; mcam->entry2target_pffunc[req->entry] = 0x0; npc_mcam_clear_bit(mcam, req->entry); npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false); @@ -2679,13 +2706,14 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu, else nix_intf = pfvf->nix_rx_intf; - if (npc_mcam_verify_channel(rvu, pcifunc, req->intf, channel)) { + if (!is_pffunc_af(pcifunc) && + npc_mcam_verify_channel(rvu, pcifunc, req->intf, channel)) { rc = NPC_MCAM_INVALID_REQ; goto exit; } - if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, - pcifunc)) { + if (!is_pffunc_af(pcifunc) && + npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, pcifunc)) { rc = NPC_MCAM_INVALID_REQ; goto exit; } @@ -2836,7 +2864,7 @@ int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu, return NPC_MCAM_INVALID_REQ; /* If the request is from a PFFUNC with no NIXLF 
attached, ignore */ - if (!is_nixlf_attached(rvu, pcifunc)) + if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc)) return NPC_MCAM_INVALID_REQ; /* Since list of allocated counter IDs needs to be sent to requester, @@ -3081,7 +3109,7 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu, if (rc) { /* Free allocated MCAM entry */ mutex_lock(&mcam->lock); - mcam->entry2pfvf_map[entry] = 0; + mcam->entry2pfvf_map[entry] = NPC_MCAM_INVALID_MAP; npc_mcam_clear_bit(mcam, entry); mutex_unlock(&mcam->lock); return rc; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c index 68633145a8b8..5c01cf4a9c5b 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c @@ -910,14 +910,17 @@ static void rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc, static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf, struct mcam_entry *entry, - struct npc_install_flow_req *req, u16 target) + struct npc_install_flow_req *req, + u16 target, bool pf_set_vfs_mac) { + struct rvu_switch *rswitch = &rvu->rswitch; struct nix_rx_action action; - u64 chan_mask; - chan_mask = req->chan_mask ? req->chan_mask : ~0ULL; - npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0, chan_mask, 0, - NIX_INTF_RX); + if (rswitch->mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && pf_set_vfs_mac) + req->chan_mask = 0x0; /* Do not care channel */ + + npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0, req->chan_mask, + 0, NIX_INTF_RX); *(u64 *)&action = 0x00; action.pf_func = target; @@ -949,9 +952,16 @@ static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf, struct npc_install_flow_req *req, u16 target) { struct nix_tx_action action; + u64 mask = ~0ULL; + + /* If AF is installing then do not care about + * PF_FUNC in Send Descriptor + */ + if (is_pffunc_af(req->hdr.pcifunc)) + mask = 0; npc_update_entry(rvu, NPC_PF_FUNC, entry, (__force u16)htons(target), - 0, ~0ULL, 0, NIX_INTF_TX); + 0, mask, 0, NIX_INTF_TX); *(u64 *)&action = 0x00; action.op = req->op; @@ -1002,7 +1012,7 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target, req->intf); if (is_npc_intf_rx(req->intf)) - npc_update_rx_entry(rvu, pfvf, entry, req, target); + npc_update_rx_entry(rvu, pfvf, entry, req, target, pf_set_vfs_mac); else npc_update_tx_entry(rvu, pfvf, entry, req, target); @@ -1164,7 +1174,9 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu, if (err) return err; - if (npc_mcam_verify_channel(rvu, target, req->intf, req->channel)) + /* Skip channel validation if AF is installing */ + if (!is_pffunc_af(req->hdr.pcifunc) && + npc_mcam_verify_channel(rvu, target, req->intf, req->channel)) return -EINVAL; pfvf = rvu_get_pfvf(rvu, target); @@ -1180,6 +1192,7 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu, eth_broadcast_addr((u8 *)&req->mask.dmac); } + /* Proceed if NIXLF is attached or not for TX rules */ err = nix_get_nixlf(rvu, target, &nixlf, NULL); if (err && is_npc_intf_rx(req->intf) && !pf_set_vfs_mac) return -EINVAL; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h index 76837d5e19c6..8b01ef6e2c99 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h @@ -49,6 +49,11 @@ #define RVU_AF_PFX_VF_BAR4_ADDR (0x5400 | (a) << 4) #define RVU_AF_PFX_VF_BAR4_CFG (0x5600 | (a) << 4) #define 
RVU_AF_PFX_LMTLINE_ADDR (0x5800 | (a) << 4) +#define RVU_AF_SMMU_ADDR_REQ (0x6000) +#define RVU_AF_SMMU_TXN_REQ (0x6008) +#define RVU_AF_SMMU_ADDR_RSP_STS (0x6010) +#define RVU_AF_SMMU_ADDR_TLN (0x6018) +#define RVU_AF_SMMU_TLN_FLIT1 (0x6030) /* Admin function's privileged PF/VF registers */ #define RVU_PRIV_CONST (0x8000000) @@ -692,4 +697,9 @@ #define LBK_LINK_CFG_ID_MASK GENMASK_ULL(11, 6) #define LBK_LINK_CFG_BASE_MASK GENMASK_ULL(5, 0) +/* APR */ +#define APR_AF_LMT_CFG (0x000ull) +#define APR_AF_LMT_MAP_BASE (0x008ull) +#define APR_AF_LMT_CTL (0x010ull) + #endif /* RVU_REG_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h index 14aa8e37ea41..5bbe6727d11d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h @@ -35,7 +35,8 @@ enum rvu_block_addr_e { BLKADDR_NDC_NPA0 = 0xeULL, BLKADDR_NDC_NIX1_RX = 0x10ULL, BLKADDR_NDC_NIX1_TX = 0x11ULL, - BLK_COUNT = 0x12ULL, + BLKADDR_APR = 0x16ULL, + BLK_COUNT = 0x17ULL, }; /* RVU Block Type Enumeration */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c new file mode 100644 index 000000000000..820adf390b8e --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c @@ -0,0 +1,259 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2021 Marvell. + */ + +#include <linux/bitfield.h> +#include "rvu.h" + +static int rvu_switch_install_rx_rule(struct rvu *rvu, u16 pcifunc, + u16 chan_mask) +{ + struct npc_install_flow_req req = { 0 }; + struct npc_install_flow_rsp rsp = { 0 }; + struct rvu_pfvf *pfvf; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + /* If the pcifunc is not initialized then nothing to do. + * This same function will be called again via rvu_switch_update_rules + * after pcifunc is initialized. + */ + if (!test_bit(NIXLF_INITIALIZED, &pfvf->flags)) + return 0; + + ether_addr_copy(req.packet.dmac, pfvf->mac_addr); + eth_broadcast_addr((u8 *)&req.mask.dmac); + req.hdr.pcifunc = 0; /* AF is requester */ + req.vf = pcifunc; + req.features = BIT_ULL(NPC_DMAC); + req.channel = pfvf->rx_chan_base; + req.chan_mask = chan_mask; + req.intf = pfvf->nix_rx_intf; + req.op = NIX_RX_ACTION_DEFAULT; + req.default_rule = 1; + + return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); +} + +static int rvu_switch_install_tx_rule(struct rvu *rvu, u16 pcifunc, u16 entry) +{ + struct npc_install_flow_req req = { 0 }; + struct npc_install_flow_rsp rsp = { 0 }; + struct rvu_pfvf *pfvf; + u8 lbkid; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + /* If the pcifunc is not initialized then nothing to do. + * This same function will be called again via rvu_switch_update_rules + * after pcifunc is initialized. + */ + if (!test_bit(NIXLF_INITIALIZED, &pfvf->flags)) + return 0; + + lbkid = pfvf->nix_blkaddr == BLKADDR_NIX0 ? 
0 : 1; + ether_addr_copy(req.packet.dmac, pfvf->mac_addr); + eth_broadcast_addr((u8 *)&req.mask.dmac); + req.hdr.pcifunc = 0; /* AF is requester */ + req.vf = pcifunc; + req.entry = entry; + req.features = BIT_ULL(NPC_DMAC); + req.intf = pfvf->nix_tx_intf; + req.op = NIX_TX_ACTIONOP_UCAST_CHAN; + req.index = (lbkid << 8) | RVU_SWITCH_LBK_CHAN; + req.set_cntr = 1; + + return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); +} + +static int rvu_switch_install_rules(struct rvu *rvu) +{ + struct rvu_switch *rswitch = &rvu->rswitch; + u16 start = rswitch->start_entry; + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc, entry = 0; + int pf, vf, numvfs; + int err; + + for (pf = 1; pf < hw->total_pfs; pf++) { + if (!is_pf_cgxmapped(rvu, pf)) + continue; + + pcifunc = pf << 10; + /* rvu_get_nix_blkaddr sets up the corresponding NIX block + * address and NIX RX and TX interfaces for a pcifunc. + * Generally it is called during attach call of a pcifunc but it + * is called here since we are pre-installing rules before + * nixlfs are attached + */ + rvu_get_nix_blkaddr(rvu, pcifunc); + + /* MCAM RX rule for a PF/VF already exists as default unicast + * rules installed by AF. Hence change the channel in those + * rules to ignore channel so that packets with the required + * DMAC received from LBK(by other PF/VFs in system) or from + * external world (from wire) are accepted. + */ + err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0); + if (err) { + dev_err(rvu->dev, "RX rule for PF%d failed(%d)\n", + pf, err); + return err; + } + + err = rvu_switch_install_tx_rule(rvu, pcifunc, start + entry); + if (err) { + dev_err(rvu->dev, "TX rule for PF%d failed(%d)\n", + pf, err); + return err; + } + + rswitch->entry2pcifunc[entry++] = pcifunc; + + rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL); + for (vf = 0; vf < numvfs; vf++) { + pcifunc = pf << 10 | ((vf + 1) & 0x3FF); + rvu_get_nix_blkaddr(rvu, pcifunc); + + err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0); + if (err) { + dev_err(rvu->dev, + "RX rule for PF%dVF%d failed(%d)\n", + pf, vf, err); + return err; + } + + err = rvu_switch_install_tx_rule(rvu, pcifunc, + start + entry); + if (err) { + dev_err(rvu->dev, + "TX rule for PF%dVF%d failed(%d)\n", + pf, vf, err); + return err; + } + + rswitch->entry2pcifunc[entry++] = pcifunc; + } + } + + return 0; +} + +void rvu_switch_enable(struct rvu *rvu) +{ + struct npc_mcam_alloc_entry_req alloc_req = { 0 }; + struct npc_mcam_alloc_entry_rsp alloc_rsp = { 0 }; + struct npc_delete_flow_req uninstall_req = { 0 }; + struct npc_mcam_free_entry_req free_req = { 0 }; + struct rvu_switch *rswitch = &rvu->rswitch; + struct msg_rsp rsp; + int ret; + + alloc_req.contig = true; + alloc_req.count = rvu->cgx_mapped_pfs + rvu->cgx_mapped_vfs; + ret = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req, + &alloc_rsp); + if (ret) { + dev_err(rvu->dev, + "Unable to allocate MCAM entries\n"); + goto exit; + } + + if (alloc_rsp.count != alloc_req.count) { + dev_err(rvu->dev, + "Unable to allocate %d MCAM entries, got %d\n", + alloc_req.count, alloc_rsp.count); + goto free_entries; + } + + rswitch->entry2pcifunc = kcalloc(alloc_req.count, sizeof(u16), + GFP_KERNEL); + if (!rswitch->entry2pcifunc) + goto free_entries; + + rswitch->used_entries = alloc_rsp.count; + rswitch->start_entry = alloc_rsp.entry; + + ret = rvu_switch_install_rules(rvu); + if (ret) + goto uninstall_rules; + + return; + +uninstall_rules: + uninstall_req.start = rswitch->start_entry; + uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1; + 
rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp); + kfree(rswitch->entry2pcifunc); +free_entries: + free_req.all = 1; + rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp); +exit: + return; +} + +void rvu_switch_disable(struct rvu *rvu) +{ + struct npc_delete_flow_req uninstall_req = { 0 }; + struct npc_mcam_free_entry_req free_req = { 0 }; + struct rvu_switch *rswitch = &rvu->rswitch; + struct rvu_hwinfo *hw = rvu->hw; + int pf, vf, numvfs; + struct msg_rsp rsp; + u16 pcifunc; + int err; + + if (!rswitch->used_entries) + return; + + for (pf = 1; pf < hw->total_pfs; pf++) { + if (!is_pf_cgxmapped(rvu, pf)) + continue; + + pcifunc = pf << 10; + err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF); + if (err) + dev_err(rvu->dev, + "Reverting RX rule for PF%d failed(%d)\n", + pf, err); + + rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL); + for (vf = 0; vf < numvfs; vf++) { + pcifunc = pf << 10 | ((vf + 1) & 0x3FF); + err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF); + if (err) + dev_err(rvu->dev, + "Reverting RX rule for PF%dVF%d failed(%d)\n", + pf, vf, err); + } + } + + uninstall_req.start = rswitch->start_entry; + uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1; + free_req.all = 1; + rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp); + rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp); + rswitch->used_entries = 0; + kfree(rswitch->entry2pcifunc); +} + +void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc) +{ + struct rvu_switch *rswitch = &rvu->rswitch; + u32 max = rswitch->used_entries; + u16 entry; + + if (!rswitch->used_entries) + return; + + for (entry = 0; entry < max; entry++) { + if (rswitch->entry2pcifunc[entry] == pcifunc) + break; + } + + if (entry >= max) + return; + + rvu_switch_install_tx_rule(rvu, pcifunc, rswitch->start_entry + entry); + rvu_switch_install_rx_rule(rvu, pcifunc, 0x0); +} diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile index 457c94793e63..3254b02205ca 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile @@ -7,7 +7,7 @@ obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \ - otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o + otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o rvu_nicvf-y := otx2_vf.o ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c index 1b08896b46d2..184de9466286 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c @@ -22,69 +22,52 @@ static struct dev_hw_ops cn10k_hw_ops = { .refill_pool_ptrs = cn10k_refill_pool_ptrs, }; -int cn10k_pf_lmtst_init(struct otx2_nic *pf) +int cn10k_lmtst_init(struct otx2_nic *pfvf) { - int size, num_lines; - u64 base; - if (!test_bit(CN10K_LMTST, &pf->hw.cap_flag)) { - pf->hw_ops = &otx2_hw_ops; + struct lmtst_tbl_setup_req *req; + int qcount, err; + + if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) { + pfvf->hw_ops = &otx2_hw_ops; return 0; } - pf->hw_ops = &cn10k_hw_ops; - base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) + - (MBOX_SIZE * (pf->total_vfs + 1)); - - size = pci_resource_len(pf->pdev, PCI_MBOX_BAR_NUM) - - (MBOX_SIZE * (pf->total_vfs + 1)); - - pf->hw.lmt_base = ioremap(base, size); + pfvf->hw_ops = 
&cn10k_hw_ops; + qcount = pfvf->hw.max_queues; + /* LMTST lines allocation + * qcount = num_online_cpus(); + * NPA = TX + RX + XDP. + * NIX = TX * 32 (For Burst SQE flush). + */ + pfvf->tot_lmt_lines = (qcount * 3) + (qcount * 32); + pfvf->npa_lmt_lines = qcount * 3; + pfvf->nix_lmt_size = LMT_BURST_SIZE * LMT_LINE_SIZE; - if (!pf->hw.lmt_base) { - dev_err(pf->dev, "Unable to map PF LMTST region\n"); + mutex_lock(&pfvf->mbox.lock); + req = otx2_mbox_alloc_msg_lmtst_tbl_setup(&pfvf->mbox); + if (!req) { + mutex_unlock(&pfvf->mbox.lock); return -ENOMEM; } - /* FIXME: Get the num of LMTST lines from LMT table */ - pf->tot_lmt_lines = size / LMT_LINE_SIZE; - num_lines = (pf->tot_lmt_lines - NIX_LMTID_BASE) / - pf->hw.tx_queues; - /* Number of LMT lines per SQ queues */ - pf->nix_lmt_lines = num_lines > 32 ? 32 : num_lines; - - pf->nix_lmt_size = pf->nix_lmt_lines * LMT_LINE_SIZE; - return 0; -} + req->use_local_lmt_region = true; -int cn10k_vf_lmtst_init(struct otx2_nic *vf) -{ - int size, num_lines; - - if (!test_bit(CN10K_LMTST, &vf->hw.cap_flag)) { - vf->hw_ops = &otx2_hw_ops; - return 0; + err = qmem_alloc(pfvf->dev, &pfvf->dync_lmt, pfvf->tot_lmt_lines, + LMT_LINE_SIZE); + if (err) { + mutex_unlock(&pfvf->mbox.lock); + return err; } + pfvf->hw.lmt_base = (u64 *)pfvf->dync_lmt->base; + req->lmt_iova = (u64)pfvf->dync_lmt->iova; - vf->hw_ops = &cn10k_hw_ops; - size = pci_resource_len(vf->pdev, PCI_MBOX_BAR_NUM); - vf->hw.lmt_base = ioremap_wc(pci_resource_start(vf->pdev, - PCI_MBOX_BAR_NUM), - size); - if (!vf->hw.lmt_base) { - dev_err(vf->dev, "Unable to map VF LMTST region\n"); - return -ENOMEM; - } + err = otx2_sync_mbox_msg(&pfvf->mbox); + mutex_unlock(&pfvf->mbox.lock); - vf->tot_lmt_lines = size / LMT_LINE_SIZE; - /* LMTST lines per SQ */ - num_lines = (vf->tot_lmt_lines - NIX_LMTID_BASE) / - vf->hw.tx_queues; - vf->nix_lmt_lines = num_lines > 32 ? 32 : num_lines; - vf->nix_lmt_size = vf->nix_lmt_lines * LMT_LINE_SIZE; return 0; } -EXPORT_SYMBOL(cn10k_vf_lmtst_init); +EXPORT_SYMBOL(cn10k_lmtst_init); int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) { @@ -93,9 +76,11 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) struct otx2_snd_queue *sq; sq = &pfvf->qset.sq[qidx]; - sq->lmt_addr = (__force u64 *)((u64)pfvf->hw.nix_lmt_base + + sq->lmt_addr = (u64 *)((u64)pfvf->hw.nix_lmt_base + (qidx * pfvf->nix_lmt_size)); + sq->lmt_id = pfvf->npa_lmt_lines + (qidx * LMT_BURST_SIZE); + /* Get memory to put this msg */ aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox); if (!aq) @@ -158,15 +143,13 @@ void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq) void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx) { - struct otx2_nic *pfvf = dev; - int lmt_id = NIX_LMTID_BASE + (qidx * pfvf->nix_lmt_lines); u64 val = 0, tar_addr = 0; /* FIXME: val[0:10] LMT_ID. * [12:15] no of LMTST - 1 in the burst. * [19:63] data size of each LMTST in the burst except first. */ - val = (lmt_id & 0x7FF); + val = (sq->lmt_id & 0x7FF); /* Target address for LMTST flush tells HW how many 128bit * words are present. * tar_addr[6:4] size of first LMTST - 1 in units of 128b. 
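The FIXME comment in cn10k_sqe_flush() above spells out the CN10K LMTST dispatch word: val[0:10] carries the LMT line ID, val[12:15] the number of LMTSTs in the burst minus one, and val[19:63] the data sizes of the second and later LMTSTs. A minimal sketch of how such a value could be composed with the kernel's bitfield helpers follows; the mask names and helper below are hypothetical illustrations of the layout described in that comment, not code from this patch.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Field positions taken from the cn10k_sqe_flush() comment above;
 * these macro names are illustrative, not from the driver.
 */
#define LMTST_LMT_ID	GENMASK_ULL(10, 0)   /* val[0:10]: LMT line ID */
#define LMTST_BURST_CNT	GENMASK_ULL(15, 12)  /* val[12:15]: LMTSTs in burst - 1 */
#define LMTST_SIZE_VEC	GENMASK_ULL(63, 19)  /* val[19:63]: sizes of LMTSTs 2..n */

static inline u64 lmtst_dispatch_val(u16 lmt_id, u8 num_lmtst, u64 size_vec)
{
	/* Pack each field into its bit range of the dispatch word */
	return FIELD_PREP(LMTST_LMT_ID, lmt_id) |
	       FIELD_PREP(LMTST_BURST_CNT, num_lmtst - 1) |
	       FIELD_PREP(LMTST_SIZE_VEC, size_vec);
}

Under this reading, a single-LMTST flush would be lmtst_dispatch_val(sq->lmt_id, 1, 0), which reduces to the "val = (sq->lmt_id & 0x7FF)" line in the patch.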
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h index 71292a4cf1f3..1a1ae334477d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h @@ -12,8 +12,7 @@ void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq); void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx); int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura); -int cn10k_pf_lmtst_init(struct otx2_nic *pf); -int cn10k_vf_lmtst_init(struct otx2_nic *vf); +int cn10k_lmtst_init(struct otx2_nic *pfvf); int cn10k_free_all_ipolicers(struct otx2_nic *pfvf); int cn10k_alloc_matchall_ipolicer(struct otx2_nic *pfvf); int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index cf7875d51d87..70fcc1fd962f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -210,6 +210,9 @@ int otx2_set_mac_address(struct net_device *netdev, void *p) /* update dmac field in vlan offload rule */ if (pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT) otx2_install_rxvlan_offload_flow(pfvf); + /* update dmac address in ntuple and DMAC filter list */ + if (pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT) + otx2_dmacflt_update_pfmac_flow(pfvf); } else { return -EPERM; } @@ -921,12 +924,14 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx) aq->cq.drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, cq->cqe_cnt); aq->cq.drop_ena = 1; - /* Enable receive CQ backpressure */ - aq->cq.bp_ena = 1; - aq->cq.bpid = pfvf->bpid[0]; + if (!is_otx2_lbkvf(pfvf->pdev)) { + /* Enable receive CQ backpressure */ + aq->cq.bp_ena = 1; + aq->cq.bpid = pfvf->bpid[0]; - /* Set backpressure level is same as cq pass level */ - aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt); + /* Set backpressure level is same as cq pass level */ + aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt); + } } /* Fill AQ info */ @@ -1183,7 +1188,7 @@ static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id, aq->aura.fc_hyst_bits = 0; /* Store count on all updates */ /* Enable backpressure for RQ aura */ - if (aura_id < pfvf->hw.rqpool_cnt) { + if (aura_id < pfvf->hw.rqpool_cnt && !is_otx2_lbkvf(pfvf->pdev)) { aq->aura.bp_ena = 0; aq->aura.nix0_bpid = pfvf->bpid[0]; /* Set backpressure level for RQ's Aura */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index 234b330f3183..8fd58cd07f50 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -218,8 +218,8 @@ struct otx2_hw { unsigned long cap_flag; #define LMT_LINE_SIZE 128 -#define NIX_LMTID_BASE 72 /* RX + TX + XDP */ - void __iomem *lmt_base; +#define LMT_BURST_SIZE 32 /* 32 LMTST lines for burst SQE flush */ + u64 *lmt_base; u64 *npa_lmt_base; u64 *nix_lmt_base; }; @@ -288,6 +288,9 @@ struct otx2_flow_config { u16 tc_flower_offset; u16 ntuple_max_flows; u16 tc_max_flows; + u8 dmacflt_max_flows; + u8 *bmap_to_dmacindex; + unsigned long dmacflt_bmap; struct list_head flow_list; }; @@ -329,6 +332,7 @@ struct otx2_nic { #define OTX2_FLAG_TC_FLOWER_SUPPORT BIT_ULL(11) #define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED BIT_ULL(12) #define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED BIT_ULL(13) +#define OTX2_FLAG_DMACFLTR_SUPPORT 
BIT_ULL(14) u64 flags; struct otx2_qset qset; @@ -363,8 +367,9 @@ struct otx2_nic { /* Block address of NIX either BLKADDR_NIX0 or BLKADDR_NIX1 */ int nix_blkaddr; /* LMTST Lines info */ + struct qmem *dync_lmt; u16 tot_lmt_lines; - u16 nix_lmt_lines; + u16 npa_lmt_lines; u32 nix_lmt_size; struct otx2_ptp *ptp; @@ -833,4 +838,11 @@ int otx2_init_tc(struct otx2_nic *nic); void otx2_shutdown_tc(struct otx2_nic *nic); int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type, void *type_data); +/* CGX/RPM DMAC filters support */ +int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf); +int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos); +int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u8 bit_pos); +int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos); +void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf); +void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf); #endif /* OTX2_COMMON_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c new file mode 100644 index 000000000000..383a6b5cb698 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c @@ -0,0 +1,173 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Physical Function ethernet driver + * + * Copyright (C) 2021 Marvell. + */ + +#include "otx2_common.h" + +static int otx2_dmacflt_do_add(struct otx2_nic *pf, const u8 *mac, + u8 *dmac_index) +{ + struct cgx_mac_addr_add_req *req; + struct cgx_mac_addr_add_rsp *rsp; + int err; + + mutex_lock(&pf->mbox.lock); + + req = otx2_mbox_alloc_msg_cgx_mac_addr_add(&pf->mbox); + if (!req) { + mutex_unlock(&pf->mbox.lock); + return -ENOMEM; + } + + ether_addr_copy(req->mac_addr, mac); + err = otx2_sync_mbox_msg(&pf->mbox); + + if (!err) { + rsp = (struct cgx_mac_addr_add_rsp *) + otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr); + *dmac_index = rsp->index; + } + + mutex_unlock(&pf->mbox.lock); + return err; +} + +static int otx2_dmacflt_add_pfmac(struct otx2_nic *pf) +{ + struct cgx_mac_addr_set_or_get *req; + int err; + + mutex_lock(&pf->mbox.lock); + + req = otx2_mbox_alloc_msg_cgx_mac_addr_set(&pf->mbox); + if (!req) { + mutex_unlock(&pf->mbox.lock); + return -ENOMEM; + } + + ether_addr_copy(req->mac_addr, pf->netdev->dev_addr); + err = otx2_sync_mbox_msg(&pf->mbox); + + mutex_unlock(&pf->mbox.lock); + return err; +} + +int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos) +{ + u8 *dmacindex; + + /* Store dmacindex returned by CGX/RPM driver which will + * be used for macaddr update/remove + */ + dmacindex = &pf->flow_cfg->bmap_to_dmacindex[bit_pos]; + + if (ether_addr_equal(mac, pf->netdev->dev_addr)) + return otx2_dmacflt_add_pfmac(pf); + else + return otx2_dmacflt_do_add(pf, mac, dmacindex); +} + +static int otx2_dmacflt_do_remove(struct otx2_nic *pfvf, const u8 *mac, + u8 dmac_index) +{ + struct cgx_mac_addr_del_req *req; + int err; + + mutex_lock(&pfvf->mbox.lock); + req = otx2_mbox_alloc_msg_cgx_mac_addr_del(&pfvf->mbox); + if (!req) { + mutex_unlock(&pfvf->mbox.lock); + return -ENOMEM; + } + + req->index = dmac_index; + + err = otx2_sync_mbox_msg(&pfvf->mbox); + mutex_unlock(&pfvf->mbox.lock); + + return err; +} + +static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf) +{ + struct msg_req *req; + int err; + + mutex_lock(&pf->mbox.lock); + req = otx2_mbox_alloc_msg_cgx_mac_addr_reset(&pf->mbox); + if (!req) { + mutex_unlock(&pf->mbox.lock); + return -ENOMEM; + } + + err = otx2_sync_mbox_msg(&pf->mbox);
+ + mutex_unlock(&pf->mbox.lock); + return err; +} + +int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, + u8 bit_pos) +{ + u8 dmacindex = pf->flow_cfg->bmap_to_dmacindex[bit_pos]; + + if (ether_addr_equal(mac, pf->netdev->dev_addr)) + return otx2_dmacflt_remove_pfmac(pf); + else + return otx2_dmacflt_do_remove(pf, mac, dmacindex); +} + +/* CGX/RPM blocks support a maximum of 32 unicast entries. + * In a typical configuration the MAC block is associated + * with 4 LMACs, so each LMAC will have 8 DMAC entries. + */ +int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf) +{ + struct cgx_max_dmac_entries_get_rsp *rsp; + struct msg_req *msg; + int err; + + mutex_lock(&pf->mbox.lock); + msg = otx2_mbox_alloc_msg_cgx_mac_max_entries_get(&pf->mbox); + + if (!msg) { + mutex_unlock(&pf->mbox.lock); + return -ENOMEM; + } + + err = otx2_sync_mbox_msg(&pf->mbox); + if (err) + goto out; + + rsp = (struct cgx_max_dmac_entries_get_rsp *) + otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &msg->hdr); + pf->flow_cfg->dmacflt_max_flows = rsp->max_dmac_filters; + +out: + mutex_unlock(&pf->mbox.lock); + return err; +} + +int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos) +{ + struct cgx_mac_addr_update_req *req; + int rc; + + mutex_lock(&pf->mbox.lock); + + req = otx2_mbox_alloc_msg_cgx_mac_addr_update(&pf->mbox); + + if (!req) { + mutex_unlock(&pf->mbox.lock); + return -ENOMEM; + } + + ether_addr_copy(req->mac_addr, mac); + req->index = pf->flow_cfg->bmap_to_dmacindex[bit_pos]; + rc = otx2_sync_mbox_msg(&pf->mbox); + + mutex_unlock(&pf->mbox.lock); + return rc; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c index 8df748e0677b..b906a0eb6e0d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c @@ -298,15 +298,14 @@ static int otx2_set_channels(struct net_device *dev, err = otx2_set_real_num_queues(dev, channel->tx_count, channel->rx_count); if (err) - goto fail; + return err; pfvf->hw.rx_queues = channel->rx_count; pfvf->hw.tx_queues = channel->tx_count; pfvf->qset.cq_cnt = pfvf->hw.tx_queues + pfvf->hw.rx_queues; -fail: if (if_up) - dev->netdev_ops->ndo_open(dev); + err = dev->netdev_ops->ndo_open(dev); netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n", pfvf->hw.tx_queues, pfvf->hw.rx_queues); @@ -410,7 +409,7 @@ static int otx2_set_ringparam(struct net_device *netdev, qs->rqe_cnt = rx_count; if (if_up) - netdev->netdev_ops->ndo_open(netdev); + return netdev->netdev_ops->ndo_open(netdev); return 0; } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c index 8c97106bdd1c..4d9de525802d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c @@ -18,6 +18,12 @@ struct otx2_flow { bool is_vf; u8 rss_ctx_id; int vf; + bool dmac_filter; +}; + +enum dmac_req { + DMAC_ADDR_UPDATE, + DMAC_ADDR_DEL }; static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf, struct otx2_flow_config *flow_cfg) @@ -219,6 +225,22 @@ int otx2_mcam_flow_init(struct otx2_nic *pf) if (!pf->mac_table) return -ENOMEM; + otx2_dmacflt_get_max_cnt(pf); + + /* DMAC filters are not allocated */ + if (!pf->flow_cfg->dmacflt_max_flows) + return 0; + + pf->flow_cfg->bmap_to_dmacindex = + devm_kzalloc(pf->dev, sizeof(u8) * + pf->flow_cfg->dmacflt_max_flows, + GFP_KERNEL); + + if (!pf->flow_cfg->bmap_to_dmacindex) + return
-ENOMEM; + + pf->flags |= OTX2_FLAG_DMACFLTR_SUPPORT; + return 0; } @@ -280,6 +302,12 @@ int otx2_add_macfilter(struct net_device *netdev, const u8 *mac) { struct otx2_nic *pf = netdev_priv(netdev); + if (bitmap_weight(&pf->flow_cfg->dmacflt_bmap, + pf->flow_cfg->dmacflt_max_flows)) + netdev_warn(netdev, + "Add %pM to CGX/RPM DMAC filters list as well\n", + mac); + return otx2_do_add_macfilter(pf, mac); } @@ -351,12 +379,22 @@ static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow) list_add(&flow->list, head); } +static int otx2_get_maxflows(struct otx2_flow_config *flow_cfg) +{ + if (flow_cfg->nr_flows == flow_cfg->ntuple_max_flows || + bitmap_weight(&flow_cfg->dmacflt_bmap, + flow_cfg->dmacflt_max_flows)) + return flow_cfg->ntuple_max_flows + flow_cfg->dmacflt_max_flows; + else + return flow_cfg->ntuple_max_flows; +} + int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc, u32 location) { struct otx2_flow *iter; - if (location >= pfvf->flow_cfg->ntuple_max_flows) + if (location >= otx2_get_maxflows(pfvf->flow_cfg)) return -EINVAL; list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) { @@ -378,7 +416,7 @@ int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc, int idx = 0; int err = 0; - nfc->data = pfvf->flow_cfg->ntuple_max_flows; + nfc->data = otx2_get_maxflows(pfvf->flow_cfg); while ((!err || err == -ENOENT) && idx < rule_cnt) { err = otx2_get_flow(pfvf, nfc, location); if (!err) @@ -760,6 +798,32 @@ int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp, return 0; } +static int otx2_is_flow_rule_dmacfilter(struct otx2_nic *pfvf, + struct ethtool_rx_flow_spec *fsp) +{ + struct ethhdr *eth_mask = &fsp->m_u.ether_spec; + struct ethhdr *eth_hdr = &fsp->h_u.ether_spec; + u64 ring_cookie = fsp->ring_cookie; + u32 flow_type; + + if (!(pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)) + return false; + + flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS); + + /* CGX/RPM block DMAC filtering is configured for white listing; + * check for an action other than DROP + */ + if (flow_type == ETHER_FLOW && ring_cookie != RX_CLS_FLOW_DISC && + !ethtool_get_flow_spec_ring_vf(ring_cookie)) { + if (is_zero_ether_addr(eth_mask->h_dest) && + is_valid_ether_addr(eth_hdr->h_dest)) + return true; + } + + return false; +} + static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow) { u64 ring_cookie = flow->flow_spec.ring_cookie; @@ -818,14 +882,46 @@ static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow) return err; } +static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf, + struct otx2_flow *flow) +{ + struct otx2_flow *pf_mac; + struct ethhdr *eth_hdr; + + pf_mac = kzalloc(sizeof(*pf_mac), GFP_KERNEL); + if (!pf_mac) + return -ENOMEM; + + pf_mac->entry = 0; + pf_mac->dmac_filter = true; + pf_mac->location = pfvf->flow_cfg->ntuple_max_flows; + memcpy(&pf_mac->flow_spec, &flow->flow_spec, + sizeof(struct ethtool_rx_flow_spec)); + pf_mac->flow_spec.location = pf_mac->location; + + /* Copy PF mac address */ + eth_hdr = &pf_mac->flow_spec.h_u.ether_spec; + ether_addr_copy(eth_hdr->h_dest, pfvf->netdev->dev_addr); + + /* Install DMAC filter with PF mac address */ + otx2_dmacflt_add(pfvf, eth_hdr->h_dest, 0); + + otx2_add_flow_to_list(pfvf, pf_mac); + pfvf->flow_cfg->nr_flows++; + set_bit(0, &pfvf->flow_cfg->dmacflt_bmap); + + return 0; +} + int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc) { struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; struct ethtool_rx_flow_spec *fsp =
&nfc->fs; struct otx2_flow *flow; + struct ethhdr *eth_hdr; bool new = false; + int err = 0; u32 ring; - int err; ring = ethtool_get_flow_spec_ring(fsp->ring_cookie); if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT)) @@ -834,16 +930,15 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc) if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC) return -EINVAL; - if (fsp->location >= flow_cfg->ntuple_max_flows) + if (fsp->location >= otx2_get_maxflows(flow_cfg)) return -EINVAL; flow = otx2_find_flow(pfvf, fsp->location); if (!flow) { - flow = kzalloc(sizeof(*flow), GFP_ATOMIC); + flow = kzalloc(sizeof(*flow), GFP_KERNEL); if (!flow) return -ENOMEM; flow->location = fsp->location; - flow->entry = flow_cfg->flow_ent[flow->location]; new = true; } /* struct copy */ @@ -852,7 +947,54 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc) if (fsp->flow_type & FLOW_RSS) flow->rss_ctx_id = nfc->rss_context; - err = otx2_add_flow_msg(pfvf, flow); + if (otx2_is_flow_rule_dmacfilter(pfvf, &flow->flow_spec)) { + eth_hdr = &flow->flow_spec.h_u.ether_spec; + + /* Sync dmac filter table with updated fields */ + if (flow->dmac_filter) + return otx2_dmacflt_update(pfvf, eth_hdr->h_dest, + flow->entry); + + if (bitmap_full(&flow_cfg->dmacflt_bmap, + flow_cfg->dmacflt_max_flows)) { + netdev_warn(pfvf->netdev, + "Can't insert the rule %d as max allowed dmac filters are %d\n", + flow->location + + flow_cfg->dmacflt_max_flows, + flow_cfg->dmacflt_max_flows); + err = -EINVAL; + if (new) + kfree(flow); + return err; + } + + /* Install PF mac address to DMAC filter list */ + if (!test_bit(0, &flow_cfg->dmacflt_bmap)) + otx2_add_flow_with_pfmac(pfvf, flow); + + flow->dmac_filter = true; + flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap, + flow_cfg->dmacflt_max_flows); + fsp->location = flow_cfg->ntuple_max_flows + flow->entry; + flow->flow_spec.location = fsp->location; + flow->location = fsp->location; + + set_bit(flow->entry, &flow_cfg->dmacflt_bmap); + otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry); + + } else { + if (flow->location >= pfvf->flow_cfg->ntuple_max_flows) { + netdev_warn(pfvf->netdev, + "Can't insert non dmac ntuple rule at %d, allowed range %d-0\n", + flow->location, + flow_cfg->ntuple_max_flows - 1); + err = -EINVAL; + } else { + flow->entry = flow_cfg->flow_ent[flow->location]; + err = otx2_add_flow_msg(pfvf, flow); + } + } + if (err) { if (new) kfree(flow); @@ -890,20 +1032,70 @@ static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all) return err; } +static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req) +{ + struct otx2_flow *iter; + struct ethhdr *eth_hdr; + bool found = false; + + list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) { + if (iter->dmac_filter && iter->entry == 0) { + eth_hdr = &iter->flow_spec.h_u.ether_spec; + if (req == DMAC_ADDR_DEL) { + otx2_dmacflt_remove(pfvf, eth_hdr->h_dest, + 0); + clear_bit(0, &pfvf->flow_cfg->dmacflt_bmap); + found = true; + } else { + ether_addr_copy(eth_hdr->h_dest, + pfvf->netdev->dev_addr); + otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0); + } + break; + } + } + + if (found) { + list_del(&iter->list); + kfree(iter); + pfvf->flow_cfg->nr_flows--; + } +} + int otx2_remove_flow(struct otx2_nic *pfvf, u32 location) { struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; struct otx2_flow *flow; int err; - if (location >= flow_cfg->ntuple_max_flows) + if (location >= otx2_get_maxflows(flow_cfg)) return -EINVAL; flow = otx2_find_flow(pfvf, location); if 
(!flow) return -ENOENT; - err = otx2_remove_flow_msg(pfvf, flow->entry, false); + if (flow->dmac_filter) { + struct ethhdr *eth_hdr = &flow->flow_spec.h_u.ether_spec; + + /* user not allowed to remove dmac filter with interface mac */ + if (ether_addr_equal(pfvf->netdev->dev_addr, eth_hdr->h_dest)) + return -EPERM; + + err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest, + flow->entry); + clear_bit(flow->entry, &flow_cfg->dmacflt_bmap); + /* If all dmac filters are removed delete macfilter with + * interface mac address and configure CGX/RPM block in + * promiscuous mode + */ + if (bitmap_weight(&flow_cfg->dmacflt_bmap, + flow_cfg->dmacflt_max_flows) == 1) + otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL); + } else { + err = otx2_remove_flow_msg(pfvf, flow->entry, false); + } + if (err) return err; @@ -1100,3 +1292,22 @@ int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable) mutex_unlock(&pf->mbox.lock); return rsp_hdr->rc; } + +void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf) +{ + struct otx2_flow *iter; + struct ethhdr *eth_hdr; + + list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) { + if (iter->dmac_filter) { + eth_hdr = &iter->flow_spec.h_u.ether_spec; + otx2_dmacflt_add(pf, eth_hdr->h_dest, + iter->entry); + } + } +} + +void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf) +{ + otx2_update_rem_pfmac(pfvf, DMAC_ADDR_UPDATE); +} diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index 59912f73417b..2c24944a4dba 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -1110,6 +1110,11 @@ static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable) struct msg_req *msg; int err; + if (enable && bitmap_weight(&pf->flow_cfg->dmacflt_bmap, + pf->flow_cfg->dmacflt_max_flows)) + netdev_warn(pf->netdev, + "CGX/RPM internal loopback might not work as DMAC filters are active\n"); + mutex_lock(&pf->mbox.lock); if (enable) msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox); @@ -1533,10 +1538,10 @@ int otx2_open(struct net_device *netdev) if (test_bit(CN10K_LMTST, &pf->hw.cap_flag)) { /* Reserve LMT lines for NPA AURA batch free */ - pf->hw.npa_lmt_base = (__force u64 *)pf->hw.lmt_base; + pf->hw.npa_lmt_base = pf->hw.lmt_base; /* Reserve LMT lines for NIX TX */ - pf->hw.nix_lmt_base = (__force u64 *)((u64)pf->hw.npa_lmt_base + - (NIX_LMTID_BASE * LMT_LINE_SIZE)); + pf->hw.nix_lmt_base = (u64 *)((u64)pf->hw.npa_lmt_base + + (pf->npa_lmt_lines * LMT_LINE_SIZE)); } err = otx2_init_hw_resources(pf); @@ -1644,6 +1649,10 @@ int otx2_open(struct net_device *netdev) /* Restore pause frame settings */ otx2_config_pause_frm(pf); + /* Install DMAC Filters */ + if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT) + otx2_dmacflt_reinstall_flows(pf); + err = otx2_rxtx_enable(pf, true); if (err) goto err_tx_stop_queues; @@ -1653,6 +1662,7 @@ int otx2_open(struct net_device *netdev) err_tx_stop_queues: netif_tx_stop_all_queues(netdev); netif_carrier_off(netdev); + pf->flags |= OTX2_FLAG_INTF_DOWN; err_free_cints: otx2_free_cints(pf, qidx); vec = pci_irq_vector(pf->pdev, @@ -1680,6 +1690,10 @@ int otx2_stop(struct net_device *netdev) struct otx2_rss_info *rss; int qidx, vec, wrk; + /* If the DOWN flag is set resources are already freed */ + if (pf->flags & OTX2_FLAG_INTF_DOWN) + return 0; + netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); @@ -2526,7 +2540,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (err) goto 
err_detach_rsrc; - err = cn10k_pf_lmtst_init(pf); + err = cn10k_lmtst_init(pf); if (err) goto err_detach_rsrc; @@ -2630,8 +2644,8 @@ err_del_mcam_entries: err_ptp_destroy: otx2_ptp_destroy(pf); err_detach_rsrc: - if (hw->lmt_base) - iounmap(hw->lmt_base); + if (test_bit(CN10K_LMTST, &pf->hw.cap_flag)) + qmem_free(pf->dev, pf->dync_lmt); otx2_detach_resources(&pf->mbox); err_disable_mbox_intr: otx2_disable_mbox_intr(pf); @@ -2772,9 +2786,8 @@ static void otx2_remove(struct pci_dev *pdev) otx2_mcam_flow_del(pf); otx2_shutdown_tc(pf); otx2_detach_resources(&pf->mbox); - if (pf->hw.lmt_base) - iounmap(pf->hw.lmt_base); - + if (test_bit(CN10K_LMTST, &pf->hw.cap_flag)) + qmem_free(pf->dev, pf->dync_lmt); otx2_disable_mbox_intr(pf); otx2_pfaf_mbox_destroy(pf); pci_free_irq_vectors(pf->pdev); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c index 905fc02a7dfe..972b202b9884 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c @@ -288,7 +288,7 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic, struct otx2_nic *priv; u32 burst, mark = 0; u8 nr_police = 0; - bool pps; + bool pps = false; u64 rate; int i; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h index 52486c1f0973..2f144e2cf436 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h @@ -83,6 +83,7 @@ struct otx2_snd_queue { u16 num_sqbs; u16 sqe_thresh; u8 sqe_per_sqb; + u32 lmt_id; u64 io_addr; u64 *aura_fc_addr; u64 *lmt_addr; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index 13a908f75ba0..a8bee5aefec1 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -609,7 +609,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (err) goto err_detach_rsrc; - err = cn10k_vf_lmtst_init(vf); + err = cn10k_lmtst_init(vf); if (err) goto err_detach_rsrc; @@ -667,8 +667,8 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) err_unreg_netdev: unregister_netdev(netdev); err_detach_rsrc: - if (hw->lmt_base) - iounmap(hw->lmt_base); + if (test_bit(CN10K_LMTST, &vf->hw.cap_flag)) + qmem_free(vf->dev, vf->dync_lmt); otx2_detach_resources(&vf->mbox); err_disable_mbox_intr: otx2vf_disable_mbox_intr(vf); @@ -700,10 +700,8 @@ static void otx2vf_remove(struct pci_dev *pdev) destroy_workqueue(vf->otx2_wq); otx2vf_disable_mbox_intr(vf); otx2_detach_resources(&vf->mbox); - - if (vf->hw.lmt_base) - iounmap(vf->hw.lmt_base); - + if (test_bit(CN10K_LMTST, &vf->hw.cap_flag)) + qmem_free(vf->dev, vf->dync_lmt); otx2vf_vfaf_mbox_destroy(vf); pci_free_irq_vectors(vf->pdev); pci_set_drvdata(pdev, NULL); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c index d12e21db9fd6..fa7a0682ad1e 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c @@ -530,6 +530,8 @@ err_trap_register: prestera_trap = &prestera_trap_items_arr[i]; devlink_traps_unregister(devlink, &prestera_trap->trap, 1); } + devlink_trap_groups_unregister(devlink, prestera_trap_groups_arr, + groups_count); err_groups_register: kfree(trap_data->trap_items_arr); 
err_trap_items_alloc: diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c index 0b3e8f2db294..9a309169dbae 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c @@ -748,7 +748,7 @@ static void prestera_fdb_offload_notify(struct prestera_port *port, struct switchdev_notifier_fdb_info *info) { - struct switchdev_notifier_fdb_info send_info; + struct switchdev_notifier_fdb_info send_info = {}; send_info.addr = info->addr; send_info.vid = info->vid; @@ -1123,7 +1123,7 @@ static int prestera_switchdev_blk_event(struct notifier_block *unused, static void prestera_fdb_event(struct prestera_switch *sw, struct prestera_event *evt, void *arg) { - struct switchdev_notifier_fdb_info info; + struct switchdev_notifier_fdb_info info = {}; struct net_device *dev = NULL; struct prestera_port *port; struct prestera_lag *lag; diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 00c84656b2e7..28ac4693da3c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -3535,6 +3535,7 @@ slave_start: if (!SRIOV_VALID_STATE(dev->flags)) { mlx4_err(dev, "Invalid SRIOV state\n"); + err = -EINVAL; goto err_close; } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c index df3e4938ecdd..360e093874d4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c @@ -134,6 +134,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, cq->cqn); cq->uar = dev->priv.uar; + cq->irqn = eq->core.irqn; return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index ceebfc20f65e..def2156e50ee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -500,10 +500,7 @@ static int next_phys_dev(struct device *dev, const void *data) return 1; } -/* This function is called with two flows: - * 1. During initialization of mlx5_core_dev and we don't need to lock it. - * 2. During LAG configure stage and caller holds &mlx5_intf_mutex. 
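The send_info changes in the prestera hunks above all apply the same hardening: an on-stack struct handed to notifiers is now zero-initialized, so any member the caller does not set explicitly (for example a field added to the struct after the driver was written) reads as 0 rather than stack garbage. A toy reproduction of the failure mode, with an invented stand-in for switchdev_notifier_fdb_info:

#include <stdio.h>

/* Toy stand-in; is_local models a member added to the struct later. */
struct fdb_info {
	const unsigned char *addr;
	unsigned short vid;
	unsigned char offloaded;
	unsigned char is_local;
};

static void notify(const struct fdb_info *info)
{
	/* A callee may read members the caller never thought about. */
	printf("vid=%u is_local=%u\n", info->vid, info->is_local);
}

int main(void)
{
	static const unsigned char mac[6] = { 0x02, 0, 0, 0, 0, 1 };
	struct fdb_info info = {};       /* zero-fill, as the fix does */

	info.addr = mac;
	info.vid = 10;
	notify(&info);                   /* is_local is a reliable 0, not stack garbage */
	return 0;
}
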
- */ +/* Must be called with intf_mutex held */ struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev) { struct auxiliary_device *adev; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index 01a1d02dcf15..3f8a98093f8c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -1019,12 +1019,19 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer) MLX5_NB_INIT(&tracer->nb, fw_tracer_event, DEVICE_TRACER); mlx5_eq_notifier_register(dev, &tracer->nb); - mlx5_fw_tracer_start(tracer); - + err = mlx5_fw_tracer_start(tracer); + if (err) { + mlx5_core_warn(dev, "FWTracer: Failed to start tracer %d\n", err); + goto err_notifier_unregister; + } return 0; +err_notifier_unregister: + mlx5_eq_notifier_unregister(dev, &tracer->nb); + mlx5_core_destroy_mkey(dev, &tracer->buff.mkey); err_dealloc_pd: mlx5_core_dealloc_pd(dev, tracer->buff.pdn); + cancel_work_sync(&tracer->read_fw_strings_work); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index 150c8e82c738..2cbf18c967f7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -471,6 +471,15 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev, param->cq_period_mode = params->rx_cq_moderation.cq_period_mode; } +static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params) +{ + bool ro = pcie_relaxed_ordering_enabled(mdev->pdev) && + MLX5_CAP_GEN(mdev, relaxed_ordering_write); + + return ro && params->lro_en ? + MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN; +} + int mlx5e_build_rq_param(struct mlx5_core_dev *mdev, struct mlx5e_params *params, struct mlx5e_xsk_param *xsk, @@ -508,7 +517,7 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev, } MLX5_SET(wq, wq, wq_type, params->rq_wq_type); - MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN); + MLX5_SET(wq, wq, end_padding_mode, rq_end_pad_mode(mdev, params)); MLX5_SET(wq, wq, log_wq_stride, mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs)); MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c index 778e229310a9..efef4adce086 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c @@ -482,8 +482,11 @@ static void mlx5e_ptp_build_params(struct mlx5e_ptp *c, params->log_sq_size = orig->log_sq_size; mlx5e_ptp_build_sq_param(c->mdev, params, &cparams->txq_sq_param); } - if (test_bit(MLX5E_PTP_STATE_RX, c->state)) + /* RQ */ + if (test_bit(MLX5E_PTP_STATE_RX, c->state)) { + params->vlan_strip_disable = orig->vlan_strip_disable; mlx5e_ptp_build_rq_param(c->mdev, c->netdev, c->priv->q_counter, cparams); + } } static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params, @@ -494,7 +497,7 @@ static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params, int err; rq->wq_type = params->rq_wq_type; - rq->pdev = mdev->device; + rq->pdev = c->pdev; rq->netdev = priv->netdev; rq->priv = priv; rq->clock = &mdev->clock; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c index 8f79f04eccd6..1e2d117082d4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c @@ -124,6 +124,11 @@ static int mlx5e_route_lookup_ipv4_get(struct mlx5e_priv *priv, if (IS_ERR(rt)) return PTR_ERR(rt); + if (rt->rt_type != RTN_UNICAST) { + ret = -ENETUNREACH; + goto err_rt_release; + } + if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET) { ret = -ENETUNREACH; goto err_rt_release; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c index 86ab4e864fe6..7f94508594fb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c @@ -37,7 +37,7 @@ static void mlx5e_init_trap_rq(struct mlx5e_trap *t, struct mlx5e_params *params struct mlx5e_priv *priv = t->priv; rq->wq_type = params->rq_wq_type; - rq->pdev = mdev->device; + rq->pdev = t->pdev; rq->netdev = priv->netdev; rq->priv = priv; rq->clock = &mdev->clock; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index d09e65557e75..24f919ef9b8e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1535,15 +1535,9 @@ static int mlx5e_alloc_cq_common(struct mlx5e_priv *priv, { struct mlx5_core_dev *mdev = priv->mdev; struct mlx5_core_cq *mcq = &cq->mcq; - int eqn_not_used; - unsigned int irqn; int err; u32 i; - err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn); - if (err) - return err; - err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq, &cq->wq_ctrl); if (err) @@ -1557,7 +1551,6 @@ static int mlx5e_alloc_cq_common(struct mlx5e_priv *priv, mcq->vector = param->eq_ix; mcq->comp = mlx5e_completion_event; mcq->event = mlx5e_cq_error_event; - mcq->irqn = irqn; for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) { struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i); @@ -1605,11 +1598,10 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) void *in; void *cqc; int inlen; - unsigned int irqn_not_used; int eqn; int err; - err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used); + err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn); if (err) return err; @@ -1891,30 +1883,30 @@ static int mlx5e_open_queues(struct mlx5e_channel *c, if (err) goto err_close_icosq; + err = mlx5e_open_rxq_rq(c, params, &cparam->rq); + if (err) + goto err_close_sqs; + if (c->xdp) { err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL, &c->rq_xdpsq, false); if (err) - goto err_close_sqs; + goto err_close_rq; } - err = mlx5e_open_rxq_rq(c, params, &cparam->rq); - if (err) - goto err_close_xdp_sq; - err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL, &c->xdpsq, true); if (err) - goto err_close_rq; + goto err_close_xdp_sq; return 0; -err_close_rq: - mlx5e_close_rq(&c->rq); - err_close_xdp_sq: if (c->xdp) mlx5e_close_xdpsq(&c->rq_xdpsq); +err_close_rq: + mlx5e_close_rq(&c->rq); + err_close_sqs: mlx5e_close_sqs(c); @@ -1949,9 +1941,9 @@ err_close_async_icosq_cq: static void mlx5e_close_queues(struct mlx5e_channel *c) { mlx5e_close_xdpsq(&c->xdpsq); - mlx5e_close_rq(&c->rq); if (c->xdp) mlx5e_close_xdpsq(&c->rq_xdpsq); + mlx5e_close_rq(&c->rq); mlx5e_close_sqs(c); mlx5e_close_icosq(&c->icosq); mlx5e_close_icosq(&c->async_icosq); @@ -1983,9 +1975,8 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, struct mlx5e_channel *c; unsigned int irq; int err; - int eqn; - err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq); + err = mlx5_vector2irqn(priv->mdev, ix, &irq); if (err) return
err; @@ -3384,7 +3375,7 @@ static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool en static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd) { - int err = 0; + int err; int i; for (i = 0; i < chs->num; i++) { @@ -3392,6 +3383,8 @@ static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd) if (err) return err; } + if (chs->ptp && test_bit(MLX5E_PTP_STATE_RX, chs->ptp->state)) + return mlx5e_modify_rq_vsd(&chs->ptp->rq, vsd); return 0; } @@ -3829,6 +3822,24 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features) return 0; } +static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev, + netdev_features_t features) +{ + features &= ~NETIF_F_HW_TLS_RX; + if (netdev->features & NETIF_F_HW_TLS_RX) + netdev_warn(netdev, "Disabling hw_tls_rx, not supported in switchdev mode\n"); + + features &= ~NETIF_F_HW_TLS_TX; + if (netdev->features & NETIF_F_HW_TLS_TX) + netdev_warn(netdev, "Disabling hw_tls_tx, not supported in switchdev mode\n"); + + features &= ~NETIF_F_NTUPLE; + if (netdev->features & NETIF_F_NTUPLE) + netdev_warn(netdev, "Disabling ntuple, not supported in switchdev mode\n"); + + return features; +} + static netdev_features_t mlx5e_fix_features(struct net_device *netdev, netdev_features_t features) { @@ -3860,15 +3871,8 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev, netdev_warn(netdev, "Disabling rxhash, not supported when CQE compress is active\n"); } - if (mlx5e_is_uplink_rep(priv)) { - features &= ~NETIF_F_HW_TLS_RX; - if (netdev->features & NETIF_F_HW_TLS_RX) - netdev_warn(netdev, "Disabling hw_tls_rx, not supported in switchdev mode\n"); - - features &= ~NETIF_F_HW_TLS_TX; - if (netdev->features & NETIF_F_HW_TLS_TX) - netdev_warn(netdev, "Disabling hw_tls_tx, not supported in switchdev mode\n"); - } + if (mlx5e_is_uplink_rep(priv)) + features = mlx5e_fix_uplink_rep_features(netdev, features); mutex_unlock(&priv->state_lock); @@ -4859,6 +4863,9 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) if (MLX5_CAP_ETH(mdev, scatter_fcs)) netdev->hw_features |= NETIF_F_RXFCS; + if (mlx5_qos_is_supported(mdev)) + netdev->hw_features |= NETIF_F_HW_TC; + netdev->features = netdev->hw_features; /* Defaults */ @@ -4879,8 +4886,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->hw_features |= NETIF_F_NTUPLE; #endif } - if (mlx5_qos_is_supported(mdev)) - netdev->features |= NETIF_F_HW_TC; netdev->features |= NETIF_F_HIGHDMA; netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 629a61e8022f..d273758255c3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -452,12 +452,32 @@ static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv, static struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex) { + struct mlx5_core_dev *mdev; struct net_device *netdev; struct mlx5e_priv *priv; - netdev = __dev_get_by_index(net, ifindex); + netdev = dev_get_by_index(net, ifindex); + if (!netdev) + return ERR_PTR(-ENODEV); + priv = netdev_priv(netdev); - return priv->mdev; + mdev = priv->mdev; + dev_put(netdev); + + /* Mirred tc action holds a refcount on the ifindex net_device (see + * net/sched/act_mirred.c:tcf_mirred_get_dev). So, it's okay to continue using mdev + * after dev_put(netdev), while we're in the context of adding a tc flow. 
+ * + * The mdev pointer corresponds to the peer/out net_device of a hairpin. It is then + * stored in a hairpin object, which exists until all flows, that refer to it, get + * removed. + * + * On the other hand, after a hairpin object has been created, the peer net_device may + * be removed/unbound while there are still some hairpin flows that are using it. This + * case is handled by mlx5e_tc_hairpin_update_dead_peer, which is hooked to + * NETDEV_UNREGISTER event of the peer net_device. + */ + return mdev; } static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp) @@ -666,6 +686,10 @@ mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params func_mdev = priv->mdev; peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex); + if (IS_ERR(peer_mdev)) { + err = PTR_ERR(peer_mdev); + goto create_pair_err; + } pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params); if (IS_ERR(pair)) { @@ -804,6 +828,11 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv, int err; peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex); + if (IS_ERR(peer_mdev)) { + NL_SET_ERR_MSG_MOD(extack, "invalid ifindex of mirred device"); + return PTR_ERR(peer_mdev); + } + if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) { NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported"); return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 6e074cc457de..605c8ecc3610 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -855,8 +855,8 @@ clean: return err; } -int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, - unsigned int *irqn) +static int vector2eqnirqn(struct mlx5_core_dev *dev, int vector, int *eqn, + unsigned int *irqn) { struct mlx5_eq_table *table = dev->priv.eq_table; struct mlx5_eq_comp *eq, *n; @@ -865,8 +865,10 @@ int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) { if (i++ == vector) { - *eqn = eq->core.eqn; - *irqn = eq->core.irqn; + if (irqn) + *irqn = eq->core.irqn; + if (eqn) + *eqn = eq->core.eqn; err = 0; break; } @@ -874,8 +876,18 @@ int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, return err; } + +int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn) +{ + return vector2eqnirqn(dev, vector, eqn, NULL); +} EXPORT_SYMBOL(mlx5_vector2eqn); +int mlx5_vector2irqn(struct mlx5_core_dev *dev, int vector, unsigned int *irqn) +{ + return vector2eqnirqn(dev, vector, NULL, irqn); +} + unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev) { return dev->priv.eq_table->num_comp_eqs; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c index a6e1d4f78268..69a3630818d7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c @@ -69,7 +69,7 @@ static void mlx5_esw_bridge_fdb_offload_notify(struct net_device *dev, const unsigned char *addr, u16 vid, unsigned long val) { - struct switchdev_notifier_fdb_info send_info; + struct switchdev_notifier_fdb_info send_info = {}; send_info.addr = addr; send_info.vid = vid; @@ -579,7 +579,7 @@ static struct mlx5_esw_bridge *mlx5_esw_bridge_create(int ifindex, xa_init(&bridge->vports); bridge->ifindex = ifindex; bridge->refcnt = 1; - bridge->ageing_time = BR_DEFAULT_AGEING_TIME; + 
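The eq.c refactor above folds two lookups into one internal helper with optional out-parameters: each result pointer is written only if it is non-NULL, and thin wrappers expose the two single-result entry points. A self-contained sketch of that shape (the range check and mapping values are invented for the example):

#include <stdio.h>

/* One lookup, two optional results: callers pass NULL for the value they
 * don't need, mirroring the vector2eqnirqn() split above. */
static int vector2eqnirqn(int vector, int *eqn, unsigned int *irqn)
{
	if (vector < 0 || vector >= 4)
		return -1;               /* stand-in for -ENOENT */
	if (eqn)
		*eqn = 100 + vector;
	if (irqn)
		*irqn = 32 + (unsigned int)vector;
	return 0;
}

static int vector2eqn(int vector, int *eqn)
{
	return vector2eqnirqn(vector, eqn, NULL);
}

static int vector2irqn(int vector, unsigned int *irqn)
{
	return vector2eqnirqn(vector, NULL, irqn);
}

int main(void)
{
	int eqn;
	unsigned int irqn;

	if (!vector2eqn(1, &eqn) && !vector2irqn(1, &irqn))
		printf("eqn=%d irqn=%u\n", eqn, irqn);
	return 0;
}
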
bridge->ageing_time = clock_t_to_jiffies(BR_DEFAULT_AGEING_TIME); list_add(&bridge->list, &br_offloads->bridges); return bridge; @@ -1006,7 +1006,7 @@ int mlx5_esw_bridge_ageing_time_set(unsigned long ageing_time, struct mlx5_eswit if (!vport->bridge) return -EINVAL; - vport->bridge->ageing_time = ageing_time; + vport->bridge->ageing_time = clock_t_to_jiffies(ageing_time); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/sample.c index 794012c5c476..d3ad78aa9d45 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/sample.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/sample.c @@ -501,6 +501,7 @@ err_sampler: err_offload_rule: mlx5_esw_vporttbl_put(esw, &per_vport_tbl_attr); err_default_tbl: + kfree(sample_flow); return ERR_PTR(err); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 48cac5bf606d..d562edf5b0bc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -636,7 +636,7 @@ struct esw_vport_tbl_namespace { }; struct mlx5_vport_tbl_attr { - u16 chain; + u32 chain; u16 prio; u16 vport; const struct esw_vport_tbl_namespace *vport_ns; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 7579f3402776..3bb71a186004 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -48,6 +48,7 @@ #include "lib/fs_chains.h" #include "en_tc.h" #include "en/mapping.h" +#include "devlink.h" #define mlx5_esw_for_each_rep(esw, i, rep) \ xa_for_each(&((esw)->offloads.vport_reps), i, rep) @@ -382,10 +383,11 @@ esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *f { dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT; dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport; - dest[dest_idx].vport.vhca_id = - MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id); - if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) + if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) { + dest[dest_idx].vport.vhca_id = + MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id); dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; + } if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) { if (pkt_reformat) { flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; @@ -2367,6 +2369,9 @@ static int mlx5_esw_offloads_devcom_event(int event, switch (event) { case ESW_OFFLOADS_DEVCOM_PAIR: + if (mlx5_get_next_phys_dev(esw->dev) != peer_esw->dev) + break; + if (mlx5_eswitch_vport_match_metadata_enabled(esw) != mlx5_eswitch_vport_match_metadata_enabled(peer_esw)) break; @@ -2997,12 +3002,19 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, if (cur_mlx5_mode == mlx5_mode) goto unlock; - if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) + if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) { + if (mlx5_devlink_trap_get_num_active(esw->dev)) { + NL_SET_ERR_MSG_MOD(extack, + "Can't change mode while devlink traps are active"); + err = -EOPNOTSUPP; + goto unlock; + } err = esw_offloads_start(esw, extack); - else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) + } else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) { err = esw_offloads_stop(esw, extack); - else + } else { err = -EINVAL; + } unlock: mlx5_esw_unlock(esw); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 
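Both ageing-time changes above are unit fixes: the bridge ageing time arrives in clock_t (USER_HZ) units, while the driver compares it against jiffies-based timestamps, so it has to be converted once at the boundary. A simplified userspace model of the conversion (the HZ value is illustrative and build-dependent, and the real clock_t_to_jiffies() also guards against overflow):

#include <stdio.h>

#define USER_HZ 100   /* clock_t ticks per second, as seen by userspace */
#define HZ      250   /* illustrative kernel tick rate */

/* Simplified clock_t_to_jiffies(): scale USER_HZ ticks to kernel jiffies. */
static unsigned long clock_t_to_jiffies(unsigned long x)
{
	return x * (HZ / USER_HZ) + (x * (HZ % USER_HZ)) / USER_HZ;
}

int main(void)
{
	unsigned long ageing = 300UL * USER_HZ;   /* 300 s in clock_t units */

	/* Convert once at the boundary; afterwards the value can be compared
	 * with jiffies-based timestamps without a unit mismatch. */
	printf("%lu jiffies\n", clock_t_to_jiffies(ageing));   /* 75000 with HZ=250 */
	return 0;
}
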
b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c index bd66ab2af5b5..d5da4ab65766 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c @@ -417,7 +417,6 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size) struct mlx5_wq_param wqp; struct mlx5_cqe64 *cqe; int inlen, err, eqn; - unsigned int irqn; void *cqc, *in; __be64 *pas; u32 i; @@ -446,7 +445,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size) goto err_cqwq; } - err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn); + err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn); if (err) { kvfree(in); goto err_cqwq; @@ -476,7 +475,6 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size) *conn->cq.mcq.arm_db = 0; conn->cq.mcq.vector = 0; conn->cq.mcq.comp = mlx5_fpga_conn_cq_complete; - conn->cq.mcq.irqn = irqn; conn->cq.mcq.uar = fdev->conn_res.uar; tasklet_setup(&conn->cq.tasklet, mlx5_fpga_conn_cq_tasklet); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index d7bf0a3e4a52..c0697e1b7118 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1024,17 +1024,19 @@ static int connect_fwd_rules(struct mlx5_core_dev *dev, static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, struct fs_prio *prio) { - struct mlx5_flow_table *next_ft; + struct mlx5_flow_table *next_ft, *first_ft; int err = 0; /* Connect_prev_fts and update_root_ft_create are mutually exclusive */ - if (list_empty(&prio->node.children)) { + first_ft = list_first_entry_or_null(&prio->node.children, + struct mlx5_flow_table, node.list); + if (!first_ft || first_ft->level > ft->level) { err = connect_prev_fts(dev, ft, prio); if (err) return err; - next_ft = find_next_chained_ft(prio); + next_ft = first_ft ? first_ft : find_next_chained_ft(prio); err = connect_fwd_rules(dev, ft, next_ft); if (err) return err; @@ -2120,7 +2122,7 @@ static int disconnect_flow_table(struct mlx5_flow_table *ft) node.list) == ft)) return 0; - next_ft = find_next_chained_ft(prio); + next_ft = find_next_ft(ft); err = connect_fwd_rules(dev, next_ft, ft); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 9ff163c5bcde..9abeb80ffa31 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -626,8 +626,16 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work) } fw_reporter_ctx.err_synd = health->synd; fw_reporter_ctx.miss_counter = health->miss_counter; - devlink_health_report(health->fw_fatal_reporter, - "FW fatal error reported", &fw_reporter_ctx); + if (devlink_health_report(health->fw_fatal_reporter, + "FW fatal error reported", &fw_reporter_ctx) == -ECANCELED) { + /* If recovery wasn't performed, due to grace period, + * unload the driver. This ensures that the driver + * closes all its resources and it is not subjected to + * requests from the kernel. + */ + mlx5_core_err(dev, "Driver is in error state. 
Unloading\n"); + mlx5_unload_one(dev); + } } static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h index 624cedebb510..d3d628b862f3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h @@ -104,4 +104,6 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev); struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev); #endif +int mlx5_vector2irqn(struct mlx5_core_dev *dev, int vector, unsigned int *irqn); + #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index eb1b316560a8..c84ad87c99bb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1784,16 +1784,14 @@ static int __init init(void) if (err) goto err_sf; -#ifdef CONFIG_MLX5_CORE_EN err = mlx5e_init(); - if (err) { - pci_unregister_driver(&mlx5_core_driver); - goto err_debug; - } -#endif + if (err) + goto err_en; return 0; +err_en: + mlx5_sf_driver_unregister(); err_sf: pci_unregister_driver(&mlx5_core_driver); err_debug: @@ -1803,9 +1801,7 @@ err_debug: static void __exit cleanup(void) { -#ifdef CONFIG_MLX5_CORE_EN mlx5e_cleanup(); -#endif mlx5_sf_driver_unregister(); pci_unregister_driver(&mlx5_core_driver); mlx5_unregister_debugfs(); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 343807ac2036..da365b8f0141 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -206,8 +206,13 @@ int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw, int mlx5_fw_version_query(struct mlx5_core_dev *dev, u32 *running_ver, u32 *stored_ver); +#ifdef CONFIG_MLX5_CORE_EN int mlx5e_init(void); void mlx5e_cleanup(void); +#else +static inline int mlx5e_init(void){ return 0; } +static inline void mlx5e_cleanup(void){} +#endif static inline bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c index b25f764daa08..3465b363fc2f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c @@ -214,6 +214,7 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i) err = -ENOMEM; goto err_cpumask; } + irq->pool = pool; kref_init(&irq->kref); irq->index = i; err = xa_err(xa_store(&pool->irqs, irq->index, irq, GFP_KERNEL)); @@ -222,7 +223,6 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i) irq->index, err); goto err_xa; } - irq->pool = pool; return irq; err_xa: free_cpumask_var(irq->mask); @@ -251,8 +251,11 @@ int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb) int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb) { + int err = 0; + + err = atomic_notifier_chain_unregister(&irq->nh, nb); irq_put(irq); - return atomic_notifier_chain_unregister(&irq->nh, nb); + return err; } struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq) @@ -437,6 +440,7 @@ irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name, if (!pool) return ERR_PTR(-ENOMEM); pool->dev = dev; + mutex_init(&pool->lock); xa_init_flags(&pool->irqs, XA_FLAGS_ALLOC); pool->xa_num_irqs.min = start; pool->xa_num_irqs.max = start + size 
- 1; @@ -445,7 +449,6 @@ irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name, name); pool->min_threshold = min_threshold * MLX5_EQ_REFS_PER_IRQ; pool->max_threshold = max_threshold * MLX5_EQ_REFS_PER_IRQ; - mutex_init(&pool->lock); mlx5_core_dbg(dev, "pool->name = %s, pool->size = %d, pool->start = %d", name, size, start); return pool; @@ -459,6 +462,7 @@ static void irq_pool_free(struct mlx5_irq_pool *pool) xa_for_each(&pool->irqs, index, irq) irq_release(&irq->kref); xa_destroy(&pool->irqs); + mutex_destroy(&pool->lock); kvfree(pool); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c index 12cf323a5943..9df0e73d1c35 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c @@ -749,7 +749,6 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev, struct mlx5_cqe64 *cqe; struct mlx5dr_cq *cq; int inlen, err, eqn; - unsigned int irqn; void *cqc, *in; __be64 *pas; int vector; @@ -782,7 +781,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev, goto err_cqwq; vector = raw_smp_processor_id() % mlx5_comp_vectors_count(mdev); - err = mlx5_vector2eqn(mdev, vector, &eqn, &irqn); + err = mlx5_vector2eqn(mdev, vector, &eqn); if (err) { kvfree(in); goto err_cqwq; @@ -818,7 +817,6 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev, *cq->mcq.arm_db = cpu_to_be32(2 << 28); cq->mcq.vector = 0; - cq->mcq.irqn = irqn; cq->mcq.uar = uar; return cq; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c index f1950e4968da..e4dd4eed5aee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c @@ -352,6 +352,7 @@ static void dr_ste_v0_set_rx_decap(u8 *hw_ste_p) { MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action, DR_STE_TUNL_ACTION_DECAP); + MLX5_SET(ste_rx_steering_mult, hw_ste_p, fail_on_error, 1); } static void dr_ste_v0_set_rx_pop_vlan(u8 *hw_ste_p) @@ -365,6 +366,7 @@ static void dr_ste_v0_set_rx_decap_l3(u8 *hw_ste_p, bool vlan) MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action, DR_STE_TUNL_ACTION_L3_DECAP); MLX5_SET(ste_modify_packet, hw_ste_p, action_description, vlan ? 
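The pci_irq.c hunks above are ordering fixes of the publish-after-init kind: irq->pool is assigned before xa_store() makes the entry visible, and the pool mutex is initialized before the pool can be used (and destroyed only after its last user). A toy illustration, under the assumption of concurrent lookups, of why fields must be filled in before an object is published:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct pool;

struct irq_entry {
	struct pool *pool;               /* back-pointer; must be valid on publish */
	int index;
};

struct pool {
	pthread_mutex_t lock;
	struct irq_entry *slots[8];
};

static struct pool *pool_alloc(void)
{
	struct pool *p = calloc(1, sizeof(*p));

	if (!p)
		return NULL;
	/* Initialize the lock before the pool is visible to any user. */
	pthread_mutex_init(&p->lock, NULL);
	return p;
}

static struct irq_entry *irq_request(struct pool *p, int i)
{
	struct irq_entry *e = calloc(1, sizeof(*e));

	if (!e)
		return NULL;
	e->pool = p;                     /* fill every field first... */
	e->index = i;
	pthread_mutex_lock(&p->lock);
	p->slots[i] = e;                 /* ...then publish, so lookups never
					  * see a half-initialized entry */
	pthread_mutex_unlock(&p->lock);
	return e;
}

int main(void)
{
	struct pool *p = pool_alloc();
	struct irq_entry *e = p ? irq_request(p, 3) : NULL;

	if (e)
		printf("slot %d back-pointer ok: %s\n",
		       e->index, e->pool == p ? "yes" : "no");
	return 0;
}
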
1 : 0); + MLX5_SET(ste_rx_steering_mult, hw_ste_p, fail_on_error, 1); } static void dr_ste_v0_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 7e221ef01437..f69cbb3852d5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -9079,7 +9079,7 @@ mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif, static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac) { - struct switchdev_notifier_fdb_info info; + struct switchdev_notifier_fdb_info info = {}; struct net_device *dev; dev = br_fdb_find_port(rif->dev, mac, 0); @@ -9127,8 +9127,8 @@ mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif, static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac) { + struct switchdev_notifier_fdb_info info = {}; u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid); - struct switchdev_notifier_fdb_info info; struct net_device *br_dev; struct net_device *dev; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index c5ef9aa64efe..8f90cd323d5f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -2508,7 +2508,7 @@ mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type, const char *mac, u16 vid, struct net_device *dev, bool offloaded) { - struct switchdev_notifier_fdb_info info; + struct switchdev_notifier_fdb_info info = {}; info.addr = mac; info.vid = vid; diff --git a/drivers/net/ethernet/microchip/sparx5/Kconfig b/drivers/net/ethernet/microchip/sparx5/Kconfig index a80419d8d4b5..7bdbb2d09a14 100644 --- a/drivers/net/ethernet/microchip/sparx5/Kconfig +++ b/drivers/net/ethernet/microchip/sparx5/Kconfig @@ -2,6 +2,8 @@ config SPARX5_SWITCH tristate "Sparx5 switch driver" depends on NET_SWITCHDEV depends on HAS_IOMEM + depends on OF + depends on ARCH_SPARX5 || COMPILE_TEST select PHYLINK select PHY_SPARX5_SERDES select RESET_CONTROLLER diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c index 0443f66b5550..9a8e4f201eb1 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c @@ -277,7 +277,7 @@ static void sparx5_fdb_call_notifiers(enum switchdev_notifier_type type, const char *mac, u16 vid, struct net_device *dev, bool offloaded) { - struct switchdev_notifier_fdb_info info; + struct switchdev_notifier_fdb_info info = {}; info.addr = mac; info.vid = vid; diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c index 9d485a9d1f1f..cb68eaaac881 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c @@ -13,19 +13,26 @@ */ #define VSTAX 73 -static void ifh_encode_bitfield(void *ifh, u64 value, u32 pos, u32 width) +#define ifh_encode_bitfield(ifh, value, pos, _width) \ + ({ \ + u32 width = (_width); \ + \ + /* Max width is 5 bytes - 40 bits. 
In worst case this will + * spread over 6 bytes - 48 bits + */ \ + compiletime_assert(width <= 40, \ + "Unsupported width, must be <= 40"); \ + __ifh_encode_bitfield((ifh), (value), (pos), width); \ + }) + +static void __ifh_encode_bitfield(void *ifh, u64 value, u32 pos, u32 width) { u8 *ifh_hdr = ifh; /* Calculate the Start IFH byte position of this IFH bit position */ u32 byte = (35 - (pos / 8)); /* Calculate the Start bit position in the Start IFH byte */ u32 bit = (pos % 8); - u64 encode = GENMASK(bit + width - 1, bit) & (value << bit); - - /* Max width is 5 bytes - 40 bits. In worst case this will - * spread over 6 bytes - 48 bits - */ - compiletime_assert(width <= 40, "Unsupported width, must be <= 40"); + u64 encode = GENMASK_ULL(bit + width - 1, bit) & (value << bit); /* The b0-b7 goes into the start IFH byte */ if (encode & 0xFF) diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index 5249b64f4fc5..49def6934cad 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -540,10 +540,8 @@ static int moxart_mac_probe(struct platform_device *pdev) SET_NETDEV_DEV(ndev, &pdev->dev); ret = register_netdev(ndev); - if (ret) { - free_netdev(ndev); + if (ret) goto init_fail; - } netdev_dbg(ndev, "%s: IRQ=%d address=%pM\n", __func__, ndev->irq, ndev->dev_addr); diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index adfb9781799e..2948d731a1c1 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -1334,6 +1334,7 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot) struct net_device *bond = ocelot_port->bond; mask = ocelot_get_bridge_fwd_mask(ocelot, bridge); + mask |= cpu_fwd_mask; mask &= ~BIT(port); if (bond) { mask &= ~ocelot_get_bond_mask(ocelot, bond, diff --git a/drivers/net/ethernet/mscc/ocelot_io.c b/drivers/net/ethernet/mscc/ocelot_io.c index ea4e83410fe4..7390fa3980ec 100644 --- a/drivers/net/ethernet/mscc/ocelot_io.c +++ b/drivers/net/ethernet/mscc/ocelot_io.c @@ -21,7 +21,7 @@ u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset) ocelot->map[target][reg & REG_MASK] + offset, &val); return val; } -EXPORT_SYMBOL(__ocelot_read_ix); +EXPORT_SYMBOL_GPL(__ocelot_read_ix); void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset) { @@ -32,7 +32,7 @@ void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset) regmap_write(ocelot->targets[target], ocelot->map[target][reg & REG_MASK] + offset, val); } -EXPORT_SYMBOL(__ocelot_write_ix); +EXPORT_SYMBOL_GPL(__ocelot_write_ix); void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg, u32 offset) @@ -45,7 +45,7 @@ void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg, ocelot->map[target][reg & REG_MASK] + offset, mask, val); } -EXPORT_SYMBOL(__ocelot_rmw_ix); +EXPORT_SYMBOL_GPL(__ocelot_rmw_ix); u32 ocelot_port_readl(struct ocelot_port *port, u32 reg) { @@ -58,7 +58,7 @@ u32 ocelot_port_readl(struct ocelot_port *port, u32 reg) regmap_read(port->target, ocelot->map[target][reg & REG_MASK], &val); return val; } -EXPORT_SYMBOL(ocelot_port_readl); +EXPORT_SYMBOL_GPL(ocelot_port_readl); void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg) { @@ -69,7 +69,7 @@ void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg) regmap_write(port->target, ocelot->map[target][reg & REG_MASK], val); } -EXPORT_SYMBOL(ocelot_port_writel); +EXPORT_SYMBOL_GPL(ocelot_port_writel); void 
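The sparx5 hunk above fixes a width bug: with a 40-bit field the mask can extend to bit 47, and GENMASK() computes in unsigned long, which is only 32 bits on 32-bit targets — hence the switch to GENMASK_ULL() plus the compile-time width check hoisted into a macro wrapper. A standalone demonstration, using simplified versions of the kernel macros:

#include <stdint.h>
#include <stdio.h>

/* Simplified forms of the kernel macros. */
#define GENMASK(h, l) \
	(((~0UL) << (l)) & (~0UL >> (8 * sizeof(unsigned long) - 1 - (h))))
#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	/* A 40-bit field starting at bit 5 spans bits 5..44. On a 32-bit
	 * build, GENMASK(44, 5) shifts past the width of unsigned long --
	 * undefined behaviour and a truncated mask -- which is why the hunk
	 * above moves to GENMASK_ULL(). */
	uint64_t mask = GENMASK_ULL(44, 5);

	printf("mask = 0x%016llx\n", (unsigned long long)mask);  /* 0x00001fffffffffe0 */
	return 0;
}
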
ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg) { @@ -77,7 +77,7 @@ void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg) ocelot_port_writel(port, (cur & (~mask)) | val, reg); } -EXPORT_SYMBOL(ocelot_port_rmwl); +EXPORT_SYMBOL_GPL(ocelot_port_rmwl); u32 __ocelot_target_read_ix(struct ocelot *ocelot, enum ocelot_target target, u32 reg, u32 offset) @@ -128,7 +128,7 @@ int ocelot_regfields_init(struct ocelot *ocelot, return 0; } -EXPORT_SYMBOL(ocelot_regfields_init); +EXPORT_SYMBOL_GPL(ocelot_regfields_init); static struct regmap_config ocelot_regmap_config = { .reg_bits = 32, @@ -148,4 +148,4 @@ struct regmap *ocelot_regmap_init(struct ocelot *ocelot, struct resource *res) return devm_regmap_init_mmio(ocelot->dev, regs, &ocelot_regmap_config); } -EXPORT_SYMBOL(ocelot_regmap_init); +EXPORT_SYMBOL_GPL(ocelot_regmap_init); diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c index 3e89e34f86d5..e9d260d84bf3 100644 --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -1298,6 +1298,7 @@ static int ocelot_netdevice_lag_leave(struct net_device *dev, } static int ocelot_netdevice_changeupper(struct net_device *dev, + struct net_device *brport_dev, struct netdev_notifier_changeupper_info *info) { struct netlink_ext_ack *extack; @@ -1307,11 +1308,11 @@ static int ocelot_netdevice_changeupper(struct net_device *dev, if (netif_is_bridge_master(info->upper_dev)) { if (info->linking) - err = ocelot_netdevice_bridge_join(dev, dev, + err = ocelot_netdevice_bridge_join(dev, brport_dev, info->upper_dev, extack); else - err = ocelot_netdevice_bridge_leave(dev, dev, + err = ocelot_netdevice_bridge_leave(dev, brport_dev, info->upper_dev); } if (netif_is_lag_master(info->upper_dev)) { @@ -1346,7 +1347,7 @@ ocelot_netdevice_lag_changeupper(struct net_device *dev, if (ocelot_port->bond != dev) return NOTIFY_OK; - err = ocelot_netdevice_changeupper(lower, info); + err = ocelot_netdevice_changeupper(lower, dev, info); if (err) return notifier_from_errno(err); } @@ -1385,7 +1386,7 @@ static int ocelot_netdevice_event(struct notifier_block *unused, struct netdev_notifier_changeupper_info *info = ptr; if (ocelot_netdevice_dev_check(dev)) - return ocelot_netdevice_changeupper(dev, info); + return ocelot_netdevice_changeupper(dev, dev, info); if (netif_is_lag_master(dev)) return ocelot_netdevice_lag_changeupper(dev, info); diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c index 51b4b25d15ad..84f7dbe9edff 100644 --- a/drivers/net/ethernet/natsemi/natsemi.c +++ b/drivers/net/ethernet/natsemi/natsemi.c @@ -819,7 +819,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent) printk(version); #endif - i = pci_enable_device(pdev); + i = pcim_enable_device(pdev); if (i) return i; /* natsemi has a non-standard PM control register @@ -852,7 +852,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent) ioaddr = ioremap(iostart, iosize); if (!ioaddr) { i = -ENOMEM; - goto err_ioremap; + goto err_pci_request_regions; } /* Work around the dropped serial bit. 
*/ @@ -974,9 +974,6 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent) err_register_netdev: iounmap(ioaddr); - err_ioremap: - pci_release_regions(pdev); - err_pci_request_regions: free_netdev(dev); return i; @@ -3241,7 +3238,6 @@ static void natsemi_remove1(struct pci_dev *pdev) NATSEMI_REMOVE_FILE(pdev, dspcfg_workaround); unregister_netdev (dev); - pci_release_regions (pdev); iounmap(ioaddr); free_netdev (dev); } diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index 82eef4c72f01..7abd13e69471 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -3512,13 +3512,13 @@ static void vxge_device_unregister(struct __vxge_hw_device *hldev) kfree(vdev->vpaths); - /* we are safe to free it now */ - free_netdev(dev); - vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered", buf); vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf, __func__, __LINE__); + + /* we are safe to free it now */ + free_netdev(dev); } /* diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c index 273d529d43c2..062bb2db68bf 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c +++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c @@ -1141,20 +1141,7 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent) nfp_fl_ct_clean_flow_entry(ct_entry); kfree(ct_map_ent); - /* If this is the last pre_ct_rule it means that it is - * very likely that the nft table will be cleaned up next, - * as this happens on the removal of the last act_ct flow. - * However we cannot deregister the callback on the removal - * of the last nft flow as this runs into a deadlock situation. - * So deregister the callback on removal of the last pre_ct flow - * and remove any remaining nft flow entries. We also cannot - * save this state and delete the callback later since the - * nft table would already have been freed at that time. 
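The vxge hunk above is a use-after-free fix by reordering: the trace messages read state reachable through the net_device, so free_netdev() has to be the very last touch. A toy version of the rule that every access must precede the free (struct and names invented for the sketch):

#include <stdio.h>
#include <stdlib.h>

struct netdev {                          /* toy stand-in for struct net_device;
					  * driver private data follows it in
					  * the real allocation */
	char name[16];
};

static void netdev_free(struct netdev *nd)
{
	free(nd);                        /* after this, nd and its priv are gone */
}

int main(void)
{
	struct netdev *nd = calloc(1, sizeof(*nd));

	if (!nd)
		return 1;
	snprintf(nd->name, sizeof(nd->name), "eth0");

	/* Every access through the object must come before the free -- the
	 * vxge hunk reorders exactly this way, since its trace messages read
	 * state owned by the netdev's private data. */
	printf("%s: unregistered\n", nd->name);
	netdev_free(nd);
	/* printf("%s\n", nd->name);  <-- this would be the use-after-free */
	return 0;
}
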
- */ if (!zt->pre_ct_count) { - nf_flow_table_offload_del_cb(zt->nft, - nfp_fl_ct_handle_nft_flow, - zt); zt->nft = NULL; nfp_fl_ct_clean_nft_entries(zt); } @@ -1172,6 +1159,7 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent) nfp_ct_map_params); nfp_fl_ct_clean_flow_entry(ct_map_ent->ct_entry); kfree(ct_map_ent); + break; default: break; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 1b482446536d..8803faadd302 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -286,6 +286,8 @@ nfp_net_get_link_ksettings(struct net_device *netdev, /* Init to unknowns */ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); cmd->base.port = PORT_OTHER; cmd->base.speed = SPEED_UNKNOWN; cmd->base.duplex = DUPLEX_UNKNOWN; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index af3a5368529c..e795fa63ca12 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -29,7 +29,7 @@ static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = { */ }; -static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode); +static void ionic_lif_rx_mode(struct ionic_lif *lif); static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr); static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr); static void ionic_link_status_check(struct ionic_lif *lif); @@ -53,7 +53,19 @@ static void ionic_dim_work(struct work_struct *work) cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix); qcq = container_of(dim, struct ionic_qcq, dim); new_coal = ionic_coal_usec_to_hw(qcq->q.lif->ionic, cur_moder.usec); - qcq->intr.dim_coal_hw = new_coal ? new_coal : 1; + new_coal = new_coal ? new_coal : 1; + + if (qcq->intr.dim_coal_hw != new_coal) { + unsigned int qi = qcq->cq.bound_q->index; + struct ionic_lif *lif = qcq->q.lif; + + qcq->intr.dim_coal_hw = new_coal; + + ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, + lif->rxqcqs[qi]->intr.index, + qcq->intr.dim_coal_hw); + } + dim->state = DIM_START_MEASURE; } @@ -77,7 +89,7 @@ static void ionic_lif_deferred_work(struct work_struct *work) switch (w->type) { case IONIC_DW_TYPE_RX_MODE: - ionic_lif_rx_mode(lif, w->rx_mode); + ionic_lif_rx_mode(lif); break; case IONIC_DW_TYPE_RX_ADDR_ADD: ionic_lif_addr_add(lif, w->addr); @@ -1301,10 +1313,8 @@ static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr) return 0; } -static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add, - bool can_sleep) +static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add) { - struct ionic_deferred_work *work; unsigned int nmfilters; unsigned int nufilters; @@ -1330,97 +1340,46 @@ static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add, lif->nucast--; } - if (!can_sleep) { - work = kzalloc(sizeof(*work), GFP_ATOMIC); - if (!work) - return -ENOMEM; - work->type = add ? IONIC_DW_TYPE_RX_ADDR_ADD : - IONIC_DW_TYPE_RX_ADDR_DEL; - memcpy(work->addr, addr, ETH_ALEN); - netdev_dbg(lif->netdev, "deferred: rx_filter %s %pM\n", - add ? "add" : "del", addr); - ionic_lif_deferred_enqueue(&lif->deferred, work); - } else { - netdev_dbg(lif->netdev, "rx_filter %s %pM\n", - add ? 
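Alongside the callback-deregistration change, the conntrack hunk above adds a missing break before default:, so handling one case no longer falls through into the next. A minimal reproduction of that fallthrough bug (enum and strings invented):

#include <stdio.h>

enum ct_entry_type { CT_TYPE_PRE, CT_TYPE_NFT, CT_TYPE_OTHER };

static void del_entry(enum ct_entry_type t)
{
	switch (t) {
	case CT_TYPE_NFT:
		puts("clean nft entry");
		break;                   /* without this break, control falls
					  * through into default -- the bug the
					  * hunk above fixes */
	default:
		puts("nothing to do");
		break;
	}
}

int main(void)
{
	del_entry(CT_TYPE_NFT);          /* prints one line, not two */
	return 0;
}
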
"add" : "del", addr); - if (add) - return ionic_lif_addr_add(lif, addr); - else - return ionic_lif_addr_del(lif, addr); - } + netdev_dbg(lif->netdev, "rx_filter %s %pM\n", + add ? "add" : "del", addr); + if (add) + return ionic_lif_addr_add(lif, addr); + else + return ionic_lif_addr_del(lif, addr); return 0; } static int ionic_addr_add(struct net_device *netdev, const u8 *addr) { - return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR, CAN_SLEEP); -} - -static int ionic_ndo_addr_add(struct net_device *netdev, const u8 *addr) -{ - return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR, CAN_NOT_SLEEP); + return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR); } static int ionic_addr_del(struct net_device *netdev, const u8 *addr) { - return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR, CAN_SLEEP); + return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR); } -static int ionic_ndo_addr_del(struct net_device *netdev, const u8 *addr) +static void ionic_lif_rx_mode(struct ionic_lif *lif) { - return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR, CAN_NOT_SLEEP); -} - -static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode) -{ - struct ionic_admin_ctx ctx = { - .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), - .cmd.rx_mode_set = { - .opcode = IONIC_CMD_RX_MODE_SET, - .lif_index = cpu_to_le16(lif->index), - .rx_mode = cpu_to_le16(rx_mode), - }, - }; + struct net_device *netdev = lif->netdev; + unsigned int nfilters; + unsigned int nd_flags; char buf[128]; - int err; + u16 rx_mode; int i; #define REMAIN(__x) (sizeof(buf) - (__x)) - i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:", - lif->rx_mode, rx_mode); - if (rx_mode & IONIC_RX_MODE_F_UNICAST) - i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST"); - if (rx_mode & IONIC_RX_MODE_F_MULTICAST) - i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST"); - if (rx_mode & IONIC_RX_MODE_F_BROADCAST) - i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST"); - if (rx_mode & IONIC_RX_MODE_F_PROMISC) - i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC"); - if (rx_mode & IONIC_RX_MODE_F_ALLMULTI) - i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI"); - netdev_dbg(lif->netdev, "lif%d %s\n", lif->index, buf); - - err = ionic_adminq_post_wait(lif, &ctx); - if (err) - netdev_warn(lif->netdev, "set rx_mode 0x%04x failed: %d\n", - rx_mode, err); - else - lif->rx_mode = rx_mode; -} + mutex_lock(&lif->config_lock); -static void ionic_set_rx_mode(struct net_device *netdev, bool can_sleep) -{ - struct ionic_lif *lif = netdev_priv(netdev); - struct ionic_deferred_work *work; - unsigned int nfilters; - unsigned int rx_mode; + /* grab the flags once for local use */ + nd_flags = netdev->flags; rx_mode = IONIC_RX_MODE_F_UNICAST; - rx_mode |= (netdev->flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0; - rx_mode |= (netdev->flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0; - rx_mode |= (netdev->flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0; - rx_mode |= (netdev->flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0; + rx_mode |= (nd_flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0; + rx_mode |= (nd_flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0; + rx_mode |= (nd_flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0; + rx_mode |= (nd_flags & IFF_ALLMULTI) ? 
IONIC_RX_MODE_F_ALLMULTI : 0; /* sync unicast addresses * next check to see if we're in an overflow state @@ -1429,49 +1388,83 @@ static void ionic_set_rx_mode(struct net_device *netdev, bool can_sleep) * we remove our overflow flag and check the netdev flags * to see if we can disable NIC PROMISC */ - if (can_sleep) - __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del); - else - __dev_uc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del); + __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del); nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters); if (netdev_uc_count(netdev) + 1 > nfilters) { rx_mode |= IONIC_RX_MODE_F_PROMISC; lif->uc_overflow = true; } else if (lif->uc_overflow) { lif->uc_overflow = false; - if (!(netdev->flags & IFF_PROMISC)) + if (!(nd_flags & IFF_PROMISC)) rx_mode &= ~IONIC_RX_MODE_F_PROMISC; } /* same for multicast */ - if (can_sleep) - __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del); - else - __dev_mc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del); + __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del); nfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters); if (netdev_mc_count(netdev) > nfilters) { rx_mode |= IONIC_RX_MODE_F_ALLMULTI; lif->mc_overflow = true; } else if (lif->mc_overflow) { lif->mc_overflow = false; - if (!(netdev->flags & IFF_ALLMULTI)) + if (!(nd_flags & IFF_ALLMULTI)) rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI; } + i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:", + lif->rx_mode, rx_mode); + if (rx_mode & IONIC_RX_MODE_F_UNICAST) + i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST"); + if (rx_mode & IONIC_RX_MODE_F_MULTICAST) + i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST"); + if (rx_mode & IONIC_RX_MODE_F_BROADCAST) + i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST"); + if (rx_mode & IONIC_RX_MODE_F_PROMISC) + i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC"); + if (rx_mode & IONIC_RX_MODE_F_ALLMULTI) + i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI"); + if (rx_mode & IONIC_RX_MODE_F_RDMA_SNIFFER) + i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_RDMA_SNIFFER"); + netdev_dbg(netdev, "lif%d %s\n", lif->index, buf); + if (lif->rx_mode != rx_mode) { - if (!can_sleep) { - work = kzalloc(sizeof(*work), GFP_ATOMIC); - if (!work) { - netdev_err(lif->netdev, "rxmode change dropped\n"); - return; - } - work->type = IONIC_DW_TYPE_RX_MODE; - work->rx_mode = rx_mode; - netdev_dbg(lif->netdev, "deferred: rx_mode\n"); - ionic_lif_deferred_enqueue(&lif->deferred, work); - } else { - ionic_lif_rx_mode(lif, rx_mode); + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.rx_mode_set = { + .opcode = IONIC_CMD_RX_MODE_SET, + .lif_index = cpu_to_le16(lif->index), + }, + }; + int err; + + ctx.cmd.rx_mode_set.rx_mode = cpu_to_le16(rx_mode); + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + netdev_warn(netdev, "set rx_mode 0x%04x failed: %d\n", + rx_mode, err); + else + lif->rx_mode = rx_mode; + } + + mutex_unlock(&lif->config_lock); +} + +static void ionic_set_rx_mode(struct net_device *netdev, bool can_sleep) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_deferred_work *work; + + if (!can_sleep) { + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) { + netdev_err(lif->netdev, "rxmode change dropped\n"); + return; } + work->type = IONIC_DW_TYPE_RX_MODE; + netdev_dbg(lif->netdev, "deferred: rx_mode\n"); + ionic_lif_deferred_enqueue(&lif->deferred, work); + } else { + ionic_lif_rx_mode(lif); } } @@ -3058,6 +3051,7 @@ void 
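The reworked ionic_set_rx_mode() above keeps a single sleepable worker and decides per call whether to run it directly (process context) or queue it (atomic context), instead of threading can_sleep through every helper. A compact model of that defer-or-run-now split, with a toy array standing in for the driver's deferred-work list:

#include <stdbool.h>
#include <stdio.h>

/* The worker may sleep (it takes locks and posts admin commands), so atomic
 * callers queue it and process-context callers run it directly. */
static void rx_mode_work(void)
{
	puts("sync rx_mode with hardware");
}

static void (*deferred[4])(void);        /* toy stand-in for the deferred list */
static int n_deferred;

static void set_rx_mode(bool can_sleep)
{
	if (!can_sleep) {
		if (n_deferred < 4)
			deferred[n_deferred++] = rx_mode_work;
		puts("deferred: rx_mode");
	} else {
		rx_mode_work();
	}
}

int main(void)
{
	set_rx_mode(false);              /* e.g. from the ndo_set_rx_mode hook */
	set_rx_mode(true);               /* e.g. from the open() path */

	for (int i = 0; i < n_deferred; i++)
		deferred[i]();           /* the "workqueue" drains later */
	return 0;
}
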
ionic_lif_deinit(struct ionic_lif *lif) ionic_lif_qcq_deinit(lif, lif->notifyqcq); ionic_lif_qcq_deinit(lif, lif->adminqcq); + mutex_destroy(&lif->config_lock); mutex_destroy(&lif->queue_lock); ionic_lif_reset(lif); } @@ -3185,7 +3179,7 @@ static int ionic_station_set(struct ionic_lif *lif) */ if (!ether_addr_equal(ctx.comp.lif_getattr.mac, netdev->dev_addr)) - ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR, CAN_SLEEP); + ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR); } else { /* Update the netdev mac with the device's mac */ memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len); @@ -3202,7 +3196,7 @@ static int ionic_station_set(struct ionic_lif *lif) netdev_dbg(lif->netdev, "adding station MAC addr %pM\n", netdev->dev_addr); - ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR, CAN_SLEEP); + ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR); return 0; } @@ -3225,6 +3219,7 @@ int ionic_lif_init(struct ionic_lif *lif) lif->hw_index = le16_to_cpu(comp.hw_index); mutex_init(&lif->queue_lock); + mutex_init(&lif->config_lock); /* now that we have the hw_index we can figure out our doorbell page */ lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h index 346506f01715..69ab59fedb6c 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h @@ -108,7 +108,6 @@ struct ionic_deferred_work { struct list_head list; enum ionic_deferred_work_type type; union { - unsigned int rx_mode; u8 addr[ETH_ALEN]; u8 fw_status; }; @@ -179,6 +178,7 @@ struct ionic_lif { unsigned int index; unsigned int hw_index; struct mutex queue_lock; /* lock for queue structures */ + struct mutex config_lock; /* lock for config actions */ spinlock_t adminq_lock; /* lock for AdminQ operations */ struct ionic_qcq *adminqcq; struct ionic_qcq *notifyqcq; @@ -199,7 +199,7 @@ struct ionic_lif { unsigned int nrxq_descs; u32 rx_copybreak; u64 rxq_features; - unsigned int rx_mode; + u16 rx_mode; u64 hw_features; bool registered; bool mc_overflow; @@ -302,7 +302,7 @@ int ionic_lif_identify(struct ionic *ionic, u8 lif_type, int ionic_lif_size(struct ionic *ionic); #if IS_ENABLED(CONFIG_PTP_1588_CLOCK) -int ionic_lif_hwstamp_replay(struct ionic_lif *lif); +void ionic_lif_hwstamp_replay(struct ionic_lif *lif); int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr); int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr); ktime_t ionic_lif_phc_ktime(struct ionic_lif *lif, u64 counter); @@ -311,10 +311,7 @@ void ionic_lif_unregister_phc(struct ionic_lif *lif); void ionic_lif_alloc_phc(struct ionic_lif *lif); void ionic_lif_free_phc(struct ionic_lif *lif); #else -static inline int ionic_lif_hwstamp_replay(struct ionic_lif *lif) -{ - return -EOPNOTSUPP; -} +static inline void ionic_lif_hwstamp_replay(struct ionic_lif *lif) {} static inline int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr) { diff --git a/drivers/net/ethernet/pensando/ionic/ionic_phc.c b/drivers/net/ethernet/pensando/ionic/ionic_phc.c index a87c87e86aef..6e2403c71608 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_phc.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_phc.c @@ -188,6 +188,9 @@ int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr) struct hwtstamp_config config; int err; + if (!lif->phc || !lif->phc->ptp) + return -EOPNOTSUPP; + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; @@ -203,15 
+206,16 @@ int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr) return 0; } -int ionic_lif_hwstamp_replay(struct ionic_lif *lif) +void ionic_lif_hwstamp_replay(struct ionic_lif *lif) { int err; + if (!lif->phc || !lif->phc->ptp) + return; + err = ionic_lif_hwstamp_set_ts_config(lif, NULL); if (err) netdev_info(lif->netdev, "hwstamp replay failed: %d\n", err); - - return err; } int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c index 08934888575c..08870190e4d2 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@ -274,12 +274,11 @@ static void ionic_rx_clean(struct ionic_queue *q, } } - if (likely(netdev->features & NETIF_F_RXCSUM)) { - if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) { - skb->ip_summed = CHECKSUM_COMPLETE; - skb->csum = (__force __wsum)le16_to_cpu(comp->csum); - stats->csum_complete++; - } + if (likely(netdev->features & NETIF_F_RXCSUM) && + (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC)) { + skb->ip_summed = CHECKSUM_COMPLETE; + skb->csum = (__force __wsum)le16_to_cpu(comp->csum); + stats->csum_complete++; } else { stats->csum_none++; } @@ -451,11 +450,12 @@ void ionic_rx_empty(struct ionic_queue *q) q->tail_idx = 0; } -static void ionic_dim_update(struct ionic_qcq *qcq) +static void ionic_dim_update(struct ionic_qcq *qcq, int napi_mode) { struct dim_sample dim_sample; struct ionic_lif *lif; unsigned int qi; + u64 pkts, bytes; if (!qcq->intr.dim_coal_hw) return; @@ -463,14 +463,23 @@ static void ionic_dim_update(struct ionic_qcq *qcq) lif = qcq->q.lif; qi = qcq->cq.bound_q->index; - ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, - lif->rxqcqs[qi]->intr.index, - qcq->intr.dim_coal_hw); + switch (napi_mode) { + case IONIC_LIF_F_TX_DIM_INTR: + pkts = lif->txqstats[qi].pkts; + bytes = lif->txqstats[qi].bytes; + break; + case IONIC_LIF_F_RX_DIM_INTR: + pkts = lif->rxqstats[qi].pkts; + bytes = lif->rxqstats[qi].bytes; + break; + default: + pkts = lif->txqstats[qi].pkts + lif->rxqstats[qi].pkts; + bytes = lif->txqstats[qi].bytes + lif->rxqstats[qi].bytes; + break; + } dim_update_sample(qcq->cq.bound_intr->rearm_count, - lif->txqstats[qi].pkts, - lif->txqstats[qi].bytes, - &dim_sample); + pkts, bytes, &dim_sample); net_dim(&qcq->dim, dim_sample); } @@ -491,7 +500,7 @@ int ionic_tx_napi(struct napi_struct *napi, int budget) ionic_tx_service, NULL, NULL); if (work_done < budget && napi_complete_done(napi, work_done)) { - ionic_dim_update(qcq); + ionic_dim_update(qcq, IONIC_LIF_F_TX_DIM_INTR); flags |= IONIC_INTR_CRED_UNMASK; cq->bound_intr->rearm_count++; } @@ -530,7 +539,7 @@ int ionic_rx_napi(struct napi_struct *napi, int budget) ionic_rx_fill(cq->bound_q); if (work_done < budget && napi_complete_done(napi, work_done)) { - ionic_dim_update(qcq); + ionic_dim_update(qcq, IONIC_LIF_F_RX_DIM_INTR); flags |= IONIC_INTR_CRED_UNMASK; cq->bound_intr->rearm_count++; } @@ -576,7 +585,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget) ionic_rx_fill(rxcq->bound_q); if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) { - ionic_dim_update(qcq); + ionic_dim_update(qcq, 0); flags |= IONIC_INTR_CRED_UNMASK; rxcq->bound_intr->rearm_count++; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 02a4610d9330..c46a7f756ed5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ 
b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -327,6 +327,9 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) unsigned long flags; int rc = -EINVAL; + if (!p_ll2_conn) + return rc; + spin_lock_irqsave(&p_tx->lock, flags); if (p_tx->b_completing_packet) { rc = -EBUSY; @@ -500,7 +503,16 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie) unsigned long flags = 0; int rc = 0; + if (!p_ll2_conn) + return rc; + spin_lock_irqsave(&p_rx->lock, flags); + + if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) { + spin_unlock_irqrestore(&p_rx->lock, flags); + return 0; + } + cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons); cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain); @@ -821,6 +833,9 @@ static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie; int rc; + if (!p_ll2_conn) + return 0; + if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) return 0; @@ -844,6 +859,9 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) u16 new_idx = 0, num_bds = 0; int rc; + if (!p_ll2_conn) + return 0; + if (!QED_LL2_TX_REGISTERED(p_ll2_conn)) return 0; @@ -1728,6 +1746,8 @@ int qed_ll2_post_rx_buffer(void *cxt, if (!p_ll2_conn) return -EINVAL; p_rx = &p_ll2_conn->rx_queue; + if (!p_rx->set_prod_addr) + return -EIO; spin_lock_irqsave(&p_rx->lock, flags); if (!list_empty(&p_rx->free_descq)) diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index da864d12916b..4f4b79250a2b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -1285,8 +1285,7 @@ qed_rdma_create_qp(void *rdma_cxt, if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info->active) { - DP_ERR(p_hwfn->cdev, - "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n", + pr_err("qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n", rdma_cxt, in_params, out_params); return NULL; } diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 2e62a2c4eb63..5630008f38b7 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -501,6 +501,7 @@ struct qede_fastpath { #define QEDE_SP_HW_ERR 4 #define QEDE_SP_ARFS_CONFIG 5 #define QEDE_SP_AER 7 +#define QEDE_SP_DISABLE 8 #ifdef CONFIG_RFS_ACCEL int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index c59b72c90293..a2e4dfb5cb44 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -831,7 +831,7 @@ int qede_configure_vlan_filters(struct qede_dev *edev) int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) { struct qede_dev *edev = netdev_priv(dev); - struct qede_vlan *vlan = NULL; + struct qede_vlan *vlan; int rc = 0; DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid); @@ -842,7 +842,7 @@ int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) if (vlan->vid == vid) break; - if (!vlan || (vlan->vid != vid)) { + if (list_entry_is_head(vlan, &edev->vlan_list, list)) { DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), "Vlan isn't configured\n"); goto out; diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 
01ac1e93d27a..7c6064baeba2 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1009,6 +1009,13 @@ static void qede_sp_task(struct work_struct *work) struct qede_dev *edev = container_of(work, struct qede_dev, sp_task.work); + /* Disable execution of this deferred work once + * qede removal is in progress; this stops any future + * scheduling of sp_task. + */ + if (test_bit(QEDE_SP_DISABLE, &edev->sp_flags)) + return; + /* The locking scheme depends on the specific flag: * In case of QEDE_SP_RECOVERY, acquiring the RTNL lock is required to * ensure that ongoing flows are ended and new ones are not started. @@ -1300,6 +1307,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) qede_rdma_dev_remove(edev, (mode == QEDE_REMOVE_RECOVERY)); if (mode != QEDE_REMOVE_RECOVERY) { + set_bit(QEDE_SP_DISABLE, &edev->sp_flags); unregister_netdev(ndev); cancel_delayed_work_sync(&edev->sp_task); diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index 2376b2729633..c00ad57575ea 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -154,7 +154,7 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev) "driver lock acquired\n"); return 1; } - ssleep(1); + mdelay(1000); } while (++i < 10); netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n"); @@ -3274,7 +3274,7 @@ static int ql_adapter_reset(struct ql3_adapter *qdev) if ((value & ISP_CONTROL_SR) == 0) break; - ssleep(1); + mdelay(1000); } while ((--max_wait_time)); /* @@ -3310,7 +3310,7 @@ static int ql_adapter_reset(struct ql3_adapter *qdev) ispControlStatus); if ((value & ISP_CONTROL_FSR) == 0) break; - ssleep(1); + mdelay(1000); } while ((--max_wait_time)); } if (max_wait_time == 0) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index d8882d0b6b49..d51bac7ba5af 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -3156,8 +3156,10 @@ int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr, indirect_addr = QLC_83XX_FLASH_DIRECT_DATA(addr); ret = QLCRD32(adapter, indirect_addr, &err); - if (err == -EIO) + if (err == -EIO) { + qlcnic_83xx_unlock_flash(adapter); return err; + } word = ret; *(u32 *)p_data = word; diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 8543bf3c3484..ad655f0a4965 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -735,12 +735,13 @@ static int emac_remove(struct platform_device *pdev) put_device(&adpt->phydev->mdio.dev); mdiobus_unregister(adpt->mii_bus); - free_netdev(netdev); if (adpt->phy.digital) iounmap(adpt->phy.digital); iounmap(adpt->phy.base); + free_netdev(netdev); + return 0; } diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index f744557c33a3..4d8e337f5085 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -3502,12 +3502,16 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp) RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET); RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + /* The default value is 0x13.
Change it to 0x2f */ + rtl_csi_access_enable(tp, 0x2f); + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000); /* disable EEE */ rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000); rtl_pcie_state_l2l3_disable(tp); + rtl_hw_aspm_clkreq_enable(tp, true); } DECLARE_RTL_COND(rtl_mac_ocp_e00e_cond) @@ -5084,7 +5088,8 @@ static int r8169_mdio_register(struct rtl8169_private *tp) new_bus->priv = tp; new_bus->parent = &pdev->dev; new_bus->irq[0] = PHY_MAC_INTERRUPT; - snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x", pci_dev_id(pdev)); + snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x-%x", + pci_domain_nr(pdev->bus), pci_dev_id(pdev)); new_bus->read = r8169_mdio_read_reg; new_bus->write = r8169_mdio_write_reg; diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index 86a1eb0634e8..80e62ca2e3d3 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -864,7 +864,7 @@ enum GECMR_BIT { /* The Ethernet AVB descriptor definitions. */ struct ravb_desc { - __le16 ds; /* Descriptor size */ + __le16 ds; /* Descriptor size */ u8 cc; /* Content control MSBs (reserved) */ u8 die_dt; /* Descriptor interrupt enable and type */ __le32 dptr; /* Descriptor pointer */ diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 69c50f81e1cb..805397088850 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -920,7 +920,7 @@ static int ravb_poll(struct napi_struct *napi, int budget) if (ravb_rx(ndev, &quota, q)) goto out; - /* Processing RX Descriptor Ring */ + /* Processing TX Descriptor Ring */ spin_lock_irqsave(&priv->lock, flags); /* Clear TX interrupt */ ravb_write(ndev, ~(mask | TIS_RESERVED), TIS); diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index a46633606cae..1f06b92ee5bb 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -2715,7 +2715,7 @@ static void rocker_fdb_offload_notify(struct rocker_port *rocker_port, struct switchdev_notifier_fdb_info *recv_info) { - struct switchdev_notifier_fdb_info info; + struct switchdev_notifier_fdb_info info = {}; info.addr = recv_info->addr; info.vid = recv_info->vid; diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c index 967a634ee9ac..e33a9d283a4e 100644 --- a/drivers/net/ethernet/rocker/rocker_ofdpa.c +++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c @@ -1822,7 +1822,7 @@ static void ofdpa_port_fdb_learn_work(struct work_struct *work) container_of(work, struct ofdpa_fdb_learn_work, work); bool removing = (lw->flags & OFDPA_OP_FLAG_REMOVE); bool learned = (lw->flags & OFDPA_OP_FLAG_LEARNED); - struct switchdev_notifier_fdb_info info; + struct switchdev_notifier_fdb_info info = {}; info.addr = lw->addr; info.vid = lw->vid; diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c index a3ca406a3561..e5b0d795c301 100644 --- a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@ -152,6 +152,7 @@ static int efx_allocate_msix_channels(struct efx_nic *efx, * maximum size.
*/ tx_per_ev = EFX_MAX_EVQ_SIZE / EFX_TXQ_MAX_ENT(efx); + tx_per_ev = min(tx_per_ev, EFX_MAX_TXQ_PER_CHANNEL); n_xdp_tx = num_possible_cpus(); n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, tx_per_ev); @@ -169,6 +170,8 @@ static int efx_allocate_msix_channels(struct efx_nic *efx, netif_err(efx, drv, efx->net_dev, "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n", n_xdp_ev, n_channels, max_channels); + netif_err(efx, drv, efx->net_dev, + "XDP_TX and XDP_REDIRECT will not work on this interface"); efx->n_xdp_channels = 0; efx->xdp_tx_per_channel = 0; efx->xdp_tx_queue_count = 0; @@ -176,12 +179,14 @@ static int efx_allocate_msix_channels(struct efx_nic *efx, netif_err(efx, drv, efx->net_dev, "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n", n_xdp_tx, n_channels, efx->max_vis); + netif_err(efx, drv, efx->net_dev, + "XDP_TX and XDP_REDIRECT will not work on this interface"); efx->n_xdp_channels = 0; efx->xdp_tx_per_channel = 0; efx->xdp_tx_queue_count = 0; } else { efx->n_xdp_channels = n_xdp_ev; - efx->xdp_tx_per_channel = EFX_MAX_TXQ_PER_CHANNEL; + efx->xdp_tx_per_channel = tx_per_ev; efx->xdp_tx_queue_count = n_xdp_tx; n_channels += n_xdp_ev; netif_dbg(efx, drv, efx->net_dev, @@ -891,18 +896,20 @@ int efx_set_channels(struct efx_nic *efx) if (efx_channel_is_xdp_tx(channel)) { efx_for_each_channel_tx_queue(tx_queue, channel) { tx_queue->queue = next_queue++; - netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n", - channel->channel, tx_queue->label, - xdp_queue_number, tx_queue->queue); + /* We may have a few left-over XDP TX * queues owing to xdp_tx_queue_count * not dividing evenly by EFX_MAX_TXQ_PER_CHANNEL. * We still allocate and probe those * TXQs, but never use them. */ - if (xdp_queue_number < efx->xdp_tx_queue_count) + if (xdp_queue_number < efx->xdp_tx_queue_count) { + netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n", + channel->channel, tx_queue->label, + xdp_queue_number, tx_queue->queue); efx->xdp_tx_queues[xdp_queue_number] = tx_queue; - xdp_queue_number++; + xdp_queue_number++; + } } } else { efx_for_each_channel_tx_queue(tx_queue, channel) { @@ -914,8 +921,7 @@ int efx_set_channels(struct efx_nic *efx) } } } - if (xdp_queue_number) - efx->xdp_tx_queue_count = xdp_queue_number; + WARN_ON(xdp_queue_number != efx->xdp_tx_queue_count); rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels); if (rc) diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index ca9c00b7f588..cff87de9178a 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -443,7 +443,7 @@ static int sis900_probe(struct pci_dev *pci_dev, #endif /* setup various bits in PCI command register */ - ret = pci_enable_device(pci_dev); + ret = pcim_enable_device(pci_dev); if(ret) return ret; i = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32)); @@ -469,7 +469,7 @@ static int sis900_probe(struct pci_dev *pci_dev, ioaddr = pci_iomap(pci_dev, 0, 0); if (!ioaddr) { ret = -ENOMEM; - goto err_out_cleardev; + goto err_out; } sis_priv = netdev_priv(net_dev); @@ -581,8 +581,6 @@ err_unmap_tx: sis_priv->tx_ring_dma); err_out_unmap: pci_iounmap(pci_dev, ioaddr); -err_out_cleardev: - pci_release_regions(pci_dev); err_out: free_netdev(net_dev); return ret; @@ -2499,7 +2497,6 @@ static void sis900_remove(struct pci_dev *pci_dev) sis_priv->tx_ring_dma); pci_iounmap(pci_dev, sis_priv->ioaddr); free_netdev(net_dev); - pci_release_regions(pci_dev); } static int __maybe_unused 
sis900_suspend(struct device *dev) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c index e108b0d2bd28..4c9a37dd0d3f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c @@ -49,9 +49,9 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id { struct plat_stmmacenet_data *plat; struct stmmac_resources res; - bool mdio = false; - int ret, i; struct device_node *np; + int ret, i, phy_mode; + bool mdio = false; np = dev_of_node(&pdev->dev); @@ -108,10 +108,11 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id if (plat->bus_id < 0) plat->bus_id = pci_dev_id(pdev); - plat->phy_interface = device_get_phy_mode(&pdev->dev); - if (plat->phy_interface < 0) + phy_mode = device_get_phy_mode(&pdev->dev); + if (phy_mode < 0) dev_err(&pdev->dev, "phy_mode not found\n"); + plat->phy_interface = phy_mode; plat->interface = PHY_INTERFACE_MODE_GMII; pci_set_master(pdev); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index 67ba083eb90c..b21745368983 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -1249,6 +1249,7 @@ const struct stmmac_ops dwmac410_ops = { .config_l3_filter = dwmac4_config_l3_filter, .config_l4_filter = dwmac4_config_l4_filter, .est_configure = dwmac5_est_configure, + .est_irq_status = dwmac5_est_irq_status, .fpe_configure = dwmac5_fpe_configure, .fpe_send_mpacket = dwmac5_fpe_send_mpacket, .fpe_irq_status = dwmac5_fpe_irq_status, @@ -1300,6 +1301,7 @@ const struct stmmac_ops dwmac510_ops = { .config_l3_filter = dwmac4_config_l3_filter, .config_l4_filter = dwmac4_config_l4_filter, .est_configure = dwmac5_est_configure, + .est_irq_status = dwmac5_est_irq_status, .fpe_configure = dwmac5_fpe_configure, .fpe_send_mpacket = dwmac5_fpe_send_mpacket, .fpe_irq_status = dwmac5_fpe_irq_status, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index e735134e8487..fcdb1d20389b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -349,6 +349,9 @@ void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue); void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue); void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue); int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags); +struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time, + ktime_t current_time, + u64 cycle_time); #if IS_ENABLED(CONFIG_STMMAC_SELFTESTS) void stmmac_selftest_run(struct net_device *dev, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 8d9d6ecf8c63..7b8404a21544 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -7171,6 +7171,7 @@ int stmmac_suspend(struct device *dev) priv->plat->rx_queues_to_use, false); stmmac_fpe_handshake(priv, false); + stmmac_fpe_stop_wq(priv); } priv->speed = SPEED_UNKNOWN; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 072eff8079d0..5ca710844cc1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -397,6 +397,7 @@ 
stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) struct device_node *np = pdev->dev.of_node; struct plat_stmmacenet_data *plat; struct stmmac_dma_cfg *dma_cfg; + int phy_mode; void *ret; int rc; @@ -412,10 +413,11 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) eth_zero_addr(mac); } - plat->phy_interface = device_get_phy_mode(&pdev->dev); - if (plat->phy_interface < 0) - return ERR_PTR(plat->phy_interface); + phy_mode = device_get_phy_mode(&pdev->dev); + if (phy_mode < 0) + return ERR_PTR(phy_mode); + + plat->phy_interface = phy_mode; plat->interface = stmmac_of_get_mac_mode(np); if (plat->interface < 0) plat->interface = plat->phy_interface; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index 4e86cdf2bc9f..580cc035536b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -62,7 +62,8 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta) u32 sec, nsec; u32 quotient, reminder; int neg_adj = 0; - bool xmac; + bool xmac, est_rst = false; + int ret; xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; @@ -75,10 +76,48 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta) sec = quotient; nsec = reminder; + /* If EST is enabled, disable it before adjusting the PTP time. */ + if (priv->plat->est && priv->plat->est->enable) { + est_rst = true; + mutex_lock(&priv->plat->est->lock); + priv->plat->est->enable = false; + stmmac_est_configure(priv, priv->ioaddr, priv->plat->est, + priv->plat->clk_ptp_rate); + mutex_unlock(&priv->plat->est->lock); + } + spin_lock_irqsave(&priv->ptp_lock, flags); stmmac_adjust_systime(priv, priv->ptpaddr, sec, nsec, neg_adj, xmac); spin_unlock_irqrestore(&priv->ptp_lock, flags); + /* Calculate the new basetime and re-configure EST after the PTP time adjustment. */
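/*
 * [Editor's aside -- illustration only, not part of the patch above.]
 * The basetime recalculation used by this est_rst path, and by
 * stmmac_calc_tas_basetime() further down, is plain interval arithmetic:
 * keep the old basetime if it still lies in the future, otherwise advance
 * it by whole cycles to the first cycle boundary after "now".
 * A minimal self-contained sketch of the same math, using s64 nanosecond
 * values instead of ktime_t; next_base_time() is a hypothetical name.
 */
static inline long long next_base_time(long long old_base, long long now,
				       long long cycle)
{
	long long n;

	if (old_base > now)
		return old_base;	/* still in the future: keep it */

	/* whole cycles already elapsed since old_base ... */
	n = (now - old_base) / cycle;
	/* ... so the first boundary after "now" is (n + 1) cycles on */
	return old_base + (n + 1) * cycle;
}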
+ if (est_rst) { + struct timespec64 current_time, time; + ktime_t current_time_ns, basetime; + u64 cycle_time; + + mutex_lock(&priv->plat->est->lock); + priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time); + current_time_ns = timespec64_to_ktime(current_time); + time.tv_nsec = priv->plat->est->btr_reserve[0]; + time.tv_sec = priv->plat->est->btr_reserve[1]; + basetime = timespec64_to_ktime(time); + cycle_time = priv->plat->est->ctr[1] * NSEC_PER_SEC + + priv->plat->est->ctr[0]; + time = stmmac_calc_tas_basetime(basetime, + current_time_ns, + cycle_time); + + priv->plat->est->btr[0] = (u32)time.tv_nsec; + priv->plat->est->btr[1] = (u32)time.tv_sec; + priv->plat->est->enable = true; + ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est, + priv->plat->clk_ptp_rate); + mutex_unlock(&priv->plat->est->lock); + if (ret) + netdev_err(priv->dev, "failed to configure EST\n"); + } + return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 92dab609d4f8..4f3b6437b114 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -711,12 +711,35 @@ static int tc_setup_cls(struct stmmac_priv *priv, return ret; } +struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time, + ktime_t current_time, + u64 cycle_time) +{ + struct timespec64 time; + + if (ktime_after(old_base_time, current_time)) { + time = ktime_to_timespec64(old_base_time); + } else { + s64 n; + ktime_t base_time; + + n = div64_s64(ktime_sub_ns(current_time, old_base_time), + cycle_time); + base_time = ktime_add_ns(old_base_time, + (n + 1) * cycle_time); + + time = ktime_to_timespec64(base_time); + } + + return time; +} + static int tc_setup_taprio(struct stmmac_priv *priv, struct tc_taprio_qopt_offload *qopt) { u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep; struct plat_stmmacenet_data *plat = priv->plat; - struct timespec64 time, current_time; + struct timespec64 time, current_time, qopt_time; ktime_t current_time_ns; bool fpe = false; int i, ret = 0; @@ -773,14 +796,18 @@ static int tc_setup_taprio(struct stmmac_priv *priv, GFP_KERNEL); if (!plat->est) return -ENOMEM; + + mutex_init(&priv->plat->est->lock); } else { memset(plat->est, 0, sizeof(*plat->est)); } size = qopt->num_entries; + mutex_lock(&priv->plat->est->lock); priv->plat->est->gcl_size = size; priv->plat->est->enable = qopt->enable; + mutex_unlock(&priv->plat->est->lock); for (i = 0; i < size; i++) { s64 delta_ns = qopt->entries[i].interval; @@ -811,32 +838,28 @@ static int tc_setup_taprio(struct stmmac_priv *priv, priv->plat->est->gcl[i] = delta_ns | (gates << wid); } + mutex_lock(&priv->plat->est->lock); /* Adjust for real system time */ priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time); current_time_ns = timespec64_to_ktime(current_time); - if (ktime_after(qopt->base_time, current_time_ns)) { - time = ktime_to_timespec64(qopt->base_time); - } else { - ktime_t base_time; - s64 n; - - n = div64_s64(ktime_sub_ns(current_time_ns, qopt->base_time), - qopt->cycle_time); - base_time = ktime_add_ns(qopt->base_time, - (n + 1) * qopt->cycle_time); - - time = ktime_to_timespec64(base_time); - } + time = stmmac_calc_tas_basetime(qopt->base_time, current_time_ns, + qopt->cycle_time); priv->plat->est->btr[0] = (u32)time.tv_nsec; priv->plat->est->btr[1] = (u32)time.tv_sec; + qopt_time = ktime_to_timespec64(qopt->base_time); + priv->plat->est->btr_reserve[0] = (u32)qopt_time.tv_nsec; +
priv->plat->est->btr_reserve[1] = (u32)qopt_time.tv_sec; + ctr = qopt->cycle_time; priv->plat->est->ctr[0] = do_div(ctr, NSEC_PER_SEC); priv->plat->est->ctr[1] = (u32)ctr; - if (fpe && !priv->dma_cap.fpesel) + if (fpe && !priv->dma_cap.fpesel) { + mutex_unlock(&priv->plat->est->lock); return -EOPNOTSUPP; + } /* Actual FPE register configuration will be done after FPE handshake * is success. @@ -845,6 +868,7 @@ static int tc_setup_taprio(struct stmmac_priv *priv, ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est, priv->plat->clk_ptp_rate); + mutex_unlock(&priv->plat->est->lock); if (ret) { netdev_err(priv->dev, "failed to configure EST\n"); goto disable; @@ -860,9 +884,11 @@ static int tc_setup_taprio(struct stmmac_priv *priv, return 0; disable: + mutex_lock(&priv->plat->est->lock); priv->plat->est->enable = false; stmmac_est_configure(priv, priv->ioaddr, priv->plat->est, priv->plat->clk_ptp_rate); + mutex_unlock(&priv->plat->est->lock); priv->plat->fpe_cfg->enable = false; stmmac_fpe_configure(priv, priv->ioaddr, diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 74e748662ec0..860644d182ab 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -8191,8 +8191,9 @@ static int niu_pci_vpd_fetch(struct niu *np, u32 start) err = niu_pci_vpd_scan_props(np, here, end); if (err < 0) return err; + /* ret == 1 is not an error */ if (err == 1) - return -EINVAL; + return 0; } return 0; } diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 718539cdd2f2..67a08cbba859 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -2060,8 +2060,12 @@ static void am65_cpsw_port_offload_fwd_mark_update(struct am65_cpsw_common *comm for (i = 1; i <= common->port_num; i++) { struct am65_cpsw_port *port = am65_common_get_port(common, i); - struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(port->ndev); + struct am65_cpsw_ndev_priv *priv; + if (!port->ndev) + continue; + + priv = am65_ndev_to_priv(port->ndev); priv->offload_fwd_mark = set_val; } } diff --git a/drivers/net/ethernet/ti/am65-cpsw-switchdev.c b/drivers/net/ethernet/ti/am65-cpsw-switchdev.c index 9c29b363e9ae..599708a3e81d 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-switchdev.c +++ b/drivers/net/ethernet/ti/am65-cpsw-switchdev.c @@ -358,7 +358,7 @@ static int am65_cpsw_port_obj_del(struct net_device *ndev, const void *ctx, static void am65_cpsw_fdb_offload_notify(struct net_device *ndev, struct switchdev_notifier_fdb_info *rcv) { - struct switchdev_notifier_fdb_info info; + struct switchdev_notifier_fdb_info info = {}; info.addr = rcv->addr; info.vid = rcv->vid; diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c index 57d279fdcc9f..d1d02001cef6 100644 --- a/drivers/net/ethernet/ti/cpsw_new.c +++ b/drivers/net/ethernet/ti/cpsw_new.c @@ -920,7 +920,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb, struct cpdma_chan *txch; int ret, q_idx; - if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) { + if (skb_put_padto(skb, READ_ONCE(priv->tx_packet_min))) { cpsw_err(priv, tx_err, "packet pad failed\n"); ndev->stats.tx_dropped++; return NET_XMIT_DROP; @@ -1100,7 +1100,7 @@ static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n, for (i = 0; i < n; i++) { xdpf = frames[i]; - if (xdpf->len < CPSW_MIN_PACKET_SIZE) + if (xdpf->len < READ_ONCE(priv->tx_packet_min)) break; if (cpsw_xdp_tx_frame(priv, xdpf, NULL, priv->emac_port)) @@ -1389,6 +1389,7 @@ 
static int cpsw_create_ports(struct cpsw_common *cpsw) priv->dev = dev; priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); priv->emac_port = i + 1; + priv->tx_packet_min = CPSW_MIN_PACKET_SIZE; if (is_valid_ether_addr(slave_data->mac_addr)) { ether_addr_copy(priv->mac_addr, slave_data->mac_addr); @@ -1686,6 +1687,7 @@ static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id, priv = netdev_priv(sl_ndev); slave->port_vlan = vlan; + WRITE_ONCE(priv->tx_packet_min, CPSW_MIN_PACKET_SIZE_VLAN); if (netif_running(sl_ndev)) cpsw_port_add_switch_def_ale_entries(priv, slave); @@ -1714,6 +1716,7 @@ static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id, priv = netdev_priv(slave->ndev); slave->port_vlan = slave->data->dual_emac_res_vlan; + WRITE_ONCE(priv->tx_packet_min, CPSW_MIN_PACKET_SIZE); cpsw_port_add_dual_emac_def_ale_entries(priv, slave); } diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h index a323bea54faa..2951fb7b9dae 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.h +++ b/drivers/net/ethernet/ti/cpsw_priv.h @@ -89,7 +89,8 @@ do { \ #define CPSW_POLL_WEIGHT 64 #define CPSW_RX_VLAN_ENCAP_HDR_SIZE 4 -#define CPSW_MIN_PACKET_SIZE (VLAN_ETH_ZLEN) +#define CPSW_MIN_PACKET_SIZE_VLAN (VLAN_ETH_ZLEN) +#define CPSW_MIN_PACKET_SIZE (ETH_ZLEN) #define CPSW_MAX_PACKET_SIZE (VLAN_ETH_FRAME_LEN +\ ETH_FCS_LEN +\ CPSW_RX_VLAN_ENCAP_HDR_SIZE) @@ -380,6 +381,7 @@ struct cpsw_priv { u32 emac_port; struct cpsw_common *cpsw; int offload_fwd_mark; + u32 tx_packet_min; }; #define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw) diff --git a/drivers/net/ethernet/ti/cpsw_switchdev.c b/drivers/net/ethernet/ti/cpsw_switchdev.c index f7fb6e17dadd..a7d97d429e06 100644 --- a/drivers/net/ethernet/ti/cpsw_switchdev.c +++ b/drivers/net/ethernet/ti/cpsw_switchdev.c @@ -368,7 +368,7 @@ static int cpsw_port_obj_del(struct net_device *ndev, const void *ctx, static void cpsw_fdb_offload_notify(struct net_device *ndev, struct switchdev_notifier_fdb_info *rcv) { - struct switchdev_notifier_fdb_info info; + struct switchdev_notifier_fdb_info info = {}; info.addr = rcv->addr; info.vid = rcv->vid; diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c index 0b2ce4bdc2c3..e0cb713193ea 100644 --- a/drivers/net/ethernet/ti/tlan.c +++ b/drivers/net/ethernet/ti/tlan.c @@ -313,9 +313,8 @@ static void tlan_remove_one(struct pci_dev *pdev) pci_release_regions(pdev); #endif - free_netdev(dev); - cancel_work_sync(&priv->tlan_tqueue); + free_netdev(dev); } static void tlan_start(struct net_device *dev) diff --git a/drivers/net/ethernet/xscale/ptp_ixp46x.c b/drivers/net/ethernet/xscale/ptp_ixp46x.c index 99d4d9439d05..a6fb88fd42f7 100644 --- a/drivers/net/ethernet/xscale/ptp_ixp46x.c +++ b/drivers/net/ethernet/xscale/ptp_ixp46x.c @@ -14,6 +14,8 @@ #include <linux/kernel.h> #include <linux/ptp_clock_kernel.h> #include <linux/soc/ixp4xx/cpu.h> +#include <linux/module.h> +#include <mach/ixp4xx-regs.h> #include "ixp46x_ts.h" diff --git a/drivers/net/fddi/defza.c b/drivers/net/fddi/defza.c index 14f07050b6b1..0de2c4552f5e 100644 --- a/drivers/net/fddi/defza.c +++ b/drivers/net/fddi/defza.c @@ -1504,9 +1504,8 @@ err_out_resource: release_mem_region(start, len); err_out_kfree: - free_netdev(dev); - pr_err("%s: initialization failure, aborting!\n", fp->name); + free_netdev(dev); return ret; } diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index fcf3af76b6d7..8fe8887d506a 100644 --- a/drivers/net/hamradio/6pack.c +++ 
b/drivers/net/hamradio/6pack.c @@ -827,6 +827,12 @@ static void decode_data(struct sixpack *sp, unsigned char inbyte) return; } + if (sp->rx_count_cooked + 2 >= sizeof(sp->cooked_buf)) { + pr_err("6pack: cooked buffer overrun, data loss\n"); + sp->rx_count = 0; + return; + } + buf = sp->raw_buf; sp->cooked_buf[sp->rx_count_cooked++] = buf[0] | ((buf[1] << 2) & 0xc0); diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c index ebc976b7fcc2..8caa61ec718f 100644 --- a/drivers/net/ieee802154/mac802154_hwsim.c +++ b/drivers/net/ieee802154/mac802154_hwsim.c @@ -418,7 +418,7 @@ static int hwsim_new_edge_nl(struct sk_buff *msg, struct genl_info *info) struct hwsim_edge *e; u32 v0, v1; - if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] && + if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] || !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE]) return -EINVAL; @@ -528,14 +528,14 @@ static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info) u32 v0, v1; u8 lqi; - if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] && + if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] || !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE]) return -EINVAL; if (nla_parse_nested_deprecated(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX, info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE], hwsim_edge_policy, NULL)) return -EINVAL; - if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID] && + if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID] || !edge_attrs[MAC802154_HWSIM_EDGE_ATTR_LQI]) return -EINVAL; diff --git a/drivers/net/mdio/mdio-mux.c b/drivers/net/mdio/mdio-mux.c index 110e4ee85785..ebd001f0eece 100644 --- a/drivers/net/mdio/mdio-mux.c +++ b/drivers/net/mdio/mdio-mux.c @@ -82,6 +82,17 @@ out: static int parent_count; +static void mdio_mux_uninit_children(struct mdio_mux_parent_bus *pb) +{ + struct mdio_mux_child_bus *cb = pb->children; + + while (cb) { + mdiobus_unregister(cb->mii_bus); + mdiobus_free(cb->mii_bus); + cb = cb->next; + } +} + int mdio_mux_init(struct device *dev, struct device_node *mux_node, int (*switch_fn)(int cur, int desired, void *data), @@ -144,7 +155,7 @@ int mdio_mux_init(struct device *dev, cb = devm_kzalloc(dev, sizeof(*cb), GFP_KERNEL); if (!cb) { ret_val = -ENOMEM; - continue; + goto err_loop; } cb->bus_number = v; cb->parent = pb; @@ -152,8 +163,7 @@ int mdio_mux_init(struct device *dev, cb->mii_bus = mdiobus_alloc(); if (!cb->mii_bus) { ret_val = -ENOMEM; - devm_kfree(dev, cb); - continue; + goto err_loop; } cb->mii_bus->priv = cb; @@ -165,11 +175,15 @@ int mdio_mux_init(struct device *dev, cb->mii_bus->write = mdio_mux_write; r = of_mdiobus_register(cb->mii_bus, child_bus_node); if (r) { + mdiobus_free(cb->mii_bus); + if (r == -EPROBE_DEFER) { + ret_val = r; + goto err_loop; + } + devm_kfree(dev, cb); dev_err(dev, "Error: Failed to register MDIO bus for child %pOF\n", child_bus_node); - mdiobus_free(cb->mii_bus); - devm_kfree(dev, cb); } else { cb->next = pb->children; pb->children = cb; @@ -181,7 +195,10 @@ int mdio_mux_init(struct device *dev, } dev_err(dev, "Error: No acceptable child buses found\n"); - devm_kfree(dev, pb); + +err_loop: + mdio_mux_uninit_children(pb); + of_node_put(child_bus_node); err_pb_kz: put_device(&parent_bus->dev); err_parent_bus: @@ -193,14 +210,8 @@ EXPORT_SYMBOL_GPL(mdio_mux_init); void mdio_mux_uninit(void *mux_handle) { struct mdio_mux_parent_bus *pb = mux_handle; - struct mdio_mux_child_bus *cb = pb->children; - - while (cb) { - mdiobus_unregister(cb->mii_bus); - mdiobus_free(cb->mii_bus); - cb = cb->next; - } + 
mdio_mux_uninit_children(pb); put_device(&pb->mii_bus->dev); } EXPORT_SYMBOL_GPL(mdio_mux_uninit); diff --git a/drivers/net/mhi/net.c b/drivers/net/mhi/net.c index e60e38c1f09d..11be6bcdd551 100644 --- a/drivers/net/mhi/net.c +++ b/drivers/net/mhi/net.c @@ -335,7 +335,7 @@ static int mhi_net_newlink(void *ctxt, struct net_device *ndev, u32 if_id, u64_stats_init(&mhi_netdev->stats.tx_syncp); /* Start MHI channels */ - err = mhi_prepare_for_transfer(mhi_dev); + err = mhi_prepare_for_transfer(mhi_dev, 0); if (err) goto out_err; diff --git a/drivers/net/netdevsim/ipsec.c b/drivers/net/netdevsim/ipsec.c index 3811f1bde84e..b80ed2ffd45e 100644 --- a/drivers/net/netdevsim/ipsec.c +++ b/drivers/net/netdevsim/ipsec.c @@ -85,7 +85,7 @@ static int nsim_ipsec_parse_proto_keys(struct xfrm_state *xs, u32 *mykey, u32 *mysalt) { const char aes_gcm_name[] = "rfc4106(gcm(aes))"; - struct net_device *dev = xs->xso.dev; + struct net_device *dev = xs->xso.real_dev; unsigned char *key_data; char *alg_name = NULL; int key_len; @@ -134,7 +134,7 @@ static int nsim_ipsec_add_sa(struct xfrm_state *xs) u16 sa_idx; int ret; - dev = xs->xso.dev; + dev = xs->xso.real_dev; ns = netdev_priv(dev); ipsec = &ns->ipsec; @@ -194,7 +194,7 @@ static int nsim_ipsec_add_sa(struct xfrm_state *xs) static void nsim_ipsec_del_sa(struct xfrm_state *xs) { - struct netdevsim *ns = netdev_priv(xs->xso.dev); + struct netdevsim *ns = netdev_priv(xs->xso.real_dev); struct nsim_ipsec *ipsec = &ns->ipsec; u16 sa_idx; @@ -211,7 +211,7 @@ static void nsim_ipsec_del_sa(struct xfrm_state *xs) static bool nsim_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) { - struct netdevsim *ns = netdev_priv(xs->xso.dev); + struct netdevsim *ns = netdev_priv(xs->xso.real_dev); struct nsim_ipsec *ipsec = &ns->ipsec; ipsec->ok++; diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c index 63fda3fc40aa..4bd61339823c 100644 --- a/drivers/net/pcs/pcs-xpcs.c +++ b/drivers/net/pcs/pcs-xpcs.c @@ -1089,7 +1089,7 @@ struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev, xpcs = kzalloc(sizeof(*xpcs), GFP_KERNEL); if (!xpcs) - return NULL; + return ERR_PTR(-ENOMEM); xpcs->mdiodev = mdiodev; diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index 7bf3011b8e77..83aea5c5cd03 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -288,7 +288,7 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev) if (phydev->dev_flags & PHY_BRCM_DIS_TXCRXC_NOENRGY) { if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54210E || BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54810 || - BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54210E) + BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54811) val |= BCM54XX_SHD_SCR3_RXCTXC_DIS; else val |= BCM54XX_SHD_SCR3_TRDDAPD; diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c index bbbc6ac8fa82..53a433442803 100644 --- a/drivers/net/phy/marvell10g.c +++ b/drivers/net/phy/marvell10g.c @@ -78,6 +78,11 @@ enum { /* Temperature read register (88E2110 only) */ MV_PCS_TEMP = 0x8042, + /* Number of ports on the device */ + MV_PCS_PORT_INFO = 0xd00d, + MV_PCS_PORT_INFO_NPORTS_MASK = 0x0380, + MV_PCS_PORT_INFO_NPORTS_SHIFT = 7, + /* These registers appear at 0x800X and 0xa00X - the 0xa00X control * registers appear to set themselves to the 0x800X when AN is * restarted, but status registers appear readable from either. 
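[Editor's aside] The xpcs_create() change above swaps a bare NULL return for ERR_PTR(-ENOMEM), the usual kernel convention for constructors that can fail for more than one reason. A minimal sketch of how a caller consumes that convention, assuming the two-argument xpcs_create() signature visible in the hunk and the <linux/pcs/pcs-xpcs.h> header of that era; struct my_priv and my_attach_xpcs() are hypothetical names, not part of the patch:

#include <linux/err.h>
#include <linux/pcs/pcs-xpcs.h>

struct my_priv {
	struct dw_xpcs *xpcs;		/* hypothetical driver state */
};

static int my_attach_xpcs(struct my_priv *priv, struct mdio_device *mdiodev,
			  phy_interface_t interface)
{
	struct dw_xpcs *xpcs;

	xpcs = xpcs_create(mdiodev, interface);
	if (IS_ERR(xpcs))		/* never NULL after this change */
		return PTR_ERR(xpcs);	/* e.g. -ENOMEM */

	priv->xpcs = xpcs;
	return 0;
}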
@@ -966,6 +971,30 @@ static const struct mv3310_chip mv2111_type = { #endif }; +static int mv3310_get_number_of_ports(struct phy_device *phydev) +{ + int ret; + + ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_PORT_INFO); + if (ret < 0) + return ret; + + ret &= MV_PCS_PORT_INFO_NPORTS_MASK; + ret >>= MV_PCS_PORT_INFO_NPORTS_SHIFT; + + return ret + 1; +} + +static int mv3310_match_phy_device(struct phy_device *phydev) +{ + return mv3310_get_number_of_ports(phydev) == 1; +} + +static int mv3340_match_phy_device(struct phy_device *phydev) +{ + return mv3310_get_number_of_ports(phydev) == 4; +} + static int mv211x_match_phy_device(struct phy_device *phydev, bool has_5g) { int val; @@ -994,7 +1023,8 @@ static int mv2111_match_phy_device(struct phy_device *phydev) static struct phy_driver mv3310_drivers[] = { { .phy_id = MARVELL_PHY_ID_88X3310, - .phy_id_mask = MARVELL_PHY_ID_88X33X0_MASK, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .match_phy_device = mv3310_match_phy_device, .name = "mv88x3310", .driver_data = &mv3310_type, .get_features = mv3310_get_features, @@ -1011,8 +1041,9 @@ static struct phy_driver mv3310_drivers[] = { .set_loopback = genphy_c45_loopback, }, { - .phy_id = MARVELL_PHY_ID_88X3340, - .phy_id_mask = MARVELL_PHY_ID_88X33X0_MASK, + .phy_id = MARVELL_PHY_ID_88X3310, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .match_phy_device = mv3340_match_phy_device, .name = "mv88x3340", .driver_data = &mv3340_type, .get_features = mv3310_get_features, @@ -1069,8 +1100,7 @@ static struct phy_driver mv3310_drivers[] = { module_phy_driver(mv3310_drivers); static struct mdio_device_id __maybe_unused mv3310_tbl[] = { - { MARVELL_PHY_ID_88X3310, MARVELL_PHY_ID_88X33X0_MASK }, - { MARVELL_PHY_ID_88X3340, MARVELL_PHY_ID_88X33X0_MASK }, + { MARVELL_PHY_ID_88X3310, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E2110, MARVELL_PHY_ID_MASK }, { }, }; diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 4d53886f7d51..5c928f827173 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -401,11 +401,11 @@ static int ksz8041_config_aneg(struct phy_device *phydev) } static int ksz8051_ksz8795_match_phy_device(struct phy_device *phydev, - const u32 ksz_phy_id) + const bool ksz_8051) { int ret; - if ((phydev->phy_id & MICREL_PHY_ID_MASK) != ksz_phy_id) + if ((phydev->phy_id & MICREL_PHY_ID_MASK) != PHY_ID_KSZ8051) return 0; ret = phy_read(phydev, MII_BMSR); @@ -418,7 +418,7 @@ static int ksz8051_ksz8795_match_phy_device(struct phy_device *phydev, * the switch does not. 
*/ ret &= BMSR_ERCAP; - if (ksz_phy_id == PHY_ID_KSZ8051) + if (ksz_8051) return ret; else return !ret; @@ -426,7 +426,7 @@ static int ksz8051_ksz8795_match_phy_device(struct phy_device *phydev, static int ksz8051_match_phy_device(struct phy_device *phydev) { - return ksz8051_ksz8795_match_phy_device(phydev, PHY_ID_KSZ8051); + return ksz8051_ksz8795_match_phy_device(phydev, true); } static int ksz8081_config_init(struct phy_device *phydev) @@ -535,7 +535,7 @@ static int ksz8061_config_init(struct phy_device *phydev) static int ksz8795_match_phy_device(struct phy_device *phydev) { - return ksz8051_ksz8795_match_phy_device(phydev, PHY_ID_KSZ87XX); + return ksz8051_ksz8795_match_phy_device(phydev, false); } static int ksz9021_load_values_from_of(struct phy_device *phydev, @@ -1760,8 +1760,6 @@ static struct phy_driver ksphy_driver[] = { .name = "Micrel KSZ87XX Switch", /* PHY_BASIC_FEATURES */ .config_init = kszphy_config_init, - .config_aneg = ksz8873mll_config_aneg, - .read_status = ksz8873mll_read_status, .match_phy_device = ksz8795_match_phy_device, .suspend = genphy_suspend, .resume = genphy_resume, diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 930e49ef15f6..7a099c37527f 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -284,7 +284,7 @@ static struct channel *ppp_find_channel(struct ppp_net *pn, int unit); static int ppp_connect_channel(struct channel *pch, int unit); static int ppp_disconnect_channel(struct channel *pch); static void ppp_destroy_channel(struct channel *pch); -static int unit_get(struct idr *p, void *ptr); +static int unit_get(struct idr *p, void *ptr, int min); static int unit_set(struct idr *p, void *ptr, int n); static void unit_put(struct idr *p, int n); static void *unit_find(struct idr *p, int n); @@ -1155,9 +1155,20 @@ static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set) mutex_lock(&pn->all_ppp_mutex); if (unit < 0) { - ret = unit_get(&pn->units_idr, ppp); + ret = unit_get(&pn->units_idr, ppp, 0); if (ret < 0) goto err; + if (!ifname_is_set) { + while (1) { + snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ret); + if (!__dev_get_by_name(ppp->ppp_net, ppp->dev->name)) + break; + unit_put(&pn->units_idr, ret); + ret = unit_get(&pn->units_idr, ppp, ret + 1); + if (ret < 0) + goto err; + } + } } else { /* Caller asked for a specific unit number. Fail with -EEXIST * if unavailable. For backward compatibility, return -EEXIST @@ -1306,7 +1317,7 @@ static int ppp_nl_newlink(struct net *src_net, struct net_device *dev, * the PPP unit identifer as suffix (i.e. ppp<unit_id>). This allows * userspace to infer the device name using to the PPPIOCGUNIT ioctl. 
*/ - if (!tb[IFLA_IFNAME]) + if (!tb[IFLA_IFNAME] || !nla_len(tb[IFLA_IFNAME]) || !*(char *)nla_data(tb[IFLA_IFNAME])) conf.ifname_is_set = false; err = ppp_dev_configure(src_net, dev, &conf); @@ -3552,9 +3563,9 @@ static int unit_set(struct idr *p, void *ptr, int n) } /* get new free unit number and associate pointer with it */ -static int unit_get(struct idr *p, void *ptr) +static int unit_get(struct idr *p, void *ptr, int min) { - return idr_alloc(p, ptr, 0, 0, GFP_KERNEL); + return idr_alloc(p, ptr, min, 0, GFP_KERNEL); } /* put unit number back to a pool */ diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c index ac92bc52a85e..38cda590895c 100644 --- a/drivers/net/usb/asix_common.c +++ b/drivers/net/usb/asix_common.c @@ -63,6 +63,29 @@ void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index, value, index, data, size); } +static int asix_check_host_enable(struct usbnet *dev, int in_pm) +{ + int i, ret; + u8 smsr; + + for (i = 0; i < 30; ++i) { + ret = asix_set_sw_mii(dev, in_pm); + if (ret == -ENODEV || ret == -ETIMEDOUT) + break; + usleep_range(1000, 1100); + ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, + 0, 0, 1, &smsr, in_pm); + if (ret == -ENODEV) + break; + else if (ret < 0) + continue; + else if (smsr & AX_HOST_EN) + break; + } + + return ret; +} + static void reset_asix_rx_fixup_info(struct asix_rx_fixup_info *rx) { /* Reset the variables that have a lifetime outside of @@ -467,19 +490,11 @@ int asix_mdio_read(struct net_device *netdev, int phy_id, int loc) { struct usbnet *dev = netdev_priv(netdev); __le16 res; - u8 smsr; - int i = 0; int ret; mutex_lock(&dev->phy_mutex); - do { - ret = asix_set_sw_mii(dev, 0); - if (ret == -ENODEV || ret == -ETIMEDOUT) - break; - usleep_range(1000, 1100); - ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, - 0, 0, 1, &smsr, 0); - } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV)); + + ret = asix_check_host_enable(dev, 0); if (ret == -ENODEV || ret == -ETIMEDOUT) { mutex_unlock(&dev->phy_mutex); return ret; @@ -505,23 +520,14 @@ static int __asix_mdio_write(struct net_device *netdev, int phy_id, int loc, { struct usbnet *dev = netdev_priv(netdev); __le16 res = cpu_to_le16(val); - u8 smsr; - int i = 0; int ret; netdev_dbg(dev->net, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n", phy_id, loc, val); mutex_lock(&dev->phy_mutex); - do { - ret = asix_set_sw_mii(dev, 0); - if (ret == -ENODEV) - break; - usleep_range(1000, 1100); - ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, - 0, 0, 1, &smsr, 0); - } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV)); + ret = asix_check_host_enable(dev, 0); if (ret == -ENODEV) goto out; @@ -561,19 +567,11 @@ int asix_mdio_read_nopm(struct net_device *netdev, int phy_id, int loc) { struct usbnet *dev = netdev_priv(netdev); __le16 res; - u8 smsr; - int i = 0; int ret; mutex_lock(&dev->phy_mutex); - do { - ret = asix_set_sw_mii(dev, 1); - if (ret == -ENODEV || ret == -ETIMEDOUT) - break; - usleep_range(1000, 1100); - ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, - 0, 0, 1, &smsr, 1); - } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV)); + + ret = asix_check_host_enable(dev, 1); if (ret == -ENODEV || ret == -ETIMEDOUT) { mutex_unlock(&dev->phy_mutex); return ret; @@ -595,22 +593,14 @@ asix_mdio_write_nopm(struct net_device *netdev, int phy_id, int loc, int val) { struct usbnet *dev = netdev_priv(netdev); __le16 res = cpu_to_le16(val); - u8 smsr; - int i = 0; int ret; netdev_dbg(dev->net, "asix_mdio_write() phy_id=0x%02x, 
loc=0x%02x, val=0x%04x\n", phy_id, loc, val); mutex_lock(&dev->phy_mutex); - do { - ret = asix_set_sw_mii(dev, 1); - if (ret == -ENODEV) - break; - usleep_range(1000, 1100); - ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, - 0, 0, 1, &smsr, 1); - } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV)); + + ret = asix_check_host_enable(dev, 1); if (ret == -ENODEV) { mutex_unlock(&dev->phy_mutex); return; diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index aec97b021a73..2c115216420a 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -701,6 +701,7 @@ static int ax88772_init_phy(struct usbnet *dev) return ret; } + phy_suspend(priv->phydev); priv->phydev->mac_managed_pm = 1; phy_attached_info(priv->phydev); diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 63006838bdcc..dec96e8ab567 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -2495,7 +2495,7 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface, hso_net_init); if (!net) { dev_err(&interface->dev, "Unable to create ethernet device\n"); - goto exit; + goto err_hso_dev; } hso_net = netdev_priv(net); @@ -2508,13 +2508,13 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface, USB_DIR_IN); if (!hso_net->in_endp) { dev_err(&interface->dev, "Can't find BULK IN endpoint\n"); - goto exit; + goto err_net; } hso_net->out_endp = hso_get_ep(interface, USB_ENDPOINT_XFER_BULK, USB_DIR_OUT); if (!hso_net->out_endp) { dev_err(&interface->dev, "Can't find BULK OUT endpoint\n"); - goto exit; + goto err_net; } SET_NETDEV_DEV(net, &interface->dev); SET_NETDEV_DEVTYPE(net, &hso_type); @@ -2523,18 +2523,18 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface, for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) { hso_net->mux_bulk_rx_urb_pool[i] = usb_alloc_urb(0, GFP_KERNEL); if (!hso_net->mux_bulk_rx_urb_pool[i]) - goto exit; + goto err_mux_bulk_rx; hso_net->mux_bulk_rx_buf_pool[i] = kzalloc(MUX_BULK_RX_BUF_SIZE, GFP_KERNEL); if (!hso_net->mux_bulk_rx_buf_pool[i]) - goto exit; + goto err_mux_bulk_rx; } hso_net->mux_bulk_tx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!hso_net->mux_bulk_tx_urb) - goto exit; + goto err_mux_bulk_rx; hso_net->mux_bulk_tx_buf = kzalloc(MUX_BULK_TX_BUF_SIZE, GFP_KERNEL); if (!hso_net->mux_bulk_tx_buf) - goto exit; + goto err_free_tx_urb; add_net_device(hso_dev); @@ -2542,7 +2542,7 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface, result = register_netdev(net); if (result) { dev_err(&interface->dev, "Failed to register device\n"); - goto exit; + goto err_free_tx_buf; } hso_log_port(hso_dev); @@ -2550,8 +2550,21 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface, hso_create_rfkill(hso_dev, interface); return hso_dev; -exit: - hso_free_net_device(hso_dev, true); + +err_free_tx_buf: + remove_net_device(hso_dev); + kfree(hso_net->mux_bulk_tx_buf); +err_free_tx_urb: + usb_free_urb(hso_net->mux_bulk_tx_urb); +err_mux_bulk_rx: + for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) { + usb_free_urb(hso_net->mux_bulk_rx_urb_pool[i]); + kfree(hso_net->mux_bulk_rx_buf_pool[i]); + } +err_net: + free_netdev(net); +err_hso_dev: + kfree(hso_dev); return NULL; } diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 25489389ea49..6d092d78e0cb 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -1154,7 +1154,7 @@ static int lan78xx_link_reset(struct lan78xx_net *dev) { struct 
phy_device *phydev = dev->net->phydev; struct ethtool_link_ksettings ecmd; - int ladv, radv, ret; + int ladv, radv, ret, link; u32 buf; /* clear LAN78xx interrupt status */ @@ -1162,9 +1162,12 @@ static int lan78xx_link_reset(struct lan78xx_net *dev) if (unlikely(ret < 0)) return -EIO; + mutex_lock(&phydev->lock); phy_read_status(phydev); + link = phydev->link; + mutex_unlock(&phydev->lock); - if (!phydev->link && dev->link_on) { + if (!link && dev->link_on) { dev->link_on = false; /* reset MAC */ @@ -1177,7 +1180,7 @@ static int lan78xx_link_reset(struct lan78xx_net *dev) return -EIO; del_timer(&dev->stat_monitor); - } else if (phydev->link && !dev->link_on) { + } else if (link && !dev->link_on) { dev->link_on = true; phy_ethtool_ksettings_get(phydev, &ecmd); @@ -1466,9 +1469,14 @@ static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata) static u32 lan78xx_get_link(struct net_device *net) { + u32 link; + + mutex_lock(&net->phydev->lock); phy_read_status(net->phydev); + link = net->phydev->link; + mutex_unlock(&net->phydev->lock); - return net->phydev->link; + return link; } static void lan78xx_get_drvinfo(struct net_device *net, diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index 9a907182569c..652e9fcf0b77 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c @@ -1,31 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 1999-2013 Petko Manolov (petkan@nucleusys.com) + * Copyright (c) 1999-2021 Petko Manolov (petkan@nucleusys.com) * - * ChangeLog: - * .... Most of the time spent on reading sources & docs. - * v0.2.x First official release for the Linux kernel. - * v0.3.0 Beutified and structured, some bugs fixed. - * v0.3.x URBifying bulk requests and bugfixing. First relatively - * stable release. Still can touch device's registers only - * from top-halves. - * v0.4.0 Control messages remained unurbified are now URBs. - * Now we can touch the HW at any time. - * v0.4.9 Control urbs again use process context to wait. Argh... - * Some long standing bugs (enable_net_traffic) fixed. - * Also nasty trick about resubmiting control urb from - * interrupt context used. Please let me know how it - * behaves. Pegasus II support added since this version. - * TODO: suppressing HCD warnings spewage on disconnect. - * v0.4.13 Ethernet address is now set at probe(), not at open() - * time as this seems to break dhcpd. - * v0.5.0 branch to 2.5.x kernels - * v0.5.1 ethtool support added - * v0.5.5 rx socket buffers are in a pool and the their allocation - * is out of the interrupt routine. - * ... - * v0.9.3 simplified [get|set]_register(s), async update registers - * logic revisited, receive skb_pool removed. 
*/ #include <linux/sched.h> @@ -45,7 +21,6 @@ /* * Version Information */ -#define DRIVER_VERSION "v0.9.3 (2013/04/25)" #define DRIVER_AUTHOR "Petko Manolov <petkan@nucleusys.com>" #define DRIVER_DESC "Pegasus/Pegasus II USB Ethernet driver" @@ -132,9 +107,15 @@ static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data) static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size, const void *data) { - return usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REGS, + int ret; + + ret = usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REGS, PEGASUS_REQT_WRITE, 0, indx, data, size, 1000, GFP_NOIO); + if (ret < 0) + netif_dbg(pegasus, drv, pegasus->net, "%s failed with %d\n", __func__, ret); + + return ret; } /* @@ -145,10 +126,15 @@ static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size, static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data) { void *buf = &data; + int ret; - return usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REG, + ret = usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REG, PEGASUS_REQT_WRITE, data, indx, buf, 1, 1000, GFP_NOIO); + if (ret < 0) + netif_dbg(pegasus, drv, pegasus->net, "%s failed with %d\n", __func__, ret); + + return ret; } static int update_eth_regs_async(pegasus_t *pegasus) @@ -188,10 +174,9 @@ static int update_eth_regs_async(pegasus_t *pegasus) static int __mii_op(pegasus_t *p, __u8 phy, __u8 indx, __u16 *regd, __u8 cmd) { - int i; - __u8 data[4] = { phy, 0, 0, indx }; + int i, ret; __le16 regdi; - int ret = -ETIMEDOUT; + __u8 data[4] = { phy, 0, 0, indx }; if (cmd & PHY_WRITE) { __le16 *t = (__le16 *) & data[1]; @@ -207,12 +192,15 @@ static int __mii_op(pegasus_t *p, __u8 phy, __u8 indx, __u16 *regd, __u8 cmd) if (data[0] & PHY_DONE) break; } - if (i >= REG_TIMEOUT) + if (i >= REG_TIMEOUT) { + ret = -ETIMEDOUT; goto fail; + } if (cmd & PHY_READ) { ret = get_registers(p, PhyData, 2, ®di); + if (ret < 0) + goto fail; *regd = le16_to_cpu(regdi); - return ret; } return 0; fail: @@ -235,9 +223,13 @@ static int write_mii_word(pegasus_t *pegasus, __u8 phy, __u8 indx, __u16 *regd) static int mdio_read(struct net_device *dev, int phy_id, int loc) { pegasus_t *pegasus = netdev_priv(dev); + int ret; u16 res; - read_mii_word(pegasus, phy_id, loc, &res); + ret = read_mii_word(pegasus, phy_id, loc, &res); + if (ret < 0) + return ret; + return (int)res; } @@ -251,10 +243,9 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int val) static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata) { - int i; - __u8 tmp = 0; + int ret, i; __le16 retdatai; - int ret; + __u8 tmp = 0; set_register(pegasus, EpromCtrl, 0); set_register(pegasus, EpromOffset, index); @@ -262,21 +253,25 @@ static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata) for (i = 0; i < REG_TIMEOUT; i++) { ret = get_registers(pegasus, EpromCtrl, 1, &tmp); + if (ret < 0) + goto fail; if (tmp & EPROM_DONE) break; - if (ret == -ESHUTDOWN) - goto fail; } - if (i >= REG_TIMEOUT) + if (i >= REG_TIMEOUT) { + ret = -ETIMEDOUT; goto fail; + } ret = get_registers(pegasus, EpromData, 2, &retdatai); + if (ret < 0) + goto fail; *retdata = le16_to_cpu(retdatai); return ret; fail: - netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__); - return -ETIMEDOUT; + netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__); + return ret; } #ifdef PEGASUS_WRITE_EEPROM @@ -324,10 +319,10 @@ static int write_eprom_word(pegasus_t *pegasus, __u8 index, __u16 data) return ret; fail: - 
netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__); + netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__); return -ETIMEDOUT; } -#endif /* PEGASUS_WRITE_EEPROM */ +#endif /* PEGASUS_WRITE_EEPROM */ static inline int get_node_id(pegasus_t *pegasus, u8 *id) { @@ -367,19 +362,21 @@ static void set_ethernet_addr(pegasus_t *pegasus) return; err: eth_hw_addr_random(pegasus->net); - dev_info(&pegasus->intf->dev, "software assigned MAC address.\n"); + netif_dbg(pegasus, drv, pegasus->net, "software assigned MAC address.\n"); return; } static inline int reset_mac(pegasus_t *pegasus) { + int ret, i; __u8 data = 0x8; - int i; set_register(pegasus, EthCtrl1, data); for (i = 0; i < REG_TIMEOUT; i++) { - get_registers(pegasus, EthCtrl1, 1, &data); + ret = get_registers(pegasus, EthCtrl1, 1, &data); + if (ret < 0) + goto fail; if (~data & 0x08) { if (loopback) break; @@ -402,22 +399,29 @@ static inline int reset_mac(pegasus_t *pegasus) } if (usb_dev_id[pegasus->dev_index].vendor == VENDOR_ELCON) { __u16 auxmode; - read_mii_word(pegasus, 3, 0x1b, &auxmode); + ret = read_mii_word(pegasus, 3, 0x1b, &auxmode); + if (ret < 0) + goto fail; auxmode |= 4; write_mii_word(pegasus, 3, 0x1b, &auxmode); } return 0; +fail: + netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__); + return ret; } static int enable_net_traffic(struct net_device *dev, struct usb_device *usb) { - __u16 linkpart; - __u8 data[4]; pegasus_t *pegasus = netdev_priv(dev); int ret; + __u16 linkpart; + __u8 data[4]; - read_mii_word(pegasus, pegasus->phy, MII_LPA, &linkpart); + ret = read_mii_word(pegasus, pegasus->phy, MII_LPA, &linkpart); + if (ret < 0) + goto fail; data[0] = 0xc8; /* TX & RX enable, append status, no CRC */ data[1] = 0; if (linkpart & (ADVERTISE_100FULL | ADVERTISE_10FULL)) @@ -435,11 +439,16 @@ static int enable_net_traffic(struct net_device *dev, struct usb_device *usb) usb_dev_id[pegasus->dev_index].vendor == VENDOR_LINKSYS2 || usb_dev_id[pegasus->dev_index].vendor == VENDOR_DLINK) { u16 auxmode; - read_mii_word(pegasus, 0, 0x1b, &auxmode); + ret = read_mii_word(pegasus, 0, 0x1b, &auxmode); + if (ret < 0) + goto fail; auxmode |= 4; write_mii_word(pegasus, 0, 0x1b, &auxmode); } + return 0; +fail: + netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__); return ret; } @@ -447,9 +456,9 @@ static void read_bulk_callback(struct urb *urb) { pegasus_t *pegasus = urb->context; struct net_device *net; + u8 *buf = urb->transfer_buffer; int rx_status, count = urb->actual_length; int status = urb->status; - u8 *buf = urb->transfer_buffer; __u16 pkt_len; if (!pegasus) @@ -735,12 +744,16 @@ static inline void disable_net_traffic(pegasus_t *pegasus) set_registers(pegasus, EthCtrl0, sizeof(tmp), &tmp); } -static inline void get_interrupt_interval(pegasus_t *pegasus) +static inline int get_interrupt_interval(pegasus_t *pegasus) { u16 data; u8 interval; + int ret; + + ret = read_eprom_word(pegasus, 4, &data); + if (ret < 0) + return ret; - read_eprom_word(pegasus, 4, &data); interval = data >> 8; if (pegasus->usb->speed != USB_SPEED_HIGH) { if (interval < 0x80) { @@ -755,6 +768,8 @@ static inline void get_interrupt_interval(pegasus_t *pegasus) } } pegasus->intr_interval = interval; + + return 0; } static void set_carrier(struct net_device *net) @@ -880,7 +895,6 @@ static void pegasus_get_drvinfo(struct net_device *dev, pegasus_t *pegasus = netdev_priv(dev); strlcpy(info->driver, driver_name, sizeof(info->driver)); - strlcpy(info->version, DRIVER_VERSION, sizeof(info->version)); 
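
These pegasus hunks apply one discipline throughout: every register read is checked, real errors are propagated, and only an exhausted retry loop produces -ETIMEDOUT. A sketch of that polling shape, with a stubbed register read and a made-up "done" bit standing in for the device's actual layout:

    #include <errno.h>
    #include <stdio.h>

    #define REG_TIMEOUT 16

    /* Stub standing in for a bus read; a real driver would do I/O here. */
    static int read_status(unsigned char *val)
    {
        static int calls;

        *val = (++calls > 3) ? 0x80 : 0;    /* "done" bit set on 4th poll */
        return 0;                           /* or a negative errno */
    }

    /* Mirror of the reworked pegasus loops: every read is checked, and
     * falling out of the loop yields an explicit -ETIMEDOUT. */
    static int wait_ready(void)
    {
        unsigned char tmp = 0;
        int i, ret;

        for (i = 0; i < REG_TIMEOUT; i++) {
            ret = read_status(&tmp);
            if (ret < 0)
                return ret;                 /* propagate the real error */
            if (tmp & 0x80)
                return 0;
        }
        return -ETIMEDOUT;                  /* loop exhausted: say so */
    }

    int main(void)
    {
        printf("wait_ready: %d\n", wait_ready());
        return 0;
    }
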
usb_make_path(pegasus->usb, info->bus_info, sizeof(info->bus_info)); } @@ -998,8 +1012,7 @@ static int pegasus_ioctl(struct net_device *net, struct ifreq *rq, int cmd) data[0] = pegasus->phy; fallthrough; case SIOCDEVPRIVATE + 1: - read_mii_word(pegasus, data[0], data[1] & 0x1f, &data[3]); - res = 0; + res = read_mii_word(pegasus, data[0], data[1] & 0x1f, &data[3]); break; case SIOCDEVPRIVATE + 2: if (!capable(CAP_NET_ADMIN)) @@ -1033,22 +1046,25 @@ static void pegasus_set_multicast(struct net_device *net) static __u8 mii_phy_probe(pegasus_t *pegasus) { - int i; + int i, ret; __u16 tmp; for (i = 0; i < 32; i++) { - read_mii_word(pegasus, i, MII_BMSR, &tmp); + ret = read_mii_word(pegasus, i, MII_BMSR, &tmp); + if (ret < 0) + goto fail; if (tmp == 0 || tmp == 0xffff || (tmp & BMSR_MEDIA) == 0) continue; else return i; } - +fail: return 0xff; } static inline void setup_pegasus_II(pegasus_t *pegasus) { + int ret; __u8 data = 0xa5; set_register(pegasus, Reg1d, 0); @@ -1060,7 +1076,9 @@ static inline void setup_pegasus_II(pegasus_t *pegasus) set_register(pegasus, Reg7b, 2); set_register(pegasus, 0x83, data); - get_registers(pegasus, 0x83, 1, &data); + ret = get_registers(pegasus, 0x83, 1, &data); + if (ret < 0) + goto fail; if (data == 0xa5) pegasus->chip = 0x8513; @@ -1075,6 +1093,10 @@ static inline void setup_pegasus_II(pegasus_t *pegasus) set_register(pegasus, Reg81, 6); else set_register(pegasus, Reg81, 2); + + return; +fail: + netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__); } static void check_carrier(struct work_struct *work) @@ -1149,7 +1171,9 @@ static int pegasus_probe(struct usb_interface *intf, | NETIF_MSG_PROBE | NETIF_MSG_LINK); pegasus->features = usb_dev_id[dev_index].private; - get_interrupt_interval(pegasus); + res = get_interrupt_interval(pegasus); + if (res) + goto out2; if (reset_mac(pegasus)) { dev_err(&intf->dev, "can't reset MAC\n"); res = -EIO; @@ -1296,7 +1320,7 @@ static void __init parse_id(char *id) static int __init pegasus_init(void) { - pr_info("%s: %s, " DRIVER_DESC "\n", driver_name, DRIVER_VERSION); + pr_info("%s: " DRIVER_DESC "\n", driver_name); if (devid) parse_id(devid); return usb_register(&pegasus_driver); diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 1692d3b1b6e1..79832374f78d 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -1552,7 +1552,8 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u32 speed, u8 duplex, u32 advertising); -static int rtl8152_set_mac_address(struct net_device *netdev, void *p) +static int __rtl8152_set_mac_address(struct net_device *netdev, void *p, + bool in_resume) { struct r8152 *tp = netdev_priv(netdev); struct sockaddr *addr = p; @@ -1561,9 +1562,11 @@ static int rtl8152_set_mac_address(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) goto out1; - ret = usb_autopm_get_interface(tp->intf); - if (ret < 0) - goto out1; + if (!in_resume) { + ret = usb_autopm_get_interface(tp->intf); + if (ret < 0) + goto out1; + } mutex_lock(&tp->control); @@ -1575,11 +1578,17 @@ static int rtl8152_set_mac_address(struct net_device *netdev, void *p) mutex_unlock(&tp->control); - usb_autopm_put_interface(tp->intf); + if (!in_resume) + usb_autopm_put_interface(tp->intf); out1: return ret; } +static int rtl8152_set_mac_address(struct net_device *netdev, void *p) +{ + return __rtl8152_set_mac_address(netdev, p, false); +} + /* Devices containing proper chips can support a persistent * host system provided MAC address. 
* Examples of this are Dell TB15 and Dell WD15 docks @@ -1698,7 +1707,7 @@ static int determine_ethernet_addr(struct r8152 *tp, struct sockaddr *sa) return ret; } -static int set_ethernet_addr(struct r8152 *tp) +static int set_ethernet_addr(struct r8152 *tp, bool in_resume) { struct net_device *dev = tp->netdev; struct sockaddr sa; @@ -1711,7 +1720,7 @@ static int set_ethernet_addr(struct r8152 *tp) if (tp->version == RTL_VER_01) ether_addr_copy(dev->dev_addr, sa.sa_data); else - ret = rtl8152_set_mac_address(dev, &sa); + ret = __rtl8152_set_mac_address(dev, &sa, in_resume); return ret; } @@ -3946,17 +3955,28 @@ static void rtl_clear_bp(struct r8152 *tp, u16 type) case RTL_VER_06: ocp_write_byte(tp, type, PLA_BP_EN, 0); break; + case RTL_VER_14: + ocp_write_word(tp, type, USB_BP2_EN, 0); + + ocp_write_word(tp, type, USB_BP_8, 0); + ocp_write_word(tp, type, USB_BP_9, 0); + ocp_write_word(tp, type, USB_BP_10, 0); + ocp_write_word(tp, type, USB_BP_11, 0); + ocp_write_word(tp, type, USB_BP_12, 0); + ocp_write_word(tp, type, USB_BP_13, 0); + ocp_write_word(tp, type, USB_BP_14, 0); + ocp_write_word(tp, type, USB_BP_15, 0); + break; case RTL_VER_08: case RTL_VER_09: case RTL_VER_10: case RTL_VER_11: case RTL_VER_12: case RTL_VER_13: - case RTL_VER_14: case RTL_VER_15: default: if (type == MCU_TYPE_USB) { - ocp_write_byte(tp, MCU_TYPE_USB, USB_BP2_EN, 0); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP2_EN, 0); ocp_write_word(tp, MCU_TYPE_USB, USB_BP_8, 0); ocp_write_word(tp, MCU_TYPE_USB, USB_BP_9, 0); @@ -4322,7 +4342,6 @@ static bool rtl8152_is_fw_mac_ok(struct r8152 *tp, struct fw_mac *mac) case RTL_VER_11: case RTL_VER_12: case RTL_VER_13: - case RTL_VER_14: case RTL_VER_15: fw_reg = 0xf800; bp_ba_addr = PLA_BP_BA; @@ -4330,6 +4349,13 @@ static bool rtl8152_is_fw_mac_ok(struct r8152 *tp, struct fw_mac *mac) bp_start = PLA_BP_0; max_bp = 8; break; + case RTL_VER_14: + fw_reg = 0xf800; + bp_ba_addr = PLA_BP_BA; + bp_en_addr = USB_BP2_EN; + bp_start = PLA_BP_0; + max_bp = 16; + break; default: goto out; } @@ -6763,9 +6789,10 @@ static int rtl8152_close(struct net_device *netdev) tp->rtl_ops.down(tp); mutex_unlock(&tp->control); + } + if (!res) usb_autopm_put_interface(tp->intf); - } free_all_mem(tp); @@ -8443,7 +8470,7 @@ static int rtl8152_reset_resume(struct usb_interface *intf) clear_bit(SELECTIVE_SUSPEND, &tp->flags); tp->rtl_ops.init(tp); queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0); - set_ethernet_addr(tp); + set_ethernet_addr(tp, true); return rtl8152_resume(intf); } @@ -9644,7 +9671,7 @@ static int rtl8152_probe(struct usb_interface *intf, tp->rtl_fw.retry = true; #endif queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0); - set_ethernet_addr(tp); + set_ethernet_addr(tp, false); usb_set_intfdata(intf, tp); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 8a58a2f013af..eee493685aad 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -63,7 +63,7 @@ static const unsigned long guest_offloads[] = { VIRTIO_NET_F_GUEST_CSUM }; -#define GUEST_OFFLOAD_LRO_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \ +#define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \ (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \ (1ULL << VIRTIO_NET_F_GUEST_ECN) | \ (1ULL << VIRTIO_NET_F_GUEST_UFO)) @@ -1771,6 +1771,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, { struct scatterlist *sgs[4], hdr, stat; unsigned out_num = 0, tmp; + int ret; /* Caller should know better */ BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); @@ 
-1790,7 +1791,12 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, sgs[out_num] = &stat; BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); - virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); + ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); + if (ret < 0) { + dev_warn(&vi->vdev->dev, + "Failed to add sgs for command vq: %d\n.", ret); + return false; + } if (unlikely(!virtqueue_kick(vi->cvq))) return vi->ctrl->status == VIRTIO_NET_OK; @@ -2509,7 +2515,7 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))) { - NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO/CSUM, disable LRO/CSUM first"); + NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first"); return -EOPNOTSUPP; } @@ -2640,15 +2646,15 @@ static int virtnet_set_features(struct net_device *dev, u64 offloads; int err; - if ((dev->features ^ features) & NETIF_F_LRO) { + if ((dev->features ^ features) & NETIF_F_GRO_HW) { if (vi->xdp_enabled) return -EBUSY; - if (features & NETIF_F_LRO) + if (features & NETIF_F_GRO_HW) offloads = vi->guest_offloads_capable; else offloads = vi->guest_offloads_capable & - ~GUEST_OFFLOAD_LRO_MASK; + ~GUEST_OFFLOAD_GRO_HW_MASK; err = virtnet_set_guest_offloads(vi, offloads); if (err) @@ -3128,9 +3134,9 @@ static int virtnet_probe(struct virtio_device *vdev) dev->features |= NETIF_F_RXCSUM; if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)) - dev->features |= NETIF_F_LRO; + dev->features |= NETIF_F_GRO_HW; if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) - dev->hw_features |= NETIF_F_LRO; + dev->hw_features |= NETIF_F_GRO_HW; dev->vlan_features = dev->features; diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index c0bd9cbc43b1..1b483cf2b1ca 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -1,7 +1,7 @@ /* * Linux driver for VMware's vmxnet3 ethernet NIC. * - * Copyright (C) 2008-2020, VMware, Inc. All Rights Reserved. + * Copyright (C) 2008-2021, VMware, Inc. All Rights Reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -26,6 +26,10 @@ #include "vmxnet3_int.h" +#include <net/vxlan.h> +#include <net/geneve.h> + +#define VXLAN_UDP_PORT 8472 struct vmxnet3_stat_desc { char desc[ETH_GSTRING_LEN]; @@ -262,6 +266,8 @@ netdev_features_t vmxnet3_features_check(struct sk_buff *skb, if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL) { u8 l4_proto = 0; + u16 port; + struct udphdr *udph; switch (vlan_get_protocol(skb)) { case htons(ETH_P_IP): @@ -274,8 +280,20 @@ netdev_features_t vmxnet3_features_check(struct sk_buff *skb, return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); } - if (l4_proto != IPPROTO_UDP) + switch (l4_proto) { + case IPPROTO_UDP: + udph = udp_hdr(skb); + port = be16_to_cpu(udph->dest); + /* Check if offloaded port is supported */ + if (port != GENEVE_UDP_PORT && + port != IANA_VXLAN_UDP_PORT && + port != VXLAN_UDP_PORT) { + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); + } + break; + default: return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); + } } return features; } diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 2b1b944d4b28..8bbe2a7bb141 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -1367,6 +1367,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev, bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr); bool is_ndisc = ipv6_ndisc_frame(skb); + nf_reset_ct(skb); + /* loopback, multicast & non-ND link-local traffic; do not push through * packet taps again. Reset pkt_type for upper layers to process skb. * For strict packets with a source LLA, determine the dst using the @@ -1429,6 +1431,8 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev, skb->skb_iif = vrf_dev->ifindex; IPCB(skb)->flags |= IPSKB_L3SLAVE; + nf_reset_ct(skb); + if (ipv4_is_multicast(ip_hdr(skb)->daddr)) goto out; diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c index 349ca18088e8..c54fdae950fb 100644 --- a/drivers/net/wan/hdlc_cisco.c +++ b/drivers/net/wan/hdlc_cisco.c @@ -364,19 +364,19 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr) return -EINVAL; } -static int __init mod_init(void) +static int __init hdlc_cisco_init(void) { register_hdlc_protocol(&proto); return 0; } -static void __exit mod_exit(void) +static void __exit hdlc_cisco_exit(void) { unregister_hdlc_protocol(&proto); } -module_init(mod_init); -module_exit(mod_exit); +module_init(hdlc_cisco_init); +module_exit(hdlc_cisco_exit); MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); MODULE_DESCRIPTION("Cisco HDLC protocol support for generic HDLC"); diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index 72250fe0a1df..25e3564ce118 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c @@ -1279,19 +1279,19 @@ static int fr_ioctl(struct net_device *dev, struct ifreq *ifr) return -EINVAL; } -static int __init mod_init(void) +static int __init hdlc_fr_init(void) { register_hdlc_protocol(&proto); return 0; } -static void __exit mod_exit(void) +static void __exit hdlc_fr_exit(void) { unregister_hdlc_protocol(&proto); } -module_init(mod_init); -module_exit(mod_exit); +module_init(hdlc_fr_init); +module_exit(hdlc_fr_exit); MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); MODULE_DESCRIPTION("Frame-Relay protocol support for generic HDLC"); diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c index 
834be2ae3e9e..b81ecf432a0c 100644 --- a/drivers/net/wan/hdlc_ppp.c +++ b/drivers/net/wan/hdlc_ppp.c @@ -705,20 +705,20 @@ static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr) return -EINVAL; } -static int __init mod_init(void) +static int __init hdlc_ppp_init(void) { skb_queue_head_init(&tx_queue); register_hdlc_protocol(&proto); return 0; } -static void __exit mod_exit(void) +static void __exit hdlc_ppp_exit(void) { unregister_hdlc_protocol(&proto); } -module_init(mod_init); -module_exit(mod_exit); +module_init(hdlc_ppp_init); +module_exit(hdlc_ppp_exit); MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); MODULE_DESCRIPTION("PPP protocol support for generic HDLC"); diff --git a/drivers/net/wan/hdlc_raw.c b/drivers/net/wan/hdlc_raw.c index 388fcc09b4dd..54d28496fefd 100644 --- a/drivers/net/wan/hdlc_raw.c +++ b/drivers/net/wan/hdlc_raw.c @@ -90,7 +90,7 @@ static int raw_ioctl(struct net_device *dev, struct ifreq *ifr) } -static int __init mod_init(void) +static int __init hdlc_raw_init(void) { register_hdlc_protocol(&proto); return 0; @@ -98,14 +98,14 @@ static int __init mod_init(void) -static void __exit mod_exit(void) +static void __exit hdlc_raw_exit(void) { unregister_hdlc_protocol(&proto); } -module_init(mod_init); -module_exit(mod_exit); +module_init(hdlc_raw_init); +module_exit(hdlc_raw_exit); MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); MODULE_DESCRIPTION("Raw HDLC protocol support for generic HDLC"); diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c index c70a518b8b47..927596276a07 100644 --- a/drivers/net/wan/hdlc_raw_eth.c +++ b/drivers/net/wan/hdlc_raw_eth.c @@ -110,7 +110,7 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr) } -static int __init mod_init(void) +static int __init hdlc_eth_init(void) { register_hdlc_protocol(&proto); return 0; @@ -118,14 +118,14 @@ static int __init mod_init(void) -static void __exit mod_exit(void) +static void __exit hdlc_eth_exit(void) { unregister_hdlc_protocol(&proto); } -module_init(mod_init); -module_exit(mod_exit); +module_init(hdlc_eth_init); +module_exit(hdlc_eth_exit); MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); MODULE_DESCRIPTION("Ethernet encapsulation support for generic HDLC"); diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c index d2bf72bf3bd7..9b7ebf8bd85c 100644 --- a/drivers/net/wan/hdlc_x25.c +++ b/drivers/net/wan/hdlc_x25.c @@ -365,19 +365,19 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr) return -EINVAL; } -static int __init mod_init(void) +static int __init hdlc_x25_init(void) { register_hdlc_protocol(&proto); return 0; } -static void __exit mod_exit(void) +static void __exit hdlc_x25_exit(void) { unregister_hdlc_protocol(&proto); } -module_init(mod_init); -module_exit(mod_exit); +module_init(hdlc_x25_init); +module_exit(hdlc_x25_exit); MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); MODULE_DESCRIPTION("X.25 protocol support for generic HDLC"); diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c index 75cc2d80fde8..26c7ae242db6 100644 --- a/drivers/net/wireless/ath/ath11k/mhi.c +++ b/drivers/net/wireless/ath/ath11k/mhi.c @@ -330,6 +330,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci) mhi_ctrl->cntrl_dev = ab->dev; mhi_ctrl->fw_image = ab_pci->amss_path; mhi_ctrl->regs = ab->mem; + mhi_ctrl->reg_len = ab->mem_len; ret = ath11k_mhi_get_msi(ab_pci); if (ret) { diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c index 
2403490cbc26..b4b1f75b9c2a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c @@ -37,6 +37,7 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data, u32 sha1 = 0; u16 mac_type = 0, rf_id = 0; u8 *pnvm_data = NULL, *tmp; + bool hw_match = false; u32 size = 0; int ret; @@ -83,6 +84,9 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data, break; } + if (hw_match) + break; + mac_type = le16_to_cpup((__le16 *)data); rf_id = le16_to_cpup((__le16 *)(data + sizeof(__le16))); @@ -90,15 +94,9 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data, "Got IWL_UCODE_TLV_HW_TYPE mac_type 0x%0x rf_id 0x%0x\n", mac_type, rf_id); - if (mac_type != CSR_HW_REV_TYPE(trans->hw_rev) || - rf_id != CSR_HW_RFID_TYPE(trans->hw_rf_id)) { - IWL_DEBUG_FW(trans, - "HW mismatch, skipping PNVM section, mac_type 0x%0x, rf_id 0x%0x.\n", - CSR_HW_REV_TYPE(trans->hw_rev), trans->hw_rf_id); - ret = -ENOENT; - goto out; - } - + if (mac_type == CSR_HW_REV_TYPE(trans->hw_rev) && + rf_id == CSR_HW_RFID_TYPE(trans->hw_rf_id)) + hw_match = true; break; case IWL_UCODE_TLV_SEC_RT: { struct iwl_pnvm_section *section = (void *)data; @@ -149,6 +147,15 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data, } done: + if (!hw_match) { + IWL_DEBUG_FW(trans, + "HW mismatch, skipping PNVM section (need mac_type 0x%x rf_id 0x%x)\n", + CSR_HW_REV_TYPE(trans->hw_rev), + CSR_HW_RFID_TYPE(trans->hw_rf_id)); + ret = -ENOENT; + goto out; + } + if (!size) { IWL_DEBUG_FW(trans, "Empty PNVM, skipping.\n"); ret = -ENOENT; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 16baee3d52ae..0b8a0cd3b652 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -1110,12 +1110,80 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_bz_a0_mr_a0, iwl_ax211_name), +/* SoF with JF2 */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + iwlax210_2ax_cfg_so_jf_b0, iwl9560_name), + +/* SoF with JF */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + iwlax210_2ax_cfg_so_jf_b0, iwl9461_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + iwlax210_2ax_cfg_so_jf_b0, iwl9462_name), + /* So with GF */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_160, IWL_CFG_ANY, 
IWL_CFG_NO_CDB, - iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name) + iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name), + +/* So with JF2 */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + iwlax210_2ax_cfg_so_jf_b0, iwl9560_name), + +/* So with JF */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + iwlax210_2ax_cfg_so_jf_b0, iwl9461_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + iwlax210_2ax_cfg_so_jf_b0, iwl9462_name) #endif /* CONFIG_IWLMVM */ }; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c index 863aa18b3024..43960770a9af 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c @@ -111,7 +111,7 @@ mt7915_mcu_get_cipher(int cipher) case WLAN_CIPHER_SUITE_SMS4: return MCU_CIPHER_WAPI; default: - return MT_CIPHER_NONE; + return MCU_CIPHER_NONE; } } diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h index edd3ba3a0c2d..e68a562cc5b4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h @@ -1073,7 +1073,8 @@ enum { }; enum mcu_cipher_type { - MCU_CIPHER_WEP40 = 1, + MCU_CIPHER_NONE = 0, + MCU_CIPHER_WEP40, MCU_CIPHER_WEP104, MCU_CIPHER_WEP128, MCU_CIPHER_TKIP, diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c index 7fd21049ff5a..63ec140c9c37 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c @@ -389,6 +389,7 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, case WLAN_CIPHER_SUITE_WEP104: if (!mvif->wep_sta) return -EOPNOTSUPP; + break; case WLAN_CIPHER_SUITE_TKIP: case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c index c2c4dc196802..9fbaacc67cfa 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c @@ -111,7 +111,7 @@ mt7921_mcu_get_cipher(int cipher) case WLAN_CIPHER_SUITE_SMS4: return MCU_CIPHER_WAPI; default: - return MT_CIPHER_NONE; + return MCU_CIPHER_NONE; } } @@ -931,7 +931,7 @@ static int mt7921_load_firmware(struct mt7921_dev *dev) ret = mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY); if (ret) { dev_dbg(dev->mt76.dev, "Firmware is already download\n"); - return -EIO; + goto fw_loaded; } 
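
The one-line mt7921_set_key() fix above adds a break so the WEP case no longer falls through into the following cipher cases. A compact illustration of why implicit fallthrough bites, with hypothetical cipher handling rather than the driver's real key plumbing:

    #include <stdio.h>

    enum cipher { WEP40, TKIP, CCMP, GCMP };

    /* Without the break after the WEP check, control would run on into
     * the TKIP/CCMP branch and silently overwrite the WEP-specific
     * setup -- the class of bug the mt7921_set_key hunk fixes. */
    static const char *install_key(enum cipher c, int have_wep_sta)
    {
        int wep;

        switch (c) {
        case WEP40:
            if (!have_wep_sta)
                return "-EOPNOTSUPP";
            wep = 1;
            break;                  /* the added break */
        case TKIP:
        case CCMP:
        case GCMP:
            wep = 0;
            break;
        default:
            return "-EOPNOTSUPP";
        }
        return wep ? "wep path" : "pairwise path";
    }

    int main(void)
    {
        printf("%s\n", install_key(WEP40, 1));  /* wep path */
        printf("%s\n", install_key(TKIP, 0));   /* pairwise path */
        return 0;
    }
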
ret = mt7921_load_patch(dev); @@ -949,6 +949,7 @@ static int mt7921_load_firmware(struct mt7921_dev *dev) return -EIO; } +fw_loaded: mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_FWDL], false); #ifdef CONFIG_PM diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h index d76cf8f8dfdf..de3c091f6736 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h @@ -199,7 +199,8 @@ struct sta_rec_sec { } __packed; enum mcu_cipher_type { - MCU_CIPHER_WEP40 = 1, + MCU_CIPHER_NONE = 0, + MCU_CIPHER_WEP40, MCU_CIPHER_WEP104, MCU_CIPHER_WEP128, MCU_CIPHER_TKIP, diff --git a/drivers/net/wireless/virt_wifi.c b/drivers/net/wireless/virt_wifi.c index 1df959532c7d..514f2c1124b6 100644 --- a/drivers/net/wireless/virt_wifi.c +++ b/drivers/net/wireless/virt_wifi.c @@ -136,6 +136,29 @@ static struct ieee80211_supported_band band_5ghz = { /* Assigned at module init. Guaranteed locally-administered and unicast. */ static u8 fake_router_bssid[ETH_ALEN] __ro_after_init = {}; +static void virt_wifi_inform_bss(struct wiphy *wiphy) +{ + u64 tsf = div_u64(ktime_get_boottime_ns(), 1000); + struct cfg80211_bss *informed_bss; + static const struct { + u8 tag; + u8 len; + u8 ssid[8]; + } __packed ssid = { + .tag = WLAN_EID_SSID, + .len = 8, + .ssid = "VirtWifi", + }; + + informed_bss = cfg80211_inform_bss(wiphy, &channel_5ghz, + CFG80211_BSS_FTYPE_PRESP, + fake_router_bssid, tsf, + WLAN_CAPABILITY_ESS, 0, + (void *)&ssid, sizeof(ssid), + DBM_TO_MBM(-50), GFP_KERNEL); + cfg80211_put_bss(wiphy, informed_bss); +} + /* Called with the rtnl lock held. */ static int virt_wifi_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request) @@ -156,28 +179,13 @@ static int virt_wifi_scan(struct wiphy *wiphy, /* Acquires and releases the rdev BSS lock. */ static void virt_wifi_scan_result(struct work_struct *work) { - struct { - u8 tag; - u8 len; - u8 ssid[8]; - } __packed ssid = { - .tag = WLAN_EID_SSID, .len = 8, .ssid = "VirtWifi", - }; - struct cfg80211_bss *informed_bss; struct virt_wifi_wiphy_priv *priv = container_of(work, struct virt_wifi_wiphy_priv, scan_result.work); struct wiphy *wiphy = priv_to_wiphy(priv); struct cfg80211_scan_info scan_info = { .aborted = false }; - u64 tsf = div_u64(ktime_get_boottime_ns(), 1000); - informed_bss = cfg80211_inform_bss(wiphy, &channel_5ghz, - CFG80211_BSS_FTYPE_PRESP, - fake_router_bssid, tsf, - WLAN_CAPABILITY_ESS, 0, - (void *)&ssid, sizeof(ssid), - DBM_TO_MBM(-50), GFP_KERNEL); - cfg80211_put_bss(wiphy, informed_bss); + virt_wifi_inform_bss(wiphy); /* Schedules work which acquires and releases the rtnl lock. 
*/ cfg80211_scan_done(priv->scan_request, &scan_info); @@ -225,10 +233,12 @@ static int virt_wifi_connect(struct wiphy *wiphy, struct net_device *netdev, if (!could_schedule) return -EBUSY; - if (sme->bssid) + if (sme->bssid) { ether_addr_copy(priv->connect_requested_bss, sme->bssid); - else + } else { + virt_wifi_inform_bss(wiphy); eth_zero_addr(priv->connect_requested_bss); + } wiphy_debug(wiphy, "connect\n"); @@ -241,11 +251,13 @@ static void virt_wifi_connect_complete(struct work_struct *work) struct virt_wifi_netdev_priv *priv = container_of(work, struct virt_wifi_netdev_priv, connect.work); u8 *requested_bss = priv->connect_requested_bss; - bool has_addr = !is_zero_ether_addr(requested_bss); bool right_addr = ether_addr_equal(requested_bss, fake_router_bssid); u16 status = WLAN_STATUS_SUCCESS; - if (!priv->is_up || (has_addr && !right_addr)) + if (is_zero_ether_addr(requested_bss)) + requested_bss = NULL; + + if (!priv->is_up || (requested_bss && !right_addr)) status = WLAN_STATUS_UNSPECIFIED_FAILURE; else priv->is_connected = true; diff --git a/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c b/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c index 804e6c4f2c78..519361ec40df 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c +++ b/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c @@ -64,10 +64,9 @@ static struct ipc_chnl_cfg modem_cfg[] = { int ipc_chnl_cfg_get(struct ipc_chnl_cfg *chnl_cfg, int index) { - int array_size = ARRAY_SIZE(modem_cfg); - - if (index >= array_size) { - pr_err("index: %d and array_size %d", index, array_size); + if (index >= ARRAY_SIZE(modem_cfg)) { + pr_err("index: %d and array size %zu", index, + ARRAY_SIZE(modem_cfg)); return -ECHRNG; } diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c index 46f76e8aae92..0a472ce77370 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c +++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c @@ -24,15 +24,7 @@ int ipc_imem_sys_wwan_open(struct iosm_imem *ipc_imem, int if_id) return -EIO; } - /* check for the interafce id - * if if_id 1 to 8 then create IP MUX channel sessions. - * To start MUX session from 0 as network interface id would start - * from 1 so map it to if_id = if_id - 1 - */ - if (if_id >= IP_MUX_SESSION_START && if_id <= IP_MUX_SESSION_END) - return ipc_mux_open_session(ipc_imem->mux, if_id - 1); - - return -EINVAL; + return ipc_mux_open_session(ipc_imem->mux, if_id); } /* Release a net link to CP. */ @@ -41,7 +33,7 @@ void ipc_imem_sys_wwan_close(struct iosm_imem *ipc_imem, int if_id, { if (ipc_imem->mux && if_id >= IP_MUX_SESSION_START && if_id <= IP_MUX_SESSION_END) - ipc_mux_close_session(ipc_imem->mux, if_id - 1); + ipc_mux_close_session(ipc_imem->mux, if_id); } /* Tasklet call to do uplink transfer. 
*/ @@ -83,13 +75,8 @@ int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem, goto out; } - if (if_id >= IP_MUX_SESSION_START && if_id <= IP_MUX_SESSION_END) - /* Route the UL packet through IP MUX Layer */ - ret = ipc_mux_ul_trigger_encode(ipc_imem->mux, - if_id - 1, skb); - else - dev_err(ipc_imem->dev, - "invalid if_id %d: ", if_id); + /* Route the UL packet through IP MUX Layer */ + ret = ipc_mux_ul_trigger_encode(ipc_imem->mux, if_id, skb); out: return ret; } diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h index fd356dafbdd6..2007fe23e9a5 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h +++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h @@ -27,11 +27,11 @@ #define BOOT_CHECK_DEFAULT_TIMEOUT 400 /* IP MUX channel range */ -#define IP_MUX_SESSION_START 1 -#define IP_MUX_SESSION_END 8 +#define IP_MUX_SESSION_START 0 +#define IP_MUX_SESSION_END 7 /* Default IP MUX channel */ -#define IP_MUX_SESSION_DEFAULT 1 +#define IP_MUX_SESSION_DEFAULT 0 /** * ipc_imem_sys_port_open - Open a port link to CP. diff --git a/drivers/net/wwan/iosm/iosm_ipc_mmio.h b/drivers/net/wwan/iosm/iosm_ipc_mmio.h index 45e6923da78f..f861994a6d90 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_mmio.h +++ b/drivers/net/wwan/iosm/iosm_ipc_mmio.h @@ -10,10 +10,10 @@ #define IOSM_CP_VERSION 0x0100UL /* DL dir Aggregation support mask */ -#define DL_AGGR BIT(23) +#define DL_AGGR BIT(9) /* UL dir Aggregation support mask */ -#define UL_AGGR BIT(22) +#define UL_AGGR BIT(8) /* UL flow credit support mask */ #define UL_FLOW_CREDIT BIT(21) diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c index e634ffc6ec08..bdb2d32cdb6d 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c +++ b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c @@ -288,7 +288,7 @@ static int ipc_mux_net_receive(struct iosm_mux *ipc_mux, int if_id, /* Pass the packet to the netif layer. */ dest_skb->priority = service_class; - return ipc_wwan_receive(wwan, dest_skb, false, if_id + 1); + return ipc_wwan_receive(wwan, dest_skb, false, if_id); } /* Decode Flow Credit Table in the block */ @@ -320,7 +320,7 @@ static void ipc_mux_dl_fcth_decode(struct iosm_mux *ipc_mux, return; } - ul_credits = fct->vfl.nr_of_bytes; + ul_credits = le32_to_cpu(fct->vfl.nr_of_bytes); dev_dbg(ipc_mux->dev, "Flow_Credit:: if_id[%d] Old: %d Grants: %d", if_id, ipc_mux->session[if_id].ul_flow_credits, ul_credits); @@ -586,7 +586,7 @@ static bool ipc_mux_lite_send_qlt(struct iosm_mux *ipc_mux) qlt->reserved[0] = 0; qlt->reserved[1] = 0; - qlt->vfl.nr_of_bytes = session->ul_list.qlen; + qlt->vfl.nr_of_bytes = cpu_to_le32(session->ul_list.qlen); /* Add QLT to the transfer list. */ skb_queue_tail(&ipc_mux->channel->ul_list, diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h index 4a74e3c9457f..aae83db5cbb8 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h +++ b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h @@ -106,7 +106,7 @@ struct mux_lite_cmdh { * @nr_of_bytes: Number of bytes available to transmit in the queue. 
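
The mux_lite_vfl change here retypes nr_of_bytes as __le32 and adds cpu_to_le32()/le32_to_cpu() at each use, so the on-wire byte order is explicit instead of whatever the host happens to be. A freestanding sketch of the same boundary conversions (the helper names are made up):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Values that cross the wire in little-endian order are converted
     * explicitly at the boundary instead of being stored through a raw
     * host-order u32. */
    static uint32_t get_le32(const uint8_t *p)
    {
        return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
               (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
    }

    static void put_le32(uint8_t *p, uint32_t v)
    {
        p[0] = v;
        p[1] = v >> 8;
        p[2] = v >> 16;
        p[3] = v >> 24;
    }

    int main(void)
    {
        uint8_t wire[4];

        put_le32(wire, 0x12345678);                 /* cpu_to_le32 analogue */
        printf("0x%08" PRIx32 "\n", get_le32(wire)); /* le32_to_cpu analogue */
        return 0;
    }
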
*/ struct mux_lite_vfl { - u32 nr_of_bytes; + __le32 nr_of_bytes; }; /** diff --git a/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c b/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c index 91109e27efd3..35d590743d3a 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c +++ b/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c @@ -412,8 +412,8 @@ struct sk_buff *ipc_protocol_dl_td_process(struct iosm_protocol *ipc_protocol, } if (p_td->buffer.address != IPC_CB(skb)->mapping) { - dev_err(ipc_protocol->dev, "invalid buf=%p or skb=%p", - (void *)p_td->buffer.address, skb->data); + dev_err(ipc_protocol->dev, "invalid buf=%llx or skb=%p", + (unsigned long long)p_td->buffer.address, skb->data); ipc_pcie_kfree_skb(ipc_protocol->pcie, skb); skb = NULL; goto ret; diff --git a/drivers/net/wwan/iosm/iosm_ipc_uevent.c b/drivers/net/wwan/iosm/iosm_ipc_uevent.c index 2229d752926c..d12188ffed7e 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_uevent.c +++ b/drivers/net/wwan/iosm/iosm_ipc_uevent.c @@ -37,7 +37,7 @@ void ipc_uevent_send(struct device *dev, char *uevent) /* Store the device and event information */ info->dev = dev; - snprintf(info->uevent, MAX_UEVENT_LEN, "%s: %s", dev_name(dev), uevent); + snprintf(info->uevent, MAX_UEVENT_LEN, "IOSM_EVENT=%s", uevent); /* Schedule uevent in process context using work queue */ schedule_work(&info->work); diff --git a/drivers/net/wwan/iosm/iosm_ipc_wwan.c b/drivers/net/wwan/iosm/iosm_ipc_wwan.c index c999c64001f4..b571d9cedba4 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_wwan.c +++ b/drivers/net/wwan/iosm/iosm_ipc_wwan.c @@ -107,6 +107,7 @@ static int ipc_wwan_link_transmit(struct sk_buff *skb, { struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev); struct iosm_wwan *ipc_wwan = priv->ipc_wwan; + unsigned int len = skb->len; int if_id = priv->if_id; int ret; @@ -123,6 +124,8 @@ static int ipc_wwan_link_transmit(struct sk_buff *skb, /* Return code of zero is success */ if (ret == 0) { + netdev->stats.tx_packets++; + netdev->stats.tx_bytes += len; ret = NETDEV_TX_OK; } else if (ret == -EBUSY) { ret = NETDEV_TX_BUSY; @@ -140,7 +143,8 @@ exit: ret); dev_kfree_skb_any(skb); - return ret; + netdev->stats.tx_dropped++; + return NETDEV_TX_OK; } /* Ops structure for wwan net link */ @@ -158,6 +162,7 @@ static void ipc_wwan_setup(struct net_device *iosm_dev) iosm_dev->priv_flags |= IFF_NO_QUEUE; iosm_dev->type = ARPHRD_NONE; + iosm_dev->mtu = ETH_DATA_LEN; iosm_dev->min_mtu = ETH_MIN_MTU; iosm_dev->max_mtu = ETH_MAX_MTU; @@ -223,7 +228,7 @@ static void ipc_wwan_dellink(void *ctxt, struct net_device *dev, RCU_INIT_POINTER(ipc_wwan->sub_netlist[if_id], NULL); /* unregistering includes synchronize_net() */ - unregister_netdevice(dev); + unregister_netdevice_queue(dev, head); unlock: mutex_unlock(&ipc_wwan->if_mutex); @@ -252,8 +257,8 @@ int ipc_wwan_receive(struct iosm_wwan *ipc_wwan, struct sk_buff *skb_arg, skb->pkt_type = PACKET_HOST; - if (if_id < (IP_MUX_SESSION_START - 1) || - if_id > (IP_MUX_SESSION_END - 1)) { + if (if_id < IP_MUX_SESSION_START || + if_id > IP_MUX_SESSION_END) { ret = -EINVAL; goto free; } diff --git a/drivers/net/wwan/mhi_wwan_ctrl.c b/drivers/net/wwan/mhi_wwan_ctrl.c index 1bc6b69aa530..d0a98f34c54d 100644 --- a/drivers/net/wwan/mhi_wwan_ctrl.c +++ b/drivers/net/wwan/mhi_wwan_ctrl.c @@ -41,14 +41,14 @@ struct mhi_wwan_dev { /* Increment RX budget and schedule RX refill if necessary */ static void mhi_wwan_rx_budget_inc(struct mhi_wwan_dev *mhiwwan) { - spin_lock(&mhiwwan->rx_lock); + spin_lock_bh(&mhiwwan->rx_lock); mhiwwan->rx_budget++; if 
(test_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags)) schedule_work(&mhiwwan->rx_refill); - spin_unlock(&mhiwwan->rx_lock); + spin_unlock_bh(&mhiwwan->rx_lock); } /* Decrement RX budget if non-zero and return true on success */ @@ -56,7 +56,7 @@ static bool mhi_wwan_rx_budget_dec(struct mhi_wwan_dev *mhiwwan) { bool ret = false; - spin_lock(&mhiwwan->rx_lock); + spin_lock_bh(&mhiwwan->rx_lock); if (mhiwwan->rx_budget) { mhiwwan->rx_budget--; @@ -64,7 +64,7 @@ static bool mhi_wwan_rx_budget_dec(struct mhi_wwan_dev *mhiwwan) ret = true; } - spin_unlock(&mhiwwan->rx_lock); + spin_unlock_bh(&mhiwwan->rx_lock); return ret; } @@ -110,7 +110,7 @@ static int mhi_wwan_ctrl_start(struct wwan_port *port) int ret; /* Start mhi device's channel(s) */ - ret = mhi_prepare_for_transfer(mhiwwan->mhi_dev); + ret = mhi_prepare_for_transfer(mhiwwan->mhi_dev, 0); if (ret) return ret; @@ -130,9 +130,9 @@ static void mhi_wwan_ctrl_stop(struct wwan_port *port) { struct mhi_wwan_dev *mhiwwan = wwan_port_get_drvdata(port); - spin_lock(&mhiwwan->rx_lock); + spin_lock_bh(&mhiwwan->rx_lock); clear_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags); - spin_unlock(&mhiwwan->rx_lock); + spin_unlock_bh(&mhiwwan->rx_lock); cancel_work_sync(&mhiwwan->rx_refill); diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c index 3e16c318e705..35ece98134c0 100644 --- a/drivers/net/wwan/wwan_core.c +++ b/drivers/net/wwan/wwan_core.c @@ -164,11 +164,14 @@ static struct wwan_device *wwan_create_dev(struct device *parent) goto done_unlock; id = ida_alloc(&wwan_dev_ids, GFP_KERNEL); - if (id < 0) + if (id < 0) { + wwandev = ERR_PTR(id); goto done_unlock; + } wwandev = kzalloc(sizeof(*wwandev), GFP_KERNEL); if (!wwandev) { + wwandev = ERR_PTR(-ENOMEM); ida_free(&wwan_dev_ids, id); goto done_unlock; } @@ -182,7 +185,8 @@ static struct wwan_device *wwan_create_dev(struct device *parent) err = device_register(&wwandev->dev); if (err) { put_device(&wwandev->dev); - wwandev = NULL; + wwandev = ERR_PTR(err); + goto done_unlock; } done_unlock: @@ -984,6 +988,8 @@ static void wwan_create_default_link(struct wwan_device *wwandev, goto unlock; } + rtnl_configure_link(dev, NULL); /* Link initialized, notify new link */ + unlock: rtnl_unlock(); @@ -1012,8 +1018,8 @@ int wwan_register_ops(struct device *parent, const struct wwan_ops *ops, return -EINVAL; wwandev = wwan_create_dev(parent); - if (!wwandev) - return -ENOMEM; + if (IS_ERR(wwandev)) + return PTR_ERR(wwandev); if (WARN_ON(wwandev->ops)) { wwan_remove_dev(wwandev); diff --git a/drivers/nfc/nfcsim.c b/drivers/nfc/nfcsim.c index a9864fcdfba6..dd27c85190d3 100644 --- a/drivers/nfc/nfcsim.c +++ b/drivers/nfc/nfcsim.c @@ -192,8 +192,7 @@ static void nfcsim_recv_wq(struct work_struct *work) if (!IS_ERR(skb)) dev_kfree_skb(skb); - - skb = ERR_PTR(-ENODEV); + return; } dev->cb(dev->nfc_digital_dev, dev->arg, skb); diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c index eb5d7a5beac7..e3e72b8a29e3 100644 --- a/drivers/nfc/s3fwrn5/firmware.c +++ b/drivers/nfc/s3fwrn5/firmware.c @@ -423,7 +423,7 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info) if (IS_ERR(tfm)) { ret = PTR_ERR(tfm); dev_err(&fw_info->ndev->nfc_dev->dev, - "Cannot allocate shash (code=%d)\n", ret); + "Cannot allocate shash (code=%pe)\n", tfm); goto out; } diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index 2403b71b601e..745478213ff2 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -2527,7 +2527,7 @@ static void 
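
wwan_create_dev() above now reports failures as ERR_PTR(err) rather than NULL, letting wwan_register_ops() forward the precise errno. A self-contained sketch of the ERR_PTR encoding itself, reimplemented for userspace (the kernel's real helpers live in <linux/err.h>):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Small negative errno values are folded into otherwise-impossible
     * pointer values, so one return slot carries either a handle or a
     * precise error code. */
    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long err) { return (void *)err; }
    static inline long PTR_ERR(const void *p) { return (long)p; }
    static inline int IS_ERR(const void *p)
    {
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    struct wwan_dev { int id; };

    /* Each failure reports its own errno instead of collapsing
     * everything to NULL and a guessed -ENOMEM in the caller. */
    static struct wwan_dev *create_dev(int want_id)
    {
        struct wwan_dev *d;

        if (want_id < 0)
            return ERR_PTR(want_id);    /* e.g. failed ID allocation */

        d = malloc(sizeof(*d));
        if (!d)
            return ERR_PTR(-ENOMEM);

        d->id = want_id;
        return d;
    }

    int main(void)
    {
        struct wwan_dev *d = create_dev(-EBUSY);

        if (IS_ERR(d)) {
            printf("create_dev failed: %ld\n", PTR_ERR(d));
            d = create_dev(3);
        }
        if (!IS_ERR(d)) {
            printf("got dev %d\n", d->id);
            free(d);
        }
        return 0;
    }
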
deactivate_labels(void *region) static int init_active_labels(struct nd_region *nd_region) { - int i; + int i, rc = 0; for (i = 0; i < nd_region->ndr_mappings; i++) { struct nd_mapping *nd_mapping = &nd_region->mapping[i]; @@ -2546,13 +2546,14 @@ static int init_active_labels(struct nd_region *nd_region) else if (test_bit(NDD_LABELING, &nvdimm->flags)) /* fail, labels needed to disambiguate dpa */; else - return 0; + continue; dev_err(&nd_region->dev, "%s: is %s, failing probe\n", dev_name(&nd_mapping->nvdimm->dev), test_bit(NDD_LOCKED, &nvdimm->flags) ? "locked" : "disabled"); - return -ENXIO; + rc = -ENXIO; + goto out; } nd_mapping->ndd = ndd; atomic_inc(&nvdimm->busy); @@ -2586,13 +2587,17 @@ static int init_active_labels(struct nd_region *nd_region) break; } - if (i < nd_region->ndr_mappings) { + if (i < nd_region->ndr_mappings) + rc = -ENOMEM; + +out: + if (rc) { deactivate_labels(nd_region); - return -ENOMEM; + return rc; } return devm_add_action_or_reset(&nd_region->dev, deactivate_labels, - nd_region); + nd_region); } int nd_region_register_namespaces(struct nd_region *nd_region, int *err) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 11779be42186..dfd9dec0c1f6 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -900,7 +900,10 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns, cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req))); cmnd->write_zeroes.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1); - cmnd->write_zeroes.control = 0; + if (nvme_ns_has_pi(ns)) + cmnd->write_zeroes.control = cpu_to_le16(NVME_RW_PRINFO_PRACT); + else + cmnd->write_zeroes.control = 0; return BLK_STS_OK; } @@ -3807,6 +3810,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid, static void nvme_ns_remove(struct nvme_ns *ns) { + bool last_path = false; + if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags)) return; @@ -3815,8 +3820,6 @@ static void nvme_ns_remove(struct nvme_ns *ns) mutex_lock(&ns->ctrl->subsys->lock); list_del_rcu(&ns->siblings); - if (list_empty(&ns->head->list)) - list_del_init(&ns->head->entry); mutex_unlock(&ns->ctrl->subsys->lock); synchronize_rcu(); /* guarantee not available in head->list */ @@ -3836,7 +3839,15 @@ static void nvme_ns_remove(struct nvme_ns *ns) list_del_init(&ns->list); up_write(&ns->ctrl->namespaces_rwsem); - nvme_mpath_check_last_path(ns); + /* Synchronize with nvme_init_ns_head() */ + mutex_lock(&ns->head->subsys->lock); + if (list_empty(&ns->head->list)) { + list_del_init(&ns->head->entry); + last_path = true; + } + mutex_unlock(&ns->head->subsys->lock); + if (last_path) + nvme_mpath_shutdown_disk(ns->head); nvme_put_ns(ns); } diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 0ea5298469c3..3f32c5e86bfc 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -760,14 +760,21 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id) #endif } -void nvme_mpath_remove_disk(struct nvme_ns_head *head) +void nvme_mpath_shutdown_disk(struct nvme_ns_head *head) { if (!head->disk) return; + kblockd_schedule_work(&head->requeue_work); if (head->disk->flags & GENHD_FL_UP) { nvme_cdev_del(&head->cdev, &head->cdev_device); del_gendisk(head->disk); } +} + +void nvme_mpath_remove_disk(struct nvme_ns_head *head) +{ + if (!head->disk) + return; blk_set_queue_dying(head->disk->queue); /* make sure all pending bios are cleaned up */ kblockd_schedule_work(&head->requeue_work); diff --git a/drivers/nvme/host/nvme.h 
b/drivers/nvme/host/nvme.h index 18ef8dd03a90..5cd1fa3b8464 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -716,14 +716,7 @@ void nvme_mpath_uninit(struct nvme_ctrl *ctrl); void nvme_mpath_stop(struct nvme_ctrl *ctrl); bool nvme_mpath_clear_current_path(struct nvme_ns *ns); void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl); - -static inline void nvme_mpath_check_last_path(struct nvme_ns *ns) -{ - struct nvme_ns_head *head = ns->head; - - if (head->disk && list_empty(&head->list)) - kblockd_schedule_work(&head->requeue_work); -} +void nvme_mpath_shutdown_disk(struct nvme_ns_head *head); static inline void nvme_trace_bio_complete(struct request *req) { @@ -772,7 +765,7 @@ static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns) static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl) { } -static inline void nvme_mpath_check_last_path(struct nvme_ns *ns) +static inline void nvme_mpath_shutdown_disk(struct nvme_ns_head *head) { } static inline void nvme_trace_bio_complete(struct request *req) diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index d3c5086673bc..51852085239e 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1554,6 +1554,28 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid) wmb(); /* ensure the first interrupt sees the initialization */ } +/* + * Try getting shutdown_lock while setting up IO queues. + */ +static int nvme_setup_io_queues_trylock(struct nvme_dev *dev) +{ + /* + * Give up if the lock is being held by nvme_dev_disable. + */ + if (!mutex_trylock(&dev->shutdown_lock)) + return -ENODEV; + + /* + * Controller is in the wrong state, fail early. + */ + if (dev->ctrl.state != NVME_CTRL_CONNECTING) { + mutex_unlock(&dev->shutdown_lock); + return -ENODEV; + } + + return 0; +} + static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled) { struct nvme_dev *dev = nvmeq->dev; @@ -1582,8 +1604,11 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled) goto release_cq; nvmeq->cq_vector = vector; - nvme_init_queue(nvmeq, qid); + result = nvme_setup_io_queues_trylock(dev); + if (result) + return result; + nvme_init_queue(nvmeq, qid); if (!polled) { result = queue_request_irq(nvmeq); if (result < 0) @@ -1591,10 +1616,12 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled) } set_bit(NVMEQ_ENABLED, &nvmeq->flags); + mutex_unlock(&dev->shutdown_lock); return result; release_sq: dev->online_queues--; + mutex_unlock(&dev->shutdown_lock); adapter_delete_sq(dev, qid); release_cq: adapter_delete_cq(dev, qid); @@ -2167,7 +2194,18 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) if (nr_io_queues == 0) return 0; - clear_bit(NVMEQ_ENABLED, &adminq->flags); + /* + * Free IRQ resources as soon as NVMEQ_ENABLED bit transitions + * from set to unset. If there is a window before it is truly freed, + * pci_free_irq_vectors() jumping into this window will crash. + * Also take the lock to avoid racing with pci_free_irq_vectors() in + * nvme_dev_disable() path. 
+ */ + result = nvme_setup_io_queues_trylock(dev); + if (result) + return result; + if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags)) + pci_free_irq(pdev, 0, adminq); if (dev->cmb_use_sqes) { result = nvme_cmb_qdepth(dev, nr_io_queues, @@ -2183,14 +2221,17 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) result = nvme_remap_bar(dev, size); if (!result) break; - if (!--nr_io_queues) - return -ENOMEM; + if (!--nr_io_queues) { + result = -ENOMEM; + goto out_unlock; + } } while (1); adminq->q_db = dev->dbs; retry: /* Deregister the admin queue's interrupt */ - pci_free_irq(pdev, 0, adminq); + if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags)) + pci_free_irq(pdev, 0, adminq); /* * If we enable msix early due to not intx, disable it again before @@ -2199,8 +2240,10 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) pci_free_irq_vectors(pdev); result = nvme_setup_irqs(dev, nr_io_queues); - if (result <= 0) - return -EIO; + if (result <= 0) { + result = -EIO; + goto out_unlock; + } dev->num_vecs = result; result = max(result - 1, 1); @@ -2214,8 +2257,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) */ result = queue_request_irq(adminq); if (result) - return result; + goto out_unlock; set_bit(NVMEQ_ENABLED, &adminq->flags); + mutex_unlock(&dev->shutdown_lock); result = nvme_create_io_queues(dev); if (result || dev->online_queues < 2) @@ -2224,6 +2268,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) if (dev->online_queues - 1 < dev->max_qid) { nr_io_queues = dev->online_queues - 1; nvme_disable_io_queues(dev); + result = nvme_setup_io_queues_trylock(dev); + if (result) + return result; nvme_suspend_io_queues(dev); goto retry; } @@ -2232,6 +2279,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) dev->io_queues[HCTX_TYPE_READ], dev->io_queues[HCTX_TYPE_POLL]); return 0; +out_unlock: + mutex_unlock(&dev->shutdown_lock); + return result; } static void nvme_del_queue_end(struct request *req, blk_status_t error) @@ -2581,7 +2631,9 @@ static void nvme_reset_work(struct work_struct *work) bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL); int result; - if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING)) { + if (dev->ctrl.state != NVME_CTRL_RESETTING) { + dev_warn(dev->ctrl.device, "ctrl state %d is not RESETTING\n", + dev->ctrl.state); result = -ENODEV; goto out; } @@ -2962,7 +3014,6 @@ static void nvme_remove(struct pci_dev *pdev) if (!pci_device_is_present(pdev)) { nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD); nvme_dev_disable(dev, true); - nvme_dev_remove_admin(dev); } flush_work(&dev->ctrl.reset_work); diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 12acfe05cd68..8cb15ee5b249 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -123,7 +123,6 @@ struct nvme_tcp_ctrl { struct blk_mq_tag_set admin_tag_set; struct sockaddr_storage addr; struct sockaddr_storage src_addr; - struct net_device *ndev; struct nvme_ctrl ctrl; struct work_struct err_work; @@ -2533,8 +2532,7 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev, } if (opts->mask & NVMF_OPT_HOST_IFACE) { - ctrl->ndev = dev_get_by_name(&init_net, opts->host_iface); - if (!ctrl->ndev) { + if (!__dev_get_by_name(&init_net, opts->host_iface)) { pr_err("invalid interface passed: %s\n", opts->host_iface); ret = -ENODEV; diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h index daaf700eae79..35bac7a25422 100644 --- a/drivers/nvme/host/trace.h +++ b/drivers/nvme/host/trace.h @@ -56,7 +56,7 @@ TRACE_EVENT(nvme_setup_cmd, 
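
The new nvme_setup_io_queues_trylock() helper above captures a common teardown-race pattern: the setup path takes the shutdown lock opportunistically, gives up instead of blocking if teardown holds it, and re-checks the controller state once inside the lock. A pthread sketch of the same shape (build with -pthread; names are illustrative):

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    enum state { CONNECTING, DELETING };

    static pthread_mutex_t shutdown_lock = PTHREAD_MUTEX_INITIALIZER;
    static enum state ctrl_state = CONNECTING;

    /* Take the teardown lock opportunistically, bail out rather than
     * block, and re-check the state in case teardown won the race. */
    static int setup_trylock(void)
    {
        if (pthread_mutex_trylock(&shutdown_lock))
            return -ENODEV;     /* teardown holds the lock: give up */

        if (ctrl_state != CONNECTING) {
            pthread_mutex_unlock(&shutdown_lock);
            return -ENODEV;     /* state changed under us */
        }
        return 0;               /* caller must unlock when done */
    }

    int main(void)
    {
        if (setup_trylock() == 0) {
            printf("setup may proceed\n");
            pthread_mutex_unlock(&shutdown_lock);
        }
        return 0;
    }
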
__field(u8, fctype) __field(u16, cid) __field(u32, nsid) - __field(u64, metadata) + __field(bool, metadata) __array(u8, cdw10, 24) ), TP_fast_assign( @@ -66,13 +66,13 @@ TRACE_EVENT(nvme_setup_cmd, __entry->flags = cmd->common.flags; __entry->cid = cmd->common.command_id; __entry->nsid = le32_to_cpu(cmd->common.nsid); - __entry->metadata = le64_to_cpu(cmd->common.metadata); + __entry->metadata = !!blk_integrity_rq(req); __entry->fctype = cmd->fabrics.fctype; __assign_disk_name(__entry->disk, req->rq_disk); memcpy(__entry->cdw10, &cmd->common.cdw10, sizeof(__entry->cdw10)); ), - TP_printk("nvme%d: %sqid=%d, cmdid=%u, nsid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)", + TP_printk("nvme%d: %sqid=%d, cmdid=%u, nsid=%u, flags=0x%x, meta=0x%x, cmd=(%s %s)", __entry->ctrl_id, __print_disk_name(__entry->disk), __entry->qid, __entry->cid, __entry->nsid, __entry->flags, __entry->metadata, diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig index dd2019006838..39854d43758b 100644 --- a/drivers/nvmem/Kconfig +++ b/drivers/nvmem/Kconfig @@ -107,6 +107,17 @@ config MTK_EFUSE This driver can also be built as a module. If so, the module will be called efuse-mtk. +config NVMEM_NINTENDO_OTP + tristate "Nintendo Wii and Wii U OTP Support" + help + This is a driver exposing the OTP of a Nintendo Wii or Wii U console. + + This memory contains common and per-console keys, signatures and + related data required to access peripherals. + + This driver can also be built as a module. If so, the module + will be called nvmem-nintendo-otp. + config QCOM_QFPROM tristate "QCOM QFPROM Support" depends on ARCH_QCOM || COMPILE_TEST diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile index bbea1410240a..dcbbde35b6a8 100644 --- a/drivers/nvmem/Makefile +++ b/drivers/nvmem/Makefile @@ -23,6 +23,8 @@ obj-$(CONFIG_NVMEM_LPC18XX_OTP) += nvmem_lpc18xx_otp.o nvmem_lpc18xx_otp-y := lpc18xx_otp.o obj-$(CONFIG_NVMEM_MXS_OCOTP) += nvmem-mxs-ocotp.o nvmem-mxs-ocotp-y := mxs-ocotp.o +obj-$(CONFIG_NVMEM_NINTENDO_OTP) += nvmem-nintendo-otp.o +nvmem-nintendo-otp-y := nintendo-otp.o obj-$(CONFIG_MTK_EFUSE) += nvmem_mtk-efuse.o nvmem_mtk-efuse-y := mtk-efuse.o obj-$(CONFIG_QCOM_QFPROM) += nvmem_qfprom.o diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index b3bc30a04ed7..3d87fadaa160 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -824,8 +824,11 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) if (nvmem->nkeepout) { rval = nvmem_validate_keepouts(nvmem); - if (rval) - goto err_put_device; + if (rval) { + ida_free(&nvmem_ida, nvmem->id); + kfree(nvmem); + return ERR_PTR(rval); + } } dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name); diff --git a/drivers/nvmem/nintendo-otp.c b/drivers/nvmem/nintendo-otp.c new file mode 100644 index 000000000000..33961b17f9f1 --- /dev/null +++ b/drivers/nvmem/nintendo-otp.c @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Nintendo Wii and Wii U OTP driver + * + * This is a driver exposing the OTP of a Nintendo Wii or Wii U console. + * + * This memory contains common and per-console keys, signatures and + * related data required to access peripherals. 
+ * + * Based on reversed documentation from https://wiiubrew.org/wiki/Hardware/OTP + * + * Copyright (C) 2021 Emmanuel Gil Peyrot <linkmauve@linkmauve.fr> + */ + +#include <linux/device.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/nvmem-provider.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> + +#define HW_OTPCMD 0 +#define HW_OTPDATA 4 +#define OTP_READ 0x80000000 +#define BANK_SIZE 128 +#define WORD_SIZE 4 + +struct nintendo_otp_priv { + void __iomem *regs; +}; + +struct nintendo_otp_devtype_data { + const char *name; + unsigned int num_banks; +}; + +static const struct nintendo_otp_devtype_data hollywood_otp_data = { + .name = "wii-otp", + .num_banks = 1, +}; + +static const struct nintendo_otp_devtype_data latte_otp_data = { + .name = "wiiu-otp", + .num_banks = 8, +}; + +static int nintendo_otp_reg_read(void *context, + unsigned int reg, void *_val, size_t bytes) +{ + struct nintendo_otp_priv *priv = context; + u32 *val = _val; + int words = bytes / WORD_SIZE; + u32 bank, addr; + + while (words--) { + bank = (reg / BANK_SIZE) << 8; + addr = (reg / WORD_SIZE) % (BANK_SIZE / WORD_SIZE); + iowrite32be(OTP_READ | bank | addr, priv->regs + HW_OTPCMD); + *val++ = ioread32be(priv->regs + HW_OTPDATA); + reg += WORD_SIZE; + } + + return 0; +} + +static const struct of_device_id nintendo_otp_of_table[] = { + { .compatible = "nintendo,hollywood-otp", .data = &hollywood_otp_data }, + { .compatible = "nintendo,latte-otp", .data = &latte_otp_data }, + {/* sentinel */}, +}; +MODULE_DEVICE_TABLE(of, nintendo_otp_of_table); + +static int nintendo_otp_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + const struct of_device_id *of_id = + of_match_device(nintendo_otp_of_table, dev); + struct resource *res; + struct nvmem_device *nvmem; + struct nintendo_otp_priv *priv; + + struct nvmem_config config = { + .stride = WORD_SIZE, + .word_size = WORD_SIZE, + .reg_read = nintendo_otp_reg_read, + .read_only = true, + .root_only = true, + }; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->regs = devm_ioremap_resource(dev, res); + if (IS_ERR(priv->regs)) + return PTR_ERR(priv->regs); + + if (of_id->data) { + const struct nintendo_otp_devtype_data *data = of_id->data; + config.name = data->name; + config.size = data->num_banks * BANK_SIZE; + } + + config.dev = dev; + config.priv = priv; + + nvmem = devm_nvmem_register(dev, &config); + + return PTR_ERR_OR_ZERO(nvmem); +} + +static struct platform_driver nintendo_otp_driver = { + .probe = nintendo_otp_probe, + .driver = { + .name = "nintendo-otp", + .of_match_table = nintendo_otp_of_table, + }, +}; +module_platform_driver(nintendo_otp_driver); +MODULE_AUTHOR("Emmanuel Gil Peyrot <linkmauve@linkmauve.fr>"); +MODULE_DESCRIPTION("Nintendo Wii and Wii U OTP driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c index 81fbad5e939d..c500d6235bf6 100644 --- a/drivers/nvmem/qfprom.c +++ b/drivers/nvmem/qfprom.c @@ -12,6 +12,8 @@ #include <linux/mod_devicetable.h> #include <linux/nvmem-provider.h> #include <linux/platform_device.h> +#include <linux/pm_domain.h> +#include <linux/pm_runtime.h> #include <linux/property.h> #include <linux/regulator/consumer.h> @@ -139,6 +141,12 @@ static void qfprom_disable_fuse_blowing(const struct qfprom_priv *priv, { int ret; + writel(old->timer_val, priv->qfpconf + 
QFPROM_BLOW_TIMER_OFFSET); + writel(old->accel_val, priv->qfpconf + QFPROM_ACCEL_OFFSET); + + dev_pm_genpd_set_performance_state(priv->dev, 0); + pm_runtime_put(priv->dev); + /* * This may be a shared rail and may be able to run at a lower rate * when we're not blowing fuses. At the moment, the regulator framework @@ -159,9 +167,6 @@ static void qfprom_disable_fuse_blowing(const struct qfprom_priv *priv, "Failed to set clock rate for disable (ignoring)\n"); clk_disable_unprepare(priv->secclk); - - writel(old->timer_val, priv->qfpconf + QFPROM_BLOW_TIMER_OFFSET); - writel(old->accel_val, priv->qfpconf + QFPROM_ACCEL_OFFSET); } /** @@ -212,6 +217,14 @@ static int qfprom_enable_fuse_blowing(const struct qfprom_priv *priv, goto err_clk_rate_set; } + ret = pm_runtime_get_sync(priv->dev); + if (ret < 0) { + pm_runtime_put_noidle(priv->dev); + dev_err(priv->dev, "Failed to enable power-domain\n"); + goto err_reg_enable; + } + dev_pm_genpd_set_performance_state(priv->dev, INT_MAX); + old->timer_val = readl(priv->qfpconf + QFPROM_BLOW_TIMER_OFFSET); old->accel_val = readl(priv->qfpconf + QFPROM_ACCEL_OFFSET); writel(priv->soc_data->qfprom_blow_timer_value, @@ -221,6 +234,8 @@ static int qfprom_enable_fuse_blowing(const struct qfprom_priv *priv, return 0; +err_reg_enable: + regulator_disable(priv->vcc); err_clk_rate_set: clk_set_rate(priv->secclk, old->clk_rate); err_clk_prepared: @@ -320,6 +335,11 @@ static int qfprom_reg_read(void *context, return 0; } +static void qfprom_runtime_disable(void *data) +{ + pm_runtime_disable(data); +} + static const struct qfprom_soc_data qfprom_7_8_data = { .accel_value = 0xD10, .qfprom_blow_timer_value = 25, @@ -420,6 +440,11 @@ static int qfprom_probe(struct platform_device *pdev) econfig.reg_write = qfprom_reg_write; } + pm_runtime_enable(dev); + ret = devm_add_action_or_reset(dev, qfprom_runtime_disable, dev); + if (ret) + return ret; + nvmem = devm_nvmem_register(dev, &econfig); return PTR_ERR_OR_ZERO(nvmem); diff --git a/drivers/opp/core.c b/drivers/opp/core.c index b335c077f215..5543c54dacc5 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -1856,9 +1856,6 @@ void dev_pm_opp_put_supported_hw(struct opp_table *opp_table) if (unlikely(!opp_table)) return; - /* Make sure there are no concurrent readers while updating opp_table */ - WARN_ON(!list_empty(&opp_table->opp_list)); - kfree(opp_table->supported_hw); opp_table->supported_hw = NULL; opp_table->supported_hw_count = 0; @@ -1944,9 +1941,6 @@ void dev_pm_opp_put_prop_name(struct opp_table *opp_table) if (unlikely(!opp_table)) return; - /* Make sure there are no concurrent readers while updating opp_table */ - WARN_ON(!list_empty(&opp_table->opp_list)); - kfree(opp_table->prop_name); opp_table->prop_name = NULL; @@ -2056,9 +2050,6 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table) if (!opp_table->regulators) goto put_opp_table; - /* Make sure there are no concurrent readers while updating opp_table */ - WARN_ON(!list_empty(&opp_table->opp_list)); - if (opp_table->enabled) { for (i = opp_table->regulator_count - 1; i >= 0; i--) regulator_disable(opp_table->regulators[i]); @@ -2178,9 +2169,6 @@ void dev_pm_opp_put_clkname(struct opp_table *opp_table) if (unlikely(!opp_table)) return; - /* Make sure there are no concurrent readers while updating opp_table */ - WARN_ON(!list_empty(&opp_table->opp_list)); - clk_put(opp_table->clk); opp_table->clk = ERR_PTR(-EINVAL); @@ -2279,9 +2267,6 @@ void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table) if (unlikely(!opp_table)) return; - 
/* Make sure there are no concurrent readers while updating opp_table */ - WARN_ON(!list_empty(&opp_table->opp_list)); - opp_table->set_opp = NULL; mutex_lock(&opp_table->lock); diff --git a/drivers/opp/of.c b/drivers/opp/of.c index d298e38aaf7e..67f2e0710e79 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -964,8 +964,9 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table) } } - /* There should be one of more OPP defined */ - if (WARN_ON(!count)) { + /* There should be one or more OPPs defined */ + if (!count) { + dev_err(dev, "%s: no supported OPPs", __func__); ret = -ENOENT; goto remove_static_opp; } diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c index 96b888bb49c6..9f5d784cd95d 100644 --- a/drivers/parport/parport_serial.c +++ b/drivers/parport/parport_serial.c @@ -606,12 +606,15 @@ static int parport_register(struct pci_dev *dev, const struct pci_device_id *id) "hi" as an offset (see SYBA def.) */ /* TODO: test if sharing interrupts works */ - irq = dev->irq; - if (irq == IRQ_NONE) { + irq = pci_irq_vector(dev, 0); + if (irq < 0) + return irq; + if (irq == 0) + irq = PARPORT_IRQ_NONE; + if (irq == PARPORT_IRQ_NONE) { dev_dbg(&dev->dev, "PCI parallel port detected: I/O at %#lx(%#lx)\n", io_lo, io_hi); - irq = PARPORT_IRQ_NONE; } else { dev_dbg(&dev->dev, "PCI parallel port detected: I/O at %#lx(%#lx), IRQ %d\n", diff --git a/drivers/pci/controller/pci-ixp4xx.c b/drivers/pci/controller/pci-ixp4xx.c index 896a45b24236..654ac4a82beb 100644 --- a/drivers/pci/controller/pci-ixp4xx.c +++ b/drivers/pci/controller/pci-ixp4xx.c @@ -145,7 +145,7 @@ static int ixp4xx_pci_check_master_abort(struct ixp4xx_pci *p) return 0; } -static int ixp4xx_pci_read(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 *data) +static int ixp4xx_pci_read_indirect(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 *data) { ixp4xx_writel(p, IXP4XX_PCI_NP_AD, addr); @@ -170,7 +170,7 @@ static int ixp4xx_pci_read(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 *data) return ixp4xx_pci_check_master_abort(p); } -static int ixp4xx_pci_write(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 data) +static int ixp4xx_pci_write_indirect(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 data) { ixp4xx_writel(p, IXP4XX_PCI_NP_AD, addr); @@ -308,7 +308,7 @@ static int ixp4xx_pci_read_config(struct pci_bus *bus, unsigned int devfn, dev_dbg(p->dev, "read_config from %d size %d dev %d:%d:%d address: %08x cmd: %08x\n", where, size, bus_num, PCI_SLOT(devfn), PCI_FUNC(devfn), addr, cmd); - ret = ixp4xx_pci_read(p, addr, cmd, &val); + ret = ixp4xx_pci_read_indirect(p, addr, cmd, &val); if (ret) return PCIBIOS_DEVICE_NOT_FOUND; @@ -356,7 +356,7 @@ static int ixp4xx_pci_write_config(struct pci_bus *bus, unsigned int devfn, dev_dbg(p->dev, "write_config_byte %#x to %d size %d dev %d:%d:%d addr: %08x cmd %08x\n", value, where, size, bus_num, PCI_SLOT(devfn), PCI_FUNC(devfn), addr, cmd); - ret = ixp4xx_pci_write(p, addr, cmd, val); + ret = ixp4xx_pci_write_indirect(p, addr, cmd, val); if (ret) return PCIBIOS_DEVICE_NOT_FOUND; diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 9232255c8515..e5e75331b415 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -143,24 +143,25 @@ static inline __attribute_const__ u32 msi_mask(unsigned x) * reliably as devices without an INTx disable bit will then generate a * level IRQ which will never be cleared. 
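The parport_serial hunk above stops trusting dev->irq and asks the IRQ core via pci_irq_vector() instead. A minimal sketch of that pattern, assuming interrupt vectors have already been set up for the device (the helper name and surrounding error handling are illustrative):

static int parport_irq_sketch(struct pci_dev *pdev)
{
        int irq = pci_irq_vector(pdev, 0);      /* Linux IRQ number, or -errno */

        if (irq < 0)
                return irq;

        /* vector 0 can legitimately be 0, meaning no usable interrupt */
        return irq ? irq : PARPORT_IRQ_NONE;
}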
*/ -u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) +void __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) { - u32 mask_bits = desc->masked; + raw_spinlock_t *lock = &desc->dev->msi_lock; + unsigned long flags; if (pci_msi_ignore_mask || !desc->msi_attrib.maskbit) - return 0; + return; - mask_bits &= ~mask; - mask_bits |= flag; + raw_spin_lock_irqsave(lock, flags); + desc->masked &= ~mask; + desc->masked |= flag; pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->mask_pos, - mask_bits); - - return mask_bits; + desc->masked); + raw_spin_unlock_irqrestore(lock, flags); } static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) { - desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag); + __pci_msi_desc_mask_irq(desc, mask, flag); } static void __iomem *pci_msix_desc_addr(struct msi_desc *desc) @@ -289,13 +290,31 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) /* Don't touch the hardware now */ } else if (entry->msi_attrib.is_msix) { void __iomem *base = pci_msix_desc_addr(entry); + bool unmasked = !(entry->masked & PCI_MSIX_ENTRY_CTRL_MASKBIT); if (!base) goto skip; + /* + * The specification mandates that the entry is masked + * when the message is modified: + * + * "If software changes the Address or Data value of an + * entry while the entry is unmasked, the result is + * undefined." + */ + if (unmasked) + __pci_msix_desc_mask_irq(entry, PCI_MSIX_ENTRY_CTRL_MASKBIT); + writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR); writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR); writel(msg->data, base + PCI_MSIX_ENTRY_DATA); + + if (unmasked) + __pci_msix_desc_mask_irq(entry, 0); + + /* Ensure that the writes are visible in the device */ + readl(base + PCI_MSIX_ENTRY_DATA); } else { int pos = dev->msi_cap; u16 msgctl; @@ -316,6 +335,8 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) pci_write_config_word(dev, pos + PCI_MSI_DATA_32, msg->data); } + /* Ensure that the writes are visible in the device */ + pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl); } skip: @@ -636,21 +657,21 @@ static int msi_capability_init(struct pci_dev *dev, int nvec, /* Configure MSI capability structure */ ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI); if (ret) { - msi_mask_irq(entry, mask, ~mask); + msi_mask_irq(entry, mask, 0); free_msi_irqs(dev); return ret; } ret = msi_verify_entries(dev); if (ret) { - msi_mask_irq(entry, mask, ~mask); + msi_mask_irq(entry, mask, 0); free_msi_irqs(dev); return ret; } ret = populate_msi_sysfs(dev); if (ret) { - msi_mask_irq(entry, mask, ~mask); + msi_mask_irq(entry, mask, 0); free_msi_irqs(dev); return ret; } @@ -691,6 +712,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, { struct irq_affinity_desc *curmsk, *masks = NULL; struct msi_desc *entry; + void __iomem *addr; int ret, i; int vec_count = pci_msix_vec_count(dev); @@ -711,6 +733,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, entry->msi_attrib.is_msix = 1; entry->msi_attrib.is_64 = 1; + if (entries) entry->msi_attrib.entry_nr = entries[i].entry; else @@ -722,6 +745,10 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, entry->msi_attrib.default_irq = dev->irq; entry->mask_base = base; + addr = pci_msix_desc_addr(entry); + if (addr) + entry->masked = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL); + list_add_tail(&entry->list, dev_to_msi_list(&dev->dev)); if (masks) curmsk++; @@ -732,26 +759,25 @@ out: return ret; } -static 
void msix_program_entries(struct pci_dev *dev, - struct msix_entry *entries) +static void msix_update_entries(struct pci_dev *dev, struct msix_entry *entries) { struct msi_desc *entry; - int i = 0; - void __iomem *desc_addr; for_each_pci_msi_entry(entry, dev) { - if (entries) - entries[i++].vector = entry->irq; + if (entries) { + entries->vector = entry->irq; + entries++; + } + } +} - desc_addr = pci_msix_desc_addr(entry); - if (desc_addr) - entry->masked = readl(desc_addr + - PCI_MSIX_ENTRY_VECTOR_CTRL); - else - entry->masked = 0; +static void msix_mask_all(void __iomem *base, int tsize) +{ + u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT; + int i; - msix_mask_irq(entry, 1); - } + for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE) + writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL); } /** @@ -768,22 +794,33 @@ static void msix_program_entries(struct pci_dev *dev, static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries, int nvec, struct irq_affinity *affd) { - int ret; - u16 control; void __iomem *base; + int ret, tsize; + u16 control; - /* Ensure MSI-X is disabled while it is set up */ - pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); + /* + * Some devices require MSI-X to be enabled before the MSI-X + * registers can be accessed. Mask all the vectors to prevent + * interrupts coming in before they're fully set up. + */ + pci_msix_clear_and_set_ctrl(dev, 0, PCI_MSIX_FLAGS_MASKALL | + PCI_MSIX_FLAGS_ENABLE); pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control); /* Request & Map MSI-X table region */ - base = msix_map_region(dev, msix_table_size(control)); - if (!base) - return -ENOMEM; + tsize = msix_table_size(control); + base = msix_map_region(dev, tsize); + if (!base) { + ret = -ENOMEM; + goto out_disable; + } + + /* Ensure that all table entries are masked. */ + msix_mask_all(base, tsize); ret = msix_setup_entries(dev, base, entries, nvec, affd); if (ret) - return ret; + goto out_disable; ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); if (ret) @@ -794,15 +831,7 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries, if (ret) goto out_free; - /* - * Some devices require MSI-X to be enabled before we can touch the - * MSI-X registers. We need to mask all the vectors to prevent - * interrupts coming in before they're fully set up. 
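Pulling the hunks above together, the MSI-X bring-up order this patch establishes looks roughly as follows (a sketch; locals and most error handling elided):

        /* 1. Enable MSI-X with all vectors force-masked so nothing can fire early. */
        pci_msix_clear_and_set_ctrl(dev, 0,
                                    PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE);

        /* 2. The table is now accessible: map it and mask every entry. */
        base = msix_map_region(dev, msix_table_size(control));
        msix_mask_all(base, tsize);

        /* 3. Build descriptors, caching each entry's initial mask bit. */
        ret = msix_setup_entries(dev, base, entries, nvec, affd);

        /* 4. Any failure funnels to out_disable, which clears MSI-X enable again. */
        if (ret)
                goto out_disable;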
- */ - pci_msix_clear_and_set_ctrl(dev, 0, - PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE); - - msix_program_entries(dev, entries); + msix_update_entries(dev, entries); ret = populate_msi_sysfs(dev); if (ret) @@ -836,6 +865,9 @@ out_avail: out_free: free_msi_irqs(dev); +out_disable: + pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); + return ret; } @@ -930,8 +962,7 @@ static void pci_msi_shutdown(struct pci_dev *dev) /* Return the device with MSI unmasked as initial states */ mask = msi_mask(desc->msi_attrib.multi_cap); - /* Keep cached state to be restored */ - __pci_msi_desc_mask_irq(desc, mask, ~mask); + msi_mask_irq(desc, mask, 0); /* Restore dev->irq to its default pin-assertion IRQ */ dev->irq = desc->msi_attrib.default_irq; @@ -1016,10 +1047,8 @@ static void pci_msix_shutdown(struct pci_dev *dev) } /* Return the device with MSI-X masked as initial states */ - for_each_pci_msi_entry(entry, dev) { - /* Keep cached states to be restored */ + for_each_pci_msi_entry(entry, dev) __pci_msix_desc_mask_irq(entry, 1); - } pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); pci_intx_for_msi(dev, 1); diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 5d63df7c1820..7bbf2673c7f2 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -978,7 +978,7 @@ void pci_create_legacy_files(struct pci_bus *b) b->legacy_mem->size = 1024*1024; b->legacy_mem->attr.mode = 0600; b->legacy_mem->mmap = pci_mmap_legacy_mem; - b->legacy_io->mapping = iomem_get_mapping(); + b->legacy_mem->mapping = iomem_get_mapping(); pci_adjust_legacy_attr(b, pci_mmap_mem); error = device_create_bin_file(&b->dev, b->legacy_mem); if (error) diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c index 9bab07302bbf..d32fbfc93ea9 100644 --- a/drivers/pci/proc.c +++ b/drivers/pci/proc.c @@ -230,8 +230,8 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd, break; } /* If arch decided it can't, fall through... 
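The proc.c hunk above moves fallthrough; inside the #ifdef: with HAVE_PCI_MMAP unset, the annotation would otherwise sit after an earlier case's break; and claim a fall-through that cannot happen. A condensed sketch of the corrected shape (case label illustrative):

        switch (cmd) {
#ifdef HAVE_PCI_MMAP
        case PCIIOC_MMAP_IS_IO:
                /* arch-specific handling ... */
                fallthrough;    /* only meaningful while this case is compiled in */
#endif /* HAVE_PCI_MMAP */
        default:
                ret = -EINVAL;
                break;
        }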
*/ -#endif /* HAVE_PCI_MMAP */ fallthrough; +#endif /* HAVE_PCI_MMAP */ default: ret = -EINVAL; break; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 6d74386eadc2..ab3de1551b50 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -1900,6 +1900,7 @@ static void quirk_ryzen_xhci_d3hot(struct pci_dev *dev) } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e0, quirk_ryzen_xhci_d3hot); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e1, quirk_ryzen_xhci_d3hot); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1639, quirk_ryzen_xhci_d3hot); #ifdef CONFIG_X86_IO_APIC static int dmi_disable_ioapicreroute(const struct dmi_system_id *d) diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c index 85887d885b5f..192c9049d654 100644 --- a/drivers/pcmcia/i82092.c +++ b/drivers/pcmcia/i82092.c @@ -112,6 +112,7 @@ static int i82092aa_pci_probe(struct pci_dev *dev, for (i = 0; i < socket_count; i++) { sockets[i].card_state = 1; /* 1 = present but empty */ sockets[i].io_base = pci_resource_start(dev, 0); + sockets[i].dev = dev; sockets[i].socket.features |= SS_CAP_PCCARD; sockets[i].socket.map_size = 0x1000; sockets[i].socket.irq_mask = 0; diff --git a/drivers/phy/amlogic/phy-meson8b-usb2.c b/drivers/phy/amlogic/phy-meson8b-usb2.c index 03c061dd5f0d..cf10bed40528 100644 --- a/drivers/phy/amlogic/phy-meson8b-usb2.c +++ b/drivers/phy/amlogic/phy-meson8b-usb2.c @@ -219,6 +219,10 @@ static int phy_meson8b_usb2_power_off(struct phy *phy) clk_disable_unprepare(priv->clk_usb); clk_disable_unprepare(priv->clk_usb_general); + /* power off the PHY by putting it into reset mode */ + regmap_update_bits(priv->regmap, REG_CTRL, REG_CTRL_POWER_ON_RESET, + REG_CTRL_POWER_ON_RESET); + return 0; } @@ -273,8 +277,8 @@ static int phy_meson8b_usb2_probe(struct platform_device *pdev) phy = devm_phy_create(&pdev->dev, NULL, &phy_meson8b_usb2_ops); if (IS_ERR(phy)) { - dev_err(&pdev->dev, "failed to create PHY\n"); - return PTR_ERR(phy); + return dev_err_probe(&pdev->dev, PTR_ERR(phy), + "failed to create PHY\n"); } phy_set_drvdata(phy, priv); diff --git a/drivers/phy/cadence/phy-cadence-torrent.c b/drivers/phy/cadence/phy-cadence-torrent.c index 0477e7beebbf..415ace64adc5 100644 --- a/drivers/phy/cadence/phy-cadence-torrent.c +++ b/drivers/phy/cadence/phy-cadence-torrent.c @@ -24,13 +24,15 @@ #include <linux/reset.h> #include <linux/regmap.h> -#define REF_CLK_19_2MHz 19200000 -#define REF_CLK_25MHz 25000000 +#define REF_CLK_19_2MHZ 19200000 +#define REF_CLK_25MHZ 25000000 +#define REF_CLK_100MHZ 100000000 #define MAX_NUM_LANES 4 #define DEFAULT_MAX_BIT_RATE 8100 /* in Mbps */ #define NUM_SSC_MODE 3 +#define NUM_REF_CLK 3 #define NUM_PHY_TYPE 6 #define POLL_TIMEOUT_US 5000 @@ -49,6 +51,10 @@ #define TORRENT_PHY_PCS_COMMON_OFFSET(block_offset) \ (0xC000 << (block_offset)) +#define TORRENT_PHY_PCS_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \ + ((0xD000 << (block_offset)) + \ + (((ln) << 8) << (reg_offset))) + #define TORRENT_PHY_PMA_COMMON_OFFSET(block_offset) \ (0xE000 << (block_offset)) @@ -101,6 +107,7 @@ #define CMN_PLL0_FRACDIVH_M0 0x0092U #define CMN_PLL0_HIGH_THR_M0 0x0093U #define CMN_PLL0_DSM_DIAG_M0 0x0094U +#define CMN_PLL0_DSM_FBH_OVRD_M0 0x0095U #define CMN_PLL0_SS_CTRL1_M0 0x0098U #define CMN_PLL0_SS_CTRL2_M0 0x0099U #define CMN_PLL0_SS_CTRL3_M0 0x009AU @@ -220,6 +227,9 @@ #define PHY_PIPE_USB3_GEN2_POST_CFG0 0x0022U #define PHY_PIPE_USB3_GEN2_POST_CFG1 0x0023U +/* PHY PCS lane registers */ +#define PHY_PCS_ISO_LINK_CTRL 0x000BU + /* PHY PMA common registers */ #define PHY_PMA_CMN_CTRL1 
0x0000U #define PHY_PMA_CMN_CTRL2 0x0001U @@ -244,6 +254,9 @@ static const struct reg_field phy_pma_pll_raw_ctrl = static const struct reg_field phy_reset_ctrl = REG_FIELD(PHY_RESET, 8, 8); +static const struct reg_field phy_pcs_iso_link_ctrl_1 = + REG_FIELD(PHY_PCS_ISO_LINK_CTRL, 1, 1); + static const struct reg_field phy_pipe_cmn_ctrl1_0 = REG_FIELD(PHY_PIPE_CMN_CTRL1, 0, 0); #define REFCLK_OUT_NUM_CMN_CONFIG 5 @@ -273,6 +286,12 @@ enum cdns_torrent_phy_type { TYPE_USB, }; +enum cdns_torrent_ref_clk { + CLK_19_2_MHZ, + CLK_25_MHZ, + CLK_100_MHZ +}; + enum cdns_torrent_ssc_mode { NO_SSC, EXTERNAL_SSC, @@ -296,7 +315,7 @@ struct cdns_torrent_phy { struct reset_control *apb_rst; struct device *dev; struct clk *clk; - unsigned long ref_clk_rate; + enum cdns_torrent_ref_clk ref_clk_rate; struct cdns_torrent_inst phys[MAX_NUM_LANES]; int nsubnodes; const struct cdns_torrent_data *init_data; @@ -306,12 +325,14 @@ struct cdns_torrent_phy { struct regmap *regmap_phy_pma_common_cdb; struct regmap *regmap_tx_lane_cdb[MAX_NUM_LANES]; struct regmap *regmap_rx_lane_cdb[MAX_NUM_LANES]; + struct regmap *regmap_phy_pcs_lane_cdb[MAX_NUM_LANES]; struct regmap *regmap_dptx_phy_reg; struct regmap_field *phy_pll_cfg; struct regmap_field *phy_pma_cmn_ctrl_1; struct regmap_field *phy_pma_cmn_ctrl_2; struct regmap_field *phy_pma_pll_raw_ctrl; struct regmap_field *phy_reset_ctrl; + struct regmap_field *phy_pcs_iso_link_ctrl_1[MAX_NUM_LANES]; struct clk *clks[CDNS_TORRENT_REFCLK_DRIVER + 1]; struct clk_onecell_data clk_data; }; @@ -333,57 +354,6 @@ struct cdns_torrent_derived_refclk { #define to_cdns_torrent_derived_refclk(_hw) \ container_of(_hw, struct cdns_torrent_derived_refclk, hw) -static int cdns_torrent_phy_init(struct phy *phy); -static int cdns_torrent_dp_init(struct phy *phy); -static int cdns_torrent_dp_run(struct cdns_torrent_phy *cdns_phy, - u32 num_lanes); -static -int cdns_torrent_dp_wait_pma_cmn_ready(struct cdns_torrent_phy *cdns_phy); -static void cdns_torrent_dp_pma_cfg(struct cdns_torrent_phy *cdns_phy, - struct cdns_torrent_inst *inst); -static -void cdns_torrent_dp_pma_cmn_cfg_19_2mhz(struct cdns_torrent_phy *cdns_phy); -static -void cdns_torrent_dp_pma_cmn_vco_cfg_19_2mhz(struct cdns_torrent_phy *cdns_phy, - u32 rate, bool ssc); -static -void cdns_torrent_dp_pma_cmn_cfg_25mhz(struct cdns_torrent_phy *cdns_phy); -static -void cdns_torrent_dp_pma_cmn_vco_cfg_25mhz(struct cdns_torrent_phy *cdns_phy, - u32 rate, bool ssc); -static void cdns_torrent_dp_pma_lane_cfg(struct cdns_torrent_phy *cdns_phy, - unsigned int lane); -static void cdns_torrent_dp_pma_cmn_rate(struct cdns_torrent_phy *cdns_phy, - u32 rate, u32 num_lanes); -static int cdns_torrent_dp_configure(struct phy *phy, - union phy_configure_opts *opts); -static int cdns_torrent_dp_set_power_state(struct cdns_torrent_phy *cdns_phy, - u32 num_lanes, - enum phy_powerstate powerstate); -static int cdns_torrent_phy_on(struct phy *phy); -static int cdns_torrent_phy_off(struct phy *phy); - -static const struct phy_ops cdns_torrent_phy_ops = { - .init = cdns_torrent_phy_init, - .configure = cdns_torrent_dp_configure, - .power_on = cdns_torrent_phy_on, - .power_off = cdns_torrent_phy_off, - .owner = THIS_MODULE, -}; - -static int cdns_torrent_noop_phy_on(struct phy *phy) -{ - /* Give 5ms to 10ms delay for the PIPE clock to be stable */ - usleep_range(5000, 10000); - - return 0; -} - -static const struct phy_ops noop_ops = { - .power_on = cdns_torrent_noop_phy_on, - .owner = THIS_MODULE, -}; - struct cdns_reg_pairs { u32 val; u32 off; @@ -403,12 
+373,12 @@ struct cdns_torrent_data { [NUM_SSC_MODE]; struct cdns_torrent_vals *pcs_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE] [NUM_SSC_MODE]; - struct cdns_torrent_vals *cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE] - [NUM_SSC_MODE]; - struct cdns_torrent_vals *tx_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE] - [NUM_SSC_MODE]; - struct cdns_torrent_vals *rx_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE] - [NUM_SSC_MODE]; + struct cdns_torrent_vals *cmn_vals[NUM_REF_CLK][NUM_PHY_TYPE] + [NUM_PHY_TYPE][NUM_SSC_MODE]; + struct cdns_torrent_vals *tx_ln_vals[NUM_REF_CLK][NUM_PHY_TYPE] + [NUM_PHY_TYPE][NUM_SSC_MODE]; + struct cdns_torrent_vals *rx_ln_vals[NUM_REF_CLK][NUM_PHY_TYPE] + [NUM_PHY_TYPE][NUM_SSC_MODE]; }; struct cdns_regmap_cdb_context { @@ -497,6 +467,22 @@ static const struct regmap_config cdns_torrent_common_cdb_config = { .reg_read = cdns_regmap_read, }; +#define TORRENT_PHY_PCS_LANE_CDB_REGMAP_CONF(n) \ +{ \ + .name = "torrent_phy_pcs_lane" n "_cdb", \ + .reg_stride = 1, \ + .fast_io = true, \ + .reg_write = cdns_regmap_write, \ + .reg_read = cdns_regmap_read, \ +} + +static const struct regmap_config cdns_torrent_phy_pcs_lane_cdb_config[] = { + TORRENT_PHY_PCS_LANE_CDB_REGMAP_CONF("0"), + TORRENT_PHY_PCS_LANE_CDB_REGMAP_CONF("1"), + TORRENT_PHY_PCS_LANE_CDB_REGMAP_CONF("2"), + TORRENT_PHY_PCS_LANE_CDB_REGMAP_CONF("3"), +}; + static const struct regmap_config cdns_torrent_phy_pcs_cmn_cdb_config = { .name = "torrent_phy_pcs_cmn_cdb", .reg_stride = 1, @@ -615,6 +601,351 @@ static const struct coefficients vltg_coeff[4][4] = { } }; +static const char *cdns_torrent_get_phy_type(enum cdns_torrent_phy_type phy_type) +{ + switch (phy_type) { + case TYPE_DP: + return "DisplayPort"; + case TYPE_PCIE: + return "PCIe"; + case TYPE_SGMII: + return "SGMII"; + case TYPE_QSGMII: + return "QSGMII"; + case TYPE_USB: + return "USB"; + default: + return "None"; + } +} + +/* + * Set registers responsible for enabling and configuring SSC, with second and + * third register values provided by parameters. 
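With the reference clock promoted to a leading array dimension in cdns_torrent_data, selecting register values becomes a four-way table lookup. A hedged sketch of the indexing (the actual lookup site is not part of this hunk; the enum values are those introduced by the patch):

        struct cdns_torrent_vals *cmn_vals;

        /* e.g. a PCIe + USB multilink pair on a 100 MHz reference clock */
        cmn_vals = cdns_phy->init_data->cmn_vals[CLK_100_MHZ][TYPE_PCIE]
                                                [TYPE_USB][NO_SSC];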
+ */ +static +void cdns_torrent_dp_enable_ssc_19_2mhz(struct cdns_torrent_phy *cdns_phy, + u32 ctrl2_val, u32 ctrl3_val) +{ + struct regmap *regmap = cdns_phy->regmap_common_cdb; + + cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, 0x0001); + cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, ctrl2_val); + cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, ctrl3_val); + cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL4_M0, 0x0003); + cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, 0x0001); + cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, ctrl2_val); + cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, ctrl3_val); + cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL4_M0, 0x0003); +} + +static +void cdns_torrent_dp_pma_cmn_vco_cfg_19_2mhz(struct cdns_torrent_phy *cdns_phy, + u32 rate, bool ssc) +{ + struct regmap *regmap = cdns_phy->regmap_common_cdb; + + /* Assumes 19.2 MHz refclock */ + switch (rate) { + /* Setting VCO for 10.8GHz */ + case 2700: + case 5400: + cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0119); + cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x4000); + cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x00BC); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0012); + cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0119); + cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x4000); + cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x00BC); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0012); + if (ssc) + cdns_torrent_dp_enable_ssc_19_2mhz(cdns_phy, 0x033A, 0x006A); + break; + /* Setting VCO for 9.72GHz */ + case 1620: + case 2430: + case 3240: + cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x01FA); + cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x4000); + cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x0152); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x01FA); + cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x4000); + cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x0152); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002); + if (ssc) + cdns_torrent_dp_enable_ssc_19_2mhz(cdns_phy, 0x05DD, 0x0069); + break; + /* Setting VCO for 8.64GHz */ + case 2160: + case 4320: + cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x01C2); + cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x0000); + cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x012C); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x01C2); + cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x0000); + cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x012C); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002); + if (ssc) + cdns_torrent_dp_enable_ssc_19_2mhz(cdns_phy, 0x0536, 0x0069); + break; + /* Setting VCO for 8.1GHz */ + case 8100: + cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x01A5); + cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0xE000); + cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002); + 
cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x011A); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x01A5); + cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0xE000); + cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x011A); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002); + if (ssc) + cdns_torrent_dp_enable_ssc_19_2mhz(cdns_phy, 0x04D7, 0x006A); + break; + } + + if (ssc) { + cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_PLLCNT_START, 0x025E); + cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_PLLCNT_THR, 0x0005); + cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_PLLCNT_START, 0x025E); + cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_PLLCNT_THR, 0x0005); + } else { + cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_PLLCNT_START, 0x0260); + cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_PLLCNT_START, 0x0260); + /* Set reset register values to disable SSC */ + cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL2_M0, 0x0000); + cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL3_M0, 0x0000); + cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL4_M0, 0x0000); + cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_PLLCNT_THR, 0x0003); + cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL2_M0, 0x0000); + cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL3_M0, 0x0000); + cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL4_M0, 0x0000); + cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_PLLCNT_THR, 0x0003); + } + + cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_REFCNT_START, 0x0099); + cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_PLLCNT_START, 0x0099); + cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_REFCNT_START, 0x0099); + cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_PLLCNT_START, 0x0099); +} + +/* + * Set registers responsible for enabling and configuring SSC, with second + * register value provided by a parameter. 
+ */ +static void cdns_torrent_dp_enable_ssc_25mhz(struct cdns_torrent_phy *cdns_phy, + u32 ctrl2_val) +{ + struct regmap *regmap = cdns_phy->regmap_common_cdb; + + cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, 0x0001); + cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, ctrl2_val); + cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, 0x007F); + cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL4_M0, 0x0003); + cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, 0x0001); + cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, ctrl2_val); + cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, 0x007F); + cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL4_M0, 0x0003); +} + +static +void cdns_torrent_dp_pma_cmn_vco_cfg_25mhz(struct cdns_torrent_phy *cdns_phy, + u32 rate, bool ssc) +{ + struct regmap *regmap = cdns_phy->regmap_common_cdb; + + /* Assumes 25 MHz refclock */ + switch (rate) { + /* Setting VCO for 10.8GHz */ + case 2700: + case 5400: + cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x01B0); + cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x0000); + cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x0120); + cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x01B0); + cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x0000); + cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x0120); + if (ssc) + cdns_torrent_dp_enable_ssc_25mhz(cdns_phy, 0x0423); + break; + /* Setting VCO for 9.72GHz */ + case 1620: + case 2430: + case 3240: + cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0184); + cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0xCCCD); + cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x0104); + cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0184); + cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0xCCCD); + cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x0104); + if (ssc) + cdns_torrent_dp_enable_ssc_25mhz(cdns_phy, 0x03B9); + break; + /* Setting VCO for 8.64GHz */ + case 2160: + case 4320: + cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0159); + cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x999A); + cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x00E7); + cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0159); + cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x999A); + cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x00E7); + if (ssc) + cdns_torrent_dp_enable_ssc_25mhz(cdns_phy, 0x034F); + break; + /* Setting VCO for 8.1GHz */ + case 8100: + cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0144); + cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x0000); + cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x00D8); + cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0144); + cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x0000); + cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x00D8); + if (ssc) + cdns_torrent_dp_enable_ssc_25mhz(cdns_phy, 0x031A); + break; + } + + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002); + 
cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002); + + if (ssc) { + cdns_torrent_phy_write(regmap, + CMN_PLL0_VCOCAL_PLLCNT_START, 0x0315); + cdns_torrent_phy_write(regmap, + CMN_PLL0_LOCK_PLLCNT_THR, 0x0005); + cdns_torrent_phy_write(regmap, + CMN_PLL1_VCOCAL_PLLCNT_START, 0x0315); + cdns_torrent_phy_write(regmap, + CMN_PLL1_LOCK_PLLCNT_THR, 0x0005); + } else { + cdns_torrent_phy_write(regmap, + CMN_PLL0_VCOCAL_PLLCNT_START, 0x0317); + cdns_torrent_phy_write(regmap, + CMN_PLL1_VCOCAL_PLLCNT_START, 0x0317); + /* Set reset register values to disable SSC */ + cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL2_M0, 0x0000); + cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL3_M0, 0x0000); + cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL4_M0, 0x0000); + cdns_torrent_phy_write(regmap, + CMN_PLL0_LOCK_PLLCNT_THR, 0x0003); + cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL2_M0, 0x0000); + cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL3_M0, 0x0000); + cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL4_M0, 0x0000); + cdns_torrent_phy_write(regmap, + CMN_PLL1_LOCK_PLLCNT_THR, 0x0003); + } + + cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_REFCNT_START, 0x00C7); + cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_PLLCNT_START, 0x00C7); + cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_REFCNT_START, 0x00C7); + cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_PLLCNT_START, 0x00C7); +} + +static +void cdns_torrent_dp_pma_cmn_vco_cfg_100mhz(struct cdns_torrent_phy *cdns_phy, + u32 rate, bool ssc) +{ + struct regmap *regmap = cdns_phy->regmap_common_cdb; + + /* Assumes 100 MHz refclock */ + switch (rate) { + /* Setting VCO for 10.8GHz */ + case 2700: + case 5400: + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0028); + cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_FBH_OVRD_M0, 0x0022); + cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_FBH_OVRD_M0, 0x0022); + cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_FBL_OVRD_M0, 0x000C); + break; + /* Setting VCO for 9.72GHz */ + case 1620: + case 2430: + case 3240: + cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_DIAG_M0, 0x0004); + cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_DIAG_M0, 0x0004); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_PADJ_M0, 0x0509); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0509); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_IADJ_M0, 0x0F00); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_IADJ_M0, 0x0F00); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_FILT_PADJ_M0, 0x0F08); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_FILT_PADJ_M0, 0x0F08); + cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0061); + cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0061); + cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x3333); + cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x3333); + cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x0042); + cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x0042); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002); + break; + /* Setting VCO for 8.64GHz */ + case 2160: + case 4320: + cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_DIAG_M0, 0x0004); + cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_DIAG_M0, 0x0004); + 
cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_PADJ_M0, 0x0509); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0509); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_IADJ_M0, 0x0F00); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_IADJ_M0, 0x0F00); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_FILT_PADJ_M0, 0x0F08); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_FILT_PADJ_M0, 0x0F08); + cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0056); + cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0056); + cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x6666); + cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x6666); + cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x003A); + cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x003A); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002); + break; + /* Setting VCO for 8.1GHz */ + case 8100: + cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_DIAG_M0, 0x0004); + cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_DIAG_M0, 0x0004); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_PADJ_M0, 0x0509); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0509); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_IADJ_M0, 0x0F00); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_IADJ_M0, 0x0F00); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_FILT_PADJ_M0, 0x0F08); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_FILT_PADJ_M0, 0x0F08); + cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0051); + cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0051); + cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x0036); + cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x0036); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002); + cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002); + break; + } +} + /* * Enable or disable PLL for selected lanes. */ @@ -669,6 +1000,161 @@ static int cdns_torrent_dp_set_pll_en(struct cdns_torrent_phy *cdns_phy, return ret; } +static int cdns_torrent_dp_set_power_state(struct cdns_torrent_phy *cdns_phy, + u32 num_lanes, + enum phy_powerstate powerstate) +{ + /* Register value for power state for a single byte. */ + u32 value_part; + u32 value; + u32 mask; + u32 read_val; + u32 ret; + struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg; + + switch (powerstate) { + case (POWERSTATE_A0): + value_part = 0x01U; + break; + case (POWERSTATE_A2): + value_part = 0x04U; + break; + default: + /* Powerstate A3 */ + value_part = 0x08U; + break; + } + + /* Select values of registers and mask, depending on enabled + * lane count. + */ + switch (num_lanes) { + /* lane 0 */ + case (1): + value = value_part; + mask = 0x0000003FU; + break; + /* lanes 0-1 */ + case (2): + value = (value_part + | (value_part << 8)); + mask = 0x00003F3FU; + break; + /* lanes 0-3, all */ + default: + value = (value_part + | (value_part << 8) + | (value_part << 16) + | (value_part << 24)); + mask = 0x3F3F3F3FU; + break; + } + + /* Set power state A<n>. */ + cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_POWER_STATE_REQ, value); + /* Wait, until PHY acknowledges power state completion. 
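To make the request encoding in cdns_torrent_dp_set_power_state() concrete, here is the arithmetic for an A2 request on a two-lane link, taken straight from the two switch statements above:

        /*
         * POWERSTATE_A2  -> value_part = 0x04
         * num_lanes == 2 -> value = 0x04 | (0x04 << 8) = 0x0404
         *                   mask  = 0x00003F3F
         *
         * The transition is complete once the ack register satisfies
         * (PHY_PMA_XCVR_POWER_STATE_ACK & 0x00003F3F) == 0x0404.
         */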
*/ + ret = regmap_read_poll_timeout(regmap, PHY_PMA_XCVR_POWER_STATE_ACK, + read_val, (read_val & mask) == value, 0, + POLL_TIMEOUT_US); + cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_POWER_STATE_REQ, 0x00000000); + ndelay(100); + + return ret; +} + +static int cdns_torrent_dp_run(struct cdns_torrent_phy *cdns_phy, u32 num_lanes) +{ + unsigned int read_val; + int ret; + struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg; + + /* + * waiting for ACK of pma_xcvr_pllclk_en_ln_*, only for the + * master lane + */ + ret = regmap_read_poll_timeout(regmap, PHY_PMA_XCVR_PLLCLK_EN_ACK, + read_val, read_val & 1, + 0, POLL_TIMEOUT_US); + if (ret == -ETIMEDOUT) { + dev_err(cdns_phy->dev, + "timeout waiting for link PLL clock enable ack\n"); + return ret; + } + + ndelay(100); + + ret = cdns_torrent_dp_set_power_state(cdns_phy, num_lanes, + POWERSTATE_A2); + if (ret) + return ret; + + ret = cdns_torrent_dp_set_power_state(cdns_phy, num_lanes, + POWERSTATE_A0); + + return ret; +} + +static int cdns_torrent_dp_wait_pma_cmn_ready(struct cdns_torrent_phy *cdns_phy) +{ + unsigned int reg; + int ret; + struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg; + + ret = regmap_read_poll_timeout(regmap, PHY_PMA_CMN_READY, reg, + reg & 1, 0, POLL_TIMEOUT_US); + if (ret == -ETIMEDOUT) { + dev_err(cdns_phy->dev, + "timeout waiting for PMA common ready\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static void cdns_torrent_dp_pma_cmn_rate(struct cdns_torrent_phy *cdns_phy, + u32 rate, u32 num_lanes) +{ + unsigned int clk_sel_val = 0; + unsigned int hsclk_div_val = 0; + unsigned int i; + + switch (rate) { + case 1620: + clk_sel_val = 0x0f01; + hsclk_div_val = 2; + break; + case 2160: + case 2430: + case 2700: + clk_sel_val = 0x0701; + hsclk_div_val = 1; + break; + case 3240: + clk_sel_val = 0x0b00; + hsclk_div_val = 2; + break; + case 4320: + case 5400: + clk_sel_val = 0x0301; + hsclk_div_val = 0; + break; + case 8100: + clk_sel_val = 0x0200; + hsclk_div_val = 0; + break; + } + + cdns_torrent_phy_write(cdns_phy->regmap_common_cdb, + CMN_PDIAG_PLL0_CLK_SEL_M0, clk_sel_val); + cdns_torrent_phy_write(cdns_phy->regmap_common_cdb, + CMN_PDIAG_PLL1_CLK_SEL_M0, clk_sel_val); + + /* PMA lane configuration to deal with multi-link operation */ + for (i = 0; i < num_lanes; i++) + cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[i], + XCVR_DIAG_HSCLK_DIV, hsclk_div_val); +} + /* * Perform register operations related to setting link rate, once powerstate is * set and PLL disable request was processed. @@ -676,8 +1162,7 @@ static int cdns_torrent_dp_set_pll_en(struct cdns_torrent_phy *cdns_phy, static int cdns_torrent_dp_configure_rate(struct cdns_torrent_phy *cdns_phy, struct phy_configure_opts_dp *dp) { - u32 ret; - u32 read_val; + u32 read_val, ret; /* Disable the cmn_pll0_en before re-programming the new data rate. */ regmap_field_write(cdns_phy->phy_pma_pll_raw_ctrl, 0x0); @@ -695,17 +1180,16 @@ static int cdns_torrent_dp_configure_rate(struct cdns_torrent_phy *cdns_phy, ndelay(200); /* DP Rate Change - VCO Output settings. 
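For orientation, the reshaped cdns_torrent_dp_configure_rate() boils down to the sequence below (a sketch assembled from the surrounding hunks; the intermediate ACK polls, delays and error checks are elided):

        /* 1. Gate the common PLL before touching VCO/divider registers. */
        regmap_field_write(cdns_phy->phy_pma_pll_raw_ctrl, 0x0);

        /* 2. Re-program the VCO for the new rate, per reference clock. */
        if (cdns_phy->ref_clk_rate == CLK_19_2_MHZ)
                cdns_torrent_dp_pma_cmn_vco_cfg_19_2mhz(cdns_phy, dp->link_rate, dp->ssc);
        else if (cdns_phy->ref_clk_rate == CLK_25_MHZ)
                cdns_torrent_dp_pma_cmn_vco_cfg_25mhz(cdns_phy, dp->link_rate, dp->ssc);
        else if (cdns_phy->ref_clk_rate == CLK_100_MHZ)
                cdns_torrent_dp_pma_cmn_vco_cfg_100mhz(cdns_phy, dp->link_rate, dp->ssc);

        /* 3. Set CLK_SEL/HSCLK_DIV for the rate, then re-enable cmn_pll0_en. */
        cdns_torrent_dp_pma_cmn_rate(cdns_phy, dp->link_rate, dp->lanes);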
*/ - if (cdns_phy->ref_clk_rate == REF_CLK_19_2MHz) { + if (cdns_phy->ref_clk_rate == CLK_19_2_MHZ) /* PMA common configuration 19.2MHz */ - cdns_torrent_dp_pma_cmn_vco_cfg_19_2mhz(cdns_phy, dp->link_rate, - dp->ssc); - cdns_torrent_dp_pma_cmn_cfg_19_2mhz(cdns_phy); - } else if (cdns_phy->ref_clk_rate == REF_CLK_25MHz) { + cdns_torrent_dp_pma_cmn_vco_cfg_19_2mhz(cdns_phy, dp->link_rate, dp->ssc); + else if (cdns_phy->ref_clk_rate == CLK_25_MHZ) /* PMA common configuration 25MHz */ - cdns_torrent_dp_pma_cmn_vco_cfg_25mhz(cdns_phy, dp->link_rate, - dp->ssc); - cdns_torrent_dp_pma_cmn_cfg_25mhz(cdns_phy); - } + cdns_torrent_dp_pma_cmn_vco_cfg_25mhz(cdns_phy, dp->link_rate, dp->ssc); + else if (cdns_phy->ref_clk_rate == CLK_100_MHZ) + /* PMA common configuration 100MHz */ + cdns_torrent_dp_pma_cmn_vco_cfg_100mhz(cdns_phy, dp->link_rate, dp->ssc); + cdns_torrent_dp_pma_cmn_rate(cdns_phy, dp->link_rate, dp->lanes); /* Enable the cmn_pll0_en. */ @@ -984,28 +1468,71 @@ static int cdns_torrent_dp_configure(struct phy *phy, return ret; } -static int cdns_torrent_dp_init(struct phy *phy) +static int cdns_torrent_phy_on(struct phy *phy) { - unsigned char lane_bits; - int ret; struct cdns_torrent_inst *inst = phy_get_drvdata(phy); struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent); - struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg; + u32 read_val; + int ret; - switch (cdns_phy->ref_clk_rate) { - case REF_CLK_19_2MHz: - case REF_CLK_25MHz: - /* Valid Ref Clock Rate */ - break; - default: - dev_err(cdns_phy->dev, "Unsupported Ref Clock Rate\n"); - return -EINVAL; + if (cdns_phy->nsubnodes == 1) { + /* Take the PHY lane group out of reset */ + reset_control_deassert(inst->lnk_rst); + + /* Take the PHY out of reset */ + ret = reset_control_deassert(cdns_phy->phy_rst); + if (ret) + return ret; } - cdns_torrent_dp_write(regmap, PHY_AUX_CTRL, 0x0003); /* enable AUX */ + /* + * Wait for cmn_ready assertion + * PHY_PMA_CMN_CTRL1[0] == 1 + */ + ret = regmap_field_read_poll_timeout(cdns_phy->phy_pma_cmn_ctrl_1, + read_val, read_val, 1000, + PLL_LOCK_TIMEOUT); + if (ret) { + dev_err(cdns_phy->dev, "Timeout waiting for CMN ready\n"); + return ret; + } + + if (inst->phy_type == TYPE_PCIE || inst->phy_type == TYPE_USB) { + ret = regmap_field_read_poll_timeout(cdns_phy->phy_pcs_iso_link_ctrl_1[inst->mlane], + read_val, !read_val, 1000, + PLL_LOCK_TIMEOUT); + if (ret == -ETIMEDOUT) { + dev_err(cdns_phy->dev, "Timeout waiting for PHY status ready\n"); + return ret; + } + } + + return 0; +} + +static int cdns_torrent_phy_off(struct phy *phy) +{ + struct cdns_torrent_inst *inst = phy_get_drvdata(phy); + struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent); + int ret; - /* PHY PMA registers configuration function */ - cdns_torrent_dp_pma_cfg(cdns_phy, inst); + if (cdns_phy->nsubnodes != 1) + return 0; + + ret = reset_control_assert(cdns_phy->phy_rst); + if (ret) + return ret; + + return reset_control_assert(inst->lnk_rst); +} + +static void cdns_torrent_dp_common_init(struct cdns_torrent_phy *cdns_phy, + struct cdns_torrent_inst *inst) +{ + struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg; + unsigned char lane_bits; + + cdns_torrent_dp_write(regmap, PHY_AUX_CTRL, 0x0003); /* enable AUX */ /* * Set lines power state to A0 @@ -1024,21 +1551,35 @@ static int cdns_torrent_dp_init(struct phy *phy) /* release pma_xcvr_pllclk_en_ln_*, only for the master lane */ cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_PLLCLK_EN, 0x0001); - /* PHY PMA registers configuration functions */ - /* 
Initialize PHY with max supported link rate, without SSC. */ - if (cdns_phy->ref_clk_rate == REF_CLK_19_2MHz) + /* + * PHY PMA registers configuration functions + * Initialize PHY with max supported link rate, without SSC. + */ + if (cdns_phy->ref_clk_rate == CLK_19_2_MHZ) cdns_torrent_dp_pma_cmn_vco_cfg_19_2mhz(cdns_phy, cdns_phy->max_bit_rate, false); - else if (cdns_phy->ref_clk_rate == REF_CLK_25MHz) + else if (cdns_phy->ref_clk_rate == CLK_25_MHZ) cdns_torrent_dp_pma_cmn_vco_cfg_25mhz(cdns_phy, cdns_phy->max_bit_rate, false); + else if (cdns_phy->ref_clk_rate == CLK_100_MHZ) + cdns_torrent_dp_pma_cmn_vco_cfg_100mhz(cdns_phy, + cdns_phy->max_bit_rate, + false); + cdns_torrent_dp_pma_cmn_rate(cdns_phy, cdns_phy->max_bit_rate, inst->num_lanes); /* take out of reset */ regmap_field_write(cdns_phy->phy_reset_ctrl, 0x1); +} + +static int cdns_torrent_dp_start(struct cdns_torrent_phy *cdns_phy, + struct cdns_torrent_inst *inst, + struct phy *phy) +{ + int ret; cdns_torrent_phy_on(phy); @@ -1051,615 +1592,25 @@ static int cdns_torrent_dp_init(struct phy *phy) return ret; } -static -int cdns_torrent_dp_wait_pma_cmn_ready(struct cdns_torrent_phy *cdns_phy) -{ - unsigned int reg; - int ret; - struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg; - - ret = regmap_read_poll_timeout(regmap, PHY_PMA_CMN_READY, reg, - reg & 1, 0, POLL_TIMEOUT_US); - if (ret == -ETIMEDOUT) { - dev_err(cdns_phy->dev, - "timeout waiting for PMA common ready\n"); - return -ETIMEDOUT; - } - - return 0; -} - -static void cdns_torrent_dp_pma_cfg(struct cdns_torrent_phy *cdns_phy, - struct cdns_torrent_inst *inst) -{ - unsigned int i; - - if (cdns_phy->ref_clk_rate == REF_CLK_19_2MHz) - /* PMA common configuration 19.2MHz */ - cdns_torrent_dp_pma_cmn_cfg_19_2mhz(cdns_phy); - else if (cdns_phy->ref_clk_rate == REF_CLK_25MHz) - /* PMA common configuration 25MHz */ - cdns_torrent_dp_pma_cmn_cfg_25mhz(cdns_phy); - - /* PMA lane configuration to deal with multi-link operation */ - for (i = 0; i < inst->num_lanes; i++) - cdns_torrent_dp_pma_lane_cfg(cdns_phy, i); -} - -static -void cdns_torrent_dp_pma_cmn_cfg_19_2mhz(struct cdns_torrent_phy *cdns_phy) -{ - struct regmap *regmap = cdns_phy->regmap_common_cdb; - - /* refclock registers - assumes 19.2 MHz refclock */ - cdns_torrent_phy_write(regmap, CMN_SSM_BIAS_TMR, 0x0014); - cdns_torrent_phy_write(regmap, CMN_PLLSM0_PLLPRE_TMR, 0x0027); - cdns_torrent_phy_write(regmap, CMN_PLLSM0_PLLLOCK_TMR, 0x00A1); - cdns_torrent_phy_write(regmap, CMN_PLLSM1_PLLPRE_TMR, 0x0027); - cdns_torrent_phy_write(regmap, CMN_PLLSM1_PLLLOCK_TMR, 0x00A1); - cdns_torrent_phy_write(regmap, CMN_BGCAL_INIT_TMR, 0x0060); - cdns_torrent_phy_write(regmap, CMN_BGCAL_ITER_TMR, 0x0060); - cdns_torrent_phy_write(regmap, CMN_IBCAL_INIT_TMR, 0x0014); - cdns_torrent_phy_write(regmap, CMN_TXPUCAL_INIT_TMR, 0x0018); - cdns_torrent_phy_write(regmap, CMN_TXPUCAL_ITER_TMR, 0x0005); - cdns_torrent_phy_write(regmap, CMN_TXPDCAL_INIT_TMR, 0x0018); - cdns_torrent_phy_write(regmap, CMN_TXPDCAL_ITER_TMR, 0x0005); - cdns_torrent_phy_write(regmap, CMN_RXCAL_INIT_TMR, 0x0240); - cdns_torrent_phy_write(regmap, CMN_RXCAL_ITER_TMR, 0x0005); - cdns_torrent_phy_write(regmap, CMN_SD_CAL_INIT_TMR, 0x0002); - cdns_torrent_phy_write(regmap, CMN_SD_CAL_ITER_TMR, 0x0002); - cdns_torrent_phy_write(regmap, CMN_SD_CAL_REFTIM_START, 0x000B); - cdns_torrent_phy_write(regmap, CMN_SD_CAL_PLLCNT_START, 0x0137); - - /* PLL registers */ - cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_PADJ_M0, 0x0509); - cdns_torrent_phy_write(regmap, 
CMN_PDIAG_PLL0_CP_IADJ_M0, 0x0F00); - cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_FILT_PADJ_M0, 0x0F08); - cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_DIAG_M0, 0x0004); - cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0509); - cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_IADJ_M0, 0x0F00); - cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_FILT_PADJ_M0, 0x0F08); - cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_DIAG_M0, 0x0004); - cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_INIT_TMR, 0x00C0); - cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_ITER_TMR, 0x0004); - cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_INIT_TMR, 0x00C0); - cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_ITER_TMR, 0x0004); - cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_REFTIM_START, 0x0260); - cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_TCTRL, 0x0003); - cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_REFTIM_START, 0x0260); - cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_TCTRL, 0x0003); -} - -/* - * Set registers responsible for enabling and configuring SSC, with second and - * third register values provided by parameters. - */ -static -void cdns_torrent_dp_enable_ssc_19_2mhz(struct cdns_torrent_phy *cdns_phy, - u32 ctrl2_val, u32 ctrl3_val) -{ - struct regmap *regmap = cdns_phy->regmap_common_cdb; - - cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, 0x0001); - cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, ctrl2_val); - cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, ctrl3_val); - cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL4_M0, 0x0003); - cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, 0x0001); - cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, ctrl2_val); - cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, ctrl3_val); - cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL4_M0, 0x0003); -} - -static -void cdns_torrent_dp_pma_cmn_vco_cfg_19_2mhz(struct cdns_torrent_phy *cdns_phy, - u32 rate, bool ssc) -{ - struct regmap *regmap = cdns_phy->regmap_common_cdb; - - /* Assumes 19.2 MHz refclock */ - switch (rate) { - /* Setting VCO for 10.8GHz */ - case 2700: - case 5400: - cdns_torrent_phy_write(regmap, - CMN_PLL0_INTDIV_M0, 0x0119); - cdns_torrent_phy_write(regmap, - CMN_PLL0_FRACDIVL_M0, 0x4000); - cdns_torrent_phy_write(regmap, - CMN_PLL0_FRACDIVH_M0, 0x0002); - cdns_torrent_phy_write(regmap, - CMN_PLL0_HIGH_THR_M0, 0x00BC); - cdns_torrent_phy_write(regmap, - CMN_PDIAG_PLL0_CTRL_M0, 0x0012); - cdns_torrent_phy_write(regmap, - CMN_PLL1_INTDIV_M0, 0x0119); - cdns_torrent_phy_write(regmap, - CMN_PLL1_FRACDIVL_M0, 0x4000); - cdns_torrent_phy_write(regmap, - CMN_PLL1_FRACDIVH_M0, 0x0002); - cdns_torrent_phy_write(regmap, - CMN_PLL1_HIGH_THR_M0, 0x00BC); - cdns_torrent_phy_write(regmap, - CMN_PDIAG_PLL1_CTRL_M0, 0x0012); - if (ssc) - cdns_torrent_dp_enable_ssc_19_2mhz(cdns_phy, 0x033A, - 0x006A); - break; - /* Setting VCO for 9.72GHz */ - case 1620: - case 2430: - case 3240: - cdns_torrent_phy_write(regmap, - CMN_PLL0_INTDIV_M0, 0x01FA); - cdns_torrent_phy_write(regmap, - CMN_PLL0_FRACDIVL_M0, 0x4000); - cdns_torrent_phy_write(regmap, - CMN_PLL0_FRACDIVH_M0, 0x0002); - cdns_torrent_phy_write(regmap, - CMN_PLL0_HIGH_THR_M0, 0x0152); - cdns_torrent_phy_write(regmap, - CMN_PDIAG_PLL0_CTRL_M0, 0x0002); - cdns_torrent_phy_write(regmap, - CMN_PLL1_INTDIV_M0, 0x01FA); - cdns_torrent_phy_write(regmap, - CMN_PLL1_FRACDIVL_M0, 0x4000); - cdns_torrent_phy_write(regmap, - CMN_PLL1_FRACDIVH_M0, 0x0002); - cdns_torrent_phy_write(regmap, - CMN_PLL1_HIGH_THR_M0, 0x0152); - 
cdns_torrent_phy_write(regmap, - CMN_PDIAG_PLL1_CTRL_M0, 0x0002); - if (ssc) - cdns_torrent_dp_enable_ssc_19_2mhz(cdns_phy, 0x05DD, - 0x0069); - break; - /* Setting VCO for 8.64GHz */ - case 2160: - case 4320: - cdns_torrent_phy_write(regmap, - CMN_PLL0_INTDIV_M0, 0x01C2); - cdns_torrent_phy_write(regmap, - CMN_PLL0_FRACDIVL_M0, 0x0000); - cdns_torrent_phy_write(regmap, - CMN_PLL0_FRACDIVH_M0, 0x0002); - cdns_torrent_phy_write(regmap, - CMN_PLL0_HIGH_THR_M0, 0x012C); - cdns_torrent_phy_write(regmap, - CMN_PDIAG_PLL0_CTRL_M0, 0x0002); - cdns_torrent_phy_write(regmap, - CMN_PLL1_INTDIV_M0, 0x01C2); - cdns_torrent_phy_write(regmap, - CMN_PLL1_FRACDIVL_M0, 0x0000); - cdns_torrent_phy_write(regmap, - CMN_PLL1_FRACDIVH_M0, 0x0002); - cdns_torrent_phy_write(regmap, - CMN_PLL1_HIGH_THR_M0, 0x012C); - cdns_torrent_phy_write(regmap, - CMN_PDIAG_PLL1_CTRL_M0, 0x0002); - if (ssc) - cdns_torrent_dp_enable_ssc_19_2mhz(cdns_phy, 0x0536, - 0x0069); - break; - /* Setting VCO for 8.1GHz */ - case 8100: - cdns_torrent_phy_write(regmap, - CMN_PLL0_INTDIV_M0, 0x01A5); - cdns_torrent_phy_write(regmap, - CMN_PLL0_FRACDIVL_M0, 0xE000); - cdns_torrent_phy_write(regmap, - CMN_PLL0_FRACDIVH_M0, 0x0002); - cdns_torrent_phy_write(regmap, - CMN_PLL0_HIGH_THR_M0, 0x011A); - cdns_torrent_phy_write(regmap, - CMN_PDIAG_PLL0_CTRL_M0, 0x0002); - cdns_torrent_phy_write(regmap, - CMN_PLL1_INTDIV_M0, 0x01A5); - cdns_torrent_phy_write(regmap, - CMN_PLL1_FRACDIVL_M0, 0xE000); - cdns_torrent_phy_write(regmap, - CMN_PLL1_FRACDIVH_M0, 0x0002); - cdns_torrent_phy_write(regmap, - CMN_PLL1_HIGH_THR_M0, 0x011A); - cdns_torrent_phy_write(regmap, - CMN_PDIAG_PLL1_CTRL_M0, 0x0002); - if (ssc) - cdns_torrent_dp_enable_ssc_19_2mhz(cdns_phy, 0x04D7, - 0x006A); - break; - } - - if (ssc) { - cdns_torrent_phy_write(regmap, - CMN_PLL0_VCOCAL_PLLCNT_START, 0x025E); - cdns_torrent_phy_write(regmap, - CMN_PLL0_LOCK_PLLCNT_THR, 0x0005); - cdns_torrent_phy_write(regmap, - CMN_PLL1_VCOCAL_PLLCNT_START, 0x025E); - cdns_torrent_phy_write(regmap, - CMN_PLL1_LOCK_PLLCNT_THR, 0x0005); - } else { - cdns_torrent_phy_write(regmap, - CMN_PLL0_VCOCAL_PLLCNT_START, 0x0260); - cdns_torrent_phy_write(regmap, - CMN_PLL1_VCOCAL_PLLCNT_START, 0x0260); - /* Set reset register values to disable SSC */ - cdns_torrent_phy_write(regmap, - CMN_PLL0_SS_CTRL1_M0, 0x0002); - cdns_torrent_phy_write(regmap, - CMN_PLL0_SS_CTRL2_M0, 0x0000); - cdns_torrent_phy_write(regmap, - CMN_PLL0_SS_CTRL3_M0, 0x0000); - cdns_torrent_phy_write(regmap, - CMN_PLL0_SS_CTRL4_M0, 0x0000); - cdns_torrent_phy_write(regmap, - CMN_PLL0_LOCK_PLLCNT_THR, 0x0003); - cdns_torrent_phy_write(regmap, - CMN_PLL1_SS_CTRL1_M0, 0x0002); - cdns_torrent_phy_write(regmap, - CMN_PLL1_SS_CTRL2_M0, 0x0000); - cdns_torrent_phy_write(regmap, - CMN_PLL1_SS_CTRL3_M0, 0x0000); - cdns_torrent_phy_write(regmap, - CMN_PLL1_SS_CTRL4_M0, 0x0000); - cdns_torrent_phy_write(regmap, - CMN_PLL1_LOCK_PLLCNT_THR, 0x0003); - } - - cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_REFCNT_START, 0x0099); - cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_PLLCNT_START, 0x0099); - cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_REFCNT_START, 0x0099); - cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_PLLCNT_START, 0x0099); -} - -static -void cdns_torrent_dp_pma_cmn_cfg_25mhz(struct cdns_torrent_phy *cdns_phy) -{ - struct regmap *regmap = cdns_phy->regmap_common_cdb; - - /* refclock registers - assumes 25 MHz refclock */ - cdns_torrent_phy_write(regmap, CMN_SSM_BIAS_TMR, 0x0019); - cdns_torrent_phy_write(regmap, CMN_PLLSM0_PLLPRE_TMR, 0x0032); - 
cdns_torrent_phy_write(regmap, CMN_PLLSM0_PLLLOCK_TMR, 0x00D1); - cdns_torrent_phy_write(regmap, CMN_PLLSM1_PLLPRE_TMR, 0x0032); - cdns_torrent_phy_write(regmap, CMN_PLLSM1_PLLLOCK_TMR, 0x00D1); - cdns_torrent_phy_write(regmap, CMN_BGCAL_INIT_TMR, 0x007D); - cdns_torrent_phy_write(regmap, CMN_BGCAL_ITER_TMR, 0x007D); - cdns_torrent_phy_write(regmap, CMN_IBCAL_INIT_TMR, 0x0019); - cdns_torrent_phy_write(regmap, CMN_TXPUCAL_INIT_TMR, 0x001E); - cdns_torrent_phy_write(regmap, CMN_TXPUCAL_ITER_TMR, 0x0006); - cdns_torrent_phy_write(regmap, CMN_TXPDCAL_INIT_TMR, 0x001E); - cdns_torrent_phy_write(regmap, CMN_TXPDCAL_ITER_TMR, 0x0006); - cdns_torrent_phy_write(regmap, CMN_RXCAL_INIT_TMR, 0x02EE); - cdns_torrent_phy_write(regmap, CMN_RXCAL_ITER_TMR, 0x0006); - cdns_torrent_phy_write(regmap, CMN_SD_CAL_INIT_TMR, 0x0002); - cdns_torrent_phy_write(regmap, CMN_SD_CAL_ITER_TMR, 0x0002); - cdns_torrent_phy_write(regmap, CMN_SD_CAL_REFTIM_START, 0x000E); - cdns_torrent_phy_write(regmap, CMN_SD_CAL_PLLCNT_START, 0x012B); - - /* PLL registers */ - cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_PADJ_M0, 0x0509); - cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_IADJ_M0, 0x0F00); - cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_FILT_PADJ_M0, 0x0F08); - cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_DIAG_M0, 0x0004); - cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0509); - cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_IADJ_M0, 0x0F00); - cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_FILT_PADJ_M0, 0x0F08); - cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_DIAG_M0, 0x0004); - cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_INIT_TMR, 0x00FA); - cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_ITER_TMR, 0x0004); - cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_INIT_TMR, 0x00FA); - cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_ITER_TMR, 0x0004); - cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_REFTIM_START, 0x0317); - cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_TCTRL, 0x0003); - cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_REFTIM_START, 0x0317); - cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_TCTRL, 0x0003); -} - -/* - * Set registers responsible for enabling and configuring SSC, with second - * register value provided by a parameter. 
- */ -static void cdns_torrent_dp_enable_ssc_25mhz(struct cdns_torrent_phy *cdns_phy, - u32 ctrl2_val) -{ - struct regmap *regmap = cdns_phy->regmap_common_cdb; - - cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, 0x0001); - cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL2_M0, ctrl2_val); - cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL3_M0, 0x007F); - cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL4_M0, 0x0003); - cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, 0x0001); - cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL2_M0, ctrl2_val); - cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL3_M0, 0x007F); - cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL4_M0, 0x0003); -} - -static -void cdns_torrent_dp_pma_cmn_vco_cfg_25mhz(struct cdns_torrent_phy *cdns_phy, - u32 rate, bool ssc) -{ - struct regmap *regmap = cdns_phy->regmap_common_cdb; - - /* Assumes 25 MHz refclock */ - switch (rate) { - /* Setting VCO for 10.8GHz */ - case 2700: - case 5400: - cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x01B0); - cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x0000); - cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002); - cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x0120); - cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x01B0); - cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x0000); - cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002); - cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x0120); - if (ssc) - cdns_torrent_dp_enable_ssc_25mhz(cdns_phy, 0x0423); - break; - /* Setting VCO for 9.72GHz */ - case 1620: - case 2430: - case 3240: - cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0184); - cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0xCCCD); - cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002); - cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x0104); - cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0184); - cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0xCCCD); - cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002); - cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x0104); - if (ssc) - cdns_torrent_dp_enable_ssc_25mhz(cdns_phy, 0x03B9); - break; - /* Setting VCO for 8.64GHz */ - case 2160: - case 4320: - cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0159); - cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x999A); - cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002); - cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x00E7); - cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0159); - cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x999A); - cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002); - cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x00E7); - if (ssc) - cdns_torrent_dp_enable_ssc_25mhz(cdns_phy, 0x034F); - break; - /* Setting VCO for 8.1GHz */ - case 8100: - cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0144); - cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x0000); - cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002); - cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x00D8); - cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0144); - cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x0000); - cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002); - cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x00D8); - if (ssc) - cdns_torrent_dp_enable_ssc_25mhz(cdns_phy, 0x031A); - break; - } - - cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002); -
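As a quick cross-check of the 25 MHz VCO tables above: the register names suggest VCO = refclk * (INTDIV + FRACDIVL / 2^16), and the programmed pairs line up with the commented VCO frequencies under that model. The sketch below is illustrative only, not part of this patch; the helper name is made up and kernel u64/u32 types are assumed:

static u64 dp_vco_hz(u64 refclk_hz, u32 intdiv, u32 fracdivl)
{
	/* Assumed model: VCO = refclk * (INTDIV + FRACDIVL / 65536) */
	return refclk_hz * intdiv + (refclk_hz * fracdivl) / 65536;
}

/* dp_vco_hz(25000000, 0x01B0, 0x0000) == 10.8 GHz -> rates 2700/5400      */
/* dp_vco_hz(25000000, 0x0184, 0xCCCD) ~= 9.72 GHz -> rates 1620/2430/3240 */
/* dp_vco_hz(25000000, 0x0159, 0x999A) ~= 8.64 GHz -> rates 2160/4320      */
/* dp_vco_hz(25000000, 0x0144, 0x0000) == 8.1 GHz  -> rate 8100            */

The 19.2 MHz values do not fit this simple model directly (0x0119 + 0x4000/2^16 gives 281.25, i.e. 5.4 GHz against the commented 10.8 GHz VCO), so that path presumably carries an extra factor of two elsewhere; treat the model as a sanity check for the 25 MHz tables only.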
cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002); - - if (ssc) { - cdns_torrent_phy_write(regmap, - CMN_PLL0_VCOCAL_PLLCNT_START, 0x0315); - cdns_torrent_phy_write(regmap, - CMN_PLL0_LOCK_PLLCNT_THR, 0x0005); - cdns_torrent_phy_write(regmap, - CMN_PLL1_VCOCAL_PLLCNT_START, 0x0315); - cdns_torrent_phy_write(regmap, - CMN_PLL1_LOCK_PLLCNT_THR, 0x0005); - } else { - cdns_torrent_phy_write(regmap, - CMN_PLL0_VCOCAL_PLLCNT_START, 0x0317); - cdns_torrent_phy_write(regmap, - CMN_PLL1_VCOCAL_PLLCNT_START, 0x0317); - /* Set reset register values to disable SSC */ - cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, 0x0002); - cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL2_M0, 0x0000); - cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL3_M0, 0x0000); - cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL4_M0, 0x0000); - cdns_torrent_phy_write(regmap, - CMN_PLL0_LOCK_PLLCNT_THR, 0x0003); - cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, 0x0002); - cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL2_M0, 0x0000); - cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL3_M0, 0x0000); - cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL4_M0, 0x0000); - cdns_torrent_phy_write(regmap, - CMN_PLL1_LOCK_PLLCNT_THR, 0x0003); - } - - cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_REFCNT_START, 0x00C7); - cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_PLLCNT_START, 0x00C7); - cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_REFCNT_START, 0x00C7); - cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_PLLCNT_START, 0x00C7); -} - -static void cdns_torrent_dp_pma_cmn_rate(struct cdns_torrent_phy *cdns_phy, - u32 rate, u32 num_lanes) -{ - unsigned int clk_sel_val = 0; - unsigned int hsclk_div_val = 0; - unsigned int i; - - /* 16'h0000 for single DP link configuration */ - regmap_field_write(cdns_phy->phy_pll_cfg, 0x0); - - switch (rate) { - case 1620: - clk_sel_val = 0x0f01; - hsclk_div_val = 2; - break; - case 2160: - case 2430: - case 2700: - clk_sel_val = 0x0701; - hsclk_div_val = 1; - break; - case 3240: - clk_sel_val = 0x0b00; - hsclk_div_val = 2; - break; - case 4320: - case 5400: - clk_sel_val = 0x0301; - hsclk_div_val = 0; - break; - case 8100: - clk_sel_val = 0x0200; - hsclk_div_val = 0; - break; - } - - cdns_torrent_phy_write(cdns_phy->regmap_common_cdb, - CMN_PDIAG_PLL0_CLK_SEL_M0, clk_sel_val); - cdns_torrent_phy_write(cdns_phy->regmap_common_cdb, - CMN_PDIAG_PLL1_CLK_SEL_M0, clk_sel_val); - - /* PMA lane configuration to deal with multi-link operation */ - for (i = 0; i < num_lanes; i++) - cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[i], - XCVR_DIAG_HSCLK_DIV, hsclk_div_val); -} - -static void cdns_torrent_dp_pma_lane_cfg(struct cdns_torrent_phy *cdns_phy, - unsigned int lane) -{ - /* Per lane, refclock-dependent receiver detection setting */ - if (cdns_phy->ref_clk_rate == REF_CLK_19_2MHz) - cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane], - TX_RCVDET_ST_TMR, 0x0780); - else if (cdns_phy->ref_clk_rate == REF_CLK_25MHz) - cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane], - TX_RCVDET_ST_TMR, 0x09C4); - - /* Writing Tx/Rx Power State Controllers registers */ - cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane], - TX_PSC_A0, 0x00FB); - cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane], - TX_PSC_A2, 0x04AA); - cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane], - TX_PSC_A3, 0x04AA); - cdns_torrent_phy_write(cdns_phy->regmap_rx_lane_cdb[lane], - RX_PSC_A0, 0x0000); - cdns_torrent_phy_write(cdns_phy->regmap_rx_lane_cdb[lane], - RX_PSC_A2, 0x0000); - 
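The link-rate switch in cdns_torrent_dp_pma_cmn_rate() above pairs each DP rate with a PLL clock-select word and a per-lane HSCLK divider. A table-driven equivalent is sketched below purely as an illustration; the table and field names are made up, while the values are copied verbatim from the switch:

static const struct {
	u32 rate;          /* link rate in Mbps, as passed in */
	u32 clk_sel_val;   /* written to CMN_PDIAG_PLL0/1_CLK_SEL_M0 */
	u32 hsclk_div_val; /* written to XCVR_DIAG_HSCLK_DIV, per lane */
} dp_rate_map[] = {
	{ 1620, 0x0f01, 2 },
	{ 2160, 0x0701, 1 },
	{ 2430, 0x0701, 1 },
	{ 2700, 0x0701, 1 },
	{ 3240, 0x0b00, 2 },
	{ 4320, 0x0301, 0 },
	{ 5400, 0x0301, 0 },
	{ 8100, 0x0200, 0 },
};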
cdns_torrent_phy_write(cdns_phy->regmap_rx_lane_cdb[lane], - RX_PSC_A3, 0x0000); - - cdns_torrent_phy_write(cdns_phy->regmap_rx_lane_cdb[lane], - RX_PSC_CAL, 0x0000); - - cdns_torrent_phy_write(cdns_phy->regmap_rx_lane_cdb[lane], - RX_REE_GCSM1_CTRL, 0x0000); - cdns_torrent_phy_write(cdns_phy->regmap_rx_lane_cdb[lane], - RX_REE_GCSM2_CTRL, 0x0000); - cdns_torrent_phy_write(cdns_phy->regmap_rx_lane_cdb[lane], - RX_REE_PERGCSM_CTRL, 0x0000); - - cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane], - XCVR_DIAG_BIDI_CTRL, 0x000F); - cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane], - XCVR_DIAG_PLLDRC_CTRL, 0x0001); - cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane], - XCVR_DIAG_HSCLK_SEL, 0x0000); -} - -static int cdns_torrent_dp_set_power_state(struct cdns_torrent_phy *cdns_phy, - u32 num_lanes, - enum phy_powerstate powerstate) +static int cdns_torrent_dp_init(struct phy *phy) { - /* Register value for power state for a single byte. */ - u32 value_part; - u32 value; - u32 mask; - u32 read_val; - u32 ret; - struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg; - - switch (powerstate) { - case (POWERSTATE_A0): - value_part = 0x01U; - break; - case (POWERSTATE_A2): - value_part = 0x04U; - break; - default: - /* Powerstate A3 */ - value_part = 0x08U; - break; - } + struct cdns_torrent_inst *inst = phy_get_drvdata(phy); + struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent); - /* Select values of registers and mask, depending on enabled - * lane count. - */ - switch (num_lanes) { - /* lane 0 */ - case (1): - value = value_part; - mask = 0x0000003FU; - break; - /* lanes 0-1 */ - case (2): - value = (value_part - | (value_part << 8)); - mask = 0x00003F3FU; + switch (cdns_phy->ref_clk_rate) { + case CLK_19_2_MHZ: + case CLK_25_MHZ: + case CLK_100_MHZ: + /* Valid Ref Clock Rate */ break; - /* lanes 0-3, all */ default: - value = (value_part - | (value_part << 8) - | (value_part << 16) - | (value_part << 24)); - mask = 0x3F3F3F3FU; - break; - } - - /* Set power state A<n>. */ - cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_POWER_STATE_REQ, value); - /* Wait, until PHY acknowledges power state completion. 
*/ - ret = regmap_read_poll_timeout(regmap, PHY_PMA_XCVR_POWER_STATE_ACK, - read_val, (read_val & mask) == value, 0, - POLL_TIMEOUT_US); - cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_POWER_STATE_REQ, 0x00000000); - ndelay(100); - - return ret; -} - -static int cdns_torrent_dp_run(struct cdns_torrent_phy *cdns_phy, u32 num_lanes) -{ - unsigned int read_val; - int ret; - struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg; - - /* - * waiting for ACK of pma_xcvr_pllclk_en_ln_*, only for the - * master lane - */ - ret = regmap_read_poll_timeout(regmap, PHY_PMA_XCVR_PLLCLK_EN_ACK, - read_val, read_val & 1, - 0, POLL_TIMEOUT_US); - if (ret == -ETIMEDOUT) { - dev_err(cdns_phy->dev, - "timeout waiting for link PLL clock enable ack\n"); - return ret; + dev_err(cdns_phy->dev, "Unsupported Ref Clock Rate\n"); + return -EINVAL; } - ndelay(100); - - ret = cdns_torrent_dp_set_power_state(cdns_phy, num_lanes, - POWERSTATE_A2); - if (ret) - return ret; - - ret = cdns_torrent_dp_set_power_state(cdns_phy, num_lanes, - POWERSTATE_A0); + cdns_torrent_dp_common_init(cdns_phy, inst); - return ret; + return cdns_torrent_dp_start(cdns_phy, inst, phy); } static int cdns_torrent_derived_refclk_enable(struct clk_hw *hw) @@ -1764,56 +1715,6 @@ static int cdns_torrent_derived_refclk_register(struct cdns_torrent_phy *cdns_ph return 0; } -static int cdns_torrent_phy_on(struct phy *phy) -{ - struct cdns_torrent_inst *inst = phy_get_drvdata(phy); - struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent); - u32 read_val; - int ret; - - if (cdns_phy->nsubnodes == 1) { - /* Take the PHY lane group out of reset */ - reset_control_deassert(inst->lnk_rst); - - /* Take the PHY out of reset */ - ret = reset_control_deassert(cdns_phy->phy_rst); - if (ret) - return ret; - } - - /* - * Wait for cmn_ready assertion - * PHY_PMA_CMN_CTRL1[0] == 1 - */ - ret = regmap_field_read_poll_timeout(cdns_phy->phy_pma_cmn_ctrl_1, - read_val, read_val, 1000, - PLL_LOCK_TIMEOUT); - if (ret) { - dev_err(cdns_phy->dev, "Timeout waiting for CMN ready\n"); - return ret; - } - - mdelay(10); - - return 0; -} - -static int cdns_torrent_phy_off(struct phy *phy) -{ - struct cdns_torrent_inst *inst = phy_get_drvdata(phy); - struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent); - int ret; - - if (cdns_phy->nsubnodes != 1) - return 0; - - ret = reset_control_assert(cdns_phy->phy_rst); - if (ret) - return ret; - - return reset_control_assert(inst->lnk_rst); -} - static struct regmap *cdns_regmap_init(struct device *dev, void __iomem *base, u32 block_offset, u8 reg_offset_shift, @@ -1854,6 +1755,7 @@ static int cdns_torrent_regfield_init(struct cdns_torrent_phy *cdns_phy) struct device *dev = cdns_phy->dev; struct regmap_field *field; struct regmap *regmap; + int i; regmap = cdns_phy->regmap_phy_pcs_common_cdb; field = devm_regmap_field_alloc(dev, regmap, phy_pll_cfg); @@ -1887,6 +1789,16 @@ static int cdns_torrent_regfield_init(struct cdns_torrent_phy *cdns_phy) } cdns_phy->phy_pma_pll_raw_ctrl = field; + for (i = 0; i < MAX_NUM_LANES; i++) { + regmap = cdns_phy->regmap_phy_pcs_lane_cdb[i]; + field = devm_regmap_field_alloc(dev, regmap, phy_pcs_iso_link_ctrl_1); + if (IS_ERR(field)) { + dev_err(dev, "PHY_PCS_ISO_LINK_CTRL reg field init for ln %d failed\n", i); + return PTR_ERR(field); + } + cdns_phy->phy_pcs_iso_link_ctrl_1[i] = field; + } + return 0; } @@ -1947,6 +1859,17 @@ static int cdns_torrent_regmap_init(struct cdns_torrent_phy *cdns_phy) return PTR_ERR(regmap); } cdns_phy->regmap_rx_lane_cdb[i] = regmap; + + block_offset = 
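The power-state helpers removed above pack one request byte per active lane into PHY_PMA_XCVR_POWER_STATE_REQ and poll the matching ACK bits (0x3F per byte). A minimal sketch of that packing, assuming the masks seen in the removed switch; the helper is illustrative, not driver code:

static void dp_power_state_encode(u32 value_part, u32 num_lanes,
				  u32 *value, u32 *mask)
{
	u32 lane;

	*value = 0;
	*mask = 0;
	for (lane = 0; lane < num_lanes; lane++) {
		/* One request byte per lane; ACK uses the low six bits. */
		*value |= value_part << (8 * lane);
		*mask |= 0x3FU << (8 * lane);
	}
}

For num_lanes of 1, 2 and 4 this reproduces the 0x0000003F, 0x00003F3F and 0x3F3F3F3F masks of the removed switch; the driver itself folds the 3-lane case into the 4-lane default.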
TORRENT_PHY_PCS_LANE_CDB_OFFSET(i, block_offset_shift, + reg_offset_shift); + regmap = cdns_regmap_init(dev, sd_base, block_offset, + reg_offset_shift, + &cdns_torrent_phy_pcs_lane_cdb_config[i]); + if (IS_ERR(regmap)) { + dev_err(dev, "Failed to init PHY PCS lane CDB regmap\n"); + return PTR_ERR(regmap); + } + cdns_phy->regmap_phy_pcs_lane_cdb[i] = regmap; } block_offset = TORRENT_COMMON_CDB_OFFSET; @@ -1987,6 +1910,7 @@ static int cdns_torrent_phy_init(struct phy *phy) struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent); const struct cdns_torrent_data *init_data = cdns_phy->init_data; struct cdns_torrent_vals *cmn_vals, *tx_ln_vals, *rx_ln_vals; + enum cdns_torrent_ref_clk ref_clk = cdns_phy->ref_clk_rate; struct cdns_torrent_vals *link_cmn_vals, *xcvr_diag_vals; struct cdns_torrent_inst *inst = phy_get_drvdata(phy); enum cdns_torrent_phy_type phy_type = inst->phy_type; @@ -2000,9 +1924,6 @@ static int cdns_torrent_phy_init(struct phy *phy) if (cdns_phy->nsubnodes > 1) return 0; - if (phy_type == TYPE_DP) - return cdns_torrent_dp_init(phy); - /** * Spread spectrum generation is not required or supported * for SGMII/QSGMII @@ -2052,7 +1973,7 @@ static int cdns_torrent_phy_init(struct phy *phy) } /* PMA common registers configurations */ - cmn_vals = init_data->cmn_vals[phy_type][TYPE_NONE][ssc]; + cmn_vals = init_data->cmn_vals[ref_clk][phy_type][TYPE_NONE][ssc]; if (cmn_vals) { reg_pairs = cmn_vals->reg_pairs; num_regs = cmn_vals->num_regs; @@ -2063,7 +1984,7 @@ static int cdns_torrent_phy_init(struct phy *phy) } /* PMA TX lane registers configurations */ - tx_ln_vals = init_data->tx_ln_vals[phy_type][TYPE_NONE][ssc]; + tx_ln_vals = init_data->tx_ln_vals[ref_clk][phy_type][TYPE_NONE][ssc]; if (tx_ln_vals) { reg_pairs = tx_ln_vals->reg_pairs; num_regs = tx_ln_vals->num_regs; @@ -2076,7 +1997,7 @@ static int cdns_torrent_phy_init(struct phy *phy) } /* PMA RX lane registers configurations */ - rx_ln_vals = init_data->rx_ln_vals[phy_type][TYPE_NONE][ssc]; + rx_ln_vals = init_data->rx_ln_vals[ref_clk][phy_type][TYPE_NONE][ssc]; if (rx_ln_vals) { reg_pairs = rx_ln_vals->reg_pairs; num_regs = rx_ln_vals->num_regs; @@ -2088,14 +2009,39 @@ static int cdns_torrent_phy_init(struct phy *phy) } } + if (phy_type == TYPE_DP) + return cdns_torrent_dp_init(phy); + + return 0; +} + +static const struct phy_ops cdns_torrent_phy_ops = { + .init = cdns_torrent_phy_init, + .configure = cdns_torrent_dp_configure, + .power_on = cdns_torrent_phy_on, + .power_off = cdns_torrent_phy_off, + .owner = THIS_MODULE, +}; + +static int cdns_torrent_noop_phy_on(struct phy *phy) +{ + /* Give 5ms to 10ms delay for the PIPE clock to be stable */ + usleep_range(5000, 10000); + return 0; } +static const struct phy_ops noop_ops = { + .power_on = cdns_torrent_noop_phy_on, + .owner = THIS_MODULE, +}; + static int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy) { const struct cdns_torrent_data *init_data = cdns_phy->init_data; struct cdns_torrent_vals *cmn_vals, *tx_ln_vals, *rx_ln_vals; + enum cdns_torrent_ref_clk ref_clk = cdns_phy->ref_clk_rate; struct cdns_torrent_vals *link_cmn_vals, *xcvr_diag_vals; enum cdns_torrent_phy_type phy_t1, phy_t2, tmp_phy_type; struct cdns_torrent_vals *pcs_cmn_vals; @@ -2184,7 +2130,7 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy) } /* PMA common registers configurations */ - cmn_vals = init_data->cmn_vals[phy_t1][phy_t2][ssc]; + cmn_vals = init_data->cmn_vals[ref_clk][phy_t1][phy_t2][ssc]; if (cmn_vals) { reg_pairs = 
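The recurring change in the remaining Torrent hunks is one extra array dimension: every cmn/tx/rx values table is now indexed by the reference-clock enum before the PHY type pair and SSC mode. A sketch of the assumed table shape and of the lookup-and-apply pattern follows; the bound names (NUM_REF_CLK and friends) and the cdns_reg_pairs field names are inferred from this diff, not quoted from the header:

/* Assumed field layout in struct cdns_torrent_data after this change: */
struct cdns_torrent_vals *cmn_vals[NUM_REF_CLK][NUM_PHY_TYPE][NUM_PHY_TYPE][NUM_SSC_MODE];

/* Lookup and apply, following the pattern in cdns_torrent_phy_init(): */
cmn_vals = init_data->cmn_vals[ref_clk][phy_type][TYPE_NONE][ssc];
if (cmn_vals) {
	reg_pairs = cmn_vals->reg_pairs;
	num_regs = cmn_vals->num_regs;
	for (i = 0; i < num_regs; i++)
		cdns_torrent_phy_write(cdns_phy->regmap_common_cdb,
				       reg_pairs[i].off, reg_pairs[i].val);
}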
cmn_vals->reg_pairs; num_regs = cmn_vals->num_regs; @@ -2195,7 +2141,7 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy) } /* PMA TX lane registers configurations */ - tx_ln_vals = init_data->tx_ln_vals[phy_t1][phy_t2][ssc]; + tx_ln_vals = init_data->tx_ln_vals[ref_clk][phy_t1][phy_t2][ssc]; if (tx_ln_vals) { reg_pairs = tx_ln_vals->reg_pairs; num_regs = tx_ln_vals->num_regs; @@ -2208,7 +2154,7 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy) } /* PMA RX lane registers configurations */ - rx_ln_vals = init_data->rx_ln_vals[phy_t1][phy_t2][ssc]; + rx_ln_vals = init_data->rx_ln_vals[ref_clk][phy_t1][phy_t2][ssc]; if (rx_ln_vals) { reg_pairs = rx_ln_vals->reg_pairs; num_regs = rx_ln_vals->num_regs; @@ -2286,6 +2232,7 @@ static int cdns_torrent_reset(struct cdns_torrent_phy *cdns_phy) static int cdns_torrent_clk(struct cdns_torrent_phy *cdns_phy) { struct device *dev = cdns_phy->dev; + unsigned long ref_clk_rate; int ret; cdns_phy->clk = devm_clk_get(dev, "refclk"); @@ -2300,13 +2247,29 @@ static int cdns_torrent_clk(struct cdns_torrent_phy *cdns_phy) return ret; } - cdns_phy->ref_clk_rate = clk_get_rate(cdns_phy->clk); - if (!(cdns_phy->ref_clk_rate)) { + ref_clk_rate = clk_get_rate(cdns_phy->clk); + if (!ref_clk_rate) { dev_err(cdns_phy->dev, "Failed to get ref clock rate\n"); clk_disable_unprepare(cdns_phy->clk); return -EINVAL; } + switch (ref_clk_rate) { + case REF_CLK_19_2MHZ: + cdns_phy->ref_clk_rate = CLK_19_2_MHZ; + break; + case REF_CLK_25MHZ: + cdns_phy->ref_clk_rate = CLK_25_MHZ; + break; + case REF_CLK_100MHZ: + cdns_phy->ref_clk_rate = CLK_100_MHZ; + break; + default: + dev_err(cdns_phy->dev, "Invalid Ref Clock Rate\n"); + clk_disable_unprepare(cdns_phy->clk); + return -EINVAL; + } + return 0; } @@ -2505,10 +2468,9 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev) init_dp_regmap++; } - dev_info(dev, "%d lanes, max bit rate %d.%03d Gbps\n", - cdns_phy->phys[node].num_lanes, - cdns_phy->max_bit_rate / 1000, - cdns_phy->max_bit_rate % 1000); + dev_dbg(dev, "DP max bit rate %d.%03d Gbps\n", + cdns_phy->max_bit_rate / 1000, + cdns_phy->max_bit_rate % 1000); gphy->attrs.bus_width = cdns_phy->phys[node].num_lanes; gphy->attrs.max_link_rate = cdns_phy->max_bit_rate; @@ -2540,6 +2502,17 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev) goto put_lnk_rst; } + if (cdns_phy->nsubnodes > 1) + dev_dbg(dev, "Multi-link: %s (%d lanes) & %s (%d lanes)", + cdns_torrent_get_phy_type(cdns_phy->phys[0].phy_type), + cdns_phy->phys[0].num_lanes, + cdns_torrent_get_phy_type(cdns_phy->phys[1].phy_type), + cdns_phy->phys[1].num_lanes); + else + dev_dbg(dev, "Single link: %s (%d lanes)", + cdns_torrent_get_phy_type(cdns_phy->phys[0].phy_type), + cdns_phy->phys[0].num_lanes); + return 0; put_child: @@ -2573,6 +2546,206 @@ static int cdns_torrent_phy_remove(struct platform_device *pdev) return 0; } +/* Single DisplayPort(DP) link configuration */ +static struct cdns_reg_pairs sl_dp_link_cmn_regs[] = { + {0x0000, PHY_PLL_CFG}, +}; + +static struct cdns_reg_pairs sl_dp_xcvr_diag_ln_regs[] = { + {0x0000, XCVR_DIAG_HSCLK_SEL}, + {0x0001, XCVR_DIAG_PLLDRC_CTRL} +}; + +static struct cdns_torrent_vals sl_dp_link_cmn_vals = { + .reg_pairs = sl_dp_link_cmn_regs, + .num_regs = ARRAY_SIZE(sl_dp_link_cmn_regs), +}; + +static struct cdns_torrent_vals sl_dp_xcvr_diag_ln_vals = { + .reg_pairs = sl_dp_xcvr_diag_ln_regs, + .num_regs = ARRAY_SIZE(sl_dp_xcvr_diag_ln_regs), +}; + +/* Single DP, 19.2 MHz Ref clk, no SSC */ +static struct 
cdns_reg_pairs sl_dp_19_2_no_ssc_cmn_regs[] = { + {0x0014, CMN_SSM_BIAS_TMR}, + {0x0027, CMN_PLLSM0_PLLPRE_TMR}, + {0x00A1, CMN_PLLSM0_PLLLOCK_TMR}, + {0x0027, CMN_PLLSM1_PLLPRE_TMR}, + {0x00A1, CMN_PLLSM1_PLLLOCK_TMR}, + {0x0060, CMN_BGCAL_INIT_TMR}, + {0x0060, CMN_BGCAL_ITER_TMR}, + {0x0014, CMN_IBCAL_INIT_TMR}, + {0x0018, CMN_TXPUCAL_INIT_TMR}, + {0x0005, CMN_TXPUCAL_ITER_TMR}, + {0x0018, CMN_TXPDCAL_INIT_TMR}, + {0x0005, CMN_TXPDCAL_ITER_TMR}, + {0x0240, CMN_RXCAL_INIT_TMR}, + {0x0005, CMN_RXCAL_ITER_TMR}, + {0x0002, CMN_SD_CAL_INIT_TMR}, + {0x0002, CMN_SD_CAL_ITER_TMR}, + {0x000B, CMN_SD_CAL_REFTIM_START}, + {0x0137, CMN_SD_CAL_PLLCNT_START}, + {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0}, + {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M0}, + {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M0}, + {0x0004, CMN_PLL0_DSM_DIAG_M0}, + {0x0509, CMN_PDIAG_PLL1_CP_PADJ_M0}, + {0x0F00, CMN_PDIAG_PLL1_CP_IADJ_M0}, + {0x0F08, CMN_PDIAG_PLL1_FILT_PADJ_M0}, + {0x0004, CMN_PLL1_DSM_DIAG_M0}, + {0x00C0, CMN_PLL0_VCOCAL_INIT_TMR}, + {0x0004, CMN_PLL0_VCOCAL_ITER_TMR}, + {0x00C0, CMN_PLL1_VCOCAL_INIT_TMR}, + {0x0004, CMN_PLL1_VCOCAL_ITER_TMR}, + {0x0260, CMN_PLL0_VCOCAL_REFTIM_START}, + {0x0003, CMN_PLL0_VCOCAL_TCTRL}, + {0x0260, CMN_PLL1_VCOCAL_REFTIM_START}, + {0x0003, CMN_PLL1_VCOCAL_TCTRL} +}; + +static struct cdns_reg_pairs sl_dp_19_2_no_ssc_tx_ln_regs[] = { + {0x0780, TX_RCVDET_ST_TMR}, + {0x00FB, TX_PSC_A0}, + {0x04AA, TX_PSC_A2}, + {0x04AA, TX_PSC_A3}, + {0x000F, XCVR_DIAG_BIDI_CTRL} +}; + +static struct cdns_reg_pairs sl_dp_19_2_no_ssc_rx_ln_regs[] = { + {0x0000, RX_PSC_A0}, + {0x0000, RX_PSC_A2}, + {0x0000, RX_PSC_A3}, + {0x0000, RX_PSC_CAL}, + {0x0000, RX_REE_GCSM1_CTRL}, + {0x0000, RX_REE_GCSM2_CTRL}, + {0x0000, RX_REE_PERGCSM_CTRL} +}; + +static struct cdns_torrent_vals sl_dp_19_2_no_ssc_cmn_vals = { + .reg_pairs = sl_dp_19_2_no_ssc_cmn_regs, + .num_regs = ARRAY_SIZE(sl_dp_19_2_no_ssc_cmn_regs), +}; + +static struct cdns_torrent_vals sl_dp_19_2_no_ssc_tx_ln_vals = { + .reg_pairs = sl_dp_19_2_no_ssc_tx_ln_regs, + .num_regs = ARRAY_SIZE(sl_dp_19_2_no_ssc_tx_ln_regs), +}; + +static struct cdns_torrent_vals sl_dp_19_2_no_ssc_rx_ln_vals = { + .reg_pairs = sl_dp_19_2_no_ssc_rx_ln_regs, + .num_regs = ARRAY_SIZE(sl_dp_19_2_no_ssc_rx_ln_regs), +}; + +/* Single DP, 25 MHz Ref clk, no SSC */ +static struct cdns_reg_pairs sl_dp_25_no_ssc_cmn_regs[] = { + {0x0019, CMN_SSM_BIAS_TMR}, + {0x0032, CMN_PLLSM0_PLLPRE_TMR}, + {0x00D1, CMN_PLLSM0_PLLLOCK_TMR}, + {0x0032, CMN_PLLSM1_PLLPRE_TMR}, + {0x00D1, CMN_PLLSM1_PLLLOCK_TMR}, + {0x007D, CMN_BGCAL_INIT_TMR}, + {0x007D, CMN_BGCAL_ITER_TMR}, + {0x0019, CMN_IBCAL_INIT_TMR}, + {0x001E, CMN_TXPUCAL_INIT_TMR}, + {0x0006, CMN_TXPUCAL_ITER_TMR}, + {0x001E, CMN_TXPDCAL_INIT_TMR}, + {0x0006, CMN_TXPDCAL_ITER_TMR}, + {0x02EE, CMN_RXCAL_INIT_TMR}, + {0x0006, CMN_RXCAL_ITER_TMR}, + {0x0002, CMN_SD_CAL_INIT_TMR}, + {0x0002, CMN_SD_CAL_ITER_TMR}, + {0x000E, CMN_SD_CAL_REFTIM_START}, + {0x012B, CMN_SD_CAL_PLLCNT_START}, + {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0}, + {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M0}, + {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M0}, + {0x0004, CMN_PLL0_DSM_DIAG_M0}, + {0x0509, CMN_PDIAG_PLL1_CP_PADJ_M0}, + {0x0F00, CMN_PDIAG_PLL1_CP_IADJ_M0}, + {0x0F08, CMN_PDIAG_PLL1_FILT_PADJ_M0}, + {0x0004, CMN_PLL1_DSM_DIAG_M0}, + {0x00FA, CMN_PLL0_VCOCAL_INIT_TMR}, + {0x0004, CMN_PLL0_VCOCAL_ITER_TMR}, + {0x00FA, CMN_PLL1_VCOCAL_INIT_TMR}, + {0x0004, CMN_PLL1_VCOCAL_ITER_TMR}, + {0x0317, CMN_PLL0_VCOCAL_REFTIM_START}, + {0x0003, CMN_PLL0_VCOCAL_TCTRL}, + {0x0317, CMN_PLL1_VCOCAL_REFTIM_START}, + {0x0003, 
CMN_PLL1_VCOCAL_TCTRL} +}; + +static struct cdns_reg_pairs sl_dp_25_no_ssc_tx_ln_regs[] = { + {0x09C4, TX_RCVDET_ST_TMR}, + {0x00FB, TX_PSC_A0}, + {0x04AA, TX_PSC_A2}, + {0x04AA, TX_PSC_A3}, + {0x000F, XCVR_DIAG_BIDI_CTRL} +}; + +static struct cdns_reg_pairs sl_dp_25_no_ssc_rx_ln_regs[] = { + {0x0000, RX_PSC_A0}, + {0x0000, RX_PSC_A2}, + {0x0000, RX_PSC_A3}, + {0x0000, RX_PSC_CAL}, + {0x0000, RX_REE_GCSM1_CTRL}, + {0x0000, RX_REE_GCSM2_CTRL}, + {0x0000, RX_REE_PERGCSM_CTRL} +}; + +static struct cdns_torrent_vals sl_dp_25_no_ssc_cmn_vals = { + .reg_pairs = sl_dp_25_no_ssc_cmn_regs, + .num_regs = ARRAY_SIZE(sl_dp_25_no_ssc_cmn_regs), +}; + +static struct cdns_torrent_vals sl_dp_25_no_ssc_tx_ln_vals = { + .reg_pairs = sl_dp_25_no_ssc_tx_ln_regs, + .num_regs = ARRAY_SIZE(sl_dp_25_no_ssc_tx_ln_regs), +}; + +static struct cdns_torrent_vals sl_dp_25_no_ssc_rx_ln_vals = { + .reg_pairs = sl_dp_25_no_ssc_rx_ln_regs, + .num_regs = ARRAY_SIZE(sl_dp_25_no_ssc_rx_ln_regs), +}; + +/* Single DP, 100 MHz Ref clk, no SSC */ +static struct cdns_reg_pairs sl_dp_100_no_ssc_cmn_regs[] = { + {0x0003, CMN_PLL0_VCOCAL_TCTRL}, + {0x0003, CMN_PLL1_VCOCAL_TCTRL} +}; + +static struct cdns_reg_pairs sl_dp_100_no_ssc_tx_ln_regs[] = { + {0x00FB, TX_PSC_A0}, + {0x04AA, TX_PSC_A2}, + {0x04AA, TX_PSC_A3}, + {0x000F, XCVR_DIAG_BIDI_CTRL} +}; + +static struct cdns_reg_pairs sl_dp_100_no_ssc_rx_ln_regs[] = { + {0x0000, RX_PSC_A0}, + {0x0000, RX_PSC_A2}, + {0x0000, RX_PSC_A3}, + {0x0000, RX_PSC_CAL}, + {0x0000, RX_REE_GCSM1_CTRL}, + {0x0000, RX_REE_GCSM2_CTRL}, + {0x0000, RX_REE_PERGCSM_CTRL} +}; + +static struct cdns_torrent_vals sl_dp_100_no_ssc_cmn_vals = { + .reg_pairs = sl_dp_100_no_ssc_cmn_regs, + .num_regs = ARRAY_SIZE(sl_dp_100_no_ssc_cmn_regs), +}; + +static struct cdns_torrent_vals sl_dp_100_no_ssc_tx_ln_vals = { + .reg_pairs = sl_dp_100_no_ssc_tx_ln_regs, + .num_regs = ARRAY_SIZE(sl_dp_100_no_ssc_tx_ln_regs), +}; + +static struct cdns_torrent_vals sl_dp_100_no_ssc_rx_ln_vals = { + .reg_pairs = sl_dp_100_no_ssc_rx_ln_regs, + .num_regs = ARRAY_SIZE(sl_dp_100_no_ssc_rx_ln_regs), +}; + /* USB and SGMII/QSGMII link configuration */ static struct cdns_reg_pairs usb_sgmii_link_cmn_regs[] = { {0x0002, PHY_PLL_CFG}, @@ -3311,6 +3484,11 @@ static const struct cdns_torrent_data cdns_map_torrent = { .block_offset_shift = 0x2, .reg_offset_shift = 0x2, .link_cmn_vals = { + [TYPE_DP] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_dp_link_cmn_vals, + }, + }, [TYPE_PCIE] = { [TYPE_NONE] = { [NO_SSC] = NULL, @@ -3387,6 +3565,11 @@ static const struct cdns_torrent_data cdns_map_torrent = { }, }, .xcvr_diag_vals = { + [TYPE_DP] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_dp_xcvr_diag_ln_vals, + }, + }, [TYPE_PCIE] = { [TYPE_NONE] = { [NO_SSC] = NULL, @@ -3487,230 +3670,293 @@ static const struct cdns_torrent_data cdns_map_torrent = { }, }, .cmn_vals = { - [TYPE_PCIE] = { - [TYPE_NONE] = { - [NO_SSC] = NULL, - [EXTERNAL_SSC] = NULL, - [INTERNAL_SSC] = &sl_pcie_100_int_ssc_cmn_vals, - }, - [TYPE_SGMII] = { - [NO_SSC] = &pcie_100_no_ssc_cmn_vals, - [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals, - [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals, - }, - [TYPE_QSGMII] = { - [NO_SSC] = &pcie_100_no_ssc_cmn_vals, - [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals, - [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals, - }, - [TYPE_USB] = { - [NO_SSC] = &pcie_100_no_ssc_cmn_vals, - [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals, - [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals, - }, - }, - [TYPE_SGMII] = { - [TYPE_NONE] = { - [NO_SSC] = &sl_sgmii_100_no_ssc_cmn_vals, - }, - [TYPE_PCIE] 
= { - [NO_SSC] = &sgmii_100_no_ssc_cmn_vals, - [EXTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals, - [INTERNAL_SSC] = &sgmii_100_int_ssc_cmn_vals, - }, - [TYPE_USB] = { - [NO_SSC] = &sgmii_100_no_ssc_cmn_vals, - [EXTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals, - [INTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals, + [CLK_19_2_MHZ] = { + [TYPE_DP] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_dp_19_2_no_ssc_cmn_vals, + }, }, }, - [TYPE_QSGMII] = { - [TYPE_NONE] = { - [NO_SSC] = &sl_qsgmii_100_no_ssc_cmn_vals, - }, - [TYPE_PCIE] = { - [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals, - [EXTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals, - [INTERNAL_SSC] = &qsgmii_100_int_ssc_cmn_vals, - }, - [TYPE_USB] = { - [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals, - [EXTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals, - [INTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals, + [CLK_25_MHZ] = { + [TYPE_DP] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_dp_25_no_ssc_cmn_vals, + }, }, }, - [TYPE_USB] = { - [TYPE_NONE] = { - [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals, - [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals, - [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals, + [CLK_100_MHZ] = { + [TYPE_DP] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_dp_100_no_ssc_cmn_vals, + }, }, [TYPE_PCIE] = { - [NO_SSC] = &usb_100_no_ssc_cmn_vals, - [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals, - [INTERNAL_SSC] = &usb_100_int_ssc_cmn_vals, + [TYPE_NONE] = { + [NO_SSC] = NULL, + [EXTERNAL_SSC] = NULL, + [INTERNAL_SSC] = &sl_pcie_100_int_ssc_cmn_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &pcie_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &pcie_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &pcie_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals, + }, }, [TYPE_SGMII] = { - [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals, - [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals, - [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals, + [TYPE_NONE] = { + [NO_SSC] = &sl_sgmii_100_no_ssc_cmn_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &sgmii_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &sgmii_100_int_ssc_cmn_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &sgmii_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals, + }, }, [TYPE_QSGMII] = { - [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals, - [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals, - [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals, - }, - }, - }, - .tx_ln_vals = { - [TYPE_PCIE] = { - [TYPE_NONE] = { - [NO_SSC] = NULL, - [EXTERNAL_SSC] = NULL, - [INTERNAL_SSC] = NULL, - }, - [TYPE_SGMII] = { - [NO_SSC] = NULL, - [EXTERNAL_SSC] = NULL, - [INTERNAL_SSC] = NULL, - }, - [TYPE_QSGMII] = { - [NO_SSC] = NULL, - [EXTERNAL_SSC] = NULL, - [INTERNAL_SSC] = NULL, + [TYPE_NONE] = { + [NO_SSC] = &sl_qsgmii_100_no_ssc_cmn_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &qsgmii_100_int_ssc_cmn_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals, + }, }, [TYPE_USB] = { - [NO_SSC] = NULL, - [EXTERNAL_SSC] = NULL, - [INTERNAL_SSC] = NULL, + [TYPE_NONE] = { + [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = 
&sl_usb_100_int_ssc_cmn_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &usb_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &usb_100_int_ssc_cmn_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals, + }, }, }, - [TYPE_SGMII] = { - [TYPE_NONE] = { - [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals, - }, - [TYPE_PCIE] = { - [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals, - [EXTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals, - [INTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals, - }, - [TYPE_USB] = { - [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals, - [EXTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals, - [INTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals, + }, + .tx_ln_vals = { + [CLK_19_2_MHZ] = { + [TYPE_DP] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_dp_19_2_no_ssc_tx_ln_vals, + }, }, }, - [TYPE_QSGMII] = { - [TYPE_NONE] = { - [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals, - }, - [TYPE_PCIE] = { - [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals, - [EXTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals, - [INTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals, - }, - [TYPE_USB] = { - [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals, - [EXTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals, - [INTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals, + [CLK_25_MHZ] = { + [TYPE_DP] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_dp_25_no_ssc_tx_ln_vals, + }, }, }, - [TYPE_USB] = { - [TYPE_NONE] = { - [NO_SSC] = &usb_100_no_ssc_tx_ln_vals, - [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, - [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + [CLK_100_MHZ] = { + [TYPE_DP] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_dp_100_no_ssc_tx_ln_vals, + }, }, [TYPE_PCIE] = { - [NO_SSC] = &usb_100_no_ssc_tx_ln_vals, - [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, - [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + [TYPE_NONE] = { + [NO_SSC] = NULL, + [EXTERNAL_SSC] = NULL, + [INTERNAL_SSC] = NULL, + }, + [TYPE_SGMII] = { + [NO_SSC] = NULL, + [EXTERNAL_SSC] = NULL, + [INTERNAL_SSC] = NULL, + }, + [TYPE_QSGMII] = { + [NO_SSC] = NULL, + [EXTERNAL_SSC] = NULL, + [INTERNAL_SSC] = NULL, + }, + [TYPE_USB] = { + [NO_SSC] = NULL, + [EXTERNAL_SSC] = NULL, + [INTERNAL_SSC] = NULL, + }, }, [TYPE_SGMII] = { - [NO_SSC] = &usb_100_no_ssc_tx_ln_vals, - [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, - [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + [TYPE_NONE] = { + [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals, + }, }, [TYPE_QSGMII] = { - [NO_SSC] = &usb_100_no_ssc_tx_ln_vals, - [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, - [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, - }, - }, - }, - .rx_ln_vals = { - [TYPE_PCIE] = { - [TYPE_NONE] = { - [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals, - [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, - [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, - }, - [TYPE_SGMII] = { - [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals, - [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, - [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, - }, - [TYPE_QSGMII] = { - [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals, - [EXTERNAL_SSC] = 
&pcie_100_no_ssc_rx_ln_vals, - [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [TYPE_NONE] = { + [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals, + }, }, [TYPE_USB] = { - [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals, - [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, - [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [TYPE_NONE] = { + [NO_SSC] = &usb_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &usb_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &usb_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &usb_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + }, }, }, - [TYPE_SGMII] = { - [TYPE_NONE] = { - [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals, - }, - [TYPE_PCIE] = { - [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals, - [EXTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals, - [INTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals, - }, - [TYPE_USB] = { - [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals, - [EXTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals, - [INTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + }, + .rx_ln_vals = { + [CLK_19_2_MHZ] = { + [TYPE_DP] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_dp_19_2_no_ssc_rx_ln_vals, + }, }, }, - [TYPE_QSGMII] = { - [TYPE_NONE] = { - [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, - }, - [TYPE_PCIE] = { - [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, - [EXTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, - [INTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, - }, - [TYPE_USB] = { - [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, - [EXTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, - [INTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + [CLK_25_MHZ] = { + [TYPE_DP] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_dp_25_no_ssc_rx_ln_vals, + }, }, }, - [TYPE_USB] = { - [TYPE_NONE] = { - [NO_SSC] = &usb_100_no_ssc_rx_ln_vals, - [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, - [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + [CLK_100_MHZ] = { + [TYPE_DP] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_dp_100_no_ssc_rx_ln_vals, + }, }, [TYPE_PCIE] = { - [NO_SSC] = &usb_100_no_ssc_rx_ln_vals, - [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, - [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + [TYPE_NONE] = { + [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + }, }, [TYPE_SGMII] = { - [NO_SSC] = &usb_100_no_ssc_rx_ln_vals, - [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, - [INTERNAL_SSC] = 
&usb_100_no_ssc_rx_ln_vals, + [TYPE_NONE] = { + [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + }, }, [TYPE_QSGMII] = { - [NO_SSC] = &usb_100_no_ssc_rx_ln_vals, - [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, - [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + [TYPE_NONE] = { + [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + }, + }, + [TYPE_USB] = { + [TYPE_NONE] = { + [NO_SSC] = &usb_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &usb_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &usb_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &usb_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + }, }, }, }, @@ -3720,6 +3966,11 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = { .block_offset_shift = 0x0, .reg_offset_shift = 0x1, .link_cmn_vals = { + [TYPE_DP] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_dp_link_cmn_vals, + }, + }, [TYPE_PCIE] = { [TYPE_NONE] = { [NO_SSC] = NULL, @@ -3796,6 +4047,11 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = { }, }, .xcvr_diag_vals = { + [TYPE_DP] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_dp_xcvr_diag_ln_vals, + }, + }, [TYPE_PCIE] = { [TYPE_NONE] = { [NO_SSC] = NULL, @@ -3896,230 +4152,293 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = { }, }, .cmn_vals = { - [TYPE_PCIE] = { - [TYPE_NONE] = { - [NO_SSC] = NULL, - [EXTERNAL_SSC] = NULL, - [INTERNAL_SSC] = &sl_pcie_100_int_ssc_cmn_vals, - }, - [TYPE_SGMII] = { - [NO_SSC] = &pcie_100_no_ssc_cmn_vals, - [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals, - [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals, - }, - [TYPE_QSGMII] = { - [NO_SSC] = &pcie_100_no_ssc_cmn_vals, - [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals, - [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals, - }, - [TYPE_USB] = { - [NO_SSC] = &pcie_100_no_ssc_cmn_vals, - [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals, - [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals, - }, - }, - [TYPE_SGMII] = { - [TYPE_NONE] = { - [NO_SSC] = &sl_sgmii_100_no_ssc_cmn_vals, - }, - [TYPE_PCIE] = { - [NO_SSC] = &sgmii_100_no_ssc_cmn_vals, - [EXTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals, - [INTERNAL_SSC] = &sgmii_100_int_ssc_cmn_vals, - }, - [TYPE_USB] = { - [NO_SSC] = &sgmii_100_no_ssc_cmn_vals, - [EXTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals, - [INTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals, + [CLK_19_2_MHZ] = { + [TYPE_DP] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_dp_19_2_no_ssc_cmn_vals, + }, }, }, - [TYPE_QSGMII] = { - [TYPE_NONE] = { - [NO_SSC] = &sl_qsgmii_100_no_ssc_cmn_vals, - }, - [TYPE_PCIE] = { - [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals, - 
[EXTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals, - [INTERNAL_SSC] = &qsgmii_100_int_ssc_cmn_vals, - }, - [TYPE_USB] = { - [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals, - [EXTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals, - [INTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals, + [CLK_25_MHZ] = { + [TYPE_DP] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_dp_25_no_ssc_cmn_vals, + }, }, }, - [TYPE_USB] = { - [TYPE_NONE] = { - [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals, - [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals, - [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals, + [CLK_100_MHZ] = { + [TYPE_DP] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_dp_100_no_ssc_cmn_vals, + }, }, [TYPE_PCIE] = { - [NO_SSC] = &usb_100_no_ssc_cmn_vals, - [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals, - [INTERNAL_SSC] = &usb_100_int_ssc_cmn_vals, + [TYPE_NONE] = { + [NO_SSC] = NULL, + [EXTERNAL_SSC] = NULL, + [INTERNAL_SSC] = &sl_pcie_100_int_ssc_cmn_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &pcie_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &pcie_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &pcie_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals, + }, }, [TYPE_SGMII] = { - [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals, - [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals, - [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals, + [TYPE_NONE] = { + [NO_SSC] = &sl_sgmii_100_no_ssc_cmn_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &sgmii_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &sgmii_100_int_ssc_cmn_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &sgmii_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals, + }, }, [TYPE_QSGMII] = { - [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals, - [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals, - [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals, - }, - }, - }, - .tx_ln_vals = { - [TYPE_PCIE] = { - [TYPE_NONE] = { - [NO_SSC] = NULL, - [EXTERNAL_SSC] = NULL, - [INTERNAL_SSC] = NULL, - }, - [TYPE_SGMII] = { - [NO_SSC] = NULL, - [EXTERNAL_SSC] = NULL, - [INTERNAL_SSC] = NULL, - }, - [TYPE_QSGMII] = { - [NO_SSC] = NULL, - [EXTERNAL_SSC] = NULL, - [INTERNAL_SSC] = NULL, + [TYPE_NONE] = { + [NO_SSC] = &sl_qsgmii_100_no_ssc_cmn_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &qsgmii_100_int_ssc_cmn_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals, + }, }, [TYPE_USB] = { - [NO_SSC] = NULL, - [EXTERNAL_SSC] = NULL, - [INTERNAL_SSC] = NULL, + [TYPE_NONE] = { + [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &usb_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &usb_100_int_ssc_cmn_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals, + }, }, }, - [TYPE_SGMII] = { - [TYPE_NONE] 
= { - [NO_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals, - }, - [TYPE_PCIE] = { - [NO_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals, - [EXTERNAL_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals, - [INTERNAL_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals, - }, - [TYPE_USB] = { - [NO_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals, - [EXTERNAL_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals, - [INTERNAL_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals, + }, + .tx_ln_vals = { + [CLK_19_2_MHZ] = { + [TYPE_DP] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_dp_19_2_no_ssc_tx_ln_vals, + }, }, }, - [TYPE_QSGMII] = { - [TYPE_NONE] = { - [NO_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals, - }, - [TYPE_PCIE] = { - [NO_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals, - [EXTERNAL_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals, - [INTERNAL_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals, - }, - [TYPE_USB] = { - [NO_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals, - [EXTERNAL_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals, - [INTERNAL_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals, + [CLK_25_MHZ] = { + [TYPE_DP] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_dp_25_no_ssc_tx_ln_vals, + }, }, }, - [TYPE_USB] = { - [TYPE_NONE] = { - [NO_SSC] = &usb_100_no_ssc_tx_ln_vals, - [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, - [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + [CLK_100_MHZ] = { + [TYPE_DP] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_dp_100_no_ssc_tx_ln_vals, + }, }, [TYPE_PCIE] = { - [NO_SSC] = &usb_100_no_ssc_tx_ln_vals, - [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, - [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, - }, - [TYPE_SGMII] = { - [NO_SSC] = &usb_100_no_ssc_tx_ln_vals, - [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, - [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, - }, - [TYPE_QSGMII] = { - [NO_SSC] = &usb_100_no_ssc_tx_ln_vals, - [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, - [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, - }, - }, - }, - .rx_ln_vals = { - [TYPE_PCIE] = { - [TYPE_NONE] = { - [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals, - [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, - [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [TYPE_NONE] = { + [NO_SSC] = NULL, + [EXTERNAL_SSC] = NULL, + [INTERNAL_SSC] = NULL, + }, + [TYPE_SGMII] = { + [NO_SSC] = NULL, + [EXTERNAL_SSC] = NULL, + [INTERNAL_SSC] = NULL, + }, + [TYPE_QSGMII] = { + [NO_SSC] = NULL, + [EXTERNAL_SSC] = NULL, + [INTERNAL_SSC] = NULL, + }, + [TYPE_USB] = { + [NO_SSC] = NULL, + [EXTERNAL_SSC] = NULL, + [INTERNAL_SSC] = NULL, + }, }, [TYPE_SGMII] = { - [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals, - [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, - [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [TYPE_NONE] = { + [NO_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals, + }, }, [TYPE_QSGMII] = { - [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals, - [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, - [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [TYPE_NONE] = { + [NO_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals, + }, }, 
[TYPE_USB] = { - [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals, - [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, - [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [TYPE_NONE] = { + [NO_SSC] = &usb_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &usb_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &usb_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &usb_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + }, }, }, - [TYPE_SGMII] = { - [TYPE_NONE] = { - [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals, - }, - [TYPE_PCIE] = { - [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals, - [EXTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals, - [INTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals, - }, - [TYPE_USB] = { - [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals, - [EXTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals, - [INTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + }, + .rx_ln_vals = { + [CLK_19_2_MHZ] = { + [TYPE_DP] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_dp_19_2_no_ssc_rx_ln_vals, + }, }, }, - [TYPE_QSGMII] = { - [TYPE_NONE] = { - [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, - }, - [TYPE_PCIE] = { - [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, - [EXTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, - [INTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, - }, - [TYPE_USB] = { - [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, - [EXTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, - [INTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + [CLK_25_MHZ] = { + [TYPE_DP] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_dp_25_no_ssc_rx_ln_vals, + }, }, }, - [TYPE_USB] = { - [TYPE_NONE] = { - [NO_SSC] = &usb_100_no_ssc_rx_ln_vals, - [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, - [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + [CLK_100_MHZ] = { + [TYPE_DP] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_dp_100_no_ssc_rx_ln_vals, + }, }, [TYPE_PCIE] = { - [NO_SSC] = &usb_100_no_ssc_rx_ln_vals, - [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, - [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + [TYPE_NONE] = { + [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + }, }, [TYPE_SGMII] = { - [NO_SSC] = &usb_100_no_ssc_rx_ln_vals, - [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, - [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + [TYPE_NONE] = { + [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + }, }, [TYPE_QSGMII] = { - [NO_SSC] = &usb_100_no_ssc_rx_ln_vals, - [EXTERNAL_SSC] 
= &usb_100_no_ssc_rx_ln_vals, - [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + [TYPE_NONE] = { + [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + }, + }, + [TYPE_USB] = { + [TYPE_NONE] = { + [NO_SSC] = &usb_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &usb_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &usb_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &usb_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + }, }, }, }, diff --git a/drivers/phy/mediatek/phy-mtk-hdmi.c b/drivers/phy/mediatek/phy-mtk-hdmi.c index 8ad8f717ef43..5fb4217fb8e0 100644 --- a/drivers/phy/mediatek/phy-mtk-hdmi.c +++ b/drivers/phy/mediatek/phy-mtk-hdmi.c @@ -100,7 +100,6 @@ static int mtk_hdmi_phy_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct mtk_hdmi_phy *hdmi_phy; - struct resource *mem; struct clk *ref_clk; const char *ref_clk_name; struct clk_init_data clk_init = { @@ -116,11 +115,9 @@ static int mtk_hdmi_phy_probe(struct platform_device *pdev) if (!hdmi_phy) return -ENOMEM; - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - hdmi_phy->regs = devm_ioremap_resource(dev, mem); - if (IS_ERR(hdmi_phy->regs)) { + hdmi_phy->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(hdmi_phy->regs)) return PTR_ERR(hdmi_phy->regs); - } ref_clk = devm_clk_get(dev, "pll_ref"); if (IS_ERR(ref_clk)) { diff --git a/drivers/phy/mediatek/phy-mtk-mipi-dsi.c b/drivers/phy/mediatek/phy-mtk-mipi-dsi.c index 01cf31633019..28ad9403c441 100644 --- a/drivers/phy/mediatek/phy-mtk-mipi-dsi.c +++ b/drivers/phy/mediatek/phy-mtk-mipi-dsi.c @@ -130,7 +130,6 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct mtk_mipi_tx *mipi_tx; - struct resource *mem; const char *ref_clk_name; struct clk *ref_clk; struct clk_init_data clk_init = { @@ -148,11 +147,9 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev) mipi_tx->driver_data = of_device_get_match_data(dev); - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - mipi_tx->regs = devm_ioremap_resource(dev, mem); - if (IS_ERR(mipi_tx->regs)) { + mipi_tx->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(mipi_tx->regs)) return PTR_ERR(mipi_tx->regs); - } ref_clk = devm_clk_get(dev, NULL); if (IS_ERR(ref_clk)) { @@ -203,10 +200,8 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev) phy_set_drvdata(phy, mipi_tx); phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); - if (IS_ERR(phy_provider)) { - ret = PTR_ERR(phy_provider); - return ret; - } + if (IS_ERR(phy_provider)) + return PTR_ERR(phy_provider); mipi_tx->dev = dev; diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c index 731c483a04de..cdcef865fe9e 100644 --- a/drivers/phy/mediatek/phy-mtk-tphy.c +++ b/drivers/phy/mediatek/phy-mtk-tphy.c @@ -10,11 +10,13 @@ 
#include <linux/delay.h> #include <linux/io.h> #include <linux/iopoll.h> +#include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of_address.h> #include <linux/of_device.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> +#include <linux/regmap.h> /* version V1 sub-banks offset base address */ /* banks shared by multiple phys */ @@ -27,7 +29,8 @@ #define SSUSB_SIFSLV_V1_U3PHYD 0x000 #define SSUSB_SIFSLV_V1_U3PHYA 0x200 -/* version V2 sub-banks offset base address */ +/* version V2/V3 sub-banks offset base address */ +/* V3: U2FREQ is not used anymore, but reserved */ /* u2 phy banks */ #define SSUSB_SIFSLV_V2_MISC 0x000 #define SSUSB_SIFSLV_V2_U2FREQ 0x100 @@ -40,6 +43,8 @@ #define U3P_USBPHYACR0 0x000 #define PA0_RG_U2PLL_FORCE_ON BIT(15) +#define PA0_USB20_PLL_PREDIV GENMASK(7, 6) +#define PA0_USB20_PLL_PREDIV_VAL(x) ((0x3 & (x)) << 6) #define PA0_RG_USB20_INTR_EN BIT(5) #define U3P_USBPHYACR1 0x004 @@ -51,6 +56,8 @@ #define PA1_RG_TERM_SEL_VAL(x) ((0x7 & (x)) << 8) #define U3P_USBPHYACR2 0x008 +#define PA2_RG_U2PLL_BW GENMASK(21, 19) +#define PA2_RG_U2PLL_BW_VAL(x) ((0x7 & (x)) << 19) #define PA2_RG_SIF_U2PLL_FORCE_EN BIT(18) #define U3P_USBPHYACR5 0x014 @@ -72,6 +79,14 @@ #define P2C_USB20_GPIO_MODE BIT(8) #define P2C_U2_GPIO_CTR_MSK (P2C_RG_USB20_GPIO_CTL | P2C_USB20_GPIO_MODE) +#define U3P_U2PHYA_RESV 0x030 +#define P2R_RG_U2PLL_FBDIV_26M 0x1bb13b +#define P2R_RG_U2PLL_FBDIV_48M 0x3c0000 + +#define U3P_U2PHYA_RESV1 0x044 +#define P2R_RG_U2PLL_REFCLK_SEL BIT(5) +#define P2R_RG_U2PLL_FRA_EN BIT(3) + #define U3D_U2PHYDCR0 0x060 #define P2C_RG_SIF_U2PLL_FORCE_ON BIT(24) @@ -267,14 +282,31 @@ #define RG_CDR_BIRLTD0_GEN3_MSK GENMASK(4, 0) #define RG_CDR_BIRLTD0_GEN3_VAL(x) (0x1f & (x)) +/* PHY switch between pcie/usb3/sgmii/sata */ +#define USB_PHY_SWITCH_CTRL 0x0 +#define RG_PHY_SW_TYPE GENMASK(3, 0) +#define RG_PHY_SW_PCIE 0x0 +#define RG_PHY_SW_USB3 0x1 +#define RG_PHY_SW_SGMII 0x2 +#define RG_PHY_SW_SATA 0x3 + +#define TPHY_CLKS_CNT 2 + enum mtk_phy_version { MTK_PHY_V1 = 1, MTK_PHY_V2, + MTK_PHY_V3, }; struct mtk_phy_pdata { /* avoid RX sensitivity level degradation only for mt8173 */ bool avoid_rx_sen_degradation; + /* + * workaround only for mt8195, HW fix it for others of V3, + * u2phy should use integer mode instead of fractional mode of + * 48M PLL, fix it by switching PLL to 26M from default 48M + */ + bool sw_pll_48m_to_26m; enum mtk_phy_version version; }; @@ -298,10 +330,12 @@ struct mtk_phy_instance { struct u2phy_banks u2_banks; struct u3phy_banks u3_banks; }; - struct clk *ref_clk; /* reference clock of (digital) phy */ - struct clk *da_ref_clk; /* reference clock of analog phy */ + struct clk_bulk_data clks[TPHY_CLKS_CNT]; u32 index; - u8 type; + u32 type; + struct regmap *type_sw; + u32 type_sw_reg; + u32 type_sw_index; int eye_src; int eye_vrt; int eye_term; @@ -330,6 +364,10 @@ static void hs_slew_rate_calibrate(struct mtk_tphy *tphy, int fm_out; u32 tmp; + /* HW V3 doesn't support slew rate cal anymore */ + if (tphy->pdata->version == MTK_PHY_V3) + return; + /* use force value */ if (instance->eye_src) return; @@ -450,6 +488,33 @@ static void u3_phy_instance_init(struct mtk_tphy *tphy, dev_dbg(tphy->dev, "%s(%d)\n", __func__, instance->index); } +static void u2_phy_pll_26m_set(struct mtk_tphy *tphy, + struct mtk_phy_instance *instance) +{ + struct u2phy_banks *u2_banks = &instance->u2_banks; + void __iomem *com = u2_banks->com; + u32 tmp; + + if (!tphy->pdata->sw_pll_48m_to_26m) + return; + + tmp = readl(com + U3P_USBPHYACR0); + tmp &= 
~PA0_USB20_PLL_PREDIV; + tmp |= PA0_USB20_PLL_PREDIV_VAL(0); + writel(tmp, com + U3P_USBPHYACR0); + + tmp = readl(com + U3P_USBPHYACR2); + tmp &= ~PA2_RG_U2PLL_BW; + tmp |= PA2_RG_U2PLL_BW_VAL(3); + writel(tmp, com + U3P_USBPHYACR2); + + writel(P2R_RG_U2PLL_FBDIV_26M, com + U3P_U2PHYA_RESV); + + tmp = readl(com + U3P_U2PHYA_RESV1); + tmp |= P2R_RG_U2PLL_FRA_EN | P2R_RG_U2PLL_REFCLK_SEL; + writel(tmp, com + U3P_U2PHYA_RESV1); +} + static void u2_phy_instance_init(struct mtk_tphy *tphy, struct mtk_phy_instance *instance) { @@ -509,6 +574,9 @@ static void u2_phy_instance_init(struct mtk_tphy *tphy, tmp |= PA6_RG_U2_SQTH_VAL(2); writel(tmp, com + U3P_USBPHYACR6); + /* Workaround only for mt8195, HW fix it for others (V3) */ + u2_phy_pll_26m_set(tphy, instance); + dev_dbg(tphy->dev, "%s(%d)\n", __func__, index); } @@ -878,7 +946,7 @@ static void u2_phy_props_set(struct mtk_tphy *tphy, writel(tmp, com + U3P_U2PHYBC12C); } - if (instance->eye_src) { + if (tphy->pdata->version < MTK_PHY_V3 && instance->eye_src) { tmp = readl(com + U3P_USBPHYACR5); tmp &= ~PA5_RG_U2_HSTX_SRCTRL; tmp |= PA5_RG_U2_HSTX_SRCTRL_VAL(instance->eye_src); @@ -914,24 +982,73 @@ static void u2_phy_props_set(struct mtk_tphy *tphy, } } -static int mtk_phy_init(struct phy *phy) +/* type switch for usb3/pcie/sgmii/sata */ +static int phy_type_syscon_get(struct mtk_phy_instance *instance, + struct device_node *dn) { - struct mtk_phy_instance *instance = phy_get_drvdata(phy); - struct mtk_tphy *tphy = dev_get_drvdata(phy->dev.parent); + struct of_phandle_args args; int ret; - ret = clk_prepare_enable(instance->ref_clk); - if (ret) { - dev_err(tphy->dev, "failed to enable ref_clk\n"); + /* type switch function is optional */ + if (!of_property_read_bool(dn, "mediatek,syscon-type")) + return 0; + + ret = of_parse_phandle_with_fixed_args(dn, "mediatek,syscon-type", + 2, 0, &args); + if (ret) return ret; + + instance->type_sw_reg = args.args[0]; + instance->type_sw_index = args.args[1] & 0x3; /* <=3 */ + instance->type_sw = syscon_node_to_regmap(args.np); + of_node_put(args.np); + dev_info(&instance->phy->dev, "type_sw - reg %#x, index %d\n", + instance->type_sw_reg, instance->type_sw_index); + + return PTR_ERR_OR_ZERO(instance->type_sw); +} + +static int phy_type_set(struct mtk_phy_instance *instance) +{ + int type; + u32 mask; + + if (!instance->type_sw) + return 0; + + switch (instance->type) { + case PHY_TYPE_USB3: + type = RG_PHY_SW_USB3; + break; + case PHY_TYPE_PCIE: + type = RG_PHY_SW_PCIE; + break; + case PHY_TYPE_SGMII: + type = RG_PHY_SW_SGMII; + break; + case PHY_TYPE_SATA: + type = RG_PHY_SW_SATA; + break; + case PHY_TYPE_USB2: + default: + return 0; } - ret = clk_prepare_enable(instance->da_ref_clk); - if (ret) { - dev_err(tphy->dev, "failed to enable da_ref\n"); - clk_disable_unprepare(instance->ref_clk); + mask = RG_PHY_SW_TYPE << (instance->type_sw_index * BITS_PER_BYTE); + regmap_update_bits(instance->type_sw, instance->type_sw_reg, mask, type); + + return 0; +} + +static int mtk_phy_init(struct phy *phy) +{ + struct mtk_phy_instance *instance = phy_get_drvdata(phy); + struct mtk_tphy *tphy = dev_get_drvdata(phy->dev.parent); + int ret; + + ret = clk_bulk_prepare_enable(TPHY_CLKS_CNT, instance->clks); + if (ret) return ret; - } switch (instance->type) { case PHY_TYPE_USB2: @@ -947,10 +1064,12 @@ static int mtk_phy_init(struct phy *phy) case PHY_TYPE_SATA: sata_phy_instance_init(tphy, instance); break; + case PHY_TYPE_SGMII: + /* nothing to do, only used to set type */ + break; default: dev_err(tphy->dev, 
"incompatible PHY type\n"); - clk_disable_unprepare(instance->ref_clk); - clk_disable_unprepare(instance->da_ref_clk); + clk_bulk_disable_unprepare(TPHY_CLKS_CNT, instance->clks); return -EINVAL; } @@ -993,8 +1112,7 @@ static int mtk_phy_exit(struct phy *phy) if (instance->type == PHY_TYPE_USB2) u2_phy_instance_exit(tphy, instance); - clk_disable_unprepare(instance->ref_clk); - clk_disable_unprepare(instance->da_ref_clk); + clk_bulk_disable_unprepare(TPHY_CLKS_CNT, instance->clks); return 0; } @@ -1037,21 +1155,27 @@ static struct phy *mtk_phy_xlate(struct device *dev, if (!(instance->type == PHY_TYPE_USB2 || instance->type == PHY_TYPE_USB3 || instance->type == PHY_TYPE_PCIE || - instance->type == PHY_TYPE_SATA)) { + instance->type == PHY_TYPE_SATA || + instance->type == PHY_TYPE_SGMII)) { dev_err(dev, "unsupported device type: %d\n", instance->type); return ERR_PTR(-EINVAL); } - if (tphy->pdata->version == MTK_PHY_V1) { + switch (tphy->pdata->version) { + case MTK_PHY_V1: phy_v1_banks_init(tphy, instance); - } else if (tphy->pdata->version == MTK_PHY_V2) { + break; + case MTK_PHY_V2: + case MTK_PHY_V3: phy_v2_banks_init(tphy, instance); - } else { + break; + default: dev_err(dev, "phy version is not supported\n"); return ERR_PTR(-EINVAL); } phy_parse_property(tphy, instance); + phy_type_set(instance); return instance->phy; } @@ -1075,17 +1199,28 @@ static const struct mtk_phy_pdata tphy_v2_pdata = { .version = MTK_PHY_V2, }; +static const struct mtk_phy_pdata tphy_v3_pdata = { + .version = MTK_PHY_V3, +}; + static const struct mtk_phy_pdata mt8173_pdata = { .avoid_rx_sen_degradation = true, .version = MTK_PHY_V1, }; +static const struct mtk_phy_pdata mt8195_pdata = { + .sw_pll_48m_to_26m = true, + .version = MTK_PHY_V3, +}; + static const struct of_device_id mtk_tphy_id_table[] = { { .compatible = "mediatek,mt2701-u3phy", .data = &tphy_v1_pdata }, { .compatible = "mediatek,mt2712-u3phy", .data = &tphy_v2_pdata }, { .compatible = "mediatek,mt8173-u3phy", .data = &mt8173_pdata }, + { .compatible = "mediatek,mt8195-tphy", .data = &mt8195_pdata }, { .compatible = "mediatek,generic-tphy-v1", .data = &tphy_v1_pdata }, { .compatible = "mediatek,generic-tphy-v2", .data = &tphy_v2_pdata }, + { .compatible = "mediatek,generic-tphy-v3", .data = &tphy_v3_pdata }, { }, }; MODULE_DEVICE_TABLE(of, mtk_tphy_id_table); @@ -1129,16 +1264,21 @@ static int mtk_tphy_probe(struct platform_device *pdev) } } - tphy->src_ref_clk = U3P_REF_CLK; - tphy->src_coef = U3P_SLEW_RATE_COEF; - /* update parameters of slew rate calibrate if exist */ - device_property_read_u32(dev, "mediatek,src-ref-clk-mhz", - &tphy->src_ref_clk); - device_property_read_u32(dev, "mediatek,src-coef", &tphy->src_coef); + if (tphy->pdata->version < MTK_PHY_V3) { + tphy->src_ref_clk = U3P_REF_CLK; + tphy->src_coef = U3P_SLEW_RATE_COEF; + /* update parameters of slew rate calibrate if exist */ + device_property_read_u32(dev, "mediatek,src-ref-clk-mhz", + &tphy->src_ref_clk); + device_property_read_u32(dev, "mediatek,src-coef", + &tphy->src_coef); + } port = 0; for_each_child_of_node(np, child_np) { struct mtk_phy_instance *instance; + struct clk_bulk_data *clks; + struct device *subdev; struct phy *phy; instance = devm_kzalloc(dev, sizeof(*instance), GFP_KERNEL); @@ -1156,16 +1296,16 @@ static int mtk_tphy_probe(struct platform_device *pdev) goto put_child; } + subdev = &phy->dev; retval = of_address_to_resource(child_np, 0, &res); if (retval) { - dev_err(dev, "failed to get address resource(id-%d)\n", + dev_err(subdev, "failed to get address 
resource(id-%d)\n", port); goto put_child; } - instance->port_base = devm_ioremap_resource(&phy->dev, &res); + instance->port_base = devm_ioremap_resource(subdev, &res); if (IS_ERR(instance->port_base)) { - dev_err(dev, "failed to remap phy regs\n"); retval = PTR_ERR(instance->port_base); goto put_child; } @@ -1175,20 +1315,16 @@ static int mtk_tphy_probe(struct platform_device *pdev) phy_set_drvdata(phy, instance); port++; - instance->ref_clk = devm_clk_get_optional(&phy->dev, "ref"); - if (IS_ERR(instance->ref_clk)) { - dev_err(dev, "failed to get ref_clk(id-%d)\n", port); - retval = PTR_ERR(instance->ref_clk); + clks = instance->clks; + clks[0].id = "ref"; /* digital (& analog) clock */ + clks[1].id = "da_ref"; /* analog clock */ + retval = devm_clk_bulk_get_optional(subdev, TPHY_CLKS_CNT, clks); + if (retval) goto put_child; - } - instance->da_ref_clk = - devm_clk_get_optional(&phy->dev, "da_ref"); - if (IS_ERR(instance->da_ref_clk)) { - dev_err(dev, "failed to get da_ref_clk(id-%d)\n", port); - retval = PTR_ERR(instance->da_ref_clk); + retval = phy_type_syscon_get(instance, child_np); + if (retval) goto put_child; - } } provider = devm_of_phy_provider_register(dev, mtk_phy_xlate); diff --git a/drivers/phy/mediatek/phy-mtk-ufs.c b/drivers/phy/mediatek/phy-mtk-ufs.c index 769b00b038d8..a6af06941203 100644 --- a/drivers/phy/mediatek/phy-mtk-ufs.c +++ b/drivers/phy/mediatek/phy-mtk-ufs.c @@ -31,11 +31,12 @@ #define FRC_CDR_ISO_EN BIT(19) #define CDR_ISO_EN BIT(20) +#define UFSPHY_CLKS_CNT 2 + struct ufs_mtk_phy { struct device *dev; void __iomem *mmio; - struct clk *mp_clk; - struct clk *unipro_clk; + struct clk_bulk_data clks[UFSPHY_CLKS_CNT]; }; static inline u32 mphy_readl(struct ufs_mtk_phy *phy, u32 reg) @@ -74,20 +75,11 @@ static struct ufs_mtk_phy *get_ufs_mtk_phy(struct phy *generic_phy) static int ufs_mtk_phy_clk_init(struct ufs_mtk_phy *phy) { struct device *dev = phy->dev; + struct clk_bulk_data *clks = phy->clks; - phy->unipro_clk = devm_clk_get(dev, "unipro"); - if (IS_ERR(phy->unipro_clk)) { - dev_err(dev, "failed to get clock: unipro"); - return PTR_ERR(phy->unipro_clk); - } - - phy->mp_clk = devm_clk_get(dev, "mp"); - if (IS_ERR(phy->mp_clk)) { - dev_err(dev, "failed to get clock: mp"); - return PTR_ERR(phy->mp_clk); - } - - return 0; + clks[0].id = "unipro"; + clks[1].id = "mp"; + return devm_clk_bulk_get(dev, UFSPHY_CLKS_CNT, clks); } static void ufs_mtk_phy_set_active(struct ufs_mtk_phy *phy) @@ -150,26 +142,13 @@ static int ufs_mtk_phy_power_on(struct phy *generic_phy) struct ufs_mtk_phy *phy = get_ufs_mtk_phy(generic_phy); int ret; - ret = clk_prepare_enable(phy->unipro_clk); - if (ret) { - dev_err(phy->dev, "unipro_clk enable failed %d\n", ret); - goto out; - } - - ret = clk_prepare_enable(phy->mp_clk); - if (ret) { - dev_err(phy->dev, "mp_clk enable failed %d\n", ret); - goto out_unprepare_unipro_clk; - } + ret = clk_bulk_prepare_enable(UFSPHY_CLKS_CNT, phy->clks); + if (ret) + return ret; ufs_mtk_phy_set_active(phy); return 0; - -out_unprepare_unipro_clk: - clk_disable_unprepare(phy->unipro_clk); -out: - return ret; } static int ufs_mtk_phy_power_off(struct phy *generic_phy) @@ -178,8 +157,7 @@ static int ufs_mtk_phy_power_off(struct phy *generic_phy) ufs_mtk_phy_set_deep_hibern(phy); - clk_disable_unprepare(phy->unipro_clk); - clk_disable_unprepare(phy->mp_clk); + clk_bulk_disable_unprepare(UFSPHY_CLKS_CNT, phy->clks); return 0; } diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c index cfe359488f5c..f14032170b1c 100644 --- 
a/drivers/phy/qualcomm/phy-qcom-qmp.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp.c @@ -234,6 +234,11 @@ static const unsigned int sdm845_ufsphy_regs_layout[QPHY_LAYOUT_SIZE] = { [QPHY_PCS_READY_STATUS] = 0x160, }; +static const unsigned int sm6115_ufsphy_regs_layout[QPHY_LAYOUT_SIZE] = { + [QPHY_START_CTRL] = 0x00, + [QPHY_PCS_READY_STATUS] = 0x168, +}; + static const unsigned int sm8250_pcie_regs_layout[QPHY_LAYOUT_SIZE] = { [QPHY_SW_RESET] = 0x00, [QPHY_START_CTRL] = 0x44, @@ -1329,6 +1334,97 @@ static const struct qmp_phy_init_tbl qmp_v3_usb3_uniphy_pcs_tbl[] = { QMP_PHY_INIT_CFG(QPHY_V3_PCS_REFGEN_REQ_CONFIG2, 0x60), }; +static const struct qmp_phy_init_tbl sm6115_ufsphy_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x0e), + QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x14), + QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x30), + QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x02), + QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08), + QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV_MODE1, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x01), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_CTRL, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_RESETSM_CNTRL, 0x20), + QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_CFG, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0xff), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x04), + QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x05), + QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82), + QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x0b), + QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16), + QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28), + QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80), + QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE1_MODE0, 0x28), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE2_MODE0, 0x02), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0xff), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE1, 0x98), + QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE1, 0x0b), + QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE1, 0x16), + QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE1, 0x28), + QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x80), + QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE1_MODE1, 0xd6), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE2_MODE1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE1, 0x32), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE1, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_INITVAL1, 0xff), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_INITVAL2, 0x00), + + /* Rate B */ + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x44), +}; + +static const struct 
qmp_phy_init_tbl sm6115_ufsphy_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x45), + QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x06), +}; + +static const struct qmp_phy_init_tbl sm6115_ufsphy_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_LVL, 0x24), + QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x0F), + QMP_PHY_INIT_CFG(QSERDES_RX_RX_INTERFACE_MODE, 0x40), + QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x1E), + QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0B), + QMP_PHY_INIT_CFG(QSERDES_RX_RX_TERM_BW, 0x5B), + QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_GAIN1_LSB, 0xFF), + QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_GAIN1_MSB, 0x3F), + QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_GAIN2_LSB, 0xFF), + QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_GAIN2_MSB, 0x3F), + QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0D), + QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SVS_SO_GAIN_HALF, 0x04), + QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SVS_SO_GAIN_QUARTER, 0x04), + QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SVS_SO_GAIN, 0x04), + QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x5B), +}; + +static const struct qmp_phy_init_tbl sm6115_ufsphy_pcs_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_RX_PWM_GEAR_BAND, 0x15), + QMP_PHY_INIT_CFG(QPHY_RX_SIGDET_CTRL2, 0x6d), + QMP_PHY_INIT_CFG(QPHY_TX_LARGE_AMP_DRV_LVL, 0x0f), + QMP_PHY_INIT_CFG(QPHY_TX_SMALL_AMP_DRV_LVL, 0x02), + QMP_PHY_INIT_CFG(QPHY_RX_MIN_STALL_NOCONFIG_TIME_CAP, 0x28), + QMP_PHY_INIT_CFG(QPHY_RX_SYM_RESYNC_CTRL, 0x03), + QMP_PHY_INIT_CFG(QPHY_TX_LARGE_AMP_POST_EMP_LVL, 0x12), + QMP_PHY_INIT_CFG(QPHY_TX_SMALL_AMP_POST_EMP_LVL, 0x0f), + QMP_PHY_INIT_CFG(QPHY_RX_MIN_HIBERN8_TIME, 0x9a), /* 8 us */ +}; + static const struct qmp_phy_init_tbl sdm845_ufsphy_serdes_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02), QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x04), @@ -2035,6 +2131,113 @@ static const struct qmp_phy_init_tbl qmp_v4_dp_tx_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_V4_TX_TX_EMP_POST1_LVL, 0x20), }; +static const struct qmp_phy_init_tbl sc8180x_qmp_pcie_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_CLK_SELECT, 0x34), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE1, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_IVCO, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x42), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE0, 0x24), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE2_MODE1, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE1, 0xb4), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x11), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x82), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0x55), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE0, 0x55), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x1a), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE1, 0x68), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE1, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE1, 0xaa), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE1, 0xab), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE1, 0x34), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE1, 0x14), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x16), + 
QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE1, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE1, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE1, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xca), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x18), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xa2), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_BUF_ENABLE, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_EN_CENTER, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER1, 0x31), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER2, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE0, 0xde), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE0, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE1, 0x4c), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE1, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_CLK_ENABLE1, 0x90), +}; + +static const struct qmp_phy_init_tbl sc8180x_qmp_pcie_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0x5), +}; + +static const struct qmp_phy_init_tbl sc8180x_qmp_pcie_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_ENABLES, 0x1c), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x14), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL1, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x6e), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x6e), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x4a), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x70), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x17), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x54), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x37), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_LOW, 0xd4), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH, 0x54), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH2, 0xdb), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH3, 0x39), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH4, 0x31), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0x24), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xe4), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0xec), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x39), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0x7f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0xff), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0xff), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0xdb), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x75), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_AUX_DATA_TCOARSE_TFINE, 0xa0), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RCLK_AUXDATA_SEL, 0xc0), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x05), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x03), +}; + +static const struct qmp_phy_init_tbl sc8180x_qmp_pcie_pcs_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_L, 0x01), + 
QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_RATE_SLEW_CNTRL1, 0x0b), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x0d), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x01), +}; + +static const struct qmp_phy_init_tbl sc8180x_qmp_pcie_pcs_misc_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_L, 0x01), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_L, 0x01), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_INT_AUX_CLK_CONFIG1, 0x00), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_PRESET_P10_PRE, 0x00), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_PRESET_P10_POST, 0x58), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1), +}; + static const struct qmp_phy_init_tbl sm8250_qmp_pcie_serdes_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x08), QMP_PHY_INIT_CFG(QSERDES_V4_COM_CLK_SELECT, 0x34), @@ -3289,6 +3492,31 @@ static const struct qmp_phy_cfg sdm845_ufsphy_cfg = { .no_pcs_sw_reset = true, }; +static const struct qmp_phy_cfg sm6115_ufsphy_cfg = { + .type = PHY_TYPE_UFS, + .nlanes = 1, + + .serdes_tbl = sm6115_ufsphy_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(sm6115_ufsphy_serdes_tbl), + .tx_tbl = sm6115_ufsphy_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(sm6115_ufsphy_tx_tbl), + .rx_tbl = sm6115_ufsphy_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(sm6115_ufsphy_rx_tbl), + .pcs_tbl = sm6115_ufsphy_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(sm6115_ufsphy_pcs_tbl), + .clk_list = sdm845_ufs_phy_clk_l, + .num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = sm6115_ufsphy_regs_layout, + + .start_ctrl = SERDES_START, + .pwrdn_ctrl = SW_PWRDN, + + .is_dual_lane_phy = false, + .no_pcs_sw_reset = true, +}; + static const struct qmp_phy_cfg msm8998_pciephy_cfg = { .type = PHY_TYPE_PCIE, .nlanes = 1, @@ -3399,6 +3627,76 @@ static const struct qmp_phy_cfg sm8150_usb3phy_cfg = { .is_dual_lane_phy = true, }; +static const struct qmp_phy_cfg sc8180x_pciephy_cfg = { + .type = PHY_TYPE_PCIE, + .nlanes = 1, + + .serdes_tbl = sc8180x_qmp_pcie_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_serdes_tbl), + .tx_tbl = sc8180x_qmp_pcie_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(sc8180x_qmp_pcie_tx_tbl), + .rx_tbl = sc8180x_qmp_pcie_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(sc8180x_qmp_pcie_rx_tbl), + .pcs_tbl = sc8180x_qmp_pcie_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(sc8180x_qmp_pcie_pcs_tbl), + .pcs_misc_tbl = sc8180x_qmp_pcie_pcs_misc_tbl, + .pcs_misc_tbl_num = ARRAY_SIZE(sc8180x_qmp_pcie_pcs_misc_tbl), + .clk_list = sdm845_pciephy_clk_l, + .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l), + .reset_list = sdm845_pciephy_reset_l, + .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = sm8250_pcie_regs_layout, + + .start_ctrl = PCS_START | SERDES_START, + .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, + + .has_pwrdn_delay = true, + .pwrdn_delay_min = 995, /* us */ + .pwrdn_delay_max = 1005, /* us */ +}; + +static const struct qmp_phy_cfg sc8180x_dpphy_cfg = { + .type = PHY_TYPE_DP, + .nlanes = 1, + + .serdes_tbl = qmp_v4_dp_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl), + .tx_tbl = qmp_v4_dp_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(qmp_v4_dp_tx_tbl), + + .serdes_tbl_rbr = qmp_v4_dp_serdes_tbl_rbr, + .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_rbr), + .serdes_tbl_hbr = qmp_v4_dp_serdes_tbl_hbr, + .serdes_tbl_hbr_num = 
ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr), + .serdes_tbl_hbr2 = qmp_v4_dp_serdes_tbl_hbr2, + .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr2), + .serdes_tbl_hbr3 = qmp_v4_dp_serdes_tbl_hbr3, + .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr3), + + .clk_list = qmp_v3_phy_clk_l, + .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l), + .reset_list = sc7180_usb3phy_reset_l, + .num_resets = ARRAY_SIZE(sc7180_usb3phy_reset_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = qmp_v3_usb3phy_regs_layout, + + .has_phy_dp_com_ctrl = true, + .is_dual_lane_phy = true, + + .dp_aux_init = qcom_qmp_v4_phy_dp_aux_init, + .configure_dp_tx = qcom_qmp_v4_phy_configure_dp_tx, + .configure_dp_phy = qcom_qmp_v4_phy_configure_dp_phy, + .calibrate_dp_phy = qcom_qmp_v4_dp_phy_calibrate, +}; + +static const struct qmp_phy_combo_cfg sc8180x_usb3dpphy_cfg = { + .usb_cfg = &sm8150_usb3phy_cfg, + .dp_cfg = &sc8180x_dpphy_cfg, +}; + static const struct qmp_phy_cfg sm8150_usb3_uniphy_cfg = { .type = PHY_TYPE_USB3, .nlanes = 1, @@ -5018,6 +5316,7 @@ static int phy_dp_clks_register(struct qcom_qmp *qmp, struct qmp_phy *qphy, { struct clk_init_data init = { }; struct qmp_phy_dp_clks *dp_clks; + char name[64]; int ret; dp_clks = devm_kzalloc(qmp->dev, sizeof(*dp_clks), GFP_KERNEL); @@ -5027,15 +5326,17 @@ static int phy_dp_clks_register(struct qcom_qmp *qmp, struct qmp_phy *qphy, dp_clks->qphy = qphy; qphy->dp_clks = dp_clks; + snprintf(name, sizeof(name), "%s::link_clk", dev_name(qmp->dev)); init.ops = &qcom_qmp_dp_link_clk_ops; - init.name = "qmp_dp_phy_pll_link_clk"; + init.name = name; dp_clks->dp_link_hw.init = &init; ret = devm_clk_hw_register(qmp->dev, &dp_clks->dp_link_hw); if (ret) return ret; + snprintf(name, sizeof(name), "%s::vco_div_clk", dev_name(qmp->dev)); init.ops = &qcom_qmp_dp_pixel_clk_ops; - init.name = "qmp_dp_phy_pll_vco_div_clk"; + init.name = name; dp_clks->dp_pixel_hw.init = &init; ret = devm_clk_hw_register(qmp->dev, &dp_clks->dp_pixel_hw); if (ret) @@ -5226,18 +5527,27 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = { .compatible = "qcom,ipq6018-qmp-pcie-phy", .data = &ipq6018_pciephy_cfg, }, { + .compatible = "qcom,ipq6018-qmp-usb3-phy", + .data = &ipq8074_usb3phy_cfg, + }, { .compatible = "qcom,sc7180-qmp-usb3-phy", .data = &sc7180_usb3phy_cfg, }, { .compatible = "qcom,sc7180-qmp-usb3-dp-phy", /* It's a combo phy */ }, { + .compatible = "qcom,sc8180x-qmp-pcie-phy", + .data = &sc8180x_pciephy_cfg, + }, { .compatible = "qcom,sc8180x-qmp-ufs-phy", .data = &sm8150_ufsphy_cfg, }, { .compatible = "qcom,sc8180x-qmp-usb3-phy", .data = &sm8150_usb3phy_cfg, }, { + .compatible = "qcom,sc8180x-qmp-usb3-dp-phy", + /* It's a combo phy */ + }, { .compatible = "qcom,sdm845-qhp-pcie-phy", .data = &sdm845_qhp_pciephy_cfg, }, { @@ -5256,6 +5566,9 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = { .compatible = "qcom,msm8998-qmp-usb3-phy", .data = &msm8998_usb3phy_cfg, }, { + .compatible = "qcom,sm6115-qmp-ufs-phy", + .data = &sm6115_ufsphy_cfg, + }, { .compatible = "qcom,sm8150-qmp-ufs-phy", .data = &sm8150_ufsphy_cfg, }, { @@ -5314,6 +5627,10 @@ static const struct of_device_id qcom_qmp_combo_phy_of_match_table[] = { .compatible = "qcom,sm8250-qmp-usb3-dp-phy", .data = &sm8250_usb3dpphy_cfg, }, + { + .compatible = "qcom,sc8180x-qmp-usb3-dp-phy", + .data = &sc8180x_usb3dpphy_cfg, + }, { } }; diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h index 6592b58b13f6..bebeac2c091c 100644 --- 
a/drivers/phy/qualcomm/phy-qcom-qmp.h +++ b/drivers/phy/qualcomm/phy-qcom-qmp.h @@ -191,6 +191,8 @@ #define QSERDES_COM_VCO_TUNE2_MODE0 0x130 #define QSERDES_COM_VCO_TUNE1_MODE1 0x134 #define QSERDES_COM_VCO_TUNE2_MODE1 0x138 +#define QSERDES_COM_VCO_TUNE_INITVAL1 0x13c +#define QSERDES_COM_VCO_TUNE_INITVAL2 0x140 #define QSERDES_COM_VCO_TUNE_TIMER1 0x144 #define QSERDES_COM_VCO_TUNE_TIMER2 0x148 #define QSERDES_COM_BG_CTRL 0x170 @@ -220,6 +222,10 @@ /* Only for QMP V2 PHY - RX registers */ #define QSERDES_RX_UCDR_SO_GAIN_HALF 0x010 #define QSERDES_RX_UCDR_SO_GAIN 0x01c +#define QSERDES_RX_UCDR_SVS_SO_GAIN_HALF 0x030 +#define QSERDES_RX_UCDR_SVS_SO_GAIN_QUARTER 0x034 +#define QSERDES_RX_UCDR_SVS_SO_GAIN_EIGHTH 0x038 +#define QSERDES_RX_UCDR_SVS_SO_GAIN 0x03c #define QSERDES_RX_UCDR_FASTLOCK_FO_GAIN 0x040 #define QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE 0x048 #define QSERDES_RX_RX_TERM_BW 0x090 @@ -243,6 +249,10 @@ #define QPHY_POWER_DOWN_CONTROL 0x04 #define QPHY_TXDEEMPH_M6DB_V0 0x24 #define QPHY_TXDEEMPH_M3P5DB_V0 0x28 +#define QPHY_TX_LARGE_AMP_DRV_LVL 0x34 +#define QPHY_TX_LARGE_AMP_POST_EMP_LVL 0x38 +#define QPHY_TX_SMALL_AMP_DRV_LVL 0x3c +#define QPHY_TX_SMALL_AMP_POST_EMP_LVL 0x40 #define QPHY_ENDPOINT_REFCLK_DRIVE 0x54 #define QPHY_RX_IDLE_DTCT_CNTRL 0x58 #define QPHY_POWER_STATE_CONFIG1 0x60 @@ -253,6 +263,11 @@ #define QPHY_LOCK_DETECT_CONFIG3 0x88 #define QPHY_PWRUP_RESET_DLY_TIME_AUXCLK 0xa0 #define QPHY_LP_WAKEUP_DLY_TIME_AUXCLK 0xa4 +#define QPHY_RX_MIN_STALL_NOCONFIG_TIME_CAP 0xcc +#define QPHY_RX_SYM_RESYNC_CTRL 0x13c +#define QPHY_RX_MIN_HIBERN8_TIME 0x140 +#define QPHY_RX_SIGDET_CTRL2 0x148 +#define QPHY_RX_PWM_GEAR_BAND 0x154 #define QPHY_PLL_LOCK_CHK_DLY_TIME_AUXCLK_LSB 0x1A8 #define QPHY_OSC_DTCT_ACTIONS 0x1AC #define QPHY_RX_SIGDET_LVL 0x1D8 @@ -280,6 +295,8 @@ #define QSERDES_V3_COM_SSC_PER2 0x020 #define QSERDES_V3_COM_SSC_STEP_SIZE1 0x024 #define QSERDES_V3_COM_SSC_STEP_SIZE2 0x028 +#define QSERDES_V3_COM_POST_DIV 0x02c +#define QSERDES_V3_COM_POST_DIV_MUX 0x030 #define QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN 0x034 # define QSERDES_V3_COM_BIAS_EN 0x0001 # define QSERDES_V3_COM_BIAS_EN_MUX 0x0002 @@ -291,6 +308,7 @@ #define QSERDES_V3_COM_CLK_ENABLE1 0x038 #define QSERDES_V3_COM_SYS_CLK_CTRL 0x03c #define QSERDES_V3_COM_SYSCLK_BUF_ENABLE 0x040 +#define QSERDES_V3_COM_PLL_EN 0x044 #define QSERDES_V3_COM_PLL_IVCO 0x048 #define QSERDES_V3_COM_LOCK_CMP1_MODE0 0x098 #define QSERDES_V3_COM_LOCK_CMP2_MODE0 0x09c diff --git a/drivers/phy/qualcomm/phy-qcom-usb-hs.c b/drivers/phy/qualcomm/phy-qcom-usb-hs.c index 5c6c17673396..53e46c220a3a 100644 --- a/drivers/phy/qualcomm/phy-qcom-usb-hs.c +++ b/drivers/phy/qualcomm/phy-qcom-usb-hs.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * Copyright (C) 2016 Linaro Ltd */ #include <linux/module.h> diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c index fbc55232120e..9de617ca9daa 100644 --- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c +++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c @@ -64,6 +64,7 @@ /* VBCTRL */ #define USB2_VBCTRL_OCCLREN BIT(16) #define USB2_VBCTRL_DRVVBUSSEL BIT(8) +#define USB2_VBCTRL_VBOUT BIT(0) /* LINECTRL1 */ #define USB2_LINECTRL1_DPRPD_EN BIT(19) @@ -78,6 +79,10 @@ #define USB2_ADPCTRL_IDPULLUP BIT(5) /* 1 = ID sampling is enabled */ #define USB2_ADPCTRL_DRVVBUS BIT(4) +/* RZ/G2L specific */ +#define USB2_OBINT_IDCHG_EN BIT(0) +#define USB2_LINECTRL1_USB2_IDMON BIT(0) + #define NUM_OF_PHYS 4 enum rcar_gen3_phy_index { PHY_INDEX_BOTH_HC, @@ -112,9 
+117,16 @@ struct rcar_gen3_chan { struct mutex lock; /* protects rphys[...].powered */ enum usb_dr_mode dr_mode; int irq; + u32 obint_enable_bits; bool extcon_host; bool is_otg_channel; bool uses_otg_pins; + bool soc_no_adp_ctrl; +}; + +struct rcar_gen3_phy_drv_data { + const struct phy_ops *phy_usb2_ops; + bool no_adp_ctrl; }; /* @@ -172,14 +184,22 @@ static void rcar_gen3_set_linectrl(struct rcar_gen3_chan *ch, int dp, int dm) static void rcar_gen3_enable_vbus_ctrl(struct rcar_gen3_chan *ch, int vbus) { void __iomem *usb2_base = ch->base; - u32 val = readl(usb2_base + USB2_ADPCTRL); + u32 vbus_ctrl_reg = USB2_ADPCTRL; + u32 vbus_ctrl_val = USB2_ADPCTRL_DRVVBUS; + u32 val; dev_vdbg(ch->dev, "%s: %08x, %d\n", __func__, val, vbus); + if (ch->soc_no_adp_ctrl) { + vbus_ctrl_reg = USB2_VBCTRL; + vbus_ctrl_val = USB2_VBCTRL_VBOUT; + } + + val = readl(usb2_base + vbus_ctrl_reg); if (vbus) - val |= USB2_ADPCTRL_DRVVBUS; + val |= vbus_ctrl_val; else - val &= ~USB2_ADPCTRL_DRVVBUS; - writel(val, usb2_base + USB2_ADPCTRL); + val &= ~vbus_ctrl_val; + writel(val, usb2_base + vbus_ctrl_reg); } static void rcar_gen3_control_otg_irq(struct rcar_gen3_chan *ch, int enable) @@ -188,9 +208,9 @@ static void rcar_gen3_control_otg_irq(struct rcar_gen3_chan *ch, int enable) u32 val = readl(usb2_base + USB2_OBINTEN); if (ch->uses_otg_pins && enable) - val |= USB2_OBINT_BITS; + val |= ch->obint_enable_bits; else - val &= ~USB2_OBINT_BITS; + val &= ~ch->obint_enable_bits; writel(val, usb2_base + USB2_OBINTEN); } @@ -252,6 +272,9 @@ static bool rcar_gen3_check_id(struct rcar_gen3_chan *ch) if (!ch->uses_otg_pins) return (ch->dr_mode == USB_DR_MODE_HOST) ? false : true; + if (ch->soc_no_adp_ctrl) + return !!(readl(ch->base + USB2_LINECTRL1) & USB2_LINECTRL1_USB2_IDMON); + return !!(readl(ch->base + USB2_ADPCTRL) & USB2_ADPCTRL_IDDIG); } @@ -376,16 +399,17 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch) USB2_LINECTRL1_DMRPD_EN | USB2_LINECTRL1_DM_RPD; writel(val, usb2_base + USB2_LINECTRL1); - val = readl(usb2_base + USB2_VBCTRL); - val &= ~USB2_VBCTRL_OCCLREN; - writel(val | USB2_VBCTRL_DRVVBUSSEL, usb2_base + USB2_VBCTRL); - val = readl(usb2_base + USB2_ADPCTRL); - writel(val | USB2_ADPCTRL_IDPULLUP, usb2_base + USB2_ADPCTRL); - + if (!ch->soc_no_adp_ctrl) { + val = readl(usb2_base + USB2_VBCTRL); + val &= ~USB2_VBCTRL_OCCLREN; + writel(val | USB2_VBCTRL_DRVVBUSSEL, usb2_base + USB2_VBCTRL); + val = readl(usb2_base + USB2_ADPCTRL); + writel(val | USB2_ADPCTRL_IDPULLUP, usb2_base + USB2_ADPCTRL); + } msleep(20); writel(0xffffffff, usb2_base + USB2_OBINTSTA); - writel(USB2_OBINT_BITS, usb2_base + USB2_OBINTEN); + writel(ch->obint_enable_bits, usb2_base + USB2_OBINTEN); rcar_gen3_device_recognition(ch); } @@ -397,9 +421,9 @@ static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch) u32 status = readl(usb2_base + USB2_OBINTSTA); irqreturn_t ret = IRQ_NONE; - if (status & USB2_OBINT_BITS) { + if (status & ch->obint_enable_bits) { dev_vdbg(ch->dev, "%s: %08x\n", __func__, status); - writel(USB2_OBINT_BITS, usb2_base + USB2_OBINTSTA); + writel(ch->obint_enable_bits, usb2_base + USB2_OBINTSTA); rcar_gen3_device_recognition(ch); ret = IRQ_HANDLED; } @@ -535,26 +559,45 @@ static const struct phy_ops rz_g1c_phy_usb2_ops = { .owner = THIS_MODULE, }; +static const struct rcar_gen3_phy_drv_data rcar_gen3_phy_usb2_data = { + .phy_usb2_ops = &rcar_gen3_phy_usb2_ops, + .no_adp_ctrl = false, +}; + +static const struct rcar_gen3_phy_drv_data rz_g1c_phy_usb2_data = { + .phy_usb2_ops = &rz_g1c_phy_usb2_ops, + 
.no_adp_ctrl = false, +}; + +static const struct rcar_gen3_phy_drv_data rz_g2l_phy_usb2_data = { + .phy_usb2_ops = &rcar_gen3_phy_usb2_ops, + .no_adp_ctrl = true, +}; + static const struct of_device_id rcar_gen3_phy_usb2_match_table[] = { { .compatible = "renesas,usb2-phy-r8a77470", - .data = &rz_g1c_phy_usb2_ops, + .data = &rz_g1c_phy_usb2_data, }, { .compatible = "renesas,usb2-phy-r8a7795", - .data = &rcar_gen3_phy_usb2_ops, + .data = &rcar_gen3_phy_usb2_data, }, { .compatible = "renesas,usb2-phy-r8a7796", - .data = &rcar_gen3_phy_usb2_ops, + .data = &rcar_gen3_phy_usb2_data, }, { .compatible = "renesas,usb2-phy-r8a77965", - .data = &rcar_gen3_phy_usb2_ops, + .data = &rcar_gen3_phy_usb2_data, + }, + { + .compatible = "renesas,rzg2l-usb2-phy", + .data = &rz_g2l_phy_usb2_data, }, { .compatible = "renesas,rcar-gen3-usb2-phy", - .data = &rcar_gen3_phy_usb2_ops, + .data = &rcar_gen3_phy_usb2_data, }, { /* sentinel */ }, }; @@ -608,10 +651,10 @@ static enum usb_dr_mode rcar_gen3_get_dr_mode(struct device_node *np) static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) { + const struct rcar_gen3_phy_drv_data *phy_data; struct device *dev = &pdev->dev; struct rcar_gen3_chan *channel; struct phy_provider *provider; - const struct phy_ops *phy_usb2_ops; int ret = 0, i; if (!dev->of_node) { @@ -627,6 +670,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) if (IS_ERR(channel->base)) return PTR_ERR(channel->base); + channel->obint_enable_bits = USB2_OBINT_BITS; /* get irq number here and request_irq for OTG in phy_init */ channel->irq = platform_get_irq_optional(pdev, 0); channel->dr_mode = rcar_gen3_get_dr_mode(dev->of_node); @@ -653,16 +697,21 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) * And then, phy-core will manage runtime pm for this device. 
*/ pm_runtime_enable(dev); - phy_usb2_ops = of_device_get_match_data(dev); - if (!phy_usb2_ops) { + + phy_data = of_device_get_match_data(dev); + if (!phy_data) { ret = -EINVAL; goto error; } + channel->soc_no_adp_ctrl = phy_data->no_adp_ctrl; + if (phy_data->no_adp_ctrl) + channel->obint_enable_bits = USB2_OBINT_IDCHG_EN; + mutex_init(&channel->lock); for (i = 0; i < NUM_OF_PHYS; i++) { channel->rphys[i].phy = devm_phy_create(dev, NULL, - phy_usb2_ops); + phy_data->phy_usb2_ops); if (IS_ERR(channel->rphys[i].phy)) { dev_err(dev, "Failed to create USB2 PHY\n"); ret = PTR_ERR(channel->rphys[i].phy); diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c index beacac1dd253..4f569d9307b9 100644 --- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c +++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c @@ -1180,8 +1180,10 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev) next_child: /* to prevent out of boundary */ - if (++index >= rphy->phy_cfg->num_ports) + if (++index >= rphy->phy_cfg->num_ports) { + of_node_put(child_np); break; + } } provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); diff --git a/drivers/phy/samsung/Makefile b/drivers/phy/samsung/Makefile index 3959100fe8a2..65e4cc59403f 100644 --- a/drivers/phy/samsung/Makefile +++ b/drivers/phy/samsung/Makefile @@ -2,7 +2,10 @@ obj-$(CONFIG_PHY_EXYNOS_DP_VIDEO) += phy-exynos-dp-video.o obj-$(CONFIG_PHY_EXYNOS_MIPI_VIDEO) += phy-exynos-mipi-video.o obj-$(CONFIG_PHY_EXYNOS_PCIE) += phy-exynos-pcie.o -obj-$(CONFIG_PHY_SAMSUNG_UFS) += phy-samsung-ufs.o +obj-$(CONFIG_PHY_SAMSUNG_UFS) += phy-exynos-ufs.o +phy-exynos-ufs-y += phy-samsung-ufs.o +phy-exynos-ufs-y += phy-exynos7-ufs.o +phy-exynos-ufs-y += phy-exynosautov9-ufs.o obj-$(CONFIG_PHY_SAMSUNG_USB2) += phy-exynos-usb2.o phy-exynos-usb2-y += phy-samsung-usb2.o phy-exynos-usb2-$(CONFIG_PHY_EXYNOS4210_USB2) += phy-exynos4210-usb2.o diff --git a/drivers/phy/samsung/phy-exynos7-ufs.h b/drivers/phy/samsung/phy-exynos7-ufs.c index 518923141958..7c9008e163db 100644 --- a/drivers/phy/samsung/phy-exynos7-ufs.h +++ b/drivers/phy/samsung/phy-exynos7-ufs.c @@ -1,11 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +// SPDX-License-Identifier: GPL-2.0-only /* * UFS PHY driver data for Samsung EXYNOS7 SoC * * Copyright (C) 2020 Samsung Electronics Co., Ltd. */ -#ifndef _PHY_EXYNOS7_UFS_H_ -#define _PHY_EXYNOS7_UFS_H_ #include "phy-samsung-ufs.h" @@ -68,7 +66,7 @@ static const struct samsung_ufs_phy_cfg *exynos7_ufs_phy_cfgs[CFG_TAG_MAX] = { [CFG_POST_PWR_HS] = exynos7_post_pwr_hs_cfg, }; -static struct samsung_ufs_phy_drvdata exynos7_ufs_phy = { +const struct samsung_ufs_phy_drvdata exynos7_ufs_phy = { .cfg = exynos7_ufs_phy_cfgs, .isol = { .offset = EXYNOS7_EMBEDDED_COMBO_PHY_CTRL, @@ -77,5 +75,3 @@ static struct samsung_ufs_phy_drvdata exynos7_ufs_phy = { }, .has_symbol_clk = 1, }; - -#endif /* _PHY_EXYNOS7_UFS_H_ */ diff --git a/drivers/phy/samsung/phy-exynosautov9-ufs.c b/drivers/phy/samsung/phy-exynosautov9-ufs.c new file mode 100644 index 000000000000..36398a15c2db --- /dev/null +++ b/drivers/phy/samsung/phy-exynosautov9-ufs.c @@ -0,0 +1,67 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * UFS PHY driver data for Samsung EXYNOSAUTO v9 SoC + * + * Copyright (C) 2021 Samsung Electronics Co., Ltd. 
+ */ + +#include "phy-samsung-ufs.h" + +#define EXYNOSAUTOV9_EMBEDDED_COMBO_PHY_CTRL 0x728 +#define EXYNOSAUTOV9_EMBEDDED_COMBO_PHY_CTRL_MASK 0x1 +#define EXYNOSAUTOV9_EMBEDDED_COMBO_PHY_CTRL_EN BIT(0) + +#define PHY_TRSV_REG_CFG_AUTOV9(o, v, d) \ + PHY_TRSV_REG_CFG_OFFSET(o, v, d, 0x50) + +/* Calibration for phy initialization */ +static const struct samsung_ufs_phy_cfg exynosautov9_pre_init_cfg[] = { + PHY_COMN_REG_CFG(0x023, 0x80, PWR_MODE_ANY), + PHY_COMN_REG_CFG(0x01d, 0x10, PWR_MODE_ANY), + + PHY_TRSV_REG_CFG_AUTOV9(0x044, 0xb5, PWR_MODE_ANY), + PHY_TRSV_REG_CFG_AUTOV9(0x04d, 0x43, PWR_MODE_ANY), + PHY_TRSV_REG_CFG_AUTOV9(0x05b, 0x20, PWR_MODE_ANY), + PHY_TRSV_REG_CFG_AUTOV9(0x05e, 0xc0, PWR_MODE_ANY), + PHY_TRSV_REG_CFG_AUTOV9(0x038, 0x12, PWR_MODE_ANY), + PHY_TRSV_REG_CFG_AUTOV9(0x059, 0x58, PWR_MODE_ANY), + PHY_TRSV_REG_CFG_AUTOV9(0x06c, 0x18, PWR_MODE_ANY), + PHY_TRSV_REG_CFG_AUTOV9(0x06d, 0x02, PWR_MODE_ANY), + + PHY_COMN_REG_CFG(0x023, 0xc0, PWR_MODE_ANY), + PHY_COMN_REG_CFG(0x023, 0x00, PWR_MODE_ANY), + + PHY_TRSV_REG_CFG(0x042, 0x5d, PWR_MODE_ANY), + PHY_TRSV_REG_CFG(0x043, 0x80, PWR_MODE_ANY), + + END_UFS_PHY_CFG, +}; + +/* Calibration for HS mode series A/B */ +static const struct samsung_ufs_phy_cfg exynosautov9_pre_pwr_hs_cfg[] = { + PHY_TRSV_REG_CFG(0x032, 0xbc, PWR_MODE_HS_ANY), + PHY_TRSV_REG_CFG(0x03c, 0x7f, PWR_MODE_HS_ANY), + PHY_TRSV_REG_CFG(0x048, 0xc0, PWR_MODE_HS_ANY), + + PHY_TRSV_REG_CFG(0x04a, 0x00, PWR_MODE_HS_G3_SER_B), + PHY_TRSV_REG_CFG(0x04b, 0x10, PWR_MODE_HS_G1_SER_B | + PWR_MODE_HS_G3_SER_B), + PHY_TRSV_REG_CFG(0x04d, 0x63, PWR_MODE_HS_G3_SER_B), + + END_UFS_PHY_CFG, +}; + +static const struct samsung_ufs_phy_cfg *exynosautov9_ufs_phy_cfgs[CFG_TAG_MAX] = { + [CFG_PRE_INIT] = exynosautov9_pre_init_cfg, + [CFG_PRE_PWR_HS] = exynosautov9_pre_pwr_hs_cfg, +}; + +const struct samsung_ufs_phy_drvdata exynosautov9_ufs_phy = { + .cfg = exynosautov9_ufs_phy_cfgs, + .isol = { + .offset = EXYNOSAUTOV9_EMBEDDED_COMBO_PHY_CTRL, + .mask = EXYNOSAUTOV9_EMBEDDED_COMBO_PHY_CTRL_MASK, + .en = EXYNOSAUTOV9_EMBEDDED_COMBO_PHY_CTRL_EN, + }, + .has_symbol_clk = 0, +}; diff --git a/drivers/phy/samsung/phy-samsung-ufs.c b/drivers/phy/samsung/phy-samsung-ufs.c index dd9ab1519d83..602ddef259eb 100644 --- a/drivers/phy/samsung/phy-samsung-ufs.c +++ b/drivers/phy/samsung/phy-samsung-ufs.c @@ -347,6 +347,9 @@ static const struct of_device_id samsung_ufs_phy_match[] = { { .compatible = "samsung,exynos7-ufs-phy", .data = &exynos7_ufs_phy, + }, { + .compatible = "samsung,exynosautov9-ufs-phy", + .data = &exynosautov9_ufs_phy, }, {}, }; diff --git a/drivers/phy/samsung/phy-samsung-ufs.h b/drivers/phy/samsung/phy-samsung-ufs.h index 5de78710524c..91a0e9f94f98 100644 --- a/drivers/phy/samsung/phy-samsung-ufs.h +++ b/drivers/phy/samsung/phy-samsung-ufs.h @@ -10,6 +10,9 @@ #ifndef _PHY_SAMSUNG_UFS_ #define _PHY_SAMSUNG_UFS_ +#include <linux/phy/phy.h> +#include <linux/regmap.h> + #define PHY_COMN_BLK 1 #define PHY_TRSV_BLK 2 #define END_UFS_PHY_CFG { 0 } @@ -24,14 +27,17 @@ .id = PHY_COMN_BLK, \ } -#define PHY_TRSV_REG_CFG(o, v, d) { \ +#define PHY_TRSV_REG_CFG_OFFSET(o, v, d, c) { \ .off_0 = PHY_APB_ADDR((o)), \ - .off_1 = PHY_APB_ADDR((o) + PHY_TRSV_CH_OFFSET), \ + .off_1 = PHY_APB_ADDR((o) + (c)), \ .val = (v), \ .desc = (d), \ .id = PHY_TRSV_BLK, \ } +#define PHY_TRSV_REG_CFG(o, v, d) \ + PHY_TRSV_REG_CFG_OFFSET(o, v, d, PHY_TRSV_CH_OFFSET) + /* UFS PHY registers */ #define PHY_PLL_LOCK_STATUS 0x1e #define PHY_CDR_LOCK_STATUS 0x5e @@ -134,6 +140,7 @@ static inline void 
samsung_ufs_phy_ctrl_isol( phy->isol->mask, isol ? 0 : phy->isol->en); } -#include "phy-exynos7-ufs.h" +extern const struct samsung_ufs_phy_drvdata exynos7_ufs_phy; +extern const struct samsung_ufs_phy_drvdata exynosautov9_ufs_phy; #endif /* _PHY_SAMSUNG_UFS_ */ diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c index 0aadac678191..963de5913e50 100644 --- a/drivers/phy/tegra/xusb.c +++ b/drivers/phy/tegra/xusb.c @@ -1273,7 +1273,7 @@ static int tegra_xusb_padctl_remove(struct platform_device *pdev) return err; } -static int tegra_xusb_padctl_suspend_noirq(struct device *dev) +static __maybe_unused int tegra_xusb_padctl_suspend_noirq(struct device *dev) { struct tegra_xusb_padctl *padctl = dev_get_drvdata(dev); @@ -1283,7 +1283,7 @@ static int tegra_xusb_padctl_suspend_noirq(struct device *dev) return 0; } -static int tegra_xusb_padctl_resume_noirq(struct device *dev) +static __maybe_unused int tegra_xusb_padctl_resume_noirq(struct device *dev) { struct tegra_xusb_padctl *padctl = dev_get_drvdata(dev); diff --git a/drivers/phy/ti/phy-twl4030-usb.c b/drivers/phy/ti/phy-twl4030-usb.c index 5771e2486a3b..ac71017a0bc1 100644 --- a/drivers/phy/ti/phy-twl4030-usb.c +++ b/drivers/phy/ti/phy-twl4030-usb.c @@ -162,6 +162,8 @@ struct twl4030_usb { atomic_t connected; bool vbus_supplied; bool musb_mailbox_pending; + unsigned long runtime_suspended:1; + unsigned long needs_resume:1; struct delayed_work id_workaround_work; }; @@ -384,6 +386,9 @@ static void __twl4030_phy_power(struct twl4030_usb *twl, int on) WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0); } +static int twl4030_usb_runtime_suspend(struct device *dev); +static int twl4030_usb_runtime_resume(struct device *dev); + static int __maybe_unused twl4030_usb_suspend(struct device *dev) { struct twl4030_usb *twl = dev_get_drvdata(dev); @@ -395,6 +400,10 @@ static int __maybe_unused twl4030_usb_suspend(struct device *dev) */ dev_dbg(twl->dev, "%s\n", __func__); disable_irq(twl->irq); + if (!twl->runtime_suspended && !atomic_read(&twl->connected)) { + twl4030_usb_runtime_suspend(dev); + twl->needs_resume = 1; + } return 0; } @@ -405,9 +414,13 @@ static int __maybe_unused twl4030_usb_resume(struct device *dev) dev_dbg(twl->dev, "%s\n", __func__); enable_irq(twl->irq); + if (twl->needs_resume) + twl4030_usb_runtime_resume(dev); /* check whether cable status changed */ twl4030_usb_irq(0, twl); + twl->runtime_suspended = 0; + return 0; } @@ -422,6 +435,8 @@ static int __maybe_unused twl4030_usb_runtime_suspend(struct device *dev) regulator_disable(twl->usb1v8); regulator_disable(twl->usb3v1); + twl->runtime_suspended = 1; + return 0; } diff --git a/drivers/phy/xilinx/phy-zynqmp.c b/drivers/phy/xilinx/phy-zynqmp.c index 35652152ce5d..f478d8a17115 100644 --- a/drivers/phy/xilinx/phy-zynqmp.c +++ b/drivers/phy/xilinx/phy-zynqmp.c @@ -626,6 +626,9 @@ static int xpsgtr_phy_power_on(struct phy *phy) struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy); int ret = 0; + /* Skip initialization if not required. */ + if (!xpsgtr_phy_init_required(gtr_phy)) + return ret; /* * Wait for the PLL to lock. For DP, only wait on DP0 to avoid * cumulating waits for both lanes. 
The user is expected to initialize diff --git a/drivers/pinctrl/intel/pinctrl-tigerlake.c b/drivers/pinctrl/intel/pinctrl-tigerlake.c index 3e4ef2b87526..0bcd19597e4a 100644 --- a/drivers/pinctrl/intel/pinctrl-tigerlake.c +++ b/drivers/pinctrl/intel/pinctrl-tigerlake.c @@ -701,32 +701,32 @@ static const struct pinctrl_pin_desc tglh_pins[] = { static const struct intel_padgroup tglh_community0_gpps[] = { TGL_GPP(0, 0, 24, 0), /* GPP_A */ - TGL_GPP(1, 25, 44, 128), /* GPP_R */ - TGL_GPP(2, 45, 70, 32), /* GPP_B */ - TGL_GPP(3, 71, 78, INTEL_GPIO_BASE_NOMAP), /* vGPIO_0 */ + TGL_GPP(1, 25, 44, 32), /* GPP_R */ + TGL_GPP(2, 45, 70, 64), /* GPP_B */ + TGL_GPP(3, 71, 78, 96), /* vGPIO_0 */ }; static const struct intel_padgroup tglh_community1_gpps[] = { - TGL_GPP(0, 79, 104, 96), /* GPP_D */ - TGL_GPP(1, 105, 128, 64), /* GPP_C */ - TGL_GPP(2, 129, 136, 160), /* GPP_S */ - TGL_GPP(3, 137, 153, 192), /* GPP_G */ - TGL_GPP(4, 154, 180, 224), /* vGPIO */ + TGL_GPP(0, 79, 104, 128), /* GPP_D */ + TGL_GPP(1, 105, 128, 160), /* GPP_C */ + TGL_GPP(2, 129, 136, 192), /* GPP_S */ + TGL_GPP(3, 137, 153, 224), /* GPP_G */ + TGL_GPP(4, 154, 180, 256), /* vGPIO */ }; static const struct intel_padgroup tglh_community3_gpps[] = { - TGL_GPP(0, 181, 193, 256), /* GPP_E */ - TGL_GPP(1, 194, 217, 288), /* GPP_F */ + TGL_GPP(0, 181, 193, 288), /* GPP_E */ + TGL_GPP(1, 194, 217, 320), /* GPP_F */ }; static const struct intel_padgroup tglh_community4_gpps[] = { - TGL_GPP(0, 218, 241, 320), /* GPP_H */ + TGL_GPP(0, 218, 241, 352), /* GPP_H */ TGL_GPP(1, 242, 251, 384), /* GPP_J */ - TGL_GPP(2, 252, 266, 352), /* GPP_K */ + TGL_GPP(2, 252, 266, 416), /* GPP_K */ }; static const struct intel_padgroup tglh_community5_gpps[] = { - TGL_GPP(0, 267, 281, 416), /* GPP_I */ + TGL_GPP(0, 267, 281, 448), /* GPP_I */ TGL_GPP(1, 282, 290, INTEL_GPIO_BASE_NOMAP), /* JTAG */ }; diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c index 5b3b048725cc..45ebdeba985a 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c @@ -925,12 +925,10 @@ int mtk_pinconf_adv_pull_set(struct mtk_pinctrl *hw, err = hw->soc->bias_set(hw, desc, pullup); if (err) return err; - } else if (hw->soc->bias_set_combo) { - err = hw->soc->bias_set_combo(hw, desc, pullup, arg); - if (err) - return err; } else { - return -ENOTSUPP; + err = mtk_pinconf_bias_set_rev1(hw, desc, pullup); + if (err) + err = mtk_pinconf_bias_set(hw, desc, pullup); } } diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index a76be6cc26ee..5b764740b829 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c @@ -444,8 +444,7 @@ static int amd_gpio_irq_set_wake(struct irq_data *d, unsigned int on) unsigned long flags; struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct amd_gpio *gpio_dev = gpiochip_get_data(gc); - u32 wake_mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3) | - BIT(WAKE_CNTRL_OFF_S4); + u32 wake_mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3); raw_spin_lock_irqsave(&gpio_dev->lock, flags); pin_reg = readl(gpio_dev->base + (d->hwirq)*4); diff --git a/drivers/pinctrl/pinctrl-k210.c b/drivers/pinctrl/pinctrl-k210.c index f831526d06ff..49e32684dbb2 100644 --- a/drivers/pinctrl/pinctrl-k210.c +++ b/drivers/pinctrl/pinctrl-k210.c @@ -950,23 +950,37 @@ static int k210_fpioa_probe(struct platform_device *pdev) return ret; pdata->pclk = devm_clk_get_optional(dev, "pclk"); - if 
(!IS_ERR(pdata->pclk)) - clk_prepare_enable(pdata->pclk); + if (!IS_ERR(pdata->pclk)) { + ret = clk_prepare_enable(pdata->pclk); + if (ret) + goto disable_clk; + } pdata->sysctl_map = syscon_regmap_lookup_by_phandle_args(np, "canaan,k210-sysctl-power", 1, &pdata->power_offset); - if (IS_ERR(pdata->sysctl_map)) - return PTR_ERR(pdata->sysctl_map); + if (IS_ERR(pdata->sysctl_map)) { + ret = PTR_ERR(pdata->sysctl_map); + goto disable_pclk; + } k210_fpioa_init_ties(pdata); pdata->pctl = pinctrl_register(&k210_pinctrl_desc, dev, (void *)pdata); - if (IS_ERR(pdata->pctl)) - return PTR_ERR(pdata->pctl); + if (IS_ERR(pdata->pctl)) { + ret = PTR_ERR(pdata->pctl); + goto disable_pclk; + } return 0; + +disable_pclk: + clk_disable_unprepare(pdata->pclk); +disable_clk: + clk_disable_unprepare(pdata->clk); + + return ret; } static const struct of_device_id k210_fpioa_dt_ids[] = { diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig index 2f51b4f99393..cad4e60df618 100644 --- a/drivers/pinctrl/qcom/Kconfig +++ b/drivers/pinctrl/qcom/Kconfig @@ -13,7 +13,7 @@ config PINCTRL_MSM config PINCTRL_APQ8064 tristate "Qualcomm APQ8064 pin controller driver" - depends on GPIOLIB && OF + depends on OF depends on PINCTRL_MSM help This is the pinctrl, pinmux, pinconf and gpiolib driver for the @@ -21,7 +21,7 @@ config PINCTRL_APQ8064 config PINCTRL_APQ8084 tristate "Qualcomm APQ8084 pin controller driver" - depends on GPIOLIB && OF + depends on OF depends on PINCTRL_MSM help This is the pinctrl, pinmux, pinconf and gpiolib driver for the @@ -29,7 +29,7 @@ config PINCTRL_APQ8084 config PINCTRL_IPQ4019 tristate "Qualcomm IPQ4019 pin controller driver" - depends on GPIOLIB && OF + depends on OF depends on PINCTRL_MSM help This is the pinctrl, pinmux, pinconf and gpiolib driver for the @@ -37,7 +37,7 @@ config PINCTRL_IPQ4019 config PINCTRL_IPQ8064 tristate "Qualcomm IPQ8064 pin controller driver" - depends on GPIOLIB && OF + depends on OF depends on PINCTRL_MSM help This is the pinctrl, pinmux, pinconf and gpiolib driver for the @@ -45,7 +45,7 @@ config PINCTRL_IPQ8064 config PINCTRL_IPQ8074 tristate "Qualcomm Technologies, Inc. IPQ8074 pin controller driver" - depends on GPIOLIB && OF + depends on OF depends on PINCTRL_MSM help This is the pinctrl, pinmux, pinconf and gpiolib driver for @@ -55,7 +55,7 @@ config PINCTRL_IPQ8074 config PINCTRL_IPQ6018 tristate "Qualcomm Technologies, Inc. 
IPQ6018 pin controller driver" - depends on GPIOLIB && OF + depends on OF depends on PINCTRL_MSM help This is the pinctrl, pinmux, pinconf and gpiolib driver for @@ -65,7 +65,7 @@ config PINCTRL_IPQ6018 config PINCTRL_MSM8226 tristate "Qualcomm 8226 pin controller driver" - depends on GPIOLIB && OF + depends on OF depends on PINCTRL_MSM help This is the pinctrl, pinmux, pinconf and gpiolib driver for the @@ -74,7 +74,7 @@ config PINCTRL_MSM8226 config PINCTRL_MSM8660 tristate "Qualcomm 8660 pin controller driver" - depends on GPIOLIB && OF + depends on OF depends on PINCTRL_MSM help This is the pinctrl, pinmux, pinconf and gpiolib driver for the @@ -82,7 +82,7 @@ config PINCTRL_MSM8660 config PINCTRL_MSM8960 tristate "Qualcomm 8960 pin controller driver" - depends on GPIOLIB && OF + depends on OF depends on PINCTRL_MSM help This is the pinctrl, pinmux, pinconf and gpiolib driver for the @@ -90,7 +90,7 @@ config PINCTRL_MSM8960 config PINCTRL_MDM9615 tristate "Qualcomm 9615 pin controller driver" - depends on GPIOLIB && OF + depends on OF depends on PINCTRL_MSM help This is the pinctrl, pinmux, pinconf and gpiolib driver for the @@ -98,7 +98,7 @@ config PINCTRL_MDM9615 config PINCTRL_MSM8X74 tristate "Qualcomm 8x74 pin controller driver" - depends on GPIOLIB && OF + depends on OF depends on PINCTRL_MSM help This is the pinctrl, pinmux, pinconf and gpiolib driver for the @@ -106,7 +106,7 @@ config PINCTRL_MSM8X74 config PINCTRL_MSM8916 tristate "Qualcomm 8916 pin controller driver" - depends on GPIOLIB && OF + depends on OF depends on PINCTRL_MSM help This is the pinctrl, pinmux, pinconf and gpiolib driver for the @@ -114,7 +114,7 @@ config PINCTRL_MSM8916 config PINCTRL_MSM8953 tristate "Qualcomm 8953 pin controller driver" - depends on GPIOLIB && OF + depends on OF depends on PINCTRL_MSM help This is the pinctrl, pinmux, pinconf and gpiolib driver for the @@ -124,7 +124,7 @@ config PINCTRL_MSM8953 config PINCTRL_MSM8976 tristate "Qualcomm 8976 pin controller driver" - depends on GPIOLIB && OF + depends on OF depends on PINCTRL_MSM help This is the pinctrl, pinmux, pinconf and gpiolib driver for the @@ -134,7 +134,7 @@ config PINCTRL_MSM8976 config PINCTRL_MSM8994 tristate "Qualcomm 8994 pin controller driver" - depends on GPIOLIB && OF + depends on OF depends on PINCTRL_MSM help This is the pinctrl, pinmux, pinconf and gpiolib driver for the @@ -143,7 +143,7 @@ config PINCTRL_MSM8994 config PINCTRL_MSM8996 tristate "Qualcomm MSM8996 pin controller driver" - depends on GPIOLIB && OF + depends on OF depends on PINCTRL_MSM help This is the pinctrl, pinmux, pinconf and gpiolib driver for the @@ -151,7 +151,7 @@ config PINCTRL_MSM8996 config PINCTRL_MSM8998 tristate "Qualcomm MSM8998 pin controller driver" - depends on GPIOLIB && OF + depends on OF depends on PINCTRL_MSM help This is the pinctrl, pinmux, pinconf and gpiolib driver for the @@ -159,7 +159,7 @@ config PINCTRL_MSM8998 config PINCTRL_QCS404 tristate "Qualcomm QCS404 pin controller driver" - depends on GPIOLIB && OF + depends on OF depends on PINCTRL_MSM help This is the pinctrl, pinmux, pinconf and gpiolib driver for the @@ -167,7 +167,7 @@ config PINCTRL_QCS404 config PINCTRL_QDF2XXX tristate "Qualcomm Technologies QDF2xxx pin controller driver" - depends on GPIOLIB && ACPI + depends on ACPI depends on PINCTRL_MSM help This is the GPIO driver for the TLMM block found on the @@ -175,7 +175,7 @@ config PINCTRL_QDF2XXX config PINCTRL_QCOM_SPMI_PMIC tristate "Qualcomm SPMI PMIC pin controller driver" - depends on GPIOLIB && OF && SPMI 
+ depends on OF && SPMI select REGMAP_SPMI select PINMUX select PINCONF @@ -190,7 +190,7 @@ config PINCTRL_QCOM_SPMI_PMIC config PINCTRL_QCOM_SSBI_PMIC tristate "Qualcomm SSBI PMIC pin controller driver" - depends on GPIOLIB && OF + depends on OF select PINMUX select PINCONF select GENERIC_PINCONF @@ -204,7 +204,7 @@ config PINCTRL_QCOM_SSBI_PMIC config PINCTRL_SC7180 tristate "Qualcomm Technologies Inc SC7180 pin controller driver" - depends on GPIOLIB && OF + depends on OF depends on PINCTRL_MSM help This is the pinctrl, pinmux, pinconf and gpiolib driver for the @@ -213,7 +213,7 @@ config PINCTRL_SC7180 config PINCTRL_SC7280 tristate "Qualcomm Technologies Inc SC7280 pin controller driver" - depends on GPIOLIB && OF + depends on OF depends on PINCTRL_MSM help This is the pinctrl, pinmux, pinconf and gpiolib driver for the @@ -222,7 +222,7 @@ config PINCTRL_SC7280 config PINCTRL_SC8180X tristate "Qualcomm Technologies Inc SC8180x pin controller driver" - depends on GPIOLIB && (OF || ACPI) + depends on (OF || ACPI) depends on PINCTRL_MSM help This is the pinctrl, pinmux, pinconf and gpiolib driver for the @@ -231,7 +231,7 @@ config PINCTRL_SC8180X config PINCTRL_SDM660 tristate "Qualcomm Technologies Inc SDM660 pin controller driver" - depends on GPIOLIB && OF + depends on OF depends on PINCTRL_MSM help This is the pinctrl, pinmux, pinconf and gpiolib driver for the @@ -240,7 +240,7 @@ config PINCTRL_SDM660 config PINCTRL_SDM845 tristate "Qualcomm Technologies Inc SDM845 pin controller driver" - depends on GPIOLIB && (OF || ACPI) + depends on (OF || ACPI) depends on PINCTRL_MSM help This is the pinctrl, pinmux, pinconf and gpiolib driver for the @@ -249,7 +249,7 @@ config PINCTRL_SDM845 config PINCTRL_SDX55 tristate "Qualcomm Technologies Inc SDX55 pin controller driver" - depends on GPIOLIB && OF + depends on OF depends on PINCTRL_MSM help This is the pinctrl, pinmux, pinconf and gpiolib driver for the @@ -258,7 +258,7 @@ config PINCTRL_SDX55 config PINCTRL_SM6125 tristate "Qualcomm Technologies Inc SM6125 pin controller driver" - depends on GPIOLIB && OF + depends on OF depends on PINCTRL_MSM help This is the pinctrl, pinmux, pinconf and gpiolib driver for the @@ -267,7 +267,7 @@ config PINCTRL_SM6125 config PINCTRL_SM8150 tristate "Qualcomm Technologies Inc SM8150 pin controller driver" - depends on GPIOLIB && OF + depends on OF depends on PINCTRL_MSM help This is the pinctrl, pinmux, pinconf and gpiolib driver for the @@ -276,7 +276,7 @@ config PINCTRL_SM8150 config PINCTRL_SM8250 tristate "Qualcomm Technologies Inc SM8250 pin controller driver" - depends on GPIOLIB && OF + depends on OF depends on PINCTRL_MSM help This is the pinctrl, pinmux, pinconf and gpiolib driver for the @@ -285,8 +285,7 @@ config PINCTRL_SM8250 config PINCTRL_SM8350 tristate "Qualcomm Technologies Inc SM8350 pin controller driver" - depends on GPIOLIB && OF - select PINCTRL_MSM + depends on PINCTRL_MSM help This is the pinctrl, pinmux, pinconf and gpiolib driver for the Qualcomm Technologies Inc TLMM block found on the Qualcomm diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c index dc8d39ae045b..9c7679c06dca 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c @@ -1219,10 +1219,12 @@ static int sunxi_pinctrl_build_state(struct platform_device *pdev) } /* - * We suppose that we won't have any more functions than pins, - * we'll reallocate that later anyway + * Find an upper bound for the maximum number of functions: in + * the 
worst case we have gpio_in, gpio_out, irq and up to four + special functions per pin, plus one entry for the sentinel. + We'll reallocate that later anyway. */ - pctl->functions = kcalloc(pctl->ngroups, + pctl->functions = kcalloc(4 * pctl->ngroups + 4, sizeof(*pctl->functions), GFP_KERNEL); if (!pctl->functions) diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 7d385c3b2239..d12db6c316ea 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -508,6 +508,7 @@ config THINKPAD_ACPI depends on RFKILL || RFKILL = n depends on ACPI_VIDEO || ACPI_VIDEO = n depends on BACKLIGHT_CLASS_DEVICE + depends on I2C select ACPI_PLATFORM_PROFILE select HWMON select NVRAM @@ -691,6 +692,7 @@ config INTEL_HID_EVENT tristate "INTEL HID Event" depends on ACPI depends on INPUT + depends on I2C select INPUT_SPARSEKMAP help This driver provides support for the Intel HID Event hotkey interface. @@ -742,6 +744,7 @@ config INTEL_VBTN tristate "INTEL VIRTUAL BUTTON" depends on ACPI depends on INPUT + depends on I2C select INPUT_SPARSEKMAP help This driver provides support for the Intel Virtual Button interface. diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c index b9da58ee9b1e..3481479a2942 100644 --- a/drivers/platform/x86/amd-pmc.c +++ b/drivers/platform/x86/amd-pmc.c @@ -46,34 +46,79 @@ #define AMD_PMC_RESULT_CMD_UNKNOWN 0xFE #define AMD_PMC_RESULT_FAILED 0xFF +/* FCH SSC Registers */ +#define FCH_S0I3_ENTRY_TIME_L_OFFSET 0x30 +#define FCH_S0I3_ENTRY_TIME_H_OFFSET 0x34 +#define FCH_S0I3_EXIT_TIME_L_OFFSET 0x38 +#define FCH_S0I3_EXIT_TIME_H_OFFSET 0x3C +#define FCH_SSC_MAPPING_SIZE 0x800 +#define FCH_BASE_PHY_ADDR_LOW 0xFED81100 +#define FCH_BASE_PHY_ADDR_HIGH 0x00000000 + +/* SMU Message Definitions */ +#define SMU_MSG_GETSMUVERSION 0x02 +#define SMU_MSG_LOG_GETDRAM_ADDR_HI 0x04 +#define SMU_MSG_LOG_GETDRAM_ADDR_LO 0x05 +#define SMU_MSG_LOG_START 0x06 +#define SMU_MSG_LOG_RESET 0x07 +#define SMU_MSG_LOG_DUMP_DATA 0x08 +#define SMU_MSG_GET_SUP_CONSTRAINTS 0x09 /* List of supported CPU ids */ #define AMD_CPU_ID_RV 0x15D0 #define AMD_CPU_ID_RN 0x1630 #define AMD_CPU_ID_PCO AMD_CPU_ID_RV #define AMD_CPU_ID_CZN AMD_CPU_ID_RN +#define AMD_CPU_ID_YC 0x14B5 -#define AMD_SMU_FW_VERSION 0x0 #define PMC_MSG_DELAY_MIN_US 100 #define RESPONSE_REGISTER_LOOP_MAX 200 +#define SOC_SUBSYSTEM_IP_MAX 12 +#define DELAY_MIN_US 2000 +#define DELAY_MAX_US 3000 enum amd_pmc_def { MSG_TEST = 0x01, MSG_OS_HINT_PCO, MSG_OS_HINT_RN, }; +struct amd_pmc_bit_map { + const char *name; + u32 bit_mask; +}; + +static const struct amd_pmc_bit_map soc15_ip_blk[] = { + {"DISPLAY", BIT(0)}, + {"CPU", BIT(1)}, + {"GFX", BIT(2)}, + {"VDD", BIT(3)}, + {"ACP", BIT(4)}, + {"VCN", BIT(5)}, + {"ISP", BIT(6)}, + {"NBIO", BIT(7)}, + {"DF", BIT(8)}, + {"USB0", BIT(9)}, + {"USB1", BIT(10)}, + {"LAPIC", BIT(11)}, + {} +}; + struct amd_pmc_dev { void __iomem *regbase; - void __iomem *smu_base; + void __iomem *smu_virt_addr; + void __iomem *fch_virt_addr; u32 base_addr; u32 cpu_id; + u32 active_ips; struct device *dev; + struct mutex lock; /* generic mutex lock */ #if IS_ENABLED(CONFIG_DEBUG_FS) struct dentry *dbgfs_dir; #endif /* CONFIG_DEBUG_FS */ }; static struct amd_pmc_dev pmc; +static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set, u32 *data, u8 msg, bool ret); static inline u32 amd_pmc_reg_read(struct amd_pmc_dev *dev, int reg_offset) { @@ -85,18 +130,77 @@ static inline void amd_pmc_reg_write(struct amd_pmc_dev *dev, int reg_offset, u3 iowrite32(val, dev->regbase +
reg_offset); } +struct smu_metrics { + u32 table_version; + u32 hint_count; + u32 s0i3_cyclecount; + u32 timein_s0i2; + u64 timeentering_s0i3_lastcapture; + u64 timeentering_s0i3_totaltime; + u64 timeto_resume_to_os_lastcapture; + u64 timeto_resume_to_os_totaltime; + u64 timein_s0i3_lastcapture; + u64 timein_s0i3_totaltime; + u64 timein_swdrips_lastcapture; + u64 timein_swdrips_totaltime; + u64 timecondition_notmet_lastcapture[SOC_SUBSYSTEM_IP_MAX]; + u64 timecondition_notmet_totaltime[SOC_SUBSYSTEM_IP_MAX]; +} __packed; + #ifdef CONFIG_DEBUG_FS static int smu_fw_info_show(struct seq_file *s, void *unused) { struct amd_pmc_dev *dev = s->private; - u32 value; + struct smu_metrics table; + int idx; + + if (dev->cpu_id == AMD_CPU_ID_PCO) + return -EINVAL; + + memcpy_fromio(&table, dev->smu_virt_addr, sizeof(struct smu_metrics)); + + seq_puts(s, "\n=== SMU Statistics ===\n"); + seq_printf(s, "Table Version: %d\n", table.table_version); + seq_printf(s, "Hint Count: %d\n", table.hint_count); + seq_printf(s, "S0i3 Cycle Count: %d\n", table.s0i3_cyclecount); + seq_printf(s, "Time (in us) to S0i3: %lld\n", table.timeentering_s0i3_lastcapture); + seq_printf(s, "Time (in us) in S0i3: %lld\n", table.timein_s0i3_lastcapture); + + seq_puts(s, "\n=== Active time (in us) ===\n"); + for (idx = 0 ; idx < SOC_SUBSYSTEM_IP_MAX ; idx++) { + if (soc15_ip_blk[idx].bit_mask & dev->active_ips) + seq_printf(s, "%-8s : %lld\n", soc15_ip_blk[idx].name, + table.timecondition_notmet_lastcapture[idx]); + } - value = ioread32(dev->smu_base + AMD_SMU_FW_VERSION); - seq_printf(s, "SMU FW Info: %x\n", value); return 0; } DEFINE_SHOW_ATTRIBUTE(smu_fw_info); +static int s0ix_stats_show(struct seq_file *s, void *unused) +{ + struct amd_pmc_dev *dev = s->private; + u64 entry_time, exit_time, residency; + + entry_time = ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_H_OFFSET); + entry_time = entry_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_L_OFFSET); + + exit_time = ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_H_OFFSET); + exit_time = exit_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_L_OFFSET); + + /* It's in 48MHz. 
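 * (Editor's aside, illustrative: the FCH exposes the S0i3 entry and exit
 * timestamps as two 32-bit halves of a 64-bit counter clocked at 48 MHz,
 * assembled above by shifting the high word. At 48 ticks per microsecond
 * the conversion is a single divide:
 *
 *	residency_us = (exit_time - entry_time) / 48;
 *	e.g. 48,000,000 ticks -> 1,000,000 us, i.e. one second in S0i3.
 * )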
We need to convert it */ + residency = exit_time - entry_time; + do_div(residency, 48); + + seq_puts(s, "=== S0ix statistics ===\n"); + seq_printf(s, "S0ix Entry Time: %lld\n", entry_time); + seq_printf(s, "S0ix Exit Time: %lld\n", exit_time); + seq_printf(s, "Residency Time: %lld\n", residency); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(s0ix_stats); + static void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev) { debugfs_remove_recursive(dev->dbgfs_dir); @@ -107,6 +211,8 @@ static void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev) dev->dbgfs_dir = debugfs_create_dir("amd_pmc", NULL); debugfs_create_file("smu_fw_info", 0644, dev->dbgfs_dir, dev, &smu_fw_info_fops); + debugfs_create_file("s0ix_stats", 0644, dev->dbgfs_dir, dev, + &s0ix_stats_fops); } #else static inline void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev) @@ -118,6 +224,32 @@ static inline void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev) } #endif /* CONFIG_DEBUG_FS */ +static int amd_pmc_setup_smu_logging(struct amd_pmc_dev *dev) +{ + u32 phys_addr_low, phys_addr_hi; + u64 smu_phys_addr; + + if (dev->cpu_id == AMD_CPU_ID_PCO) + return -EINVAL; + + /* Get Active devices list from SMU */ + amd_pmc_send_cmd(dev, 0, &dev->active_ips, SMU_MSG_GET_SUP_CONSTRAINTS, 1); + + /* Get dram address */ + amd_pmc_send_cmd(dev, 0, &phys_addr_low, SMU_MSG_LOG_GETDRAM_ADDR_LO, 1); + amd_pmc_send_cmd(dev, 0, &phys_addr_hi, SMU_MSG_LOG_GETDRAM_ADDR_HI, 1); + smu_phys_addr = ((u64)phys_addr_hi << 32 | phys_addr_low); + + dev->smu_virt_addr = devm_ioremap(dev->dev, smu_phys_addr, sizeof(struct smu_metrics)); + if (!dev->smu_virt_addr) + return -ENOMEM; + + /* Start the logging */ + amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_START, 0); + + return 0; +} + static void amd_pmc_dump_registers(struct amd_pmc_dev *dev) { u32 value; @@ -132,19 +264,19 @@ static void amd_pmc_dump_registers(struct amd_pmc_dev *dev) dev_dbg(dev->dev, "AMD_PMC_REGISTER_MESSAGE:%x\n", value); } -static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set) +static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set, u32 *data, u8 msg, bool ret) { int rc; - u8 msg; u32 val; + mutex_lock(&dev->lock); /* Wait until we get a valid response */ rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMC_REGISTER_RESPONSE, - val, val > 0, PMC_MSG_DELAY_MIN_US, + val, val != 0, PMC_MSG_DELAY_MIN_US, PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX); if (rc) { dev_err(dev->dev, "failed to talk to SMU\n"); - return rc; + goto out_unlock; } /* Write zero to response register */ @@ -154,34 +286,91 @@ static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set) amd_pmc_reg_write(dev, AMD_PMC_REGISTER_ARGUMENT, set); /* Write message ID to message ID register */ - msg = (dev->cpu_id == AMD_CPU_ID_RN) ? MSG_OS_HINT_RN : MSG_OS_HINT_PCO; amd_pmc_reg_write(dev, AMD_PMC_REGISTER_MESSAGE, msg); - return 0; + + /* Wait until we get a valid response */ + rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMC_REGISTER_RESPONSE, + val, val != 0, PMC_MSG_DELAY_MIN_US, + PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX); + if (rc) { + dev_err(dev->dev, "SMU response timed out\n"); + goto out_unlock; + } + + switch (val) { + case AMD_PMC_RESULT_OK: + if (ret) { + /* PMFW may take longer time to return back the data */ + usleep_range(DELAY_MIN_US, 10 * DELAY_MAX_US); + *data = amd_pmc_reg_read(dev, AMD_PMC_REGISTER_ARGUMENT); + } + break; + case AMD_PMC_RESULT_CMD_REJECT_BUSY: + dev_err(dev->dev, "SMU not ready. 
err: 0x%x\n", val); + rc = -EBUSY; + goto out_unlock; + case AMD_PMC_RESULT_CMD_UNKNOWN: + dev_err(dev->dev, "SMU cmd unknown. err: 0x%x\n", val); + rc = -EINVAL; + goto out_unlock; + case AMD_PMC_RESULT_CMD_REJECT_PREREQ: + case AMD_PMC_RESULT_FAILED: + default: + dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val); + rc = -EIO; + goto out_unlock; + } + +out_unlock: + mutex_unlock(&dev->lock); + amd_pmc_dump_registers(dev); + return rc; +} + +static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev) +{ + switch (dev->cpu_id) { + case AMD_CPU_ID_PCO: + return MSG_OS_HINT_PCO; + case AMD_CPU_ID_RN: + case AMD_CPU_ID_YC: + return MSG_OS_HINT_RN; + } + return -EINVAL; } static int __maybe_unused amd_pmc_suspend(struct device *dev) { struct amd_pmc_dev *pdev = dev_get_drvdata(dev); int rc; + u8 msg; + + /* Reset and Start SMU logging - to monitor the s0i3 stats */ + amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_RESET, 0); + amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_START, 0); - rc = amd_pmc_send_cmd(pdev, 1); + msg = amd_pmc_get_os_hint(pdev); + rc = amd_pmc_send_cmd(pdev, 1, NULL, msg, 0); if (rc) dev_err(pdev->dev, "suspend failed\n"); - amd_pmc_dump_registers(pdev); - return 0; + return rc; } static int __maybe_unused amd_pmc_resume(struct device *dev) { struct amd_pmc_dev *pdev = dev_get_drvdata(dev); int rc; + u8 msg; + + /* Let SMU know that we are looking for stats */ + amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_DUMP_DATA, 0); - rc = amd_pmc_send_cmd(pdev, 0); + msg = amd_pmc_get_os_hint(pdev); + rc = amd_pmc_send_cmd(pdev, 0, NULL, msg, 0); if (rc) dev_err(pdev->dev, "resume failed\n"); - amd_pmc_dump_registers(pdev); return 0; } @@ -190,6 +379,7 @@ static const struct dev_pm_ops amd_pmc_pm_ops = { }; static const struct pci_device_id pmc_pci_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_YC) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CZN) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RN) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PCO) }, @@ -201,9 +391,8 @@ static int amd_pmc_probe(struct platform_device *pdev) { struct amd_pmc_dev *dev = &pmc; struct pci_dev *rdev; - u32 base_addr_lo; - u32 base_addr_hi; - u64 base_addr; + u32 base_addr_lo, base_addr_hi; + u64 base_addr, fch_phys_addr; int err; u32 val; @@ -248,16 +437,25 @@ static int amd_pmc_probe(struct platform_device *pdev) pci_dev_put(rdev); base_addr = ((u64)base_addr_hi << 32 | base_addr_lo); - dev->smu_base = devm_ioremap(dev->dev, base_addr, AMD_PMC_MAPPING_SIZE); - if (!dev->smu_base) - return -ENOMEM; - dev->regbase = devm_ioremap(dev->dev, base_addr + AMD_PMC_BASE_ADDR_OFFSET, AMD_PMC_MAPPING_SIZE); if (!dev->regbase) return -ENOMEM; - amd_pmc_dump_registers(dev); + mutex_init(&dev->lock); + + /* Use FCH registers to get the S0ix stats */ + base_addr_lo = FCH_BASE_PHY_ADDR_LOW; + base_addr_hi = FCH_BASE_PHY_ADDR_HIGH; + fch_phys_addr = ((u64)base_addr_hi << 32 | base_addr_lo); + dev->fch_virt_addr = devm_ioremap(dev->dev, fch_phys_addr, FCH_SSC_MAPPING_SIZE); + if (!dev->fch_virt_addr) + return -ENOMEM; + + /* Use SMU to get the s0i3 debug stats */ + err = amd_pmc_setup_smu_logging(dev); + if (err) + dev_err(dev->dev, "SMU debugging info not supported on this platform\n"); platform_set_drvdata(pdev, dev); amd_pmc_dbgfs_register(dev); @@ -269,11 +467,14 @@ static int amd_pmc_remove(struct platform_device *pdev) struct amd_pmc_dev *dev = platform_get_drvdata(pdev); amd_pmc_dbgfs_unregister(dev); + mutex_destroy(&dev->lock); return 0; } static const struct acpi_device_id amd_pmc_acpi_ids[] = { {"AMDI0005", 
0}, + {"AMDI0006", 0}, + {"AMDI0007", 0}, {"AMD0004", 0}, { } }; diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index 0cb927f0f301..a81dc4b191b7 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -41,6 +41,10 @@ static int wapf = -1; module_param(wapf, uint, 0444); MODULE_PARM_DESC(wapf, "WAPF value"); +static int tablet_mode_sw = -1; +module_param(tablet_mode_sw, uint, 0444); +MODULE_PARM_DESC(tablet_mode_sw, "Tablet mode detect: -1:auto 0:disable 1:kbd-dock 2:lid-flip"); + static struct quirk_entry *quirks; static bool asus_q500a_i8042_filter(unsigned char data, unsigned char str, @@ -458,6 +462,15 @@ static const struct dmi_system_id asus_quirks[] = { }, .driver_data = &quirk_asus_use_lid_flip_devid, }, + { + .callback = dmi_matched, + .ident = "ASUS TP200s / E205SA", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "E205SA"), + }, + .driver_data = &quirk_asus_use_lid_flip_devid, + }, {}, }; @@ -477,6 +490,21 @@ static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver) else wapf = quirks->wapf; + switch (tablet_mode_sw) { + case 0: + quirks->use_kbd_dock_devid = false; + quirks->use_lid_flip_devid = false; + break; + case 1: + quirks->use_kbd_dock_devid = true; + quirks->use_lid_flip_devid = false; + break; + case 2: + quirks->use_kbd_dock_devid = false; + quirks->use_lid_flip_devid = true; + break; + } + if (quirks->i8042_filter) { ret = i8042_install_filter(quirks->i8042_filter); if (ret) { diff --git a/drivers/platform/x86/dual_accel_detect.h b/drivers/platform/x86/dual_accel_detect.h new file mode 100644 index 000000000000..a9eae17cc43d --- /dev/null +++ b/drivers/platform/x86/dual_accel_detect.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Helper code to detect 360 degree hinges (yoga) style 2-in-1 devices using 2 accelerometers + * to allow the OS to determine the angle between the display and the base of the device. + * + * On Windows these are read by a special HingeAngleService process which calls undocumented + * ACPI methods, to let the firmware know if the 2-in-1 is in tablet- or laptop-mode. + * The firmware may use this to disable the kbd and touchpad to avoid spurious input in + * tablet-mode as well as to report SW_TABLET_MODE info to the OS. + * + * Since Linux does not call these undocumented methods, the SW_TABLET_MODE info reported + * by various drivers/platform/x86 drivers is incorrect. These drivers use the detection + * code in this file to disable SW_TABLET_MODE reporting to avoid reporting broken info + * (instead userspace can derive the status itself by directly reading the 2 accels). 
+ */ + +#include <linux/acpi.h> +#include <linux/i2c.h> + +static int dual_accel_i2c_resource_count(struct acpi_resource *ares, void *data) +{ + struct acpi_resource_i2c_serialbus *sb; + int *count = data; + + if (i2c_acpi_get_i2c_resource(ares, &sb)) + *count = *count + 1; + + return 1; +} + +static int dual_accel_i2c_client_count(struct acpi_device *adev) +{ + int ret, count = 0; + LIST_HEAD(r); + + ret = acpi_dev_get_resources(adev, &r, dual_accel_i2c_resource_count, &count); + if (ret < 0) + return ret; + + acpi_dev_free_resource_list(&r); + return count; +} + +static bool dual_accel_detect_bosc0200(void) +{ + struct acpi_device *adev; + int count; + + adev = acpi_dev_get_first_match_dev("BOSC0200", NULL, -1); + if (!adev) + return false; + + count = dual_accel_i2c_client_count(adev); + + acpi_dev_put(adev); + + return count == 2; +} + +static bool dual_accel_detect(void) +{ + /* Systems which use a pair of accels with KIOX010A / KIOX020A ACPI ids */ + if (acpi_dev_present("KIOX010A", NULL, -1) && + acpi_dev_present("KIOX020A", NULL, -1)) + return true; + + /* Systems which use a single DUAL250E ACPI device to model 2 accels */ + if (acpi_dev_present("DUAL250E", NULL, -1)) + return true; + + /* Systems which use a single BOSC0200 ACPI device to model 2 accels */ + if (dual_accel_detect_bosc0200()) + return true; + + return false; +} diff --git a/drivers/platform/x86/gigabyte-wmi.c b/drivers/platform/x86/gigabyte-wmi.c index 5529d7b0abea..7f3a03f937f6 100644 --- a/drivers/platform/x86/gigabyte-wmi.c +++ b/drivers/platform/x86/gigabyte-wmi.c @@ -140,12 +140,15 @@ static u8 gigabyte_wmi_detect_sensor_usability(struct wmi_device *wdev) }} static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = { + DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M S2H V2"), DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"), + DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE V2"), DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 GAMING X V2"), DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M AORUS PRO-P"), DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M DS3H"), DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z390 I AORUS PRO WIFI-CF"), DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 AORUS ELITE"), + DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 GAMING X"), DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 I AORUS PRO WIFI"), DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 UD"), { } diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c index 078648a9201b..2e4e97a626a5 100644 --- a/drivers/platform/x86/intel-hid.c +++ b/drivers/platform/x86/intel-hid.c @@ -14,6 +14,7 @@ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/suspend.h> +#include "dual_accel_detect.h" /* When NOT in tablet mode, VGBS returns with the flag 0x40 */ #define TABLET_MODE_FLAG BIT(6) @@ -25,6 +26,7 @@ static const struct acpi_device_id intel_hid_ids[] = { {"INT33D5", 0}, {"INTC1051", 0}, {"INTC1054", 0}, + {"INTC1070", 0}, {"", 0}, }; MODULE_DEVICE_TABLE(acpi, intel_hid_ids); @@ -121,6 +123,7 @@ struct intel_hid_priv { struct input_dev *array; struct input_dev *switches; bool wakeup_mode; + bool dual_accel; }; #define HID_EVENT_FILTER_UUID "eeec56b3-4442-408f-a792-4edd4d758054" @@ -450,22 +453,9 @@ static void notify_handler(acpi_handle handle, u32 event, void *context) * SW_TABLET_MODE report, in these cases we enable support when receiving * the first event instead of during driver setup. 
* - * Some 360 degree hinges (yoga) style 2-in-1 devices use 2 accelerometers - * to allow the OS to determine the angle between the display and the base - * of the device. On Windows these are read by a special HingeAngleService - * process which calls an ACPI DSM (Device Specific Method) on the - * ACPI KIOX010A device node for the sensor in the display, to let the - * firmware know if the 2-in-1 is in tablet- or laptop-mode so that it can - * disable the kbd and touchpad to avoid spurious input in tablet-mode. - * - * The linux kxcjk1013 driver calls the DSM for this once at probe time - * to ensure that the builtin kbd and touchpad work. On some devices this - * causes a "spurious" 0xcd event on the intel-hid ACPI dev. In this case - * there is not a functional tablet-mode switch, so we should not register - * the tablet-mode switch device. + * See dual_accel_detect.h for more info on the dual_accel check. */ - if (!priv->switches && (event == 0xcc || event == 0xcd) && - !acpi_dev_present("KIOX010A", NULL, -1)) { + if (!priv->switches && !priv->dual_accel && (event == 0xcc || event == 0xcd)) { dev_info(&device->dev, "switch event received, enable switches supports\n"); err = intel_hid_switches_setup(device); if (err) @@ -606,6 +596,8 @@ static int intel_hid_probe(struct platform_device *device) return -ENOMEM; dev_set_drvdata(&device->dev, priv); + priv->dual_accel = dual_accel_detect(); + err = intel_hid_input_setup(device); if (err) { pr_err("Failed to setup Intel HID hotkeys\n"); diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c index 888a764efad1..309166431063 100644 --- a/drivers/platform/x86/intel-vbtn.c +++ b/drivers/platform/x86/intel-vbtn.c @@ -14,6 +14,7 @@ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/suspend.h> +#include "dual_accel_detect.h" /* Returned when NOT in tablet mode on some HP Stream x360 11 models */ #define VGBS_TABLET_MODE_FLAG_ALT 0x10 @@ -66,6 +67,7 @@ static const struct key_entry intel_vbtn_switchmap[] = { struct intel_vbtn_priv { struct input_dev *buttons_dev; struct input_dev *switches_dev; + bool dual_accel; bool has_buttons; bool has_switches; bool wakeup_mode; @@ -160,6 +162,10 @@ static void notify_handler(acpi_handle handle, u32 event, void *context) input_dev = priv->buttons_dev; } else if ((ke = sparse_keymap_entry_from_scancode(priv->switches_dev, event))) { if (!priv->has_switches) { + /* See dual_accel_detect.h for more info */ + if (priv->dual_accel) + return; + dev_info(&device->dev, "Registering Intel Virtual Switches input-dev after receiving a switch event\n"); ret = input_register_device(priv->switches_dev); if (ret) @@ -248,11 +254,15 @@ static const struct dmi_system_id dmi_switches_allow_list[] = { {} /* Array terminator */ }; -static bool intel_vbtn_has_switches(acpi_handle handle) +static bool intel_vbtn_has_switches(acpi_handle handle, bool dual_accel) { unsigned long long vgbs; acpi_status status; + /* See dual_accel_detect.h for more info */ + if (dual_accel) + return false; + if (!dmi_check_system(dmi_switches_allow_list)) return false; @@ -263,13 +273,14 @@ static bool intel_vbtn_has_switches(acpi_handle handle) static int intel_vbtn_probe(struct platform_device *device) { acpi_handle handle = ACPI_HANDLE(&device->dev); - bool has_buttons, has_switches; + bool dual_accel, has_buttons, has_switches; struct intel_vbtn_priv *priv; acpi_status status; int err; + dual_accel = dual_accel_detect(); has_buttons = acpi_has_method(handle, "VBDL"); - has_switches = 
intel_vbtn_has_switches(handle); + has_switches = intel_vbtn_has_switches(handle, dual_accel); if (!has_buttons && !has_switches) { dev_warn(&device->dev, "failed to read Intel Virtual Button driver\n"); @@ -281,6 +292,7 @@ static int intel_vbtn_probe(struct platform_device *device) return -ENOMEM; dev_set_drvdata(&device->dev, priv); + priv->dual_accel = dual_accel; priv->has_buttons = has_buttons; priv->has_switches = has_switches; diff --git a/drivers/platform/x86/pcengines-apuv2.c b/drivers/platform/x86/pcengines-apuv2.c index c37349f97bb8..d063d91db9bc 100644 --- a/drivers/platform/x86/pcengines-apuv2.c +++ b/drivers/platform/x86/pcengines-apuv2.c @@ -94,6 +94,7 @@ static struct gpiod_lookup_table gpios_led_table = { NULL, 1, GPIO_ACTIVE_LOW), GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_LED3, NULL, 2, GPIO_ACTIVE_LOW), + {} /* Terminating entry */ } }; @@ -123,6 +124,7 @@ static struct gpiod_lookup_table gpios_key_table = { .table = { GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_MODESW, NULL, 0, GPIO_ACTIVE_LOW), + {} /* Terminating entry */ } }; diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c index 3671b5d20613..6cfed4427fb0 100644 --- a/drivers/platform/x86/think-lmi.c +++ b/drivers/platform/x86/think-lmi.c @@ -571,6 +571,11 @@ static ssize_t current_value_store(struct kobject *kobj, else ret = tlmi_save_bios_settings(""); + if (!ret && !tlmi_priv.pending_changes) { + tlmi_priv.pending_changes = true; + /* let userland know it may need to check reboot pending again */ + kobject_uevent(&tlmi_priv.class_dev->kobj, KOBJ_CHANGE); + } out: kfree(auth_str); kfree(set_str); @@ -647,6 +652,14 @@ static struct kobj_type tlmi_pwd_setting_ktype = { .sysfs_ops = &tlmi_kobj_sysfs_ops, }; +static ssize_t pending_reboot_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) +{ + return sprintf(buf, "%d\n", tlmi_priv.pending_changes); +} + +static struct kobj_attribute pending_reboot = __ATTR_RO(pending_reboot); + /* ---- Initialisation --------------------------------------------------------- */ static void tlmi_release_attr(void) { @@ -659,6 +672,7 @@ static void tlmi_release_attr(void) kobject_put(&tlmi_priv.setting[i]->kobj); } } + sysfs_remove_file(&tlmi_priv.attribute_kset->kobj, &pending_reboot.attr); kset_unregister(tlmi_priv.attribute_kset); /* Authentication structures */ @@ -709,8 +723,8 @@ static int tlmi_sysfs_init(void) /* Build attribute */ tlmi_priv.setting[i]->kobj.kset = tlmi_priv.attribute_kset; - ret = kobject_init_and_add(&tlmi_priv.setting[i]->kobj, &tlmi_attr_setting_ktype, - NULL, "%s", tlmi_priv.setting[i]->display_name); + ret = kobject_add(&tlmi_priv.setting[i]->kobj, NULL, + "%s", tlmi_priv.setting[i]->display_name); if (ret) goto fail_create_attr; @@ -719,6 +733,10 @@ static int tlmi_sysfs_init(void) goto fail_create_attr; } + ret = sysfs_create_file(&tlmi_priv.attribute_kset->kobj, &pending_reboot.attr); + if (ret) + goto fail_create_attr; + /* Create authentication entries */ tlmi_priv.authentication_kset = kset_create_and_add("authentication", NULL, &tlmi_priv.class_dev->kobj); @@ -727,8 +745,7 @@ static int tlmi_sysfs_init(void) goto fail_create_attr; } tlmi_priv.pwd_admin->kobj.kset = tlmi_priv.authentication_kset; - ret = kobject_init_and_add(&tlmi_priv.pwd_admin->kobj, &tlmi_pwd_setting_ktype, - NULL, "%s", "Admin"); + ret = kobject_add(&tlmi_priv.pwd_admin->kobj, NULL, "%s", "Admin"); if (ret) goto fail_create_attr; @@ -737,8 +754,7 @@ static int tlmi_sysfs_init(void) goto fail_create_attr; 
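/* (Editor's aside, illustrative only: these think-lmi hunks split
 * kobject_init_and_add() into kobject_init() at allocation time -- see the
 * tlmi_analyze() hunks below -- plus a bare kobject_add() here, presumably
 * so every object is valid for kobject_put() from the moment it exists.
 * The general pattern, with hypothetical names:
 *
 *	kobject_init(&s->kobj, &my_ktype);
 *	s->kobj.kset = my_kset;
 *	ret = kobject_add(&s->kobj, NULL, "%s", s->name);
 *	if (ret)
 *		kobject_put(&s->kobj);
 * )
 */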
tlmi_priv.pwd_power->kobj.kset = tlmi_priv.authentication_kset; - ret = kobject_init_and_add(&tlmi_priv.pwd_power->kobj, &tlmi_pwd_setting_ktype, - NULL, "%s", "System"); + ret = kobject_add(&tlmi_priv.pwd_power->kobj, NULL, "%s", "System"); if (ret) goto fail_create_attr; @@ -818,6 +834,7 @@ static int tlmi_analyze(void) pr_info("Error retrieving possible values for %d : %s\n", i, setting->display_name); } + kobject_init(&setting->kobj, &tlmi_attr_setting_ktype); tlmi_priv.setting[i] = setting; tlmi_priv.settings_count++; kfree(item); @@ -844,10 +861,12 @@ static int tlmi_analyze(void) if (pwdcfg.password_state & TLMI_PAP_PWD) tlmi_priv.pwd_admin->valid = true; + kobject_init(&tlmi_priv.pwd_admin->kobj, &tlmi_pwd_setting_ktype); + tlmi_priv.pwd_power = kzalloc(sizeof(struct tlmi_pwd_setting), GFP_KERNEL); if (!tlmi_priv.pwd_power) { ret = -ENOMEM; - goto fail_clear_attr; + goto fail_free_pwd_admin; } strscpy(tlmi_priv.pwd_power->kbdlang, "us", TLMI_LANG_MAXLEN); tlmi_priv.pwd_power->encoding = TLMI_ENCODING_ASCII; @@ -859,11 +878,19 @@ static int tlmi_analyze(void) if (pwdcfg.password_state & TLMI_POP_PWD) tlmi_priv.pwd_power->valid = true; + kobject_init(&tlmi_priv.pwd_power->kobj, &tlmi_pwd_setting_ktype); + return 0; +fail_free_pwd_admin: + kfree(tlmi_priv.pwd_admin); fail_clear_attr: - for (i = 0; i < TLMI_SETTINGS_COUNT; ++i) - kfree(tlmi_priv.setting[i]); + for (i = 0; i < TLMI_SETTINGS_COUNT; ++i) { + if (tlmi_priv.setting[i]) { + kfree(tlmi_priv.setting[i]->possible_values); + kfree(tlmi_priv.setting[i]); + } + } return ret; } diff --git a/drivers/platform/x86/think-lmi.h b/drivers/platform/x86/think-lmi.h index 6fa8da7af6c7..eb598846628a 100644 --- a/drivers/platform/x86/think-lmi.h +++ b/drivers/platform/x86/think-lmi.h @@ -60,6 +60,7 @@ struct think_lmi { bool can_get_bios_selections; bool can_set_bios_password; bool can_get_password_settings; + bool pending_changes; struct tlmi_attr_setting *setting[TLMI_SETTINGS_COUNT]; struct device *class_dev; diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 603156a6e3ed..50ff04c84650 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -73,6 +73,7 @@ #include <linux/uaccess.h> #include <acpi/battery.h> #include <acpi/video.h> +#include "dual_accel_detect.h" /* ThinkPad CMOS commands */ #define TP_CMOS_VOLUME_DOWN 0 @@ -3232,7 +3233,7 @@ static int hotkey_init_tablet_mode(void) * the laptop/tent/tablet mode to the EC. The bmc150 iio driver * does not support this, so skip the hotkey on these models. 
*/ - if (has_tablet_mode && !acpi_dev_present("BOSC0200", "1", -1)) + if (has_tablet_mode && !dual_accel_detect()) tp_features.hotkey_tablet = TP_HOTKEY_TABLET_USES_GMMS; type = "GMMS"; } else if (acpi_evalf(hkey_handle, &res, "MHKG", "qd")) { diff --git a/drivers/platform/x86/wireless-hotkey.c b/drivers/platform/x86/wireless-hotkey.c index b010e4ca3383..11c60a273446 100644 --- a/drivers/platform/x86/wireless-hotkey.c +++ b/drivers/platform/x86/wireless-hotkey.c @@ -78,7 +78,7 @@ static int wl_add(struct acpi_device *device) err = wireless_input_setup(); if (err) - pr_err("Failed to setup hp wireless hotkeys\n"); + pr_err("Failed to setup wireless hotkeys\n"); return err; } diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c index 3d45ed0157c6..a6ebdb269fdd 100644 --- a/drivers/power/supply/ab8500_fg.c +++ b/drivers/power/supply/ab8500_fg.c @@ -1728,6 +1728,7 @@ static void ab8500_fg_algorithm_calibrate(struct ab8500_fg *di) break; case AB8500_FG_CALIB_WAIT: dev_dbg(di->dev, "Calibration WFI\n"); + break; default: break; } @@ -2224,6 +2225,7 @@ static int ab8500_fg_get_ext_psy_data(struct device *dev, void *data) queue_work(di->fg_wq, &di->fg_work); break; } + break; default: break; } diff --git a/drivers/power/supply/abx500_chargalg.c b/drivers/power/supply/abx500_chargalg.c index a17849bfacbf..b72826cf6794 100644 --- a/drivers/power/supply/abx500_chargalg.c +++ b/drivers/power/supply/abx500_chargalg.c @@ -1150,6 +1150,7 @@ static int abx500_chargalg_get_ext_psy_data(struct device *dev, void *data) default: break; } + break; default: break; } diff --git a/drivers/pps/clients/pps_parport.c b/drivers/pps/clients/pps_parport.c index 7a41fb7b0dec..42f93d4c6ee3 100644 --- a/drivers/pps/clients/pps_parport.c +++ b/drivers/pps/clients/pps_parport.c @@ -22,8 +22,6 @@ #include <linux/parport.h> #include <linux/pps_kernel.h> -#define DRVDESC "parallel port PPS client" - /* module parameters */ #define CLEAR_WAIT_MAX 100 @@ -138,6 +136,12 @@ static void parport_attach(struct parport *port) .dev = NULL }; + if (clear_wait > CLEAR_WAIT_MAX) { + pr_err("clear_wait value should not be greater than %d\n", + CLEAR_WAIT_MAX); + return; + } + device = kzalloc(sizeof(struct pps_client_pp), GFP_KERNEL); if (!device) { pr_err("memory allocation failed, not attaching\n"); @@ -214,38 +218,8 @@ static struct parport_driver pps_parport_driver = { .detach = parport_detach, .devmodel = true, }; - -/* module staff */ - -static int __init pps_parport_init(void) -{ - int ret; - - pr_info(DRVDESC "\n"); - - if (clear_wait > CLEAR_WAIT_MAX) { - pr_err("clear_wait value should be not greater" - " then %d\n", CLEAR_WAIT_MAX); - return -EINVAL; - } - - ret = parport_register_driver(&pps_parport_driver); - if (ret) { - pr_err("unable to register with parport\n"); - return ret; - } - - return 0; -} - -static void __exit pps_parport_exit(void) -{ - parport_unregister_driver(&pps_parport_driver); -} - -module_init(pps_parport_init); -module_exit(pps_parport_exit); +module_parport_driver(pps_parport_driver); MODULE_AUTHOR("Alexander Gordeev <lasaine@lvk.cs.msu.su>"); -MODULE_DESCRIPTION(DRVDESC); +MODULE_DESCRIPTION("parallel port PPS client"); MODULE_LICENSE("GPL"); diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig index 8c20e524e9ad..e085c255da0c 100644 --- a/drivers/ptp/Kconfig +++ b/drivers/ptp/Kconfig @@ -90,7 +90,8 @@ config PTP_1588_CLOCK_INES config PTP_1588_CLOCK_PCH tristate "Intel PCH EG20T as PTP clock" depends on X86_32 || COMPILE_TEST - depends on HAS_IOMEM && NET + depends on
HAS_IOMEM && PCI + depends on NET imply PTP_1588_CLOCK help This driver adds support for using the PCH EG20T as a PTP diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile index 8673d1743faa..28a6fe342d3e 100644 --- a/drivers/ptp/Makefile +++ b/drivers/ptp/Makefile @@ -3,7 +3,7 @@ # Makefile for PTP 1588 clock support. # -ptp-y := ptp_clock.o ptp_chardev.o ptp_sysfs.o +ptp-y := ptp_clock.o ptp_chardev.o ptp_sysfs.o ptp_vclock.o ptp_kvm-$(CONFIG_X86) := ptp_kvm_x86.o ptp_kvm_common.o ptp_kvm-$(CONFIG_HAVE_ARM_SMCCC) := ptp_kvm_arm.o ptp_kvm_common.o obj-$(CONFIG_PTP_1588_CLOCK) += ptp.o diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c index a23a37a4d5dc..4dfc52e06704 100644 --- a/drivers/ptp/ptp_clock.c +++ b/drivers/ptp/ptp_clock.c @@ -24,10 +24,11 @@ #define PTP_PPS_EVENT PPS_CAPTUREASSERT #define PTP_PPS_MODE (PTP_PPS_DEFAULTS | PPS_CANWAIT | PPS_TSFMT_TSPEC) +struct class *ptp_class; + /* private globals */ static dev_t ptp_devt; -static struct class *ptp_class; static DEFINE_IDA(ptp_clocks_map); @@ -76,6 +77,11 @@ static int ptp_clock_settime(struct posix_clock *pc, const struct timespec64 *tp { struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock); + if (ptp_vclock_in_use(ptp)) { + pr_err("ptp: virtual clock in use\n"); + return -EBUSY; + } + return ptp->info->settime64(ptp->info, tp); } @@ -97,6 +103,11 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx) struct ptp_clock_info *ops; int err = -EOPNOTSUPP; + if (ptp_vclock_in_use(ptp)) { + pr_err("ptp: virtual clock in use\n"); + return -EBUSY; + } + ops = ptp->info; if (tx->modes & ADJ_SETOFFSET) { @@ -161,6 +172,7 @@ static void ptp_clock_release(struct device *dev) ptp_cleanup_pin_groups(ptp); mutex_destroy(&ptp->tsevq_mux); mutex_destroy(&ptp->pincfg_mux); + mutex_destroy(&ptp->n_vclocks_mux); ida_simple_remove(&ptp_clocks_map, ptp->index); kfree(ptp); } @@ -185,6 +197,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, { struct ptp_clock *ptp; int err = 0, index, major = MAJOR(ptp_devt); + size_t size; if (info->n_alarm > PTP_MAX_ALARMS) return ERR_PTR(-EINVAL); @@ -208,6 +221,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, spin_lock_init(&ptp->tsevq.lock); mutex_init(&ptp->tsevq_mux); mutex_init(&ptp->pincfg_mux); + mutex_init(&ptp->n_vclocks_mux); init_waitqueue_head(&ptp->tsev_wq); if (ptp->info->do_aux_work) { @@ -218,7 +232,22 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, pr_err("failed to create ptp aux_worker %d\n", err); goto kworker_err; } - ptp->pps_source->lookup_cookie = ptp; + } + + /* PTP virtual clock is being registered under physical clock */ + if (parent && parent->class && parent->class->name && + strcmp(parent->class->name, "ptp") == 0) + ptp->is_virtual_clock = true; + + if (!ptp->is_virtual_clock) { + ptp->max_vclocks = PTP_DEFAULT_MAX_VCLOCKS; + + size = sizeof(int) * ptp->max_vclocks; + ptp->vclock_index = kzalloc(size, GFP_KERNEL); + if (!ptp->vclock_index) { + err = -ENOMEM; + goto no_mem_for_vclocks; + } } err = ptp_populate_pin_groups(ptp); @@ -238,6 +267,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, pr_err("failed to register pps source\n"); goto no_pps; } + ptp->pps_source->lookup_cookie = ptp; } /* Initialize a new device of our class in our clock structure. 
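 * (Editor's aside, illustrative: "virtual" is detected purely by
 * parentage. ptp_vclock_register() in the new ptp_vclock.c below passes
 * the physical clock's device as parent, which is what makes the
 * strcmp(parent->class->name, "ptp") test above succeed:
 *
 *	vclock->clock = ptp_clock_register(&vclock->info, &pclock->dev);
 * )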
*/ @@ -265,11 +295,14 @@ no_clock: no_pps: ptp_cleanup_pin_groups(ptp); no_pin_groups: + kfree(ptp->vclock_index); +no_mem_for_vclocks: if (ptp->kworker) kthread_destroy_worker(ptp->kworker); kworker_err: mutex_destroy(&ptp->tsevq_mux); mutex_destroy(&ptp->pincfg_mux); + mutex_destroy(&ptp->n_vclocks_mux); ida_simple_remove(&ptp_clocks_map, index); no_slot: kfree(ptp); @@ -280,9 +313,16 @@ EXPORT_SYMBOL(ptp_clock_register); int ptp_clock_unregister(struct ptp_clock *ptp) { + if (ptp_vclock_in_use(ptp)) { + pr_err("ptp: virtual clock in use\n"); + return -EBUSY; + } + ptp->defunct = 1; wake_up_interruptible(&ptp->tsev_wq); + kfree(ptp->vclock_index); + if (ptp->kworker) { kthread_cancel_delayed_work_sync(&ptp->aux_work); kthread_destroy_worker(ptp->kworker); diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h index 6b97155148f1..dba6be477067 100644 --- a/drivers/ptp/ptp_private.h +++ b/drivers/ptp/ptp_private.h @@ -18,6 +18,7 @@ #define PTP_MAX_TIMESTAMPS 128 #define PTP_BUF_TIMESTAMPS 30 +#define PTP_DEFAULT_MAX_VCLOCKS 20 struct timestamp_event_queue { struct ptp_extts_event buf[PTP_MAX_TIMESTAMPS]; @@ -46,6 +47,24 @@ struct ptp_clock { const struct attribute_group *pin_attr_groups[2]; struct kthread_worker *kworker; struct kthread_delayed_work aux_work; + unsigned int max_vclocks; + unsigned int n_vclocks; + int *vclock_index; + struct mutex n_vclocks_mux; /* protect concurrent n_vclocks access */ + bool is_virtual_clock; +}; + +#define info_to_vclock(d) container_of((d), struct ptp_vclock, info) +#define cc_to_vclock(d) container_of((d), struct ptp_vclock, cc) +#define dw_to_vclock(d) container_of((d), struct ptp_vclock, refresh_work) + +struct ptp_vclock { + struct ptp_clock *pclock; + struct ptp_clock_info info; + struct ptp_clock *clock; + struct cyclecounter cc; + struct timecounter tc; + spinlock_t lock; /* protects tc/cc */ }; /* @@ -61,6 +80,24 @@ static inline int queue_cnt(struct timestamp_event_queue *q) return cnt < 0 ? PTP_MAX_TIMESTAMPS + cnt : cnt; } +/* Check if ptp virtual clock is in use */ +static inline bool ptp_vclock_in_use(struct ptp_clock *ptp) +{ + bool in_use = false; + + if (mutex_lock_interruptible(&ptp->n_vclocks_mux)) + return true; + + if (!ptp->is_virtual_clock && ptp->n_vclocks) + in_use = true; + + mutex_unlock(&ptp->n_vclocks_mux); + + return in_use; +} + +extern struct class *ptp_class; + /* * see ptp_chardev.c */ @@ -89,4 +126,6 @@ extern const struct attribute_group *ptp_groups[]; int ptp_populate_pin_groups(struct ptp_clock *ptp); void ptp_cleanup_pin_groups(struct ptp_clock *ptp); +struct ptp_vclock *ptp_vclock_register(struct ptp_clock *pclock); +void ptp_vclock_unregister(struct ptp_vclock *vclock); #endif diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c index be076a91e20e..41b92dc2f011 100644 --- a/drivers/ptp/ptp_sysfs.c +++ b/drivers/ptp/ptp_sysfs.c @@ -3,6 +3,7 @@ * PTP 1588 clock support - sysfs interface. * * Copyright (C) 2010 OMICRON electronics GmbH + * Copyright 2021 NXP */ #include <linux/capability.h> #include <linux/slab.h> @@ -148,6 +149,159 @@ out: } static DEVICE_ATTR(pps_enable, 0220, NULL, pps_enable_store); +static int unregister_vclock(struct device *dev, void *data) +{ + struct ptp_clock *ptp = dev_get_drvdata(dev); + struct ptp_clock_info *info = ptp->info; + struct ptp_vclock *vclock; + u32 *num = data; + + vclock = info_to_vclock(info); + dev_info(dev->parent, "delete virtual clock ptp%d\n", + vclock->clock->index); + + ptp_vclock_unregister(vclock); + (*num)--; + + /* For break. Not error. 
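 * (Editor's aside, illustrative: device_for_each_child_reverse() stops
 * iterating as soon as its callback returns non-zero, so the -EINVAL
 * below is only a loop terminator once the requested number of virtual
 * clocks has been deleted; n_vclocks_store() never propagates it.
 * Generic shape of the idiom, hypothetical names:
 *
 *	static int visit(struct device *d, void *data)
 *	{
 *		int *left = data;
 *		return --(*left) ? 0 : 1;
 *	}
 *	...
 *	device_for_each_child_reverse(dev, &count, visit);
 * )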
*/ + if (*num == 0) + return -EINVAL; + + return 0; +} + +static ssize_t n_vclocks_show(struct device *dev, + struct device_attribute *attr, char *page) +{ + struct ptp_clock *ptp = dev_get_drvdata(dev); + ssize_t size; + + if (mutex_lock_interruptible(&ptp->n_vclocks_mux)) + return -ERESTARTSYS; + + size = snprintf(page, PAGE_SIZE - 1, "%u\n", ptp->n_vclocks); + + mutex_unlock(&ptp->n_vclocks_mux); + + return size; +} + +static ssize_t n_vclocks_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ptp_clock *ptp = dev_get_drvdata(dev); + struct ptp_vclock *vclock; + int err = -EINVAL; + u32 num, i; + + if (kstrtou32(buf, 0, &num)) + return err; + + if (mutex_lock_interruptible(&ptp->n_vclocks_mux)) + return -ERESTARTSYS; + + if (num > ptp->max_vclocks) { + dev_err(dev, "max value is %d\n", ptp->max_vclocks); + goto out; + } + + /* Need to create more vclocks */ + if (num > ptp->n_vclocks) { + for (i = 0; i < num - ptp->n_vclocks; i++) { + vclock = ptp_vclock_register(ptp); + if (!vclock) + goto out; + + *(ptp->vclock_index + ptp->n_vclocks + i) = + vclock->clock->index; + + dev_info(dev, "new virtual clock ptp%d\n", + vclock->clock->index); + } + } + + /* Need to delete vclocks */ + if (num < ptp->n_vclocks) { + i = ptp->n_vclocks - num; + device_for_each_child_reverse(dev, &i, + unregister_vclock); + + for (i = 1; i <= ptp->n_vclocks - num; i++) + *(ptp->vclock_index + ptp->n_vclocks - i) = -1; + } + + if (num == 0) + dev_info(dev, "only physical clock in use now\n"); + else + dev_info(dev, "guarantee physical clock free running\n"); + + ptp->n_vclocks = num; + mutex_unlock(&ptp->n_vclocks_mux); + + return count; +out: + mutex_unlock(&ptp->n_vclocks_mux); + return err; +} +static DEVICE_ATTR_RW(n_vclocks); + +static ssize_t max_vclocks_show(struct device *dev, + struct device_attribute *attr, char *page) +{ + struct ptp_clock *ptp = dev_get_drvdata(dev); + ssize_t size; + + size = snprintf(page, PAGE_SIZE - 1, "%u\n", ptp->max_vclocks); + + return size; +} + +static ssize_t max_vclocks_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ptp_clock *ptp = dev_get_drvdata(dev); + unsigned int *vclock_index; + int err = -EINVAL; + size_t size; + u32 max; + + if (kstrtou32(buf, 0, &max) || max == 0) + return -EINVAL; + + if (max == ptp->max_vclocks) + return count; + + if (mutex_lock_interruptible(&ptp->n_vclocks_mux)) + return -ERESTARTSYS; + + if (max < ptp->n_vclocks) + goto out; + + size = sizeof(int) * max; + vclock_index = kzalloc(size, GFP_KERNEL); + if (!vclock_index) { + err = -ENOMEM; + goto out; + } + + size = sizeof(int) * ptp->n_vclocks; + memcpy(vclock_index, ptp->vclock_index, size); + + kfree(ptp->vclock_index); + ptp->vclock_index = vclock_index; + ptp->max_vclocks = max; + + mutex_unlock(&ptp->n_vclocks_mux); + + return count; +out: + mutex_unlock(&ptp->n_vclocks_mux); + return err; +} +static DEVICE_ATTR_RW(max_vclocks); + static struct attribute *ptp_attrs[] = { &dev_attr_clock_name.attr, @@ -162,6 +316,8 @@ static struct attribute *ptp_attrs[] = { &dev_attr_fifo.attr, &dev_attr_period.attr, &dev_attr_pps_enable.attr, + &dev_attr_n_vclocks.attr, + &dev_attr_max_vclocks.attr, NULL }; @@ -183,6 +339,10 @@ static umode_t ptp_is_attribute_visible(struct kobject *kobj, } else if (attr == &dev_attr_pps_enable.attr) { if (!info->pps) mode = 0; + } else if (attr == &dev_attr_n_vclocks.attr || + attr == &dev_attr_max_vclocks.attr) { + if (ptp->is_virtual_clock) + mode = 0; } 
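/* (Editor's aside, illustrative: an is_visible() callback returning 0
 * suppresses the sysfs file entirely, which is how the new n_vclocks and
 * max_vclocks attributes above are hidden on virtual clocks without a
 * second attribute group. Minimal shape, hypothetical names:
 *
 *	static umode_t my_visible(struct kobject *k, struct attribute *a, int n)
 *	{
 *		return hide_it ? 0 : a->mode;
 *	}
 * )
 */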
return mode; diff --git a/drivers/ptp/ptp_vclock.c b/drivers/ptp/ptp_vclock.c new file mode 100644 index 000000000000..e0f87c57749a --- /dev/null +++ b/drivers/ptp/ptp_vclock.c @@ -0,0 +1,219 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * PTP virtual clock driver + * + * Copyright 2021 NXP + */ +#include <linux/slab.h> +#include "ptp_private.h" + +#define PTP_VCLOCK_CC_SHIFT 31 +#define PTP_VCLOCK_CC_MULT (1 << PTP_VCLOCK_CC_SHIFT) +#define PTP_VCLOCK_FADJ_SHIFT 9 +#define PTP_VCLOCK_FADJ_DENOMINATOR 15625ULL +#define PTP_VCLOCK_REFRESH_INTERVAL (HZ * 2) + +static int ptp_vclock_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct ptp_vclock *vclock = info_to_vclock(ptp); + unsigned long flags; + s64 adj; + + adj = (s64)scaled_ppm << PTP_VCLOCK_FADJ_SHIFT; + adj = div_s64(adj, PTP_VCLOCK_FADJ_DENOMINATOR); + + spin_lock_irqsave(&vclock->lock, flags); + timecounter_read(&vclock->tc); + vclock->cc.mult = PTP_VCLOCK_CC_MULT + adj; + spin_unlock_irqrestore(&vclock->lock, flags); + + return 0; +} + +static int ptp_vclock_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct ptp_vclock *vclock = info_to_vclock(ptp); + unsigned long flags; + + spin_lock_irqsave(&vclock->lock, flags); + timecounter_adjtime(&vclock->tc, delta); + spin_unlock_irqrestore(&vclock->lock, flags); + + return 0; +} + +static int ptp_vclock_gettime(struct ptp_clock_info *ptp, + struct timespec64 *ts) +{ + struct ptp_vclock *vclock = info_to_vclock(ptp); + unsigned long flags; + u64 ns; + + spin_lock_irqsave(&vclock->lock, flags); + ns = timecounter_read(&vclock->tc); + spin_unlock_irqrestore(&vclock->lock, flags); + *ts = ns_to_timespec64(ns); + + return 0; +} + +static int ptp_vclock_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct ptp_vclock *vclock = info_to_vclock(ptp); + u64 ns = timespec64_to_ns(ts); + unsigned long flags; + + spin_lock_irqsave(&vclock->lock, flags); + timecounter_init(&vclock->tc, &vclock->cc, ns); + spin_unlock_irqrestore(&vclock->lock, flags); + + return 0; +} + +static long ptp_vclock_refresh(struct ptp_clock_info *ptp) +{ + struct ptp_vclock *vclock = info_to_vclock(ptp); + struct timespec64 ts; + + ptp_vclock_gettime(&vclock->info, &ts); + + return PTP_VCLOCK_REFRESH_INTERVAL; +} + +static const struct ptp_clock_info ptp_vclock_info = { + .owner = THIS_MODULE, + .name = "ptp virtual clock", + /* The maximum ppb value that long scaled_ppm can support */ + .max_adj = 32767999, + .adjfine = ptp_vclock_adjfine, + .adjtime = ptp_vclock_adjtime, + .gettime64 = ptp_vclock_gettime, + .settime64 = ptp_vclock_settime, + .do_aux_work = ptp_vclock_refresh, +}; + +static u64 ptp_vclock_read(const struct cyclecounter *cc) +{ + struct ptp_vclock *vclock = cc_to_vclock(cc); + struct ptp_clock *ptp = vclock->pclock; + struct timespec64 ts = {}; + + if (ptp->info->gettimex64) + ptp->info->gettimex64(ptp->info, &ts, NULL); + else + ptp->info->gettime64(ptp->info, &ts); + + return timespec64_to_ns(&ts); +} + +static const struct cyclecounter ptp_vclock_cc = { + .read = ptp_vclock_read, + .mask = CYCLECOUNTER_MASK(32), + .mult = PTP_VCLOCK_CC_MULT, + .shift = PTP_VCLOCK_CC_SHIFT, +}; + +struct ptp_vclock *ptp_vclock_register(struct ptp_clock *pclock) +{ + struct ptp_vclock *vclock; + + vclock = kzalloc(sizeof(*vclock), GFP_KERNEL); + if (!vclock) + return NULL; + + vclock->pclock = pclock; + vclock->info = ptp_vclock_info; + vclock->cc = ptp_vclock_cc; + + snprintf(vclock->info.name, PTP_CLOCK_NAME_LEN, "ptp%d_virt", + pclock->index); + + 
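/* (Editor's aside, illustrative: ptp_vclock_adjfine() above converts
 * scaled_ppm -- parts per million in 16.16 fixed point -- into a delta on
 * the 2^31 base mult. The exact factor is 2^31 / (10^6 * 2^16) =
 * 2^9 / 15625, which is where PTP_VCLOCK_FADJ_SHIFT = 9 and the 15625
 * (= 10^6 / 2^6) denominator come from. Worked check for exactly 1 ppm:
 *
 *	s64 adj = (65536LL << 9) / 15625;
 *	adj == 2147 == (1 << 31) / 1000000 (truncated), i.e. 1 ppm of mult.
 * )
 */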
spin_lock_init(&vclock->lock); + + vclock->clock = ptp_clock_register(&vclock->info, &pclock->dev); + if (IS_ERR_OR_NULL(vclock->clock)) { + kfree(vclock); + return NULL; + } + + timecounter_init(&vclock->tc, &vclock->cc, 0); + ptp_schedule_worker(vclock->clock, PTP_VCLOCK_REFRESH_INTERVAL); + + return vclock; +} + +void ptp_vclock_unregister(struct ptp_vclock *vclock) +{ + ptp_clock_unregister(vclock->clock); + kfree(vclock); +} + +int ptp_get_vclocks_index(int pclock_index, int **vclock_index) +{ + char name[PTP_CLOCK_NAME_LEN] = ""; + struct ptp_clock *ptp; + struct device *dev; + int num = 0; + + if (pclock_index < 0) + return num; + + snprintf(name, PTP_CLOCK_NAME_LEN, "ptp%d", pclock_index); + dev = class_find_device_by_name(ptp_class, name); + if (!dev) + return num; + + ptp = dev_get_drvdata(dev); + + if (mutex_lock_interruptible(&ptp->n_vclocks_mux)) { + put_device(dev); + return num; + } + + *vclock_index = kzalloc(sizeof(int) * ptp->n_vclocks, GFP_KERNEL); + if (!(*vclock_index)) + goto out; + + memcpy(*vclock_index, ptp->vclock_index, sizeof(int) * ptp->n_vclocks); + num = ptp->n_vclocks; +out: + mutex_unlock(&ptp->n_vclocks_mux); + put_device(dev); + return num; +} +EXPORT_SYMBOL(ptp_get_vclocks_index); + +void ptp_convert_timestamp(struct skb_shared_hwtstamps *hwtstamps, + int vclock_index) +{ + char name[PTP_CLOCK_NAME_LEN] = ""; + struct ptp_vclock *vclock; + struct ptp_clock *ptp; + unsigned long flags; + struct device *dev; + u64 ns; + + snprintf(name, PTP_CLOCK_NAME_LEN, "ptp%d", vclock_index); + dev = class_find_device_by_name(ptp_class, name); + if (!dev) + return; + + ptp = dev_get_drvdata(dev); + if (!ptp->is_virtual_clock) { + put_device(dev); + return; + } + + vclock = info_to_vclock(ptp->info); + + ns = ktime_to_ns(hwtstamps->hwtstamp); + + spin_lock_irqsave(&vclock->lock, flags); + ns = timecounter_cyc2time(&vclock->tc, ns); + spin_unlock_irqrestore(&vclock->lock, flags); + + put_device(dev); + hwtstamps->hwtstamp = ns_to_ktime(ns); +} +EXPORT_SYMBOL(ptp_convert_timestamp); diff --git a/drivers/pwm/pwm-berlin.c b/drivers/pwm/pwm-berlin.c index 5537b5f6dd5d..e157273fd2f7 100644 --- a/drivers/pwm/pwm-berlin.c +++ b/drivers/pwm/pwm-berlin.c @@ -190,12 +190,9 @@ static int berlin_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, return 0; } - if (state->period != pwm->state.period || - state->duty_cycle != pwm->state.duty_cycle) { - err = berlin_pwm_config(chip, pwm, state->duty_cycle, state->period); - if (err) - return err; - } + err = berlin_pwm_config(chip, pwm, state->duty_cycle, state->period); + if (err) + return err; if (!enabled) return berlin_pwm_enable(chip, pwm); diff --git a/drivers/pwm/pwm-ep93xx.c b/drivers/pwm/pwm-ep93xx.c index 8a3d781e6514..fc3cb7d669c6 100644 --- a/drivers/pwm/pwm-ep93xx.c +++ b/drivers/pwm/pwm-ep93xx.c @@ -64,6 +64,11 @@ static int ep93xx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, int ret; struct ep93xx_pwm *ep93xx_pwm = to_ep93xx_pwm(chip); bool enabled = state->enabled; + void __iomem *base = ep93xx_pwm->base; + unsigned long long c; + unsigned long period_cycles; + unsigned long duty_cycles; + unsigned long term; if (state->polarity != pwm->state.polarity) { if (enabled) { @@ -97,57 +102,47 @@ static int ep93xx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, return 0; } - if (state->period != pwm->state.period || - state->duty_cycle != pwm->state.duty_cycle) { - struct ep93xx_pwm *ep93xx_pwm = to_ep93xx_pwm(chip); - void __iomem *base = ep93xx_pwm->base; - unsigned long long c; - unsigned long 
period_cycles; - unsigned long duty_cycles; - unsigned long term; + /* + * The clock needs to be enabled to access the PWM registers. + * Configuration can be changed at any time. + */ + if (!pwm_is_enabled(pwm)) { + ret = clk_prepare_enable(ep93xx_pwm->clk); + if (ret) + return ret; + } - /* - * The clock needs to be enabled to access the PWM registers. - * Configuration can be changed at any time. - */ - if (!pwm_is_enabled(pwm)) { - ret = clk_prepare_enable(ep93xx_pwm->clk); - if (ret) - return ret; - } + c = clk_get_rate(ep93xx_pwm->clk); + c *= state->period; + do_div(c, 1000000000); + period_cycles = c; + + c = period_cycles; + c *= state->duty_cycle; + do_div(c, state->period); + duty_cycles = c; - c = clk_get_rate(ep93xx_pwm->clk); - c *= state->period; - do_div(c, 1000000000); - period_cycles = c; - - c = period_cycles; - c *= state->duty_cycle; - do_div(c, state->period); - duty_cycles = c; - - if (period_cycles < 0x10000 && duty_cycles < 0x10000) { - term = readw(base + EP93XX_PWMx_TERM_COUNT); - - /* Order is important if PWM is running */ - if (period_cycles > term) { - writew(period_cycles, base + EP93XX_PWMx_TERM_COUNT); - writew(duty_cycles, base + EP93XX_PWMx_DUTY_CYCLE); - } else { - writew(duty_cycles, base + EP93XX_PWMx_DUTY_CYCLE); - writew(period_cycles, base + EP93XX_PWMx_TERM_COUNT); - } - ret = 0; + if (period_cycles < 0x10000 && duty_cycles < 0x10000) { + term = readw(base + EP93XX_PWMx_TERM_COUNT); + + /* Order is important if PWM is running */ + if (period_cycles > term) { + writew(period_cycles, base + EP93XX_PWMx_TERM_COUNT); + writew(duty_cycles, base + EP93XX_PWMx_DUTY_CYCLE); } else { - ret = -EINVAL; + writew(duty_cycles, base + EP93XX_PWMx_DUTY_CYCLE); + writew(period_cycles, base + EP93XX_PWMx_TERM_COUNT); } + ret = 0; + } else { + ret = -EINVAL; + } - if (!pwm_is_enabled(pwm)) - clk_disable_unprepare(ep93xx_pwm->clk); + if (!pwm_is_enabled(pwm)) + clk_disable_unprepare(ep93xx_pwm->clk); - if (ret) - return ret; - } + if (ret) + return ret; if (!enabled) { ret = clk_prepare_enable(ep93xx_pwm->clk); diff --git a/drivers/pwm/pwm-spear.c b/drivers/pwm/pwm-spear.c index 48c31dac2f32..54c7990967dd 100644 --- a/drivers/pwm/pwm-spear.c +++ b/drivers/pwm/pwm-spear.c @@ -177,12 +177,9 @@ static int spear_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, return 0; } - if (state->period != pwm->state.period || - state->duty_cycle != pwm->state.duty_cycle) { - err = spear_pwm_config(chip, pwm, state->duty_cycle, state->period); - if (err) - return err; - } + err = spear_pwm_config(chip, pwm, state->duty_cycle, state->period); + if (err) + return err; if (!pwm->state.enabled) return spear_pwm_enable(chip, pwm); diff --git a/drivers/pwm/pwm-sprd.c b/drivers/pwm/pwm-sprd.c index f2a85e8dd941..7004f55bbf11 100644 --- a/drivers/pwm/pwm-sprd.c +++ b/drivers/pwm/pwm-sprd.c @@ -183,13 +183,10 @@ static int sprd_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, } } - if (state->period != cstate->period || - state->duty_cycle != cstate->duty_cycle) { - ret = sprd_pwm_config(spc, pwm, state->duty_cycle, - state->period); - if (ret) - return ret; - } + ret = sprd_pwm_config(spc, pwm, state->duty_cycle, + state->period); + if (ret) + return ret; sprd_pwm_write(spc, pwm->hwpwm, SPRD_PWM_ENABLE, 1); } else if (cstate->enabled) { diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c index dec3f1fb150c..35eb19a5a0d1 100644 --- a/drivers/pwm/pwm-tiecap.c +++ b/drivers/pwm/pwm-tiecap.c @@ -189,16 +189,13 @@ static int ecap_pwm_apply(struct pwm_chip *chip, 
struct pwm_device *pwm, return 0; } - if (state->period != pwm->state.period || - state->duty_cycle != pwm->state.duty_cycle) { - if (state->period > NSEC_PER_SEC) - return -ERANGE; + if (state->period > NSEC_PER_SEC) + return -ERANGE; - err = ecap_pwm_config(chip, pwm, state->duty_cycle, - state->period, enabled); - if (err) - return err; - } + err = ecap_pwm_config(chip, pwm, state->duty_cycle, + state->period, enabled); + if (err) + return err; if (!enabled) return ecap_pwm_enable(chip, pwm); diff --git a/drivers/regulator/bd9576-regulator.c b/drivers/regulator/bd9576-regulator.c index e16c3727db7a..aa42da4d141e 100644 --- a/drivers/regulator/bd9576-regulator.c +++ b/drivers/regulator/bd9576-regulator.c @@ -294,9 +294,9 @@ static bool check_temp_flag_mismatch(struct regulator_dev *rdev, int severity, struct bd957x_regulator_data *r) { if ((severity == REGULATOR_SEVERITY_ERR && - r->ovd_notif != REGULATOR_EVENT_OVER_TEMP) || + r->temp_notif != REGULATOR_EVENT_OVER_TEMP) || (severity == REGULATOR_SEVERITY_WARN && - r->ovd_notif != REGULATOR_EVENT_OVER_TEMP_WARN)) { + r->temp_notif != REGULATOR_EVENT_OVER_TEMP_WARN)) { dev_warn(rdev_get_dev(rdev), "Can't support both thermal WARN and ERR\n"); if (severity == REGULATOR_SEVERITY_WARN) diff --git a/drivers/regulator/hi6421-regulator.c b/drivers/regulator/hi6421-regulator.c index bff8c515dcde..d144a4bdb76d 100644 --- a/drivers/regulator/hi6421-regulator.c +++ b/drivers/regulator/hi6421-regulator.c @@ -366,9 +366,8 @@ static struct hi6421_regulator_info static int hi6421_regulator_enable(struct regulator_dev *rdev) { - struct hi6421_regulator_pdata *pdata; + struct hi6421_regulator_pdata *pdata = rdev_get_drvdata(rdev); - pdata = dev_get_drvdata(rdev->dev.parent); /* hi6421 spec requires regulator enablement must be serialized: * - Because when BUCK, LDO switching from off to on, it will have * a huge instantaneous current; so you can not turn on two or @@ -385,9 +384,10 @@ static int hi6421_regulator_enable(struct regulator_dev *rdev) static unsigned int hi6421_regulator_ldo_get_mode(struct regulator_dev *rdev) { - struct hi6421_regulator_info *info = rdev_get_drvdata(rdev); + struct hi6421_regulator_info *info; unsigned int reg_val; + info = container_of(rdev->desc, struct hi6421_regulator_info, desc); regmap_read(rdev->regmap, rdev->desc->enable_reg, &reg_val); if (reg_val & info->mode_mask) return REGULATOR_MODE_IDLE; @@ -397,9 +397,10 @@ static unsigned int hi6421_regulator_ldo_get_mode(struct regulator_dev *rdev) static unsigned int hi6421_regulator_buck_get_mode(struct regulator_dev *rdev) { - struct hi6421_regulator_info *info = rdev_get_drvdata(rdev); + struct hi6421_regulator_info *info; unsigned int reg_val; + info = container_of(rdev->desc, struct hi6421_regulator_info, desc); regmap_read(rdev->regmap, rdev->desc->enable_reg, &reg_val); if (reg_val & info->mode_mask) return REGULATOR_MODE_STANDBY; @@ -410,9 +411,10 @@ static unsigned int hi6421_regulator_buck_get_mode(struct regulator_dev *rdev) static int hi6421_regulator_ldo_set_mode(struct regulator_dev *rdev, unsigned int mode) { - struct hi6421_regulator_info *info = rdev_get_drvdata(rdev); + struct hi6421_regulator_info *info; unsigned int new_mode; + info = container_of(rdev->desc, struct hi6421_regulator_info, desc); switch (mode) { case REGULATOR_MODE_NORMAL: new_mode = 0; @@ -434,9 +436,10 @@ static int hi6421_regulator_ldo_set_mode(struct regulator_dev *rdev, static int hi6421_regulator_buck_set_mode(struct regulator_dev *rdev, unsigned int mode) { - struct 
hi6421_regulator_info *info = rdev_get_drvdata(rdev); + struct hi6421_regulator_info *info; unsigned int new_mode; + info = container_of(rdev->desc, struct hi6421_regulator_info, desc); switch (mode) { case REGULATOR_MODE_NORMAL: new_mode = 0; @@ -459,7 +462,9 @@ static unsigned int hi6421_regulator_ldo_get_optimum_mode(struct regulator_dev *rdev, int input_uV, int output_uV, int load_uA) { - struct hi6421_regulator_info *info = rdev_get_drvdata(rdev); + struct hi6421_regulator_info *info; + + info = container_of(rdev->desc, struct hi6421_regulator_info, desc); if (load_uA > info->eco_microamp) return REGULATOR_MODE_NORMAL; @@ -543,14 +548,13 @@ static int hi6421_regulator_probe(struct platform_device *pdev) if (!pdata) return -ENOMEM; mutex_init(&pdata->lock); - platform_set_drvdata(pdev, pdata); for (i = 0; i < ARRAY_SIZE(hi6421_regulator_info); i++) { /* assign per-regulator data */ info = &hi6421_regulator_info[i]; config.dev = pdev->dev.parent; - config.driver_data = info; + config.driver_data = pdata; config.regmap = pmic->regmap; rdev = devm_regulator_register(&pdev->dev, &info->desc, diff --git a/drivers/regulator/hi6421v600-regulator.c b/drivers/regulator/hi6421v600-regulator.c index 9b162c0555c3..845bc3b4026d 100644 --- a/drivers/regulator/hi6421v600-regulator.c +++ b/drivers/regulator/hi6421v600-regulator.c @@ -98,10 +98,9 @@ static const unsigned int ldo34_voltages[] = { static int hi6421_spmi_regulator_enable(struct regulator_dev *rdev) { - struct hi6421_spmi_reg_priv *priv; + struct hi6421_spmi_reg_priv *priv = rdev_get_drvdata(rdev); int ret; - priv = dev_get_drvdata(rdev->dev.parent); /* cannot enable more than one regulator at one time */ mutex_lock(&priv->enable_mutex); @@ -119,9 +118,10 @@ static int hi6421_spmi_regulator_enable(struct regulator_dev *rdev) static unsigned int hi6421_spmi_regulator_get_mode(struct regulator_dev *rdev) { - struct hi6421_spmi_reg_info *sreg = rdev_get_drvdata(rdev); + struct hi6421_spmi_reg_info *sreg; unsigned int reg_val; + sreg = container_of(rdev->desc, struct hi6421_spmi_reg_info, desc); regmap_read(rdev->regmap, rdev->desc->enable_reg, &reg_val); if (reg_val & sreg->eco_mode_mask) @@ -133,9 +133,10 @@ static unsigned int hi6421_spmi_regulator_get_mode(struct regulator_dev *rdev) static int hi6421_spmi_regulator_set_mode(struct regulator_dev *rdev, unsigned int mode) { - struct hi6421_spmi_reg_info *sreg = rdev_get_drvdata(rdev); + struct hi6421_spmi_reg_info *sreg; unsigned int val; + sreg = container_of(rdev->desc, struct hi6421_spmi_reg_info, desc); switch (mode) { case REGULATOR_MODE_NORMAL: val = 0; @@ -159,7 +160,9 @@ hi6421_spmi_regulator_get_optimum_mode(struct regulator_dev *rdev, int input_uV, int output_uV, int load_uA) { - struct hi6421_spmi_reg_info *sreg = rdev_get_drvdata(rdev); + struct hi6421_spmi_reg_info *sreg; + + sreg = container_of(rdev->desc, struct hi6421_spmi_reg_info, desc); if (!sreg->eco_uA || ((unsigned int)load_uA > sreg->eco_uA)) return REGULATOR_MODE_NORMAL; @@ -252,13 +255,12 @@ static int hi6421_spmi_regulator_probe(struct platform_device *pdev) return -ENOMEM; mutex_init(&priv->enable_mutex); - platform_set_drvdata(pdev, priv); for (i = 0; i < ARRAY_SIZE(regulator_info); i++) { info = &regulator_info[i]; config.dev = pdev->dev.parent; - config.driver_data = info; + config.driver_data = priv; config.regmap = pmic->regmap; rdev = devm_regulator_register(dev, &info->desc, &config); diff --git a/drivers/regulator/mtk-dvfsrc-regulator.c b/drivers/regulator/mtk-dvfsrc-regulator.c index d3d876198d6e..234af3a66c77 
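The hi6421 and hi6421v600 patches above free config.driver_data for the shared pdata/priv object and instead recover the per-regulator entry from the descriptor embedded in it, via container_of(rdev->desc, ...). This works because each entry of the info array embeds its own regulator_desc, so the desc pointer uniquely identifies the enclosing element. A minimal sketch of the idiom with generic names (not the driver's actual types):

#include <linux/kernel.h>	/* container_of() */

struct sketch_desc {
	const char *name;
};

struct sketch_info {
	int eco_microamp;
	struct sketch_desc desc;	/* embedded, one per array entry */
};

static int sketch_get_eco(struct sketch_desc *desc)
{
	struct sketch_info *info =
		container_of(desc, struct sketch_info, desc);

	return info->eco_microamp;
}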
100644 --- a/drivers/regulator/mtk-dvfsrc-regulator.c +++ b/drivers/regulator/mtk-dvfsrc-regulator.c @@ -179,8 +179,7 @@ static int dvfsrc_vcore_regulator_probe(struct platform_device *pdev) for (i = 0; i < regulator_init_data->size; i++) { config.dev = dev->parent; config.driver_data = (mt_regulators + i); - rdev = devm_regulator_register(dev->parent, - &(mt_regulators + i)->desc, + rdev = devm_regulator_register(dev, &(mt_regulators + i)->desc, &config); if (IS_ERR(rdev)) { dev_err(dev, "failed to register %s\n", diff --git a/drivers/regulator/rtmv20-regulator.c b/drivers/regulator/rtmv20-regulator.c index 4bca64de0f67..2ee334174e2b 100644 --- a/drivers/regulator/rtmv20-regulator.c +++ b/drivers/regulator/rtmv20-regulator.c @@ -37,7 +37,7 @@ #define RTMV20_WIDTH2_MASK GENMASK(7, 0) #define RTMV20_LBPLVL_MASK GENMASK(3, 0) #define RTMV20_LBPEN_MASK BIT(7) -#define RTMV20_STROBEPOL_MASK BIT(1) +#define RTMV20_STROBEPOL_MASK BIT(0) #define RTMV20_VSYNPOL_MASK BIT(1) #define RTMV20_FSINEN_MASK BIT(7) #define RTMV20_ESEN_MASK BIT(6) diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 0de1a463c509..fb5d8152652d 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -1004,15 +1004,23 @@ static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len) static void dasd_eckd_store_conf_data(struct dasd_device *device, struct dasd_conf_data *conf_data, int chp) { + struct dasd_eckd_private *private = device->private; struct channel_path_desc_fmt0 *chp_desc; struct subchannel_id sch_id; + void *cdp; - ccw_device_get_schid(device->cdev, &sch_id); /* * path handling and read_conf allocate data * free it before replacing the pointer + * also replace the old private->conf_data pointer + * with the new one if this points to the same data */ - kfree(device->path[chp].conf_data); + cdp = device->path[chp].conf_data; + if (private->conf_data == cdp) { + private->conf_data = (void *)conf_data; + dasd_eckd_identify_conf_parts(private); + } + ccw_device_get_schid(device->cdev, &sch_id); device->path[chp].conf_data = conf_data; device->path[chp].cssid = sch_id.cssid; device->path[chp].ssid = sch_id.ssid; @@ -1020,6 +1028,7 @@ static void dasd_eckd_store_conf_data(struct dasd_device *device, if (chp_desc) device->path[chp].chpid = chp_desc->chpid; kfree(chp_desc); + kfree(cdp); } static void dasd_eckd_clear_conf_data(struct dasd_device *device) diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c index 8abb42923307..cc8237afeffa 100644 --- a/drivers/s390/char/tape_char.c +++ b/drivers/s390/char/tape_char.c @@ -371,8 +371,6 @@ __tapechar_ioctl(struct tape_device *device, case MTSEEK: if (device->required_tapemarks) tape_std_terminate_write(device); - default: - ; } rc = tape_mtop(device, op.mt_op, op.mt_count); diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c index b341075397d9..377e3689d1d4 100644 --- a/drivers/s390/net/ctcm_fsms.c +++ b/drivers/s390/net/ctcm_fsms.c @@ -1454,6 +1454,7 @@ again: get_ccwdev_lock(ch->cdev), saveflags); if (rc != 0) ctcm_ccw_check_rc(ch, rc, "normal RX"); + break; default: break; } diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 2abf86c104d5..d7cdd9cfe485 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -279,7 +279,7 @@ static void qeth_l2_set_pnso_mode(struct qeth_card *card, static void qeth_l2_dev2br_fdb_flush(struct qeth_card *card) { - struct switchdev_notifier_fdb_info info; + struct 
switchdev_notifier_fdb_info info = {}; QETH_CARD_TEXT(card, 2, "fdbflush"); @@ -679,7 +679,7 @@ static void qeth_l2_dev2br_fdb_notify(struct qeth_card *card, u8 code, struct net_if_token *token, struct mac_addr_lnid *addr_lnid) { - struct switchdev_notifier_fdb_info info; + struct switchdev_notifier_fdb_info info = {}; u8 ntfy_mac[ETH_ALEN]; ether_addr_copy(ntfy_mac, addr_lnid->mac); diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index d308ff744a29..f0d6f205c53c 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -434,6 +434,7 @@ static int qeth_l3_correct_routing_type(struct qeth_card *card, if (qeth_is_ipafunc_supported(card, prot, IPA_OSA_MC_ROUTER)) return 0; + goto out_inval; default: goto out_inval; } diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index 544efd4c42f0..b8cd75a872ee 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c @@ -487,6 +487,7 @@ static ssize_t zfcp_sysfs_port_fc_security_show(struct device *dev, if (0 == (status & ZFCP_STATUS_COMMON_OPEN) || 0 == (status & ZFCP_STATUS_COMMON_UNBLOCKED) || 0 == (status & ZFCP_STATUS_PORT_PHYS_OPEN) || + 0 != (status & ZFCP_STATUS_PORT_LINK_TEST) || 0 != (status & ZFCP_STATUS_COMMON_ERP_FAILED) || 0 != (status & ZFCP_STATUS_COMMON_ACCESS_BOXED)) i = sprintf(buf, "unknown\n"); diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c index 84fc7a0c6ff4..4a84599ff491 100644 --- a/drivers/scsi/arm/acornscsi.c +++ b/drivers/scsi/arm/acornscsi.c @@ -2642,6 +2642,7 @@ int acornscsi_abort(struct scsi_cmnd *SCpnt) //#endif clear_bit(SCpnt->device->id * 8 + (u8)(SCpnt->device->lun & 0x7), host->busyluns); + fallthrough; /* * We found the command, and cleared it out. Either diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c index 30ed3d23635a..9c4458a99025 100644 --- a/drivers/scsi/arm/fas216.c +++ b/drivers/scsi/arm/fas216.c @@ -1375,6 +1375,7 @@ static void fas216_busservice_intr(FAS216_Info *info, unsigned int stat, unsigne case IS_COMPLETE: break; } + break; default: break; @@ -2010,7 +2011,7 @@ static void fas216_rq_sns_done(FAS216_Info *info, struct scsi_cmnd *SCpnt, "request sense complete, result=0x%04x%02x%02x", result, SCpnt->SCp.Message, SCpnt->SCp.Status); - if (result != DID_OK || SCpnt->SCp.Status != GOOD) + if (result != DID_OK || SCpnt->SCp.Status != SAM_STAT_GOOD) /* * Something went wrong. 
Make sure that we don't * have valid data in the sense buffer that could diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index 25f6e1ac9e7b..66652ab409cc 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -453,8 +453,8 @@ static int initialize_controller(struct scsi_device *sdev, if (!h->ctlr) err = SCSI_DH_RES_TEMP_UNAVAIL; else { - list_add_rcu(&h->node, &h->ctlr->dh_list); h->sdev = sdev; + list_add_rcu(&h->node, &h->ctlr->dh_list); } spin_unlock(&list_lock); err = SCSI_DH_OK; @@ -778,11 +778,11 @@ static void rdac_bus_detach( struct scsi_device *sdev ) spin_lock(&list_lock); if (h->ctlr) { list_del_rcu(&h->node); - h->sdev = NULL; kref_put(&h->ctlr->kref, release_controller); } spin_unlock(&list_lock); sdev->handler_data = NULL; + synchronize_rcu(); kfree(h); } diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index 929a3b043ad7..3f6f14f0cafb 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -488,6 +488,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) shost_printk(KERN_WARNING, shost, "error handler thread failed to spawn, error = %ld\n", PTR_ERR(shost->ehandler)); + shost->ehandler = NULL; goto fail; } diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index bee1bec49c09..935b01ee44b7 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -807,6 +807,13 @@ static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost, for (i = 0; i < size; ++i) { struct ibmvfc_event *evt = &pool->events[i]; + /* + * evt->active states + * 1 = in flight + * 0 = being completed + * -1 = free/freed + */ + atomic_set(&evt->active, -1); atomic_set(&evt->free, 1); evt->crq.valid = 0x80; evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i)); @@ -1017,6 +1024,7 @@ static void ibmvfc_free_event(struct ibmvfc_event *evt) BUG_ON(!ibmvfc_valid_event(pool, evt)); BUG_ON(atomic_inc_return(&evt->free) != 1); + BUG_ON(atomic_dec_and_test(&evt->active)); spin_lock_irqsave(&evt->queue->l_lock, flags); list_add_tail(&evt->queue_list, &evt->queue->free); @@ -1072,6 +1080,12 @@ static void ibmvfc_complete_purge(struct list_head *purge_list) **/ static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code) { + /* + * Anything we are failing should still be active. Otherwise, it + * implies we already got a response for the command and are doing + * something bad like double completing it. 
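The scsi_dh_rdac changes above follow the standard RCU publish/retire discipline: initialize every field before list_add_rcu() makes the object reachable, and after list_del_rcu() wait out readers with synchronize_rcu() before kfree(). A condensed sketch of the pattern (kernel context, generic names, list-mutation locking omitted):

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct dh_entry {
	struct list_head node;
	void *sdev;
};

static void dh_publish(struct list_head *dh_list,
		       struct dh_entry *h, void *sdev)
{
	h->sdev = sdev;			/* init before publication */
	list_add_rcu(&h->node, dh_list);
}

static void dh_retire(struct dh_entry *h)
{
	list_del_rcu(&h->node);
	synchronize_rcu();		/* no reader still holds h */
	kfree(h);
}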
+ */ + BUG_ON(!atomic_dec_and_test(&evt->active)); if (evt->cmnd) { evt->cmnd->result = (error_code << 16); evt->done = ibmvfc_scsi_eh_done; @@ -1723,6 +1737,7 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt, evt->done(evt); } else { + atomic_set(&evt->active, 1); spin_unlock_irqrestore(&evt->queue->l_lock, flags); ibmvfc_trc_start(evt); } @@ -3251,7 +3266,7 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost, return; } - if (unlikely(atomic_read(&evt->free))) { + if (unlikely(atomic_dec_if_positive(&evt->active))) { dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n", crq->ioba); return; @@ -3778,7 +3793,7 @@ static void ibmvfc_handle_scrq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost return; } - if (unlikely(atomic_read(&evt->free))) { + if (unlikely(atomic_dec_if_positive(&evt->active))) { dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n", crq->ioba); return; diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h index 4f0f3baefae4..92fb889d7eb0 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.h +++ b/drivers/scsi/ibmvscsi/ibmvfc.h @@ -745,6 +745,7 @@ struct ibmvfc_event { struct ibmvfc_target *tgt; struct scsi_cmnd *cmnd; atomic_t free; + atomic_t active; union ibmvfc_iu *xfer_iu; void (*done)(struct ibmvfc_event *evt); void (*_done)(struct ibmvfc_event *evt); diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index 9f5068f3bcfb..dd205414e505 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c @@ -461,7 +461,7 @@ static void sas_discover_domain(struct work_struct *work) break; #else pr_notice("ATA device seen but CONFIG_SCSI_SAS_ATA=N so cannot attach\n"); - /* Fall through */ + fallthrough; #endif /* Fall through - only for the #else condition above. 
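The new evt->active field in the ibmvfc patch above works because atomic_dec_if_positive() performs the decrement only when the result stays non-negative, and returns old - 1 either way: 1 (in flight) yields 0 exactly once, while 0 (being completed) and -1 (free) yield negative values, so only one path can claim a completion. A userspace C11 model of that semantic (the kernel helper itself is assumed, not reimplemented by the driver):

#include <stdatomic.h>
#include <stdio.h>

/* Model of atomic_dec_if_positive(): decrement only if the result
 * would be >= 0; return old - 1 regardless. */
static int dec_if_positive(atomic_int *v)
{
	int old = atomic_load(v);

	while (old > 0 &&
	       !atomic_compare_exchange_weak(v, &old, old - 1))
		;
	return old - 1;
}

int main(void)
{
	atomic_int active = 1;			/* 1 = in flight */

	if (dec_if_positive(&active) == 0)
		puts("first response claims the event");
	if (dec_if_positive(&active) < 0)
		puts("duplicate response rejected");
	return 0;
}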
*/ default: diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 5983e05b648f..e29523a1b530 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -13193,6 +13193,8 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) if (!phba) return -ENOMEM; + INIT_LIST_HEAD(&phba->poll_list); + /* Perform generic PCI device enabling operation */ error = lpfc_enable_pci_dev(phba); if (error) @@ -13327,7 +13329,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) /* Enable RAS FW log support */ lpfc_sli4_ras_setup(phba); - INIT_LIST_HEAD(&phba->poll_list); timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0); cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp); diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c index abf7b401f5b9..c509440bd161 100644 --- a/drivers/scsi/megaraid/megaraid_mm.c +++ b/drivers/scsi/megaraid/megaraid_mm.c @@ -238,7 +238,7 @@ mraid_mm_get_adapter(mimd_t __user *umimd, int *rval) mimd_t mimd; uint32_t adapno; int iterator; - + bool is_found; if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) { *rval = -EFAULT; @@ -254,12 +254,16 @@ mraid_mm_get_adapter(mimd_t __user *umimd, int *rval) adapter = NULL; iterator = 0; + is_found = false; list_for_each_entry(adapter, &adapters_list_g, list) { - if (iterator++ == adapno) break; + if (iterator++ == adapno) { + is_found = true; + break; + } } - if (!adapter) { + if (!is_found) { *rval = -ENODEV; return NULL; } @@ -725,6 +729,7 @@ ioctl_done(uioc_t *kioc) uint32_t adapno; int iterator; mraid_mmadp_t* adapter; + bool is_found; /* * When the kioc returns from driver, make sure it still doesn't @@ -747,19 +752,23 @@ ioctl_done(uioc_t *kioc) iterator = 0; adapter = NULL; adapno = kioc->adapno; + is_found = false; con_log(CL_ANN, ( KERN_WARNING "megaraid cmm: completed " "ioctl that was timedout before\n")); list_for_each_entry(adapter, &adapters_list_g, list) { - if (iterator++ == adapno) break; + if (iterator++ == adapno) { + is_found = true; + break; + } } kioc->timedout = 0; - if (adapter) { + if (is_found) mraid_mm_dealloc_kioc( adapter, kioc ); - } + } else { wake_up(&wait_q); diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c index 9eceafca59bc..2dba2b0af166 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_fw.c +++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c @@ -2607,14 +2607,13 @@ static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc) goto out; } drv_info->information_length = cpu_to_le32(data_len); - strncpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature)); - strncpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name)); - drv_info->os_name[sizeof(drv_info->os_name) - 1] = 0; - strncpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version)); - drv_info->os_version[sizeof(drv_info->os_version) - 1] = 0; - strncpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, sizeof(drv_info->driver_name)); - strncpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version)); - strncpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE, sizeof(drv_info->driver_release_date)); + strscpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature)); + strscpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name)); + strscpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version)); + strscpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, 
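The megaraid_mm fix above addresses a classic list_for_each_entry() pitfall: when the loop runs to completion, the cursor is left pointing at the container of the list head itself, never NULL, so the old "if (!adapter)" test could never detect a miss. The replacement tracks an explicit flag; sketched in kernel C (hypothetical types):

#include <linux/list.h>
#include <linux/types.h>

struct adp {
	struct list_head list;
};

static struct adp *find_nth(struct list_head *head, u32 adapno)
{
	struct adp *a;
	u32 i = 0;
	bool found = false;

	list_for_each_entry(a, head, list) {
		if (i++ == adapno) {
			found = true;
			break;
		}
	}
	/* never test the raw cursor: it is non-NULL even on a miss */
	return found ? a : NULL;
}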
sizeof(drv_info->driver_name)); + strscpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version)); + strscpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE, + sizeof(drv_info->driver_release_date)); drv_info->driver_capabilities = 0; memcpy((u8 *)&mrioc->driver_info, (u8 *)drv_info, sizeof(mrioc->driver_info)); diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index c39955239d1c..cf4a3a2c22ad 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -2983,13 +2983,13 @@ _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc) } /** - * _base_free_irq - free irq + * mpt3sas_base_free_irq - free irq * @ioc: per adapter object * * Freeing respective reply_queue from the list. */ -static void -_base_free_irq(struct MPT3SAS_ADAPTER *ioc) +void +mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc) { struct adapter_reply_queue *reply_q, *next; @@ -3191,12 +3191,12 @@ _base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER *ioc, } /** - * _base_disable_msix - disables msix + * mpt3sas_base_disable_msix - disables msix * @ioc: per adapter object * */ -static void -_base_disable_msix(struct MPT3SAS_ADAPTER *ioc) +void +mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc) { if (!ioc->msix_enable) return; @@ -3304,8 +3304,8 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc) for (i = 0; i < ioc->reply_queue_count; i++) { r = _base_request_irq(ioc, i); if (r) { - _base_free_irq(ioc); - _base_disable_msix(ioc); + mpt3sas_base_free_irq(ioc); + mpt3sas_base_disable_msix(ioc); goto try_ioapic; } } @@ -3342,8 +3342,8 @@ mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc) dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); - _base_free_irq(ioc); - _base_disable_msix(ioc); + mpt3sas_base_free_irq(ioc); + mpt3sas_base_disable_msix(ioc); kfree(ioc->replyPostRegisterIndex); ioc->replyPostRegisterIndex = NULL; @@ -7613,14 +7613,14 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc) } /** - * _base_make_ioc_ready - put controller in READY state + * mpt3sas_base_make_ioc_ready - put controller in READY state * @ioc: per adapter object * @type: FORCE_BIG_HAMMER or SOFT_RESET * * Return: 0 for success, non-zero for failure. 
*/ -static int -_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type) +int +mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type) { u32 ioc_state; int rc; @@ -7851,7 +7851,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc) return r; } - rc = _base_static_config_pages(ioc); + r = _base_static_config_pages(ioc); if (r) return r; @@ -7897,7 +7897,7 @@ mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc) if (ioc->chip_phys && ioc->chip) { mpt3sas_base_mask_interrupts(ioc); ioc->shost_recovery = 1; - _base_make_ioc_ready(ioc, SOFT_RESET); + mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET); ioc->shost_recovery = 0; } @@ -8017,7 +8017,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) ioc->build_sg_mpi = &_base_build_sg; ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge; - r = _base_make_ioc_ready(ioc, SOFT_RESET); + r = mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET); if (r) goto out_free_resources; @@ -8471,7 +8471,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, _base_pre_reset_handler(ioc); mpt3sas_wait_for_commands_to_complete(ioc); mpt3sas_base_mask_interrupts(ioc); - r = _base_make_ioc_ready(ioc, type); + r = mpt3sas_base_make_ioc_ready(ioc, type); if (r) goto out; _base_clear_outstanding_commands(ioc); diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index d4834c8ee9c0..0c6c3df0038d 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -1730,6 +1730,10 @@ do { ioc_err(ioc, "In func: %s\n", __func__); \ status, mpi_request, sz); } while (0) int mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int wait_count); +int +mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type); +void mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc); /* scsih shared API */ struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 866d118f7931..8e64a6f14542 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -11295,7 +11295,12 @@ scsih_shutdown(struct pci_dev *pdev) _scsih_ir_shutdown(ioc); _scsih_nvme_shutdown(ioc); - mpt3sas_base_detach(ioc); + mpt3sas_base_mask_interrupts(ioc); + ioc->shost_recovery = 1; + mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET); + ioc->shost_recovery = 0; + mpt3sas_base_free_irq(ioc); + mpt3sas_base_disable_msix(ioc); } diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c index 0b8802beb7ce..ec05c42e8ee6 100644 --- a/drivers/scsi/pm8001/pm8001_ctl.c +++ b/drivers/scsi/pm8001/pm8001_ctl.c @@ -77,7 +77,7 @@ DEVICE_ATTR(interface_rev, S_IRUGO, pm8001_ctl_mpi_interface_rev_show, NULL); * @attr: device attribute (unused) * @buf: the buffer returned * - * A sysfs 'read only' shost attribute. + * A sysfs 'read-only' shost attribute. 
*/ static ssize_t controller_fatal_error_show(struct device *cdev, struct device_attribute *attr, char *buf) @@ -149,7 +149,7 @@ static ssize_t pm8001_ctl_ila_version_show(struct device *cdev, static DEVICE_ATTR(ila_version, 0444, pm8001_ctl_ila_version_show, NULL); /** - * pm8001_ctl_inactive_fw_version_show - Inacative firmware version number + * pm8001_ctl_inactive_fw_version_show - Inactive firmware version number * @cdev: pointer to embedded class device * @attr: device attribute (unused) * @buf: the buffer returned @@ -396,6 +396,7 @@ static DEVICE_ATTR(aap_log, S_IRUGO, pm8001_ctl_aap_log_show, NULL); * @cdev:pointer to embedded class device * @attr: device attribute (unused) * @buf: the buffer returned + * * A sysfs 'read-only' shost attribute. */ static ssize_t pm8001_ctl_ib_queue_log_show(struct device *cdev, @@ -430,6 +431,7 @@ static DEVICE_ATTR(ib_log, S_IRUGO, pm8001_ctl_ib_queue_log_show, NULL); * @cdev:pointer to embedded class device * @attr: device attribute (unused) * @buf: the buffer returned + * * A sysfs 'read-only' shost attribute. */ @@ -464,6 +466,7 @@ static DEVICE_ATTR(ob_log, S_IRUGO, pm8001_ctl_ob_queue_log_show, NULL); * @cdev:pointer to embedded class device * @attr: device attribute (unused) * @buf:the buffer returned + * * A sysfs 'read-only' shost attribute. */ static ssize_t pm8001_ctl_bios_version_show(struct device *cdev, @@ -555,13 +558,13 @@ static ssize_t pm8001_ctl_iop_log_show(struct device *cdev, static DEVICE_ATTR(iop_log, S_IRUGO, pm8001_ctl_iop_log_show, NULL); /** - ** pm8001_ctl_fatal_log_show - fatal error logging - ** @cdev:pointer to embedded class device - ** @attr: device attribute - ** @buf: the buffer returned - ** - ** A sysfs 'read-only' shost attribute. - **/ + * pm8001_ctl_fatal_log_show - fatal error logging + * @cdev:pointer to embedded class device + * @attr: device attribute + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ static ssize_t pm8001_ctl_fatal_log_show(struct device *cdev, struct device_attribute *attr, char *buf) @@ -575,13 +578,13 @@ static ssize_t pm8001_ctl_fatal_log_show(struct device *cdev, static DEVICE_ATTR(fatal_log, S_IRUGO, pm8001_ctl_fatal_log_show, NULL); /** - ** non_fatal_log_show - non fatal error logging - ** @cdev:pointer to embedded class device - ** @attr: device attribute - ** @buf: the buffer returned - ** - ** A sysfs 'read-only' shost attribute. - **/ + * non_fatal_log_show - non fatal error logging + * @cdev:pointer to embedded class device + * @attr: device attribute + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ static ssize_t non_fatal_log_show(struct device *cdev, struct device_attribute *attr, char *buf) { @@ -620,12 +623,13 @@ static ssize_t non_fatal_count_store(struct device *cdev, static DEVICE_ATTR_RW(non_fatal_count); /** - ** pm8001_ctl_gsm_log_show - gsm dump collection - ** @cdev:pointer to embedded class device - ** @attr: device attribute (unused) - ** @buf: the buffer returned - ** A sysfs 'read-only' shost attribute. - **/ + * pm8001_ctl_gsm_log_show - gsm dump collection + * @cdev:pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. 
+ */ static ssize_t pm8001_ctl_gsm_log_show(struct device *cdev, struct device_attribute *attr, char *buf) { diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index 33f8217577b1..17c0f26e683a 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -384,7 +384,7 @@ static void update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha, /** * pm8001_bar4_shift - function is called to shift BAR base address - * @pm8001_ha : our hba card infomation + * @pm8001_ha : our hba card information * @shiftValue : shifting value in memory bar. */ int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue) @@ -1151,7 +1151,7 @@ static void pm8001_hw_chip_rst(struct pm8001_hba_info *pm8001_ha) } /** - * pm8001_chip_iounmap - which maped when initialized. + * pm8001_chip_iounmap - which mapped when initialized. * @pm8001_ha: our hba card information */ void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha) @@ -1187,10 +1187,10 @@ pm8001_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha) pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL); } - /** - * pm8001_chip_intx_interrupt_disable- disable PM8001 chip interrupt - * @pm8001_ha: our hba card information - */ +/** + * pm8001_chip_intx_interrupt_disable - disable PM8001 chip interrupt + * @pm8001_ha: our hba card information + */ static void pm8001_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha) { @@ -1876,8 +1876,8 @@ static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha, * @piomb: the message contents of this outbound message. * * When FW has completed a ssp request for example a IO request, after it has - * filled the SG data with the data, it will trigger this event represent - * that he has finished the job,please check the coresponding buffer. + * filled the SG data with the data, it will trigger this event representing + * that he has finished the job; please check the corresponding buffer. * So we will tell the caller who maybe waiting the result to tell upper layer * that the task has been finished. */ @@ -3522,7 +3522,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb) * * when sas layer find a device it will notify LLDD, then the driver register * the domain device to FW, this event is the return device ID which the FW - * has assigned, from now,inter-communication with FW is no longer using the + * has assigned, from now, inter-communication with FW is no longer using the * SAS address, use device ID which FW assigned. */ int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index 313248c7bab9..47db7e0beae6 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -233,7 +233,7 @@ static irqreturn_t pm8001_interrupt_handler_msix(int irq, void *opaque) /** * pm8001_interrupt_handler_intx - main INTx interrupt handler. * @irq: interrupt number - * @dev_id: sas_ha structure. The HBA is retrieved from sas_has structure. + * @dev_id: sas_ha structure. The HBA is retrieved from sas_ha structure. */ static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id) @@ -439,9 +439,9 @@ err_out: } /** - * pm8001_ioremap - remap the pci high physical address to kernal virtual + * pm8001_ioremap - remap the pci high physical address to kernel virtual * address so that we can access them. - * @pm8001_ha:our hba structure. + * @pm8001_ha: our hba structure. 
*/ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha) { @@ -652,7 +652,7 @@ static void pm8001_post_sas_ha_init(struct Scsi_Host *shost, * pm8001_init_sas_add - initialize sas address * @pm8001_ha: our ha struct. * - * Currently we just set the fixed SAS address to our HBA,for manufacture, + * Currently we just set the fixed SAS address to our HBA, for manufacture, * it should read from the EEPROM */ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha) @@ -790,7 +790,7 @@ struct pm8001_mpi3_phy_pg_trx_config { }; /** - * pm8001_get_internal_phy_settings : Retrieves the internal PHY settings + * pm8001_get_internal_phy_settings - Retrieves the internal PHY settings * @pm8001_ha : our adapter * @phycfg : PHY config page to populate */ @@ -810,7 +810,7 @@ void pm8001_get_internal_phy_settings(struct pm8001_hba_info *pm8001_ha, } /** - * pm8001_get_external_phy_settings : Retrieves the external PHY settings + * pm8001_get_external_phy_settings - Retrieves the external PHY settings * @pm8001_ha : our adapter * @phycfg : PHY config page to populate */ @@ -830,7 +830,7 @@ void pm8001_get_external_phy_settings(struct pm8001_hba_info *pm8001_ha, } /** - * pm8001_get_phy_mask : Retrieves the mask that denotes if a PHY is int/ext + * pm8001_get_phy_mask - Retrieves the mask that denotes if a PHY is int/ext * @pm8001_ha : our adapter * @phymask : The PHY mask */ @@ -868,7 +868,7 @@ void pm8001_get_phy_mask(struct pm8001_hba_info *pm8001_ha, int *phymask) } /** - * pm8001_set_phy_settings_ven_117c_12G() : Configure ATTO 12Gb PHY settings + * pm8001_set_phy_settings_ven_117c_12G() - Configure ATTO 12Gb PHY settings * @pm8001_ha : our adapter */ static @@ -903,7 +903,7 @@ int pm8001_set_phy_settings_ven_117c_12G(struct pm8001_hba_info *pm8001_ha) } /** - * pm8001_configure_phy_settings : Configures PHY settings based on vendor ID. + * pm8001_configure_phy_settings - Configures PHY settings based on vendor ID. * @pm8001_ha : our hba. */ static int pm8001_configure_phy_settings(struct pm8001_hba_info *pm8001_ha) @@ -1053,8 +1053,8 @@ intx: * @ent: pci device id * * This function is the main initialization function, when register a new - * pci driver it is invoked, all struct an hardware initilization should be done - * here, also, register interrupt + * pci driver it is invoked, all struct and hardware initialization should be + * done here, also, register interrupt. */ static int pm8001_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) @@ -1172,10 +1172,11 @@ err_out_enable: return rc; } -/* +/** * pm8001_init_ccb_tag - allocate memory to CCB and tag. * @pm8001_ha: our hba card information. * @shost: scsi host which has been allocated outside. + * @pdev: pci device. */ static int pm8001_init_ccb_tag(struct pm8001_hba_info *pm8001_ha, struct Scsi_Host *shost, @@ -1270,7 +1271,7 @@ static void pm8001_pci_remove(struct pci_dev *pdev) * pm8001_pci_suspend - power management suspend main entry point * @dev: Device struct * - * Returns 0 success, anything else error. + * Return: 0 on success, anything else on error. */ static int __maybe_unused pm8001_pci_suspend(struct device *dev) { @@ -1315,7 +1316,7 @@ static int __maybe_unused pm8001_pci_suspend(struct device *dev) * pm8001_pci_resume - power management resume main entry point * @dev: Device struct * - * Returns 0 success, anything else error. + * Return: 0 on success, anything else on error. 
*/ static int __maybe_unused pm8001_pci_resume(struct device *dev) { diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index 6f33d821e545..32e60f0c3b14 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -98,14 +98,16 @@ void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha) pm8001_tag_free(pm8001_ha, i); } - /** - * pm8001_mem_alloc - allocate memory for pm8001. - * @pdev: pci device. - * @virt_addr: the allocated virtual address - * @pphys_addr_hi: the physical address high byte address. - * @pphys_addr_lo: the physical address low byte address. - * @mem_size: memory size. - */ +/** + * pm8001_mem_alloc - allocate memory for pm8001. + * @pdev: pci device. + * @virt_addr: the allocated virtual address + * @pphys_addr: DMA address for this device + * @pphys_addr_hi: the physical address high byte address. + * @pphys_addr_lo: the physical address low byte address. + * @mem_size: memory size. + * @align: requested byte alignment + */ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr, dma_addr_t *pphys_addr, u32 *pphys_addr_hi, u32 *pphys_addr_lo, u32 mem_size, u32 align) @@ -339,7 +341,7 @@ static int pm8001_task_prep_ssp_tm(struct pm8001_hba_info *pm8001_ha, } /** - * pm8001_task_prep_ssp - the dispatcher function,prepare ssp data for ssp task + * pm8001_task_prep_ssp - the dispatcher function, prepare ssp data for ssp task * @pm8001_ha: our hba card information * @ccb: the ccb which attached to ssp task */ @@ -554,10 +556,10 @@ void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha, pm8001_tag_free(pm8001_ha, ccb_idx); } - /** - * pm8001_alloc_dev - find a empty pm8001_device - * @pm8001_ha: our hba card information - */ +/** + * pm8001_alloc_dev - find a empty pm8001_device + * @pm8001_ha: our hba card information + */ static struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha) { u32 dev; @@ -682,8 +684,7 @@ int pm8001_dev_found(struct domain_device *dev) void pm8001_task_done(struct sas_task *task) { - if (!del_timer(&task->slow_task->timer)) - return; + del_timer(&task->slow_task->timer); complete(&task->slow_task->completion); } @@ -691,9 +692,14 @@ static void pm8001_tmf_timedout(struct timer_list *t) { struct sas_task_slow *slow = from_timer(slow, t, timer); struct sas_task *task = slow->task; + unsigned long flags; - task->task_state_flags |= SAS_TASK_STATE_ABORTED; - complete(&task->slow_task->completion); + spin_lock_irqsave(&task->task_state_lock, flags); + if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { + task->task_state_flags |= SAS_TASK_STATE_ABORTED; + complete(&task->slow_task->completion); + } + spin_unlock_irqrestore(&task->task_state_lock, flags); } #define PM8001_TASK_TIMEOUT 20 @@ -705,7 +711,7 @@ static void pm8001_tmf_timedout(struct timer_list *t) * @parameter: ssp task parameter. * * when errors or exception happened, we may want to do something, for example - * abort the issued task which result in this execption, it is done by calling + * abort the issued task which result in this exception, it is done by calling * this function, note it is also with the task execute interface. */ static int pm8001_exec_internal_tmf_task(struct domain_device *dev, @@ -746,13 +752,10 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev, } res = -TMF_RESP_FUNC_FAILED; /* Even TMF timed out, return direct. 
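The pm8001_tmf_timedout() change above closes a race with normal completion: both paths now serialize on task_state_lock, and the timer only completes the task when DONE is not already set, so complete() cannot fire on an already-finished task. The shape of the fix, reduced to its essentials (kernel context, assumed field names):

#include <linux/bits.h>
#include <linux/completion.h>
#include <linux/spinlock.h>

#define STATE_DONE	BIT(0)
#define STATE_ABORTED	BIT(1)

struct tmf_task {
	spinlock_t lock;
	unsigned long state;
	struct completion comp;
};

static void tmf_timedout(struct tmf_task *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->lock, flags);
	if (!(t->state & STATE_DONE)) {
		t->state |= STATE_ABORTED;
		complete(&t->comp);	/* only while not yet done */
	}
	spin_unlock_irqrestore(&t->lock, flags);
}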
*/ - if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { - if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { - pm8001_dbg(pm8001_ha, FAIL, - "TMF task[%x]timeout.\n", - tmf->tmf); - goto ex_err; - } + if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { + pm8001_dbg(pm8001_ha, FAIL, "TMF task[%x]timeout.\n", + tmf->tmf); + goto ex_err; } if (task->task_status.resp == SAS_TASK_COMPLETE && @@ -832,12 +835,9 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha, wait_for_completion(&task->slow_task->completion); res = TMF_RESP_FUNC_FAILED; /* Even TMF timed out, return direct. */ - if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { - if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { - pm8001_dbg(pm8001_ha, FAIL, - "TMF task timeout.\n"); - goto ex_err; - } + if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { + pm8001_dbg(pm8001_ha, FAIL, "TMF task timeout.\n"); + goto ex_err; } if (task->task_status.resp == SAS_TASK_COMPLETE && @@ -984,11 +984,12 @@ void pm8001_open_reject_retry( } /** - * pm8001_I_T_nexus_reset() - * Standard mandates link reset for ATA (type 0) and hard reset for - * SSP (type 1) , only for RECOVERY - * @dev: the device structure for the device to reset. - */ + * pm8001_I_T_nexus_reset() - reset the initiator/target connection + * @dev: the device structure for the device to reset. + * + * Standard mandates link reset for ATA (type 0) and hard reset for + * SSP (type 1), only for RECOVERY + */ int pm8001_I_T_nexus_reset(struct domain_device *dev) { int rc = TMF_RESP_FUNC_FAILED; diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index 45ecd9639977..6ffe17b849ae 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -140,7 +140,7 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev, pm8001_ha->fatal_bar_loc = 0; } - /* Read until accum_len is retrived */ + /* Read until accum_len is retrieved */ accum_len = pm8001_mr32(fatal_table_address, MPI_FATAL_EDUMP_TABLE_ACCUM_LEN); /* Determine length of data between previously stored transfer length @@ -1011,7 +1011,7 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha) value); return -EBUSY; } - /* check the MPI-State for initialization upto 100ms*/ + /* check the MPI-State for initialization up to 100ms*/ max_wait_count = 5;/* 100 msec */ do { msleep(FW_READY_INTERVAL); @@ -1093,7 +1093,7 @@ static int init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha) value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0); - /** + /* * lower 26 bits of SCRATCHPAD0 register describes offset within the * PCIe BAR where the MPI configuration table is present */ @@ -1101,7 +1101,7 @@ static int init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha) pm8001_dbg(pm8001_ha, DEV, "Scratchpad 0 Offset: 0x%x value 0x%x\n", offset, value); - /** + /* * Upper 6 bits describe the offset within PCI config space where BAR * is located. 
*/ @@ -1109,7 +1109,7 @@ static int init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha) pcibar = get_pci_bar_index(pcilogic); pm8001_dbg(pm8001_ha, INIT, "Scratchpad 0 PCI BAR: %d\n", pcibar); - /** + /* * Make sure the offset falls inside the ioremapped PCI BAR */ if (offset > pm8001_ha->io_mem[pcibar].memsize) { @@ -1121,7 +1121,7 @@ static int init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha) pm8001_ha->main_cfg_tbl_addr = base_addr = pm8001_ha->io_mem[pcibar].memvirtaddr + offset; - /** + /* * Validate main configuration table address: first DWord should read * "PMCS" */ @@ -1385,7 +1385,7 @@ pm80xx_get_encrypt_info(struct pm8001_hba_info *pm8001_ha) } /** - * pm80xx_encrypt_update - update flash with encryption informtion + * pm80xx_encrypt_update - update flash with encryption information * @pm8001_ha: our hba card information. */ static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha) @@ -1422,7 +1422,7 @@ static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha) } /** - * pm80xx_chip_init - the main init function that initialize whole PM8001 chip. + * pm80xx_chip_init - the main init function that initializes whole PM8001 chip. * @pm8001_ha: our hba card information */ static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha) @@ -1541,7 +1541,7 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha) } /** - * pm80xx_fatal_errors - returns non zero *ONLY* when fatal errors + * pm80xx_fatal_errors - returns non-zero *ONLY* when fatal errors * @pm8001_ha: our hba card information * * Fatal errors are recoverable only after a host reboot. @@ -1576,8 +1576,8 @@ pm80xx_fatal_errors(struct pm8001_hba_info *pm8001_ha) } /** - * pm80xx_chip_soft_rst - soft reset the PM8001 chip, so that the clear all - * the FW register status to the originated status. + * pm80xx_chip_soft_rst - soft reset the PM8001 chip, so that all + * FW register status are reset to the originated status. * @pm8001_ha: our hba card information */ @@ -1895,13 +1895,13 @@ static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha, } /** - * mpi_ssp_completion- process the event that FW response to the SSP request. + * mpi_ssp_completion - process the event that FW response to the SSP request. * @pm8001_ha: our hba card information * @piomb: the message contents of this outbound message. * * When FW has completed a ssp request for example a IO request, after it has - * filled the SG data with the data, it will trigger this event represent - * that he has finished the job,please check the coresponding buffer. + * filled the SG data with the data, it will trigger this event representing + * that he has finished the job; please check the corresponding buffer. * So we will tell the caller who maybe waiting the result to tell upper layer * that the task has been finished. */ @@ -3217,7 +3217,7 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) } /** - * pm80xx_hw_event_ack_req- For PM8001,some events need to acknowage to FW. + * pm80xx_hw_event_ack_req- For PM8001, some events need to acknowledge to FW. * @pm8001_ha: our hba card information * @Qnum: the outbound queue message number. * @SEA: source of event to ack @@ -3275,7 +3275,7 @@ static void hw_event_port_recover(struct pm8001_hba_info *pm8001_ha, } /** - * hw_event_sas_phy_up -FW tells me a SAS phy up event. + * hw_event_sas_phy_up - FW tells me a SAS phy up event. 
* @pm8001_ha: our hba card information * @piomb: IO message buffer */ @@ -3353,7 +3353,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) } /** - * hw_event_sata_phy_up -FW tells me a SATA phy up event. + * hw_event_sata_phy_up - FW tells me a SATA phy up event. * @pm8001_ha: our hba card information * @piomb: IO message buffer */ @@ -3400,7 +3400,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) } /** - * hw_event_phy_down -we should notify the libsas the phy is down. + * hw_event_phy_down - we should notify the libsas the phy is down. * @pm8001_ha: our hba card information * @piomb: IO message buffer */ @@ -3500,7 +3500,7 @@ static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) } /** - * mpi_thermal_hw_event -The hw event has come. + * mpi_thermal_hw_event - a thermal hw event has come. * @pm8001_ha: our hba card information * @piomb: IO message buffer */ @@ -3530,7 +3530,7 @@ static int mpi_thermal_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb) } /** - * mpi_hw_event -The hw event has come. + * mpi_hw_event - The hw event has come. * @pm8001_ha: our hba card information * @piomb: IO message buffer */ @@ -4025,7 +4025,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) case OPC_OUB_SET_DEV_INFO: pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SET_DEV_INFO\n"); break; - /* spcv specifc commands */ + /* spcv specific commands */ case OPC_OUB_PHY_START_RESP: pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_PHY_START_RESP opcode:%x\n", opc); @@ -4186,7 +4186,7 @@ static void build_smp_cmd(u32 deviceID, __le32 hTag, } /** - * pm80xx_chip_smp_req - send a SMP task to FW + * pm80xx_chip_smp_req - send an SMP task to FW * @pm8001_ha: our hba card information. * @ccb: the ccb information this request used. */ @@ -4346,7 +4346,7 @@ static int check_enc_sat_cmd(struct sas_task *task) } /** - * pm80xx_chip_ssp_io_req - send a SSP task to FW + * pm80xx_chip_ssp_io_req - send an SSP task to FW * @pm8001_ha: our hba card information. * @ccb: the ccb information this request used. 
*/ @@ -4750,13 +4750,13 @@ pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE | LINKMODE_AUTO | pm8001_ha->link_rate | phy_id); /* SSC Disable and SAS Analog ST configuration */ - /** + /* payload.ase_sh_lm_slr_phyid = cpu_to_le32(SSC_DISABLE_30 | SAS_ASE | SPINHOLD_DISABLE | LINKMODE_AUTO | LINKRATE_15 | LINKRATE_30 | LINKRATE_60 | phy_id); Have to add "SAS PHY Analog Setup SPASTI 1 Byte" Based on need - **/ + */ payload.sas_identify.dev_type = SAS_END_DEVICE; payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 8f9727e525aa..7456a26aef51 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -194,7 +194,7 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason) * @bufflen: len of buffer * @sense: optional sense buffer * @sshdr: optional decoded sense header - * @timeout: request timeout in seconds + * @timeout: request timeout in HZ * @retries: number of times to retry request * @flags: flags for ->cmd_flags * @rq_flags: flags for ->rq_flags diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index b059bf2b61d4..5b6996a2401b 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -475,7 +475,8 @@ static struct scsi_target *scsi_alloc_target(struct device *parent, error = shost->hostt->target_alloc(starget); if(error) { - dev_printk(KERN_ERR, dev, "target allocation failed, error %d\n", error); + if (error != -ENXIO) + dev_err(dev, "target allocation failed, error %d\n", error); /* don't want scsi_target_reap to do the final * put because it will be under the host lock */ scsi_target_destroy(starget); diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 32489d25158f..ae9bfc658203 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -807,11 +807,14 @@ store_state_field(struct device *dev, struct device_attribute *attr, mutex_lock(&sdev->state_mutex); ret = scsi_device_set_state(sdev, state); /* - * If the device state changes to SDEV_RUNNING, we need to run - * the queue to avoid I/O hang. + * If the device state changes to SDEV_RUNNING, we need to + * rescan the device to revalidate it, and run the queue to + * avoid I/O hang. */ - if (ret == 0 && state == SDEV_RUNNING) + if (ret == 0 && state == SDEV_RUNNING) { + scsi_rescan_device(dev); blk_mq_run_hw_queues(sdev->request_queue, true); + } mutex_unlock(&sdev->state_mutex); return ret == 0 ? 
count : -EINVAL; diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index b07105ae7c91..d8b05d8b5470 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -439,39 +439,10 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj, struct device *dev = container_of(kobj, struct device, kobj); struct iscsi_iface *iface = iscsi_dev_to_iface(dev); struct iscsi_transport *t = iface->transport; - int param; - int param_type; + int param = -1; if (attr == &dev_attr_iface_enabled.attr) param = ISCSI_NET_PARAM_IFACE_ENABLE; - else if (attr == &dev_attr_iface_vlan_id.attr) - param = ISCSI_NET_PARAM_VLAN_ID; - else if (attr == &dev_attr_iface_vlan_priority.attr) - param = ISCSI_NET_PARAM_VLAN_PRIORITY; - else if (attr == &dev_attr_iface_vlan_enabled.attr) - param = ISCSI_NET_PARAM_VLAN_ENABLED; - else if (attr == &dev_attr_iface_mtu.attr) - param = ISCSI_NET_PARAM_MTU; - else if (attr == &dev_attr_iface_port.attr) - param = ISCSI_NET_PARAM_PORT; - else if (attr == &dev_attr_iface_ipaddress_state.attr) - param = ISCSI_NET_PARAM_IPADDR_STATE; - else if (attr == &dev_attr_iface_delayed_ack_en.attr) - param = ISCSI_NET_PARAM_DELAYED_ACK_EN; - else if (attr == &dev_attr_iface_tcp_nagle_disable.attr) - param = ISCSI_NET_PARAM_TCP_NAGLE_DISABLE; - else if (attr == &dev_attr_iface_tcp_wsf_disable.attr) - param = ISCSI_NET_PARAM_TCP_WSF_DISABLE; - else if (attr == &dev_attr_iface_tcp_wsf.attr) - param = ISCSI_NET_PARAM_TCP_WSF; - else if (attr == &dev_attr_iface_tcp_timer_scale.attr) - param = ISCSI_NET_PARAM_TCP_TIMER_SCALE; - else if (attr == &dev_attr_iface_tcp_timestamp_en.attr) - param = ISCSI_NET_PARAM_TCP_TIMESTAMP_EN; - else if (attr == &dev_attr_iface_cache_id.attr) - param = ISCSI_NET_PARAM_CACHE_ID; - else if (attr == &dev_attr_iface_redirect_en.attr) - param = ISCSI_NET_PARAM_REDIRECT_EN; else if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr) param = ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO; else if (attr == &dev_attr_iface_header_digest.attr) @@ -508,6 +479,38 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj, param = ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN; else if (attr == &dev_attr_iface_initiator_name.attr) param = ISCSI_IFACE_PARAM_INITIATOR_NAME; + + if (param != -1) + return t->attr_is_visible(ISCSI_IFACE_PARAM, param); + + if (attr == &dev_attr_iface_vlan_id.attr) + param = ISCSI_NET_PARAM_VLAN_ID; + else if (attr == &dev_attr_iface_vlan_priority.attr) + param = ISCSI_NET_PARAM_VLAN_PRIORITY; + else if (attr == &dev_attr_iface_vlan_enabled.attr) + param = ISCSI_NET_PARAM_VLAN_ENABLED; + else if (attr == &dev_attr_iface_mtu.attr) + param = ISCSI_NET_PARAM_MTU; + else if (attr == &dev_attr_iface_port.attr) + param = ISCSI_NET_PARAM_PORT; + else if (attr == &dev_attr_iface_ipaddress_state.attr) + param = ISCSI_NET_PARAM_IPADDR_STATE; + else if (attr == &dev_attr_iface_delayed_ack_en.attr) + param = ISCSI_NET_PARAM_DELAYED_ACK_EN; + else if (attr == &dev_attr_iface_tcp_nagle_disable.attr) + param = ISCSI_NET_PARAM_TCP_NAGLE_DISABLE; + else if (attr == &dev_attr_iface_tcp_wsf_disable.attr) + param = ISCSI_NET_PARAM_TCP_WSF_DISABLE; + else if (attr == &dev_attr_iface_tcp_wsf.attr) + param = ISCSI_NET_PARAM_TCP_WSF; + else if (attr == &dev_attr_iface_tcp_timer_scale.attr) + param = ISCSI_NET_PARAM_TCP_TIMER_SCALE; + else if (attr == &dev_attr_iface_tcp_timestamp_en.attr) + param = ISCSI_NET_PARAM_TCP_TIMESTAMP_EN; + else if (attr == &dev_attr_iface_cache_id.attr) + param = ISCSI_NET_PARAM_CACHE_ID; + 
else if (attr == &dev_attr_iface_redirect_en.attr) + param = ISCSI_NET_PARAM_REDIRECT_EN; else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { if (attr == &dev_attr_ipv4_iface_ipaddress.attr) param = ISCSI_NET_PARAM_IPV4_ADDR; @@ -598,32 +601,7 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj, return 0; } - switch (param) { - case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO: - case ISCSI_IFACE_PARAM_HDRDGST_EN: - case ISCSI_IFACE_PARAM_DATADGST_EN: - case ISCSI_IFACE_PARAM_IMM_DATA_EN: - case ISCSI_IFACE_PARAM_INITIAL_R2T_EN: - case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN: - case ISCSI_IFACE_PARAM_PDU_INORDER_EN: - case ISCSI_IFACE_PARAM_ERL: - case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH: - case ISCSI_IFACE_PARAM_FIRST_BURST: - case ISCSI_IFACE_PARAM_MAX_R2T: - case ISCSI_IFACE_PARAM_MAX_BURST: - case ISCSI_IFACE_PARAM_CHAP_AUTH_EN: - case ISCSI_IFACE_PARAM_BIDI_CHAP_EN: - case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL: - case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN: - case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN: - case ISCSI_IFACE_PARAM_INITIATOR_NAME: - param_type = ISCSI_IFACE_PARAM; - break; - default: - param_type = ISCSI_NET_PARAM; - } - - return t->attr_is_visible(param_type, param); + return t->attr_is_visible(ISCSI_NET_PARAM, param); } static struct attribute *iscsi_iface_attrs[] = { diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 6d2d63629a90..b8d55af763f9 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -98,11 +98,7 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD); MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC); MODULE_ALIAS_SCSI_DEVICE(TYPE_ZBC); -#if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT) #define SD_MINORS 16 -#else -#define SD_MINORS 0 -#endif static void sd_config_discard(struct scsi_disk *, unsigned int); static void sd_config_write_same(struct scsi_disk *); diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 94c254e9012e..a6d3ac0a6cbc 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -221,7 +221,7 @@ static unsigned int sr_get_events(struct scsi_device *sdev) else if (med->media_event_code == 2) return DISK_EVENT_MEDIA_CHANGE; else if (med->media_event_code == 3) - return DISK_EVENT_EJECT_REQUEST; + return DISK_EVENT_MEDIA_CHANGE; return 0; } diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 328bb961c281..37506b3fe5a9 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1199,14 +1199,24 @@ static void storvsc_on_io_completion(struct storvsc_device *stor_device, vstor_packet->vm_srb.sense_info_length); if (vstor_packet->vm_srb.scsi_status != 0 || - vstor_packet->vm_srb.srb_status != SRB_STATUS_SUCCESS) - storvsc_log(device, STORVSC_LOGGING_ERROR, + vstor_packet->vm_srb.srb_status != SRB_STATUS_SUCCESS) { + + /* + * Log TEST_UNIT_READY errors only as warnings. Hyper-V can + * return errors when detecting devices using TEST_UNIT_READY, + * and logging these as errors produces unhelpful noise. + */ + int loglevel = (stor_pkt->vm_srb.cdb[0] == TEST_UNIT_READY) ? 
+ STORVSC_LOGGING_WARN : STORVSC_LOGGING_ERROR; + + storvsc_log(device, loglevel, "tag#%d cmd 0x%x status: scsi 0x%x srb 0x%x hv 0x%x\n", request->cmd->request->tag, stor_pkt->vm_srb.cdb[0], vstor_packet->vm_srb.scsi_status, vstor_packet->vm_srb.srb_status, vstor_packet->status); + } if (vstor_packet->vm_srb.scsi_status == SAM_STAT_CHECK_CONDITION && (vstor_packet->vm_srb.srb_status & SRB_STATUS_AUTOSENSE_VALID)) diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index c98d540ac044..194755c9ddfe 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -1229,8 +1229,13 @@ static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba, static inline void ufshcd_vops_setup_xfer_req(struct ufs_hba *hba, int tag, bool is_scsi_cmd) { - if (hba->vops && hba->vops->setup_xfer_req) - return hba->vops->setup_xfer_req(hba, tag, is_scsi_cmd); + if (hba->vops && hba->vops->setup_xfer_req) { + unsigned long flags; + + spin_lock_irqsave(hba->host->host_lock, flags); + hba->vops->setup_xfer_req(hba, tag, is_scsi_cmd); + spin_unlock_irqrestore(hba->host->host_lock, flags); + } } static inline void ufshcd_vops_setup_task_mgmt(struct ufs_hba *hba, diff --git a/drivers/slimbus/messaging.c b/drivers/slimbus/messaging.c index f2b5d347d227..e5ae26227bdb 100644 --- a/drivers/slimbus/messaging.c +++ b/drivers/slimbus/messaging.c @@ -66,7 +66,7 @@ int slim_alloc_txn_tid(struct slim_controller *ctrl, struct slim_msg_txn *txn) int ret = 0; spin_lock_irqsave(&ctrl->txn_lock, flags); - ret = idr_alloc_cyclic(&ctrl->tid_idr, txn, 0, + ret = idr_alloc_cyclic(&ctrl->tid_idr, txn, 1, SLIM_MAX_TIDS, GFP_ATOMIC); if (ret < 0) { spin_unlock_irqrestore(&ctrl->txn_lock, flags); @@ -131,7 +131,8 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn) goto slim_xfer_err; } } - + /* Initialize tid to invalid value */ + txn->tid = 0; need_tid = slim_tid_txn(txn->mt, txn->mc); if (need_tid) { @@ -163,7 +164,7 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn) txn->mt, txn->mc, txn->la, ret); slim_xfer_err: - if (!clk_pause_msg && (!need_tid || ret == -ETIMEDOUT)) { + if (!clk_pause_msg && (txn->tid == 0 || ret == -ETIMEDOUT)) { /* * remove runtime-pm vote if this was TX only, or * if there was error during this transaction diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c index c054e83ab636..7040293c2ee8 100644 --- a/drivers/slimbus/qcom-ngd-ctrl.c +++ b/drivers/slimbus/qcom-ngd-ctrl.c @@ -618,7 +618,7 @@ static void qcom_slim_ngd_rx(struct qcom_slim_ngd_ctrl *ctrl, u8 *buf) (mc == SLIM_USR_MC_GENERIC_ACK && mt == SLIM_MSG_MT_SRC_REFERRED_USER)) { slim_msg_response(&ctrl->ctrl, &buf[4], buf[3], len - 4); - pm_runtime_mark_last_busy(ctrl->dev); + pm_runtime_mark_last_busy(ctrl->ctrl.dev); } } @@ -1080,7 +1080,8 @@ static void qcom_slim_ngd_setup(struct qcom_slim_ngd_ctrl *ctrl) { u32 cfg = readl_relaxed(ctrl->ngd->base); - if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN) + if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN || + ctrl->state == QCOM_SLIM_NGD_CTRL_ASLEEP) qcom_slim_ngd_init_dma(ctrl); /* By default enable message queues */ @@ -1131,6 +1132,7 @@ static int qcom_slim_ngd_power_up(struct qcom_slim_ngd_ctrl *ctrl) dev_info(ctrl->dev, "Subsys restart: ADSP active framer\n"); return 0; } + qcom_slim_ngd_setup(ctrl); return 0; } @@ -1257,13 +1259,14 @@ static int qcom_slim_ngd_enable(struct qcom_slim_ngd_ctrl *ctrl, bool enable) } /* controller state should be in sync with framework state */ complete(&ctrl->qmi.qmi_comp); 
- if (!pm_runtime_enabled(ctrl->dev) || - !pm_runtime_suspended(ctrl->dev)) - qcom_slim_ngd_runtime_resume(ctrl->dev); + if (!pm_runtime_enabled(ctrl->ctrl.dev) || + !pm_runtime_suspended(ctrl->ctrl.dev)) + qcom_slim_ngd_runtime_resume(ctrl->ctrl.dev); else - pm_runtime_resume(ctrl->dev); - pm_runtime_mark_last_busy(ctrl->dev); - pm_runtime_put(ctrl->dev); + pm_runtime_resume(ctrl->ctrl.dev); + + pm_runtime_mark_last_busy(ctrl->ctrl.dev); + pm_runtime_put(ctrl->ctrl.dev); ret = slim_register_controller(&ctrl->ctrl); if (ret) { @@ -1389,7 +1392,7 @@ static int qcom_slim_ngd_ssr_pdr_notify(struct qcom_slim_ngd_ctrl *ctrl, /* Make sure the last dma xfer is finished */ mutex_lock(&ctrl->tx_lock); if (ctrl->state != QCOM_SLIM_NGD_CTRL_DOWN) { - pm_runtime_get_noresume(ctrl->dev); + pm_runtime_get_noresume(ctrl->ctrl.dev); ctrl->state = QCOM_SLIM_NGD_CTRL_DOWN; qcom_slim_ngd_down(ctrl); qcom_slim_ngd_exit_dma(ctrl); @@ -1617,6 +1620,7 @@ static int __maybe_unused qcom_slim_ngd_runtime_suspend(struct device *dev) struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev); int ret = 0; + qcom_slim_ngd_exit_dma(ctrl); if (!ctrl->qmi.handle) return 0; diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile index f678e4d9e585..a05e9fbcd3e0 100644 --- a/drivers/soc/Makefile +++ b/drivers/soc/Makefile @@ -13,7 +13,7 @@ obj-$(CONFIG_MACH_DOVE) += dove/ obj-y += fsl/ obj-$(CONFIG_ARCH_GEMINI) += gemini/ obj-y += imx/ -obj-$(CONFIG_ARCH_IXP4XX) += ixp4xx/ +obj-y += ixp4xx/ obj-$(CONFIG_SOC_XWAY) += lantiq/ obj-$(CONFIG_LITEX_SOC_CONTROLLER) += litex/ obj-y += mediatek/ diff --git a/drivers/soc/fsl/qe/qe_ic.c b/drivers/soc/fsl/qe/qe_ic.c index 3f711c1a0996..bbae3d39c7be 100644 --- a/drivers/soc/fsl/qe/qe_ic.c +++ b/drivers/soc/fsl/qe/qe_ic.c @@ -23,6 +23,7 @@ #include <linux/signal.h> #include <linux/device.h> #include <linux/spinlock.h> +#include <linux/platform_device.h> #include <asm/irq.h> #include <asm/io.h> #include <soc/fsl/qe/qe.h> @@ -53,8 +54,8 @@ struct qe_ic { struct irq_chip hc_irq; /* VIRQ numbers of QE high/low irqs */ - unsigned int virq_high; - unsigned int virq_low; + int virq_high; + int virq_low; }; /* @@ -404,42 +405,40 @@ static void qe_ic_cascade_muxed_mpic(struct irq_desc *desc) chip->irq_eoi(&desc->irq_data); } -static void __init qe_ic_init(struct device_node *node) +static int qe_ic_init(struct platform_device *pdev) { + struct device *dev = &pdev->dev; void (*low_handler)(struct irq_desc *desc); void (*high_handler)(struct irq_desc *desc); struct qe_ic *qe_ic; - struct resource res; - u32 ret; + struct resource *res; + struct device_node *node = pdev->dev.of_node; - ret = of_address_to_resource(node, 0, &res); - if (ret) - return; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res == NULL) { + dev_err(dev, "no memory resource defined\n"); + return -ENODEV; + } - qe_ic = kzalloc(sizeof(*qe_ic), GFP_KERNEL); + qe_ic = devm_kzalloc(dev, sizeof(*qe_ic), GFP_KERNEL); if (qe_ic == NULL) - return; + return -ENOMEM; - qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS, - &qe_ic_host_ops, qe_ic); - if (qe_ic->irqhost == NULL) { - kfree(qe_ic); - return; + qe_ic->regs = devm_ioremap(dev, res->start, resource_size(res)); + if (qe_ic->regs == NULL) { + dev_err(dev, "failed to ioremap() registers\n"); + return -ENODEV; } - qe_ic->regs = ioremap(res.start, resource_size(&res)); - qe_ic->hc_irq = qe_ic_irq_chip; - qe_ic->virq_high = irq_of_parse_and_map(node, 0); - qe_ic->virq_low = irq_of_parse_and_map(node, 1); + qe_ic->virq_high = platform_get_irq(pdev, 0); + 
qe_ic->virq_low = platform_get_irq(pdev, 1); - if (!qe_ic->virq_low) { - printk(KERN_ERR "Failed to map QE_IC low IRQ\n"); - kfree(qe_ic); - return; - } - if (qe_ic->virq_high != qe_ic->virq_low) { + if (qe_ic->virq_low <= 0) + return -ENODEV; + + if (qe_ic->virq_high > 0 && qe_ic->virq_high != qe_ic->virq_low) { low_handler = qe_ic_cascade_low; high_handler = qe_ic_cascade_high; } else { @@ -447,29 +446,42 @@ static void __init qe_ic_init(struct device_node *node) high_handler = NULL; } + qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS, + &qe_ic_host_ops, qe_ic); + if (qe_ic->irqhost == NULL) { + dev_err(dev, "failed to add irq domain\n"); + return -ENODEV; + } + qe_ic_write(qe_ic->regs, QEIC_CICR, 0); irq_set_handler_data(qe_ic->virq_low, qe_ic); irq_set_chained_handler(qe_ic->virq_low, low_handler); - if (qe_ic->virq_high && qe_ic->virq_high != qe_ic->virq_low) { + if (high_handler) { irq_set_handler_data(qe_ic->virq_high, qe_ic); irq_set_chained_handler(qe_ic->virq_high, high_handler); } + return 0; } +static const struct of_device_id qe_ic_ids[] = { + { .compatible = "fsl,qe-ic"}, + { .type = "qeic"}, + {}, +}; -static int __init qe_ic_of_init(void) +static struct platform_driver qe_ic_driver = { - struct device_node *np; + .driver = { + .name = "qe-ic", + .of_match_table = qe_ic_ids, + }, + .probe = qe_ic_init, +}; - np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic"); - if (!np) { - np = of_find_node_by_type(NULL, "qeic"); - if (!np) - return -ENODEV; - } - qe_ic_init(np); - of_node_put(np); +static int __init qe_ic_of_init(void) +{ + platform_driver_register(&qe_ic_driver); return 0; } subsys_initcall(qe_ic_of_init); diff --git a/drivers/soc/imx/soc-imx8m.c b/drivers/soc/imx/soc-imx8m.c index 071e14496e4b..cc57a384d74d 100644 --- a/drivers/soc/imx/soc-imx8m.c +++ b/drivers/soc/imx/soc-imx8m.c @@ -5,8 +5,6 @@ #include <linux/init.h> #include <linux/io.h> -#include <linux/module.h> -#include <linux/nvmem-consumer.h> #include <linux/of_address.h> #include <linux/slab.h> #include <linux/sys_soc.h> @@ -31,7 +29,7 @@ struct imx8_soc_data { char *name; - u32 (*soc_revision)(struct device *dev); + u32 (*soc_revision)(void); }; static u64 soc_uid; @@ -52,7 +50,7 @@ static u32 imx8mq_soc_revision_from_atf(void) static inline u32 imx8mq_soc_revision_from_atf(void) { return 0; }; #endif -static u32 __init imx8mq_soc_revision(struct device *dev) +static u32 __init imx8mq_soc_revision(void) { struct device_node *np; void __iomem *ocotp_base; @@ -77,20 +75,9 @@ static u32 __init imx8mq_soc_revision(struct device *dev) rev = REV_B1; } - if (dev) { - int ret; - - ret = nvmem_cell_read_u64(dev, "soc_unique_id", &soc_uid); - if (ret) { - iounmap(ocotp_base); - of_node_put(np); - return ret; - } - } else { - soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH); - soc_uid <<= 32; - soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW); - } + soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH); + soc_uid <<= 32; + soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW); iounmap(ocotp_base); of_node_put(np); @@ -120,7 +107,7 @@ static void __init imx8mm_soc_uid(void) of_node_put(np); } -static u32 __init imx8mm_soc_revision(struct device *dev) +static u32 __init imx8mm_soc_revision(void) { struct device_node *np; void __iomem *anatop_base; @@ -138,15 +125,7 @@ static u32 __init imx8mm_soc_revision(struct device *dev) iounmap(anatop_base); of_node_put(np); - if (dev) { - int ret; - - ret = nvmem_cell_read_u64(dev, "soc_unique_id", &soc_uid); - if (ret) - return ret; - } else { - 
imx8mm_soc_uid(); - } + imx8mm_soc_uid(); return rev; } @@ -171,7 +150,7 @@ static const struct imx8_soc_data imx8mp_soc_data = { .soc_revision = imx8mm_soc_revision, }; -static __maybe_unused const struct of_device_id imx8_machine_match[] = { +static __maybe_unused const struct of_device_id imx8_soc_match[] = { { .compatible = "fsl,imx8mq", .data = &imx8mq_soc_data, }, { .compatible = "fsl,imx8mm", .data = &imx8mm_soc_data, }, { .compatible = "fsl,imx8mn", .data = &imx8mn_soc_data, }, @@ -179,20 +158,12 @@ static __maybe_unused const struct of_device_id imx8_machine_match[] = { { } }; -static __maybe_unused const struct of_device_id imx8_soc_match[] = { - { .compatible = "fsl,imx8mq-soc", .data = &imx8mq_soc_data, }, - { .compatible = "fsl,imx8mm-soc", .data = &imx8mm_soc_data, }, - { .compatible = "fsl,imx8mn-soc", .data = &imx8mn_soc_data, }, - { .compatible = "fsl,imx8mp-soc", .data = &imx8mp_soc_data, }, - { } -}; - #define imx8_revision(soc_rev) \ soc_rev ? \ kasprintf(GFP_KERNEL, "%d.%d", (soc_rev >> 4) & 0xf, soc_rev & 0xf) : \ "unknown" -static int imx8_soc_info(struct platform_device *pdev) +static int __init imx8_soc_init(void) { struct soc_device_attribute *soc_dev_attr; struct soc_device *soc_dev; @@ -211,10 +182,7 @@ static int imx8_soc_info(struct platform_device *pdev) if (ret) goto free_soc; - if (pdev) - id = of_match_node(imx8_soc_match, pdev->dev.of_node); - else - id = of_match_node(imx8_machine_match, of_root); + id = of_match_node(imx8_soc_match, of_root); if (!id) { ret = -ENODEV; goto free_soc; @@ -223,16 +191,8 @@ static int imx8_soc_info(struct platform_device *pdev) data = id->data; if (data) { soc_dev_attr->soc_id = data->name; - if (data->soc_revision) { - if (pdev) { - soc_rev = data->soc_revision(&pdev->dev); - ret = soc_rev; - if (ret < 0) - goto free_soc; - } else { - soc_rev = data->soc_revision(NULL); - } - } + if (data->soc_revision) + soc_rev = data->soc_revision(); } soc_dev_attr->revision = imx8_revision(soc_rev); @@ -270,24 +230,4 @@ free_soc: kfree(soc_dev_attr); return ret; } - -/* Retain device_initcall is for backward compatibility with DTS. 
*/ -static int __init imx8_soc_init(void) -{ - if (of_find_matching_node_and_match(NULL, imx8_soc_match, NULL)) - return 0; - - return imx8_soc_info(NULL); -} device_initcall(imx8_soc_init); - -static struct platform_driver imx8_soc_info_driver = { - .probe = imx8_soc_info, - .driver = { - .name = "imx8_soc_info", - .of_match_table = imx8_soc_match, - }, -}; - -module_platform_driver(imx8_soc_info_driver); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/ixp4xx/ixp4xx-npe.c b/drivers/soc/ixp4xx/ixp4xx-npe.c index 7bd19354982a..f490c4ca51f5 100644 --- a/drivers/soc/ixp4xx/ixp4xx-npe.c +++ b/drivers/soc/ixp4xx/ixp4xx-npe.c @@ -21,7 +21,6 @@ #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/soc/ixp4xx/npe.h> -#include <mach/hardware.h> #include <linux/soc/ixp4xx/cpu.h> #define DEBUG_MSG 0 @@ -694,8 +693,8 @@ static int ixp4xx_npe_probe(struct platform_device *pdev) if (!(ixp4xx_read_feature_bits() & (IXP4XX_FEATURE_RESET_NPEA << i))) { - dev_info(dev, "NPE%d at 0x%08x-0x%08x not available\n", - i, res->start, res->end); + dev_info(dev, "NPE%d at %pR not available\n", + i, res); continue; /* NPE already disabled or not present */ } npe->regs = devm_ioremap_resource(dev, res); @@ -703,13 +702,12 @@ static int ixp4xx_npe_probe(struct platform_device *pdev) return PTR_ERR(npe->regs); if (npe_reset(npe)) { - dev_info(dev, "NPE%d at 0x%08x-0x%08x does not reset\n", - i, res->start, res->end); + dev_info(dev, "NPE%d at %pR does not reset\n", + i, res); continue; } npe->valid = 1; - dev_info(dev, "NPE%d at 0x%08x-0x%08x registered\n", - i, res->start, res->end); + dev_info(dev, "NPE%d at %pR registered\n", i, res); found++; } diff --git a/drivers/soc/ixp4xx/ixp4xx-qmgr.c b/drivers/soc/ixp4xx/ixp4xx-qmgr.c index 7149510b307e..9154c7029b05 100644 --- a/drivers/soc/ixp4xx/ixp4xx-qmgr.c +++ b/drivers/soc/ixp4xx/ixp4xx-qmgr.c @@ -12,7 +12,6 @@ #include <linux/of.h> #include <linux/platform_device.h> #include <linux/soc/ixp4xx/qmgr.h> -#include <mach/hardware.h> #include <linux/soc/ixp4xx/cpu.h> static struct qmgr_regs __iomem *qmgr_regs; @@ -147,12 +146,12 @@ static irqreturn_t qmgr_irq1_a0(int irq, void *pdev) /* ACK - it may clear any bits so don't rely on it */ __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[0]); - en_bitmap = qmgr_regs->irqen[0]; + en_bitmap = __raw_readl(&qmgr_regs->irqen[0]); while (en_bitmap) { i = __fls(en_bitmap); /* number of the last "low" queue */ en_bitmap &= ~BIT(i); - src = qmgr_regs->irqsrc[i >> 3]; - stat = qmgr_regs->stat1[i >> 3]; + src = __raw_readl(&qmgr_regs->irqsrc[i >> 3]); + stat = __raw_readl(&qmgr_regs->stat1[i >> 3]); if (src & 4) /* the IRQ condition is inverted */ stat = ~stat; if (stat & BIT(src & 3)) { @@ -172,7 +171,8 @@ static irqreturn_t qmgr_irq2_a0(int irq, void *pdev) /* ACK - it may clear any bits so don't rely on it */ __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[1]); - req_bitmap = qmgr_regs->irqen[1] & qmgr_regs->statne_h; + req_bitmap = __raw_readl(&qmgr_regs->irqen[1]) & + __raw_readl(&qmgr_regs->statne_h); while (req_bitmap) { i = __fls(req_bitmap); /* number of the last "high" queue */ req_bitmap &= ~BIT(i); diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig index 20ace654553a..8b53ed1cc67e 100644 --- a/drivers/soc/tegra/Kconfig +++ b/drivers/soc/tegra/Kconfig @@ -15,7 +15,7 @@ config ARCH_TEGRA_2x_SOC select PL310_ERRATA_769419 if CACHE_L2X0 select SOC_TEGRA_FLOWCTRL select SOC_TEGRA_PMC - select SOC_TEGRA20_VOLTAGE_COUPLER + select SOC_TEGRA20_VOLTAGE_COUPLER if REGULATOR select TEGRA_TIMER 
help Support for NVIDIA Tegra AP20 and T20 processors, based on the @@ -29,7 +29,7 @@ config ARCH_TEGRA_3x_SOC select PL310_ERRATA_769419 if CACHE_L2X0 select SOC_TEGRA_FLOWCTRL select SOC_TEGRA_PMC - select SOC_TEGRA30_VOLTAGE_COUPLER + select SOC_TEGRA30_VOLTAGE_COUPLER if REGULATOR select TEGRA_TIMER help Support for NVIDIA Tegra T30 processor family, based on the @@ -155,7 +155,9 @@ config SOC_TEGRA_POWERGATE_BPMP config SOC_TEGRA20_VOLTAGE_COUPLER bool "Voltage scaling support for Tegra20 SoCs" depends on ARCH_TEGRA_2x_SOC || COMPILE_TEST + depends on REGULATOR config SOC_TEGRA30_VOLTAGE_COUPLER bool "Voltage scaling support for Tegra30 SoCs" depends on ARCH_TEGRA_3x_SOC || COMPILE_TEST + depends on REGULATOR diff --git a/drivers/spi/spi-altera-dfl.c b/drivers/spi/spi-altera-dfl.c index 39a3e1a032e0..44fc9ee13fc7 100644 --- a/drivers/spi/spi-altera-dfl.c +++ b/drivers/spi/spi-altera-dfl.c @@ -104,13 +104,6 @@ static const struct regmap_config indirect_regbus_cfg = { .reg_read = indirect_bus_reg_read, }; -static struct spi_board_info m10_bmc_info = { - .modalias = "m10-d5005", - .max_speed_hz = 12500000, - .bus_num = 0, - .chip_select = 0, -}; - static void config_spi_master(void __iomem *base, struct spi_master *master) { u64 v; @@ -130,6 +123,7 @@ static void config_spi_master(void __iomem *base, struct spi_master *master) static int dfl_spi_altera_probe(struct dfl_device *dfl_dev) { + struct spi_board_info board_info = { 0 }; struct device *dev = &dfl_dev->dev; struct spi_master *master; struct altera_spi *hw; @@ -170,9 +164,18 @@ static int dfl_spi_altera_probe(struct dfl_device *dfl_dev) goto exit; } - if (!spi_new_device(master, &m10_bmc_info)) { + if (dfl_dev->revision == FME_FEATURE_REV_MAX10_SPI_N5010) + strscpy(board_info.modalias, "m10-n5010", SPI_NAME_SIZE); + else + strscpy(board_info.modalias, "m10-d5005", SPI_NAME_SIZE); + + board_info.max_speed_hz = 12500000; + board_info.bus_num = 0; + board_info.chip_select = 0; + + if (!spi_new_device(master, &board_info)) { dev_err(dev, "%s failed to create SPI device: %s\n", - __func__, m10_bmc_info.modalias); + __func__, board_info.modalias); } return 0; diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index 2ef74885ffa2..788dcdf25f00 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c @@ -352,8 +352,6 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi) } mr = spi_readl(as, MR); - if (spi->cs_gpiod) - gpiod_set_value(spi->cs_gpiod, 1); } else { u32 cpol = (spi->mode & SPI_CPOL) ? 
SPI_BIT(CPOL) : 0; int i; @@ -369,8 +367,6 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi) mr = spi_readl(as, MR); mr = SPI_BFINS(PCS, ~(1 << chip_select), mr); - if (spi->cs_gpiod) - gpiod_set_value(spi->cs_gpiod, 1); spi_writel(as, MR, mr); } @@ -400,8 +396,6 @@ static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi) if (!spi->cs_gpiod) spi_writel(as, CR, SPI_BIT(LASTXFER)); - else - gpiod_set_value(spi->cs_gpiod, 0); } static void atmel_spi_lock(struct atmel_spi *as) __acquires(&as->lock) @@ -1483,7 +1477,8 @@ static int atmel_spi_probe(struct platform_device *pdev) master->bus_num = pdev->id; master->num_chipselect = 4; master->setup = atmel_spi_setup; - master->flags = (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX); + master->flags = (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX | + SPI_MASTER_GPIO_SS); master->transfer_one = atmel_spi_one_transfer; master->set_cs = atmel_spi_set_cs; master->cleanup = atmel_spi_cleanup; diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c index 5f8771fe1a31..775c0bf2f923 100644 --- a/drivers/spi/spi-bcm2835.c +++ b/drivers/spi/spi-bcm2835.c @@ -83,6 +83,7 @@ MODULE_PARM_DESC(polling_limit_us, * struct bcm2835_spi - BCM2835 SPI controller * @regs: base address of register map * @clk: core clock, divided to calculate serial clock + * @clk_hz: core clock cached speed * @irq: interrupt, signals TX FIFO empty or RX FIFO ¾ full * @tfr: SPI transfer currently processed * @ctlr: SPI controller reverse lookup @@ -116,6 +117,7 @@ MODULE_PARM_DESC(polling_limit_us, struct bcm2835_spi { void __iomem *regs; struct clk *clk; + unsigned long clk_hz; int irq; struct spi_transfer *tfr; struct spi_controller *ctlr; @@ -1045,19 +1047,18 @@ static int bcm2835_spi_transfer_one(struct spi_controller *ctlr, { struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr); struct bcm2835_spidev *slv = spi_get_ctldata(spi); - unsigned long spi_hz, clk_hz, cdiv; + unsigned long spi_hz, cdiv; unsigned long hz_per_byte, byte_limit; u32 cs = slv->prepare_cs; /* set clock */ spi_hz = tfr->speed_hz; - clk_hz = clk_get_rate(bs->clk); - if (spi_hz >= clk_hz / 2) { + if (spi_hz >= bs->clk_hz / 2) { cdiv = 2; /* clk_hz/2 is the fastest we can go */ } else if (spi_hz) { /* CDIV must be a multiple of two */ - cdiv = DIV_ROUND_UP(clk_hz, spi_hz); + cdiv = DIV_ROUND_UP(bs->clk_hz, spi_hz); cdiv += (cdiv % 2); if (cdiv >= 65536) @@ -1065,7 +1066,7 @@ static int bcm2835_spi_transfer_one(struct spi_controller *ctlr, } else { cdiv = 0; /* 0 is the slowest we can go */ } - tfr->effective_speed_hz = cdiv ? (clk_hz / cdiv) : (clk_hz / 65536); + tfr->effective_speed_hz = cdiv ? (bs->clk_hz / cdiv) : (bs->clk_hz / 65536); bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv); /* handle all the 3-wire mode */ @@ -1354,6 +1355,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev) return bs->irq ? 
bs->irq : -ENODEV; clk_prepare_enable(bs->clk); + bs->clk_hz = clk_get_rate(bs->clk); err = bcm2835_dma_init(ctlr, &pdev->dev, bs); if (err) diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c index 7a00346ff9b9..101cc71bffa7 100644 --- a/drivers/spi/spi-cadence-quadspi.c +++ b/drivers/spi/spi-cadence-quadspi.c @@ -309,6 +309,9 @@ static unsigned int cqspi_calc_dummy(const struct spi_mem_op *op, bool dtr) { unsigned int dummy_clk; + if (!op->dummy.nbytes) + return 0; + dummy_clk = op->dummy.nbytes * (8 / op->dummy.buswidth); if (dtr) dummy_clk /= 2; @@ -322,7 +325,15 @@ static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata, f_pdata->inst_width = CQSPI_INST_TYPE_SINGLE; f_pdata->addr_width = CQSPI_INST_TYPE_SINGLE; f_pdata->data_width = CQSPI_INST_TYPE_SINGLE; - f_pdata->dtr = op->data.dtr && op->cmd.dtr && op->addr.dtr; + + /* + * For an op to be DTR, cmd phase along with every other non-empty + * phase should have dtr field set to 1. If an op phase has zero + * nbytes, ignore its dtr field; otherwise, check its dtr field. + */ + f_pdata->dtr = op->cmd.dtr && + (!op->addr.nbytes || op->addr.dtr) && + (!op->data.nbytes || op->data.dtr); switch (op->data.buswidth) { case 0: @@ -797,19 +808,20 @@ static int cqspi_write_setup(struct cqspi_flash_pdata *f_pdata, reg = cqspi_calc_rdreg(f_pdata); writel(reg, reg_base + CQSPI_REG_RD_INSTR); - if (f_pdata->dtr) { - /* - * Some flashes like the cypress Semper flash expect a 4-byte - * dummy address with the Read SR command in DTR mode, but this - * controller does not support sending address with the Read SR - * command. So, disable write completion polling on the - * controller's side. spi-nor will take care of polling the - * status register. - */ - reg = readl(reg_base + CQSPI_REG_WR_COMPLETION_CTRL); - reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL; - writel(reg, reg_base + CQSPI_REG_WR_COMPLETION_CTRL); - } + /* + * SPI NAND flashes require the address of the status register to be + * passed in the Read SR command. Also, some SPI NOR flashes like the + * cypress Semper flash expect a 4-byte dummy address in the Read SR + * command in DTR mode. + * + * But this controller does not support address phase in the Read SR + * command when doing auto-HW polling. So, disable write completion + * polling on the controller's side. spinand and spi-nor will take + * care of polling the status register. + */ + reg = readl(reg_base + CQSPI_REG_WR_COMPLETION_CTRL); + reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL; + writel(reg, reg_base + CQSPI_REG_WR_COMPLETION_CTRL); reg = readl(reg_base + CQSPI_REG_SIZE); reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK; @@ -1224,8 +1236,15 @@ static bool cqspi_supports_mem_op(struct spi_mem *mem, { bool all_true, all_false; - all_true = op->cmd.dtr && op->addr.dtr && op->dummy.dtr && - op->data.dtr; + /* + * op->dummy.dtr is required for converting nbytes into ncycles. + * Also, don't check the dtr field of the op phase having zero nbytes. 
+ */ + all_true = op->cmd.dtr && + (!op->addr.nbytes || op->addr.dtr) && + (!op->dummy.nbytes || op->dummy.dtr) && + (!op->data.nbytes || op->data.dtr); + all_false = !op->cmd.dtr && !op->addr.dtr && !op->dummy.dtr && !op->data.dtr; diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c index a3afd1b9ac56..ceb16e70d235 100644 --- a/drivers/spi/spi-cadence.c +++ b/drivers/spi/spi-cadence.c @@ -517,6 +517,12 @@ static int cdns_spi_probe(struct platform_device *pdev) goto clk_dis_apb; } + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT); + pm_runtime_get_noresume(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs); if (ret < 0) master->num_chipselect = CDNS_SPI_DEFAULT_NUM_CS; @@ -531,11 +537,6 @@ static int cdns_spi_probe(struct platform_device *pdev) /* SPI controller initializations */ cdns_spi_init_hw(xspi); - pm_runtime_set_active(&pdev->dev); - pm_runtime_enable(&pdev->dev); - pm_runtime_use_autosuspend(&pdev->dev); - pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT); - irq = platform_get_irq(pdev, 0); if (irq <= 0) { ret = -ENXIO; @@ -566,6 +567,9 @@ static int cdns_spi_probe(struct platform_device *pdev) master->bits_per_word_mask = SPI_BPW_MASK(8); + pm_runtime_mark_last_busy(&pdev->dev); + pm_runtime_put_autosuspend(&pdev->dev); + ret = spi_register_master(master); if (ret) { dev_err(&pdev->dev, "spi_register_master failed\n"); diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index 39dc02e366f4..fa68e9817929 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -505,8 +505,10 @@ static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx, struct spi_message *msg) { struct spi_device *spi = msg->spi; + struct spi_transfer *xfer; u32 ctrl = MX51_ECSPI_CTRL_ENABLE; - u32 testreg; + u32 min_speed_hz = ~0U; + u32 testreg, delay; u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG); /* set Master or Slave mode */ @@ -567,6 +569,35 @@ static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx, writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG); + /* + * Wait until the changes in the configuration register CONFIGREG + * propagate into the hardware. It takes exactly one tick of the + * SCLK clock, but we will wait two SCLK clocks just to be sure. The + * effect of the delay it takes for the hardware to apply changes + * is noticeable if the SCLK clock runs very slowly. In such a case, if + * the polarity of SCLK should be inverted, the GPIO ChipSelect might + * be asserted before the SCLK polarity changes, which would disrupt + * the SPI communication as the device on the other end would consider + * the change of SCLK polarity as a clock tick already. + * + * Because spi_imx->spi_bus_clk is only set in the bitbang prepare_message + * callback, iterate over all the transfers in the spi_message, find the + * one with the lowest bus frequency, and use that bus frequency for the + * delay calculation. In case all transfers have speed_hz == 0, then + * min_speed_hz is ~0 and the resulting delay is zero. 
+ */ + list_for_each_entry(xfer, &msg->transfers, transfer_list) { + if (!xfer->speed_hz) + continue; + min_speed_hz = min(xfer->speed_hz, min_speed_hz); + } + + delay = (2 * 1000000) / min_speed_hz; + if (likely(delay < 10)) /* SCLK is faster than 200 kHz */ + udelay(delay); + else /* SCLK is _very_ slow */ + usleep_range(delay, delay + 10); + return 0; } @@ -574,7 +605,7 @@ static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx, struct spi_device *spi) { u32 ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL); - u32 clk, delay; + u32 clk; /* Clear BL field and set the right value */ ctrl &= ~MX51_ECSPI_CTRL_BL_MASK; @@ -596,23 +627,6 @@ static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx, writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL); - /* - * Wait until the changes in the configuration register CONFIGREG - * propagate into the hardware. It takes exactly one tick of the - * SCLK clock, but we will wait two SCLK clock just to be sure. The - * effect of the delay it takes for the hardware to apply changes - * is noticable if the SCLK clock run very slow. In such a case, if - * the polarity of SCLK should be inverted, the GPIO ChipSelect might - * be asserted before the SCLK polarity changes, which would disrupt - * the SPI communication as the device on the other end would consider - * the change of SCLK polarity as a clock tick already. - */ - delay = (2 * 1000000) / clk; - if (likely(delay < 10)) /* SCLK is faster than 100 kHz */ - udelay(delay); - else /* SCLK is _very_ slow */ - usleep_range(delay, delay + 10); - return 0; } diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c index b2c4621db34d..c208efeadd18 100644 --- a/drivers/spi/spi-meson-spicc.c +++ b/drivers/spi/spi-meson-spicc.c @@ -785,6 +785,8 @@ static int meson_spicc_remove(struct platform_device *pdev) clk_disable_unprepare(spicc->core); clk_disable_unprepare(spicc->pclk); + spi_master_put(spicc->master); + return 0; } diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c index 976f73b9e299..7914255521c3 100644 --- a/drivers/spi/spi-mt65xx.c +++ b/drivers/spi/spi-mt65xx.c @@ -426,14 +426,15 @@ static int mtk_spi_fifo_transfer(struct spi_master *master, mtk_spi_prepare_transfer(master, xfer); mtk_spi_setup_packet(master); - cnt = xfer->len / 4; - iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt); - - remainder = xfer->len % 4; - if (remainder > 0) { - reg_val = 0; - memcpy(®_val, xfer->tx_buf + (cnt * 4), remainder); - writel(reg_val, mdata->base + SPI_TX_DATA_REG); + if (xfer->tx_buf) { + cnt = xfer->len / 4; + iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt); + remainder = xfer->len % 4; + if (remainder > 0) { + reg_val = 0; + memcpy(®_val, xfer->tx_buf + (cnt * 4), remainder); + writel(reg_val, mdata->base + SPI_TX_DATA_REG); + } } mtk_spi_enable_transfer(master); @@ -793,12 +794,6 @@ static int mtk_spi_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); - ret = devm_spi_register_master(&pdev->dev, master); - if (ret) { - dev_err(&pdev->dev, "failed to register master (%d)\n", ret); - goto err_disable_runtime_pm; - } - if (mdata->dev_comp->need_pad_sel) { if (mdata->pad_num != master->num_chipselect) { dev_err(&pdev->dev, @@ -838,6 +833,12 @@ static int mtk_spi_probe(struct platform_device *pdev) dev_notice(&pdev->dev, "SPI dma_set_mask(%d) failed, ret:%d\n", addr_bits, ret); + ret = devm_spi_register_master(&pdev->dev, master); + if (ret) { + dev_err(&pdev->dev, "failed to register master (%d)\n", ret); + goto 
err_disable_runtime_pm; + } + return 0; err_disable_runtime_pm: diff --git a/drivers/spi/spi-mux.c b/drivers/spi/spi-mux.c index 37dfc6e82804..9708b7827ff7 100644 --- a/drivers/spi/spi-mux.c +++ b/drivers/spi/spi-mux.c @@ -167,10 +167,17 @@ err_put_ctlr: return ret; } +static const struct spi_device_id spi_mux_id[] = { + { "spi-mux" }, + { } +}; +MODULE_DEVICE_TABLE(spi, spi_mux_id); + static const struct of_device_id spi_mux_of_match[] = { { .compatible = "spi-mux" }, { } }; +MODULE_DEVICE_TABLE(of, spi_mux_of_match); static struct spi_driver spi_mux_driver = { .probe = spi_mux_probe, @@ -178,6 +185,7 @@ static struct spi_driver spi_mux_driver = { .name = "spi-mux", .of_match_table = spi_mux_of_match, }, + .id_table = spi_mux_id, }; module_spi_driver(spi_mux_driver); diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c index 8ffcffbb8157..05618a618939 100644 --- a/drivers/spi/spi-stm32.c +++ b/drivers/spi/spi-stm32.c @@ -884,15 +884,18 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id) ier = readl_relaxed(spi->base + STM32H7_SPI_IER); mask = ier; - /* EOTIE is triggered on EOT, SUSP and TXC events. */ + /* + * EOTIE enables irq from EOT, SUSP and TXC events. We need to set + * SUSP to acknowledge it later. TXC is automatically cleared + */ + mask |= STM32H7_SPI_SR_SUSP; /* - * When TXTF is set, DXPIE and TXPIE are cleared. So in case of - * Full-Duplex, need to poll RXP event to know if there are remaining - * data, before disabling SPI. + * DXPIE is set in Full-Duplex, one IT will be raised if TXP and RXP + * are set. So in case of Full-Duplex, need to poll TXP and RXP event. */ - if (spi->rx_buf && !spi->cur_usedma) - mask |= STM32H7_SPI_SR_RXP; + if ((spi->cur_comm == SPI_FULL_DUPLEX) && !spi->cur_usedma) + mask |= STM32H7_SPI_SR_TXP | STM32H7_SPI_SR_RXP; if (!(sr & mask)) { dev_warn(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n", @@ -1925,6 +1928,7 @@ static int stm32_spi_probe(struct platform_device *pdev) master->can_dma = stm32_spi_can_dma; pm_runtime_set_active(&pdev->dev); + pm_runtime_get_noresume(&pdev->dev); pm_runtime_enable(&pdev->dev); ret = spi_register_master(master); @@ -1940,6 +1944,8 @@ static int stm32_spi_probe(struct platform_device *pdev) err_pm_disable: pm_runtime_disable(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); err_dma_release: if (spi->dma_tx) dma_release_channel(spi->dma_tx); @@ -1956,9 +1962,14 @@ static int stm32_spi_remove(struct platform_device *pdev) struct spi_master *master = platform_get_drvdata(pdev); struct stm32_spi *spi = spi_master_get_devdata(master); + pm_runtime_get_sync(&pdev->dev); + spi_unregister_master(master); spi->cfg->disable(spi); + pm_runtime_disable(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); if (master->dma_tx) dma_release_channel(master->dma_tx); if (master->dma_rx) @@ -1966,7 +1977,6 @@ static int stm32_spi_remove(struct platform_device *pdev) clk_disable_unprepare(spi->clk); - pm_runtime_disable(&pdev->dev); pinctrl_pm_select_sleep_state(&pdev->dev); diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index c99181165321..e4dc593b1f32 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -58,6 +58,10 @@ modalias_show(struct device *dev, struct device_attribute *a, char *buf) const struct spi_device *spi = to_spi_device(dev); int len; + len = of_device_modalias(dev, buf, PAGE_SIZE); + if (len != -ENODEV) + return len; + len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1); if (len != -ENODEV) return len; 
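The spi-mux hunk above adds a legacy spi_device_id table alongside the OF match table so the module can be autoloaded no matter how the device was instantiated, and the spi.c hunk makes modalias_show() report the OF modalias first. A minimal sketch of the same dual-table pattern follows; the driver and compatible names are hypothetical, not taken from the patch:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/spi/spi.h>

static int demo_probe(struct spi_device *spi)
{
	/* nothing to set up in this sketch */
	return 0;
}

/* Matches modalias "spi:demo-chip", used when the device was created
 * without a DT node (e.g. via spi_new_device() or a board file).
 */
static const struct spi_device_id demo_spi_id[] = {
	{ "demo-chip" },
	{ }
};
MODULE_DEVICE_TABLE(spi, demo_spi_id);

/* Matches the DT compatible string when the device comes from OF. */
static const struct of_device_id demo_of_match[] = {
	{ .compatible = "acme,demo-chip" },
	{ }
};
MODULE_DEVICE_TABLE(of, demo_of_match);

static struct spi_driver demo_driver = {
	.probe = demo_probe,
	.id_table = demo_spi_id,
	.driver = {
		.name = "demo-chip",
		.of_match_table = demo_of_match,
	},
};
module_spi_driver(demo_driver);
MODULE_LICENSE("GPL");

With both tables exported, udev can load the module from either the "spi:" or the "of:" modalias, which is exactly the gap the spi-mux change closes.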
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index 24e9469ea35b..6dc29ce3b4bf 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -677,7 +677,6 @@ static struct class *spidev_class; static const struct of_device_id spidev_dt_ids[] = { { .compatible = "rohm,dh2228fv" }, { .compatible = "lineartechnology,ltc2488" }, - { .compatible = "ge,achc" }, { .compatible = "semtech,sx1301" }, { .compatible = "lwn,bk4" }, { .compatible = "dh,dhcom-board" }, diff --git a/drivers/staging/mt7621-pci/pci-mt7621.c b/drivers/staging/mt7621-pci/pci-mt7621.c index 691030e1a5ed..f9bdf4e33134 100644 --- a/drivers/staging/mt7621-pci/pci-mt7621.c +++ b/drivers/staging/mt7621-pci/pci-mt7621.c @@ -422,7 +422,6 @@ static void mt7621_pcie_init_ports(struct mt7621_pcie *pcie) dev_err(dev, "pcie%d no card, disable it (RST & CLK)\n", slot); mt7621_control_assert(port); - clk_disable_unprepare(port->clk); port->enabled = false; if (slot == 0) { diff --git a/drivers/staging/rtl8712/hal_init.c b/drivers/staging/rtl8712/hal_init.c index 22974277afa0..4eff3fdecdb8 100644 --- a/drivers/staging/rtl8712/hal_init.c +++ b/drivers/staging/rtl8712/hal_init.c @@ -29,21 +29,31 @@ #define FWBUFF_ALIGN_SZ 512 #define MAX_DUMP_FWSZ (48 * 1024) +static void rtl871x_load_fw_fail(struct _adapter *adapter) +{ + struct usb_device *udev = adapter->dvobjpriv.pusbdev; + struct device *dev = &udev->dev; + struct device *parent = dev->parent; + + complete(&adapter->rtl8712_fw_ready); + + dev_err(&udev->dev, "r8712u: Firmware request failed\n"); + + if (parent) + device_lock(parent); + + device_release_driver(dev); + + if (parent) + device_unlock(parent); +} + static void rtl871x_load_fw_cb(const struct firmware *firmware, void *context) { struct _adapter *adapter = context; if (!firmware) { - struct usb_device *udev = adapter->dvobjpriv.pusbdev; - struct usb_interface *usb_intf = adapter->pusb_intf; - - dev_err(&udev->dev, "r8712u: Firmware request failed\n"); - usb_put_dev(udev); - usb_set_intfdata(usb_intf, NULL); - r8712_free_drv_sw(adapter); - adapter->dvobj_deinit(adapter); - complete(&adapter->rtl8712_fw_ready); - free_netdev(adapter->pnetdev); + rtl871x_load_fw_fail(adapter); return; } adapter->fw = firmware; diff --git a/drivers/staging/rtl8712/rtl8712_led.c b/drivers/staging/rtl8712/rtl8712_led.c index 5901026949f2..d5fc9026b036 100644 --- a/drivers/staging/rtl8712/rtl8712_led.c +++ b/drivers/staging/rtl8712/rtl8712_led.c @@ -1820,3 +1820,11 @@ void LedControl871x(struct _adapter *padapter, enum LED_CTL_MODE LedAction) break; } } + +void r8712_flush_led_works(struct _adapter *padapter) +{ + struct led_priv *pledpriv = &padapter->ledpriv; + + flush_work(&pledpriv->SwLed0.BlinkWorkItem); + flush_work(&pledpriv->SwLed1.BlinkWorkItem); +} diff --git a/drivers/staging/rtl8712/rtl871x_led.h b/drivers/staging/rtl8712/rtl871x_led.h index ee19c873cf01..2f0768132ad8 100644 --- a/drivers/staging/rtl8712/rtl871x_led.h +++ b/drivers/staging/rtl8712/rtl871x_led.h @@ -112,6 +112,7 @@ struct led_priv { void r8712_InitSwLeds(struct _adapter *padapter); void r8712_DeInitSwLeds(struct _adapter *padapter); void LedControl871x(struct _adapter *padapter, enum LED_CTL_MODE LedAction); +void r8712_flush_led_works(struct _adapter *padapter); #endif diff --git a/drivers/staging/rtl8712/rtl871x_pwrctrl.c b/drivers/staging/rtl8712/rtl871x_pwrctrl.c index 23cff43437e2..cd6d9ff0bebc 100644 --- a/drivers/staging/rtl8712/rtl871x_pwrctrl.c +++ b/drivers/staging/rtl8712/rtl871x_pwrctrl.c @@ -224,3 +224,11 @@ void r8712_unregister_cmd_alive(struct 
_adapter *padapter) } mutex_unlock(&pwrctrl->mutex_lock); } + +void r8712_flush_rwctrl_works(struct _adapter *padapter) +{ + struct pwrctrl_priv *pwrctrl = &padapter->pwrctrlpriv; + + flush_work(&pwrctrl->SetPSModeWorkItem); + flush_work(&pwrctrl->rpwm_workitem); +} diff --git a/drivers/staging/rtl8712/rtl871x_pwrctrl.h b/drivers/staging/rtl8712/rtl871x_pwrctrl.h index bf6623cfaf27..b35b9c7920eb 100644 --- a/drivers/staging/rtl8712/rtl871x_pwrctrl.h +++ b/drivers/staging/rtl8712/rtl871x_pwrctrl.h @@ -108,5 +108,6 @@ void r8712_cpwm_int_hdl(struct _adapter *padapter, void r8712_set_ps_mode(struct _adapter *padapter, uint ps_mode, uint smart_ps); void r8712_set_rpwm(struct _adapter *padapter, u8 val8); +void r8712_flush_rwctrl_works(struct _adapter *padapter); #endif /* __RTL871X_PWRCTRL_H_ */ diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c index 2434b13c8b12..505ebeb643dc 100644 --- a/drivers/staging/rtl8712/usb_intf.c +++ b/drivers/staging/rtl8712/usb_intf.c @@ -591,35 +591,30 @@ static void r871xu_dev_remove(struct usb_interface *pusb_intf) { struct net_device *pnetdev = usb_get_intfdata(pusb_intf); struct usb_device *udev = interface_to_usbdev(pusb_intf); + struct _adapter *padapter = netdev_priv(pnetdev); + + /* never exit with a firmware callback pending */ + wait_for_completion(&padapter->rtl8712_fw_ready); + usb_set_intfdata(pusb_intf, NULL); + release_firmware(padapter->fw); + if (drvpriv.drv_registered) + padapter->surprise_removed = true; + if (pnetdev->reg_state != NETREG_UNINITIALIZED) + unregister_netdev(pnetdev); /* will call netdev_close() */ + r8712_flush_rwctrl_works(padapter); + r8712_flush_led_works(padapter); + udelay(1); + /* Stop driver mlme relation timer */ + r8712_stop_drv_timers(padapter); + r871x_dev_unload(padapter); + r8712_free_drv_sw(padapter); + free_netdev(pnetdev); + + /* decrease the reference count of the usb device structure + * when disconnect + */ + usb_put_dev(udev); - if (pnetdev) { - struct _adapter *padapter = netdev_priv(pnetdev); - - /* never exit with a firmware callback pending */ - wait_for_completion(&padapter->rtl8712_fw_ready); - pnetdev = usb_get_intfdata(pusb_intf); - usb_set_intfdata(pusb_intf, NULL); - if (!pnetdev) - goto firmware_load_fail; - release_firmware(padapter->fw); - if (drvpriv.drv_registered) - padapter->surprise_removed = true; - if (pnetdev->reg_state != NETREG_UNINITIALIZED) - unregister_netdev(pnetdev); /* will call netdev_close() */ - flush_scheduled_work(); - udelay(1); - /* Stop driver mlme relation timer */ - r8712_stop_drv_timers(padapter); - r871x_dev_unload(padapter); - r8712_free_drv_sw(padapter); - free_netdev(pnetdev); - - /* decrease the reference count of the usb device structure - * when disconnect - */ - usb_put_dev(udev); - } -firmware_load_fail: /* If we didn't unplug usb dongle and remove/insert module, driver * fails on sitesurvey for the first time when device is up. * Reset usb port for sitesurvey fail issue. 
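The r871xu_dev_remove() rework above replaces the global flush_scheduled_work() with targeted flushes of the driver's own work items (the r8712_flush_rwctrl_works() and r8712_flush_led_works() helpers added earlier), so disconnect no longer blocks on unrelated work queued by other users of the system workqueue. A rough sketch of that teardown pattern, with hypothetical names:

#include <linux/workqueue.h>

struct demo_priv {
	struct work_struct led_work;
	struct work_struct pwr_work;
};

/* Called from the disconnect/remove path: wait only for the work items
 * this driver owns. flush_work() returns once the item has finished
 * executing (or immediately if it was never queued), so the callbacks
 * cannot run after the driver's data structures are freed.
 */
static void demo_flush_works(struct demo_priv *priv)
{
	flush_work(&priv->led_work);
	flush_work(&priv->pwr_work);
}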
diff --git a/drivers/staging/rtl8723bs/Kconfig b/drivers/staging/rtl8723bs/Kconfig index a88467334dac..7eae820eae3b 100644 --- a/drivers/staging/rtl8723bs/Kconfig +++ b/drivers/staging/rtl8723bs/Kconfig @@ -5,6 +5,7 @@ config RTL8723BS depends on m select WIRELESS_EXT select WEXT_PRIV + select CRYPTO_LIB_ARC4 help This option enables support for RTL8723BS SDIO drivers, such as the wifi found on the 1st gen Intel Compute Stick, the CHIP diff --git a/drivers/staging/rtl8723bs/hal/sdio_ops.c b/drivers/staging/rtl8723bs/hal/sdio_ops.c index 2dd251ce177e..a545832a468e 100644 --- a/drivers/staging/rtl8723bs/hal/sdio_ops.c +++ b/drivers/staging/rtl8723bs/hal/sdio_ops.c @@ -909,6 +909,8 @@ void sd_int_dpc(struct adapter *adapter) } else { rtw_c2h_wk_cmd(adapter, (u8 *)c2h_evt); } + } else { + kfree(c2h_evt); } } else { /* Error handling for malloc fail */ diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index b32f4ee88e79..ca1b2312d6e7 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -25,7 +25,7 @@ #include "target_core_alua.h" static sense_reason_t -sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char *, u32, bool); +sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char, u32, bool); static sense_reason_t sbc_execute_unmap(struct se_cmd *cmd); static sense_reason_t @@ -279,14 +279,14 @@ static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) } static sense_reason_t -sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops) +sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags, struct sbc_ops *ops) { struct se_device *dev = cmd->se_dev; sector_t end_lba = dev->transport->get_blocks(dev) + 1; unsigned int sectors = sbc_get_write_same_sectors(cmd); sense_reason_t ret; - if ((flags[0] & 0x04) || (flags[0] & 0x02)) { + if ((flags & 0x04) || (flags & 0x02)) { pr_err("WRITE_SAME PBDATA and LBDATA" " bits not supported for Block Discard" " Emulation\n"); @@ -308,7 +308,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o } /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */ - if (flags[0] & 0x10) { + if (flags & 0x10) { pr_warn("WRITE SAME with ANCHOR not supported\n"); return TCM_INVALID_CDB_FIELD; } @@ -316,7 +316,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting * translated into block discard requests within backend code. 
*/ - if (flags[0] & 0x08) { + if (flags & 0x08) { if (!ops->execute_unmap) return TCM_UNSUPPORTED_SCSI_OPCODE; @@ -331,7 +331,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o if (!ops->execute_write_same) return TCM_UNSUPPORTED_SCSI_OPCODE; - ret = sbc_check_prot(dev, cmd, &cmd->t_task_cdb[0], sectors, true); + ret = sbc_check_prot(dev, cmd, flags >> 5, sectors, true); if (ret) return ret; @@ -717,10 +717,9 @@ sbc_set_prot_op_checks(u8 protect, bool fabric_prot, enum target_prot_type prot_ } static sense_reason_t -sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb, +sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char protect, u32 sectors, bool is_write) { - u8 protect = cdb[1] >> 5; int sp_ops = cmd->se_sess->sup_prot_ops; int pi_prot_type = dev->dev_attrib.pi_prot_type; bool fabric_prot = false; @@ -768,7 +767,7 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb, fallthrough; default: pr_err("Unable to determine pi_prot_type for CDB: 0x%02x " - "PROTECT: 0x%02x\n", cdb[0], protect); + "PROTECT: 0x%02x\n", cmd->t_task_cdb[0], protect); return TCM_INVALID_CDB_FIELD; } @@ -843,7 +842,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) if (sbc_check_dpofua(dev, cmd, cdb)) return TCM_INVALID_CDB_FIELD; - ret = sbc_check_prot(dev, cmd, cdb, sectors, false); + ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false); if (ret) return ret; @@ -857,7 +856,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) if (sbc_check_dpofua(dev, cmd, cdb)) return TCM_INVALID_CDB_FIELD; - ret = sbc_check_prot(dev, cmd, cdb, sectors, false); + ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false); if (ret) return ret; @@ -871,7 +870,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) if (sbc_check_dpofua(dev, cmd, cdb)) return TCM_INVALID_CDB_FIELD; - ret = sbc_check_prot(dev, cmd, cdb, sectors, false); + ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false); if (ret) return ret; @@ -892,7 +891,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) if (sbc_check_dpofua(dev, cmd, cdb)) return TCM_INVALID_CDB_FIELD; - ret = sbc_check_prot(dev, cmd, cdb, sectors, true); + ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true); if (ret) return ret; @@ -906,7 +905,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) if (sbc_check_dpofua(dev, cmd, cdb)) return TCM_INVALID_CDB_FIELD; - ret = sbc_check_prot(dev, cmd, cdb, sectors, true); + ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true); if (ret) return ret; @@ -921,7 +920,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) if (sbc_check_dpofua(dev, cmd, cdb)) return TCM_INVALID_CDB_FIELD; - ret = sbc_check_prot(dev, cmd, cdb, sectors, true); + ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true); if (ret) return ret; @@ -980,7 +979,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) size = sbc_get_size(cmd, 1); cmd->t_task_lba = get_unaligned_be64(&cdb[12]); - ret = sbc_setup_write_same(cmd, &cdb[10], ops); + ret = sbc_setup_write_same(cmd, cdb[10], ops); if (ret) return ret; break; @@ -1079,7 +1078,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) size = sbc_get_size(cmd, 1); cmd->t_task_lba = get_unaligned_be64(&cdb[2]); - ret = sbc_setup_write_same(cmd, &cdb[1], ops); + ret = sbc_setup_write_same(cmd, cdb[1], ops); if (ret) return ret; break; @@ -1097,7 +1096,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) * Follow sbcr26 with WRITE_SAME (10) and 
check for the existence * of byte 1 bit 3 UNMAP instead of original reserved field */ - ret = sbc_setup_write_same(cmd, &cdb[1], ops); + ret = sbc_setup_write_same(cmd, cdb[1], ops); if (ret) return ret; break; diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 7e35eddd9eb7..26ceabe34de5 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -886,7 +886,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) INIT_WORK(&cmd->work, success ? target_complete_ok_work : target_complete_failure_work); - if (wwn->cmd_compl_affinity == SE_COMPL_AFFINITY_CPUID) + if (!wwn || wwn->cmd_compl_affinity == SE_COMPL_AFFINITY_CPUID) cpu = cmd->cpuid; else cpu = wwn->cmd_compl_affinity; diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c index 6e6eb836e9b6..945f03da0223 100644 --- a/drivers/tee/optee/call.c +++ b/drivers/tee/optee/call.c @@ -184,7 +184,7 @@ static struct tee_shm *get_msg_arg(struct tee_context *ctx, size_t num_params, struct optee_msg_arg *ma; shm = tee_shm_alloc(ctx, OPTEE_MSG_GET_ARG_SIZE(num_params), - TEE_SHM_MAPPED); + TEE_SHM_MAPPED | TEE_SHM_PRIV); if (IS_ERR(shm)) return shm; @@ -416,11 +416,13 @@ void optee_enable_shm_cache(struct optee *optee) } /** - * optee_disable_shm_cache() - Disables caching of some shared memory allocation - * in OP-TEE + * __optee_disable_shm_cache() - Disables caching of some shared memory + * allocation in OP-TEE * @optee: main service struct + * @is_mapped: true if the cached shared memory addresses were mapped by this + * kernel, are safe to dereference, and should be freed */ -void optee_disable_shm_cache(struct optee *optee) +static void __optee_disable_shm_cache(struct optee *optee, bool is_mapped) { struct optee_call_waiter w; @@ -439,6 +441,13 @@ void optee_disable_shm_cache(struct optee *optee) if (res.result.status == OPTEE_SMC_RETURN_OK) { struct tee_shm *shm; + /* + * Shared memory references that were not mapped by + * this kernel must be ignored to prevent a crash. 
+ */ + if (!is_mapped) + continue; + shm = reg_pair_to_ptr(res.result.shm_upper32, res.result.shm_lower32); tee_shm_free(shm); @@ -449,6 +458,27 @@ optee_cq_wait_final(&optee->call_queue, &w); } +/** + * optee_disable_shm_cache() - Disables caching of mapped shared memory + * allocations in OP-TEE + * @optee: main service struct + */ +void optee_disable_shm_cache(struct optee *optee) +{ + return __optee_disable_shm_cache(optee, true); +} + +/** + * optee_disable_unmapped_shm_cache() - Disables caching of shared memory + * allocations in OP-TEE which are not + * currently mapped + * @optee: main service struct + */ +void optee_disable_unmapped_shm_cache(struct optee *optee) +{ + return __optee_disable_shm_cache(optee, false); +} + #define PAGELIST_ENTRIES_PER_PAGE \ ((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1) diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c index ddb8f9ecf307..5ce13b099d7d 100644 --- a/drivers/tee/optee/core.c +++ b/drivers/tee/optee/core.c @@ -6,6 +6,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/arm-smccc.h> +#include <linux/crash_dump.h> #include <linux/errno.h> #include <linux/io.h> #include <linux/module.h> @@ -277,7 +278,8 @@ static void optee_release(struct tee_context *ctx) if (!ctxdata) return; - shm = tee_shm_alloc(ctx, sizeof(struct optee_msg_arg), TEE_SHM_MAPPED); + shm = tee_shm_alloc(ctx, sizeof(struct optee_msg_arg), + TEE_SHM_MAPPED | TEE_SHM_PRIV); if (!IS_ERR(shm)) { arg = tee_shm_get_va(shm, 0); /* @@ -572,6 +574,13 @@ static optee_invoke_fn *get_invoke_func(struct device *dev) return ERR_PTR(-EINVAL); } +/* optee_remove - Device Removal Routine + * @pdev: platform device information struct + * + * optee_remove is called by the platform subsystem to alert the driver + * that it should release the device + */ + static int optee_remove(struct platform_device *pdev) { struct optee *optee = platform_get_drvdata(pdev); @@ -602,6 +611,18 @@ static int optee_remove(struct platform_device *pdev) return 0; } +/* optee_shutdown - Device Shutdown Routine + * @pdev: platform device information struct + * + * platform_shutdown is called by the platform subsystem to alert + * the driver that a shutdown, reboot, or kexec is happening and the + * device must be disabled. + */ +static void optee_shutdown(struct platform_device *pdev) +{ + optee_disable_shm_cache(platform_get_drvdata(pdev)); +} + static int optee_probe(struct platform_device *pdev) { optee_invoke_fn *invoke_fn; @@ -612,6 +633,16 @@ static int optee_probe(struct platform_device *pdev) u32 sec_caps; int rc; + /* + * The kernel may have crashed at the same time that all available + * secure world threads were suspended and we cannot reschedule the + * suspended threads without access to the crashed kernel's wait_queue. + * Therefore, we cannot reliably initialize the OP-TEE driver in the + * kdump kernel. + */ + if (is_kdump_kernel()) + return -ENODEV; + invoke_fn = get_invoke_func(&pdev->dev); if (IS_ERR(invoke_fn)) return PTR_ERR(invoke_fn); @@ -686,6 +717,15 @@ static int optee_probe(struct platform_device *pdev) optee->memremaped_shm = memremaped_shm; optee->pool = pool; + /* + * Ensure that there are no pre-existing shm objects before enabling + * the shm cache so that there's no chance of receiving an invalid + * address during shutdown. This could occur, for example, if we're + * kexec booting from an older kernel that did not properly clean up the + * shm cache. 
+ */ + optee_disable_unmapped_shm_cache(optee); + optee_enable_shm_cache(optee); if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) @@ -728,6 +768,7 @@ MODULE_DEVICE_TABLE(of, optee_dt_match); static struct platform_driver optee_driver = { .probe = optee_probe, .remove = optee_remove, + .shutdown = optee_shutdown, .driver = { .name = "optee", .of_match_table = optee_dt_match, diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h index e25b216a14ef..dbdd367be156 100644 --- a/drivers/tee/optee/optee_private.h +++ b/drivers/tee/optee/optee_private.h @@ -159,6 +159,7 @@ int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session); void optee_enable_shm_cache(struct optee *optee); void optee_disable_shm_cache(struct optee *optee); +void optee_disable_unmapped_shm_cache(struct optee *optee); int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm, struct page **pages, size_t num_pages, diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c index 1849180b0278..efbaff7ad7e5 100644 --- a/drivers/tee/optee/rpc.c +++ b/drivers/tee/optee/rpc.c @@ -314,7 +314,7 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx, shm = cmd_alloc_suppl(ctx, sz); break; case OPTEE_RPC_SHM_TYPE_KERNEL: - shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED); + shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV); break; default: arg->ret = TEEC_ERROR_BAD_PARAMETERS; @@ -502,7 +502,8 @@ void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param, switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) { case OPTEE_SMC_RPC_FUNC_ALLOC: - shm = tee_shm_alloc(ctx, param->a1, TEE_SHM_MAPPED); + shm = tee_shm_alloc(ctx, param->a1, + TEE_SHM_MAPPED | TEE_SHM_PRIV); if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) { reg_pair_from_64(¶m->a1, ¶m->a2, pa); reg_pair_from_64(¶m->a4, ¶m->a5, diff --git a/drivers/tee/optee/shm_pool.c b/drivers/tee/optee/shm_pool.c index d767eebf30bd..c41a9a501a6e 100644 --- a/drivers/tee/optee/shm_pool.c +++ b/drivers/tee/optee/shm_pool.c @@ -27,13 +27,19 @@ static int pool_op_alloc(struct tee_shm_pool_mgr *poolm, shm->paddr = page_to_phys(page); shm->size = PAGE_SIZE << order; - if (shm->flags & TEE_SHM_DMA_BUF) { + /* + * Shared memory private to the OP-TEE driver doesn't need + * to be registered with OP-TEE. 
+ */ + if (!(shm->flags & TEE_SHM_PRIV)) { unsigned int nr_pages = 1 << order, i; struct page **pages; pages = kcalloc(nr_pages, sizeof(pages), GFP_KERNEL); - if (!pages) - return -ENOMEM; + if (!pages) { + rc = -ENOMEM; + goto err; + } for (i = 0; i < nr_pages; i++) { pages[i] = page; @@ -44,15 +50,21 @@ static int pool_op_alloc(struct tee_shm_pool_mgr *poolm, rc = optee_shm_register(shm->ctx, shm, pages, nr_pages, (unsigned long)shm->kaddr); kfree(pages); + if (rc) + goto err; } + return 0; + +err: + __free_pages(page, order); return rc; } static void pool_op_free(struct tee_shm_pool_mgr *poolm, struct tee_shm *shm) { - if (shm->flags & TEE_SHM_DMA_BUF) + if (!(shm->flags & TEE_SHM_PRIV)) optee_shm_unregister(shm->ctx, shm); free_pages((unsigned long)shm->kaddr, get_order(shm->size)); diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c index 00472f5ce22e..8a9384a64f3e 100644 --- a/drivers/tee/tee_shm.c +++ b/drivers/tee/tee_shm.c @@ -117,7 +117,7 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags) return ERR_PTR(-EINVAL); } - if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF))) { + if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF | TEE_SHM_PRIV))) { dev_err(teedev->dev.parent, "invalid shm flags 0x%x", flags); return ERR_PTR(-EINVAL); } @@ -193,6 +193,24 @@ err_dev_put: } EXPORT_SYMBOL_GPL(tee_shm_alloc); +/** + * tee_shm_alloc_kernel_buf() - Allocate shared memory for kernel buffer + * @ctx: Context that allocates the shared memory + * @size: Requested size of shared memory + * + * The returned memory registered in secure world and is suitable to be + * passed as a memory buffer in parameter argument to + * tee_client_invoke_func(). The memory allocated is later freed with a + * call to tee_shm_free(). + * + * @returns a pointer to 'struct tee_shm' + */ +struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size) +{ + return tee_shm_alloc(ctx, size, TEE_SHM_MAPPED); +} +EXPORT_SYMBOL_GPL(tee_shm_alloc_kernel_buf); + struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr, size_t length, u32 flags) { diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index 83b1ef3d5d03..10d6b228cc94 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -1875,18 +1875,6 @@ static struct attribute *switch_attrs[] = { NULL, }; -static bool has_port(const struct tb_switch *sw, enum tb_port_type type) -{ - const struct tb_port *port; - - tb_switch_for_each_port(sw, port) { - if (!port->disabled && port->config.type == type) - return true; - } - - return false; -} - static umode_t switch_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n) { @@ -1895,8 +1883,7 @@ static umode_t switch_attr_is_visible(struct kobject *kobj, if (attr == &dev_attr_authorized.attr) { if (sw->tb->security_level == TB_SECURITY_NOPCIE || - sw->tb->security_level == TB_SECURITY_DPONLY || - !has_port(sw, TB_TYPE_PCIE_UP)) + sw->tb->security_level == TB_SECURITY_DPONLY) return 0; } else if (attr == &dev_attr_device.attr) { if (!sw->device) diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c index 4caab8714e2c..2350fb3bb5e4 100644 --- a/drivers/tty/serial/8250/8250_aspeed_vuart.c +++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c @@ -329,6 +329,7 @@ static int aspeed_vuart_handle_irq(struct uart_port *port) { struct uart_8250_port *up = up_to_u8250p(port); unsigned int iir, lsr; + unsigned long flags; unsigned int space, count; iir = serial_port_in(port, 
UART_IIR); @@ -336,7 +337,7 @@ static int aspeed_vuart_handle_irq(struct uart_port *port) if (iir & UART_IIR_NO_INT) return 0; - spin_lock(&port->lock); + spin_lock_irqsave(&port->lock, flags); lsr = serial_port_in(port, UART_LSR); @@ -370,7 +371,7 @@ static int aspeed_vuart_handle_irq(struct uart_port *port) if (lsr & UART_LSR_THRE) serial8250_tx_chars(up); - uart_unlock_and_check_sysrq(port); + uart_unlock_and_check_sysrq_irqrestore(port, flags); return 1; } diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c index 4e75d2e4f87c..fc65a2293ce9 100644 --- a/drivers/tty/serial/8250/8250_fsl.c +++ b/drivers/tty/serial/8250/8250_fsl.c @@ -30,10 +30,11 @@ struct fsl8250_data { int fsl8250_handle_irq(struct uart_port *port) { unsigned char lsr, orig_lsr; + unsigned long flags; unsigned int iir; struct uart_8250_port *up = up_to_u8250p(port); - spin_lock(&up->port.lock); + spin_lock_irqsave(&up->port.lock, flags); iir = port->serial_in(port, UART_IIR); if (iir & UART_IIR_NO_INT) { @@ -82,7 +83,7 @@ int fsl8250_handle_irq(struct uart_port *port) up->lsr_saved_flags = orig_lsr; - uart_unlock_and_check_sysrq(&up->port); + uart_unlock_and_check_sysrq_irqrestore(&up->port, flags); return 1; } diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c index f7d3023f860f..fb65dc601b23 100644 --- a/drivers/tty/serial/8250/8250_mtk.c +++ b/drivers/tty/serial/8250/8250_mtk.c @@ -93,10 +93,13 @@ static void mtk8250_dma_rx_complete(void *param) struct dma_tx_state state; int copied, total, cnt; unsigned char *ptr; + unsigned long flags; if (data->rx_status == DMA_RX_SHUTDOWN) return; + spin_lock_irqsave(&up->port.lock, flags); + dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state); total = dma->rx_size - state.residue; cnt = total; @@ -120,6 +123,8 @@ static void mtk8250_dma_rx_complete(void *param) tty_flip_buffer_push(tty_port); mtk8250_rx_dma(up); + + spin_unlock_irqrestore(&up->port.lock, flags); } static void mtk8250_rx_dma(struct uart_8250_port *up) diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 75827b608fdb..a808c283883e 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -3836,6 +3836,12 @@ static const struct pci_device_id blacklist[] = { { PCI_VDEVICE(INTEL, 0x0f0c), }, { PCI_VDEVICE(INTEL, 0x228a), }, { PCI_VDEVICE(INTEL, 0x228c), }, + { PCI_VDEVICE(INTEL, 0x4b96), }, + { PCI_VDEVICE(INTEL, 0x4b97), }, + { PCI_VDEVICE(INTEL, 0x4b98), }, + { PCI_VDEVICE(INTEL, 0x4b99), }, + { PCI_VDEVICE(INTEL, 0x4b9a), }, + { PCI_VDEVICE(INTEL, 0x4b9b), }, { PCI_VDEVICE(INTEL, 0x9ce3), }, { PCI_VDEVICE(INTEL, 0x9ce4), }, @@ -3996,6 +4002,7 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board) if (pci_match_id(pci_use_msi, dev)) { dev_dbg(&dev->dev, "Using MSI(-X) interrupts\n"); pci_set_master(dev); + uart.port.flags &= ~UPF_SHARE_IRQ; rc = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_ALL_TYPES); } else { dev_dbg(&dev->dev, "Using legacy interrupts\n"); diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 2164290cbd31..1da29a219842 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -311,7 +311,11 @@ static const struct serial8250_config uart_config[] = { /* Uart divisor latch read */ static int default_serial_dl_read(struct uart_8250_port *up) { - return serial_in(up, UART_DLL) | serial_in(up, UART_DLM) << 8; + /* Assign these in pieces to truncate any bits above 
7. */ + unsigned char dll = serial_in(up, UART_DLL); + unsigned char dlm = serial_in(up, UART_DLM); + + return dll | dlm << 8; } /* Uart divisor latch write */ @@ -1297,9 +1301,11 @@ static void autoconfig(struct uart_8250_port *up) serial_out(up, UART_LCR, 0); serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO); - scratch = serial_in(up, UART_IIR) >> 6; - switch (scratch) { + /* Assign this as it is to truncate any bits above 7. */ + scratch = serial_in(up, UART_IIR); + + switch (scratch >> 6) { case 0: autoconfig_8250(up); break; @@ -1893,11 +1899,12 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir) unsigned char status; struct uart_8250_port *up = up_to_u8250p(port); bool skip_rx = false; + unsigned long flags; if (iir & UART_IIR_NO_INT) return 0; - spin_lock(&port->lock); + spin_lock_irqsave(&port->lock, flags); status = serial_port_in(port, UART_LSR); @@ -1923,7 +1930,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir) (up->ier & UART_IER_THRI)) serial8250_tx_chars(up); - uart_unlock_and_check_sysrq(port); + uart_unlock_and_check_sysrq_irqrestore(port, flags); return 1; } diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index 508128ddfa01..f0e5da77ed6d 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -1415,7 +1415,7 @@ static unsigned int lpuart_get_mctrl(struct uart_port *port) static unsigned int lpuart32_get_mctrl(struct uart_port *port) { - unsigned int mctrl = 0; + unsigned int mctrl = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS; u32 reg; reg = lpuart32_read(port, UARTCTRL); diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c index 0c1e4df52215..ef11860cd69e 100644 --- a/drivers/tty/serial/max310x.c +++ b/drivers/tty/serial/max310x.c @@ -1293,7 +1293,8 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty freq = uartclk; if (freq == 0) { dev_err(dev, "Cannot get clock rate\n"); - return -EINVAL; + ret = -EINVAL; + goto out_clk; } if (xtal) { diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c index 222032792d6c..eba5b9ecba34 100644 --- a/drivers/tty/serial/serial-tegra.c +++ b/drivers/tty/serial/serial-tegra.c @@ -1045,9 +1045,11 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup) if (tup->cdata->fifo_mode_enable_status) { ret = tegra_uart_wait_fifo_mode_enabled(tup); - dev_err(tup->uport.dev, "FIFO mode not enabled\n"); - if (ret < 0) + if (ret < 0) { + dev_err(tup->uport.dev, + "Failed to enable FIFO mode: %d\n", ret); return ret; + } } else { /* * For all tegra devices (up to t210), there is a hardware diff --git a/drivers/usb/cdns3/cdns3-ep0.c b/drivers/usb/cdns3/cdns3-ep0.c index 02ec7ab4bb48..e29989d57bef 100644 --- a/drivers/usb/cdns3/cdns3-ep0.c +++ b/drivers/usb/cdns3/cdns3-ep0.c @@ -731,6 +731,7 @@ static int cdns3_gadget_ep0_queue(struct usb_ep *ep, request->actual = 0; priv_dev->status_completion_no_call = true; priv_dev->pending_status_request = request; + usb_gadget_set_state(&priv_dev->gadget, USB_STATE_CONFIGURED); spin_unlock_irqrestore(&priv_dev->lock, flags); /* diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c index c23f53e9b1ef..27df0c697897 100644 --- a/drivers/usb/cdns3/cdnsp-gadget.c +++ b/drivers/usb/cdns3/cdnsp-gadget.c @@ -1882,7 +1882,7 @@ static int __cdnsp_gadget_init(struct cdns *cdns) pdev->gadget.name = "cdnsp-gadget"; pdev->gadget.speed = USB_SPEED_UNKNOWN; pdev->gadget.sg_supported = 1; - pdev->gadget.max_speed = 
USB_SPEED_SUPER_PLUS; + pdev->gadget.max_speed = max_speed; pdev->gadget.lpm_capable = 1; pdev->setup_buf = kzalloc(CDNSP_EP0_SETUP_SIZE, GFP_KERNEL); diff --git a/drivers/usb/cdns3/cdnsp-gadget.h b/drivers/usb/cdns3/cdnsp-gadget.h index 783ca8ffde00..f740fa6089d8 100644 --- a/drivers/usb/cdns3/cdnsp-gadget.h +++ b/drivers/usb/cdns3/cdnsp-gadget.h @@ -383,8 +383,8 @@ struct cdnsp_intr_reg { #define IMAN_IE BIT(1) #define IMAN_IP BIT(0) /* bits 2:31 need to be preserved */ -#define IMAN_IE_SET(p) (((p) & IMAN_IE) | 0x2) -#define IMAN_IE_CLEAR(p) (((p) & IMAN_IE) & ~(0x2)) +#define IMAN_IE_SET(p) ((p) | IMAN_IE) +#define IMAN_IE_CLEAR(p) ((p) & ~IMAN_IE) /* IMOD - Interrupter Moderation Register - irq_control bitmasks. */ /* diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c index 68972746e363..1b1438457fb0 100644 --- a/drivers/usb/cdns3/cdnsp-ring.c +++ b/drivers/usb/cdns3/cdnsp-ring.c @@ -1932,15 +1932,13 @@ int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq) } if (enqd_len + trb_buff_len >= full_len) { - if (need_zero_pkt && zero_len_trb) { - zero_len_trb = true; - } else { - field &= ~TRB_CHAIN; - field |= TRB_IOC; - more_trbs_coming = false; - need_zero_pkt = false; - preq->td.last_trb = ring->enqueue; - } + if (need_zero_pkt) + zero_len_trb = !zero_len_trb; + + field &= ~TRB_CHAIN; + field |= TRB_IOC; + more_trbs_coming = false; + preq->td.last_trb = ring->enqueue; } /* Only set interrupt on short packet for OUT endpoints. */ @@ -1955,7 +1953,7 @@ int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq) length_field = TRB_LEN(trb_buff_len) | TRB_TD_SIZE(remainder) | TRB_INTR_TARGET(0); - cdnsp_queue_trb(pdev, ring, more_trbs_coming | need_zero_pkt, + cdnsp_queue_trb(pdev, ring, more_trbs_coming | zero_len_trb, lower_32_bits(send_addr), upper_32_bits(send_addr), length_field, diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index fdf79bcf7eb0..35d5908b5478 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -824,7 +824,7 @@ static struct usb_class_driver wdm_class = { }; /* --- WWAN framework integration --- */ -#ifdef CONFIG_WWAN +#ifdef CONFIG_WWAN_CORE static int wdm_wwan_port_start(struct wwan_port *port) { struct wdm_device *desc = wwan_port_get_drvdata(port); @@ -963,11 +963,11 @@ static void wdm_wwan_rx(struct wdm_device *desc, int length) /* inbuf has been copied, it is safe to check for outstanding data */ schedule_work(&desc->service_outs_intr); } -#else /* CONFIG_WWAN */ +#else /* CONFIG_WWAN_CORE */ static void wdm_wwan_init(struct wdm_device *desc) {} static void wdm_wwan_deinit(struct wdm_device *desc) {} static void wdm_wwan_rx(struct wdm_device *desc, int length) {} -#endif /* CONFIG_WWAN */ +#endif /* CONFIG_WWAN_CORE */ /* --- error handling --- */ static void wdm_rxwork(struct work_struct *work) diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c index 74d5a9c5238a..73f419adce61 100644 --- a/drivers/usb/class/usbtmc.c +++ b/drivers/usb/class/usbtmc.c @@ -2324,17 +2324,10 @@ static void usbtmc_interrupt(struct urb *urb) dev_err(dev, "overflow with length %d, actual length is %d\n", data->iin_wMaxPacketSize, urb->actual_length); fallthrough; - case -ECONNRESET: - case -ENOENT: - case -ESHUTDOWN: - case -EILSEQ: - case -ETIME: - case -EPIPE: + default: /* urb terminated, clean up */ dev_dbg(dev, "urb terminated, status: %d\n", status); return; - default: - dev_err(dev, "unknown status received: %d\n", status); } exit: rv = 
usb_submit_urb(urb, GFP_ATOMIC); diff --git a/drivers/usb/common/usb-otg-fsm.c b/drivers/usb/common/usb-otg-fsm.c index 3740cf95560e..0697fde51d00 100644 --- a/drivers/usb/common/usb-otg-fsm.c +++ b/drivers/usb/common/usb-otg-fsm.c @@ -193,7 +193,11 @@ static void otg_start_hnp_polling(struct otg_fsm *fsm) if (!fsm->host_req_flag) return; - INIT_DELAYED_WORK(&fsm->hnp_polling_work, otg_hnp_polling_work); + if (!fsm->hnp_work_inited) { + INIT_DELAYED_WORK(&fsm->hnp_polling_work, otg_hnp_polling_work); + fsm->hnp_work_inited = true; + } + schedule_delayed_work(&fsm->hnp_polling_work, msecs_to_jiffies(T_HOST_REQ_POLL)); } diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index b97464498763..9618ba622a2d 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -1133,7 +1133,7 @@ static int do_proc_control(struct usb_dev_state *ps, "wIndex=%04x wLength=%04x\n", ctrl->bRequestType, ctrl->bRequest, ctrl->wValue, ctrl->wIndex, ctrl->wLength); - if (ctrl->bRequestType & 0x80) { + if ((ctrl->bRequestType & USB_DIR_IN) && ctrl->wLength) { pipe = usb_rcvctrlpipe(dev, 0); snoop_urb(dev, NULL, pipe, ctrl->wLength, tmo, SUBMIT, NULL, 0); diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index d1efc7141333..86658a81d284 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -48,6 +48,7 @@ #define USB_TP_TRANSMISSION_DELAY 40 /* ns */ #define USB_TP_TRANSMISSION_DELAY_MAX 65535 /* ns */ +#define USB_PING_RESPONSE_TIME 400 /* ns */ /* Protect struct usb_device->state and ->children members * Note: Both are also protected by ->dev.sem, except that ->state can @@ -182,8 +183,9 @@ int usb_device_supports_lpm(struct usb_device *udev) } /* - * Set the Maximum Exit Latency (MEL) for the host to initiate a transition from - * either U1 or U2. + * Set the Maximum Exit Latency (MEL) for the host to wake up the path from + * U1/U2, send a PING to the device and receive a PING_RESPONSE. + * See USB 3.1 section C.1.5.2 */ static void usb_set_lpm_mel(struct usb_device *udev, struct usb3_lpm_parameters *udev_lpm_params, @@ -193,35 +195,37 @@ static void usb_set_lpm_mel(struct usb_device *udev, unsigned int hub_exit_latency) { unsigned int total_mel; - unsigned int device_mel; - unsigned int hub_mel; /* - * Calculate the time it takes to transition all links from the roothub - * to the parent hub into U0. The parent hub must then decode the - * packet (hub header decode latency) to figure out which port it was - * bound for. - * - * The Hub Header decode latency is expressed in 0.1us intervals (0x1 - * means 0.1us). Multiply that by 100 to get nanoseconds. + * tMEL1. time to transition path from host to device into U0. + * MEL for parent already contains the delay up to parent, so only add + * the exit latency for the last link (pick the slower exit latency), + * and the hub header decode latency. See USB 3.1 section C 2.2.1 + * Store MEL in nanoseconds */ total_mel = hub_lpm_params->mel + - (hub->descriptor->u.ss.bHubHdrDecLat * 100); + max(udev_exit_latency, hub_exit_latency) * 1000 + + hub->descriptor->u.ss.bHubHdrDecLat * 100; /* - * How long will it take to transition the downstream hub's port into - * U0? The greater of either the hub exit latency or the device exit - * latency. - * - * The BOS U1/U2 exit latencies are expressed in 1us intervals. - * Multiply that by 1000 to get nanoseconds. + * tMEL2. Time to submit PING packet. Sum of tTPTransmissionDelay for + each link + wHubDelay for each hub. Add only for last link. 
+ tMEL4, the time for PING_RESPONSE to traverse upstream is similar. + * Multiply by 2 to include it as well. */ - device_mel = udev_exit_latency * 1000; - hub_mel = hub_exit_latency * 1000; - if (device_mel > hub_mel) - total_mel += device_mel; - else - total_mel += hub_mel; + total_mel += (__le16_to_cpu(hub->descriptor->u.ss.wHubDelay) + + USB_TP_TRANSMISSION_DELAY) * 2; + + /* + * tMEL3, tPingResponse. Time taken by device to generate PING_RESPONSE + * after receiving PING. Also add 2100ns as stated in USB 3.1 C 1.5.2.4 + * to cover the delay if the PING_RESPONSE is queued behind a Max Packet + * Size DP. + * Note these delays should be added only once for the entire path, so + * add them to the MEL of the device connected to the roothub. + */ + if (!hub->hdev->parent) + total_mel += USB_PING_RESPONSE_TIME + 2100; udev_lpm_params->mel = total_mel; } @@ -4113,6 +4117,47 @@ static int usb_set_lpm_timeout(struct usb_device *udev, } /* + * Don't allow device initiated U1/U2 if the system exit latency + one bus + * interval is greater than the minimum service interval of any active + * periodic endpoint. See USB 3.2 section 9.4.9 + */ +static bool usb_device_may_initiate_lpm(struct usb_device *udev, + enum usb3_link_state state) +{ + unsigned int sel; /* us */ + int i, j; + + if (state == USB3_LPM_U1) + sel = DIV_ROUND_UP(udev->u1_params.sel, 1000); + else if (state == USB3_LPM_U2) + sel = DIV_ROUND_UP(udev->u2_params.sel, 1000); + else + return false; + + for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) { + struct usb_interface *intf; + struct usb_endpoint_descriptor *desc; + unsigned int interval; + + intf = udev->actconfig->interface[i]; + if (!intf) + continue; + + for (j = 0; j < intf->cur_altsetting->desc.bNumEndpoints; j++) { + desc = &intf->cur_altsetting->endpoint[j].desc; + + if (usb_endpoint_xfer_int(desc) || + usb_endpoint_xfer_isoc(desc)) { + interval = (1 << (desc->bInterval - 1)) * 125; + if (sel + 125 > interval) + return false; + } + } + } + return true; +} + +/* * Enable the hub-initiated U1/U2 idle timeouts, and enable device-initiated * U1/U2 entry. * @@ -4184,20 +4229,23 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev, * U1/U2_ENABLE */ if (udev->actconfig && - usb_set_device_initiated_lpm(udev, state, true) == 0) { - if (state == USB3_LPM_U1) - udev->usb3_lpm_u1_enabled = 1; - else if (state == USB3_LPM_U2) - udev->usb3_lpm_u2_enabled = 1; - } else { - /* Don't request U1/U2 entry if the device - * cannot transition to U1/U2. - */ - usb_set_lpm_timeout(udev, state, 0); - hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state); + usb_device_may_initiate_lpm(udev, state)) { + if (usb_set_device_initiated_lpm(udev, state, true)) { + /* + * Request to enable device initiated U1/U2 failed, + * better to turn off lpm in this case. + */ + usb_set_lpm_timeout(udev, state, 0); + hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state); + return; + } } -} + if (state == USB3_LPM_U1) + udev->usb3_lpm_u1_enabled = 1; + else if (state == USB3_LPM_U2) + udev->usb3_lpm_u2_enabled = 1; +} /* * Disable the hub-initiated U1/U2 idle timeouts, and disable device-initiated * U1/U2 entry. 
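The interval check added in usb_device_may_initiate_lpm() above can be illustrated with a minimal stand-alone sketch (user-space C; the function and values below are illustrative only and not part of the patch): an endpoint's service interval is (1 << (bInterval - 1)) * 125 us, and device-initiated U1/U2 stays permitted only while the system exit latency plus one 125 us bus interval fits inside it.

#include <stdio.h>

/*
 * Stand-alone sketch of the timing rule in usb_device_may_initiate_lpm():
 * reject device-initiated U1/U2 when the system exit latency (sel, in us)
 * plus one 125 us bus interval exceeds the endpoint's service interval.
 * Names here are illustrative, not kernel API.
 */
static int may_initiate_lpm(unsigned int sel_us, unsigned int bInterval)
{
	unsigned int interval_us = (1u << (bInterval - 1)) * 125;

	return sel_us + 125 <= interval_us;
}

int main(void)
{
	/* bInterval = 4 gives a (1 << 3) * 125 = 1000 us service interval */
	printf("sel 512 us: %d\n", may_initiate_lpm(512, 4)); /* 637 <= 1000 -> allowed (1) */
	printf("sel 900 us: %d\n", may_initiate_lpm(900, 4)); /* 1025 > 1000 -> rejected (0) */
	return 0;
}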
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 6114cf83bb44..8239fe7129dd 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -501,10 +501,6 @@ static const struct usb_device_id usb_quirk_list[] = { /* DJI CineSSD */ { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM }, - /* Fibocom L850-GL LTE Modem */ - { USB_DEVICE(0x2cb7, 0x0007), .driver_info = - USB_QUIRK_IGNORE_REMOTE_WAKEUP }, - /* INTEL VALUE SSD */ { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h index ab6b815e0089..483de2bbfaab 100644 --- a/drivers/usb/dwc2/core.h +++ b/drivers/usb/dwc2/core.h @@ -383,6 +383,9 @@ enum dwc2_ep0_state { * 0 - No (default) * 1 - Partial power down * 2 - Hibernation + * @no_clock_gating: Specifies whether to avoid clock gating feature. + * 0 - No (use clock gating) + * 1 - Yes (avoid it) * @lpm: Enable LPM support. * 0 - No * 1 - Yes @@ -480,6 +483,7 @@ struct dwc2_core_params { #define DWC2_POWER_DOWN_PARAM_NONE 0 #define DWC2_POWER_DOWN_PARAM_PARTIAL 1 #define DWC2_POWER_DOWN_PARAM_HIBERNATION 2 + bool no_clock_gating; bool lpm; bool lpm_clock_gating; diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c index a5ab03808da6..a5c52b237e72 100644 --- a/drivers/usb/dwc2/core_intr.c +++ b/drivers/usb/dwc2/core_intr.c @@ -556,7 +556,8 @@ static void dwc2_handle_usb_suspend_intr(struct dwc2_hsotg *hsotg) * If neither hibernation nor partial power down are supported, * clock gating is used to save power. */ - dwc2_gadget_enter_clock_gating(hsotg); + if (!hsotg->params.no_clock_gating) + dwc2_gadget_enter_clock_gating(hsotg); } /* diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index c581ee41ac81..3146df6e6510 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -2749,12 +2749,14 @@ static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg, return; } - /* Zlp for all endpoints, for ep0 only in DATA IN stage */ + /* Zlp for all endpoints in non DDMA, for ep0 only in DATA IN stage */ if (hs_ep->send_zlp) { - dwc2_hsotg_program_zlp(hsotg, hs_ep); hs_ep->send_zlp = 0; - /* transfer will be completed on next complete interrupt */ - return; + if (!using_desc_dma(hsotg)) { + dwc2_hsotg_program_zlp(hsotg, hs_ep); + /* transfer will be completed on next complete interrupt */ + return; + } } if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_DATA_IN) { @@ -3900,9 +3902,27 @@ static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg, __func__); } } else { + /* Mask GINTSTS_GOUTNAKEFF interrupt */ + dwc2_hsotg_disable_gsint(hsotg, GINTSTS_GOUTNAKEFF); + if (!(dwc2_readl(hsotg, GINTSTS) & GINTSTS_GOUTNAKEFF)) dwc2_set_bit(hsotg, DCTL, DCTL_SGOUTNAK); + if (!using_dma(hsotg)) { + /* Wait for GINTSTS_RXFLVL interrupt */ + if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS, + GINTSTS_RXFLVL, 100)) { + dev_warn(hsotg->dev, "%s: timeout GINTSTS.RXFLVL\n", + __func__); + } else { + /* + * Pop GLOBAL OUT NAK status packet from RxFIFO + * to assert GOUTNAKEFF interrupt + */ + dwc2_readl(hsotg, GRXSTSP); + } + } + /* Wait for global nak to take effect */ if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS, GINTSTS_GOUTNAKEFF, 100)) @@ -4348,6 +4368,9 @@ static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now) epctl = dwc2_readl(hs, epreg); if (value) { + /* Unmask GOUTNAKEFF interrupt */ + dwc2_hsotg_en_gsint(hs, GINTSTS_GOUTNAKEFF); + if (!(dwc2_readl(hs, GINTSTS) & GINTSTS_GOUTNAKEFF)) dwc2_set_bit(hs, DCTL, 
DCTL_SGOUTNAK); // STALL bit will be set in GOUTNAKEFF interrupt handler diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index 035d4911a3c3..2a7828971d05 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -3338,7 +3338,8 @@ int dwc2_port_suspend(struct dwc2_hsotg *hsotg, u16 windex) * If not hibernation nor partial power down are supported, * clock gating is used to save power. */ - dwc2_host_enter_clock_gating(hsotg); + if (!hsotg->params.no_clock_gating) + dwc2_host_enter_clock_gating(hsotg); break; } @@ -4402,7 +4403,8 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd) * If not hibernation nor partial power down are supported, * clock gating is used to save power. */ - dwc2_host_enter_clock_gating(hsotg); + if (!hsotg->params.no_clock_gating) + dwc2_host_enter_clock_gating(hsotg); /* After entering suspend, hardware is not accessible */ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c index 67c5eb140232..59e119345994 100644 --- a/drivers/usb/dwc2/params.c +++ b/drivers/usb/dwc2/params.c @@ -76,6 +76,7 @@ static void dwc2_set_s3c6400_params(struct dwc2_hsotg *hsotg) struct dwc2_core_params *p = &hsotg->params; p->power_down = DWC2_POWER_DOWN_PARAM_NONE; + p->no_clock_gating = true; p->phy_utmi_width = 8; } diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index dccdf13b5f9e..5991766239ba 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -1279,6 +1279,7 @@ struct dwc3 { unsigned dis_metastability_quirk:1; unsigned dis_split_quirk:1; + unsigned async_callbacks:1; u16 imod_interval; }; diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c index 3cd294264372..2f9e45eed228 100644 --- a/drivers/usb/dwc3/ep0.c +++ b/drivers/usb/dwc3/ep0.c @@ -597,11 +597,13 @@ static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl) static int dwc3_ep0_delegate_req(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl) { - int ret; + int ret = -EINVAL; - spin_unlock(&dwc->lock); - ret = dwc->gadget_driver->setup(dwc->gadget, ctrl); - spin_lock(&dwc->lock); + if (dwc->async_callbacks) { + spin_unlock(&dwc->lock); + ret = dwc->gadget_driver->setup(dwc->gadget, ctrl); + spin_lock(&dwc->lock); + } return ret; } diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index af6d7f157989..84fe57ef5a49 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -2250,6 +2250,17 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) } /* + * Avoid issuing a runtime resume if the device is already in the + * suspended state during gadget disconnect. DWC3 gadget was already + * halted/stopped during runtime suspend. + */ + if (!is_on) { + pm_runtime_barrier(dwc->dev); + if (pm_runtime_suspended(dwc->dev)) + return 0; + } + + /* * Check the return value for successful resume, or error. For a * successful resume, the DWC3 runtime PM resume routine will handle * the run stop sequence, so avoid duplicate operations here. 
@@ -2585,6 +2596,16 @@ static int dwc3_gadget_vbus_draw(struct usb_gadget *g, unsigned int mA) return ret; } +static void dwc3_gadget_async_callbacks(struct usb_gadget *g, bool enable) +{ + struct dwc3 *dwc = gadget_to_dwc(g); + unsigned long flags; + + spin_lock_irqsave(&dwc->lock, flags); + dwc->async_callbacks = enable; + spin_unlock_irqrestore(&dwc->lock, flags); +} + static const struct usb_gadget_ops dwc3_gadget_ops = { .get_frame = dwc3_gadget_get_frame, .wakeup = dwc3_gadget_wakeup, @@ -2596,6 +2617,7 @@ static const struct usb_gadget_ops dwc3_gadget_ops = { .udc_set_ssp_rate = dwc3_gadget_set_ssp_rate, .get_config_params = dwc3_gadget_config_params, .vbus_draw = dwc3_gadget_vbus_draw, + .udc_async_callbacks = dwc3_gadget_async_callbacks, }; /* -------------------------------------------------------------------------- */ @@ -3231,7 +3253,7 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc, static void dwc3_disconnect_gadget(struct dwc3 *dwc) { - if (dwc->gadget_driver && dwc->gadget_driver->disconnect) { + if (dwc->async_callbacks && dwc->gadget_driver->disconnect) { spin_unlock(&dwc->lock); dwc->gadget_driver->disconnect(dwc->gadget); spin_lock(&dwc->lock); @@ -3240,7 +3262,7 @@ static void dwc3_disconnect_gadget(struct dwc3 *dwc) static void dwc3_suspend_gadget(struct dwc3 *dwc) { - if (dwc->gadget_driver && dwc->gadget_driver->suspend) { + if (dwc->async_callbacks && dwc->gadget_driver->suspend) { spin_unlock(&dwc->lock); dwc->gadget_driver->suspend(dwc->gadget); spin_lock(&dwc->lock); @@ -3249,7 +3271,7 @@ static void dwc3_suspend_gadget(struct dwc3 *dwc) static void dwc3_resume_gadget(struct dwc3 *dwc) { - if (dwc->gadget_driver && dwc->gadget_driver->resume) { + if (dwc->async_callbacks && dwc->gadget_driver->resume) { spin_unlock(&dwc->lock); dwc->gadget_driver->resume(dwc->gadget); spin_lock(&dwc->lock); @@ -3261,7 +3283,7 @@ static void dwc3_reset_gadget(struct dwc3 *dwc) if (!dwc->gadget_driver) return; - if (dwc->gadget->speed != USB_SPEED_UNKNOWN) { + if (dwc->async_callbacks && dwc->gadget->speed != USB_SPEED_UNKNOWN) { spin_unlock(&dwc->lock); usb_gadget_udc_reset(dwc->gadget, dwc->gadget_driver); spin_lock(&dwc->lock); @@ -3585,7 +3607,7 @@ static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc) * implemented. 
*/ - if (dwc->gadget_driver && dwc->gadget_driver->resume) { + if (dwc->async_callbacks && dwc->gadget_driver->resume) { spin_unlock(&dwc->lock); dwc->gadget_driver->resume(dwc->gadget); spin_lock(&dwc->lock); diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c index 02683ac0719d..bb476e121eae 100644 --- a/drivers/usb/gadget/function/f_hid.c +++ b/drivers/usb/gadget/function/f_hid.c @@ -41,6 +41,7 @@ struct f_hidg { unsigned char bInterfaceSubClass; unsigned char bInterfaceProtocol; unsigned char protocol; + unsigned char idle; unsigned short report_desc_length; char *report_desc; unsigned short report_length; @@ -338,6 +339,11 @@ static ssize_t f_hidg_write(struct file *file, const char __user *buffer, spin_lock_irqsave(&hidg->write_spinlock, flags); + if (!hidg->req) { + spin_unlock_irqrestore(&hidg->write_spinlock, flags); + return -ESHUTDOWN; + } + #define WRITE_COND (!hidg->write_pending) try_again: /* write queue */ @@ -358,8 +364,14 @@ try_again: count = min_t(unsigned, count, hidg->report_length); spin_unlock_irqrestore(&hidg->write_spinlock, flags); - status = copy_from_user(req->buf, buffer, count); + if (!req) { + ERROR(hidg->func.config->cdev, "hidg->req is NULL\n"); + status = -ESHUTDOWN; + goto release_write_pending; + } + + status = copy_from_user(req->buf, buffer, count); if (status != 0) { ERROR(hidg->func.config->cdev, "copy_from_user error\n"); @@ -387,14 +399,17 @@ try_again: spin_unlock_irqrestore(&hidg->write_spinlock, flags); + if (!hidg->in_ep->enabled) { + ERROR(hidg->func.config->cdev, "in_ep is disabled\n"); + status = -ESHUTDOWN; + goto release_write_pending; + } + status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC); - if (status < 0) { - ERROR(hidg->func.config->cdev, - "usb_ep_queue error on int endpoint %zd\n", status); + if (status < 0) goto release_write_pending; - } else { + else status = count; - } return status; release_write_pending: @@ -523,6 +538,14 @@ static int hidg_setup(struct usb_function *f, goto respond; break; + case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8 + | HID_REQ_GET_IDLE): + VDBG(cdev, "get_idle\n"); + length = min_t(unsigned int, length, 1); + ((u8 *) req->buf)[0] = hidg->idle; + goto respond; + break; + case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8 | HID_REQ_SET_REPORT): VDBG(cdev, "set_report | wLength=%d\n", ctrl->wLength); @@ -546,6 +569,14 @@ static int hidg_setup(struct usb_function *f, goto stall; break; + case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8 + | HID_REQ_SET_IDLE): + VDBG(cdev, "set_idle\n"); + length = 0; + hidg->idle = value >> 8; + goto respond; + break; + case ((USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_INTERFACE) << 8 | USB_REQ_GET_DESCRIPTOR): switch (value >> 8) { @@ -773,6 +804,7 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f) hidg_interface_desc.bInterfaceSubClass = hidg->bInterfaceSubClass; hidg_interface_desc.bInterfaceProtocol = hidg->bInterfaceProtocol; hidg->protocol = HID_REPORT_PROTOCOL; + hidg->idle = 1; hidg_ss_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length); hidg_ss_in_comp_desc.wBytesPerInterval = cpu_to_le16(hidg->report_length); diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c index bffef8e47dac..281ca766698a 100644 --- a/drivers/usb/gadget/function/u_serial.c +++ b/drivers/usb/gadget/function/u_serial.c @@ -1198,7 +1198,7 @@ void gserial_free_line(unsigned char port_num) struct gs_port *port; 
mutex_lock(&ports[port_num].lock); - if (WARN_ON(!ports[port_num].port)) { + if (!ports[port_num].port) { mutex_unlock(&ports[port_num].lock); return; } diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c index 8e8588933628..15db7a3868fe 100644 --- a/drivers/usb/gadget/udc/fsl_qe_udc.c +++ b/drivers/usb/gadget/udc/fsl_qe_udc.c @@ -586,6 +586,7 @@ static int qe_ep_init(struct qe_udc *udc, case USB_SPEED_FULL: if (max <= 1023) break; + fallthrough; default: goto en_done; } diff --git a/drivers/usb/gadget/udc/max3420_udc.c b/drivers/usb/gadget/udc/max3420_udc.c index 34f4db554977..d2a2b20cc1ad 100644 --- a/drivers/usb/gadget/udc/max3420_udc.c +++ b/drivers/usb/gadget/udc/max3420_udc.c @@ -1255,12 +1255,14 @@ static int max3420_probe(struct spi_device *spi) err = devm_request_irq(&spi->dev, irq, max3420_irq_handler, 0, "max3420", udc); if (err < 0) - return err; + goto del_gadget; udc->thread_task = kthread_create(max3420_thread, udc, "max3420-thread"); - if (IS_ERR(udc->thread_task)) - return PTR_ERR(udc->thread_task); + if (IS_ERR(udc->thread_task)) { + err = PTR_ERR(udc->thread_task); + goto del_gadget; + } irq = of_irq_get_byname(spi->dev.of_node, "vbus"); if (irq <= 0) { /* no vbus irq implies self-powered design */ @@ -1280,10 +1282,14 @@ static int max3420_probe(struct spi_device *spi) err = devm_request_irq(&spi->dev, irq, max3420_vbus_handler, 0, "vbus", udc); if (err < 0) - return err; + goto del_gadget; } return 0; + +del_gadget: + usb_del_gadget_udc(&udc->gadget); + return err; } static int max3420_remove(struct spi_device *spi) diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c index a54d1cef17db..c0ca7144e512 100644 --- a/drivers/usb/gadget/udc/tegra-xudc.c +++ b/drivers/usb/gadget/udc/tegra-xudc.c @@ -3853,6 +3853,7 @@ static int tegra_xudc_probe(struct platform_device *pdev) return 0; free_eps: + pm_runtime_disable(&pdev->dev); tegra_xudc_free_eps(xudc); free_event_ring: tegra_xudc_free_event_ring(xudc); diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 36f5bf6a0752..10b0365f3439 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -703,24 +703,28 @@ EXPORT_SYMBOL_GPL(ehci_setup); static irqreturn_t ehci_irq (struct usb_hcd *hcd) { struct ehci_hcd *ehci = hcd_to_ehci (hcd); - u32 status, masked_status, pcd_status = 0, cmd; + u32 status, current_status, masked_status, pcd_status = 0; + u32 cmd; int bh; spin_lock(&ehci->lock); - status = ehci_readl(ehci, &ehci->regs->status); + status = 0; + current_status = ehci_readl(ehci, &ehci->regs->status); +restart: /* e.g. cardbus physical eject */ - if (status == ~(u32) 0) { + if (current_status == ~(u32) 0) { ehci_dbg (ehci, "device removed\n"); goto dead; } + status |= current_status; /* * We don't use STS_FLR, but some controllers don't like it to * remain on, so mask it out along with the other status bits. */ - masked_status = status & (INTR_MASK | STS_FLR); + masked_status = current_status & (INTR_MASK | STS_FLR); /* Shared IRQ? 
*/ if (!masked_status || unlikely(ehci->rh_state == EHCI_RH_HALTED)) { @@ -730,6 +734,12 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd) /* clear (just) interrupts */ ehci_writel(ehci, masked_status, &ehci->regs->status); + + /* For edge interrupts, don't race with an interrupt bit being raised */ + current_status = ehci_readl(ehci, &ehci->regs->status); + if (current_status & INTR_MASK) + goto restart; + cmd = ehci_readl(ehci, &ehci->regs->command); bh = 0; diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c index e7a8e0609853..59cc1bc7f12f 100644 --- a/drivers/usb/host/max3421-hcd.c +++ b/drivers/usb/host/max3421-hcd.c @@ -153,8 +153,6 @@ struct max3421_hcd { */ struct urb *curr_urb; enum scheduling_pass sched_pass; - struct usb_device *loaded_dev; /* dev that's loaded into the chip */ - int loaded_epnum; /* epnum whose toggles are loaded */ int urb_done; /* > 0 -> no errors, < 0: errno */ size_t curr_len; u8 hien; @@ -492,39 +490,17 @@ max3421_set_speed(struct usb_hcd *hcd, struct usb_device *dev) * Caller must NOT hold HCD spinlock. */ static void -max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum, - int force_toggles) +max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum) { - struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); - int old_epnum, same_ep, rcvtog, sndtog; - struct usb_device *old_dev; + int rcvtog, sndtog; u8 hctl; - old_dev = max3421_hcd->loaded_dev; - old_epnum = max3421_hcd->loaded_epnum; - - same_ep = (dev == old_dev && epnum == old_epnum); - if (same_ep && !force_toggles) - return; - - if (old_dev && !same_ep) { - /* save the old end-points toggles: */ - u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL); - - rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1; - sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1; - - /* no locking: HCD (i.e., we) own toggles, don't we? */ - usb_settoggle(old_dev, old_epnum, 0, rcvtog); - usb_settoggle(old_dev, old_epnum, 1, sndtog); - } /* setup new endpoint's toggle bits: */ rcvtog = usb_gettoggle(dev, epnum, 0); sndtog = usb_gettoggle(dev, epnum, 1); hctl = (BIT(rcvtog + MAX3421_HCTL_RCVTOG0_BIT) | BIT(sndtog + MAX3421_HCTL_SNDTOG0_BIT)); - max3421_hcd->loaded_epnum = epnum; spi_wr8(hcd, MAX3421_REG_HCTL, hctl); /* @@ -532,7 +508,6 @@ max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum, * address-assignment so it's best to just always load the * address whenever the end-point changed/was forced. 
*/ - max3421_hcd->loaded_dev = dev; spi_wr8(hcd, MAX3421_REG_PERADDR, dev->devnum); } @@ -667,7 +642,7 @@ max3421_select_and_start_urb(struct usb_hcd *hcd) struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); struct urb *urb, *curr_urb = NULL; struct max3421_ep *max3421_ep; - int epnum, force_toggles = 0; + int epnum; struct usb_host_endpoint *ep; struct list_head *pos; unsigned long flags; @@ -777,7 +752,6 @@ done: usb_settoggle(urb->dev, epnum, 0, 1); usb_settoggle(urb->dev, epnum, 1, 1); max3421_ep->pkt_state = PKT_STATE_SETUP; - force_toggles = 1; } else max3421_ep->pkt_state = PKT_STATE_TRANSFER; } @@ -785,7 +759,7 @@ done: spin_unlock_irqrestore(&max3421_hcd->lock, flags); max3421_ep->last_active = max3421_hcd->frame_number; - max3421_set_address(hcd, urb->dev, epnum, force_toggles); + max3421_set_address(hcd, urb->dev, epnum); max3421_set_speed(hcd, urb->dev); max3421_next_transfer(hcd, 0); return 1; @@ -1379,6 +1353,16 @@ max3421_urb_done(struct usb_hcd *hcd) status = 0; urb = max3421_hcd->curr_urb; if (urb) { + /* save the old end-points toggles: */ + u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL); + int rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1; + int sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1; + int epnum = usb_endpoint_num(&urb->ep->desc); + + /* no locking: HCD (i.e., we) own toggles, don't we? */ + usb_settoggle(urb->dev, epnum, 0, rcvtog); + usb_settoggle(urb->dev, epnum, 1, sndtog); + max3421_hcd->curr_urb = NULL; spin_lock_irqsave(&max3421_hcd->lock, flags); usb_hcd_unlink_urb_from_ep(hcd, urb); diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c index 9bbd7ddd0003..a24aea3d2759 100644 --- a/drivers/usb/host/ohci-at91.c +++ b/drivers/usb/host/ohci-at91.c @@ -611,8 +611,6 @@ ohci_hcd_at91_drv_suspend(struct device *dev) if (ohci_at91->wakeup) enable_irq_wake(hcd->irq); - ohci_at91_port_suspend(ohci_at91->sfr_regmap, 1); - ret = ohci_suspend(hcd, ohci_at91->wakeup); if (ret) { if (ohci_at91->wakeup) @@ -632,7 +630,10 @@ ohci_hcd_at91_drv_suspend(struct device *dev) /* flush the writes */ (void) ohci_readl (ohci, &ohci->regs->control); msleep(1); + ohci_at91_port_suspend(ohci_at91->sfr_regmap, 1); at91_stop_clock(ohci_at91); + } else { + ohci_at91_port_suspend(ohci_at91->sfr_regmap, 1); } return ret; @@ -644,6 +645,8 @@ ohci_hcd_at91_drv_resume(struct device *dev) struct usb_hcd *hcd = dev_get_drvdata(dev); struct ohci_at91_priv *ohci_at91 = hcd_to_ohci_at91_priv(hcd); + ohci_at91_port_suspend(ohci_at91->sfr_regmap, 0); + if (ohci_at91->wakeup) disable_irq_wake(hcd->irq); else @@ -651,8 +654,6 @@ ohci_hcd_at91_drv_resume(struct device *dev) ohci_resume(hcd, false); - ohci_at91_port_suspend(ohci_at91->sfr_regmap, 0); - return 0; } diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index e9b18fc17617..151e93c4bd57 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -1638,11 +1638,12 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf) * Inform the usbcore about resume-in-progress by returning * a non-zero value even if there are no status changes. */ + spin_lock_irqsave(&xhci->lock, flags); + status = bus_state->resuming_ports; mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC; - spin_lock_irqsave(&xhci->lock, flags); /* For each port, did anything change? If so, set that bit in buf. 
*/ for (i = 0; i < max_ports; i++) { temp = readl(ports[i]->addr); diff --git a/drivers/usb/host/xhci-pci-renesas.c b/drivers/usb/host/xhci-pci-renesas.c index 1da647961c25..5923844ed821 100644 --- a/drivers/usb/host/xhci-pci-renesas.c +++ b/drivers/usb/host/xhci-pci-renesas.c @@ -207,8 +207,7 @@ static int renesas_check_rom_state(struct pci_dev *pdev) return 0; case RENESAS_ROM_STATUS_NO_RESULT: /* No result yet */ - dev_dbg(&pdev->dev, "Unknown ROM status ...\n"); - break; + return 0; case RENESAS_ROM_STATUS_ERROR: /* Error State */ default: /* All other states are marked as "Reserved states" */ @@ -225,12 +224,13 @@ static int renesas_fw_check_running(struct pci_dev *pdev) u8 fw_state; int err; - /* - * Only if device has ROM and loaded FW we can skip loading and - * return success. Otherwise (even unknown state), attempt to load FW. - */ - if (renesas_check_rom(pdev) && !renesas_check_rom_state(pdev)) - return 0; + /* Check if device has ROM and loaded, if so skip everything */ + err = renesas_check_rom(pdev); + if (err) { /* we have rom */ + err = renesas_check_rom_state(pdev); + if (!err) + return err; + } /* * Test if the device is actually needing the firmware. As most diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 18c2bbddf080..1c9a7957c45c 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -636,7 +636,14 @@ static const struct pci_device_id pci_ids[] = { { /* end: all zeroes */ } }; MODULE_DEVICE_TABLE(pci, pci_ids); + +/* + * Without CONFIG_USB_XHCI_PCI_RENESAS renesas_xhci_check_request_fw() won't + * load firmware, so don't encumber the xhci-pci driver with it. + */ +#if IS_ENABLED(CONFIG_USB_XHCI_PCI_RENESAS) MODULE_FIRMWARE("renesas_usb_fw.mem"); +#endif /* pci driver glue; this is a "new style" PCI driver module */ static struct pci_driver xhci_pci_driver = { diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c index 640a46f0d118..f086960fe2b5 100644 --- a/drivers/usb/musb/omap2430.c +++ b/drivers/usb/musb/omap2430.c @@ -35,6 +35,7 @@ struct omap2430_glue { struct device *control_otghs; unsigned int is_runtime_suspended:1; unsigned int needs_resume:1; + unsigned int phy_suspended:1; }; #define glue_to_musb(g) platform_get_drvdata(g->musb) @@ -458,8 +459,10 @@ static int omap2430_runtime_suspend(struct device *dev) omap2430_low_level_exit(musb); - phy_power_off(musb->phy); - phy_exit(musb->phy); + if (!glue->phy_suspended) { + phy_power_off(musb->phy); + phy_exit(musb->phy); + } glue->is_runtime_suspended = 1; @@ -474,8 +477,10 @@ static int omap2430_runtime_resume(struct device *dev) if (!musb) return 0; - phy_init(musb->phy); - phy_power_on(musb->phy); + if (!glue->phy_suspended) { + phy_init(musb->phy); + phy_power_on(musb->phy); + } omap2430_low_level_init(musb); musb_writel(musb->mregs, OTG_INTERFSEL, @@ -489,9 +494,23 @@ static int omap2430_runtime_resume(struct device *dev) return 0; } +/* I2C and SPI PHYs need to be suspended before the glue layer */ static int omap2430_suspend(struct device *dev) { struct omap2430_glue *glue = dev_get_drvdata(dev); + struct musb *musb = glue_to_musb(glue); + + phy_power_off(musb->phy); + phy_exit(musb->phy); + glue->phy_suspended = 1; + + return 0; +} + +/* Glue layer needs to be suspended after musb_suspend() */ +static int omap2430_suspend_late(struct device *dev) +{ + struct omap2430_glue *glue = dev_get_drvdata(dev); if (glue->is_runtime_suspended) return 0; @@ -501,7 +520,7 @@ static int omap2430_suspend(struct device *dev) return 
omap2430_runtime_suspend(dev); } -static int omap2430_resume(struct device *dev) +static int omap2430_resume_early(struct device *dev) { struct omap2430_glue *glue = dev_get_drvdata(dev); @@ -513,10 +532,24 @@ static int omap2430_resume(struct device *dev) return omap2430_runtime_resume(dev); } +static int omap2430_resume(struct device *dev) +{ + struct omap2430_glue *glue = dev_get_drvdata(dev); + struct musb *musb = glue_to_musb(glue); + + phy_init(musb->phy); + phy_power_on(musb->phy); + glue->phy_suspended = 0; + + return 0; +} + static const struct dev_pm_ops omap2430_pm_ops = { .runtime_suspend = omap2430_runtime_suspend, .runtime_resume = omap2430_runtime_resume, .suspend = omap2430_suspend, + .suspend_late = omap2430_suspend_late, + .resume_early = omap2430_resume_early, .resume = omap2430_resume, }; diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c index 83ed5089475a..1b24492bb4e5 100644 --- a/drivers/usb/phy/phy.c +++ b/drivers/usb/phy/phy.c @@ -86,10 +86,10 @@ static struct usb_phy *__device_to_usb_phy(struct device *dev) list_for_each_entry(usb_phy, &phy_list, head) { if (usb_phy->dev == dev) - break; + return usb_phy; } - return usb_phy; + return NULL; } static void usb_phy_set_default_current(struct usb_phy *usb_phy) @@ -150,8 +150,14 @@ static int usb_phy_uevent(struct device *dev, struct kobj_uevent_env *env) struct usb_phy *usb_phy; char uchger_state[50] = { 0 }; char uchger_type[50] = { 0 }; + unsigned long flags; + spin_lock_irqsave(&phy_lock, flags); usb_phy = __device_to_usb_phy(dev); + spin_unlock_irqrestore(&phy_lock, flags); + + if (!usb_phy) + return -ENODEV; snprintf(uchger_state, ARRAY_SIZE(uchger_state), "USB_CHARGER_STATE=%s", usb_chger_state[usb_phy->chg_state]); diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index b5e7991dc7d9..a3c2b01ccf7b 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c @@ -101,6 +101,8 @@ static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo, #define usbhsf_dma_map(p) __usbhsf_dma_map_ctrl(p, 1) #define usbhsf_dma_unmap(p) __usbhsf_dma_map_ctrl(p, 0) static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map); +static void usbhsf_tx_irq_ctrl(struct usbhs_pipe *pipe, int enable); +static void usbhsf_rx_irq_ctrl(struct usbhs_pipe *pipe, int enable); struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt) { struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); @@ -123,6 +125,11 @@ struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt) if (chan) { dmaengine_terminate_all(chan); usbhsf_dma_unmap(pkt); + } else { + if (usbhs_pipe_is_dir_in(pipe)) + usbhsf_rx_irq_ctrl(pipe, 0); + else + usbhsf_tx_irq_ctrl(pipe, 0); } usbhs_pipe_clear_without_sequence(pipe, 0, 0); diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index 2db917eab799..8a521b5ea769 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c @@ -851,6 +851,7 @@ static struct usb_serial_driver ch341_device = { .owner = THIS_MODULE, .name = "ch341-uart", }, + .bulk_in_size = 512, .id_table = id_table, .num_ports = 1, .open = ch341_open, diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 09b845d0da41..3c80bfbf3bec 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -155,6 +155,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x89A4) }, /* CESINEL FTBC Flexible Thyristor Bridge Controller */ { USB_DEVICE(0x10C4, 0x89FB) }, /* Qivicon 
ZigBee USB Radio Stick */ { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */ + { USB_DEVICE(0x10C4, 0x8A5B) }, /* CEL EM3588 ZigBee USB Stick */ { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */ { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */ { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ @@ -202,8 +203,8 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */ { USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */ { USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */ - { USB_DEVICE(0x1901, 0x0197) }, /* GE CS1000 Display serial interface */ - { USB_DEVICE(0x1901, 0x0198) }, /* GE CS1000 M.2 Key E serial interface */ + { USB_DEVICE(0x1901, 0x0197) }, /* GE CS1000 M.2 Key E serial interface */ + { USB_DEVICE(0x1901, 0x0198) }, /* GE CS1000 Display serial interface */ { USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */ { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */ { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 4a1f3a95d017..33bbb3470ca3 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -219,6 +219,7 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(FTDI_VID, FTDI_MTXORB_6_PID) }, { USB_DEVICE(FTDI_VID, FTDI_R2000KU_TRUE_RNG) }, { USB_DEVICE(FTDI_VID, FTDI_VARDAAN_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_AUTO_M3_OP_COM_V2_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0100_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0101_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0102_PID) }, diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index add602bebd82..755858ca20ba 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -159,6 +159,9 @@ /* Vardaan Enterprises Serial Interface VEUSB422R3 */ #define FTDI_VARDAAN_PID 0xF070 +/* Auto-M3 Ltd. - OP-COM USB V2 - OBD interface Adapter */ +#define FTDI_AUTO_M3_OP_COM_V2_PID 0x4f50 + /* * Xsens Technologies BV products (http://www.xsens.com). 
*/ diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 7608584ef4fe..039450069ca4 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -238,6 +238,7 @@ static void option_instat_callback(struct urb *urb); #define QUECTEL_PRODUCT_UC15 0x9090 /* These u-blox products use Qualcomm's vendor ID */ #define UBLOX_PRODUCT_R410M 0x90b2 +#define UBLOX_PRODUCT_R6XX 0x90fa /* These Yuga products use Qualcomm's vendor ID */ #define YUGA_PRODUCT_CLM920_NC5 0x9625 @@ -1101,6 +1102,8 @@ static const struct usb_device_id option_ids[] = { /* u-blox products using Qualcomm vendor ID */ { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M), .driver_info = RSVD(1) | RSVD(3) }, + { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R6XX), + .driver_info = RSVD(3) }, /* Quectel products using Quectel vendor ID */ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0xff, 0xff), .driver_info = NUMEP2 }, @@ -1200,6 +1203,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(2) | RSVD(3) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1055, 0xff), /* Telit FN980 (PCIe) */ .driver_info = NCTRL(0) | RSVD(1) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1056, 0xff), /* Telit FD980 */ + .driver_info = NCTRL(2) | RSVD(3) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910), .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM), diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 2f2f5047452b..930b3d50a330 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -418,24 +418,34 @@ static int pl2303_detect_type(struct usb_serial *serial) bcdDevice = le16_to_cpu(desc->bcdDevice); bcdUSB = le16_to_cpu(desc->bcdUSB); - switch (bcdDevice) { - case 0x100: - /* - * Assume it's an HXN-type if the device doesn't support the old read - * request value. - */ - if (bcdUSB == 0x200 && !pl2303_supports_hx_status(serial)) - return TYPE_HXN; + switch (bcdUSB) { + case 0x110: + switch (bcdDevice) { + case 0x300: + return TYPE_HX; + case 0x400: + return TYPE_HXD; + default: + return TYPE_HX; + } break; - case 0x300: - if (bcdUSB == 0x200) + case 0x200: + switch (bcdDevice) { + case 0x100: + case 0x305: + /* + * Assume it's an HXN-type if the device doesn't + * support the old read request value. + */ + if (!pl2303_supports_hx_status(serial)) + return TYPE_HXN; + break; + case 0x300: return TYPE_TA; - - return TYPE_HX; - case 0x400: - return TYPE_HXD; - case 0x500: - return TYPE_TB; + case 0x500: + return TYPE_TB; + } + break; } dev_err(&serial->interface->dev, diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index f9677a5ec31b..c35a6db993f1 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h @@ -45,6 +45,13 @@ UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NO_REPORT_OPCODES | US_FL_NO_SAME), +/* Reported-by: Julian Sikorski <belegdol@gmail.com> */ +UNUSUAL_DEV(0x059f, 0x1061, 0x0000, 0x9999, + "LaCie", + "Rugged USB3-FW", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_IGNORE_UAS), + /* * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI * commands in UAS mode. Observed with the 1.28 firmware; are there others? 
*/ diff --git a/drivers/usb/typec/stusb160x.c b/drivers/usb/typec/stusb160x.c index 6eaeba9b096e..e7745d1c2a5c 100644 --- a/drivers/usb/typec/stusb160x.c +++ b/drivers/usb/typec/stusb160x.c @@ -686,6 +686,15 @@ static int stusb160x_probe(struct i2c_client *client) return -ENODEV; /* + * This fwnode has a "compatible" property, but is never populated as a + * struct device. Instead we simply parse it to read the properties. + * This breaks fw_devlink=on. To maintain backward compatibility + * with existing DT files, we work around this by deleting any + * fwnode_links to/from this fwnode. + */ + fw_devlink_purge_absent_suppliers(fwnode); + + /* * When both VDD and VSYS power supplies are present, the low power * supply VSYS is selected when VSYS voltage is above 3.1 V. * Otherwise VDD is selected. @@ -739,10 +748,6 @@ static int stusb160x_probe(struct i2c_client *client) typec_set_pwr_opmode(chip->port, chip->pwr_opmode); if (client->irq) { - ret = stusb160x_irq_init(chip, client->irq); - if (ret) - goto port_unregister; - chip->role_sw = fwnode_usb_role_switch_get(fwnode); if (IS_ERR(chip->role_sw)) { ret = PTR_ERR(chip->role_sw); @@ -752,6 +757,10 @@ static int stusb160x_probe(struct i2c_client *client) ret); goto port_unregister; } + + ret = stusb160x_irq_init(chip, client->irq); + if (ret) + goto role_sw_put; } else { /* * If Source or Dual power role, need to enable VDD supply @@ -775,6 +784,9 @@ static int stusb160x_probe(struct i2c_client *client) return 0; +role_sw_put: + if (chip->role_sw) + usb_role_switch_put(chip->role_sw); port_unregister: typec_unregister_port(chip->port); all_reg_disable: diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index 5b22a1c931a9..f4079b5cb26d 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -1737,6 +1737,10 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev, return rlen; } +static void tcpm_pd_handle_msg(struct tcpm_port *port, + enum pd_msg_request message, + enum tcpm_ams ams); + static void tcpm_handle_vdm_request(struct tcpm_port *port, const __le32 *payload, int cnt) { @@ -1764,11 +1768,11 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port, port->vdm_state = VDM_STATE_DONE; } - if (PD_VDO_SVDM(p[0])) { + if (PD_VDO_SVDM(p[0]) && (adev || tcpm_vdm_ams(port) || port->nr_snk_vdo)) { rlen = tcpm_pd_svdm(port, adev, p, cnt, response, &adev_action); } else { if (port->negotiated_rev >= PD_REV30) - tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP); + tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS); } /* @@ -2471,10 +2475,7 @@ static void tcpm_pd_data_request(struct tcpm_port *port, NONE_AMS); break; case PD_DATA_VENDOR_DEF: - if (tcpm_vdm_ams(port) || port->nr_snk_vdo) - tcpm_handle_vdm_request(port, msg->payload, cnt); - else if (port->negotiated_rev > PD_REV20) - tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS); + tcpm_handle_vdm_request(port, msg->payload, cnt); break; case PD_DATA_BIST: port->bist_request = le32_to_cpu(msg->payload[0]); @@ -5369,7 +5370,7 @@ EXPORT_SYMBOL_GPL(tcpm_pd_hard_reset); void tcpm_sink_frs(struct tcpm_port *port) { spin_lock(&port->pd_event_lock); - port->pd_events = TCPM_FRS_EVENT; + port->pd_events |= TCPM_FRS_EVENT; spin_unlock(&port->pd_event_lock); kthread_queue_work(port->wq, &port->event_work); } @@ -5378,7 +5379,7 @@ EXPORT_SYMBOL_GPL(tcpm_sink_frs); void tcpm_sourcing_vbus(struct tcpm_port *port) { spin_lock(&port->pd_event_lock); - port->pd_events |= 
TCPM_SOURCING_VBUS; spin_unlock(&port->pd_event_lock); kthread_queue_work(port->wq, &port->event_work); } diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c index 938219bc1b4b..21b3ae25c76d 100644 --- a/drivers/usb/typec/tipd/core.c +++ b/drivers/usb/typec/tipd/core.c @@ -629,6 +629,15 @@ static int tps6598x_probe(struct i2c_client *client) if (!fwnode) return -ENODEV; + /* + * This fwnode has a "compatible" property, but is never populated as a + * struct device. Instead we simply parse it to read the properties. + * This breaks fw_devlink=on. To maintain backward compatibility + * with existing DT files, we work around this by deleting any + * fwnode_links to/from this fwnode. + */ + fw_devlink_purge_absent_suppliers(fwnode); + tps->role_sw = fwnode_usb_role_switch_get(fwnode); if (IS_ERR(tps->role_sw)) { ret = PTR_ERR(tps->role_sw); diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c index 21b78f1cd521..351c6cfb24c3 100644 --- a/drivers/vdpa/ifcvf/ifcvf_main.c +++ b/drivers/vdpa/ifcvf/ifcvf_main.c @@ -493,9 +493,9 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id) adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa, dev, &ifc_vdpa_ops, NULL); - if (adapter == NULL) { + if (IS_ERR(adapter)) { IFCVF_ERR(pdev, "Failed to allocate vDPA structure"); - return -ENOMEM; + return PTR_ERR(adapter); } pci_set_master(pdev); diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c index dcee6039e966..e59135fa867e 100644 --- a/drivers/vdpa/mlx5/core/mr.c +++ b/drivers/vdpa/mlx5/core/mr.c @@ -512,11 +512,6 @@ out: mutex_unlock(&mr->mkey_mtx); } -static bool map_empty(struct vhost_iotlb *iotlb) -{ - return !vhost_iotlb_itree_first(iotlb, 0, U64_MAX); -} - int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb, bool *change_map) { @@ -524,10 +519,6 @@ int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *io int err = 0; *change_map = false; - if (map_empty(iotlb)) { - mlx5_vdpa_destroy_mr(mvdev); - return 0; - } mutex_lock(&mr->mkey_mtx); if (mr->initialized) { mlx5_vdpa_info(mvdev, "memory map update\n"); diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index 2a31467f7ac5..3cc12fcab08d 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -526,7 +526,6 @@ static int cq_create(struct mlx5_vdpa_net *ndev, u16 idx, u32 num_ent) void __iomem *uar_page = ndev->mvdev.res.uar->map; u32 out[MLX5_ST_SZ_DW(create_cq_out)]; struct mlx5_vdpa_cq *vcq = &mvq->cq; - unsigned int irqn; __be64 *pas; int inlen; void *cqc; @@ -566,7 +565,7 @@ static int cq_create(struct mlx5_vdpa_net *ndev, u16 idx, u32 num_ent) /* Use vector 0 by default. Consider adding code to choose least used * vector. 
*/ - err = mlx5_vector2eqn(mdev, 0, &eqn, &irqn); + err = mlx5_vector2eqn(mdev, 0, &eqn); if (err) goto err_vec; @@ -753,12 +752,12 @@ static int get_queue_type(struct mlx5_vdpa_net *ndev) type_mask = MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, virtio_queue_type); /* prefer split queue */ - if (type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED) - return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED; + if (type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT) + return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT; - WARN_ON(!(type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT)); + WARN_ON(!(type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED)); - return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT; + return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED; } static bool vq_is_tx(u16 idx) @@ -2030,6 +2029,12 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name) return -ENOSPC; mdev = mgtdev->madev->mdev; + if (!(MLX5_CAP_DEV_VDPA_EMULATION(mdev, virtio_queue_type) & + MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT)) { + dev_warn(mdev->device, "missing support for split virtqueues\n"); + return -EOPNOTSUPP; + } + /* we save one virtqueue for control virtqueue should we require it */ max_vqs = MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues); max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS); diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c index 14e024de5cbf..c621cf7feec0 100644 --- a/drivers/vdpa/vdpa_sim/vdpa_sim.c +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c @@ -251,8 +251,10 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr) vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops, dev_attr->name); - if (!vdpasim) + if (IS_ERR(vdpasim)) { + ret = PTR_ERR(vdpasim); goto err_alloc; + } vdpasim->dev_attr = *dev_attr; INIT_WORK(&vdpasim->work, dev_attr->work_fn); diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c index 7b4a6396c553..fe0527329857 100644 --- a/drivers/vdpa/virtio_pci/vp_vdpa.c +++ b/drivers/vdpa/virtio_pci/vp_vdpa.c @@ -436,9 +436,9 @@ static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id) vp_vdpa = vdpa_alloc_device(struct vp_vdpa, vdpa, dev, &vp_vdpa_ops, NULL); - if (vp_vdpa == NULL) { + if (IS_ERR(vp_vdpa)) { dev_err(dev, "vp_vdpa: Failed to allocate vDPA structure\n"); - return -ENOMEM; + return PTR_ERR(vp_vdpa); } mdev = &vp_vdpa->mdev; diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 210ab35a7ebf..9479f7f79217 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -614,7 +614,8 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v, long pinned; int ret = 0; - if (msg->iova < v->range.first || + if (msg->iova < v->range.first || !msg->size || + msg->iova > U64_MAX - msg->size + 1 || msg->iova + msg->size - 1 > v->range.last) return -EINVAL; diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index b9e853e6094d..59edb5a1ffe2 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -735,10 +735,16 @@ static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz) (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8); } +/* Make sure 64 bit math will not overflow. */ static bool vhost_overflow(u64 uaddr, u64 size) { - /* Make sure 64 bit math will not overflow. 
*/ - return uaddr > ULONG_MAX || size > ULONG_MAX || uaddr > ULONG_MAX - size; + if (uaddr > ULONG_MAX || size > ULONG_MAX) + return true; + + if (!size) + return false; + + return uaddr > ULONG_MAX - size + 1; } /* Caller should have vq mutex and device mutex. */ diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c index 4af8fa259d65..14e2043d7685 100644 --- a/drivers/vhost/vringh.c +++ b/drivers/vhost/vringh.c @@ -359,7 +359,7 @@ __vringh_iov(struct vringh *vrh, u16 i, iov = wiov; else { iov = riov; - if (unlikely(wiov && wiov->i)) { + if (unlikely(wiov && wiov->used)) { vringh_bad("Readable desc %p after writable", &descs[i]); err = -EINVAL; diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index 98f193078c05..1c855145711b 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -970,13 +970,11 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var) fb_var_to_videomode(&mode2, &info->var); /* make sure we don't delete the videomode of current var */ ret = fb_mode_is_equal(&mode1, &mode2); - - if (!ret) - fbcon_mode_deleted(info, &mode1); - - if (!ret) - fb_delete_videomode(&mode1, &info->modelist); - + if (!ret) { + ret = fbcon_mode_deleted(info, &mode1); + if (!ret) + fb_delete_videomode(&mode1, &info->modelist); + } return ret ? -EINVAL : 0; } diff --git a/drivers/video/fbdev/xilinxfb.c b/drivers/video/fbdev/xilinxfb.c index ffbf900648d9..438e2c78142f 100644 --- a/drivers/video/fbdev/xilinxfb.c +++ b/drivers/video/fbdev/xilinxfb.c @@ -241,6 +241,8 @@ xilinx_fb_blank(int blank_mode, struct fb_info *fbi) case FB_BLANK_POWERDOWN: /* turn off panel */ xilinx_fb_out32(drvdata, REG_CTRL, 0); + break; + default: break; } diff --git a/drivers/virt/acrn/vm.c b/drivers/virt/acrn/vm.c index 0d002a355a93..fbc9f1042000 100644 --- a/drivers/virt/acrn/vm.c +++ b/drivers/virt/acrn/vm.c @@ -64,6 +64,14 @@ int acrn_vm_destroy(struct acrn_vm *vm) test_and_set_bit(ACRN_VM_FLAG_DESTROYED, &vm->flags)) return 0; + ret = hcall_destroy_vm(vm->vmid); + if (ret < 0) { + dev_err(acrn_dev.this_device, + "Failed to destroy VM %u\n", vm->vmid); + clear_bit(ACRN_VM_FLAG_DESTROYED, &vm->flags); + return ret; + } + /* Remove from global VM list */ write_lock_bh(&acrn_vm_list_lock); list_del_init(&vm->list); @@ -78,14 +86,6 @@ int acrn_vm_destroy(struct acrn_vm *vm) vm->monitor_page = NULL; } - ret = hcall_destroy_vm(vm->vmid); - if (ret < 0) { - dev_err(acrn_dev.this_device, - "Failed to destroy VM %u\n", vm->vmid); - clear_bit(ACRN_VM_FLAG_DESTROYED, &vm->flags); - return ret; - } - acrn_vm_all_ram_unmap(vm); dev_dbg(acrn_dev.this_device, "VM %u destroyed.\n", vm->vmid); diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index 4b15c00c0a0a..49984d2cba24 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -355,6 +355,7 @@ int register_virtio_device(struct virtio_device *dev) virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE); INIT_LIST_HEAD(&dev->vqs); + spin_lock_init(&dev->vqs_list_lock); /* * device_add() causes the bus infrastructure to look for a matching diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index 222d630c41fc..b35bb2d57f62 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -576,6 +576,13 @@ static void virtio_pci_remove(struct pci_dev *pci_dev) struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); struct device *dev = get_device(&vp_dev->vdev.dev); + /* + * Device is marked broken on surprise removal so 
that virtio upper + * layers can abort any ongoing operation. + */ + if (!pci_device_is_present(pci_dev)) + virtio_break_device(&vp_dev->vdev); + pci_disable_sriov(pci_dev); unregister_virtio_device(&vp_dev->vdev); diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 89bfe46a8a7f..dd95dfd85e98 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -11,6 +11,7 @@ #include <linux/module.h> #include <linux/hrtimer.h> #include <linux/dma-mapping.h> +#include <linux/spinlock.h> #include <xen/xen.h> #ifdef DEBUG @@ -1755,7 +1756,9 @@ static struct virtqueue *vring_create_virtqueue_packed( cpu_to_le16(vq->packed.event_flags_shadow); } + spin_lock(&vdev->vqs_list_lock); list_add_tail(&vq->vq.list, &vdev->vqs); + spin_unlock(&vdev->vqs_list_lock); return &vq->vq; err_desc_extra: @@ -2229,7 +2232,9 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index, memset(vq->split.desc_state, 0, vring.num * sizeof(struct vring_desc_state_split)); + spin_lock(&vdev->vqs_list_lock); list_add_tail(&vq->vq.list, &vdev->vqs); + spin_unlock(&vdev->vqs_list_lock); return &vq->vq; err_extra: @@ -2291,6 +2296,10 @@ void vring_del_virtqueue(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); + spin_lock(&vq->vq.vdev->vqs_list_lock); + list_del(&_vq->list); + spin_unlock(&vq->vq.vdev->vqs_list_lock); + if (vq->we_own_ring) { if (vq->packed_ring) { vring_free_queue(vq->vq.vdev, @@ -2321,7 +2330,6 @@ void vring_del_virtqueue(struct virtqueue *_vq) kfree(vq->split.desc_state); kfree(vq->split.desc_extra); } - list_del(&_vq->list); kfree(vq); } EXPORT_SYMBOL_GPL(vring_del_virtqueue); @@ -2373,7 +2381,7 @@ bool virtqueue_is_broken(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); - return vq->broken; + return READ_ONCE(vq->broken); } EXPORT_SYMBOL_GPL(virtqueue_is_broken); @@ -2385,10 +2393,14 @@ void virtio_break_device(struct virtio_device *dev) { struct virtqueue *_vq; + spin_lock(&dev->vqs_list_lock); list_for_each_entry(_vq, &dev->vqs, list) { struct vring_virtqueue *vq = to_vvq(_vq); - vq->broken = true; + + /* Pairs with READ_ONCE() in virtqueue_is_broken(). */ + WRITE_ONCE(vq->broken, true); } + spin_unlock(&dev->vqs_list_lock); } EXPORT_SYMBOL_GPL(virtio_break_device); diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c index e1a141135992..72eaef2caeb1 100644 --- a/drivers/virtio/virtio_vdpa.c +++ b/drivers/virtio/virtio_vdpa.c @@ -151,6 +151,9 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index, if (!name) return NULL; + if (index >= vdpa->nvqs) + return ERR_PTR(-ENOENT); + /* Queue shouldn't already be set up. 
*/ if (ops->get_vq_ready(vdpa, index)) return ERR_PTR(-ENOENT); diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index d7e361fb0548..a78704ae3618 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -198,12 +198,12 @@ static void disable_dynirq(struct irq_data *data); static DEFINE_PER_CPU(unsigned int, irq_epoch); -static void clear_evtchn_to_irq_row(unsigned row) +static void clear_evtchn_to_irq_row(int *evtchn_row) { unsigned col; for (col = 0; col < EVTCHN_PER_ROW; col++) - WRITE_ONCE(evtchn_to_irq[row][col], -1); + WRITE_ONCE(evtchn_row[col], -1); } static void clear_evtchn_to_irq_all(void) @@ -213,7 +213,7 @@ static void clear_evtchn_to_irq_all(void) for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) { if (evtchn_to_irq[row] == NULL) continue; - clear_evtchn_to_irq_row(row); + clear_evtchn_to_irq_row(evtchn_to_irq[row]); } } @@ -221,6 +221,7 @@ static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq) { unsigned row; unsigned col; + int *evtchn_row; if (evtchn >= xen_evtchn_max_channels()) return -EINVAL; @@ -233,11 +234,18 @@ static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq) if (irq == -1) return 0; - evtchn_to_irq[row] = (int *)get_zeroed_page(GFP_KERNEL); - if (evtchn_to_irq[row] == NULL) + evtchn_row = (int *) __get_free_pages(GFP_KERNEL, 0); + if (evtchn_row == NULL) return -ENOMEM; - clear_evtchn_to_irq_row(row); + clear_evtchn_to_irq_row(evtchn_row); + + /* + * We've prepared an empty row for the mapping. If a different + * thread was faster inserting it, we can drop ours. + */ + if (cmpxchg(&evtchn_to_irq[row], NULL, evtchn_row) != NULL) + free_page((unsigned long) evtchn_row); } WRITE_ONCE(evtchn_to_irq[row][col], irq); @@ -1009,7 +1017,7 @@ static void __unbind_from_irq(unsigned int irq) int xen_bind_pirq_gsi_to_irq(unsigned gsi, unsigned pirq, int shareable, char *name) { - int irq = -1; + int irq; struct physdev_irq irq_op; int ret; diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt index 06fb7a93a1bd..4d5ae61580aa 100644 --- a/fs/Kconfig.binfmt +++ b/fs/Kconfig.binfmt @@ -168,21 +168,6 @@ config OSF4_COMPAT with v4 shared libraries freely available from Compaq. If you're going to use shared libraries from Tru64 version 5.0 or later, say N. -config BINFMT_EM86 - tristate "Kernel support for Linux/Intel ELF binaries" - depends on ALPHA - help - Say Y here if you want to be able to execute Linux/Intel ELF - binaries just like native Alpha binaries on your Alpha machine. For - this to work, you need to have the emulator /usr/bin/em86 in place. - - You can get the same functionality by saying N here and saying Y to - "Kernel support for MISC binaries". - - You may answer M to compile the emulation support as a module and - later load the module when you want to use a Linux/Intel binary. The - module will be called binfmt_em86. If unsure, say Y. 
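The events_base.c hunk earlier in this run prepares a zeroed row off to the side and publishes it with cmpxchg(), the losing CPU freeing its page. A user-space sketch of that publish-or-free idiom using C11 atomics (names invented for the sketch; the kernel version additionally fills the row with -1 before publishing):

    #include <stdatomic.h>
    #include <stdlib.h>

    #define ROWS 16
    #define COLS 256

    static _Atomic(int *) table[ROWS];

    /* Install a zero-filled row for `row`, tolerating concurrent callers:
     * exactly one allocation wins; losers free theirs and use the winner's. */
    static int *get_row(unsigned row)
    {
            int *cur = atomic_load(&table[row]);
            if (cur)
                    return cur;

            int *fresh = calloc(COLS, sizeof(*fresh));
            if (!fresh)
                    return NULL;

            /* compare-and-swap: succeeds only if the slot is still NULL */
            if (!atomic_compare_exchange_strong(&table[row], &cur, fresh)) {
                    free(fresh); /* somebody beat us; cur now holds theirs */
                    return cur;
            }
            return fresh;
    }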
- config BINFMT_MISC tristate "Kernel support for MISC binaries" help diff --git a/fs/Makefile b/fs/Makefile index 9c708e1fbe8f..f98f3e691c37 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -39,7 +39,6 @@ obj-$(CONFIG_FS_ENCRYPTION) += crypto/ obj-$(CONFIG_FS_VERITY) += verity/ obj-$(CONFIG_FILE_LOCKING) += locks.o obj-$(CONFIG_BINFMT_AOUT) += binfmt_aout.o -obj-$(CONFIG_BINFMT_EM86) += binfmt_em86.o obj-$(CONFIG_BINFMT_MISC) += binfmt_misc.o obj-$(CONFIG_BINFMT_SCRIPT) += binfmt_script.o obj-$(CONFIG_BINFMT_ELF) += binfmt_elf.o diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c index d3c6bb22c5f4..a3f5de28be79 100644 --- a/fs/afs/cmservice.c +++ b/fs/afs/cmservice.c @@ -29,16 +29,11 @@ static void SRXAFSCB_TellMeAboutYourself(struct work_struct *); static int afs_deliver_yfs_cb_callback(struct afs_call *); -#define CM_NAME(name) \ - char afs_SRXCB##name##_name[] __tracepoint_string = \ - "CB." #name - /* * CB.CallBack operation type */ -static CM_NAME(CallBack); static const struct afs_call_type afs_SRXCBCallBack = { - .name = afs_SRXCBCallBack_name, + .name = "CB.CallBack", .deliver = afs_deliver_cb_callback, .destructor = afs_cm_destructor, .work = SRXAFSCB_CallBack, @@ -47,9 +42,8 @@ static const struct afs_call_type afs_SRXCBCallBack = { /* * CB.InitCallBackState operation type */ -static CM_NAME(InitCallBackState); static const struct afs_call_type afs_SRXCBInitCallBackState = { - .name = afs_SRXCBInitCallBackState_name, + .name = "CB.InitCallBackState", .deliver = afs_deliver_cb_init_call_back_state, .destructor = afs_cm_destructor, .work = SRXAFSCB_InitCallBackState, @@ -58,9 +52,8 @@ static const struct afs_call_type afs_SRXCBInitCallBackState = { /* * CB.InitCallBackState3 operation type */ -static CM_NAME(InitCallBackState3); static const struct afs_call_type afs_SRXCBInitCallBackState3 = { - .name = afs_SRXCBInitCallBackState3_name, + .name = "CB.InitCallBackState3", .deliver = afs_deliver_cb_init_call_back_state3, .destructor = afs_cm_destructor, .work = SRXAFSCB_InitCallBackState, @@ -69,9 +62,8 @@ static const struct afs_call_type afs_SRXCBInitCallBackState3 = { /* * CB.Probe operation type */ -static CM_NAME(Probe); static const struct afs_call_type afs_SRXCBProbe = { - .name = afs_SRXCBProbe_name, + .name = "CB.Probe", .deliver = afs_deliver_cb_probe, .destructor = afs_cm_destructor, .work = SRXAFSCB_Probe, @@ -80,9 +72,8 @@ static const struct afs_call_type afs_SRXCBProbe = { /* * CB.ProbeUuid operation type */ -static CM_NAME(ProbeUuid); static const struct afs_call_type afs_SRXCBProbeUuid = { - .name = afs_SRXCBProbeUuid_name, + .name = "CB.ProbeUuid", .deliver = afs_deliver_cb_probe_uuid, .destructor = afs_cm_destructor, .work = SRXAFSCB_ProbeUuid, @@ -91,9 +82,8 @@ static const struct afs_call_type afs_SRXCBProbeUuid = { /* * CB.TellMeAboutYourself operation type */ -static CM_NAME(TellMeAboutYourself); static const struct afs_call_type afs_SRXCBTellMeAboutYourself = { - .name = afs_SRXCBTellMeAboutYourself_name, + .name = "CB.TellMeAboutYourself", .deliver = afs_deliver_cb_tell_me_about_yourself, .destructor = afs_cm_destructor, .work = SRXAFSCB_TellMeAboutYourself, @@ -102,9 +92,8 @@ static const struct afs_call_type afs_SRXCBTellMeAboutYourself = { /* * YFS CB.CallBack operation type */ -static CM_NAME(YFS_CallBack); static const struct afs_call_type afs_SRXYFSCB_CallBack = { - .name = afs_SRXCBYFS_CallBack_name, + .name = "YFSCB.CallBack", .deliver = afs_deliver_yfs_cb_callback, .destructor = afs_cm_destructor, .work = SRXAFSCB_CallBack, diff --git a/fs/afs/dir.c 
b/fs/afs/dir.c index 78719f2f567e..ac829e63c570 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -656,7 +656,6 @@ static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry, return ret; } - ret = -ENOENT; if (!cookie.found) { _leave(" = -ENOENT [not found]"); return -ENOENT; @@ -2020,17 +2019,20 @@ static int afs_rename(struct user_namespace *mnt_userns, struct inode *old_dir, if (d_count(new_dentry) > 2) { /* copy the target dentry's name */ - ret = -ENOMEM; op->rename.tmp = d_alloc(new_dentry->d_parent, &new_dentry->d_name); - if (!op->rename.tmp) + if (!op->rename.tmp) { + op->error = -ENOMEM; goto error; + } ret = afs_sillyrename(new_dvnode, AFS_FS_I(d_inode(new_dentry)), new_dentry, op->key); - if (ret) + if (ret) { + op->error = ret; goto error; + } op->dentry_2 = op->rename.tmp; op->rename.rehash = NULL; diff --git a/fs/afs/write.c b/fs/afs/write.c index 3104b62c2082..c0534697268e 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -771,14 +771,20 @@ int afs_writepages(struct address_space *mapping, if (wbc->range_cyclic) { start = mapping->writeback_index * PAGE_SIZE; ret = afs_writepages_region(mapping, wbc, start, LLONG_MAX, &next); - if (start > 0 && wbc->nr_to_write > 0 && ret == 0) - ret = afs_writepages_region(mapping, wbc, 0, start, - &next); - mapping->writeback_index = next / PAGE_SIZE; + if (ret == 0) { + mapping->writeback_index = next / PAGE_SIZE; + if (start > 0 && wbc->nr_to_write > 0) { + ret = afs_writepages_region(mapping, wbc, 0, + start, &next); + if (ret == 0) + mapping->writeback_index = + next / PAGE_SIZE; + } + } } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) { ret = afs_writepages_region(mapping, wbc, 0, LLONG_MAX, &next); - if (wbc->nr_to_write > 0) - mapping->writeback_index = next; + if (wbc->nr_to_write > 0 && ret == 0) + mapping->writeback_index = next / PAGE_SIZE; } else { ret = afs_writepages_region(mapping, wbc, wbc->range_start, wbc->range_end, &next); diff --git a/fs/binfmt_em86.c b/fs/binfmt_em86.c deleted file mode 100644 index 06b9b9fddf70..000000000000 --- a/fs/binfmt_em86.c +++ /dev/null @@ -1,110 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * linux/fs/binfmt_em86.c - * - * Based on linux/fs/binfmt_script.c - * Copyright (C) 1996 Martin von Löwis - * original #!-checking implemented by tytso. - * - * em86 changes Copyright (C) 1997 Jim Paradis - */ - -#include <linux/module.h> -#include <linux/string.h> -#include <linux/stat.h> -#include <linux/binfmts.h> -#include <linux/elf.h> -#include <linux/init.h> -#include <linux/fs.h> -#include <linux/file.h> -#include <linux/errno.h> - - -#define EM86_INTERP "/usr/bin/em86" -#define EM86_I_NAME "em86" - -static int load_em86(struct linux_binprm *bprm) -{ - const char *i_name, *i_arg; - char *interp; - struct file * file; - int retval; - struct elfhdr elf_ex; - - /* Make sure this is a Linux/Intel ELF executable... */ - elf_ex = *((struct elfhdr *)bprm->buf); - - if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0) - return -ENOEXEC; - - /* First of all, some simple consistency checks */ - if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) || - (!((elf_ex.e_machine == EM_386) || (elf_ex.e_machine == EM_486))) || - !bprm->file->f_op->mmap) { - return -ENOEXEC; - } - - /* Need to be able to load the file after exec */ - if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE) - return -ENOENT; - - /* Unlike in the script case, we don't have to do any hairy - * parsing to find our interpreter... it's hardcoded! 
- */ - interp = EM86_INTERP; - i_name = EM86_I_NAME; - i_arg = NULL; /* We reserve the right to add an arg later */ - - /* - * Splice in (1) the interpreter's name for argv[0] - * (2) (optional) argument to interpreter - * (3) filename of emulated file (replace argv[0]) - * - * This is done in reverse order, because of how the - * user environment and arguments are stored. - */ - remove_arg_zero(bprm); - retval = copy_string_kernel(bprm->filename, bprm); - if (retval < 0) return retval; - bprm->argc++; - if (i_arg) { - retval = copy_string_kernel(i_arg, bprm); - if (retval < 0) return retval; - bprm->argc++; - } - retval = copy_string_kernel(i_name, bprm); - if (retval < 0) return retval; - bprm->argc++; - - /* - * OK, now restart the process with the interpreter's inode. - * Note that we use open_exec() as the name is now in kernel - * space, and we don't need to copy it. - */ - file = open_exec(interp); - if (IS_ERR(file)) - return PTR_ERR(file); - - bprm->interpreter = file; - return 0; -} - -static struct linux_binfmt em86_format = { - .module = THIS_MODULE, - .load_binary = load_em86, -}; - -static int __init init_em86_binfmt(void) -{ - register_binfmt(&em86_format); - return 0; -} - -static void __exit exit_em86_binfmt(void) -{ - unregister_binfmt(&em86_format); -} - -core_initcall(init_em86_binfmt); -module_exit(exit_em86_binfmt); -MODULE_LICENSE("GPL"); diff --git a/fs/block_dev.c b/fs/block_dev.c index 0c424a0cadaa..9ef4f1fc2cb0 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -812,6 +812,8 @@ static void bdev_free_inode(struct inode *inode) free_percpu(bdev->bd_stats); kfree(bdev->bd_meta_info); + if (!bdev_is_partition(bdev)) + kfree(bdev->bd_disk); kmem_cache_free(bdev_cachep, BDEV_I(inode)); } diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index 7a8a2fc19533..78b202d198b8 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -1488,15 +1488,15 @@ static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans, int btrfs_find_all_roots(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, u64 bytenr, u64 time_seq, struct ulist **roots, - bool ignore_offset) + bool ignore_offset, bool skip_commit_root_sem) { int ret; - if (!trans) + if (!trans && !skip_commit_root_sem) down_read(&fs_info->commit_root_sem); ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr, time_seq, roots, ignore_offset); - if (!trans) + if (!trans && !skip_commit_root_sem) up_read(&fs_info->commit_root_sem); return ret; } diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h index 17abde7f794c..ff5f07f9940b 100644 --- a/fs/btrfs/backref.h +++ b/fs/btrfs/backref.h @@ -47,7 +47,8 @@ int btrfs_find_all_leafs(struct btrfs_trans_handle *trans, const u64 *extent_item_pos, bool ignore_offset); int btrfs_find_all_roots(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, u64 bytenr, - u64 time_seq, struct ulist **roots, bool ignore_offset); + u64 time_seq, struct ulist **roots, bool ignore_offset, + bool skip_commit_root_sem); char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path, u32 name_len, unsigned long name_off, struct extent_buffer *eb_in, u64 parent, diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 38b127b9edfc..9e7d9d0c763d 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -1498,9 +1498,18 @@ void btrfs_reclaim_bgs_work(struct work_struct *work) if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) return; - mutex_lock(&fs_info->reclaim_bgs_lock); + /* + * Long running balances can keep us blocked here 
for eternity, so + * simply skip reclaim if we're unable to get the mutex. + */ + if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) { + btrfs_exclop_finish(fs_info); + return; + } + spin_lock(&fs_info->unused_bgs_lock); while (!list_empty(&fs_info->reclaim_bgs)) { + u64 zone_unusable; int ret = 0; bg = list_first_entry(&fs_info->reclaim_bgs, @@ -1534,13 +1543,22 @@ void btrfs_reclaim_bgs_work(struct work_struct *work) goto next; } + /* + * Cache the zone_unusable value before turning the block group + * to read only. As soon as the block group is read only its + * zone_unusable value gets moved to the block group's read-only + * bytes and isn't available for calculations anymore. + */ + zone_unusable = bg->zone_unusable; ret = inc_block_group_ro(bg, 0); up_write(&space_info->groups_sem); if (ret < 0) goto next; - btrfs_info(fs_info, "reclaiming chunk %llu with %llu%% used", - bg->start, div_u64(bg->used * 100, bg->length)); + btrfs_info(fs_info, + "reclaiming chunk %llu with %llu%% used %llu%% unusable", + bg->start, div_u64(bg->used * 100, bg->length), + div64_u64(zone_unusable * 100, bg->length)); trace_btrfs_reclaim_block_group(bg); ret = btrfs_relocate_chunk(fs_info, bg->start); if (ret) @@ -2197,6 +2215,13 @@ error: return ret; } +/* + * This function, insert_block_group_item(), belongs to phase 2 of chunk + * allocation. + * + * See the comment at btrfs_chunk_alloc() for details about the chunk allocation + * phases. + */ static int insert_block_group_item(struct btrfs_trans_handle *trans, struct btrfs_block_group *block_group) { @@ -2219,15 +2244,19 @@ static int insert_block_group_item(struct btrfs_trans_handle *trans, return btrfs_insert_item(trans, root, &key, &bgi, sizeof(bgi)); } +/* + * This function, btrfs_create_pending_block_groups(), belongs to phase 2 of + * chunk allocation. + * + * See the comment at btrfs_chunk_alloc() for details about the chunk allocation + * phases. 
+ */ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans) { struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_block_group *block_group; int ret = 0; - if (!trans->can_flush_pending_bgs) - return; - while (!list_empty(&trans->new_bgs)) { int index; @@ -2242,6 +2271,13 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans) ret = insert_block_group_item(trans, block_group); if (ret) btrfs_abort_transaction(trans, ret); + if (!block_group->chunk_item_inserted) { + mutex_lock(&fs_info->chunk_mutex); + ret = btrfs_chunk_alloc_add_chunk_item(trans, block_group); + mutex_unlock(&fs_info->chunk_mutex); + if (ret) + btrfs_abort_transaction(trans, ret); + } ret = btrfs_finish_chunk_alloc(trans, block_group->start, block_group->length); if (ret) @@ -2265,8 +2301,9 @@ next: btrfs_trans_release_chunk_metadata(trans); } -int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, - u64 type, u64 chunk_offset, u64 size) +struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans, + u64 bytes_used, u64 type, + u64 chunk_offset, u64 size) { struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_block_group *cache; @@ -2276,7 +2313,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, cache = btrfs_create_block_group_cache(fs_info, chunk_offset); if (!cache) - return -ENOMEM; + return ERR_PTR(-ENOMEM); cache->length = size; set_free_space_tree_thresholds(cache); @@ -2290,7 +2327,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, ret = btrfs_load_block_group_zone_info(cache, true); if (ret) { btrfs_put_block_group(cache); - return ret; + return ERR_PTR(ret); } ret = exclude_super_stripes(cache); @@ -2298,7 +2335,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, /* We may have excluded something, so call this just in case */ btrfs_free_excluded_extents(cache); btrfs_put_block_group(cache); - return ret; + return ERR_PTR(ret); } add_new_free_space(cache, chunk_offset, chunk_offset + size); @@ -2325,7 +2362,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, if (ret) { btrfs_remove_free_space_cache(cache); btrfs_put_block_group(cache); - return ret; + return ERR_PTR(ret); } /* @@ -2344,7 +2381,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, btrfs_update_delayed_refs_rsv(trans); set_avail_alloc_bits(fs_info, type); - return 0; + return cache; } /* @@ -3222,11 +3259,203 @@ int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type) return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE); } +static int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags) +{ + struct btrfs_block_group *bg; + int ret; + + /* + * Check if we have enough space in the system space info because we + * will need to update device items in the chunk btree and insert a new + * chunk item in the chunk btree as well. This will allocate a new + * system block group if needed. + */ + check_system_chunk(trans, flags); + + bg = btrfs_alloc_chunk(trans, flags); + if (IS_ERR(bg)) { + ret = PTR_ERR(bg); + goto out; + } + + /* + * If this is a system chunk allocation then stop right here and do not + * add the chunk item to the chunk btree. 
This is to prevent a deadlock + * because this system chunk allocation can be triggered while COWing + * some extent buffer of the chunk btree and while holding a lock on a + * parent extent buffer, in which case attempting to insert the chunk + * item (or update the device item) would result in a deadlock on that + * parent extent buffer. In this case defer the chunk btree updates to + * the second phase of chunk allocation and keep our reservation until + * the second phase completes. + * + * This is a rare case and can only be triggered by the very few cases + * we have where we need to touch the chunk btree outside chunk allocation + * and chunk removal. These cases are basically adding a device, removing + * a device or resizing a device. + */ + if (flags & BTRFS_BLOCK_GROUP_SYSTEM) + return 0; + + ret = btrfs_chunk_alloc_add_chunk_item(trans, bg); + /* + * Normally we are not expected to fail with -ENOSPC here, since we have + * previously reserved space in the system space_info and allocated one + * new system chunk if necessary. However there are two exceptions: + * + * 1) We may have enough free space in the system space_info but all the + * existing system block groups have a profile which can not be used + * for extent allocation. + * + * This happens when mounting in degraded mode. For example we have a + * RAID1 filesystem with 2 devices, lose one device and mount the fs + * using the other device in degraded mode. If we then allocate a chunk, + * we may have enough free space in the existing system space_info, but + * none of the block groups can be used for extent allocation since they + * have a RAID1 profile, and because we are in degraded mode with a + * single device, we are forced to allocate a new system chunk with a + * SINGLE profile. Making check_system_chunk() iterate over all system + * block groups and check if they have a usable profile and enough space + * can be slow on very large filesystems, so we tolerate the -ENOSPC and + * try again after forcing allocation of a new system chunk. Like this + * we avoid paying the cost of that search in normal circumstances, when + * we were not mounted in degraded mode; + * + * 2) We had enough free space in the system space_info, and one suitable + * block group to allocate from when we called check_system_chunk() + * above. However right after we called it, the only system block group + * with enough free space got turned into RO mode by a running scrub, + * and in this case we have to allocate a new one and retry. We only + * need to do this allocation and retry once, since we have a transaction + * handle and scrub uses the commit root to search for block groups. 
+ */ + if (ret == -ENOSPC) { + const u64 sys_flags = btrfs_system_alloc_profile(trans->fs_info); + struct btrfs_block_group *sys_bg; + + sys_bg = btrfs_alloc_chunk(trans, sys_flags); + if (IS_ERR(sys_bg)) { + ret = PTR_ERR(sys_bg); + btrfs_abort_transaction(trans, ret); + goto out; + } + + ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg); + if (ret) { + btrfs_abort_transaction(trans, ret); + goto out; + } + + ret = btrfs_chunk_alloc_add_chunk_item(trans, bg); + if (ret) { + btrfs_abort_transaction(trans, ret); + goto out; + } + } else if (ret) { + btrfs_abort_transaction(trans, ret); + goto out; + } +out: + btrfs_trans_release_chunk_metadata(trans); + + return ret; +} + /* - * If force is CHUNK_ALLOC_FORCE: + * Chunk allocation is done in 2 phases: + * + * 1) Phase 1 - through btrfs_chunk_alloc() we allocate device extents for + * the chunk, the chunk mapping, create its block group and add the items + * that belong in the chunk btree to it - more specifically, we need to + * update device items in the chunk btree and add a new chunk item to it. + * + * 2) Phase 2 - through btrfs_create_pending_block_groups(), we add the block + * group item to the extent btree and the device extent items to the devices + * btree. + * + * This is done to prevent deadlocks. For example when COWing a node from the + * extent btree we are holding a write lock on the node's parent and if we + * trigger chunk allocation and attempt to insert the new block group item + * in the extent btree right away, we could deadlock because the path for the + * insertion can include that parent node. At first glance it seems impossible + * to trigger chunk allocation after starting a transaction since tasks should + * reserve enough transaction units (metadata space), however while that is true + * most of the time, chunk allocation may still be triggered for several reasons: + * + * 1) When reserving metadata, we check if there is enough free space in the + * metadata space_info and therefore don't trigger allocation of a new chunk. + * However later when the task actually tries to COW an extent buffer from + * the extent btree or from the device btree for example, it is forced to + * allocate a new block group (chunk) because the only one that had enough + * free space was just turned to RO mode by a running scrub for example (or + * device replace, block group reclaim thread, etc), so we can not use it + * for allocating an extent and end up being forced to allocate a new one; + * + * 2) Because we only check that the metadata space_info has enough free bytes, + * we end up not allocating a new metadata chunk in that case. However if + * the filesystem was mounted in degraded mode, none of the existing block + * groups might be suitable for extent allocation due to their incompatible + * profile (e.g. mounting a 2-device filesystem, where all block groups + * use a RAID1 profile, in degraded mode using a single device). In this case + * when the task attempts to COW some extent buffer of the extent btree for + * example, it will trigger allocation of a new metadata block group with a + * suitable profile (SINGLE profile in the example of the degraded mount of + * the RAID1 filesystem); + * + * 3) The task has reserved enough transaction units / metadata space, but when + * it attempts to COW an extent buffer from the extent or device btree for + * example, it does not find any free extent in any metadata block group, and + * is therefore forced to try to allocate a new metadata block group. 
+ * This is because some other task allocated all available extents in the + * meanwhile - this typically happens with tasks that don't reserve space + * properly, either intentionally or as a bug. One example where this is + * done intentionally is fsync, as it does not reserve any transaction units + * and ends up allocating a variable number of metadata extents for log + * tree extent buffers. + * + * We also need this 2 phases setup when adding a device to a filesystem with + * a seed device - we must create new metadata and system chunks without adding + * any of the block group items to the chunk, extent and device btrees. If we + * did not do it this way, we would get ENOSPC when attempting to update those + * btrees, since all the chunks from the seed device are read-only. + * + * Phase 1 does the updates and insertions to the chunk btree because if we had + * it done in phase 2 and have a thundering herd of tasks allocating chunks in + * parallel, we risk having too many system chunks allocated by many tasks if + * many tasks reach phase 1 without the previous ones completing phase 2. In the + * extreme case this leads to exhaustion of the system chunk array in the + * superblock. This is easier to trigger if using a btree node/leaf size of 64K + * and with RAID filesystems (so we have more device items in the chunk btree). + * This has happened before and commit eafa4fd0ad0607 ("btrfs: fix exhaustion of + * the system chunk array due to concurrent allocations") provides more details. + * + * For allocation of system chunks, we defer the updates and insertions into the + * chunk btree to phase 2. This is to prevent deadlocks on extent buffers because + * if the chunk allocation is triggered while COWing an extent buffer of the + * chunk btree, we are holding a lock on the parent of that extent buffer and + * doing the chunk btree updates and insertions can require locking that parent. + * This is for the very few and rare cases where we update the chunk btree that + * are not chunk allocation or chunk removal: adding a device, removing a device + * or resizing a device. + * + * The reservation of system space, done through check_system_chunk(), as well + * as all the updates and insertions into the chunk btree must be done while + * holding fs_info->chunk_mutex. This is important to guarantee that while COWing + * an extent buffer from the chunks btree we never trigger allocation of a new + * system chunk, which would result in a deadlock (trying to lock twice an + * extent buffer of the chunk btree, first time before triggering the chunk + * allocation and the second time during chunk allocation while attempting to + * update the chunks btree). The system chunk array is also updated while holding + * that mutex. The same logic applies to removing chunks - we must reserve system + * space, update the chunk btree and the system chunk array in the superblock + * while holding fs_info->chunk_mutex. + * + * This function, btrfs_chunk_alloc(), belongs to phase 1. + * + * If @force is CHUNK_ALLOC_FORCE: * - return 1 if it successfully allocates a chunk, * - return errors including -ENOSPC otherwise. - * If force is NOT CHUNK_ALLOC_FORCE: + * If @force is NOT CHUNK_ALLOC_FORCE: * - return 0 if it doesn't need to allocate a new chunk, * - return 1 if it successfully allocates a chunk, * - return errors including -ENOSPC otherwise. 
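As a rough illustration of the two-phase shape the comment above describes (all names here are invented for the sketch, none of this is btrfs API): phase 1 only creates the in-memory object and queues it on the transaction, while phase 2 later drains the queue and performs the deferred item insertions outside the locks phase 1 may hold:

    #include <stddef.h>

    struct bg {                     /* stand-in for a new block group */
            struct bg *next;
            int item_inserted;      /* mirrors chunk_item_inserted in the diff */
    };

    struct txn {                    /* stand-in for a transaction handle */
            struct bg *new_bgs;
    };

    /* Phase 1: record the in-memory object only; defer btree inserts. */
    static struct bg *phase1_alloc(struct txn *t, struct bg *fresh)
    {
            fresh->item_inserted = 0;
            fresh->next = t->new_bgs;
            t->new_bgs = fresh;
            return fresh;
    }

    /* Phase 2: called later, outside the locks phase 1 may hold. */
    static void phase2_insert_items(struct txn *t, void (*insert)(struct bg *))
    {
            for (struct bg *b = t->new_bgs; b; b = b->next) {
                    if (!b->item_inserted) {
                            insert(b);          /* the deferred btree update */
                            b->item_inserted = 1;
                    }
            }
            t->new_bgs = NULL;      /* list memory is owned by the caller */
    }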
@@ -3243,6 +3472,13 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags, /* Don't re-enter if we're already allocating a chunk */ if (trans->allocating_chunk) return -ENOSPC; + /* + * If we are removing a chunk, don't re-enter or we would deadlock. + * System space reservation and system chunk allocation is done by the + * chunk remove operation (btrfs_remove_chunk()). + */ + if (trans->removing_chunk) + return -ENOSPC; space_info = btrfs_find_space_info(fs_info, flags); ASSERT(space_info); @@ -3306,13 +3542,7 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags, force_metadata_allocation(fs_info); } - /* - * Check if we have enough space in SYSTEM chunk because we may need - * to update devices. - */ - check_system_chunk(trans, flags); - - ret = btrfs_alloc_chunk(trans, flags); + ret = do_chunk_alloc(trans, flags); trans->allocating_chunk = false; spin_lock(&space_info->lock); @@ -3331,22 +3561,6 @@ out: space_info->chunk_alloc = 0; spin_unlock(&space_info->lock); mutex_unlock(&fs_info->chunk_mutex); - /* - * When we allocate a new chunk we reserve space in the chunk block - * reserve to make sure we can COW nodes/leafs in the chunk tree or - * add new nodes/leafs to it if we end up needing to do it when - * inserting the chunk item and updating device items as part of the - * second phase of chunk allocation, performed by - * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a - * large number of new block groups to create in our transaction - * handle's new_bgs list to avoid exhausting the chunk block reserve - * in extreme cases - like having a single transaction create many new - * block groups when starting to write out the free space caches of all - * the block groups that were made dirty during the lifetime of the - * transaction. - */ - if (trans->chunk_bytes_reserved >= (u64)SZ_2M) - btrfs_create_pending_block_groups(trans); return ret; } @@ -3367,7 +3581,6 @@ static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type) */ void check_system_chunk(struct btrfs_trans_handle *trans, u64 type) { - struct btrfs_transaction *cur_trans = trans->transaction; struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_space_info *info; u64 left; @@ -3382,7 +3595,6 @@ void check_system_chunk(struct btrfs_trans_handle *trans, u64 type) lockdep_assert_held(&fs_info->chunk_mutex); info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM); -again: spin_lock(&info->lock); left = info->total_bytes - btrfs_space_info_used(info, true); spin_unlock(&info->lock); @@ -3401,76 +3613,39 @@ again: if (left < thresh) { u64 flags = btrfs_system_alloc_profile(fs_info); - u64 reserved = atomic64_read(&cur_trans->chunk_bytes_reserved); - - /* - * If there's not available space for the chunk tree (system - * space) and there are other tasks that reserved space for - * creating a new system block group, wait for them to complete - * the creation of their system block group and release excess - * reserved space. 
We do this because: - * - * *) We can end up allocating more system chunks than necessary - * when there are multiple tasks that are concurrently - * allocating block groups, which can lead to exhaustion of - * the system array in the superblock; - * - * *) If we allocate extra and unnecessary system block groups, - * despite being empty for a long time, and possibly forever, - * they end not being added to the list of unused block groups - * because that typically happens only when deallocating the - * last extent from a block group - which never happens since - * we never allocate from them in the first place. The few - * exceptions are when mounting a filesystem or running scrub, - * which add unused block groups to the list of unused block - * groups, to be deleted by the cleaner kthread. - * And even when they are added to the list of unused block - * groups, it can take a long time until they get deleted, - * since the cleaner kthread might be sleeping or busy with - * other work (deleting subvolumes, running delayed iputs, - * defrag scheduling, etc); - * - * This is rare in practice, but can happen when too many tasks - * are allocating blocks groups in parallel (via fallocate()) - * and before the one that reserved space for a new system block - * group finishes the block group creation and releases the space - * reserved in excess (at btrfs_create_pending_block_groups()), - * other tasks end up here and see free system space temporarily - * not enough for updating the chunk tree. - * - * We unlock the chunk mutex before waiting for such tasks and - * lock it again after the wait, otherwise we would deadlock. - * It is safe to do so because allocating a system chunk is the - * first thing done while allocating a new block group. - */ - if (reserved > trans->chunk_bytes_reserved) { - const u64 min_needed = reserved - thresh; - - mutex_unlock(&fs_info->chunk_mutex); - wait_event(cur_trans->chunk_reserve_wait, - atomic64_read(&cur_trans->chunk_bytes_reserved) <= - min_needed); - mutex_lock(&fs_info->chunk_mutex); - goto again; - } + struct btrfs_block_group *bg; /* * Ignore failure to create system chunk. We might end up not * needing it, as we might not need to COW all nodes/leafs from * the paths we visit in the chunk tree (they were already COWed * or created in the current transaction for example). + * + * Also, if our caller is allocating a system chunk, do not + * attempt to insert the chunk item in the chunk btree, as we + * could deadlock on an extent buffer since our caller may be + * COWing an extent buffer from the chunk btree. */ - ret = btrfs_alloc_chunk(trans, flags); + bg = btrfs_alloc_chunk(trans, flags); + if (IS_ERR(bg)) { + ret = PTR_ERR(bg); + } else if (!(type & BTRFS_BLOCK_GROUP_SYSTEM)) { + /* + * If we fail to add the chunk item here, we end up + * trying again at phase 2 of chunk allocation, at + * btrfs_create_pending_block_groups(). So ignore + * any error here. 
+ */ + btrfs_chunk_alloc_add_chunk_item(trans, bg); + } } if (!ret) { ret = btrfs_block_rsv_add(fs_info->chunk_root, &fs_info->chunk_block_rsv, thresh, BTRFS_RESERVE_NO_FLUSH); - if (!ret) { - atomic64_add(thresh, &cur_trans->chunk_bytes_reserved); + if (!ret) trans->chunk_bytes_reserved += thresh; - } } } diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index 7b927425dc71..c72a71efcb18 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -97,6 +97,7 @@ struct btrfs_block_group { unsigned int removed:1; unsigned int to_copy:1; unsigned int relocating_repair:1; + unsigned int chunk_item_inserted:1; int disk_cache_state; @@ -268,8 +269,9 @@ void btrfs_reclaim_bgs_work(struct work_struct *work); void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info); void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg); int btrfs_read_block_groups(struct btrfs_fs_info *info); -int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, - u64 type, u64 chunk_offset, u64 size); +struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans, + u64 bytes_used, u64 type, + u64 chunk_offset, u64 size); void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans); int btrfs_inc_block_group_ro(struct btrfs_block_group *cache, bool do_chunk_alloc); diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 9a023ae0f98b..30d82cdf128c 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -352,7 +352,7 @@ static void end_compressed_bio_write(struct bio *bio) btrfs_record_physical_zoned(inode, cb->start, bio); btrfs_writepage_endio_finish_ordered(BTRFS_I(inode), NULL, cb->start, cb->start + cb->len - 1, - bio->bi_status == BLK_STS_OK); + !cb->errors); end_compressed_writeback(inode, cb); /* note, our inode could be gone now */ diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 4bc3ca2cbd7d..c5c08c87e130 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -364,49 +364,6 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans, return 0; } -static struct extent_buffer *alloc_tree_block_no_bg_flush( - struct btrfs_trans_handle *trans, - struct btrfs_root *root, - u64 parent_start, - const struct btrfs_disk_key *disk_key, - int level, - u64 hint, - u64 empty_size, - enum btrfs_lock_nesting nest) -{ - struct btrfs_fs_info *fs_info = root->fs_info; - struct extent_buffer *ret; - - /* - * If we are COWing a node/leaf from the extent, chunk, device or free - * space trees, make sure that we do not finish block group creation of - * pending block groups. We do this to avoid a deadlock. - * COWing can result in allocation of a new chunk, and flushing pending - * block groups (btrfs_create_pending_block_groups()) can be triggered - * when finishing allocation of a new chunk. Creation of a pending block - * group modifies the extent, chunk, device and free space trees, - * therefore we could deadlock with ourselves since we are holding a - * lock on an extent buffer that btrfs_create_pending_block_groups() may - * try to COW later. - * For similar reasons, we also need to delay flushing pending block - * groups when splitting a leaf or node, from one of those trees, since - * we are holding a write lock on it and its parent or when inserting a - * new root node for one of those trees. 
- */ - if (root == fs_info->extent_root || - root == fs_info->chunk_root || - root == fs_info->dev_root || - root == fs_info->free_space_root) - trans->can_flush_pending_bgs = false; - - ret = btrfs_alloc_tree_block(trans, root, parent_start, - root->root_key.objectid, disk_key, level, - hint, empty_size, nest); - trans->can_flush_pending_bgs = true; - - return ret; -} - /* * does the dirty work in cow of a single block. The parent block (if * supplied) is updated to point to the new cow copy. The new buffer is marked @@ -455,8 +412,9 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent) parent_start = parent->start; - cow = alloc_tree_block_no_bg_flush(trans, root, parent_start, &disk_key, - level, search_start, empty_size, nest); + cow = btrfs_alloc_tree_block(trans, root, parent_start, + root->root_key.objectid, &disk_key, level, + search_start, empty_size, nest); if (IS_ERR(cow)) return PTR_ERR(cow); @@ -2458,9 +2416,9 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans, else btrfs_node_key(lower, &lower_key, 0); - c = alloc_tree_block_no_bg_flush(trans, root, 0, &lower_key, level, - root->node->start, 0, - BTRFS_NESTING_NEW_ROOT); + c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, + &lower_key, level, root->node->start, 0, + BTRFS_NESTING_NEW_ROOT); if (IS_ERR(c)) return PTR_ERR(c); @@ -2589,8 +2547,9 @@ static noinline int split_node(struct btrfs_trans_handle *trans, mid = (c_nritems + 1) / 2; btrfs_node_key(c, &disk_key, mid); - split = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, level, - c->start, 0, BTRFS_NESTING_SPLIT); + split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, + &disk_key, level, c->start, 0, + BTRFS_NESTING_SPLIT); if (IS_ERR(split)) return PTR_ERR(split); @@ -3381,10 +3340,10 @@ again: * BTRFS_NESTING_SPLIT_THE_SPLITTENING if we need to, but for now just * use BTRFS_NESTING_NEW_ROOT. */ - right = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, 0, - l->start, 0, num_doubles ? - BTRFS_NESTING_NEW_ROOT : - BTRFS_NESTING_SPLIT); + right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, + &disk_key, 0, l->start, 0, + num_doubles ? 
BTRFS_NESTING_NEW_ROOT : + BTRFS_NESTING_SPLIT); if (IS_ERR(right)) return PTR_ERR(right); diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c index 06bc842ecdb3..ca848b183474 100644 --- a/fs/btrfs/delayed-ref.c +++ b/fs/btrfs/delayed-ref.c @@ -974,7 +974,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans, kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref); if (qrecord_inserted) - btrfs_qgroup_trace_extent_post(fs_info, record); + btrfs_qgroup_trace_extent_post(trans, record); return 0; } @@ -1069,7 +1069,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans, if (qrecord_inserted) - return btrfs_qgroup_trace_extent_post(fs_info, record); + return btrfs_qgroup_trace_extent_post(trans, record); return 0; } diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index b117dd3b8172..a59ab7b9aea0 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -209,7 +209,7 @@ void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb, static void csum_tree_block(struct extent_buffer *buf, u8 *result) { struct btrfs_fs_info *fs_info = buf->fs_info; - const int num_pages = fs_info->nodesize >> PAGE_SHIFT; + const int num_pages = num_extent_pages(buf); const int first_page_part = min_t(u32, PAGE_SIZE, fs_info->nodesize); SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); char *kaddr; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index d296483d148f..268ce58d4569 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -6019,6 +6019,9 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range) mutex_lock(&fs_info->fs_devices->device_list_mutex); devices = &fs_info->fs_devices->devices; list_for_each_entry(device, devices, dev_list) { + if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) + continue; + ret = btrfs_trim_free_extents(device, &group_trimmed); if (ret) { dev_failed++; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index e6eb20987351..06f9f167222b 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2271,13 +2271,127 @@ static blk_status_t btrfs_submit_bio_start(struct inode *inode, struct bio *bio, return btrfs_csum_one_bio(BTRFS_I(inode), bio, 0, 0); } +/* + * Split an extent_map at [start, start + len] + * + * This function is intended to be used only for extract_ordered_extent(). 
+ */ +static int split_zoned_em(struct btrfs_inode *inode, u64 start, u64 len, + u64 pre, u64 post) +{ + struct extent_map_tree *em_tree = &inode->extent_tree; + struct extent_map *em; + struct extent_map *split_pre = NULL; + struct extent_map *split_mid = NULL; + struct extent_map *split_post = NULL; + int ret = 0; + int modified; + unsigned long flags; + + /* Sanity check */ + if (pre == 0 && post == 0) + return 0; + + split_pre = alloc_extent_map(); + if (pre) + split_mid = alloc_extent_map(); + if (post) + split_post = alloc_extent_map(); + if (!split_pre || (pre && !split_mid) || (post && !split_post)) { + ret = -ENOMEM; + goto out; + } + + ASSERT(pre + post < len); + + lock_extent(&inode->io_tree, start, start + len - 1); + write_lock(&em_tree->lock); + em = lookup_extent_mapping(em_tree, start, len); + if (!em) { + ret = -EIO; + goto out_unlock; + } + + ASSERT(em->len == len); + ASSERT(!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)); + ASSERT(em->block_start < EXTENT_MAP_LAST_BYTE); + + flags = em->flags; + clear_bit(EXTENT_FLAG_PINNED, &em->flags); + clear_bit(EXTENT_FLAG_LOGGING, &flags); + modified = !list_empty(&em->list); + + /* First, replace the em with a new extent_map starting from * em->start */ + split_pre->start = em->start; + split_pre->len = (pre ? pre : em->len - post); + split_pre->orig_start = split_pre->start; + split_pre->block_start = em->block_start; + split_pre->block_len = split_pre->len; + split_pre->orig_block_len = split_pre->block_len; + split_pre->ram_bytes = split_pre->len; + split_pre->flags = flags; + split_pre->compress_type = em->compress_type; + split_pre->generation = em->generation; + + replace_extent_mapping(em_tree, em, split_pre, modified); + + /* + * Now we only have an extent_map at: + * [em->start, em->start + pre] if pre != 0 + * [em->start, em->start + em->len - post] if pre == 0 + */ + + if (pre) { + /* Insert the middle extent_map */ + split_mid->start = em->start + pre; + split_mid->len = em->len - pre - post; + split_mid->orig_start = split_mid->start; + split_mid->block_start = em->block_start + pre; + split_mid->block_len = split_mid->len; + split_mid->orig_block_len = split_mid->block_len; + split_mid->ram_bytes = split_mid->len; + split_mid->flags = flags; + split_mid->compress_type = em->compress_type; + split_mid->generation = em->generation; + add_extent_mapping(em_tree, split_mid, modified); + } + + if (post) { + split_post->start = em->start + em->len - post; + split_post->len = post; + split_post->orig_start = split_post->start; + split_post->block_start = em->block_start + em->len - post; + split_post->block_len = split_post->len; + split_post->orig_block_len = split_post->block_len; + split_post->ram_bytes = split_post->len; + split_post->flags = flags; + split_post->compress_type = em->compress_type; + split_post->generation = em->generation; + add_extent_mapping(em_tree, split_post, modified); + } + + /* Once for us */ + free_extent_map(em); + /* Once for the tree */ + free_extent_map(em); + +out_unlock: + write_unlock(&em_tree->lock); + unlock_extent(&inode->io_tree, start, start + len - 1); +out: + free_extent_map(split_pre); + free_extent_map(split_mid); + free_extent_map(split_post); + + return ret; +} + static blk_status_t extract_ordered_extent(struct btrfs_inode *inode, struct bio *bio, loff_t file_offset) { struct btrfs_ordered_extent *ordered; - struct extent_map *em = NULL, *em_new = NULL; - struct extent_map_tree *em_tree = &inode->extent_tree; u64 start = (u64)bio->bi_iter.bi_sector << SECTOR_SHIFT; + u64 
file_len; u64 len = bio->bi_iter.bi_size; u64 end = start + len; u64 ordered_end; @@ -2317,41 +2431,16 @@ static blk_status_t extract_ordered_extent(struct btrfs_inode *inode, goto out; } + file_len = ordered->num_bytes; pre = start - ordered->disk_bytenr; post = ordered_end - end; ret = btrfs_split_ordered_extent(ordered, pre, post); if (ret) goto out; - - read_lock(&em_tree->lock); - em = lookup_extent_mapping(em_tree, ordered->file_offset, len); - if (!em) { - read_unlock(&em_tree->lock); - ret = -EIO; - goto out; - } - read_unlock(&em_tree->lock); - - ASSERT(!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)); - /* - * We cannot reuse em_new here but have to create a new one, as - * unpin_extent_cache() expects the start of the extent map to be the - * logical offset of the file, which does not hold true anymore after - * splitting. - */ - em_new = create_io_em(inode, em->start + pre, len, - em->start + pre, em->block_start + pre, len, - len, len, BTRFS_COMPRESS_NONE, - BTRFS_ORDERED_REGULAR); - if (IS_ERR(em_new)) { - ret = PTR_ERR(em_new); - goto out; - } - free_extent_map(em_new); + ret = split_zoned_em(inode, file_offset, file_len, pre, post); out: - free_extent_map(em); btrfs_put_ordered_extent(ordered); return errno_to_blk_status(ret); @@ -2903,7 +2992,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) goto out; } - if (ordered_extent->disk) + if (ordered_extent->bdev) btrfs_rewrite_logical_zoned(ordered_extent); btrfs_free_io_failure_record(inode, start, end); @@ -9137,8 +9226,14 @@ static int btrfs_rename_exchange(struct inode *old_dir, bool dest_log_pinned = false; bool need_abort = false; - /* we only allow rename subvolume link between subvolumes */ - if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) + /* + * For non-subvolumes allow exchange only within one subvolume, in the + * same inode namespace. Two subvolumes (represented as directories) can + * be exchanged as they're a logical link and have a fixed inode number.
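+ *
+ * Put differently (a condensed restatement of the check below): the exchange
+ * is allowed when root == dest, or when both old_ino and new_ino equal
+ * BTRFS_FIRST_FREE_OBJECTID (both inodes are subvolume roots); any other
+ * cross-root combination fails with -EXDEV.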
+ */ + if (root != dest && + (old_ino != BTRFS_FIRST_FREE_OBJECTID || + new_ino != BTRFS_FIRST_FREE_OBJECTID)) return -EXDEV; /* close the race window with snapshot create/destroy ioctl */ diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 6eb41b7c0c84..5c0f8481e25e 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -190,8 +190,6 @@ static int __btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset entry->truncated_len = (u64)-1; entry->qgroup_rsv = ret; entry->physical = (u64)-1; - entry->disk = NULL; - entry->partno = (u8)-1; ASSERT(type == BTRFS_ORDERED_REGULAR || type == BTRFS_ORDERED_NOCOW || diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index 566472004edd..b2d88aba8420 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -145,8 +145,7 @@ struct btrfs_ordered_extent { * command in a workqueue context */ u64 physical; - struct gendisk *disk; - u8 partno; + struct block_device *bdev; }; /* diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 07ec06d4e972..0fa121171ca1 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -1704,17 +1704,39 @@ int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info, return 0; } -int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info, +int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans, struct btrfs_qgroup_extent_record *qrecord) { struct ulist *old_root; u64 bytenr = qrecord->bytenr; int ret; - ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &old_root, false); + /* + * We are always called in a context where we are already holding a + * transaction handle. Often we are called when adding a data delayed + * reference from btrfs_truncate_inode_items() (truncating or unlinking), + * in which case we will be holding a write lock on extent buffer from a + * subvolume tree. In this case we can't allow btrfs_find_all_roots() to + * acquire fs_info->commit_root_sem, because that is a higher level lock + * that must be acquired before locking any extent buffers. + * + * So we want btrfs_find_all_roots() to not acquire the commit_root_sem + * but we can't pass it a non-NULL transaction handle, because otherwise + * it would not use commit roots and would lock extent buffers, causing + * a deadlock if it ends up trying to read lock the same extent buffer + * that was previously write locked at btrfs_truncate_inode_items(). + * + * So pass a NULL transaction handle to btrfs_find_all_roots() and + * explicitly tell it to not acquire the commit_root_sem - if we are + * holding a transaction handle we don't need its protection. 
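+ *
+ * A sketch of the two orderings being avoided (per the reasoning above):
+ *
+ *   hold eb write lock -> btrfs_find_all_roots(trans != NULL): may try to
+ *   read lock the same extent buffer -> deadlock
+ *
+ *   hold eb write lock -> btrfs_find_all_roots(NULL, ...): would acquire
+ *   commit_root_sem after an extent buffer lock -> lock order inversion
+ *
+ * Hence the call below passes a NULL handle and sets the added boolean
+ * argument to skip taking the commit_root_sem.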
+ */ + ASSERT(trans != NULL); + + ret = btrfs_find_all_roots(NULL, trans->fs_info, bytenr, 0, &old_root, + false, true); if (ret < 0) { - fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; - btrfs_warn(fs_info, + trans->fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; + btrfs_warn(trans->fs_info, "error accounting new delayed refs extent (err code: %d), quota inconsistent", ret); return 0; @@ -1758,7 +1780,7 @@ int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr, kfree(record); return 0; } - return btrfs_qgroup_trace_extent_post(fs_info, record); + return btrfs_qgroup_trace_extent_post(trans, record); } int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans, @@ -2629,7 +2651,7 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans) /* Search commit root to find old_roots */ ret = btrfs_find_all_roots(NULL, fs_info, record->bytenr, 0, - &record->old_roots, false); + &record->old_roots, false, false); if (ret < 0) goto cleanup; } @@ -2645,7 +2667,7 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans) * current root. It's safe inside commit_transaction(). */ ret = btrfs_find_all_roots(trans, fs_info, - record->bytenr, BTRFS_SEQ_LAST, &new_roots, false); + record->bytenr, BTRFS_SEQ_LAST, &new_roots, false, false); if (ret < 0) goto cleanup; if (qgroup_to_skip) { @@ -3179,7 +3201,7 @@ static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans, num_bytes = found.offset; ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0, - &roots, false); + &roots, false, false); if (ret < 0) goto out; /* For rescan, just pass old_roots as NULL */ diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h index 7283e4f549af..880e9df0dac1 100644 --- a/fs/btrfs/qgroup.h +++ b/fs/btrfs/qgroup.h @@ -298,7 +298,7 @@ int btrfs_qgroup_trace_extent_nolock( * using current root, then we can move all expensive backref walk out of * transaction committing, but not now as qgroup accounting will be wrong again. */ -int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info, +int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans, struct btrfs_qgroup_extent_record *qrecord); /* diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c index f3137285a9e2..98b5aaba46f1 100644 --- a/fs/btrfs/tests/qgroup-tests.c +++ b/fs/btrfs/tests/qgroup-tests.c @@ -224,7 +224,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root, * quota. 
*/ ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots, - false); + false, false); if (ret) { ulist_free(old_roots); test_err("couldn't find old roots: %d", ret); @@ -237,7 +237,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root, return ret; ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots, - false); + false, false); if (ret) { ulist_free(old_roots); ulist_free(new_roots); @@ -261,7 +261,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root, new_roots = NULL; ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots, - false); + false, false); if (ret) { ulist_free(old_roots); test_err("couldn't find old roots: %d", ret); @@ -273,7 +273,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root, return -EINVAL; ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots, - false); + false, false); if (ret) { ulist_free(old_roots); ulist_free(new_roots); @@ -325,7 +325,7 @@ static int test_multiple_refs(struct btrfs_root *root, } ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots, - false); + false, false); if (ret) { ulist_free(old_roots); test_err("couldn't find old roots: %d", ret); @@ -338,7 +338,7 @@ static int test_multiple_refs(struct btrfs_root *root, return ret; ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots, - false); + false, false); if (ret) { ulist_free(old_roots); ulist_free(new_roots); @@ -360,7 +360,7 @@ static int test_multiple_refs(struct btrfs_root *root, } ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots, - false); + false, false); if (ret) { ulist_free(old_roots); test_err("couldn't find old roots: %d", ret); @@ -373,7 +373,7 @@ static int test_multiple_refs(struct btrfs_root *root, return ret; ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots, - false); + false, false); if (ret) { ulist_free(old_roots); ulist_free(new_roots); @@ -401,7 +401,7 @@ static int test_multiple_refs(struct btrfs_root *root, } ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots, - false); + false, false); if (ret) { ulist_free(old_roots); test_err("couldn't find old roots: %d", ret); @@ -414,7 +414,7 @@ static int test_multiple_refs(struct btrfs_root *root, return ret; ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots, - false); + false, false); if (ret) { ulist_free(old_roots); ulist_free(new_roots); diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 50318231c1a8..14b9fdc8aaa9 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -254,23 +254,21 @@ static inline int extwriter_counter_read(struct btrfs_transaction *trans) } /* - * To be called after all the new block groups attached to the transaction - * handle have been created (btrfs_create_pending_block_groups()). + * To be called after doing the chunk btree updates right after allocating a new + * chunk (after btrfs_chunk_alloc_add_chunk_item() is called), when removing a + * chunk after all chunk btree updates and after finishing the second phase of + * chunk allocation (btrfs_create_pending_block_groups()) in case some block + * group had its chunk item insertion delayed to the second phase. 
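+ *
+ * A sketch of the pairing, based on the callers visible in this patch
+ * (btrfs_remove_chunk() for example):
+ *
+ *   check_system_chunk(trans, type);            reserve chunk btree space
+ *   ... chunk btree updates and/or deletions ...
+ *   btrfs_trans_release_chunk_metadata(trans);  release what is left over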
*/ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans) { struct btrfs_fs_info *fs_info = trans->fs_info; - struct btrfs_transaction *cur_trans = trans->transaction; if (!trans->chunk_bytes_reserved) return; - WARN_ON_ONCE(!list_empty(&trans->new_bgs)); - btrfs_block_rsv_release(fs_info, &fs_info->chunk_block_rsv, trans->chunk_bytes_reserved, NULL); - atomic64_sub(trans->chunk_bytes_reserved, &cur_trans->chunk_bytes_reserved); - cond_wake_up(&cur_trans->chunk_reserve_wait); trans->chunk_bytes_reserved = 0; } @@ -386,8 +384,6 @@ loop: spin_lock_init(&cur_trans->dropped_roots_lock); INIT_LIST_HEAD(&cur_trans->releasing_ebs); spin_lock_init(&cur_trans->releasing_ebs_lock); - atomic64_set(&cur_trans->chunk_bytes_reserved, 0); - init_waitqueue_head(&cur_trans->chunk_reserve_wait); list_add_tail(&cur_trans->list, &fs_info->trans_list); extent_io_tree_init(fs_info, &cur_trans->dirty_pages, IO_TREE_TRANS_DIRTY_PAGES, fs_info->btree_inode); @@ -701,7 +697,6 @@ again: h->fs_info = root->fs_info; h->type = type; - h->can_flush_pending_bgs = true; INIT_LIST_HEAD(&h->new_bgs); smp_mb(); diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 07d76029f598..ba45065f9451 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -96,13 +96,6 @@ struct btrfs_transaction { spinlock_t releasing_ebs_lock; struct list_head releasing_ebs; - - /* - * The number of bytes currently reserved, by all transaction handles - * attached to this transaction, for metadata extents of the chunk tree. - */ - atomic64_t chunk_bytes_reserved; - wait_queue_head_t chunk_reserve_wait; }; #define __TRANS_FREEZABLE (1U << 0) @@ -139,7 +132,7 @@ struct btrfs_trans_handle { short aborted; bool adding_csums; bool allocating_chunk; - bool can_flush_pending_bgs; + bool removing_chunk; bool reloc_reserved; bool in_fsync; struct btrfs_root *root; diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index cab451d19547..e6430ac9bbe8 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -3173,7 +3173,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, if (!log_root_tree->node) { ret = btrfs_alloc_log_tree_node(trans, log_root_tree); if (ret) { - mutex_unlock(&fs_info->tree_log_mutex); + mutex_unlock(&fs_info->tree_root->log_mutex); goto out; } } @@ -5526,16 +5526,29 @@ log_extents: spin_lock(&inode->lock); inode->logged_trans = trans->transid; /* - * Don't update last_log_commit if we logged that an inode exists - * after it was loaded to memory (full_sync bit set). - * This is to prevent data loss when we do a write to the inode, - * then the inode gets evicted after all delalloc was flushed, - * then we log it exists (due to a rename for example) and then - * fsync it. This last fsync would do nothing (not logging the - * extents previously written). + * Don't update last_log_commit if we logged that an inode exists. + * We do this for two reasons: + * + * 1) We might have had buffered writes to this inode that were + * flushed and had their ordered extents completed in this + * transaction, but we did not previously log the inode with + * LOG_INODE_ALL. Later the inode was evicted and after that + * it was loaded again and this LOG_INODE_EXISTS log operation + * happened. We must make sure that if an explicit fsync against + * the inode is performed later, it logs the new extents, an + * updated inode item, etc, and syncs the log. The same logic + * applies to direct IO writes instead of buffered writes. 
+ * + * 2) When we log the inode with LOG_INODE_EXISTS, its inode item + * is logged with an i_size of 0 or whatever value was logged + * before. If later the i_size of the inode is increased by a + * truncate operation, the log is synced through an fsync of + * some other inode and then finally an explicit fsync against + * this inode is made, we must make sure this fsync logs the + * inode with the new i_size, the hole between old i_size and + * the new i_size, and syncs the log. */ - if (inode_only != LOG_INODE_EXISTS || - !test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags)) + if (inode_only != LOG_INODE_EXISTS) inode->last_log_commit = inode->last_sub_trans; spin_unlock(&inode->lock); } @@ -6490,8 +6503,8 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans, * if this inode hasn't been logged and directory we're renaming it * from hasn't been logged, we don't need to log it */ - if (inode->logged_trans < trans->transid && - (!old_dir || old_dir->logged_trans < trans->transid)) + if (!inode_logged(trans, inode) && + (!old_dir || !inode_logged(trans, old_dir))) return; /* diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 807502cd6510..70f94b75f25a 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1078,6 +1078,7 @@ static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { list_del_init(&device->dev_alloc_list); clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); + fs_devices->rw_devices--; } list_del_init(&device->dev_list); fs_devices->num_devices--; @@ -1745,19 +1746,14 @@ again: extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent); } else { - btrfs_handle_fs_error(fs_info, ret, "Slot search failed"); goto out; } *dev_extent_len = btrfs_dev_extent_length(leaf, extent); ret = btrfs_del_item(trans, root, path); - if (ret) { - btrfs_handle_fs_error(fs_info, ret, - "Failed to remove dev extent item"); - } else { + if (ret == 0) set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags); - } out: btrfs_free_path(path); return ret; @@ -2942,7 +2938,7 @@ static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) u32 cur; struct btrfs_key key; - mutex_lock(&fs_info->chunk_mutex); + lockdep_assert_held(&fs_info->chunk_mutex); array_size = btrfs_super_sys_array_size(super_copy); ptr = super_copy->sys_chunk_array; @@ -2972,7 +2968,6 @@ static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) cur += len; } } - mutex_unlock(&fs_info->chunk_mutex); return ret; } @@ -3012,6 +3007,29 @@ struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info, return em; } +static int remove_chunk_item(struct btrfs_trans_handle *trans, + struct map_lookup *map, u64 chunk_offset) +{ + int i; + + /* + * Removing chunk items and updating the device items in the chunks btree + * requires holding the chunk_mutex. + * See the comment at btrfs_chunk_alloc() for the details. 
+ */ + lockdep_assert_held(&trans->fs_info->chunk_mutex); + + for (i = 0; i < map->num_stripes; i++) { + int ret; + + ret = btrfs_update_device(trans, map->stripes[i].dev); + if (ret) + return ret; + } + + return btrfs_free_chunk(trans, chunk_offset); +} + int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) { struct btrfs_fs_info *fs_info = trans->fs_info; @@ -3032,14 +3050,16 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) return PTR_ERR(em); } map = em->map_lookup; - mutex_lock(&fs_info->chunk_mutex); - check_system_chunk(trans, map->type); - mutex_unlock(&fs_info->chunk_mutex); /* - * Take the device list mutex to prevent races with the final phase of - * a device replace operation that replaces the device object associated - * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()). + * First delete the device extent items from the devices btree. + * We take the device_list_mutex to avoid racing with the finishing phase + * of a device replace operation. See the comment below before acquiring + * fs_info->chunk_mutex. Note that here we do not acquire the chunk_mutex + * because that can result in a deadlock when deleting the device extent + * items from the devices btree - COWing an extent buffer from the btree + * may result in allocating a new metadata chunk, which would attempt to + * lock fs_info->chunk_mutex again. */ mutex_lock(&fs_devices->device_list_mutex); for (i = 0; i < map->num_stripes; i++) { @@ -3061,18 +3081,73 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) btrfs_clear_space_info_full(fs_info); mutex_unlock(&fs_info->chunk_mutex); } + } + mutex_unlock(&fs_devices->device_list_mutex); - ret = btrfs_update_device(trans, device); + /* + * We acquire fs_info->chunk_mutex for 2 reasons: + * + * 1) Just like with the first phase of the chunk allocation, we must + * reserve system space, do all chunk btree updates and deletions, and + * update the system chunk array in the superblock while holding this + * mutex. This is for similar reasons as explained in the comment at + * the top of btrfs_chunk_alloc(); + * + * 2) Prevent races with the final phase of a device replace operation + * that replaces the device object associated with the map's stripes, + * because the device object's id can change at any time during that + * final phase of the device replace operation + * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the + * replaced device and then see it with an ID of + * BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating + * the device item, which does not exist on the chunk btree. + * The finishing phase of device replace acquires both the + * device_list_mutex and the chunk_mutex, in that order, so we are + * safe by just acquiring the chunk_mutex. + */ + trans->removing_chunk = true; + mutex_lock(&fs_info->chunk_mutex); + + check_system_chunk(trans, map->type); + + ret = remove_chunk_item(trans, map, chunk_offset); + /* + * Normally we should not get -ENOSPC since we reserved space before + * through the call to check_system_chunk(). + * + * Despite our system space_info having enough free space, we may not + * be able to allocate extents from its block groups, because all have + * an incompatible profile, which will force us to allocate a new system + * block group with the right profile, or right after we called + * check_system_chunk() above, a scrub turned the only system block group + * with enough free space into RO mode.
+ * This is explained with more detail at do_chunk_alloc(). + * + * So if we get -ENOSPC, allocate a new system chunk and retry once. + */ + if (ret == -ENOSPC) { + const u64 sys_flags = btrfs_system_alloc_profile(fs_info); + struct btrfs_block_group *sys_bg; + + sys_bg = btrfs_alloc_chunk(trans, sys_flags); + if (IS_ERR(sys_bg)) { + ret = PTR_ERR(sys_bg); + btrfs_abort_transaction(trans, ret); + goto out; + } + + ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg); if (ret) { - mutex_unlock(&fs_devices->device_list_mutex); btrfs_abort_transaction(trans, ret); goto out; } - } - mutex_unlock(&fs_devices->device_list_mutex); - ret = btrfs_free_chunk(trans, chunk_offset); - if (ret) { + ret = remove_chunk_item(trans, map, chunk_offset); + if (ret) { + btrfs_abort_transaction(trans, ret); + goto out; + } + } else if (ret) { btrfs_abort_transaction(trans, ret); goto out; } @@ -3087,6 +3162,15 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) } } + mutex_unlock(&fs_info->chunk_mutex); + trans->removing_chunk = false; + + /* + * We are done with chunk btree updates and deletions, so release the + * system space we previously reserved (with check_system_chunk()). + */ + btrfs_trans_release_chunk_metadata(trans); + ret = btrfs_remove_block_group(trans, chunk_offset, em); if (ret) { btrfs_abort_transaction(trans, ret); @@ -3094,6 +3178,10 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) } out: + if (trans->removing_chunk) { + mutex_unlock(&fs_info->chunk_mutex); + trans->removing_chunk = false; + } /* once for us */ free_extent_map(em); return ret; @@ -4860,13 +4948,12 @@ static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info, u32 array_size; u8 *ptr; - mutex_lock(&fs_info->chunk_mutex); + lockdep_assert_held(&fs_info->chunk_mutex); + array_size = btrfs_super_sys_array_size(super_copy); if (array_size + item_size + sizeof(disk_key) - > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) { - mutex_unlock(&fs_info->chunk_mutex); + > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) return -EFBIG; - } ptr = super_copy->sys_chunk_array + array_size; btrfs_cpu_key_to_disk(&disk_key, key); @@ -4875,7 +4962,6 @@ static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info, memcpy(ptr, chunk, item_size); item_size += sizeof(disk_key); btrfs_set_super_sys_array_size(super_copy, array_size + item_size); - mutex_unlock(&fs_info->chunk_mutex); return 0; } @@ -5225,13 +5311,14 @@ static int decide_stripe_size(struct btrfs_fs_devices *fs_devices, } } -static int create_chunk(struct btrfs_trans_handle *trans, +static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans, struct alloc_chunk_ctl *ctl, struct btrfs_device_info *devices_info) { struct btrfs_fs_info *info = trans->fs_info; struct map_lookup *map = NULL; struct extent_map_tree *em_tree; + struct btrfs_block_group *block_group; struct extent_map *em; u64 start = ctl->start; u64 type = ctl->type; @@ -5241,7 +5328,7 @@ static int create_chunk(struct btrfs_trans_handle *trans, map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS); if (!map) - return -ENOMEM; + return ERR_PTR(-ENOMEM); map->num_stripes = ctl->num_stripes; for (i = 0; i < ctl->ndevs; ++i) { @@ -5263,7 +5350,7 @@ static int create_chunk(struct btrfs_trans_handle *trans, em = alloc_extent_map(); if (!em) { kfree(map); - return -ENOMEM; + return ERR_PTR(-ENOMEM); } set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); em->map_lookup = map; @@ -5279,12 +5366,12 @@ static int create_chunk(struct btrfs_trans_handle *trans, if (ret) { 
write_unlock(&em_tree->lock); free_extent_map(em); - return ret; + return ERR_PTR(ret); } write_unlock(&em_tree->lock); - ret = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size); - if (ret) + block_group = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size); + if (IS_ERR(block_group)) goto error_del_extent; for (i = 0; i < map->num_stripes; i++) { @@ -5304,7 +5391,7 @@ static int create_chunk(struct btrfs_trans_handle *trans, check_raid56_incompat_flag(info, type); check_raid1c34_incompat_flag(info, type); - return 0; + return block_group; error_del_extent: write_lock(&em_tree->lock); @@ -5316,34 +5403,36 @@ error_del_extent: /* One for the tree reference */ free_extent_map(em); - return ret; + return block_group; } -int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type) +struct btrfs_block_group *btrfs_alloc_chunk(struct btrfs_trans_handle *trans, + u64 type) { struct btrfs_fs_info *info = trans->fs_info; struct btrfs_fs_devices *fs_devices = info->fs_devices; struct btrfs_device_info *devices_info = NULL; struct alloc_chunk_ctl ctl; + struct btrfs_block_group *block_group; int ret; lockdep_assert_held(&info->chunk_mutex); if (!alloc_profile_is_valid(type, 0)) { ASSERT(0); - return -EINVAL; + return ERR_PTR(-EINVAL); } if (list_empty(&fs_devices->alloc_list)) { if (btrfs_test_opt(info, ENOSPC_DEBUG)) btrfs_debug(info, "%s: no writable device", __func__); - return -ENOSPC; + return ERR_PTR(-ENOSPC); } if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { btrfs_err(info, "invalid chunk type 0x%llx requested", type); ASSERT(0); - return -EINVAL; + return ERR_PTR(-EINVAL); } ctl.start = find_next_chunk(info); @@ -5353,46 +5442,43 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type) devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info), GFP_NOFS); if (!devices_info) - return -ENOMEM; + return ERR_PTR(-ENOMEM); ret = gather_device_info(fs_devices, &ctl, devices_info); - if (ret < 0) + if (ret < 0) { + block_group = ERR_PTR(ret); goto out; + } ret = decide_stripe_size(fs_devices, &ctl, devices_info); - if (ret < 0) + if (ret < 0) { + block_group = ERR_PTR(ret); goto out; + } - ret = create_chunk(trans, &ctl, devices_info); + block_group = create_chunk(trans, &ctl, devices_info); out: kfree(devices_info); - return ret; + return block_group; } /* - * Chunk allocation falls into two parts. The first part does work - * that makes the new allocated chunk usable, but does not do any operation - * that modifies the chunk tree. The second part does the work that - * requires modifying the chunk tree. This division is important for the - * bootstrap process of adding storage to a seed btrfs. + * This function, btrfs_finish_chunk_alloc(), belongs to phase 2. + * + * See the comment at btrfs_chunk_alloc() for details about the chunk allocation + * phases. 
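+ *
+ * Condensed two-phase sketch, derived only from the comments in this patch:
+ * phase 1 creates the chunk mapping and block group and, except for system
+ * chunks allocated while modifying the chunk btree, inserts the chunk item
+ * (btrfs_chunk_alloc_add_chunk_item()); phase 2 inserts the device extent
+ * items (this function) plus any chunk items whose insertion was delayed.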
*/ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans, u64 chunk_offset, u64 chunk_size) { struct btrfs_fs_info *fs_info = trans->fs_info; - struct btrfs_root *extent_root = fs_info->extent_root; - struct btrfs_root *chunk_root = fs_info->chunk_root; - struct btrfs_key key; struct btrfs_device *device; - struct btrfs_chunk *chunk; - struct btrfs_stripe *stripe; struct extent_map *em; struct map_lookup *map; - size_t item_size; u64 dev_offset; u64 stripe_size; - int i = 0; + int i; int ret = 0; em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size); @@ -5400,53 +5486,117 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans, return PTR_ERR(em); map = em->map_lookup; - item_size = btrfs_chunk_item_size(map->num_stripes); stripe_size = em->orig_block_len; - chunk = kzalloc(item_size, GFP_NOFS); - if (!chunk) { - ret = -ENOMEM; - goto out; - } - /* * Take the device list mutex to prevent races with the final phase of * a device replace operation that replaces the device object associated * with the map's stripes, because the device object's id can change * at any time during that final phase of the device replace operation - * (dev-replace.c:btrfs_dev_replace_finishing()). + * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the + * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID, + * resulting in persisting a device extent item with such ID. */ mutex_lock(&fs_info->fs_devices->device_list_mutex); for (i = 0; i < map->num_stripes; i++) { device = map->stripes[i].dev; dev_offset = map->stripes[i].physical; - ret = btrfs_update_device(trans, device); - if (ret) - break; ret = btrfs_alloc_dev_extent(trans, device, chunk_offset, dev_offset, stripe_size); if (ret) break; } - if (ret) { - mutex_unlock(&fs_info->fs_devices->device_list_mutex); + mutex_unlock(&fs_info->fs_devices->device_list_mutex); + + free_extent_map(em); + return ret; +} + +/* + * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to + * phase 1 of chunk allocation. It belongs to phase 2 only when allocating system + * chunks. + * + * See the comment at btrfs_chunk_alloc() for details about the chunk allocation + * phases. + */ +int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans, + struct btrfs_block_group *bg) +{ + struct btrfs_fs_info *fs_info = trans->fs_info; + struct btrfs_root *extent_root = fs_info->extent_root; + struct btrfs_root *chunk_root = fs_info->chunk_root; + struct btrfs_key key; + struct btrfs_chunk *chunk; + struct btrfs_stripe *stripe; + struct extent_map *em; + struct map_lookup *map; + size_t item_size; + int i; + int ret; + + /* + * We take the chunk_mutex for 2 reasons: + * + * 1) Updates and insertions in the chunk btree must be done while holding + * the chunk_mutex, as well as updating the system chunk array in the + * superblock. See the comment on top of btrfs_chunk_alloc() for the + * details; + * + * 2) To prevent races with the final phase of a device replace operation + * that replaces the device object associated with the map's stripes, + * because the device object's id can change at any time during that + * final phase of the device replace operation + * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the + * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID, + * which would cause a failure when updating the device item, which does + * not exist, or persisting a stripe of the chunk item with such ID.
+ * Here we can't use the device_list_mutex because our caller already + * has locked the chunk_mutex, and the final phase of device replace + * acquires both mutexes - first the device_list_mutex and then the + * chunk_mutex. Using any of those two mutexes protects us from a + * concurrent device replace. + */ + lockdep_assert_held(&fs_info->chunk_mutex); + + em = btrfs_get_chunk_map(fs_info, bg->start, bg->length); + if (IS_ERR(em)) { + ret = PTR_ERR(em); + btrfs_abort_transaction(trans, ret); + return ret; + } + + map = em->map_lookup; + item_size = btrfs_chunk_item_size(map->num_stripes); + + chunk = kzalloc(item_size, GFP_NOFS); + if (!chunk) { + ret = -ENOMEM; + btrfs_abort_transaction(trans, ret); goto out; } + for (i = 0; i < map->num_stripes; i++) { + struct btrfs_device *device = map->stripes[i].dev; + + ret = btrfs_update_device(trans, device); + if (ret) + goto out; + } + stripe = &chunk->stripe; for (i = 0; i < map->num_stripes; i++) { - device = map->stripes[i].dev; - dev_offset = map->stripes[i].physical; + struct btrfs_device *device = map->stripes[i].dev; + const u64 dev_offset = map->stripes[i].physical; btrfs_set_stack_stripe_devid(stripe, device->devid); btrfs_set_stack_stripe_offset(stripe, dev_offset); memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE); stripe++; } - mutex_unlock(&fs_info->fs_devices->device_list_mutex); - btrfs_set_stack_chunk_length(chunk, chunk_size); + btrfs_set_stack_chunk_length(chunk, bg->length); btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid); btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len); btrfs_set_stack_chunk_type(chunk, map->type); @@ -5458,15 +5608,18 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans, key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; key.type = BTRFS_CHUNK_ITEM_KEY; - key.offset = chunk_offset; + key.offset = bg->start; ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size); - if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) { - /* - * TODO: Cleanup of inserted chunk root in case of - * failure. - */ + if (ret) + goto out; + + bg->chunk_item_inserted = 1; + + if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size); + if (ret) + goto out; } out: @@ -5479,16 +5632,41 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans) { struct btrfs_fs_info *fs_info = trans->fs_info; u64 alloc_profile; - int ret; + struct btrfs_block_group *meta_bg; + struct btrfs_block_group *sys_bg; + + /* + * When adding a new device for sprouting, the seed device is read-only + * so we must first allocate a metadata and a system chunk. But before + * adding the block group items to the extent, device and chunk btrees, + * we must first: + * + * 1) Create both chunks without doing any changes to the btrees, as + * otherwise we would get -ENOSPC since the block groups from the + * seed device are read-only; + * + * 2) Add the device item for the new sprout device - finishing the setup + * of a new block group requires updating the device item in the chunk + * btree, so it must exist when we attempt to do it. The previous step + * ensures this does not fail with -ENOSPC. + * + * After that we can add the block group items to their btrees: + * update existing device item in the chunk btree, add a new block group + * item to the extent btree, add a new chunk item to the chunk btree and + * finally add the new device extent items to the devices btree. 
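+ *
+ * In short, a sketch of the ordering described above:
+ *
+ *   1) btrfs_alloc_chunk(metadata) and btrfs_alloc_chunk(system), in-memory only
+ *   2) add the device item for the new sprout device
+ *   3) phase 2: block group items, chunk items and device extent items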
+ */ alloc_profile = btrfs_metadata_alloc_profile(fs_info); - ret = btrfs_alloc_chunk(trans, alloc_profile); - if (ret) - return ret; + meta_bg = btrfs_alloc_chunk(trans, alloc_profile); + if (IS_ERR(meta_bg)) + return PTR_ERR(meta_bg); alloc_profile = btrfs_system_alloc_profile(fs_info); - ret = btrfs_alloc_chunk(trans, alloc_profile); - return ret; + sys_bg = btrfs_alloc_chunk(trans, alloc_profile); + if (IS_ERR(sys_bg)) + return PTR_ERR(sys_bg); + + return 0; } static inline int btrfs_chunk_max_errors(struct map_lookup *map) @@ -7415,10 +7593,18 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info) total_dev++; } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) { struct btrfs_chunk *chunk; + + /* + * We are only called at mount time, so no need to take + * fs_info->chunk_mutex. Plus, to avoid lockdep warnings, + * we always lock first fs_info->chunk_mutex before + * acquiring any locks on the chunk tree. This is a + * requirement for chunk allocation, see the comment on + * top of btrfs_chunk_alloc() for details. + */ + ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags)); chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); - mutex_lock(&fs_info->chunk_mutex); ret = read_one_chunk(&found_key, leaf, chunk); - mutex_unlock(&fs_info->chunk_mutex); if (ret) goto error; } diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index c7fc7caf575c..55a8ba244716 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -450,7 +450,8 @@ int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *map, struct btrfs_io_geometry *io_geom); int btrfs_read_sys_array(struct btrfs_fs_info *fs_info); int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info); -int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type); +struct btrfs_block_group *btrfs_alloc_chunk(struct btrfs_trans_handle *trans, + u64 type); void btrfs_mapping_tree_free(struct extent_map_tree *tree); blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio, int mirror_num); @@ -509,6 +510,8 @@ unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info, u64 logical); int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans, u64 chunk_offset, u64 chunk_size); +int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans, + struct btrfs_block_group *bg); int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset); struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info, u64 logical, u64 length); diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c index 297c0b1c0634..907c2cc45c9c 100644 --- a/fs/btrfs/zoned.c +++ b/fs/btrfs/zoned.c @@ -1349,8 +1349,7 @@ void btrfs_record_physical_zoned(struct inode *inode, u64 file_offset, return; ordered->physical = physical; - ordered->disk = bio->bi_bdev->bd_disk; - ordered->partno = bio->bi_bdev->bd_partno; + ordered->bdev = bio->bi_bdev; btrfs_put_ordered_extent(ordered); } @@ -1362,18 +1361,16 @@ void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered) struct extent_map_tree *em_tree; struct extent_map *em; struct btrfs_ordered_sum *sum; - struct block_device *bdev; u64 orig_logical = ordered->disk_bytenr; u64 *logical = NULL; int nr, stripe_len; /* Zoned devices should not have partitions. 
So, we can assume it is 0 */ - ASSERT(ordered->partno == 0); - bdev = bdgrab(ordered->disk->part0); - if (WARN_ON(!bdev)) + ASSERT(!bdev_is_partition(ordered->bdev)); + if (WARN_ON(!ordered->bdev)) return; - if (WARN_ON(btrfs_rmap_block(fs_info, orig_logical, bdev, + if (WARN_ON(btrfs_rmap_block(fs_info, orig_logical, ordered->bdev, ordered->physical, &logical, &nr, &stripe_len))) goto out; @@ -1402,7 +1399,6 @@ void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered) out: kfree(logical); - bdput(bdev); } bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info, diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 7bdefd0c789a..2a2900903f8c 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -4150,11 +4150,19 @@ bad: /* * Delayed work handler to process end of delayed cap release LRU list. + * + * If new caps are added to the list while processing it, these won't get + * processed in this run. In this case, the ci->i_hold_caps_max will be + * returned so that the work can be scheduled accordingly. */ -void ceph_check_delayed_caps(struct ceph_mds_client *mdsc) +unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc) { struct inode *inode; struct ceph_inode_info *ci; + struct ceph_mount_options *opt = mdsc->fsc->mount_options; + unsigned long delay_max = opt->caps_wanted_delay_max * HZ; + unsigned long loop_start = jiffies; + unsigned long delay = 0; dout("check_delayed_caps\n"); spin_lock(&mdsc->cap_delay_lock); @@ -4162,6 +4170,11 @@ void ceph_check_delayed_caps(struct ceph_mds_client *mdsc) ci = list_first_entry(&mdsc->cap_delay_list, struct ceph_inode_info, i_cap_delay_list); + if (time_before(loop_start, ci->i_hold_caps_max - delay_max)) { + dout("%s caps added recently. Exiting loop", __func__); + delay = ci->i_hold_caps_max; + break; + } if ((ci->i_ceph_flags & CEPH_I_FLUSH) == 0 && time_before(jiffies, ci->i_hold_caps_max)) break; @@ -4177,6 +4190,8 @@ void ceph_check_delayed_caps(struct ceph_mds_client *mdsc) } } spin_unlock(&mdsc->cap_delay_lock); + + return delay; } /* diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index a818213c972f..afdc20213876 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -4456,7 +4456,7 @@ bool check_session_state(struct ceph_mds_session *s) break; case CEPH_MDS_SESSION_CLOSING: /* Should never reach this when we're unmounting */ - WARN_ON_ONCE(true); + WARN_ON_ONCE(s->s_ttl); fallthrough; case CEPH_MDS_SESSION_NEW: case CEPH_MDS_SESSION_RESTARTING: @@ -4490,22 +4490,29 @@ void inc_session_sequence(struct ceph_mds_session *s) } /* - * delayed work -- periodically trim expired leases, renew caps with mds + * delayed work -- periodically trim expired leases, renew caps with mds. If + * the @delay parameter is set to 0 or if it's more than 5 secs, the default + * workqueue delay value of 5 secs will be used. 
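+ *
+ * For example (hypothetical values): schedule_delayed(mdsc, 0) and
+ * schedule_delayed(mdsc, 30 * HZ) both arm the work roughly 5 seconds out,
+ * while schedule_delayed(mdsc, 2 * HZ) arms it after about 2 seconds
+ * (round_jiffies_relative() may round the exact expiry).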
*/ -static void schedule_delayed(struct ceph_mds_client *mdsc) +static void schedule_delayed(struct ceph_mds_client *mdsc, unsigned long delay) { - int delay = 5; - unsigned hz = round_jiffies_relative(HZ * delay); - schedule_delayed_work(&mdsc->delayed_work, hz); + unsigned long max_delay = HZ * 5; + + /* 5 secs default delay */ + if (!delay || (delay > max_delay)) + delay = max_delay; + schedule_delayed_work(&mdsc->delayed_work, + round_jiffies_relative(delay)); } static void delayed_work(struct work_struct *work) { - int i; struct ceph_mds_client *mdsc = container_of(work, struct ceph_mds_client, delayed_work.work); + unsigned long delay; int renew_interval; int renew_caps; + int i; dout("mdsc delayed_work\n"); @@ -4545,7 +4552,7 @@ static void delayed_work(struct work_struct *work) } mutex_unlock(&mdsc->mutex); - ceph_check_delayed_caps(mdsc); + delay = ceph_check_delayed_caps(mdsc); ceph_queue_cap_reclaim_work(mdsc); @@ -4553,7 +4560,7 @@ static void delayed_work(struct work_struct *work) maybe_recover_session(mdsc); - schedule_delayed(mdsc); + schedule_delayed(mdsc, delay); } int ceph_mdsc_init(struct ceph_fs_client *fsc) @@ -5030,7 +5037,7 @@ void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg) mdsc->mdsmap->m_epoch); mutex_unlock(&mdsc->mutex); - schedule_delayed(mdsc); + schedule_delayed(mdsc, 0); return; bad_unlock: diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index 4ac0606dcbd4..4c6bd1042c94 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c @@ -67,19 +67,19 @@ void ceph_get_snap_realm(struct ceph_mds_client *mdsc, { lockdep_assert_held(&mdsc->snap_rwsem); - dout("get_realm %p %d -> %d\n", realm, - atomic_read(&realm->nref), atomic_read(&realm->nref)+1); /* - * since we _only_ increment realm refs or empty the empty - * list with snap_rwsem held, adjusting the empty list here is - * safe. we do need to protect against concurrent empty list - * additions, however. + * The 0->1 and 1->0 transitions must take the snap_empty_lock + * atomically with the refcount change. Go ahead and bump the + * nref here, unless it's 0, in which case we take the spinlock + * and then do the increment and remove it from the list. */ - if (atomic_inc_return(&realm->nref) == 1) { - spin_lock(&mdsc->snap_empty_lock); + if (atomic_inc_not_zero(&realm->nref)) + return; + + spin_lock(&mdsc->snap_empty_lock); + if (atomic_inc_return(&realm->nref) == 1) list_del_init(&realm->empty_item); - spin_unlock(&mdsc->snap_empty_lock); - } + spin_unlock(&mdsc->snap_empty_lock); } static void __insert_snap_realm(struct rb_root *root, @@ -208,28 +208,28 @@ static void __put_snap_realm(struct ceph_mds_client *mdsc, { lockdep_assert_held_write(&mdsc->snap_rwsem); - dout("__put_snap_realm %llx %p %d -> %d\n", realm->ino, realm, - atomic_read(&realm->nref), atomic_read(&realm->nref)-1); + /* + * We do not require the snap_empty_lock here, as any caller that + * increments the value must hold the snap_rwsem. + */ if (atomic_dec_and_test(&realm->nref)) __destroy_snap_realm(mdsc, realm); } /* - * caller needn't hold any locks + * See comments in ceph_get_snap_realm. Caller needn't hold any locks. 
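+ *
+ * A sketch of the race the get/put pair now closes (two CPUs, nref == 1):
+ * the putter uses atomic_dec_and_lock(), so the 1 -> 0 transition happens
+ * with snap_empty_lock held, while a concurrent getter whose
+ * atomic_inc_not_zero() observes 0 must take snap_empty_lock before
+ * re-incrementing, serializing against the realm being put on the empty
+ * list or destroyed.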
*/ void ceph_put_snap_realm(struct ceph_mds_client *mdsc, struct ceph_snap_realm *realm) { - dout("put_snap_realm %llx %p %d -> %d\n", realm->ino, realm, - atomic_read(&realm->nref), atomic_read(&realm->nref)-1); - if (!atomic_dec_and_test(&realm->nref)) + if (!atomic_dec_and_lock(&realm->nref, &mdsc->snap_empty_lock)) return; if (down_write_trylock(&mdsc->snap_rwsem)) { + spin_unlock(&mdsc->snap_empty_lock); __destroy_snap_realm(mdsc, realm); up_write(&mdsc->snap_rwsem); } else { - spin_lock(&mdsc->snap_empty_lock); list_add(&realm->empty_item, &mdsc->snap_empty); spin_unlock(&mdsc->snap_empty_lock); } diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 6b6332a5c113..9215a2f4535c 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -1167,7 +1167,7 @@ extern void ceph_flush_snaps(struct ceph_inode_info *ci, extern bool __ceph_should_report_size(struct ceph_inode_info *ci); extern void ceph_check_caps(struct ceph_inode_info *ci, int flags, struct ceph_mds_session *session); -extern void ceph_check_delayed_caps(struct ceph_mds_client *mdsc); +extern unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc); extern void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc); extern int ceph_drop_caps_for_unlink(struct inode *inode); extern int ceph_encode_inode_release(void **p, struct inode *inode, diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c index 57f91311fdaa..007427ba75e5 100644 --- a/fs/cifs/cifs_dfs_ref.c +++ b/fs/cifs/cifs_dfs_ref.c @@ -176,7 +176,7 @@ char *cifs_compose_mount_options(const char *sb_mountdata, } } - rc = dns_resolve_server_name_to_ip(name, &srvIP); + rc = dns_resolve_server_name_to_ip(name, &srvIP, NULL); if (rc < 0) { cifs_dbg(FYI, "%s: Failed to resolve server part of %s to IP: %d\n", __func__, name, rc); @@ -211,6 +211,10 @@ char *cifs_compose_mount_options(const char *sb_mountdata, else noff = tkn_e - (sb_mountdata + off) + 1; + if (strncasecmp(sb_mountdata + off, "cruid=", 6) == 0) { + off += noff; + continue; + } if (strncasecmp(sb_mountdata + off, "unc=", 4) == 0) { off += noff; continue; diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 3c2e117bb926..c6a9542ca281 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -75,6 +75,9 @@ #define SMB_ECHO_INTERVAL_MAX 600 #define SMB_ECHO_INTERVAL_DEFAULT 60 +/* dns resolution interval in seconds */ +#define SMB_DNS_RESOLVE_INTERVAL_DEFAULT 600 + /* maximum number of PDUs in one compound */ #define MAX_COMPOUND 5 @@ -646,6 +649,7 @@ struct TCP_Server_Info { /* point to the SMBD connection if RDMA is used instead of socket */ struct smbd_connection *smbd_conn; struct delayed_work echo; /* echo ping workqueue job */ + struct delayed_work resolve; /* dns resolution workqueue job */ char *smallbuf; /* pointer to current "small" buffer */ char *bigbuf; /* pointer to current "big" buffer */ /* Total size of this PDU. 
Only valid from cifs_demultiplex_thread */ @@ -689,6 +693,9 @@ struct TCP_Server_Info { bool use_swn_dstaddr; struct sockaddr_storage swn_dstaddr; #endif +#ifdef CONFIG_CIFS_DFS_UPCALL + bool is_dfs_conn; /* if a dfs connection */ +#endif }; struct cifs_credits { @@ -1604,6 +1611,11 @@ struct dfs_info3_param { int ttl; }; +struct file_list { + struct list_head list; + struct cifsFileInfo *cfile; +}; + /* * common struct for holding inode info when searching for or updating an * inode with new info diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index f72e3b3dca69..65d1a65bfc37 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -873,8 +873,11 @@ PsxDelete: InformationLevel) - 4; offset = param_offset + params; - /* Setup pointer to Request Data (inode type) */ - pRqD = (struct unlink_psx_rq *)(((char *)&pSMB->hdr.Protocol) + offset); + /* Setup pointer to Request Data (inode type). + * Note that SMB offsets are from the beginning of SMB which is 4 bytes + * in, after RFC1001 field + */ + pRqD = (struct unlink_psx_rq *)((char *)(pSMB) + offset + 4); pRqD->type = cpu_to_le16(type); pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->DataOffset = cpu_to_le16(offset); @@ -1081,7 +1084,8 @@ PsxCreat: param_offset = offsetof(struct smb_com_transaction2_spi_req, InformationLevel) - 4; offset = param_offset + params; - pdata = (OPEN_PSX_REQ *)(((char *)&pSMB->hdr.Protocol) + offset); + /* SMB offsets are from the beginning of SMB which is 4 bytes in, after RFC1001 field */ + pdata = (OPEN_PSX_REQ *)((char *)(pSMB) + offset + 4); pdata->Level = cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC); pdata->Permissions = cpu_to_le64(mode); pdata->PosixOpenFlags = cpu_to_le32(posix_flags); diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 01dc45178f66..3781eee9360a 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -78,6 +78,8 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server) int rc; int len; char *unc, *ipaddr = NULL; + time64_t expiry, now; + unsigned long ttl = SMB_DNS_RESOLVE_INTERVAL_DEFAULT; if (!server->hostname) return -EINVAL; @@ -91,13 +93,13 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server) } scnprintf(unc, len, "\\\\%s", server->hostname); - rc = dns_resolve_server_name_to_ip(unc, &ipaddr); + rc = dns_resolve_server_name_to_ip(unc, &ipaddr, &expiry); kfree(unc); if (rc < 0) { cifs_dbg(FYI, "%s: failed to resolve server part of %s to IP: %d\n", __func__, server->hostname, rc); - return rc; + goto requeue_resolve; } spin_lock(&cifs_tcp_ses_lock); @@ -106,7 +108,45 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server) spin_unlock(&cifs_tcp_ses_lock); kfree(ipaddr); - return !rc ? -1 : 0; + /* rc == 1 means success here */ + if (rc) { + now = ktime_get_real_seconds(); + if (expiry && expiry > now) + /* + * To make sure we don't use the cached entry, retry 1s + * after expiry. + */ + ttl = (expiry - now + 1); + } + rc = !rc ? -1 : 0; + +requeue_resolve: + cifs_dbg(FYI, "%s: next dns resolution scheduled for %lu seconds in the future\n", + __func__, ttl); + mod_delayed_work(cifsiod_wq, &server->resolve, (ttl * HZ)); + + return rc; +} + + +static void cifs_resolve_server(struct work_struct *work) +{ + int rc; + struct TCP_Server_Info *server = container_of(work, + struct TCP_Server_Info, resolve.work); + + mutex_lock(&server->srv_mutex); + + /* + * Resolve the hostname again to make sure that IP address is up-to-date. 
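+ *
+ * A sketch of the resulting loop (intervals are examples):
+ * reconn_set_ipaddr_from_hostname() derives a ttl from the DNS expiry, or
+ * falls back to SMB_DNS_RESOLVE_INTERVAL_DEFAULT (600s), and re-arms
+ * server->resolve via mod_delayed_work(), so this worker keeps
+ * rescheduling itself for as long as the connection is alive.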
+ */ + rc = reconn_set_ipaddr_from_hostname(server); + if (rc) { + cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n", + __func__, rc); + } + + mutex_unlock(&server->srv_mutex); } #ifdef CONFIG_CIFS_DFS_UPCALL @@ -180,7 +220,7 @@ cifs_reconnect(struct TCP_Server_Info *server) #ifdef CONFIG_CIFS_DFS_UPCALL struct super_block *sb = NULL; struct cifs_sb_info *cifs_sb = NULL; - struct dfs_cache_tgt_list tgt_list = {0}; + struct dfs_cache_tgt_list tgt_list = DFS_CACHE_TGT_LIST_INIT(tgt_list); struct dfs_cache_tgt_iterator *tgt_it = NULL; #endif @@ -680,6 +720,7 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server) spin_unlock(&cifs_tcp_ses_lock); cancel_delayed_work_sync(&server->echo); + cancel_delayed_work_sync(&server->resolve); spin_lock(&GlobalMid_Lock); server->tcpStatus = CifsExiting; @@ -1227,6 +1268,16 @@ cifs_find_tcp_session(struct smb3_fs_context *ctx) spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) { +#ifdef CONFIG_CIFS_DFS_UPCALL + /* + * DFS failover implementation in cifs_reconnect() requires unique tcp sessions for + * DFS connections to do failover properly, so avoid sharing them with regular + * shares or even links that may connect to the same server but have completely + * different failover targets. + */ + if (server->is_dfs_conn) + continue; +#endif /* * Skip ses channels since they're only handled in lower layers * (e.g. cifs_send_recv). @@ -1254,12 +1305,16 @@ cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect) return; } + /* srv_count can never go negative */ + WARN_ON(server->srv_count < 0); + put_net(cifs_net_ns(server)); list_del_init(&server->tcp_ses_list); spin_unlock(&cifs_tcp_ses_lock); cancel_delayed_work_sync(&server->echo); + cancel_delayed_work_sync(&server->resolve); if (from_reconnect) /* @@ -1342,6 +1397,7 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx) INIT_LIST_HEAD(&tcp_ses->tcp_ses_list); INIT_LIST_HEAD(&tcp_ses->smb_ses_list); INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request); + INIT_DELAYED_WORK(&tcp_ses->resolve, cifs_resolve_server); INIT_DELAYED_WORK(&tcp_ses->reconnect, smb2_reconnect_server); mutex_init(&tcp_ses->reconnect_mutex); memcpy(&tcp_ses->srcaddr, &ctx->srcaddr, @@ -1427,6 +1483,12 @@ smbd_connected: /* queue echo request delayed work */ queue_delayed_work(cifsiod_wq, &tcp_ses->echo, tcp_ses->echo_interval); + /* queue dns resolution delayed work */ + cifs_dbg(FYI, "%s: next dns resolution scheduled for %d seconds in the future\n", + __func__, SMB_DNS_RESOLVE_INTERVAL_DEFAULT); + + queue_delayed_work(cifsiod_wq, &tcp_ses->resolve, (SMB_DNS_RESOLVE_INTERVAL_DEFAULT * HZ)); + return tcp_ses; out_err_crypto_release: @@ -1605,6 +1667,9 @@ void cifs_put_smb_ses(struct cifs_ses *ses) } spin_unlock(&cifs_tcp_ses_lock); + /* ses_count can never go negative */ + WARN_ON(ses->ses_count < 0); + spin_lock(&GlobalMid_Lock); if (ses->status == CifsGood) ses->status = CifsExiting; @@ -1972,6 +2037,9 @@ cifs_put_tcon(struct cifs_tcon *tcon) return; } + /* tc_count can never go negative */ + WARN_ON(tcon->tc_count < 0); + if (tcon->use_witness) { int rc; @@ -2910,6 +2978,23 @@ static int mount_setup_tlink(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses, } #ifdef CONFIG_CIFS_DFS_UPCALL +static int mount_get_dfs_conns(struct smb3_fs_context *ctx, struct cifs_sb_info *cifs_sb, + unsigned int *xid, struct TCP_Server_Info **nserver, + struct cifs_ses **nses, struct cifs_tcon **ntcon) +{ + int rc; + + ctx->nosharesock = true; + rc = mount_get_conns(ctx, cifs_sb, xid,
nserver, nses, ntcon); + if (*nserver) { + cifs_dbg(FYI, "%s: marking tcp session as a dfs connection\n", __func__); + spin_lock(&cifs_tcp_ses_lock); + (*nserver)->is_dfs_conn = true; + spin_unlock(&cifs_tcp_ses_lock); + } + return rc; +} + /* * cifs_build_path_to_root returns full path to root when we do not have an * existing connection (tcon) @@ -3045,7 +3130,7 @@ static int do_dfs_failover(const char *path, const char *full_path, struct cifs_ { int rc; char *npath = NULL; - struct dfs_cache_tgt_list tgt_list = {0}; + struct dfs_cache_tgt_list tgt_list = DFS_CACHE_TGT_LIST_INIT(tgt_list); struct dfs_cache_tgt_iterator *tgt_it = NULL; struct smb3_fs_context tmp_ctx = {NULL}; @@ -3105,7 +3190,7 @@ static int do_dfs_failover(const char *path, const char *full_path, struct cifs_ tmp_ctx.prepath); mount_put_conns(cifs_sb, *xid, *server, *ses, *tcon); - rc = mount_get_conns(&tmp_ctx, cifs_sb, xid, server, ses, tcon); + rc = mount_get_dfs_conns(&tmp_ctx, cifs_sb, xid, server, ses, tcon); if (!rc || (*server && *ses)) { /* * We were able to connect to new target server. Update current context with @@ -3404,7 +3489,12 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx) goto error; } - ctx->nosharesock = true; + mount_put_conns(cifs_sb, xid, server, ses, tcon); + /* + * Ignore error check here because we may failover to other targets from a cached + * referral. + */ + (void)mount_get_dfs_conns(ctx, cifs_sb, &xid, &server, &ses, &tcon); /* Get path of DFS root */ ref_path = build_unc_path_to_root(ctx, cifs_sb, false); @@ -3433,7 +3523,7 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx) /* Connect to new DFS target only if we were redirected */ if (oldmnt != cifs_sb->ctx->mount_options) { mount_put_conns(cifs_sb, xid, server, ses, tcon); - rc = mount_get_conns(ctx, cifs_sb, &xid, &server, &ses, &tcon); + rc = mount_get_dfs_conns(ctx, cifs_sb, &xid, &server, &ses, &tcon); } if (rc && !server && !ses) { /* Failed to connect. Try to connect to other targets in the referral.
*/ @@ -3459,7 +3549,7 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx) rc = -ELOOP; } while (rc == -EREMOTE); - if (rc || !tcon) + if (rc || !tcon || !ses) goto error; kfree(ref_path); @@ -4095,7 +4185,8 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru if (!tree) return -ENOMEM; - if (!tcon->dfs_path) { + /* If it is not dfs or there was no cached dfs referral, then reconnect to same share */ + if (!tcon->dfs_path || dfs_cache_noreq_find(tcon->dfs_path + 1, &ref, &tl)) { if (tcon->ipc) { scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", server->hostname); rc = ops->tree_connect(xid, tcon->ses, tree, tcon, nlsc); @@ -4105,9 +4196,6 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru goto out; } - rc = dfs_cache_noreq_find(tcon->dfs_path + 1, &ref, &tl); - if (rc) - goto out; isroot = ref.server_type == DFS_TYPE_ROOT; free_dfs_info_param(&ref); diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c index 7c1769714609..283745592844 100644 --- a/fs/cifs/dfs_cache.c +++ b/fs/cifs/dfs_cache.c @@ -19,6 +19,7 @@ #include "cifs_debug.h" #include "cifs_unicode.h" #include "smb2glob.h" +#include "dns_resolve.h" #include "dfs_cache.h" @@ -911,6 +912,7 @@ static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl) err_free_it: list_for_each_entry_safe(it, nit, head, it_list) { + list_del(&it->it_list); kfree(it->it_name); kfree(it); } @@ -1293,6 +1295,194 @@ int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it, return 0; } +static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, const char *s2) +{ + char unc[sizeof("\\\\") + SERVER_NAME_LENGTH] = {0}; + const char *host; + size_t hostlen; + char *ip = NULL; + struct sockaddr sa; + bool match; + int rc; + + if (strcasecmp(s1, s2)) + return false; + + /* + * Resolve share's hostname and check if server address matches. Otherwise just ignore it + * as we could not have upcall to resolve hostname or failed to convert ip address. + */ + match = true; + extract_unc_hostname(s1, &host, &hostlen); + scnprintf(unc, sizeof(unc), "\\\\%.*s", (int)hostlen, host); + + rc = dns_resolve_server_name_to_ip(unc, &ip, NULL); + if (rc < 0) { + cifs_dbg(FYI, "%s: could not resolve %.*s. assuming server address matches.\n", + __func__, (int)hostlen, host); + return true; + } + + if (!cifs_convert_address(&sa, ip, strlen(ip))) { + cifs_dbg(VFS, "%s: failed to convert address \'%s\'. skip address matching.\n", + __func__, ip); + } else { + mutex_lock(&server->srv_mutex); + match = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, &sa); + mutex_unlock(&server->srv_mutex); + } + + kfree(ip); + return match; +} + +/* + * Mark dfs tcon for reconnecting when the currently connected tcon does not match any of the new + * target shares in @refs. + */ +static void mark_for_reconnect_if_needed(struct cifs_tcon *tcon, struct dfs_cache_tgt_list *tl, + const struct dfs_info3_param *refs, int numrefs) +{ + struct dfs_cache_tgt_iterator *it; + int i; + + for (it = dfs_cache_get_tgt_iterator(tl); it; it = dfs_cache_get_next_tgt(tl, it)) { + for (i = 0; i < numrefs; i++) { + if (target_share_equal(tcon->ses->server, dfs_cache_get_tgt_name(it), + refs[i].node_name)) + return; + } + } + + cifs_dbg(FYI, "%s: no cached or matched targets. 
mark dfs share for reconnect.\n", __func__); + for (i = 0; i < tcon->ses->chan_count; i++) { + spin_lock(&GlobalMid_Lock); + if (tcon->ses->chans[i].server->tcpStatus != CifsExiting) + tcon->ses->chans[i].server->tcpStatus = CifsNeedReconnect; + spin_unlock(&GlobalMid_Lock); + } +} + +/* Refresh dfs referral of tcon and mark it for reconnect if needed */ +static int refresh_tcon(struct cifs_ses **sessions, struct cifs_tcon *tcon, bool force_refresh) +{ + const char *path = tcon->dfs_path + 1; + struct cifs_ses *ses; + struct cache_entry *ce; + struct dfs_info3_param *refs = NULL; + int numrefs = 0; + bool needs_refresh = false; + struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl); + int rc = 0; + unsigned int xid; + + ses = find_ipc_from_server_path(sessions, path); + if (IS_ERR(ses)) { + cifs_dbg(FYI, "%s: could not find ipc session\n", __func__); + return PTR_ERR(ses); + } + + down_read(&htable_rw_lock); + ce = lookup_cache_entry(path); + needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce); + if (!IS_ERR(ce)) { + rc = get_targets(ce, &tl); + if (rc) + cifs_dbg(FYI, "%s: could not get dfs targets: %d\n", __func__, rc); + } + up_read(&htable_rw_lock); + + if (!needs_refresh) { + rc = 0; + goto out; + } + + xid = get_xid(); + rc = get_dfs_referral(xid, ses, path, &refs, &numrefs); + free_xid(xid); + + /* Create or update a cache entry with the new referral */ + if (!rc) { + dump_refs(refs, numrefs); + + down_write(&htable_rw_lock); + ce = lookup_cache_entry(path); + if (IS_ERR(ce)) + add_cache_entry_locked(refs, numrefs); + else if (force_refresh || cache_entry_expired(ce)) + update_cache_entry_locked(ce, refs, numrefs); + up_write(&htable_rw_lock); + + mark_for_reconnect_if_needed(tcon, &tl, refs, numrefs); + } + +out: + dfs_cache_free_tgts(&tl); + free_dfs_info_array(refs, numrefs); + return rc; +} + +/** + * dfs_cache_remount_fs - remount a DFS share + * + * Reconfigure dfs mount by forcing a new DFS referral and, if the currently cached targets do not + * match any of the new targets, mark it for reconnect. + * + * @cifs_sb: cifs superblock. + * + * Return zero if remounted, otherwise non-zero. + */ +int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb) +{ + struct cifs_tcon *tcon; + struct mount_group *mg; + struct cifs_ses *sessions[CACHE_MAX_ENTRIES + 1] = {NULL}; + int rc; + + if (!cifs_sb || !cifs_sb->master_tlink) + return -EINVAL; + + tcon = cifs_sb_master_tcon(cifs_sb); + if (!tcon->dfs_path) { + cifs_dbg(FYI, "%s: not a dfs tcon\n", __func__); + return 0; + } + + if (uuid_is_null(&cifs_sb->dfs_mount_id)) { + cifs_dbg(FYI, "%s: tcon has no dfs mount group id\n", __func__); + return -EINVAL; + } + + mutex_lock(&mount_group_list_lock); + mg = find_mount_group_locked(&cifs_sb->dfs_mount_id); + if (IS_ERR(mg)) { + mutex_unlock(&mount_group_list_lock); + cifs_dbg(FYI, "%s: tcon has no ipc session to refresh referral\n", __func__); + return PTR_ERR(mg); + } + kref_get(&mg->refcount); + mutex_unlock(&mount_group_list_lock); + + spin_lock(&mg->lock); + memcpy(&sessions, mg->sessions, mg->num_sessions * sizeof(mg->sessions[0])); + spin_unlock(&mg->lock); + + /* + * After reconnecting to a different server, unique ids won't match anymore, so we disable + * serverino. This prevents dentry revalidation from treating the dentries as stale (ESTALE). + */ + cifs_autodisable_serverino(cifs_sb); + /* + * Force the use of prefix path to support failover on DFS paths that resolve to targets + * that have different prefix paths.
+ */ + cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH; + rc = refresh_tcon(sessions, tcon, true); + + kref_put(&mg->refcount, mount_group_release); + return rc; +} + /* * Refresh all active dfs mounts regardless of whether they are in cache or not. * (cache can be cleared) @@ -1303,7 +1493,6 @@ static void refresh_mounts(struct cifs_ses **sessions) struct cifs_ses *ses; struct cifs_tcon *tcon, *ntcon; struct list_head tcons; - unsigned int xid; INIT_LIST_HEAD(&tcons); @@ -1321,44 +1510,8 @@ static void refresh_mounts(struct cifs_ses **sessions) spin_unlock(&cifs_tcp_ses_lock); list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) { - const char *path = tcon->dfs_path + 1; - struct cache_entry *ce; - struct dfs_info3_param *refs = NULL; - int numrefs = 0; - bool needs_refresh = false; - int rc = 0; - list_del_init(&tcon->ulist); - - ses = find_ipc_from_server_path(sessions, path); - if (IS_ERR(ses)) - goto next_tcon; - - down_read(&htable_rw_lock); - ce = lookup_cache_entry(path); - needs_refresh = IS_ERR(ce) || cache_entry_expired(ce); - up_read(&htable_rw_lock); - - if (!needs_refresh) - goto next_tcon; - - xid = get_xid(); - rc = get_dfs_referral(xid, ses, path, &refs, &numrefs); - free_xid(xid); - - /* Create or update a cache entry with the new referral */ - if (!rc) { - down_write(&htable_rw_lock); - ce = lookup_cache_entry(path); - if (IS_ERR(ce)) - add_cache_entry_locked(refs, numrefs); - else if (cache_entry_expired(ce)) - update_cache_entry_locked(ce, refs, numrefs); - up_write(&htable_rw_lock); - } - -next_tcon: - free_dfs_info_array(refs, numrefs); + refresh_tcon(sessions, tcon, false); cifs_put_tcon(tcon); } } diff --git a/fs/cifs/dfs_cache.h b/fs/cifs/dfs_cache.h index b29d3ae64829..52070d1df189 100644 --- a/fs/cifs/dfs_cache.h +++ b/fs/cifs/dfs_cache.h @@ -13,6 +13,8 @@ #include <linux/uuid.h> #include "cifsglob.h" +#define DFS_CACHE_TGT_LIST_INIT(var) { .tl_numtgts = 0, .tl_list = LIST_HEAD_INIT((var).tl_list), } + struct dfs_cache_tgt_list { int tl_numtgts; struct list_head tl_list; @@ -44,6 +46,7 @@ int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it, void dfs_cache_put_refsrv_sessions(const uuid_t *mount_id); void dfs_cache_add_refsrv_session(const uuid_t *mount_id, struct cifs_ses *ses); char *dfs_cache_canonical_path(const char *path, const struct nls_table *cp, int remap); +int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb); static inline struct dfs_cache_tgt_iterator * dfs_cache_get_next_tgt(struct dfs_cache_tgt_list *tl, diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 79402ca0ddfa..5f8a302ffcb2 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -100,7 +100,7 @@ build_path_from_dentry_optional_prefix(struct dentry *direntry, void *page, if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) pplen = cifs_sb->prepath ? strlen(cifs_sb->prepath) + 1 : 0; - s = dentry_path_raw(direntry, page, PAGE_SIZE); + s = dentry_path_raw(direntry, page, PATH_MAX); if (IS_ERR(s)) return s; if (!s[1]) // for root we want "", not "/" diff --git a/fs/cifs/dns_resolve.c b/fs/cifs/dns_resolve.c index d15b82d569ef..8c616aaeb7c4 100644 --- a/fs/cifs/dns_resolve.c +++ b/fs/cifs/dns_resolve.c @@ -24,6 +24,7 @@ * dns_resolve_server_name_to_ip - Resolve UNC server name to ip address. * @unc: UNC path specifying the server (with '/' as delimiter) * @ip_addr: Where to return the IP address. + * @expiry: Where to return the expiry time for the dns record. * * The IP address will be returned in string form, and the caller is * responsible for freeing it. 
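The new @expiry argument above is optional: callers that do not need the TTL, such as match_target_ip() in fs/cifs/misc.c further down, simply pass NULL. A minimal sketch of the updated calling convention (the wrapper function below is illustrative only; the dns_resolve_server_name_to_ip() signature and the ownership rule for the returned string are the ones from this patch):

	static int resolve_unc_example(const char *unc)
	{
		char *ip = NULL;
		time64_t expiry = 0;
		int rc;

		/* @unc is a UNC server name, e.g. "\\server" */
		rc = dns_resolve_server_name_to_ip(unc, &ip, &expiry);
		if (rc < 0)
			return rc;	/* negative on error, length of result on success */
		pr_info("resolved %s to %s (dns record expiry %lld)\n", unc, ip, expiry);
		kfree(ip);	/* the returned string is owned by the caller */
		return 0;
	}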
@@ -31,7 +32,7 @@ * Returns length of result on success, -ve on error. */ int -dns_resolve_server_name_to_ip(const char *unc, char **ip_addr) +dns_resolve_server_name_to_ip(const char *unc, char **ip_addr, time64_t *expiry) { struct sockaddr_storage ss; const char *hostname, *sep; @@ -66,13 +67,14 @@ dns_resolve_server_name_to_ip(const char *unc, char **ip_addr) /* Perform the upcall */ rc = dns_query(current->nsproxy->net_ns, NULL, hostname, len, - NULL, ip_addr, NULL, false); + NULL, ip_addr, expiry, false); if (rc < 0) cifs_dbg(FYI, "%s: unable to resolve: %*.*s\n", __func__, len, len, hostname); else - cifs_dbg(FYI, "%s: resolved: %*.*s to %s\n", - __func__, len, len, hostname, *ip_addr); + cifs_dbg(FYI, "%s: resolved: %*.*s to %s expiry %llu\n", + __func__, len, len, hostname, *ip_addr, + expiry ? (*expiry) : 0); return rc; name_is_IP_address: diff --git a/fs/cifs/dns_resolve.h b/fs/cifs/dns_resolve.h index 5be060b82b13..9fa2807ef79e 100644 --- a/fs/cifs/dns_resolve.h +++ b/fs/cifs/dns_resolve.h @@ -12,7 +12,7 @@ #define _DNS_RESOLVE_H #ifdef __KERNEL__ -extern int dns_resolve_server_name_to_ip(const char *unc, char **ip_addr); +extern int dns_resolve_server_name_to_ip(const char *unc, char **ip_addr, time64_t *expiry); #endif /* KERNEL */ #endif /* _DNS_RESOLVE_H */ diff --git a/fs/cifs/file.c b/fs/cifs/file.c index cd108607a070..bb98fbdd22a9 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -4619,7 +4619,7 @@ read_complete: static int cifs_readpage(struct file *file, struct page *page) { - loff_t offset = (loff_t)page->index << PAGE_SHIFT; + loff_t offset = page_file_offset(page); int rc = -EACCES; unsigned int xid; @@ -4848,34 +4848,33 @@ void cifs_oplock_break(struct work_struct *work) oplock_break_ack: /* - * releasing stale oplock after recent reconnect of smb session using - * a now incorrect file handle is not a data integrity issue but do - * not bother sending an oplock release if session to server still is - * disconnected since oplock already released by the server - */ - if (!cfile->oplock_break_cancelled) { - rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid, - cinode); - cifs_dbg(FYI, "Oplock release rc = %d\n", rc); - } - /* * When oplock break is received and there are no active * file handles but cached, then schedule deferred close immediately. * So, new open will not use cached handle. */ spin_lock(&CIFS_I(inode)->deferred_lock); is_deferred = cifs_is_deferred_close(cfile, &dclose); + spin_unlock(&CIFS_I(inode)->deferred_lock); if (is_deferred && cfile->deferred_close_scheduled && delayed_work_pending(&cfile->deferred)) { - /* - * If there is no pending work, mod_delayed_work queues new work. - * So, Increase the ref count to avoid use-after-free. 
- */ - if (!mod_delayed_work(deferredclose_wq, &cfile->deferred, 0)) - cifsFileInfo_get(cfile); + if (cancel_delayed_work(&cfile->deferred)) { + _cifsFileInfo_put(cfile, false, false); + goto oplock_break_done; + } } - spin_unlock(&CIFS_I(inode)->deferred_lock); + /* + * releasing stale oplock after recent reconnect of smb session using + * a now incorrect file handle is not a data integrity issue but do + * not bother sending an oplock release if session to server still is + * disconnected since oplock already released by the server + */ + if (!cfile->oplock_break_cancelled) { + rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid, + cinode); + cifs_dbg(FYI, "Oplock release rc = %d\n", rc); + } +oplock_break_done: _cifsFileInfo_put(cfile, false /* do not wait for ourself */, false); cifs_done_oplock_break(cinode); } diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c index 553adfbcc22a..eed59bc1d913 100644 --- a/fs/cifs/fs_context.c +++ b/fs/cifs/fs_context.c @@ -13,6 +13,9 @@ #include <linux/magic.h> #include <linux/security.h> #include <net/net_namespace.h> +#ifdef CONFIG_CIFS_DFS_UPCALL +#include "dfs_cache.h" +#endif #include <linux/ctype.h> @@ -779,6 +782,10 @@ static int smb3_reconfigure(struct fs_context *fc) smb3_cleanup_fs_context_contents(cifs_sb->ctx); rc = smb3_fs_context_dup(cifs_sb->ctx, ctx); smb3_update_mnt_flags(cifs_sb); +#ifdef CONFIG_CIFS_DFS_UPCALL + if (!rc) + rc = dfs_cache_remount_fs(cifs_sb); +#endif return rc; } @@ -918,6 +925,13 @@ static int smb3_fs_context_parse_param(struct fs_context *fc, ctx->cred_uid = uid; ctx->cruid_specified = true; break; + case Opt_backupuid: + uid = make_kuid(current_user_ns(), result.uint_32); + if (!uid_valid(uid)) + goto cifs_parse_mount_err; + ctx->backupuid = uid; + ctx->backupuid_specified = true; + break; case Opt_backupgid: gid = make_kgid(current_user_ns(), result.uint_32); if (!gid_valid(gid)) diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index b96b253e7635..65f8a70cece3 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -1625,7 +1625,7 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry) goto unlink_out; } - cifs_close_all_deferred_files(tcon); + cifs_close_deferred_file(CIFS_I(inode)); if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))) { rc = CIFSPOSIXDelFile(xid, tcon, full_path, @@ -2084,6 +2084,7 @@ cifs_rename2(struct user_namespace *mnt_userns, struct inode *source_dir, FILE_UNIX_BASIC_INFO *info_buf_target; unsigned int xid; int rc, tmprc; + int retry_count = 0; if (flags & ~RENAME_NOREPLACE) return -EINVAL; @@ -2113,10 +2114,24 @@ cifs_rename2(struct user_namespace *mnt_userns, struct inode *source_dir, goto cifs_rename_exit; } - cifs_close_all_deferred_files(tcon); + cifs_close_deferred_file(CIFS_I(d_inode(source_dentry))); + if (d_inode(target_dentry) != NULL) + cifs_close_deferred_file(CIFS_I(d_inode(target_dentry))); + rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry, to_name); + if (rc == -EACCES) { + while (retry_count < 3) { + cifs_close_all_deferred_files(tcon); + rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry, + to_name); + if (rc != -EACCES) + break; + retry_count++; + } + } + /* * No-replace is the natural behavior for CIFS, so skip unlink hacks.
*/ diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index 184138b4eb8c..9469f1cf0b46 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -723,13 +723,31 @@ void cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode) { struct cifsFileInfo *cfile = NULL; - struct cifs_deferred_close *dclose; + struct file_list *tmp_list, *tmp_next_list; + struct list_head file_head; + + if (cifs_inode == NULL) + return; + INIT_LIST_HEAD(&file_head); + spin_lock(&cifs_inode->open_file_lock); list_for_each_entry(cfile, &cifs_inode->openFileList, flist) { - spin_lock(&cifs_inode->deferred_lock); - if (cifs_is_deferred_close(cfile, &dclose)) - mod_delayed_work(deferredclose_wq, &cfile->deferred, 0); - spin_unlock(&cifs_inode->deferred_lock); + if (delayed_work_pending(&cfile->deferred)) { + if (cancel_delayed_work(&cfile->deferred)) { + tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC); + if (tmp_list == NULL) + continue; + tmp_list->cfile = cfile; + list_add_tail(&tmp_list->list, &file_head); + } + } + } + spin_unlock(&cifs_inode->open_file_lock); + + list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) { + _cifsFileInfo_put(tmp_list->cfile, true, false); + list_del(&tmp_list->list); + kfree(tmp_list); } } @@ -738,20 +756,30 @@ cifs_close_all_deferred_files(struct cifs_tcon *tcon) { struct cifsFileInfo *cfile; struct list_head *tmp; + struct file_list *tmp_list, *tmp_next_list; + struct list_head file_head; + INIT_LIST_HEAD(&file_head); spin_lock(&tcon->open_file_lock); list_for_each(tmp, &tcon->openFileList) { cfile = list_entry(tmp, struct cifsFileInfo, tlist); if (delayed_work_pending(&cfile->deferred)) { - /* - * If there is no pending work, mod_delayed_work queues new work. - * So, Increase the ref count to avoid use-after-free. - */ - if (!mod_delayed_work(deferredclose_wq, &cfile->deferred, 0)) - cifsFileInfo_get(cfile); + if (cancel_delayed_work(&cfile->deferred)) { + tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC); + if (tmp_list == NULL) + continue; + tmp_list->cfile = cfile; + list_add_tail(&tmp_list->list, &file_head); + } } } spin_unlock(&tcon->open_file_lock); + + list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) { + _cifsFileInfo_put(tmp_list->cfile, true, false); + list_del(&tmp_list->list); + kfree(tmp_list); + } } /* parses DFS referral V3 structure @@ -1187,7 +1215,7 @@ int match_target_ip(struct TCP_Server_Info *server, cifs_dbg(FYI, "%s: target name: %s\n", __func__, target + 2); - rc = dns_resolve_server_name_to_ip(target, &tip); + rc = dns_resolve_server_name_to_ip(target, &tip, NULL); if (rc < 0) goto out; diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index e4c8f603dd58..2dfd0d8297eb 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -557,8 +557,8 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf, p = buf; while (bytes_left >= sizeof(*p)) { info->speed = le64_to_cpu(p->LinkSpeed); - info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE); - info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE); + info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0; + info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE) ?
1 : 0; cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count); cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed); @@ -2910,6 +2910,8 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses, /* ipc tcons are not refcounted */ spin_lock(&cifs_tcp_ses_lock); tcon->tc_count--; + /* tc_count can never go negative */ + WARN_ON(tcon->tc_count < 0); spin_unlock(&cifs_tcp_ses_lock); } kfree(utf16_path); @@ -3616,6 +3618,7 @@ static int smb3_simple_fallocate_write_range(unsigned int xid, { struct cifs_io_parms io_parms = {0}; int nbytes; + int rc = 0; struct kvec iov[2]; io_parms.netfid = cfile->fid.netfid; @@ -3623,13 +3626,25 @@ static int smb3_simple_fallocate_write_range(unsigned int xid, io_parms.tcon = tcon; io_parms.persistent_fid = cfile->fid.persistent_fid; io_parms.volatile_fid = cfile->fid.volatile_fid; - io_parms.offset = off; - io_parms.length = len; - /* iov[0] is reserved for smb header */ - iov[1].iov_base = buf; - iov[1].iov_len = io_parms.length; - return SMB2_write(xid, &io_parms, &nbytes, iov, 1); + while (len) { + io_parms.offset = off; + io_parms.length = len; + if (io_parms.length > SMB2_MAX_BUFFER_SIZE) + io_parms.length = SMB2_MAX_BUFFER_SIZE; + /* iov[0] is reserved for smb header */ + iov[1].iov_base = buf; + iov[1].iov_len = io_parms.length; + rc = SMB2_write(xid, &io_parms, &nbytes, iov, 1); + if (rc) + break; + if (nbytes > len) + return -EINVAL; + buf += nbytes; + off += nbytes; + len -= nbytes; + } + return rc; } static int smb3_simple_fallocate_range(unsigned int xid, @@ -3653,11 +3668,6 @@ static int smb3_simple_fallocate_range(unsigned int xid, (char **)&out_data, &out_data_len); if (rc) goto out; - /* - * It is already all allocated - */ - if (out_data_len == 0) - goto out; buf = kzalloc(1024 * 1024, GFP_KERNEL); if (buf == NULL) { @@ -3780,6 +3790,24 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon, goto out; } + if (keep_size == true) { + /* + * We can not preallocate pages beyond the end of the file + * in SMB2 + */ + if (off >= i_size_read(inode)) { + rc = 0; + goto out; + } + /* + * For fallocates that are partially beyond the end of file, + * clamp len so we only fallocate up to the end of file. 
+ */ + if (off + len > i_size_read(inode)) { + len = i_size_read(inode) - off; + } + } + if ((keep_size == true) || (i_size_read(inode) >= off + len)) { /* * At this point, we are trying to fallocate an internal diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 781d14e5f2af..b6d2e3591927 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -2426,7 +2426,7 @@ create_sd_buf(umode_t mode, bool set_owner, unsigned int *len) memcpy(aclptr, &acl, sizeof(struct cifs_acl)); buf->ccontext.DataLength = cpu_to_le32(ptr - (__u8 *)&buf->sd); - *len = ptr - (__u8 *)buf; + *len = roundup(ptr - (__u8 *)buf, 8); return buf; } diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h index 4b27cb9105fd..e9cac7970b66 100644 --- a/fs/cifs/smb2pdu.h +++ b/fs/cifs/smb2pdu.h @@ -394,6 +394,7 @@ struct smb2_compression_capabilities_context { __u16 Padding; __u32 Flags; __le16 CompressionAlgorithms[3]; + __u16 Pad; /* Some servers require pad to DataLen multiple of 8 */ /* Check if pad needed */ } __packed; diff --git a/fs/configfs/file.c b/fs/configfs/file.c index 2f63bf3a7325..0ad32150611e 100644 --- a/fs/configfs/file.c +++ b/fs/configfs/file.c @@ -91,7 +91,10 @@ static ssize_t configfs_read_iter(struct kiocb *iocb, struct iov_iter *to) } pr_debug("%s: count = %zd, pos = %lld, buf = %s\n", __func__, iov_iter_count(to), iocb->ki_pos, buffer->page); - retval = copy_to_iter(buffer->page, buffer->count, to); + if (iocb->ki_pos >= buffer->count) + goto out; + retval = copy_to_iter(buffer->page + iocb->ki_pos, + buffer->count - iocb->ki_pos, to); iocb->ki_pos += retval; if (retval == 0) retval = -EFAULT; @@ -162,7 +165,10 @@ static ssize_t configfs_bin_read_iter(struct kiocb *iocb, struct iov_iter *to) buffer->needs_read_fill = 0; } - retval = copy_to_iter(buffer->bin_buffer, buffer->bin_buffer_size, to); + if (iocb->ki_pos >= buffer->bin_buffer_size) + goto out; + retval = copy_to_iter(buffer->bin_buffer + iocb->ki_pos, + buffer->bin_buffer_size - iocb->ki_pos, to); iocb->ki_pos += retval; if (retval == 0) retval = -EFAULT; @@ -171,6 +177,7 @@ out: return retval; } +/* Fill @buffer with data coming from @from. */ static int fill_write_buffer(struct configfs_buffer *buffer, struct iov_iter *from) { @@ -214,7 +221,7 @@ static ssize_t configfs_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct configfs_buffer *buffer = file->private_data; - ssize_t len; + int len; mutex_lock(&buffer->mutex); len = fill_write_buffer(buffer, from); @@ -272,7 +279,9 @@ static ssize_t configfs_bin_write_iter(struct kiocb *iocb, buffer->bin_buffer_size = end_offset; } - len = copy_from_iter(buffer->bin_buffer, buffer->bin_buffer_size, from); + len = copy_from_iter(buffer->bin_buffer + iocb->ki_pos, + buffer->bin_buffer_size - iocb->ki_pos, from); + iocb->ki_pos += len; out: mutex_unlock(&buffer->mutex); return len ? 
: -EFAULT; @@ -722,7 +722,7 @@ static int copy_cow_page_dax(struct block_device *bdev, struct dax_device *dax_d return rc; id = dax_read_lock(); - rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(PAGE_SIZE), &kaddr, NULL); + rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL); if (rc < 0) { dax_read_unlock(id); return rc; diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c index 14292dba3a12..2c2f179b6977 100644 --- a/fs/ext2/dir.c +++ b/fs/ext2/dir.c @@ -106,12 +106,11 @@ static int ext2_commit_chunk(struct page *page, loff_t pos, unsigned len) return err; } -static bool ext2_check_page(struct page *page, int quiet) +static bool ext2_check_page(struct page *page, int quiet, char *kaddr) { struct inode *dir = page->mapping->host; struct super_block *sb = dir->i_sb; unsigned chunk_size = ext2_chunk_size(dir); - char *kaddr = page_address(page); u32 max_inumber = le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count); unsigned offs, rec_len; unsigned limit = PAGE_SIZE; @@ -205,7 +204,8 @@ static struct page * ext2_get_page(struct inode *dir, unsigned long n, if (!IS_ERR(page)) { *page_addr = kmap_local_page(page); if (unlikely(!PageChecked(page))) { - if (PageError(page) || !ext2_check_page(page, quiet)) + if (PageError(page) || !ext2_check_page(page, quiet, + *page_addr)) goto fail; } } @@ -584,10 +584,10 @@ out_unlock: * ext2_delete_entry deletes a directory entry by merging it with the * previous entry. Page is up-to-date. */ -int ext2_delete_entry (struct ext2_dir_entry_2 * dir, struct page * page ) +int ext2_delete_entry (struct ext2_dir_entry_2 *dir, struct page *page, + char *kaddr) { struct inode *inode = page->mapping->host; - char *kaddr = page_address(page); unsigned from = ((char*)dir - kaddr) & ~(ext2_chunk_size(inode)-1); unsigned to = ((char *)dir - kaddr) + ext2_rec_len_from_disk(dir->rec_len); @@ -607,7 +607,7 @@ int ext2_delete_entry (struct ext2_dir_entry_2 * dir, struct page * page ) de = ext2_next_entry(de); } if (pde) - from = (char*)pde - (char*)page_address(page); + from = (char *)pde - kaddr; pos = page_offset(page) + from; lock_page(page); err = ext2_prepare_chunk(page, pos, to - from); diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h index b0a694820cb7..e512630cb63e 100644 --- a/fs/ext2/ext2.h +++ b/fs/ext2/ext2.h @@ -740,7 +740,8 @@ extern int ext2_inode_by_name(struct inode *dir, extern int ext2_make_empty(struct inode *, struct inode *); extern struct ext2_dir_entry_2 *ext2_find_entry(struct inode *, const struct qstr *, struct page **, void **res_page_addr); -extern int ext2_delete_entry (struct ext2_dir_entry_2 *, struct page *); +extern int ext2_delete_entry(struct ext2_dir_entry_2 *dir, struct page *page, + char *kaddr); extern int ext2_empty_dir (struct inode *); extern struct ext2_dir_entry_2 *ext2_dotdot(struct inode *dir, struct page **p, void **pa); extern void ext2_set_link(struct inode *, struct ext2_dir_entry_2 *, struct page *, void *, diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c index 1f69b81655b6..5f6b7560eb3f 100644 --- a/fs/ext2/namei.c +++ b/fs/ext2/namei.c @@ -293,7 +293,7 @@ static int ext2_unlink(struct inode * dir, struct dentry *dentry) goto out; } - err = ext2_delete_entry (de, page); + err = ext2_delete_entry (de, page, page_addr); ext2_put_page(page, page_addr); if (err) goto out; @@ -397,7 +397,7 @@ static int ext2_rename (struct user_namespace * mnt_userns, old_inode->i_ctime = current_time(old_inode); mark_inode_dirty(old_inode); - ext2_delete_entry(old_de, old_page); + ext2_delete_entry(old_de, old_page, old_page_addr); if (dir_de) { if (old_dir != 
new_dir) diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c index b96ecba91899..b60f0152ea57 100644 --- a/fs/ext4/ext4_jbd2.c +++ b/fs/ext4/ext4_jbd2.c @@ -244,9 +244,6 @@ int __ext4_journal_get_write_access(const char *where, unsigned int line, * "bh" may be NULL: a metadata block may have been freed from memory * but there may still be a record of it in the journal, and that record * still needs to be revoked. - * - * If the handle isn't valid we're not journaling, but we still need to - * call into ext4_journal_revoke() to put the buffer head. */ int __ext4_forget(const char *where, unsigned int line, handle_t *handle, int is_metadata, struct inode *inode, diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c index bc364c119af6..cebea4270817 100644 --- a/fs/ext4/mmp.c +++ b/fs/ext4/mmp.c @@ -138,7 +138,7 @@ static int kmmpd(void *data) unsigned mmp_check_interval; unsigned long last_update_time; unsigned long diff; - int retval; + int retval = 0; mmp_block = le64_to_cpu(es->s_mmp_block); mmp = (struct mmp_struct *)(bh->b_data); diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 5fd56f616cf0..f3bbcd4efb56 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -2517,7 +2517,7 @@ again: goto journal_error; err = ext4_handle_dirty_dx_node(handle, dir, frame->bh); - if (err) + if (restart || err) goto journal_error; } else { struct dx_root *dxroot; diff --git a/fs/fcntl.c b/fs/fcntl.c index dfc72f15be7f..f946bec8f1f1 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c @@ -369,8 +369,8 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg, /* 32-bit arches must use fcntl64() */ case F_OFD_SETLK: case F_OFD_SETLKW: -#endif fallthrough; +#endif case F_SETLK: case F_SETLKW: if (copy_from_user(&flock, argp, sizeof(flock))) diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 06d04a74ab6c..4c3370548982 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -521,6 +521,9 @@ static bool inode_prepare_wbs_switch(struct inode *inode, */ smp_mb(); + if (IS_DAX(inode)) + return false; + /* while holding I_WB_SWITCH, no one else can update the association */ spin_lock(&inode->i_lock); if (!(inode->i_sb->s_flags & SB_ACTIVE) || diff --git a/fs/fs_context.c b/fs/fs_context.c index 2834d1afa6e8..de1985eae535 100644 --- a/fs/fs_context.c +++ b/fs/fs_context.c @@ -80,6 +80,35 @@ static int vfs_parse_sb_flag(struct fs_context *fc, const char *key) } /** + * vfs_parse_fs_param_source - Handle setting "source" via parameter + * @fc: The filesystem context to modify + * @param: The parameter + * + * This is a simple helper for filesystems to verify that the "source" they + * accept is sane. + * + * Returns 0 on success, -ENOPARAM if this is not the "source" parameter, and + * -EINVAL otherwise. In the event of failure, supplementary error information + * is logged. + */ +int vfs_parse_fs_param_source(struct fs_context *fc, struct fs_parameter *param) +{ + if (strcmp(param->key, "source") != 0) + return -ENOPARAM; + + if (param->type != fs_value_is_string) + return invalf(fc, "Non-string source"); + + if (fc->source) + return invalf(fc, "Multiple sources"); + + fc->source = param->string; + param->string = NULL; + return 0; +} +EXPORT_SYMBOL(vfs_parse_fs_param_source); + +/** * vfs_parse_fs_param - Add a single parameter to a superblock config * @fc: The filesystem context to modify * @param: The parameter @@ -122,15 +151,9 @@ int vfs_parse_fs_param(struct fs_context *fc, struct fs_parameter *param) /* If the filesystem doesn't take any arguments, give it the * default handling of source.
*/ - if (strcmp(param->key, "source") == 0) { - if (param->type != fs_value_is_string) - return invalf(fc, "VFS: Non-string source"); - if (fc->source) - return invalf(fc, "VFS: Multiple sources"); - fc->source = param->string; - param->string = NULL; - return 0; - } + ret = vfs_parse_fs_param_source(fc, param); + if (ret != -ENOPARAM) + return ret; return invalf(fc, "%s: Unknown parameter '%s'", fc->fs_type->name, param->key); @@ -504,16 +527,11 @@ static int legacy_parse_param(struct fs_context *fc, struct fs_parameter *param) struct legacy_fs_context *ctx = fc->fs_private; unsigned int size = ctx->data_size; size_t len = 0; + int ret; - if (strcmp(param->key, "source") == 0) { - if (param->type != fs_value_is_string) - return invalf(fc, "VFS: Legacy: Non-string source"); - if (fc->source) - return invalf(fc, "VFS: Legacy: Multiple sources"); - fc->source = param->string; - param->string = NULL; - return 0; - } + ret = vfs_parse_fs_param_source(fc, param); + if (ret != -ENOPARAM) + return ret; if (ctx->param_type == LEGACY_FS_MONOLITHIC_PARAMS) return invalf(fc, "VFS: Legacy: Can't mix monolithic and individual options"); diff --git a/fs/fuse/dax.c b/fs/fuse/dax.c index e55723744f58..9d58371d22c2 100644 --- a/fs/fuse/dax.c +++ b/fs/fuse/dax.c @@ -1235,8 +1235,6 @@ void fuse_dax_conn_free(struct fuse_conn *fc) static int fuse_dax_mem_range_init(struct fuse_conn_dax *fcd) { long nr_pages, nr_ranges; - void *kaddr; - pfn_t pfn; struct fuse_dax_mapping *range; int ret, id; size_t dax_size = -1; @@ -1248,8 +1246,8 @@ static int fuse_dax_mem_range_init(struct fuse_conn_dax *fcd) INIT_DELAYED_WORK(&fcd->free_work, fuse_dax_free_mem_worker); id = dax_read_lock(); - nr_pages = dax_direct_access(fcd->dev, 0, PHYS_PFN(dax_size), &kaddr, - &pfn); + nr_pages = dax_direct_access(fcd->dev, 0, PHYS_PFN(dax_size), NULL, + NULL); dax_read_unlock(id); if (nr_pages < 0) { pr_debug("dax_direct_access() returned %ld\n", nr_pages); diff --git a/fs/hfs/bfind.c b/fs/hfs/bfind.c index 4af318fbda77..ef9498a6e88a 100644 --- a/fs/hfs/bfind.c +++ b/fs/hfs/bfind.c @@ -25,7 +25,19 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd) fd->key = ptr + tree->max_key_len + 2; hfs_dbg(BNODE_REFS, "find_init: %d (%p)\n", tree->cnid, __builtin_return_address(0)); - mutex_lock(&tree->tree_lock); + switch (tree->cnid) { + case HFS_CAT_CNID: + mutex_lock_nested(&tree->tree_lock, CATALOG_BTREE_MUTEX); + break; + case HFS_EXT_CNID: + mutex_lock_nested(&tree->tree_lock, EXTENTS_BTREE_MUTEX); + break; + case HFS_ATTR_CNID: + mutex_lock_nested(&tree->tree_lock, ATTR_BTREE_MUTEX); + break; + default: + return -EINVAL; + } return 0; } diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c index b63a4df7327b..c0a73a6ffb28 100644 --- a/fs/hfs/bnode.c +++ b/fs/hfs/bnode.c @@ -15,16 +15,31 @@ #include "btree.h" -void hfs_bnode_read(struct hfs_bnode *node, void *buf, - int off, int len) +void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len) { struct page *page; + int pagenum; + int bytes_read; + int bytes_to_read; + void *vaddr; off += node->page_offset; - page = node->page[0]; + pagenum = off >> PAGE_SHIFT; + off &= ~PAGE_MASK; /* compute page offset for the first page */ - memcpy(buf, kmap(page) + off, len); - kunmap(page); + for (bytes_read = 0; bytes_read < len; bytes_read += bytes_to_read) { + if (pagenum >= node->tree->pages_per_bnode) + break; + page = node->page[pagenum]; + bytes_to_read = min_t(int, len - bytes_read, PAGE_SIZE - off); + + vaddr = kmap_atomic(page); + memcpy(buf + bytes_read, vaddr + off, 
bytes_to_read); + kunmap_atomic(vaddr); + + pagenum++; + off = 0; /* page offset only applies to the first page */ + } } u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off) diff --git a/fs/hfs/btree.h b/fs/hfs/btree.h index 4ba45caf5939..0e6baee93245 100644 --- a/fs/hfs/btree.h +++ b/fs/hfs/btree.h @@ -13,6 +13,13 @@ typedef int (*btree_keycmp)(const btree_key *, const btree_key *); #define NODE_HASH_SIZE 256 +/* B-tree mutex nested subclasses */ +enum hfs_btree_mutex_classes { + CATALOG_BTREE_MUTEX, + EXTENTS_BTREE_MUTEX, + ATTR_BTREE_MUTEX, +}; + /* A HFS BTree held in memory */ struct hfs_btree { struct super_block *sb; diff --git a/fs/hfs/super.c b/fs/hfs/super.c index 44d07c9e3a7f..12d9bae39363 100644 --- a/fs/hfs/super.c +++ b/fs/hfs/super.c @@ -420,14 +420,12 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent) if (!res) { if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) { res = -EIO; - goto bail; + goto bail_hfs_find; } hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength); } - if (res) { - hfs_find_exit(&fd); - goto bail_no_root; - } + if (res) + goto bail_hfs_find; res = -EINVAL; root_inode = hfs_iget(sb, &fd.search_key->cat, &rec); hfs_find_exit(&fd); @@ -443,6 +441,8 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent) /* everything's okay */ return 0; +bail_hfs_find: + hfs_find_exit(&fd); bail_no_root: pr_err("get root inode failed\n"); bail: diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 926eeb9bf4eb..cdfb1ae78a3f 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -77,7 +77,7 @@ enum hugetlb_param { static const struct fs_parameter_spec hugetlb_fs_parameters[] = { fsparam_u32 ("gid", Opt_gid), fsparam_string("min_size", Opt_min_size), - fsparam_u32 ("mode", Opt_mode), + fsparam_u32oct("mode", Opt_mode), fsparam_string("nr_inodes", Opt_nr_inodes), fsparam_string("pagesize", Opt_pagesize), fsparam_string("size", Opt_size), diff --git a/fs/internal.h b/fs/internal.h index 3ce8edbaa3ca..82e8eb32ff3d 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -61,7 +61,6 @@ extern void __init chrdev_init(void); */ extern const struct fs_context_operations legacy_fs_context_ops; extern int parse_monolithic_mount_data(struct fs_context *, void *); -extern void fc_drop_locked(struct fs_context *); extern void vfs_clean_context(struct fs_context *fc); extern int finish_clean_context(struct fs_context *fc); diff --git a/fs/io-wq.c b/fs/io-wq.c index 843d4a7bcd6e..7d2ed8c7dd31 100644 --- a/fs/io-wq.c +++ b/fs/io-wq.c @@ -129,7 +129,8 @@ struct io_cb_cancel_data { bool cancel_all; }; -static void create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index); +static void create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index, bool first); +static void io_wqe_dec_running(struct io_worker *worker); static bool io_worker_get(struct io_worker *worker) { @@ -168,26 +169,21 @@ static void io_worker_exit(struct io_worker *worker) { struct io_wqe *wqe = worker->wqe; struct io_wqe_acct *acct = io_wqe_get_acct(worker); - unsigned flags; if (refcount_dec_and_test(&worker->ref)) complete(&worker->ref_done); wait_for_completion(&worker->ref_done); - preempt_disable(); - current->flags &= ~PF_IO_WORKER; - flags = worker->flags; - worker->flags = 0; - if (flags & IO_WORKER_F_RUNNING) - atomic_dec(&acct->nr_running); - worker->flags = 0; - preempt_enable(); - raw_spin_lock_irq(&wqe->lock); - if (flags & IO_WORKER_F_FREE) + if (worker->flags & IO_WORKER_F_FREE) hlist_nulls_del_rcu(&worker->nulls_node); 
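/* still holding wqe->lock: unhook the worker from the all-worker list and drop its nr_workers/nr_running accounting */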
list_del_rcu(&worker->all_list); acct->nr_workers--; + preempt_disable(); + io_wqe_dec_running(worker); + worker->flags = 0; + current->flags &= ~PF_IO_WORKER; + preempt_enable(); raw_spin_unlock_irq(&wqe->lock); kfree_rcu(worker, rcu); @@ -214,15 +210,19 @@ static bool io_wqe_activate_free_worker(struct io_wqe *wqe) struct hlist_nulls_node *n; struct io_worker *worker; - n = rcu_dereference(hlist_nulls_first_rcu(&wqe->free_list)); - if (is_a_nulls(n)) - return false; - - worker = hlist_nulls_entry(n, struct io_worker, nulls_node); - if (io_worker_get(worker)) { - wake_up_process(worker->task); + /* + * Iterate free_list and see if we can find an idle worker to + * activate. If a given worker is on the free_list but in the process + * of exiting, keep trying. + */ + hlist_nulls_for_each_entry_rcu(worker, n, &wqe->free_list, nulls_node) { + if (!io_worker_get(worker)) + continue; + if (wake_up_process(worker->task)) { + io_worker_release(worker); + return true; + } io_worker_release(worker); - return true; } return false; @@ -247,10 +247,21 @@ static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct) ret = io_wqe_activate_free_worker(wqe); rcu_read_unlock(); - if (!ret && acct->nr_workers < acct->max_workers) { - atomic_inc(&acct->nr_running); - atomic_inc(&wqe->wq->worker_refs); - create_io_worker(wqe->wq, wqe, acct->index); + if (!ret) { + bool do_create = false, first = false; + + raw_spin_lock_irq(&wqe->lock); + if (acct->nr_workers < acct->max_workers) { + atomic_inc(&acct->nr_running); + atomic_inc(&wqe->wq->worker_refs); + if (!acct->nr_workers) + first = true; + acct->nr_workers++; + do_create = true; + } + raw_spin_unlock_irq(&wqe->lock); + if (do_create) + create_io_worker(wqe->wq, wqe, acct->index, first); } } @@ -271,10 +282,28 @@ static void create_worker_cb(struct callback_head *cb) { struct create_worker_data *cwd; struct io_wq *wq; + struct io_wqe *wqe; + struct io_wqe_acct *acct; + bool do_create = false, first = false; cwd = container_of(cb, struct create_worker_data, work); - wq = cwd->wqe->wq; - create_io_worker(wq, cwd->wqe, cwd->index); + wqe = cwd->wqe; + wq = wqe->wq; + acct = &wqe->acct[cwd->index]; + raw_spin_lock_irq(&wqe->lock); + if (acct->nr_workers < acct->max_workers) { + if (!acct->nr_workers) + first = true; + acct->nr_workers++; + do_create = true; + } + raw_spin_unlock_irq(&wqe->lock); + if (do_create) { + create_io_worker(wq, wqe, cwd->index, first); + } else { + atomic_dec(&acct->nr_running); + io_worker_ref_put(wq); + } kfree(cwd); } @@ -612,7 +641,7 @@ void io_wq_worker_sleeping(struct task_struct *tsk) raw_spin_unlock_irq(&worker->wqe->lock); } -static void create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index) +static void create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index, bool first) { struct io_wqe_acct *acct = &wqe->acct[index]; struct io_worker *worker; @@ -635,6 +664,9 @@ static void create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index) kfree(worker); fail: atomic_dec(&acct->nr_running); + raw_spin_lock_irq(&wqe->lock); + acct->nr_workers--; + raw_spin_unlock_irq(&wqe->lock); io_worker_ref_put(wq); return; } @@ -650,9 +682,8 @@ fail: worker->flags |= IO_WORKER_F_FREE; if (index == IO_WQ_ACCT_BOUND) worker->flags |= IO_WORKER_F_BOUND; - if (!acct->nr_workers && (worker->flags & IO_WORKER_F_BOUND)) + if (first && (worker->flags & IO_WORKER_F_BOUND)) worker->flags |= IO_WORKER_F_FIXED; - acct->nr_workers++; raw_spin_unlock_irq(&wqe->lock); wake_up_new_task(tsk); } @@ -731,7 +762,12 @@ static 
void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work) int work_flags; unsigned long flags; - if (test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state)) { + /* + * If io-wq is exiting for this task, or if the request has explicitly + * been marked as one that should not get executed, cancel it here. + */ + if (test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state) || + (work->flags & IO_WQ_WORK_CANCEL)) { io_run_cancel(work, wqe); return; } diff --git a/fs/io_uring.c b/fs/io_uring.c index d94fb5835a20..a2e20a6fbfed 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -78,6 +78,7 @@ #include <linux/task_work.h> #include <linux/pagemap.h> #include <linux/io_uring.h> +#include <linux/tracehook.h> #define CREATE_TRACE_POINTS #include <trace/events/io_uring.h> @@ -1279,8 +1280,17 @@ static void io_prep_async_link(struct io_kiocb *req) { struct io_kiocb *cur; - io_for_each_link(cur, req) - io_prep_async_work(cur); + if (req->flags & REQ_F_LINK_TIMEOUT) { + struct io_ring_ctx *ctx = req->ctx; + + spin_lock_irq(&ctx->completion_lock); + io_for_each_link(cur, req) + io_prep_async_work(cur); + spin_unlock_irq(&ctx->completion_lock); + } else { + io_for_each_link(cur, req) + io_prep_async_work(cur); + } } static void io_queue_async_work(struct io_kiocb *req) @@ -1294,6 +1304,17 @@ static void io_queue_async_work(struct io_kiocb *req) /* init ->work of the whole link before punting */ io_prep_async_link(req); + + /* + * Not expected to happen, but if we do have a bug where this _can_ + * happen, catch it here and ensure the request is marked as + * canceled. That will make io-wq go through the usual work cancel + * procedure rather than attempt to run this request (or create a new + * worker for it). + */ + if (WARN_ON_ONCE(!same_thread_group(req->task, current))) + req->work.flags |= IO_WQ_WORK_CANCEL; + trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req, &req->work, req->flags); io_wq_enqueue(tctx->io_wq, &req->work); @@ -1479,7 +1500,8 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force) all_flushed = list_empty(&ctx->cq_overflow_list); if (all_flushed) { clear_bit(0, &ctx->check_cq_overflow); - ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW; + WRITE_ONCE(ctx->rings->sq_flags, + ctx->rings->sq_flags & ~IORING_SQ_CQ_OVERFLOW); } if (posted) @@ -1558,7 +1580,9 @@ static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data, } if (list_empty(&ctx->cq_overflow_list)) { set_bit(0, &ctx->check_cq_overflow); - ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW; + WRITE_ONCE(ctx->rings->sq_flags, + ctx->rings->sq_flags | IORING_SQ_CQ_OVERFLOW); + } ocqe->cqe.user_data = user_data; ocqe->cqe.res = res; @@ -1939,9 +1963,13 @@ static void tctx_task_work(struct callback_head *cb) node = next; } if (wq_list_empty(&tctx->task_list)) { + spin_lock_irq(&tctx->task_lock); clear_bit(0, &tctx->task_state); - if (wq_list_empty(&tctx->task_list)) + if (wq_list_empty(&tctx->task_list)) { + spin_unlock_irq(&tctx->task_lock); break; + } + spin_unlock_irq(&tctx->task_lock); /* another tctx_task_work() is enqueued, yield */ if (test_and_set_bit(0, &tctx->task_state)) break; @@ -2016,7 +2044,7 @@ static void io_req_task_submit(struct io_kiocb *req) /* ctx stays valid until unlock, even if we drop all ours ctx->refs */ mutex_lock(&ctx->uring_lock); - if (!(current->flags & PF_EXITING) && !current->in_execve) + if (!(req->task->flags & PF_EXITING) && !req->task->in_execve) __io_queue_sqe(req); else io_req_complete_failed(req, -EFAULT); @@ -2036,6 +2064,12 @@ static void 
io_req_task_queue(struct io_kiocb *req) io_req_task_work_add(req); } +static void io_req_task_queue_reissue(struct io_kiocb *req) +{ + req->io_task_work.func = io_queue_async_work; + io_req_task_work_add(req); +} + static inline void io_queue_next(struct io_kiocb *req) { struct io_kiocb *nxt = io_req_find_next(req); @@ -2192,9 +2226,9 @@ static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req) static inline bool io_run_task_work(void) { - if (current->task_works) { + if (test_thread_flag(TIF_NOTIFY_SIGNAL) || current->task_works) { __set_current_state(TASK_RUNNING); - task_work_run(); + tracehook_notify_signal(); return true; } @@ -2205,7 +2239,7 @@ static inline bool io_run_task_work(void) * Find and free completed poll iocbs */ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events, - struct list_head *done) + struct list_head *done, bool resubmit) { struct req_batch rb; struct io_kiocb *req; @@ -2220,11 +2254,11 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events, req = list_first_entry(done, struct io_kiocb, inflight_entry); list_del(&req->inflight_entry); - if (READ_ONCE(req->result) == -EAGAIN && + if (READ_ONCE(req->result) == -EAGAIN && resubmit && !(req->flags & REQ_F_DONT_REISSUE)) { req->iopoll_completed = 0; req_ref_get(req); - io_queue_async_work(req); + io_req_task_queue_reissue(req); continue; } @@ -2244,7 +2278,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events, } static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, - long min) + long min, bool resubmit) { struct io_kiocb *req, *tmp; LIST_HEAD(done); @@ -2287,7 +2321,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, } if (!list_empty(&done)) - io_iopoll_complete(ctx, nr_events, &done); + io_iopoll_complete(ctx, nr_events, &done, resubmit); return ret; } @@ -2305,7 +2339,7 @@ static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx) while (!list_empty(&ctx->iopoll_list)) { unsigned int nr_events = 0; - io_do_iopoll(ctx, &nr_events, 0); + io_do_iopoll(ctx, &nr_events, 0, false); /* let it sleep and repeat later if can't complete a request */ if (nr_events == 0) @@ -2367,7 +2401,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min) list_empty(&ctx->iopoll_list)) break; } - ret = io_do_iopoll(ctx, &nr_events, min); + ret = io_do_iopoll(ctx, &nr_events, min, true); } while (!ret && nr_events < min && !need_resched()); out: mutex_unlock(&ctx->uring_lock); @@ -2417,6 +2451,12 @@ static bool io_rw_should_reissue(struct io_kiocb *req) */ if (percpu_ref_is_dying(&ctx->refs)) return false; + /* + * Play it safe and assume not safe to re-import and reissue if we're + * not in the original thread group (or in task context). 
+ */ + if (!same_thread_group(req->task, current) || !in_task()) + return false; return true; } #else @@ -2437,8 +2477,10 @@ static void io_fallback_req_func(struct work_struct *work) struct llist_node *node = llist_del_all(&ctx->fallback_llist); struct io_kiocb *req, *tmp; + percpu_ref_get(&ctx->refs); llist_for_each_entry_safe(req, tmp, node, io_task_work.fallback_node) req->io_task_work.func(req); + percpu_ref_put(&ctx->refs); } static void __io_complete_rw(struct io_kiocb *req, long res, long res2, @@ -2747,7 +2789,7 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret, req->flags &= ~REQ_F_REISSUE; if (io_resubmit_prep(req)) { req_ref_get(req); - io_queue_async_work(req); + io_req_task_queue_reissue(req); } else { int cflags = 0; @@ -4802,6 +4844,7 @@ IO_NETOP_FN(recv); struct io_poll_table { struct poll_table_struct pt; struct io_kiocb *req; + int nr_entries; int error; }; @@ -4902,7 +4945,6 @@ static bool io_poll_complete(struct io_kiocb *req, __poll_t mask) if (req->poll.events & EPOLLONESHOT) flags = 0; if (!io_cqring_fill_event(ctx, req->user_data, error, flags)) { - io_poll_remove_waitqs(req); req->poll.done = true; flags = 0; } @@ -4925,6 +4967,7 @@ static void io_poll_task_func(struct io_kiocb *req) done = io_poll_complete(req, req->result); if (done) { + io_poll_remove_double(req); hash_del(&req->hash_node); } else { req->result = 0; @@ -4995,11 +5038,11 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt, struct io_kiocb *req = pt->req; /* - * If poll->head is already set, it's because the file being polled - * uses multiple waitqueues for poll handling (eg one for read, one - * for write). Setup a separate io_poll_iocb if this happens. + * The file being polled uses multiple waitqueues for poll handling + * (e.g. one for read, one for write). Setup a separate io_poll_iocb + * if this happens. 
*/ - if (unlikely(poll->head)) { + if (unlikely(pt->nr_entries)) { struct io_poll_iocb *poll_one = poll; /* already have a 2nd entry, fail a third attempt */ @@ -5027,7 +5070,7 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt, *poll_ptr = poll; } - pt->error = 0; + pt->nr_entries++; poll->head = head; if (poll->events & EPOLLEXCLUSIVE) @@ -5104,11 +5147,16 @@ static __poll_t __io_arm_poll_handler(struct io_kiocb *req, ipt->pt._key = mask; ipt->req = req; - ipt->error = -EINVAL; + ipt->error = 0; + ipt->nr_entries = 0; mask = vfs_poll(req->file, &ipt->pt) & poll->events; + if (unlikely(!ipt->nr_entries) && !ipt->error) + ipt->error = -EINVAL; spin_lock_irq(&ctx->completion_lock); + if (ipt->error || (mask && (poll->events & EPOLLONESHOT))) + io_poll_remove_double(req); if (likely(poll->head)) { spin_lock(&poll->head->lock); if (unlikely(list_empty(&poll->wait.entry))) { @@ -5179,7 +5227,6 @@ static int io_arm_poll_handler(struct io_kiocb *req) ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, io_async_wake); if (ret || ipt.error) { - io_poll_remove_double(req); spin_unlock_irq(&ctx->completion_lock); if (ret) return IO_APOLL_READY; @@ -6019,11 +6066,13 @@ static bool io_drain_req(struct io_kiocb *req) ret = io_req_prep_async(req); if (ret) - return ret; + goto fail; io_prep_async_link(req); de = kmalloc(sizeof(*de), GFP_KERNEL); if (!de) { - io_req_complete_failed(req, -ENOMEM); + ret = -ENOMEM; +fail: + io_req_complete_failed(req, ret); return true; } @@ -6760,14 +6809,16 @@ static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx) { /* Tell userspace we may need a wakeup call */ spin_lock_irq(&ctx->completion_lock); - ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP; + WRITE_ONCE(ctx->rings->sq_flags, + ctx->rings->sq_flags | IORING_SQ_NEED_WAKEUP); spin_unlock_irq(&ctx->completion_lock); } static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx) { spin_lock_irq(&ctx->completion_lock); - ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP; + WRITE_ONCE(ctx->rings->sq_flags, + ctx->rings->sq_flags & ~IORING_SQ_NEED_WAKEUP); spin_unlock_irq(&ctx->completion_lock); } @@ -6790,7 +6841,7 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries) mutex_lock(&ctx->uring_lock); if (!list_empty(&ctx->iopoll_list)) - io_do_iopoll(ctx, &nr_events, 0); + io_do_iopoll(ctx, &nr_events, 0, true); /* * Don't submit if refs are dying, good for io_uring_register(), @@ -7089,16 +7140,6 @@ static void **io_alloc_page_table(size_t size) return table; } -static inline void io_rsrc_ref_lock(struct io_ring_ctx *ctx) -{ - spin_lock_bh(&ctx->rsrc_ref_lock); -} - -static inline void io_rsrc_ref_unlock(struct io_ring_ctx *ctx) -{ - spin_unlock_bh(&ctx->rsrc_ref_lock); -} - static void io_rsrc_node_destroy(struct io_rsrc_node *ref_node) { percpu_ref_exit(&ref_node->refs); @@ -7115,9 +7156,9 @@ static void io_rsrc_node_switch(struct io_ring_ctx *ctx, struct io_rsrc_node *rsrc_node = ctx->rsrc_node; rsrc_node->rsrc_data = data_to_kill; - io_rsrc_ref_lock(ctx); + spin_lock_irq(&ctx->rsrc_ref_lock); list_add_tail(&rsrc_node->node, &ctx->rsrc_ref_list); - io_rsrc_ref_unlock(ctx); + spin_unlock_irq(&ctx->rsrc_ref_lock); atomic_inc(&data_to_kill->refs); percpu_ref_kill(&rsrc_node->refs); @@ -7156,17 +7197,19 @@ static int io_rsrc_ref_quiesce(struct io_rsrc_data *data, struct io_ring_ctx *ct /* kill initial ref, already quiesced if zero */ if (atomic_dec_and_test(&data->refs)) break; + mutex_unlock(&ctx->uring_lock); 
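+ /* drop uring_lock while flushing and waiting: __io_rsrc_put_work() may need it to make progress */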
flush_delayed_work(&ctx->rsrc_put_work); ret = wait_for_completion_interruptible(&data->done); - if (!ret) + if (!ret) { + mutex_lock(&ctx->uring_lock); break; + } atomic_inc(&data->refs); /* wait for all works potentially completing data->done */ flush_delayed_work(&ctx->rsrc_put_work); reinit_completion(&data->done); - mutex_unlock(&ctx->uring_lock); ret = io_run_task_work_sig(); mutex_lock(&ctx->uring_lock); } while (ret >= 0); @@ -7625,9 +7668,10 @@ static void io_rsrc_node_ref_zero(struct percpu_ref *ref) { struct io_rsrc_node *node = container_of(ref, struct io_rsrc_node, refs); struct io_ring_ctx *ctx = node->rsrc_data->ctx; + unsigned long flags; bool first_add = false; - io_rsrc_ref_lock(ctx); + spin_lock_irqsave(&ctx->rsrc_ref_lock, flags); node->done = true; while (!list_empty(&ctx->rsrc_ref_list)) { @@ -7639,7 +7683,7 @@ static void io_rsrc_node_ref_zero(struct percpu_ref *ref) list_del(&node->node); first_add |= llist_add(&node->llist, &ctx->rsrc_put_llist); } - io_rsrc_ref_unlock(ctx); + spin_unlock_irqrestore(&ctx->rsrc_ref_lock, flags); if (first_add) mod_delayed_work(system_wq, &ctx->rsrc_put_work, HZ); @@ -7897,15 +7941,19 @@ static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx, struct io_wq_data data; unsigned int concurrency; + mutex_lock(&ctx->uring_lock); hash = ctx->hash_map; if (!hash) { hash = kzalloc(sizeof(*hash), GFP_KERNEL); - if (!hash) + if (!hash) { + mutex_unlock(&ctx->uring_lock); return ERR_PTR(-ENOMEM); + } refcount_set(&hash->refs, 1); init_waitqueue_head(&hash->wait); ctx->hash_map = hash; } + mutex_unlock(&ctx->uring_lock); data.hash = hash; data.task = task; @@ -7979,9 +8027,11 @@ static int io_sq_offload_create(struct io_ring_ctx *ctx, f = fdget(p->wq_fd); if (!f.file) return -ENXIO; - fdput(f); - if (f.file->f_op != &io_uring_fops) + if (f.file->f_op != &io_uring_fops) { + fdput(f); return -EINVAL; + } + fdput(f); } if (ctx->flags & IORING_SETUP_SQPOLL) { struct task_struct *tsk; @@ -8604,13 +8654,10 @@ static void io_req_caches_free(struct io_ring_ctx *ctx) mutex_unlock(&ctx->uring_lock); } -static bool io_wait_rsrc_data(struct io_rsrc_data *data) +static void io_wait_rsrc_data(struct io_rsrc_data *data) { - if (!data) - return false; - if (!atomic_dec_and_test(&data->refs)) + if (data && !atomic_dec_and_test(&data->refs)) wait_for_completion(&data->done); - return true; } static void io_ring_ctx_free(struct io_ring_ctx *ctx) @@ -8622,10 +8669,14 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx) ctx->mm_account = NULL; } + /* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */ + io_wait_rsrc_data(ctx->buf_data); + io_wait_rsrc_data(ctx->file_data); + mutex_lock(&ctx->uring_lock); - if (io_wait_rsrc_data(ctx->buf_data)) + if (ctx->buf_data) __io_sqe_buffers_unregister(ctx); - if (io_wait_rsrc_data(ctx->file_data)) + if (ctx->file_data) __io_sqe_files_unregister(ctx); if (ctx->rings) __io_cqring_overflow_flush(ctx, true); @@ -9321,9 +9372,10 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, if (ctx->flags & IORING_SETUP_SQPOLL) { io_cqring_overflow_flush(ctx, false); - ret = -EOWNERDEAD; - if (unlikely(ctx->sq_data->thread == NULL)) + if (unlikely(ctx->sq_data->thread == NULL)) { + ret = -EOWNERDEAD; goto out; + } if (flags & IORING_ENTER_SQ_WAKEUP) wake_up(&ctx->sq_data->wait); if (flags & IORING_ENTER_SQ_WAIT) { @@ -9791,10 +9843,11 @@ static int io_register_personality(struct io_ring_ctx *ctx) ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds, XA_LIMIT(0, USHRT_MAX), 
&ctx->pers_next, GFP_KERNEL); - if (!ret) - return id; - put_cred(creds); - return ret; + if (ret < 0) { + put_cred(creds); + return ret; + } + return id; } static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg, diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index 41da4f14c00b..87ccb3438bec 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -215,6 +215,7 @@ iomap_read_inline_data(struct inode *inode, struct page *page, if (PageUptodate(page)) return; + BUG_ON(page_has_private(page)); BUG_ON(page->index); BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data)); @@ -239,7 +240,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data, { struct iomap_readpage_ctx *ctx = data; struct page *page = ctx->cur_page; - struct iomap_page *iop = iomap_page_create(inode, page); + struct iomap_page *iop; bool same_page = false, is_contig = false; loff_t orig_pos = pos; unsigned poff, plen; @@ -252,6 +253,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data, } /* zero post-eof blocks as the page may be mapped */ + iop = iomap_page_create(inode, page); iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen); if (plen == 0) goto done; @@ -967,7 +969,6 @@ iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length, block_commit_write(page, 0, length); } else { WARN_ON_ONCE(!PageUptodate(page)); - iomap_page_create(inode, page); set_page_dirty(page); } @@ -1304,14 +1305,13 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc, struct writeback_control *wbc, struct inode *inode, struct page *page, u64 end_offset) { - struct iomap_page *iop = to_iomap_page(page); + struct iomap_page *iop = iomap_page_create(inode, page); struct iomap_ioend *ioend, *next; unsigned len = i_blocksize(inode); u64 file_offset; /* file offset of page */ int error = 0, count = 0, i; LIST_HEAD(submit_list); - WARN_ON_ONCE(i_blocks_per_page(inode, page) > 1 && !iop); WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) != 0); /* diff --git a/fs/iomap/seek.c b/fs/iomap/seek.c index dab1b02eba5b..ce6fb810854f 100644 --- a/fs/iomap/seek.c +++ b/fs/iomap/seek.c @@ -35,23 +35,20 @@ loff_t iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops) { loff_t size = i_size_read(inode); - loff_t length = size - offset; loff_t ret; /* Nothing to be found before or beyond the end of the file. */ if (offset < 0 || offset >= size) return -ENXIO; - while (length > 0) { - ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops, - &offset, iomap_seek_hole_actor); + while (offset < size) { + ret = iomap_apply(inode, offset, size - offset, IOMAP_REPORT, + ops, &offset, iomap_seek_hole_actor); if (ret < 0) return ret; if (ret == 0) break; - offset += ret; - length -= ret; } return offset; @@ -83,27 +80,23 @@ loff_t iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops) { loff_t size = i_size_read(inode); - loff_t length = size - offset; loff_t ret; /* Nothing to be found before or beyond the end of the file. 
*/ if (offset < 0 || offset >= size) return -ENXIO; - while (length > 0) { - ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops, - &offset, iomap_seek_data_actor); + while (offset < size) { + ret = iomap_apply(inode, offset, size - offset, IOMAP_REPORT, + ops, &offset, iomap_seek_data_actor); if (ret < 0) return ret; if (ret == 0) - break; - + return offset; offset += ret; - length -= ret; } - if (length <= 0) - return -ENXIO; - return offset; + /* We've reached the end of the file without finding data */ + return -ENXIO; } EXPORT_SYMBOL_GPL(iomap_seek_data); diff --git a/fs/namespace.c b/fs/namespace.c index ab4174a3c802..97adcb5ab5d5 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1716,8 +1716,12 @@ static inline bool may_mount(void) } #ifdef CONFIG_MANDATORY_FILE_LOCKING -static inline bool may_mandlock(void) +static bool may_mandlock(void) { + pr_warn_once("======================================================\n" + "WARNING: the mand mount option is being deprecated and\n" + " will be removed in v5.15!\n" + "======================================================\n"); return capable(CAP_SYS_ADMIN); } #else @@ -1938,6 +1942,20 @@ void drop_collected_mounts(struct vfsmount *mnt) namespace_unlock(); } +static bool has_locked_children(struct mount *mnt, struct dentry *dentry) +{ + struct mount *child; + + list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) { + if (!is_subdir(child->mnt_mountpoint, dentry)) + continue; + + if (child->mnt.mnt_flags & MNT_LOCKED) + return true; + } + return false; +} + /** * clone_private_mount - create a private clone of a path * @path: path to clone @@ -1953,10 +1971,19 @@ struct vfsmount *clone_private_mount(const struct path *path) struct mount *old_mnt = real_mount(path->mnt); struct mount *new_mnt; + down_read(&namespace_sem); if (IS_MNT_UNBINDABLE(old_mnt)) - return ERR_PTR(-EINVAL); + goto invalid; + + if (!check_mnt(old_mnt)) + goto invalid; + + if (has_locked_children(old_mnt, path->dentry)) + goto invalid; new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE); + up_read(&namespace_sem); + if (IS_ERR(new_mnt)) return ERR_CAST(new_mnt); @@ -1964,6 +1991,10 @@ struct vfsmount *clone_private_mount(const struct path *path) new_mnt->mnt_ns = MNT_NS_INTERNAL; return &new_mnt->mnt; + +invalid: + up_read(&namespace_sem); + return ERR_PTR(-EINVAL); } EXPORT_SYMBOL_GPL(clone_private_mount); @@ -2315,19 +2346,6 @@ static int do_change_type(struct path *path, int ms_flags) return err; } -static bool has_locked_children(struct mount *mnt, struct dentry *dentry) -{ - struct mount *child; - list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) { - if (!is_subdir(child->mnt_mountpoint, dentry)) - continue; - - if (child->mnt.mnt_flags & MNT_LOCKED) - return true; - } - return false; -} - static struct mount *__do_loopback(struct path *old_path, int recurse) { struct mount *mnt = ERR_PTR(-EINVAL), *old = real_mount(old_path->mnt); diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index 64864fb40b40..28b67cb9458d 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c @@ -54,22 +54,27 @@ static int fanotify_max_queued_events __read_mostly; #include <linux/sysctl.h> +static long ft_zero = 0; +static long ft_int_max = INT_MAX; + struct ctl_table fanotify_table[] = { { .procname = "max_user_groups", .data = &init_user_ns.ucount_max[UCOUNT_FANOTIFY_GROUPS], - .maxlen = sizeof(int), + .maxlen = sizeof(long), .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = SYSCTL_ZERO, 
+ .proc_handler = proc_doulongvec_minmax, + .extra1 = &ft_zero, + .extra2 = &ft_int_max, }, { .procname = "max_user_marks", .data = &init_user_ns.ucount_max[UCOUNT_FANOTIFY_MARKS], - .maxlen = sizeof(int), + .maxlen = sizeof(long), .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = SYSCTL_ZERO, + .proc_handler = proc_doulongvec_minmax, + .extra1 = &ft_zero, + .extra2 = &ft_int_max, }, { .procname = "max_queued_events", diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c index 98f61b31745a..62051247f6d2 100644 --- a/fs/notify/inotify/inotify_user.c +++ b/fs/notify/inotify/inotify_user.c @@ -55,22 +55,27 @@ struct kmem_cache *inotify_inode_mark_cachep __read_mostly; #include <linux/sysctl.h> +static long it_zero = 0; +static long it_int_max = INT_MAX; + struct ctl_table inotify_table[] = { { .procname = "max_user_instances", .data = &init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES], - .maxlen = sizeof(int), + .maxlen = sizeof(long), .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = SYSCTL_ZERO, + .proc_handler = proc_doulongvec_minmax, + .extra1 = &it_zero, + .extra2 = &it_int_max, }, { .procname = "max_user_watches", .data = &init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES], - .maxlen = sizeof(int), + .maxlen = sizeof(long), .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = SYSCTL_ZERO, + .proc_handler = proc_doulongvec_minmax, + .extra1 = &it_zero, + .extra2 = &it_int_max, }, { .procname = "max_queued_events", diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 775657943057..54d7843c0211 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -1529,6 +1529,45 @@ static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start, } } +/* + * zero out partial blocks of one cluster. + * + * start: file offset where zero starts, will be made upper block aligned. + * len: it will be trimmed to the end of current cluster if "start + len" + * is bigger than it. + */ +static int ocfs2_zeroout_partial_cluster(struct inode *inode, + u64 start, u64 len) +{ + int ret; + u64 start_block, end_block, nr_blocks; + u64 p_block, offset; + u32 cluster, p_cluster, nr_clusters; + struct super_block *sb = inode->i_sb; + u64 end = ocfs2_align_bytes_to_clusters(sb, start); + + if (start + len < end) + end = start + len; + + start_block = ocfs2_blocks_for_bytes(sb, start); + end_block = ocfs2_blocks_for_bytes(sb, end); + nr_blocks = end_block - start_block; + if (!nr_blocks) + return 0; + + cluster = ocfs2_bytes_to_clusters(sb, start); + ret = ocfs2_get_clusters(inode, cluster, &p_cluster, + &nr_clusters, NULL); + if (ret) + return ret; + if (!p_cluster) + return 0; + + offset = start_block - ocfs2_clusters_to_blocks(sb, cluster); + p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset; + return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS); +} + static int ocfs2_zero_partial_clusters(struct inode *inode, u64 start, u64 len) { @@ -1538,6 +1577,7 @@ static int ocfs2_zero_partial_clusters(struct inode *inode, struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); unsigned int csize = osb->s_clustersize; handle_t *handle; + loff_t isize = i_size_read(inode); /* * The "start" and "end" values are NOT necessarily part of @@ -1558,6 +1598,26 @@ static int ocfs2_zero_partial_clusters(struct inode *inode, if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0) goto out; + /* No page cache for EOF blocks, issue zero out to disk. 
*/ + if (end > isize) { + /* + * Zero out eof blocks in the last cluster starting from + * "isize", even when "start" > "isize", because it is + * complicated to zero out just at "start": "start" may not be + * aligned with the block size, so a buffer write would be + * needed, and out-of-eof buffer writes are not supported. + */ + ret = ocfs2_zeroout_partial_cluster(inode, isize, + end - isize); + if (ret) { + mlog_errno(ret); + goto out; + } + if (start >= isize) + goto out; + end = isize; + } handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); if (IS_ERR(handle)) { ret = PTR_ERR(handle); @@ -1856,45 +1916,6 @@ out: } /* - * zero out partial blocks of one cluster. - * - * start: file offset where zero starts, will be made upper block aligned. - * len: it will be trimmed to the end of current cluster if "start + len" - * is bigger than it. - */ -static int ocfs2_zeroout_partial_cluster(struct inode *inode, - u64 start, u64 len) -{ - int ret; - u64 start_block, end_block, nr_blocks; - u64 p_block, offset; - u32 cluster, p_cluster, nr_clusters; - struct super_block *sb = inode->i_sb; - u64 end = ocfs2_align_bytes_to_clusters(sb, start); - - if (start + len < end) - end = start + len; - - start_block = ocfs2_blocks_for_bytes(sb, start); - end_block = ocfs2_blocks_for_bytes(sb, end); - nr_blocks = end_block - start_block; - if (!nr_blocks) - return 0; - - cluster = ocfs2_bytes_to_clusters(sb, start); - ret = ocfs2_get_clusters(inode, cluster, &p_cluster, - &nr_clusters, NULL); - if (ret) - return ret; - if (!p_cluster) - return 0; - - offset = start_block - ocfs2_clusters_to_blocks(sb, cluster); - p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset; - return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS); -} - -/* * Parts of this function taken from xfs_change_file_space() */ static int __ocfs2_change_file_space(struct file *file, struct inode *inode, @@ -1935,7 +1956,6 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode, goto out_inode_unlock; } - orig_isize = i_size_read(inode); switch (sr->l_whence) { case 0: /*SEEK_SET*/ break; @@ -1943,7 +1963,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode, sr->l_start += f_pos; break; case 2: /*SEEK_END*/ - sr->l_start += orig_isize; + sr->l_start += i_size_read(inode); break; default: ret = -EINVAL; @@ -1998,6 +2018,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode, ret = -EINVAL; } + orig_isize = i_size_read(inode); /* zeroout eof blocks in the cluster. 
*/ if (!ret && change_size && orig_isize < size) { ret = ocfs2_zeroout_partial_cluster(inode, orig_isize, diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c index 41ebf52f1bbc..ebde05c9cf62 100644 --- a/fs/overlayfs/export.c +++ b/fs/overlayfs/export.c @@ -392,6 +392,7 @@ static struct dentry *ovl_lookup_real_one(struct dentry *connected, */ take_dentry_name_snapshot(&name, real); this = lookup_one_len(name.name.name, connected, name.name.len); + release_dentry_name_snapshot(&name); err = PTR_ERR(this); if (IS_ERR(this)) { goto fail; @@ -406,7 +407,6 @@ static struct dentry *ovl_lookup_real_one(struct dentry *connected, } out: - release_dentry_name_snapshot(&name); dput(parent); inode_unlock(dir); return this; diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c index 4d53d3b7e5fe..d081faa55e83 100644 --- a/fs/overlayfs/file.c +++ b/fs/overlayfs/file.c @@ -392,6 +392,51 @@ out_unlock: return ret; } +/* + * Calling iter_file_splice_write() directly from overlay's f_op may deadlock + * due to lock order inversion between pipe->mutex in iter_file_splice_write() + * and file_start_write(real.file) in ovl_write_iter(). + * + * So do everything ovl_write_iter() does and call iter_file_splice_write() on + * the real file. + */ +static ssize_t ovl_splice_write(struct pipe_inode_info *pipe, struct file *out, + loff_t *ppos, size_t len, unsigned int flags) +{ + struct fd real; + const struct cred *old_cred; + struct inode *inode = file_inode(out); + struct inode *realinode = ovl_inode_real(inode); + ssize_t ret; + + inode_lock(inode); + /* Update mode */ + ovl_copyattr(realinode, inode); + ret = file_remove_privs(out); + if (ret) + goto out_unlock; + + ret = ovl_real_fdget(out, &real); + if (ret) + goto out_unlock; + + old_cred = ovl_override_creds(inode->i_sb); + file_start_write(real.file); + + ret = iter_file_splice_write(pipe, real.file, ppos, len, flags); + + file_end_write(real.file); + /* Update size */ + ovl_copyattr(realinode, inode); + revert_creds(old_cred); + fdput(real); + +out_unlock: + inode_unlock(inode); + + return ret; +} + static int ovl_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct fd real; @@ -603,7 +648,7 @@ const struct file_operations ovl_file_operations = { .fadvise = ovl_fadvise, .flush = ovl_flush, .splice_read = generic_file_splice_read, - .splice_write = iter_file_splice_write, + .splice_write = ovl_splice_write, .copy_file_range = ovl_copy_file_range, .remap_file_range = ovl_remap_file_range, diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c index e8ad2c2c77dd..150fdf3bc68d 100644 --- a/fs/overlayfs/readdir.c +++ b/fs/overlayfs/readdir.c @@ -481,6 +481,8 @@ static int ovl_cache_update_ino(struct path *path, struct ovl_cache_entry *p) } this = lookup_one_len(p->name, dir, p->len); if (IS_ERR_OR_NULL(this) || !this->d_inode) { + /* Mark a stale entry */ + p->is_whiteout = true; if (IS_ERR(this)) { err = PTR_ERR(this); this = NULL; @@ -776,6 +778,9 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx) if (err) goto out; } + } + /* ovl_cache_update_ino() sets is_whiteout on stale entry */ + if (!p->is_whiteout) { if (!dir_emit(ctx, p->name, p->len, p->ino, p->type)) break; } diff --git a/fs/pipe.c b/fs/pipe.c index bfd946a9ad01..678dee2a8228 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -32,6 +32,21 @@ #include "internal.h" /* + * New pipe buffers will be restricted to this size while the user is exceeding + * their pipe buffer quota. 
The general pipe use case needs at least two + * buffers: one for data yet to be read, and one for new data. If this is less + * than two, then a write to a non-empty pipe may block even if the pipe is not + * full. This can occur with GNU make jobserver or similar uses of pipes as + * semaphores: multiple processes may be waiting to write tokens back to the + * pipe before reading tokens: https://lore.kernel.org/lkml/1628086770.5rn8p04n6j.none@localhost/. + * + * Users can reduce their pipe buffers with F_SETPIPE_SZ below this at their + * own risk, namely: pipe writes to non-full pipes may block until the pipe is + * emptied. + */ +#define PIPE_MIN_DEF_BUFFERS 2 + +/* * The max size that a non-root user is allowed to grow the pipe. Can * be set by root in /proc/sys/fs/pipe-max-size */ @@ -429,14 +444,11 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from) #endif /* - * Only wake up if the pipe started out empty, since - * otherwise there should be no readers waiting. - * * If it wasn't empty we try to merge new data into * the last buffer. * * That naturally merges small writes, but it also - * page-aligs the rest of the writes for large writes + * page-aligns the rest of the writes for large writes * spanning multiple pages. */ head = pipe->head; @@ -575,8 +587,11 @@ out: * This is particularly important for small writes, because of * how (for example) the GNU make jobserver uses small writes to * wake up pending jobs + * + * Epoll nonsensically wants a wakeup whether the pipe + * was already empty or not. */ - if (was_empty) { + if (was_empty || pipe->poll_usage) { wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); } @@ -639,6 +654,9 @@ pipe_poll(struct file *filp, poll_table *wait) struct pipe_inode_info *pipe = filp->private_data; unsigned int head, tail; + /* Epoll has some historical nasty semantics, this enables them */ + pipe->poll_usage = 1; + /* * Reading pipe state only -- no need for acquiring the semaphore. 
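The two-buffer floor above exists for pipes used as counting semaphores, as the comment describes: the GNU make jobserver seeds a pipe with one token per job slot, and workers read a token to start a job and write it back when done. A stripped-down single-process sketch of that pattern (illustrative only), which is exactly the usage that breaks if a write to a non-empty, non-full pipe can block:

	#include <unistd.h>

	int main(void)
	{
		int fds[2];
		char token = '+';
		int slots = 4, i;

		if (pipe(fds) != 0)
			return 1;
		/* Seed one token per job slot. These writes must not block
		 * just because the pipe already holds data, hence the
		 * PIPE_MIN_DEF_BUFFERS floor of two buffers. */
		for (i = 0; i < slots; i++)
			if (write(fds[1], &token, 1) != 1)
				return 1;

		/* Acquire a slot: blocks until a token is available. */
		if (read(fds[0], &token, 1) != 1)
			return 1;
		/* ... run one job ... */
		/* Release the slot for the next worker. */
		if (write(fds[1], &token, 1) != 1)
			return 1;

		close(fds[0]);
		close(fds[1]);
		return 0;
	}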
* @@ -781,8 +799,8 @@ struct pipe_inode_info *alloc_pipe_info(void) user_bufs = account_pipe_buffers(user, 0, pipe_bufs); if (too_many_pipe_buffers_soft(user_bufs) && pipe_is_unprivileged_user()) { - user_bufs = account_pipe_buffers(user, pipe_bufs, 1); - pipe_bufs = 1; + user_bufs = account_pipe_buffers(user, pipe_bufs, PIPE_MIN_DEF_BUFFERS); + pipe_bufs = PIPE_MIN_DEF_BUFFERS; } if (too_many_pipe_buffers_hard(user_bufs) && pipe_is_unprivileged_user()) diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c index 476a7ff49482..ef42729216d1 100644 --- a/fs/reiserfs/stree.c +++ b/fs/reiserfs/stree.c @@ -387,6 +387,24 @@ void pathrelse(struct treepath *search_path) search_path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET; } +static int has_valid_deh_location(struct buffer_head *bh, struct item_head *ih) +{ + struct reiserfs_de_head *deh; + int i; + + deh = B_I_DEH(bh, ih); + for (i = 0; i < ih_entry_count(ih); i++) { + if (deh_location(&deh[i]) > ih_item_len(ih)) { + reiserfs_warning(NULL, "reiserfs-5094", + "directory entry location seems wrong %h", + &deh[i]); + return 0; + } + } + + return 1; +} + static int is_leaf(char *buf, int blocksize, struct buffer_head *bh) { struct block_head *blkh; @@ -454,11 +472,14 @@ static int is_leaf(char *buf, int blocksize, struct buffer_head *bh) "(second one): %h", ih); return 0; } - if (is_direntry_le_ih(ih) && (ih_item_len(ih) < (ih_entry_count(ih) * IH_SIZE))) { - reiserfs_warning(NULL, "reiserfs-5093", - "item entry count seems wrong %h", - ih); - return 0; + if (is_direntry_le_ih(ih)) { + if (ih_item_len(ih) < (ih_entry_count(ih) * IH_SIZE)) { + reiserfs_warning(NULL, "reiserfs-5093", + "item entry count seems wrong %h", + ih); + return 0; + } + return has_valid_deh_location(bh, ih); } prev_location = ih_location(ih); } diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index 3ffafc73acf0..58481f8d63d5 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -2082,6 +2082,14 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent) unlock_new_inode(root_inode); } + if (!S_ISDIR(root_inode->i_mode) || !inode_get_bytes(root_inode) || + !root_inode->i_size) { + SWARN(silent, s, "", "corrupt root inode, run fsck"); + iput(root_inode); + errval = -EUCLEAN; + goto error; + } + s->s_root = d_make_root(root_inode); if (!s->s_root) goto error; diff --git a/fs/seq_file.c b/fs/seq_file.c index b117b212ef28..4a2cda04d3e2 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -32,6 +32,9 @@ static void seq_set_overflow(struct seq_file *m) static void *seq_buf_alloc(unsigned long size) { + if (unlikely(size > MAX_RW_COUNT)) + return NULL; + return kvmalloc(size, GFP_KERNEL_ACCOUNT); } diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index f6e0f0c0d0e5..5c2d806e6ae5 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -1236,23 +1236,21 @@ static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx, } static __always_inline int validate_range(struct mm_struct *mm, - __u64 *start, __u64 len) + __u64 start, __u64 len) { __u64 task_size = mm->task_size; - *start = untagged_addr(*start); - - if (*start & ~PAGE_MASK) + if (start & ~PAGE_MASK) return -EINVAL; if (len & ~PAGE_MASK) return -EINVAL; if (!len) return -EINVAL; - if (*start < mmap_min_addr) + if (start < mmap_min_addr) return -EINVAL; - if (*start >= task_size) + if (start >= task_size) return -EINVAL; - if (len > task_size - *start) + if (len > task_size - start) return -EINVAL; return 0; } @@ -1316,7 +1314,7 @@ static int userfaultfd_register(struct userfaultfd_ctx 
*ctx, vm_flags |= VM_UFFD_MINOR; } - ret = validate_range(mm, &uffdio_register.range.start, + ret = validate_range(mm, uffdio_register.range.start, uffdio_register.range.len); if (ret) goto out; @@ -1522,7 +1520,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx, if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister))) goto out; - ret = validate_range(mm, &uffdio_unregister.start, + ret = validate_range(mm, uffdio_unregister.start, uffdio_unregister.len); if (ret) goto out; @@ -1671,7 +1669,7 @@ static int userfaultfd_wake(struct userfaultfd_ctx *ctx, if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake))) goto out; - ret = validate_range(ctx->mm, &uffdio_wake.start, uffdio_wake.len); + ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len); if (ret) goto out; @@ -1711,7 +1709,7 @@ static int userfaultfd_copy(struct userfaultfd_ctx *ctx, sizeof(uffdio_copy)-sizeof(__s64))) goto out; - ret = validate_range(ctx->mm, &uffdio_copy.dst, uffdio_copy.len); + ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len); if (ret) goto out; /* @@ -1768,7 +1766,7 @@ static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx, sizeof(uffdio_zeropage)-sizeof(__s64))) goto out; - ret = validate_range(ctx->mm, &uffdio_zeropage.range.start, + ret = validate_range(ctx->mm, uffdio_zeropage.range.start, uffdio_zeropage.range.len); if (ret) goto out; @@ -1818,7 +1816,7 @@ static int userfaultfd_writeprotect(struct userfaultfd_ctx *ctx, sizeof(struct uffdio_writeprotect))) return -EFAULT; - ret = validate_range(ctx->mm, &uffdio_wp.range.start, + ret = validate_range(ctx->mm, uffdio_wp.range.start, uffdio_wp.range.len); if (ret) return ret; @@ -1866,7 +1864,7 @@ static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg) sizeof(uffdio_continue) - (sizeof(__s64)))) goto out; - ret = validate_range(ctx->mm, &uffdio_continue.range.start, + ret = validate_range(ctx->mm, uffdio_continue.range.start, uffdio_continue.range.len); if (ret) goto out; diff --git a/fs/vboxsf/dir.c b/fs/vboxsf/dir.c index eac6788fc6cf..c4769a9396c5 100644 --- a/fs/vboxsf/dir.c +++ b/fs/vboxsf/dir.c @@ -253,7 +253,7 @@ static int vboxsf_dir_instantiate(struct inode *parent, struct dentry *dentry, } static int vboxsf_dir_create(struct inode *parent, struct dentry *dentry, - umode_t mode, int is_dir) + umode_t mode, bool is_dir, bool excl, u64 *handle_ret) { struct vboxsf_inode *sf_parent_i = VBOXSF_I(parent); struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb); @@ -261,10 +261,12 @@ static int vboxsf_dir_create(struct inode *parent, struct dentry *dentry, int err; params.handle = SHFL_HANDLE_NIL; - params.create_flags = SHFL_CF_ACT_CREATE_IF_NEW | - SHFL_CF_ACT_FAIL_IF_EXISTS | - SHFL_CF_ACCESS_READWRITE | - (is_dir ? SHFL_CF_DIRECTORY : 0); + params.create_flags = SHFL_CF_ACT_CREATE_IF_NEW | SHFL_CF_ACCESS_READWRITE; + if (is_dir) + params.create_flags |= SHFL_CF_DIRECTORY; + if (excl) + params.create_flags |= SHFL_CF_ACT_FAIL_IF_EXISTS; + params.info.attr.mode = (mode & 0777) | (is_dir ? 
SHFL_TYPE_DIRECTORY : SHFL_TYPE_FILE); params.info.attr.additional = SHFLFSOBJATTRADD_NOTHING; @@ -276,30 +278,81 @@ static int vboxsf_dir_create(struct inode *parent, struct dentry *dentry, if (params.result != SHFL_FILE_CREATED) return -EPERM; - vboxsf_close(sbi->root, params.handle); - err = vboxsf_dir_instantiate(parent, dentry, &params.info); if (err) - return err; + goto out; /* parent directory access/change time changed */ sf_parent_i->force_restat = 1; - return 0; +out: + if (err == 0 && handle_ret) + *handle_ret = params.handle; + else + vboxsf_close(sbi->root, params.handle); + + return err; } static int vboxsf_dir_mkfile(struct user_namespace *mnt_userns, struct inode *parent, struct dentry *dentry, umode_t mode, bool excl) { - return vboxsf_dir_create(parent, dentry, mode, 0); + return vboxsf_dir_create(parent, dentry, mode, false, excl, NULL); } static int vboxsf_dir_mkdir(struct user_namespace *mnt_userns, struct inode *parent, struct dentry *dentry, umode_t mode) { - return vboxsf_dir_create(parent, dentry, mode, 1); + return vboxsf_dir_create(parent, dentry, mode, true, true, NULL); +} + +static int vboxsf_dir_atomic_open(struct inode *parent, struct dentry *dentry, + struct file *file, unsigned int flags, umode_t mode) +{ + struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb); + struct vboxsf_handle *sf_handle; + struct dentry *res = NULL; + u64 handle; + int err; + + if (d_in_lookup(dentry)) { + res = vboxsf_dir_lookup(parent, dentry, 0); + if (IS_ERR(res)) + return PTR_ERR(res); + + if (res) + dentry = res; + } + + /* Only creates */ + if (!(flags & O_CREAT) || d_really_is_positive(dentry)) + return finish_no_open(file, res); + + err = vboxsf_dir_create(parent, dentry, mode, false, flags & O_EXCL, &handle); + if (err) + goto out; + + sf_handle = vboxsf_create_sf_handle(d_inode(dentry), handle, SHFL_CF_ACCESS_READWRITE); + if (IS_ERR(sf_handle)) { + vboxsf_close(sbi->root, handle); + err = PTR_ERR(sf_handle); + goto out; + } + + err = finish_open(file, dentry, generic_file_open); + if (err) { + /* This also closes the handle passed to vboxsf_create_sf_handle() */ + vboxsf_release_sf_handle(d_inode(dentry), sf_handle); + goto out; + } + + file->private_data = sf_handle; + file->f_mode |= FMODE_CREATED; +out: + dput(res); + return err; } static int vboxsf_dir_unlink(struct inode *parent, struct dentry *dentry) @@ -422,6 +475,7 @@ const struct inode_operations vboxsf_dir_iops = { .lookup = vboxsf_dir_lookup, .create = vboxsf_dir_mkfile, .mkdir = vboxsf_dir_mkdir, + .atomic_open = vboxsf_dir_atomic_open, .rmdir = vboxsf_dir_unlink, .unlink = vboxsf_dir_unlink, .rename = vboxsf_dir_rename, diff --git a/fs/vboxsf/file.c b/fs/vboxsf/file.c index c4ab5996d97a..864c2fad23be 100644 --- a/fs/vboxsf/file.c +++ b/fs/vboxsf/file.c @@ -20,17 +20,39 @@ struct vboxsf_handle { struct list_head head; }; -static int vboxsf_file_open(struct inode *inode, struct file *file) +struct vboxsf_handle *vboxsf_create_sf_handle(struct inode *inode, + u64 handle, u32 access_flags) { struct vboxsf_inode *sf_i = VBOXSF_I(inode); - struct shfl_createparms params = {}; struct vboxsf_handle *sf_handle; - u32 access_flags = 0; - int err; sf_handle = kmalloc(sizeof(*sf_handle), GFP_KERNEL); if (!sf_handle) - return -ENOMEM; + return ERR_PTR(-ENOMEM); + + /* the host may have given us different attr than requested */ + sf_i->force_restat = 1; + + /* init our handle struct and add it to the inode's handles list */ + sf_handle->handle = handle; + sf_handle->root = VBOXSF_SBI(inode->i_sb)->root; + 
sf_handle->access_flags = access_flags; + kref_init(&sf_handle->refcount); + + mutex_lock(&sf_i->handle_list_mutex); + list_add(&sf_handle->head, &sf_i->handle_list); + mutex_unlock(&sf_i->handle_list_mutex); + + return sf_handle; +} + +static int vboxsf_file_open(struct inode *inode, struct file *file) +{ + struct vboxsf_sbi *sbi = VBOXSF_SBI(inode->i_sb); + struct shfl_createparms params = {}; + struct vboxsf_handle *sf_handle; + u32 access_flags = 0; + int err; /* * We check the value of params.handle afterwards to find out if @@ -83,23 +105,14 @@ static int vboxsf_file_open(struct inode *inode, struct file *file) err = vboxsf_create_at_dentry(file_dentry(file), &params); if (err == 0 && params.handle == SHFL_HANDLE_NIL) err = (params.result == SHFL_FILE_EXISTS) ? -EEXIST : -ENOENT; - if (err) { - kfree(sf_handle); + if (err) return err; - } - - /* the host may have given us different attr then requested */ - sf_i->force_restat = 1; - /* init our handle struct and add it to the inode's handles list */ - sf_handle->handle = params.handle; - sf_handle->root = VBOXSF_SBI(inode->i_sb)->root; - sf_handle->access_flags = access_flags; - kref_init(&sf_handle->refcount); - - mutex_lock(&sf_i->handle_list_mutex); - list_add(&sf_handle->head, &sf_i->handle_list); - mutex_unlock(&sf_i->handle_list_mutex); + sf_handle = vboxsf_create_sf_handle(inode, params.handle, access_flags); + if (IS_ERR(sf_handle)) { + vboxsf_close(sbi->root, params.handle); + return PTR_ERR(sf_handle); + } file->private_data = sf_handle; return 0; @@ -114,22 +127,26 @@ static void vboxsf_handle_release(struct kref *refcount) kfree(sf_handle); } -static int vboxsf_file_release(struct inode *inode, struct file *file) +void vboxsf_release_sf_handle(struct inode *inode, struct vboxsf_handle *sf_handle) { struct vboxsf_inode *sf_i = VBOXSF_I(inode); - struct vboxsf_handle *sf_handle = file->private_data; + mutex_lock(&sf_i->handle_list_mutex); + list_del(&sf_handle->head); + mutex_unlock(&sf_i->handle_list_mutex); + + kref_put(&sf_handle->refcount, vboxsf_handle_release); +} + +static int vboxsf_file_release(struct inode *inode, struct file *file) +{ /* * When a file is closed on our (the guest) side, we want any subsequent * accesses done on the host side to see all changes done from our side. 
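vboxsf_create_sf_handle() and vboxsf_release_sf_handle() above follow the kernel's standard kref lifecycle: kref_init() starts the count at 1 and the final kref_put() invokes the release callback. A minimal self-contained sketch of that pattern with a hypothetical object (not the vboxsf code, which additionally tracks handles on a per-inode list):

	#include <linux/err.h>
	#include <linux/kref.h>
	#include <linux/slab.h>
	#include <linux/types.h>

	struct my_handle {
		struct kref refcount;
		u64 host_handle;
	};

	static void my_handle_release(struct kref *refcount)
	{
		struct my_handle *h = container_of(refcount, struct my_handle,
						   refcount);

		kfree(h);	/* last reference dropped: free the object */
	}

	static struct my_handle *my_handle_create(u64 host_handle)
	{
		struct my_handle *h = kmalloc(sizeof(*h), GFP_KERNEL);

		if (!h)
			return ERR_PTR(-ENOMEM);
		h->host_handle = host_handle;
		kref_init(&h->refcount);	/* count starts at 1 */
		return h;
	}

	static void my_handle_put(struct my_handle *h)
	{
		/* Calls my_handle_release() only when the count hits zero. */
		kref_put(&h->refcount, my_handle_release);
	}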
*/ filemap_write_and_wait(inode->i_mapping); - mutex_lock(&sf_i->handle_list_mutex); - list_del(&sf_handle->head); - mutex_unlock(&sf_i->handle_list_mutex); - - kref_put(&sf_handle->refcount, vboxsf_handle_release); + vboxsf_release_sf_handle(inode, file->private_data); return 0; } diff --git a/fs/vboxsf/vfsmod.h b/fs/vboxsf/vfsmod.h index 6a7a9cedebc6..9047befa66c5 100644 --- a/fs/vboxsf/vfsmod.h +++ b/fs/vboxsf/vfsmod.h @@ -18,6 +18,8 @@ #define VBOXSF_SBI(sb) ((struct vboxsf_sbi *)(sb)->s_fs_info) #define VBOXSF_I(i) container_of(i, struct vboxsf_inode, vfs_inode) +struct vboxsf_handle; + struct vboxsf_options { unsigned long ttl; kuid_t uid; @@ -80,6 +82,11 @@ extern const struct file_operations vboxsf_reg_fops; extern const struct address_space_operations vboxsf_reg_aops; extern const struct dentry_operations vboxsf_dentry_ops; +/* from file.c */ +struct vboxsf_handle *vboxsf_create_sf_handle(struct inode *inode, + u64 handle, u32 access_flags); +void vboxsf_release_sf_handle(struct inode *inode, struct vboxsf_handle *sf_handle); + /* from utils.c */ struct inode *vboxsf_new_inode(struct super_block *sb); int vboxsf_init_inode(struct vboxsf_sbi *sbi, struct inode *inode, diff --git a/fs/xfs/libxfs/xfs_ag.c b/fs/xfs/libxfs/xfs_ag.c index 778ec52cce70..ee9ec0c50bec 100644 --- a/fs/xfs/libxfs/xfs_ag.c +++ b/fs/xfs/libxfs/xfs_ag.c @@ -804,6 +804,14 @@ xfs_ag_shrink_space( args.fsbno = XFS_AGB_TO_FSB(mp, agno, aglen - delta); /* + * Make sure that the last inode cluster cannot overlap with the new + * end of the AG, even if it's sparse. + */ + error = xfs_ialloc_check_shrink(*tpp, agno, agibp, aglen - delta); + if (error) + return error; + + /* * Disable perag reservations so it doesn't cause the allocation request * to fail. We'll reestablish reservation before we return. */ diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c index d9d7d5137b73..191d51725988 100644 --- a/fs/xfs/libxfs/xfs_attr.c +++ b/fs/xfs/libxfs/xfs_attr.c @@ -483,7 +483,7 @@ xfs_attr_set_iter( if (error) return error; - /* fallthrough */ + fallthrough; case XFS_DAS_RM_LBLK: /* Set state in case xfs_attr_rmtval_remove returns -EAGAIN */ dac->dela_state = XFS_DAS_RM_LBLK; @@ -496,7 +496,7 @@ xfs_attr_set_iter( return -EAGAIN; } - /* fallthrough */ + fallthrough; case XFS_DAS_RD_LEAF: /* * This is the last step for leaf format. Read the block with @@ -528,7 +528,7 @@ xfs_attr_set_iter( return error; } - /* fallthrough */ + fallthrough; case XFS_DAS_ALLOC_NODE: /* * If there was an out-of-line value, allocate the blocks we @@ -590,7 +590,7 @@ xfs_attr_set_iter( if (error) return error; - /* fallthrough */ + fallthrough; case XFS_DAS_RM_NBLK: /* Set state in case xfs_attr_rmtval_remove returns -EAGAIN */ dac->dela_state = XFS_DAS_RM_NBLK; @@ -603,7 +603,7 @@ xfs_attr_set_iter( return -EAGAIN; } - /* fallthrough */ + fallthrough; case XFS_DAS_CLR_FLAG: /* * The last state for node format. Look up the old attr and @@ -1406,7 +1406,7 @@ xfs_attr_remove_iter( state = dac->da_state; } - /* fallthrough */ + fallthrough; case XFS_DAS_RMTBLK: dac->dela_state = XFS_DAS_RMTBLK; @@ -1441,7 +1441,7 @@ xfs_attr_remove_iter( return -EAGAIN; } - /* fallthrough */ + fallthrough; case XFS_DAS_RM_NAME: /* * If we came here fresh from a transaction roll, reattach all @@ -1469,7 +1469,7 @@ xfs_attr_remove_iter( return -EAGAIN; } - /* fallthrough */ + fallthrough; case XFS_DAS_RM_SHRINK: /* * If the result is small enough, push it all into the inode. 
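The xfs_attr.c hunks above replace /* fallthrough */ comments with the fallthrough pseudo-keyword, which expands to __attribute__((__fallthrough__)) on compilers that support it so -Wimplicit-fallthrough can tell deliberate fall-through from a missing break. A toy sketch of the idiom (not XFS code):

	#include <linux/compiler_attributes.h>

	enum toy_state { STATE_SETUP, STATE_RUN, STATE_DONE };

	static int toy_step(enum toy_state state)
	{
		int work = 0;

		switch (state) {
		case STATE_SETUP:
			work++;		/* do setup, then keep going into RUN */
			fallthrough;	/* annotated: no compiler warning */
		case STATE_RUN:
			work++;
			break;
		case STATE_DONE:
			break;
		}
		return work;
	}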
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c index 57d9cb632983..aaf8805a82df 100644 --- a/fs/xfs/libxfs/xfs_ialloc.c +++ b/fs/xfs/libxfs/xfs_ialloc.c @@ -2928,3 +2928,58 @@ xfs_ialloc_calc_rootino( return XFS_AGINO_TO_INO(mp, 0, XFS_AGB_TO_AGINO(mp, first_bno)); } + +/* + * Ensure there are not sparse inode clusters that cross the new EOAG. + * + * This is a no-op for non-spinode filesystems since clusters are always fully + * allocated and checking the bnobt suffices. However, a spinode filesystem + * could have a record where the upper inodes are free blocks. If those blocks + * were removed from the filesystem, the inode record would extend beyond EOAG, + * which will be flagged as corruption. + */ +int +xfs_ialloc_check_shrink( + struct xfs_trans *tp, + xfs_agnumber_t agno, + struct xfs_buf *agibp, + xfs_agblock_t new_length) +{ + struct xfs_inobt_rec_incore rec; + struct xfs_btree_cur *cur; + struct xfs_mount *mp = tp->t_mountp; + struct xfs_perag *pag; + xfs_agino_t agino = XFS_AGB_TO_AGINO(mp, new_length); + int has; + int error; + + if (!xfs_sb_version_hassparseinodes(&mp->m_sb)) + return 0; + + pag = xfs_perag_get(mp, agno); + cur = xfs_inobt_init_cursor(mp, tp, agibp, pag, XFS_BTNUM_INO); + + /* Look up the inobt record that would correspond to the new EOFS. */ + error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &has); + if (error || !has) + goto out; + + error = xfs_inobt_get_rec(cur, &rec, &has); + if (error) + goto out; + + if (!has) { + error = -EFSCORRUPTED; + goto out; + } + + /* If the record covers inodes that would be beyond EOFS, bail out. */ + if (rec.ir_startino + XFS_INODES_PER_CHUNK > agino) { + error = -ENOSPC; + goto out; + } +out: + xfs_btree_del_cursor(cur, error); + xfs_perag_put(pag); + return error; +} diff --git a/fs/xfs/libxfs/xfs_ialloc.h b/fs/xfs/libxfs/xfs_ialloc.h index 9df7c80408ff..9a2112b4ad5e 100644 --- a/fs/xfs/libxfs/xfs_ialloc.h +++ b/fs/xfs/libxfs/xfs_ialloc.h @@ -122,4 +122,7 @@ int xfs_ialloc_cluster_alignment(struct xfs_mount *mp); void xfs_ialloc_setup_geometry(struct xfs_mount *mp); xfs_ino_t xfs_ialloc_calc_rootino(struct xfs_mount *mp, int sunit); +int xfs_ialloc_check_shrink(struct xfs_trans *tp, xfs_agnumber_t agno, + struct xfs_buf *agibp, xfs_agblock_t new_length); + #endif /* __XFS_IALLOC_H__ */ diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c index 04ce361688f7..84ea2e0af9f0 100644 --- a/fs/xfs/libxfs/xfs_inode_buf.c +++ b/fs/xfs/libxfs/xfs_inode_buf.c @@ -592,23 +592,27 @@ xfs_inode_validate_extsize( /* * This comment describes a historic gap in this verifier function. * - * On older kernels, the extent size hint verifier doesn't check that - * the extent size hint is an integer multiple of the realtime extent - * size on a directory with both RTINHERIT and EXTSZINHERIT flags set. - * The verifier has always enforced the alignment rule for regular - * files with the REALTIME flag set. + * For a directory with both RTINHERIT and EXTSZINHERIT flags set, this + * function has never checked that the extent size hint is an integer + * multiple of the realtime extent size. Since we allow users to set + * this combination on non-rt filesystems /and/ to change the rt + * extent size when adding a rt device to a filesystem, the net effect + * is that users can configure a filesystem anticipating one rt + * geometry and change their minds later. Directories do not use the + * extent size hint, so this is harmless for them. 
* * If a directory with a misaligned extent size hint is allowed to * propagate that hint into a new regular realtime file, the result * is that the inode cluster buffer verifier will trigger a corruption - * shutdown the next time it is run. + * shutdown the next time it is run, because the verifier has always + * enforced the alignment rule for regular files. * - * Unfortunately, there could be filesystems with these misconfigured - * directories in the wild, so we cannot add a check to this verifier - * at this time because that will result a new source of directory - * corruption errors when reading an existing filesystem. Instead, we - * permit the misconfiguration to pass through the verifiers so that - * callers of this function can correct and mitigate externally. + * Because we allow administrators to set a new rt extent size when + * adding a rt section, we cannot add a check to this verifier because + * that will result in a new source of directory corruption errors when + * reading an existing filesystem. Instead, we rely on callers to + * decide when alignment checks are appropriate, and fix things up as + * needed. */ if (rt_flag) diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h index d548ea4b6aab..2c5bcbc19264 100644 --- a/fs/xfs/libxfs/xfs_log_format.h +++ b/fs/xfs/libxfs/xfs_log_format.h @@ -411,7 +411,16 @@ struct xfs_log_dinode { /* start of the extended dinode, writable fields */ uint32_t di_crc; /* CRC of the inode */ uint64_t di_changecount; /* number of attribute changes */ - xfs_lsn_t di_lsn; /* flush sequence */ + + /* + * The LSN we write to this field during formatting is not a reflection + * of the current on-disk LSN. It should never be used for recovery + * sequencing, nor should it be recovered into the on-disk inode at all. + * See xlog_recover_inode_commit_pass2() and xfs_log_dinode_to_disk() + * for details. + */ + xfs_lsn_t di_lsn; + uint64_t di_flags2; /* more random flags */ uint32_t di_cowextsize; /* basic cow extent size for file */ uint8_t di_pad2[12]; /* more padding for future expansion */ diff --git a/fs/xfs/libxfs/xfs_trans_inode.c b/fs/xfs/libxfs/xfs_trans_inode.c index 8d595a5c4abd..16f723ebe8dd 100644 --- a/fs/xfs/libxfs/xfs_trans_inode.c +++ b/fs/xfs/libxfs/xfs_trans_inode.c @@ -143,16 +143,14 @@ xfs_trans_log_inode( } /* - * Inode verifiers on older kernels don't check that the extent size - * hint is an integer multiple of the rt extent size on a directory - * with both rtinherit and extszinherit flags set. If we're logging a - * directory that is misconfigured in this way, clear the hint. + * Inode verifiers do not check that the extent size hint is an integer + * multiple of the rt extent size on a directory with both rtinherit + * and extszinherit flags set. If we're logging a directory that is + * misconfigured in this way, clear the hint. 
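The extent size hint being discussed is the one userspace reads and writes as fsx_extsize via the FS_IOC_FSGETXATTR/FS_IOC_FSSETXATTR ioctls, the same fields xfs_fill_fsxattr() populates in the xfs_ioctl.c hunk further down. A small sketch that dumps the hint and the two inherit flags for a path (error handling trimmed):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>

	int main(int argc, char **argv)
	{
		struct fsxattr fa;
		int fd;

		if (argc != 2)
			return 1;
		fd = open(argv[1], O_RDONLY);
		if (fd < 0 || ioctl(fd, FS_IOC_FSGETXATTR, &fa) != 0) {
			perror(argv[1]);
			return 1;
		}
		printf("extsize hint: %u bytes\n", (unsigned int)fa.fsx_extsize);
		printf("EXTSZINHERIT: %s\n",
		       (fa.fsx_xflags & FS_XFLAG_EXTSZINHERIT) ? "yes" : "no");
		printf("RTINHERIT:    %s\n",
		       (fa.fsx_xflags & FS_XFLAG_RTINHERIT) ? "yes" : "no");
		close(fd);
		return 0;
	}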
*/ if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) && (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) && (ip->i_extsize % ip->i_mount->m_sb.sb_rextsize) > 0) { - xfs_info_once(ip->i_mount, - "Correcting misaligned extent size hint in inode 0x%llx.", ip->i_ino); ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE | XFS_DIFLAG_EXTSZINHERIT); ip->i_extsize = 0; diff --git a/fs/xfs/scrub/inode.c b/fs/xfs/scrub/inode.c index 61f90b2c9430..76fbc7ca4cec 100644 --- a/fs/xfs/scrub/inode.c +++ b/fs/xfs/scrub/inode.c @@ -73,11 +73,25 @@ xchk_inode_extsize( uint16_t flags) { xfs_failaddr_t fa; + uint32_t value = be32_to_cpu(dip->di_extsize); - fa = xfs_inode_validate_extsize(sc->mp, be32_to_cpu(dip->di_extsize), - mode, flags); + fa = xfs_inode_validate_extsize(sc->mp, value, mode, flags); if (fa) xchk_ino_set_corrupt(sc, ino); + + /* + * XFS allows a sysadmin to change the rt extent size when adding a rt + * section to a filesystem after formatting. If there are any + * directories with extszinherit and rtinherit set, the hint could + * become misaligned with the new rextsize. The verifier doesn't check + * this, because we allow rtinherit directories even without an rt + * device. Flag this as an administrative warning since we will clean + * this up eventually. + */ + if ((flags & XFS_DIFLAG_RTINHERIT) && + (flags & XFS_DIFLAG_EXTSZINHERIT) && + value % sc->mp->m_sb.sb_rextsize > 0) + xchk_ino_set_warning(sc, ino); } /* diff --git a/fs/xfs/xfs_buf_item_recover.c b/fs/xfs/xfs_buf_item_recover.c index d44e8b4a3391..4775485b4062 100644 --- a/fs/xfs/xfs_buf_item_recover.c +++ b/fs/xfs/xfs_buf_item_recover.c @@ -698,7 +698,8 @@ xlog_recover_do_inode_buffer( static xfs_lsn_t xlog_recover_get_buf_lsn( struct xfs_mount *mp, - struct xfs_buf *bp) + struct xfs_buf *bp, + struct xfs_buf_log_format *buf_f) { uint32_t magic32; uint16_t magic16; @@ -706,11 +707,20 @@ xlog_recover_get_buf_lsn( void *blk = bp->b_addr; uuid_t *uuid; xfs_lsn_t lsn = -1; + uint16_t blft; /* v4 filesystems always recover immediately */ if (!xfs_sb_version_hascrc(&mp->m_sb)) goto recover_immediately; + /* + * realtime bitmap and summary file blocks do not have magic numbers or + * UUIDs, so we must recover them immediately. + */ + blft = xfs_blft_from_flags(buf_f); + if (blft == XFS_BLFT_RTBITMAP_BUF || blft == XFS_BLFT_RTSUMMARY_BUF) + goto recover_immediately; + magic32 = be32_to_cpu(*(__be32 *)blk); switch (magic32) { case XFS_ABTB_CRC_MAGIC: @@ -796,6 +806,7 @@ xlog_recover_get_buf_lsn( switch (magicda) { case XFS_DIR3_LEAF1_MAGIC: case XFS_DIR3_LEAFN_MAGIC: + case XFS_ATTR3_LEAF_MAGIC: case XFS_DA3_NODE_MAGIC: lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn); uuid = &((struct xfs_da3_blkinfo *)blk)->uuid; @@ -919,7 +930,7 @@ xlog_recover_buf_commit_pass2( * the verifier will be reset to match whatever recover turns that * buffer into. */ - lsn = xlog_recover_get_buf_lsn(mp, bp); + lsn = xlog_recover_get_buf_lsn(mp, bp, buf_f); if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) { trace_xfs_log_recover_buf_skip(log, buf_f); xlog_recover_validate_buf_type(mp, bp, buf_f, NULLCOMMITLSN); diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index a835ceb79ba5..990b72ae3635 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -2763,6 +2763,19 @@ xfs_remove( error = xfs_droplink(tp, ip); if (error) goto out_trans_cancel; + + /* + * Point the unlinked child directory's ".." entry to the root + * directory to eliminate back-references to inodes that may + * get freed before the child directory is closed. 
If the fs + * gets shrunk, this can lead to dirent inode validation errors. + */ + if (dp->i_ino != tp->t_mountp->m_sb.sb_rootino) { + error = xfs_dir_replace(tp, ip, &xfs_name_dotdot, + tp->t_mountp->m_sb.sb_rootino, 0); + if (error) + return error; + } } else { /* * When removing a non-directory we need to log the parent diff --git a/fs/xfs/xfs_inode_item_recover.c b/fs/xfs/xfs_inode_item_recover.c index 7b79518b6c20..e0072a6cd2d3 100644 --- a/fs/xfs/xfs_inode_item_recover.c +++ b/fs/xfs/xfs_inode_item_recover.c @@ -145,7 +145,8 @@ xfs_log_dinode_to_disk_ts( STATIC void xfs_log_dinode_to_disk( struct xfs_log_dinode *from, - struct xfs_dinode *to) + struct xfs_dinode *to, + xfs_lsn_t lsn) { to->di_magic = cpu_to_be16(from->di_magic); to->di_mode = cpu_to_be16(from->di_mode); @@ -182,7 +183,7 @@ xfs_log_dinode_to_disk( to->di_flags2 = cpu_to_be64(from->di_flags2); to->di_cowextsize = cpu_to_be32(from->di_cowextsize); to->di_ino = cpu_to_be64(from->di_ino); - to->di_lsn = cpu_to_be64(from->di_lsn); + to->di_lsn = cpu_to_be64(lsn); memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2)); uuid_copy(&to->di_uuid, &from->di_uuid); to->di_flushiter = 0; @@ -261,16 +262,25 @@ xlog_recover_inode_commit_pass2( } /* - * If the inode has an LSN in it, recover the inode only if it's less - * than the lsn of the transaction we are replaying. Note: we still - * need to replay an owner change even though the inode is more recent - * than the transaction as there is no guarantee that all the btree - * blocks are more recent than this transaction, too. + * If the inode has an LSN in it, recover the inode only if the on-disk + * inode's LSN is older than the lsn of the transaction we are + * replaying. We can have multiple checkpoints with the same start LSN, + * so the current LSN being equal to the on-disk LSN doesn't necessarily + * mean that the on-disk inode is more recent than the change being + * replayed. + * + * We must check the current_lsn against the on-disk inode + * here because we can't trust the log dinode to contain a valid LSN + * (see comment below before replaying the log dinode for details). + * + * Note: we still need to replay an owner change even though the inode + * is more recent than the transaction as there is no guarantee that all + * the btree blocks are more recent than this transaction, too. */ if (dip->di_version >= 3) { xfs_lsn_t lsn = be64_to_cpu(dip->di_lsn); - if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) { + if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) > 0) { trace_xfs_log_recover_inode_skip(log, in_f); error = 0; goto out_owner_change; @@ -368,8 +378,17 @@ xlog_recover_inode_commit_pass2( goto out_release; } - /* recover the log dinode inode into the on disk inode */ - xfs_log_dinode_to_disk(ldip, dip); + /* + * Recover the log dinode inode into the on disk inode. + * + * The LSN in the log dinode is garbage - it can be zero or reflect + * stale in-memory runtime state that isn't coherent with the changes + * logged in this transaction or the changes written to the on-disk + * inode. Hence we write the current LSN into the inode because that + * matches what xfs_iflush() would write into the inode when flushing + * the changes in this transaction. 
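The skip-or-replay decision above rests on the LSN ordering implemented by XFS_LSN_CMP: an LSN packs a 32-bit cycle number in the high half and a block number in the low half, and comparison is cycle-major. A simplified standalone sketch of that ordering (the kernel's real macros live in the XFS log headers):

	#include <stdint.h>

	typedef int64_t xfs_lsn_t;

	#define CYCLE_LSN(lsn)	((uint32_t)((lsn) >> 32))		/* high 32 bits */
	#define BLOCK_LSN(lsn)	((uint32_t)((lsn) & 0xffffffff))	/* low 32 bits */

	/* < 0, 0, > 0: lsn1 older than, equal to, newer than lsn2 */
	static int lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
	{
		if (CYCLE_LSN(lsn1) != CYCLE_LSN(lsn2))
			return CYCLE_LSN(lsn1) < CYCLE_LSN(lsn2) ? -1 : 1;
		if (BLOCK_LSN(lsn1) != BLOCK_LSN(lsn2))
			return BLOCK_LSN(lsn1) < BLOCK_LSN(lsn2) ? -1 : 1;
		return 0;
	}

Under this ordering, relaxing the skip test from >= 0 to > 0 means an on-disk inode whose LSN equals current_lsn is now replayed rather than skipped, which is the safe choice when multiple checkpoints share a start LSN.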
+ */ + xfs_log_dinode_to_disk(ldip, dip, current_lsn); fields = in_f->ilf_fields; if (fields & XFS_ILOG_DEV) diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index 65270e63c032..16039ea10ac9 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c @@ -1065,7 +1065,24 @@ xfs_fill_fsxattr( fileattr_fill_xflags(fa, xfs_ip2xflags(ip)); - fa->fsx_extsize = XFS_FSB_TO_B(mp, ip->i_extsize); + if (ip->i_diflags & XFS_DIFLAG_EXTSIZE) { + fa->fsx_extsize = XFS_FSB_TO_B(mp, ip->i_extsize); + } else if (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) { + /* + * Don't let a misaligned extent size hint on a directory + * escape to userspace if it won't pass the setattr checks + * later. + */ + if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) && + ip->i_extsize % mp->m_sb.sb_rextsize > 0) { + fa->fsx_xflags &= ~(FS_XFLAG_EXTSIZE | + FS_XFLAG_EXTSZINHERIT); + fa->fsx_extsize = 0; + } else { + fa->fsx_extsize = XFS_FSB_TO_B(mp, ip->i_extsize); + } + } + if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) fa->fsx_cowextsize = XFS_FSB_TO_B(mp, ip->i_cowextsize); fa->fsx_projid = ip->i_projid; @@ -1292,10 +1309,10 @@ xfs_ioctl_setattr_check_extsize( new_diflags = xfs_flags2diflags(ip, fa->fsx_xflags); /* - * Inode verifiers on older kernels don't check that the extent size - * hint is an integer multiple of the rt extent size on a directory - * with both rtinherit and extszinherit flags set. Don't let sysadmins - * misconfigure directories. + * Inode verifiers do not check that the extent size hint is an integer + * multiple of the rt extent size on a directory with both rtinherit + * and extszinherit flags set. Don't let sysadmins misconfigure + * directories. */ if ((new_diflags & XFS_DIFLAG_RTINHERIT) && (new_diflags & XFS_DIFLAG_EXTSZINHERIT)) { diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 36fa2650b081..60ac5fd63f1e 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -78,13 +78,12 @@ xlog_verify_iclog( STATIC void xlog_verify_tail_lsn( struct xlog *log, - struct xlog_in_core *iclog, - xfs_lsn_t tail_lsn); + struct xlog_in_core *iclog); #else #define xlog_verify_dest_ptr(a,b) #define xlog_verify_grant_tail(a) #define xlog_verify_iclog(a,b,c) -#define xlog_verify_tail_lsn(a,b,c) +#define xlog_verify_tail_lsn(a,b) #endif STATIC int @@ -487,51 +486,80 @@ out_error: return error; } -static bool -__xlog_state_release_iclog( - struct xlog *log, - struct xlog_in_core *iclog) -{ - lockdep_assert_held(&log->l_icloglock); - - if (iclog->ic_state == XLOG_STATE_WANT_SYNC) { - /* update tail before writing to iclog */ - xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp); - - iclog->ic_state = XLOG_STATE_SYNCING; - iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn); - xlog_verify_tail_lsn(log, iclog, tail_lsn); - /* cycle incremented when incrementing curr_block */ - trace_xlog_iclog_syncing(iclog, _RET_IP_); - return true; - } - - ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE); - return false; -} - /* * Flush iclog to disk if this is the last reference to the given iclog and * it is in the WANT_SYNC state. + * + * If the caller passes in a non-zero @old_tail_lsn and the current log tail + * does not match, there may be metadata on disk that must be persisted before + * this iclog is written. To satisfy that requirement, set the + * XLOG_ICL_NEED_FLUSH flag as a condition for writing this iclog with the new + * log tail value. + * + * If XLOG_ICL_NEED_FUA is already set on the iclog, we need to ensure that the + * log tail is updated correctly. 
NEED_FUA indicates that the iclog will be + * written to stable storage, and implies that a commit record is contained + * within the iclog. We need to ensure that the log tail does not move beyond + * the tail that the first commit record in the iclog ordered against, otherwise + * correct recovery of that checkpoint becomes dependent on future operations + * performed on this iclog. + * + * Hence if NEED_FUA is set and the current iclog tail lsn is empty, write the + * current tail into the iclog. Once the iclog tail is set, future operations must + * not modify it, otherwise they potentially violate ordering constraints for + * the checkpoint commit that wrote the initial tail lsn value. The tail lsn in + * the iclog will get zeroed on activation of the iclog after sync, so we + * always capture the tail lsn on the iclog on the first NEED_FUA release + * regardless of the number of active reference counts on this iclog. */ + int xlog_state_release_iclog( struct xlog *log, - struct xlog_in_core *iclog) + struct xlog_in_core *iclog, + xfs_lsn_t old_tail_lsn) { + xfs_lsn_t tail_lsn; lockdep_assert_held(&log->l_icloglock); trace_xlog_iclog_release(iclog, _RET_IP_); if (iclog->ic_state == XLOG_STATE_IOERROR) return -EIO; - if (atomic_dec_and_test(&iclog->ic_refcnt) && - __xlog_state_release_iclog(log, iclog)) { - spin_unlock(&log->l_icloglock); - xlog_sync(log, iclog); - spin_lock(&log->l_icloglock); + /* + * Grabbing the current log tail needs to be atomic w.r.t. the writing + * of the tail LSN into the iclog so we guarantee that the log tail does + * not move between deciding if a cache flush is required and writing + * the LSN into the iclog below. + */ + if (old_tail_lsn || iclog->ic_state == XLOG_STATE_WANT_SYNC) { + tail_lsn = xlog_assign_tail_lsn(log->l_mp); + + if (old_tail_lsn && tail_lsn != old_tail_lsn) + iclog->ic_flags |= XLOG_ICL_NEED_FLUSH; + + if ((iclog->ic_flags & XLOG_ICL_NEED_FUA) && + !iclog->ic_header.h_tail_lsn) + iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn); } + if (!atomic_dec_and_test(&iclog->ic_refcnt)) + return 0; + + if (iclog->ic_state != XLOG_STATE_WANT_SYNC) { + ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE); + return 0; + } + + iclog->ic_state = XLOG_STATE_SYNCING; + if (!iclog->ic_header.h_tail_lsn) + iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn); + xlog_verify_tail_lsn(log, iclog); + trace_xlog_iclog_syncing(iclog, _RET_IP_); + + spin_unlock(&log->l_icloglock); + xlog_sync(log, iclog); + spin_lock(&log->l_icloglock); return 0; } @@ -774,6 +802,21 @@ xfs_log_mount_cancel( } /* + * Flush out the iclog to disk ensuring that device caches are flushed and + * the iclog hits stable storage before any completion waiters are woken. + */ +static inline int +xlog_force_iclog( + struct xlog_in_core *iclog) +{ + atomic_inc(&iclog->ic_refcnt); + iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA; + if (iclog->ic_state == XLOG_STATE_ACTIVE) + xlog_state_switch_iclogs(iclog->ic_log, iclog, 0); + return xlog_state_release_iclog(iclog->ic_log, iclog, 0); +} + +/* * Wait for the iclog and all prior iclogs to be written to disk as required by the * log force state machine. 
Waiting on ic_force_wait ensures iclog completions * have been ordered and callbacks run before we are woken here, hence @@ -827,13 +870,6 @@ xlog_write_unmount_record( /* account for space used by record data */ ticket->t_curr_res -= sizeof(ulf); - /* - * For external log devices, we need to flush the data device cache - * first to ensure all metadata writeback is on stable storage before we - * stamp the tail LSN into the unmount record. - */ - if (log->l_targ != log->l_mp->m_ddev_targp) - blkdev_issue_flush(log->l_targ->bt_bdev); return xlog_write(log, &vec, ticket, NULL, NULL, XLOG_UNMOUNT_TRANS); } @@ -865,18 +901,7 @@ out_err: spin_lock(&log->l_icloglock); iclog = log->l_iclog; - atomic_inc(&iclog->ic_refcnt); - if (iclog->ic_state == XLOG_STATE_ACTIVE) - xlog_state_switch_iclogs(log, iclog, 0); - else - ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC || - iclog->ic_state == XLOG_STATE_IOERROR); - /* - * Ensure the journal is fully flushed and on stable storage once the - * iclog containing the unmount record is written. - */ - iclog->ic_flags |= (XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA); - error = xlog_state_release_iclog(log, iclog); + error = xlog_force_iclog(iclog); xlog_wait_on_iclog(iclog); if (tic) { @@ -1796,10 +1821,20 @@ xlog_write_iclog( * metadata writeback and causing priority inversions. */ iclog->ic_bio.bi_opf = REQ_OP_WRITE | REQ_META | REQ_SYNC | REQ_IDLE; - if (iclog->ic_flags & XLOG_ICL_NEED_FLUSH) + if (iclog->ic_flags & XLOG_ICL_NEED_FLUSH) { iclog->ic_bio.bi_opf |= REQ_PREFLUSH; + /* + * For external log devices, we also need to flush the data + * device cache first to ensure all metadata writeback covered + * by the LSN in this iclog is on stable storage. This is slow, + * but it *must* complete before we issue the external log IO. + */ + if (log->l_targ != log->l_mp->m_ddev_targp) + blkdev_issue_flush(log->l_mp->m_ddev_targp->bt_bdev); + } if (iclog->ic_flags & XLOG_ICL_NEED_FUA) iclog->ic_bio.bi_opf |= REQ_FUA; + iclog->ic_flags &= ~(XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA); if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count)) { @@ -2310,7 +2345,7 @@ xlog_write_copy_finish( return 0; release_iclog: - error = xlog_state_release_iclog(log, iclog); + error = xlog_state_release_iclog(log, iclog, 0); spin_unlock(&log->l_icloglock); return error; } @@ -2529,7 +2564,7 @@ next_lv: ASSERT(optype & XLOG_COMMIT_TRANS); *commit_iclog = iclog; } else { - error = xlog_state_release_iclog(log, iclog); + error = xlog_state_release_iclog(log, iclog, 0); } spin_unlock(&log->l_icloglock); @@ -2567,6 +2602,7 @@ xlog_state_activate_iclog( memset(iclog->ic_header.h_cycle_data, 0, sizeof(iclog->ic_header.h_cycle_data)); iclog->ic_header.h_lsn = 0; + iclog->ic_header.h_tail_lsn = 0; } /* @@ -2967,7 +3003,7 @@ restart: * reference to the iclog. */ if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1)) - error = xlog_state_release_iclog(log, iclog); + error = xlog_state_release_iclog(log, iclog, 0); spin_unlock(&log->l_icloglock); if (error) return error; @@ -3132,6 +3168,35 @@ xlog_state_switch_iclogs( } /* + * Force the iclog to disk and check if the iclog has been completed before + * xlog_force_iclog() returns. This can happen on synchronous (e.g. + * pmem) or fast async storage because we drop the icloglock to issue the IO. + * If completion has already occurred, tell the caller so that it can avoid an + * unnecessary wait on the iclog. 
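The comment above describes a detect-completion-by-resampling pattern: capture the iclog header LSN, force the iclog (which drops the icloglock to issue I/O), and if the header LSN has changed afterwards the iclog must have completed and been recycled, so the caller can skip waiting. A stripped-down sketch of the idea with a hypothetical object (not the xlog code):

	/* Hypothetical slot whose gen field is rewritten on recycling. */
	struct slot {
		unsigned long gen;
	};

	static int force_and_check(struct slot *s, void (*force)(struct slot *))
	{
		unsigned long gen = s->gen;	/* sample identity first */

		force(s);	/* may let the slot complete and be reused */

		/* Non-zero: completion already happened, no need to wait. */
		return s->gen != gen;
	}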
+ */ +static int +xlog_force_and_check_iclog( + struct xlog_in_core *iclog, + bool *completed) +{ + xfs_lsn_t lsn = be64_to_cpu(iclog->ic_header.h_lsn); + int error; + + *completed = false; + error = xlog_force_iclog(iclog); + if (error) + return error; + + /* + * If the iclog has already been completed and reused the header LSN + * will have been rewritten by completion + */ + if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) + *completed = true; + return 0; +} + +/* * Write out all data in the in-core log as of this exact moment in time. * * Data may be written to the in-core log during this call. However, @@ -3165,7 +3230,6 @@ xfs_log_force( { struct xlog *log = mp->m_log; struct xlog_in_core *iclog; - xfs_lsn_t lsn; XFS_STATS_INC(mp, xs_log_force); trace_xfs_log_force(mp, 0, _RET_IP_); @@ -3193,39 +3257,33 @@ xfs_log_force( iclog = iclog->ic_prev; } else if (iclog->ic_state == XLOG_STATE_ACTIVE) { if (atomic_read(&iclog->ic_refcnt) == 0) { - /* - * We are the only one with access to this iclog. - * - * Flush it out now. There should be a roundoff of zero - * to show that someone has already taken care of the - * roundoff from the previous sync. - */ - atomic_inc(&iclog->ic_refcnt); - lsn = be64_to_cpu(iclog->ic_header.h_lsn); - xlog_state_switch_iclogs(log, iclog, 0); - if (xlog_state_release_iclog(log, iclog)) + /* We have exclusive access to this iclog. */ + bool completed; + + if (xlog_force_and_check_iclog(iclog, &completed)) goto out_error; - if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) + if (completed) goto out_unlock; } else { /* - * Someone else is writing to this iclog. - * - * Use its call to flush out the data. However, the - * other thread may not force out this LR, so we mark - * it WANT_SYNC. + * Someone else is still writing to this iclog, so we + * need to ensure that when they release the iclog it + * gets synced immediately as we may be waiting on it. */ xlog_state_switch_iclogs(log, iclog, 0); } - } else { - /* - * If the head iclog is not active nor dirty, we just attach - * ourselves to the head and go to sleep if necessary. - */ - ; } + /* + * The iclog we are about to wait on may contain the checkpoint pushed + * by the above xlog_cil_force() call, but it may not have been pushed + * to disk yet. Like the ACTIVE case above, we need to make sure caches + * are flushed when this iclog is written. + */ + if (iclog->ic_state == XLOG_STATE_WANT_SYNC) + iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA; + if (flags & XFS_LOG_SYNC) return xlog_wait_on_iclog(iclog); out_unlock: @@ -3245,6 +3303,7 @@ xlog_force_lsn( bool already_slept) { struct xlog_in_core *iclog; + bool completed; spin_lock(&log->l_icloglock); iclog = log->l_iclog; @@ -3258,7 +3317,8 @@ xlog_force_lsn( goto out_unlock; } - if (iclog->ic_state == XLOG_STATE_ACTIVE) { + switch (iclog->ic_state) { + case XLOG_STATE_ACTIVE: /* * We sleep here if we haven't already slept (e.g. this is the * first time we've looked at the correct iclog buf) and the @@ -3281,12 +3341,31 @@ xlog_force_lsn( &log->l_icloglock); return -EAGAIN; } - atomic_inc(&iclog->ic_refcnt); - xlog_state_switch_iclogs(log, iclog, 0); - if (xlog_state_release_iclog(log, iclog)) + if (xlog_force_and_check_iclog(iclog, &completed)) goto out_error; if (log_flushed) *log_flushed = 1; + if (completed) + goto out_unlock; + break; + case XLOG_STATE_WANT_SYNC: + /* + * This iclog may contain the checkpoint pushed by the + * xlog_cil_force_seq() call, but there are other writers still + * accessing it so it hasn't been pushed to disk yet. 
Like the + * ACTIVE case above, we need to make sure caches are flushed + * when this iclog is written. + */ + iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA; + break; + default: + /* + * The entire checkpoint was written by the CIL force and is on + * its way to disk already. It will be stable when it + * completes, so we don't need to manipulate caches here at all. + * We just need to wait for completion if necessary. + */ + break; } if (flags & XFS_LOG_SYNC) @@ -3559,10 +3638,10 @@ xlog_verify_grant_tail( STATIC void xlog_verify_tail_lsn( struct xlog *log, - struct xlog_in_core *iclog, - xfs_lsn_t tail_lsn) + struct xlog_in_core *iclog) { - int blocks; + xfs_lsn_t tail_lsn = be64_to_cpu(iclog->ic_header.h_tail_lsn); + int blocks; if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) { blocks = diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c index b128aaa9b870..4c44bc3786c0 100644 --- a/fs/xfs/xfs_log_cil.c +++ b/fs/xfs/xfs_log_cil.c @@ -654,8 +654,9 @@ xlog_cil_push_work( struct xfs_trans_header thdr; struct xfs_log_iovec lhdr; struct xfs_log_vec lvhdr = { NULL }; + xfs_lsn_t preflush_tail_lsn; xfs_lsn_t commit_lsn; - xfs_lsn_t push_seq; + xfs_csn_t push_seq; struct bio bio; DECLARE_COMPLETION_ONSTACK(bdev_flush); @@ -730,7 +731,15 @@ xlog_cil_push_work( * because we hold the flush lock exclusively. Hence we can now issue * a cache flush to ensure all the completed metadata in the journal we * are about to overwrite is on stable storage. + * + * Because we are issuing this cache flush before we've written the + * tail lsn to the iclog, we can have metadata IO completions move the + * tail forwards between the completion of this flush and the iclog + * being written. In this case, we need to re-issue the cache flush + * before the iclog write. To detect whether the log tail moves, sample + * the tail LSN *before* we issue the flush. */ + preflush_tail_lsn = atomic64_read(&log->l_tail_lsn); xfs_flush_bdev_async(&bio, log->l_mp->m_ddev_targp->bt_bdev, &bdev_flush); @@ -941,7 +950,7 @@ restart: * storage. 
*/ commit_iclog->ic_flags |= XLOG_ICL_NEED_FUA; - xlog_state_release_iclog(log, commit_iclog); + xlog_state_release_iclog(log, commit_iclog, preflush_tail_lsn); spin_unlock(&log->l_icloglock); return; diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index 4c41bbfa33b0..f3e79a45d60a 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h @@ -59,6 +59,16 @@ enum xlog_iclog_state { { XLOG_STATE_DIRTY, "XLOG_STATE_DIRTY" }, \ { XLOG_STATE_IOERROR, "XLOG_STATE_IOERROR" } +/* + * In core log flags + */ +#define XLOG_ICL_NEED_FLUSH (1 << 0) /* iclog needs REQ_PREFLUSH */ +#define XLOG_ICL_NEED_FUA (1 << 1) /* iclog needs REQ_FUA */ + +#define XLOG_ICL_STRINGS \ + { XLOG_ICL_NEED_FLUSH, "XLOG_ICL_NEED_FLUSH" }, \ + { XLOG_ICL_NEED_FUA, "XLOG_ICL_NEED_FUA" } + /* * Log ticket flags @@ -143,9 +153,6 @@ enum xlog_iclog_state { #define XLOG_COVER_OPS 5 -#define XLOG_ICL_NEED_FLUSH (1 << 0) /* iclog needs REQ_PREFLUSH */ -#define XLOG_ICL_NEED_FUA (1 << 1) /* iclog needs REQ_FUA */ - /* Ticket reservation region accounting */ #define XLOG_TIC_LEN_MAX 15 @@ -497,7 +504,8 @@ int xlog_commit_record(struct xlog *log, struct xlog_ticket *ticket, void xfs_log_ticket_ungrant(struct xlog *log, struct xlog_ticket *ticket); void xfs_log_ticket_regrant(struct xlog *log, struct xlog_ticket *ticket); -int xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog); +int xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog, + xfs_lsn_t log_tail_lsn); /* * When we crack an atomic LSN, we sample it first so that the value will not diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c index 4e7be6b4ca8e..699066fb9052 100644 --- a/fs/xfs/xfs_rtalloc.c +++ b/fs/xfs/xfs_rtalloc.c @@ -923,16 +923,41 @@ xfs_growfs_rt( uint8_t *rsum_cache; /* old summary cache */ sbp = &mp->m_sb; - /* - * Initial error checking. - */ + if (!capable(CAP_SYS_ADMIN)) return -EPERM; - if (mp->m_rtdev_targp == NULL || mp->m_rbmip == NULL || - (nrblocks = in->newblocks) <= sbp->sb_rblocks || - (sbp->sb_rblocks && (in->extsize != sbp->sb_rextsize))) + + /* Needs to have been mounted with an rt device. */ + if (!XFS_IS_REALTIME_MOUNT(mp)) + return -EINVAL; + /* + * Mount should fail if the rt bitmap/summary files don't load, but + * we'll check anyway. + */ + if (!mp->m_rbmip || !mp->m_rsumip) + return -EINVAL; + + /* Shrink not supported. */ + if (in->newblocks <= sbp->sb_rblocks) + return -EINVAL; + + /* Can only change rt extent size when adding rt volume. */ + if (sbp->sb_rblocks > 0 && in->extsize != sbp->sb_rextsize) + return -EINVAL; + + /* Range check the extent size. */ + if (XFS_FSB_TO_B(mp, in->extsize) > XFS_MAX_RTEXTSIZE || + XFS_FSB_TO_B(mp, in->extsize) < XFS_MIN_RTEXTSIZE) return -EINVAL; - if ((error = xfs_sb_validate_fsb_count(sbp, nrblocks))) + + /* Unsupported realtime features. */ + if (xfs_sb_version_hasrmapbt(&mp->m_sb) || + xfs_sb_version_hasreflink(&mp->m_sb)) + return -EOPNOTSUPP; + + nrblocks = in->newblocks; + error = xfs_sb_validate_fsb_count(sbp, nrblocks); + if (error) return error; /* * Read in the last block of the device, make sure it exists. 
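The preflush_tail_lsn sampling in the xlog_cil_push_work() hunks above is worth a concrete illustration. Below is a minimal, hypothetical sketch of the recheck that sampling enables; xlog_maybe_reflush_ddev() is an illustrative helper, not a function in this patch, and the real recheck lives inside the reworked xlog_state_release_iclog()/xlog_write_iclog() paths:

/*
 * Hypothetical helper, not part of this patch: if metadata completions
 * moved the log tail after the data-device flush was issued, that
 * flush no longer covers the tail LSN about to be stamped into the
 * iclog, so it must be reissued before the log write.
 */
static void
xlog_maybe_reflush_ddev(
	struct xlog	*log,
	xfs_lsn_t	preflush_tail_lsn)
{
	if (atomic64_read(&log->l_tail_lsn) != preflush_tail_lsn)
		blkdev_issue_flush(log->l_mp->m_ddev_targp->bt_bdev);
}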
@@ -996,7 +1021,8 @@ xfs_growfs_rt( ((sbp->sb_rextents & ((1 << mp->m_blkbit_log) - 1)) != 0); bmbno < nrbmblocks; bmbno++) { - xfs_trans_t *tp; + struct xfs_trans *tp; + xfs_rfsblock_t nrblocks_step; *nmp = *mp; nsbp = &nmp->m_sb; @@ -1005,10 +1031,9 @@ xfs_growfs_rt( */ nsbp->sb_rextsize = in->extsize; nsbp->sb_rbmblocks = bmbno + 1; - nsbp->sb_rblocks = - XFS_RTMIN(nrblocks, - nsbp->sb_rbmblocks * NBBY * - nsbp->sb_blocksize * nsbp->sb_rextsize); + nrblocks_step = (bmbno + 1) * NBBY * nsbp->sb_blocksize * + nsbp->sb_rextsize; + nsbp->sb_rblocks = min(nrblocks, nrblocks_step); nsbp->sb_rextents = nsbp->sb_rblocks; do_div(nsbp->sb_rextents, nsbp->sb_rextsize); ASSERT(nsbp->sb_rextents != 0); diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index f9d8d605f9b1..19260291ff8b 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h @@ -3944,6 +3944,7 @@ DECLARE_EVENT_CLASS(xlog_iclog_class, __field(uint32_t, state) __field(int32_t, refcount) __field(uint32_t, offset) + __field(uint32_t, flags) __field(unsigned long long, lsn) __field(unsigned long, caller_ip) ), @@ -3952,15 +3953,17 @@ DECLARE_EVENT_CLASS(xlog_iclog_class, __entry->state = iclog->ic_state; __entry->refcount = atomic_read(&iclog->ic_refcnt); __entry->offset = iclog->ic_offset; + __entry->flags = iclog->ic_flags; __entry->lsn = be64_to_cpu(iclog->ic_header.h_lsn); __entry->caller_ip = caller_ip; ), - TP_printk("dev %d:%d state %s refcnt %d offset %u lsn 0x%llx caller %pS", + TP_printk("dev %d:%d state %s refcnt %d offset %u lsn 0x%llx flags %s caller %pS", MAJOR(__entry->dev), MINOR(__entry->dev), __print_symbolic(__entry->state, XLOG_STATE_STRINGS), __entry->refcount, __entry->offset, __entry->lsn, + __print_flags(__entry->flags, "|", XLOG_ICL_STRINGS), (char *)__entry->caller_ip) ); diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c index dbf03635869c..70055d486bf7 100644 --- a/fs/zonefs/super.c +++ b/fs/zonefs/super.c @@ -705,9 +705,6 @@ static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from) return 0; bio = bio_alloc(GFP_NOFS, nr_pages); - if (!bio) - return -ENOMEM; - bio_set_dev(bio, bdev); bio->bi_iter.bi_sector = zi->i_zsector; bio->bi_write_hint = iocb->ki_hint; diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 1ae993fee4a5..13d93371790e 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -707,11 +707,6 @@ acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv); * @hrv: Hardware Revision of the device, pass -1 to not check _HRV * * The caller is responsible for invoking acpi_dev_put() on the returned device. - * - * FIXME: Due to above requirement there is a window that may invalidate @adev - * and next iteration will use a dangling pointer, e.g. in the case of a - * hotplug event. That said, the caller should ensure that this will never - * happen. 
*/ #define for_each_acpi_dev_match(adev, hid, uid, hrv) \ for (adev = acpi_dev_get_first_match_dev(hid, uid, hrv); \ @@ -725,7 +720,8 @@ static inline struct acpi_device *acpi_dev_get(struct acpi_device *adev) static inline void acpi_dev_put(struct acpi_device *adev) { - put_device(&adev->dev); + if (adev) + put_device(&adev->dev); } struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle); diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 17325416e2de..62669b36a772 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -586,6 +586,7 @@ NOINSTR_TEXT \ *(.text..refcount) \ *(.ref.text) \ + *(.text.asan.* .text.tsan.*) \ TEXT_CFI_JT \ MEM_KEEP(init.text*) \ MEM_KEEP(exit.text*) \ diff --git a/include/drm/drm_ioctl.h b/include/drm/drm_ioctl.h index 10100a4bbe2a..afb27cb6a7bd 100644 --- a/include/drm/drm_ioctl.h +++ b/include/drm/drm_ioctl.h @@ -68,6 +68,7 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd, unsigned long arg); #define DRM_IOCTL_NR(n) _IOC_NR(n) +#define DRM_IOCTL_TYPE(n) _IOC_TYPE(n) #define DRM_MAJOR 226 /** diff --git a/include/dt-bindings/clock/r9a07g044-cpg.h b/include/dt-bindings/clock/r9a07g044-cpg.h index 1d8986563fc5..0728ad07ff7a 100644 --- a/include/dt-bindings/clock/r9a07g044-cpg.h +++ b/include/dt-bindings/clock/r9a07g044-cpg.h @@ -32,58 +32,188 @@ #define R9A07G044_OSCCLK 21 /* R9A07G044 Module Clocks */ -#define R9A07G044_CLK_GIC600 0 -#define R9A07G044_CLK_IA55 1 -#define R9A07G044_CLK_SYC 2 -#define R9A07G044_CLK_DMAC 3 -#define R9A07G044_CLK_SYSC 4 -#define R9A07G044_CLK_MTU 5 -#define R9A07G044_CLK_GPT 6 -#define R9A07G044_CLK_ETH0 7 -#define R9A07G044_CLK_ETH1 8 -#define R9A07G044_CLK_I2C0 9 -#define R9A07G044_CLK_I2C1 10 -#define R9A07G044_CLK_I2C2 11 -#define R9A07G044_CLK_I2C3 12 -#define R9A07G044_CLK_SCIF0 13 -#define R9A07G044_CLK_SCIF1 14 -#define R9A07G044_CLK_SCIF2 15 -#define R9A07G044_CLK_SCIF3 16 -#define R9A07G044_CLK_SCIF4 17 -#define R9A07G044_CLK_SCI0 18 -#define R9A07G044_CLK_SCI1 19 -#define R9A07G044_CLK_GPIO 20 -#define R9A07G044_CLK_SDHI0 21 -#define R9A07G044_CLK_SDHI1 22 -#define R9A07G044_CLK_USB0 23 -#define R9A07G044_CLK_USB1 24 -#define R9A07G044_CLK_CANFD 25 -#define R9A07G044_CLK_SSI0 26 -#define R9A07G044_CLK_SSI1 27 -#define R9A07G044_CLK_SSI2 28 -#define R9A07G044_CLK_SSI3 29 -#define R9A07G044_CLK_MHU 30 -#define R9A07G044_CLK_OSTM0 31 -#define R9A07G044_CLK_OSTM1 32 -#define R9A07G044_CLK_OSTM2 33 -#define R9A07G044_CLK_WDT0 34 -#define R9A07G044_CLK_WDT1 35 -#define R9A07G044_CLK_WDT2 36 -#define R9A07G044_CLK_WDT_PON 37 -#define R9A07G044_CLK_GPU 38 -#define R9A07G044_CLK_ISU 39 -#define R9A07G044_CLK_H264 40 -#define R9A07G044_CLK_CRU 41 -#define R9A07G044_CLK_MIPI_DSI 42 -#define R9A07G044_CLK_LCDC 43 -#define R9A07G044_CLK_SRC 44 -#define R9A07G044_CLK_RSPI0 45 -#define R9A07G044_CLK_RSPI1 46 -#define R9A07G044_CLK_RSPI2 47 -#define R9A07G044_CLK_ADC 48 -#define R9A07G044_CLK_TSU_PCLK 49 -#define R9A07G044_CLK_SPI 50 -#define R9A07G044_CLK_MIPI_DSI_V 51 -#define R9A07G044_CLK_MIPI_DSI_PIN 52 +#define R9A07G044_CA55_SCLK 0 +#define R9A07G044_CA55_PCLK 1 +#define R9A07G044_CA55_ATCLK 2 +#define R9A07G044_CA55_GICCLK 3 +#define R9A07G044_CA55_PERICLK 4 +#define R9A07G044_CA55_ACLK 5 +#define R9A07G044_CA55_TSCLK 6 +#define R9A07G044_GIC600_GICCLK 7 +#define R9A07G044_IA55_CLK 8 +#define R9A07G044_IA55_PCLK 9 +#define R9A07G044_MHU_PCLK 10 +#define R9A07G044_SYC_CNT_CLK 11 +#define R9A07G044_DMAC_ACLK 12 +#define 
R9A07G044_DMAC_PCLK 13 +#define R9A07G044_OSTM0_PCLK 14 +#define R9A07G044_OSTM1_PCLK 15 +#define R9A07G044_OSTM2_PCLK 16 +#define R9A07G044_MTU_X_MCK_MTU3 17 +#define R9A07G044_POE3_CLKM_POE 18 +#define R9A07G044_GPT_PCLK 19 +#define R9A07G044_POEG_A_CLKP 20 +#define R9A07G044_POEG_B_CLKP 21 +#define R9A07G044_POEG_C_CLKP 22 +#define R9A07G044_POEG_D_CLKP 23 +#define R9A07G044_WDT0_PCLK 24 +#define R9A07G044_WDT0_CLK 25 +#define R9A07G044_WDT1_PCLK 26 +#define R9A07G044_WDT1_CLK 27 +#define R9A07G044_WDT2_PCLK 28 +#define R9A07G044_WDT2_CLK 29 +#define R9A07G044_SPI_CLK2 30 +#define R9A07G044_SPI_CLK 31 +#define R9A07G044_SDHI0_IMCLK 32 +#define R9A07G044_SDHI0_IMCLK2 33 +#define R9A07G044_SDHI0_CLK_HS 34 +#define R9A07G044_SDHI0_ACLK 35 +#define R9A07G044_SDHI1_IMCLK 36 +#define R9A07G044_SDHI1_IMCLK2 37 +#define R9A07G044_SDHI1_CLK_HS 38 +#define R9A07G044_SDHI1_ACLK 39 +#define R9A07G044_GPU_CLK 40 +#define R9A07G044_GPU_AXI_CLK 41 +#define R9A07G044_GPU_ACE_CLK 42 +#define R9A07G044_ISU_ACLK 43 +#define R9A07G044_ISU_PCLK 44 +#define R9A07G044_H264_CLK_A 45 +#define R9A07G044_H264_CLK_P 46 +#define R9A07G044_CRU_SYSCLK 47 +#define R9A07G044_CRU_VCLK 48 +#define R9A07G044_CRU_PCLK 49 +#define R9A07G044_CRU_ACLK 50 +#define R9A07G044_MIPI_DSI_PLLCLK 51 +#define R9A07G044_MIPI_DSI_SYSCLK 52 +#define R9A07G044_MIPI_DSI_ACLK 53 +#define R9A07G044_MIPI_DSI_PCLK 54 +#define R9A07G044_MIPI_DSI_VCLK 55 +#define R9A07G044_MIPI_DSI_LPCLK 56 +#define R9A07G044_LCDC_CLK_A 57 +#define R9A07G044_LCDC_CLK_P 58 +#define R9A07G044_LCDC_CLK_D 59 +#define R9A07G044_SSI0_PCLK2 60 +#define R9A07G044_SSI0_PCLK_SFR 61 +#define R9A07G044_SSI1_PCLK2 62 +#define R9A07G044_SSI1_PCLK_SFR 63 +#define R9A07G044_SSI2_PCLK2 64 +#define R9A07G044_SSI2_PCLK_SFR 65 +#define R9A07G044_SSI3_PCLK2 66 +#define R9A07G044_SSI3_PCLK_SFR 67 +#define R9A07G044_SRC_CLKP 68 +#define R9A07G044_USB_U2H0_HCLK 69 +#define R9A07G044_USB_U2H1_HCLK 70 +#define R9A07G044_USB_U2P_EXR_CPUCLK 71 +#define R9A07G044_USB_PCLK 72 +#define R9A07G044_ETH0_CLK_AXI 73 +#define R9A07G044_ETH0_CLK_CHI 74 +#define R9A07G044_ETH1_CLK_AXI 75 +#define R9A07G044_ETH1_CLK_CHI 76 +#define R9A07G044_I2C0_PCLK 77 +#define R9A07G044_I2C1_PCLK 78 +#define R9A07G044_I2C2_PCLK 79 +#define R9A07G044_I2C3_PCLK 80 +#define R9A07G044_SCIF0_CLK_PCK 81 +#define R9A07G044_SCIF1_CLK_PCK 82 +#define R9A07G044_SCIF2_CLK_PCK 83 +#define R9A07G044_SCIF3_CLK_PCK 84 +#define R9A07G044_SCIF4_CLK_PCK 85 +#define R9A07G044_SCI0_CLKP 86 +#define R9A07G044_SCI1_CLKP 87 +#define R9A07G044_IRDA_CLKP 88 +#define R9A07G044_RSPI0_CLKB 89 +#define R9A07G044_RSPI1_CLKB 90 +#define R9A07G044_RSPI2_CLKB 91 +#define R9A07G044_CANFD_PCLK 92 +#define R9A07G044_GPIO_HCLK 93 +#define R9A07G044_ADC_ADCLK 94 +#define R9A07G044_ADC_PCLK 95 +#define R9A07G044_TSU_PCLK 96 + +/* R9A07G044 Resets */ +#define R9A07G044_CA55_RST_1_0 0 +#define R9A07G044_CA55_RST_1_1 1 +#define R9A07G044_CA55_RST_3_0 2 +#define R9A07G044_CA55_RST_3_1 3 +#define R9A07G044_CA55_RST_4 4 +#define R9A07G044_CA55_RST_5 5 +#define R9A07G044_CA55_RST_6 6 +#define R9A07G044_CA55_RST_7 7 +#define R9A07G044_CA55_RST_8 8 +#define R9A07G044_CA55_RST_9 9 +#define R9A07G044_CA55_RST_10 10 +#define R9A07G044_CA55_RST_11 11 +#define R9A07G044_CA55_RST_12 12 +#define R9A07G044_GIC600_GICRESET_N 13 +#define R9A07G044_GIC600_DBG_GICRESET_N 14 +#define R9A07G044_IA55_RESETN 15 +#define R9A07G044_MHU_RESETN 16 +#define R9A07G044_DMAC_ARESETN 17 +#define R9A07G044_DMAC_RST_ASYNC 18 +#define R9A07G044_SYC_RESETN 19 +#define 
R9A07G044_OSTM0_PRESETZ 20 +#define R9A07G044_OSTM1_PRESETZ 21 +#define R9A07G044_OSTM2_PRESETZ 22 +#define R9A07G044_MTU_X_PRESET_MTU3 23 +#define R9A07G044_POE3_RST_M_REG 24 +#define R9A07G044_GPT_RST_C 25 +#define R9A07G044_POEG_A_RST 26 +#define R9A07G044_POEG_B_RST 27 +#define R9A07G044_POEG_C_RST 28 +#define R9A07G044_POEG_D_RST 29 +#define R9A07G044_WDT0_PRESETN 30 +#define R9A07G044_WDT1_PRESETN 31 +#define R9A07G044_WDT2_PRESETN 32 +#define R9A07G044_SPI_RST 33 +#define R9A07G044_SDHI0_IXRST 34 +#define R9A07G044_SDHI1_IXRST 35 +#define R9A07G044_GPU_RESETN 36 +#define R9A07G044_GPU_AXI_RESETN 37 +#define R9A07G044_GPU_ACE_RESETN 38 +#define R9A07G044_ISU_ARESETN 39 +#define R9A07G044_ISU_PRESETN 40 +#define R9A07G044_H264_X_RESET_VCP 41 +#define R9A07G044_H264_CP_PRESET_P 42 +#define R9A07G044_CRU_CMN_RSTB 43 +#define R9A07G044_CRU_PRESETN 44 +#define R9A07G044_CRU_ARESETN 45 +#define R9A07G044_MIPI_DSI_CMN_RSTB 46 +#define R9A07G044_MIPI_DSI_ARESET_N 47 +#define R9A07G044_MIPI_DSI_PRESET_N 48 +#define R9A07G044_LCDC_RESET_N 49 +#define R9A07G044_SSI0_RST_M2_REG 50 +#define R9A07G044_SSI1_RST_M2_REG 51 +#define R9A07G044_SSI2_RST_M2_REG 52 +#define R9A07G044_SSI3_RST_M2_REG 53 +#define R9A07G044_SRC_RST 54 +#define R9A07G044_USB_U2H0_HRESETN 55 +#define R9A07G044_USB_U2H1_HRESETN 56 +#define R9A07G044_USB_U2P_EXL_SYSRST 57 +#define R9A07G044_USB_PRESETN 58 +#define R9A07G044_ETH0_RST_HW_N 59 +#define R9A07G044_ETH1_RST_HW_N 60 +#define R9A07G044_I2C0_MRST 61 +#define R9A07G044_I2C1_MRST 62 +#define R9A07G044_I2C2_MRST 63 +#define R9A07G044_I2C3_MRST 64 +#define R9A07G044_SCIF0_RST_SYSTEM_N 65 +#define R9A07G044_SCIF1_RST_SYSTEM_N 66 +#define R9A07G044_SCIF2_RST_SYSTEM_N 67 +#define R9A07G044_SCIF3_RST_SYSTEM_N 68 +#define R9A07G044_SCIF4_RST_SYSTEM_N 69 +#define R9A07G044_SCI0_RST 70 +#define R9A07G044_SCI1_RST 71 +#define R9A07G044_IRDA_RST 72 +#define R9A07G044_RSPI0_RST 73 +#define R9A07G044_RSPI1_RST 74 +#define R9A07G044_RSPI2_RST 75 +#define R9A07G044_CANFD_RSTP_N 76 +#define R9A07G044_CANFD_RSTC_N 77 +#define R9A07G044_GPIO_RSTN 78 +#define R9A07G044_GPIO_PORT_RESETN 79 +#define R9A07G044_GPIO_SPARE_RESETN 80 +#define R9A07G044_ADC_PRESETN 81 +#define R9A07G044_ADC_ADRST_N 82 +#define R9A07G044_TSU_PRESETN 83 #endif /* __DT_BINDINGS_CLOCK_R9A07G044_CPG_H__ */ diff --git a/include/dt-bindings/interconnect/qcom,sc8180x.h b/include/dt-bindings/interconnect/qcom,sc8180x.h new file mode 100644 index 000000000000..235b525d2803 --- /dev/null +++ b/include/dt-bindings/interconnect/qcom,sc8180x.h @@ -0,0 +1,185 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Qualcomm SC8180x interconnect IDs + * + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SC8180X_H +#define __DT_BINDINGS_INTERCONNECT_QCOM_SC8180X_H + +#define MASTER_A1NOC_CFG 0 +#define MASTER_UFS_CARD 1 +#define MASTER_UFS_GEN4 2 +#define MASTER_UFS_MEM 3 +#define MASTER_USB3 4 +#define MASTER_USB3_1 5 +#define MASTER_USB3_2 6 +#define A1NOC_SNOC_SLV 7 +#define SLAVE_SERVICE_A1NOC 8 + +#define MASTER_A2NOC_CFG 0 +#define MASTER_QDSS_BAM 1 +#define MASTER_QSPI_0 2 +#define MASTER_QSPI_1 3 +#define MASTER_QUP_0 4 +#define MASTER_QUP_1 5 +#define MASTER_QUP_2 6 +#define MASTER_SENSORS_AHB 7 +#define MASTER_CRYPTO_CORE_0 8 +#define MASTER_IPA 9 +#define MASTER_EMAC 10 +#define MASTER_PCIE 11 +#define MASTER_PCIE_1 12 +#define MASTER_PCIE_2 13 +#define MASTER_PCIE_3 14 +#define MASTER_QDSS_ETR 15 +#define MASTER_SDCC_2 16 +#define MASTER_SDCC_4 17 +#define A2NOC_SNOC_SLV 18 +#define SLAVE_ANOC_PCIE_GEM_NOC 19 +#define SLAVE_SERVICE_A2NOC 20 + +#define MASTER_CAMNOC_HF0_UNCOMP 0 +#define MASTER_CAMNOC_HF1_UNCOMP 1 +#define MASTER_CAMNOC_SF_UNCOMP 2 +#define SLAVE_CAMNOC_UNCOMP 3 + +#define MASTER_NPU 0 +#define SLAVE_CDSP_MEM_NOC 1 + +#define SNOC_CNOC_MAS 0 +#define SLAVE_A1NOC_CFG 1 +#define SLAVE_A2NOC_CFG 2 +#define SLAVE_AHB2PHY_CENTER 3 +#define SLAVE_AHB2PHY_EAST 4 +#define SLAVE_AHB2PHY_WEST 5 +#define SLAVE_AHB2PHY_SOUTH 6 +#define SLAVE_AOP 7 +#define SLAVE_AOSS 8 +#define SLAVE_CAMERA_CFG 9 +#define SLAVE_CLK_CTL 10 +#define SLAVE_CDSP_CFG 11 +#define SLAVE_RBCPR_CX_CFG 12 +#define SLAVE_RBCPR_MMCX_CFG 13 +#define SLAVE_RBCPR_MX_CFG 14 +#define SLAVE_CRYPTO_0_CFG 15 +#define SLAVE_CNOC_DDRSS 16 +#define SLAVE_DISPLAY_CFG 17 +#define SLAVE_EMAC_CFG 18 +#define SLAVE_GLM 19 +#define SLAVE_GRAPHICS_3D_CFG 20 +#define SLAVE_IMEM_CFG 21 +#define SLAVE_IPA_CFG 22 +#define SLAVE_CNOC_MNOC_CFG 23 +#define SLAVE_NPU_CFG 24 +#define SLAVE_PCIE_0_CFG 25 +#define SLAVE_PCIE_1_CFG 26 +#define SLAVE_PCIE_2_CFG 27 +#define SLAVE_PCIE_3_CFG 28 +#define SLAVE_PDM 29 +#define SLAVE_PIMEM_CFG 30 +#define SLAVE_PRNG 31 +#define SLAVE_QDSS_CFG 32 +#define SLAVE_QSPI_0 33 +#define SLAVE_QSPI_1 34 +#define SLAVE_QUP_1 35 +#define SLAVE_QUP_2 36 +#define SLAVE_QUP_0 37 +#define SLAVE_SDCC_2 38 +#define SLAVE_SDCC_4 39 +#define SLAVE_SECURITY 40 +#define SLAVE_SNOC_CFG 41 +#define SLAVE_SPSS_CFG 42 +#define SLAVE_TCSR 43 +#define SLAVE_TLMM_EAST 44 +#define SLAVE_TLMM_SOUTH 45 +#define SLAVE_TLMM_WEST 46 +#define SLAVE_TSIF 47 +#define SLAVE_UFS_CARD_CFG 48 +#define SLAVE_UFS_MEM_0_CFG 49 +#define SLAVE_UFS_MEM_1_CFG 50 +#define SLAVE_USB3 51 +#define SLAVE_USB3_1 52 +#define SLAVE_USB3_2 53 +#define SLAVE_VENUS_CFG 54 +#define SLAVE_VSENSE_CTRL_CFG 55 +#define SLAVE_SERVICE_CNOC 56 + +#define MASTER_CNOC_DC_NOC 0 +#define SLAVE_GEM_NOC_CFG 1 +#define SLAVE_LLCC_CFG 2 + +#define MASTER_AMPSS_M0 0 +#define MASTER_GPU_TCU 1 +#define MASTER_SYS_TCU 2 +#define MASTER_GEM_NOC_CFG 3 +#define MASTER_COMPUTE_NOC 4 +#define MASTER_GRAPHICS_3D 5 +#define MASTER_MNOC_HF_MEM_NOC 6 +#define MASTER_MNOC_SF_MEM_NOC 7 +#define MASTER_GEM_NOC_PCIE_SNOC 8 +#define MASTER_SNOC_GC_MEM_NOC 9 +#define MASTER_SNOC_SF_MEM_NOC 10 +#define MASTER_ECC 11 +#define SLAVE_MSS_PROC_MS_MPU_CFG 12 +#define SLAVE_ECC 13 +#define SLAVE_GEM_NOC_SNOC 14 +#define SLAVE_LLCC 15 +#define SLAVE_SERVICE_GEM_NOC 16 +#define SLAVE_SERVICE_GEM_NOC_1 17 + +#define MASTER_IPA_CORE 0 +#define SLAVE_IPA_CORE 1 + +#define MASTER_LLCC 0 +#define SLAVE_EBI_CH0 1 + +#define MASTER_CNOC_MNOC_CFG 0 +#define MASTER_CAMNOC_HF0 1 +#define MASTER_CAMNOC_HF1 2 +#define MASTER_CAMNOC_SF 3 +#define 
MASTER_MDP_PORT0 4 +#define MASTER_MDP_PORT1 5 +#define MASTER_ROTATOR 6 +#define MASTER_VIDEO_P0 7 +#define MASTER_VIDEO_P1 8 +#define MASTER_VIDEO_PROC 9 +#define SLAVE_MNOC_SF_MEM_NOC 10 +#define SLAVE_MNOC_HF_MEM_NOC 11 +#define SLAVE_SERVICE_MNOC 12 + +#define MASTER_SNOC_CFG 0 +#define A1NOC_SNOC_MAS 1 +#define A2NOC_SNOC_MAS 2 +#define MASTER_GEM_NOC_SNOC 3 +#define MASTER_PIMEM 4 +#define MASTER_GIC 5 +#define SLAVE_APPSS 6 +#define SNOC_CNOC_SLV 7 +#define SLAVE_SNOC_GEM_NOC_GC 8 +#define SLAVE_SNOC_GEM_NOC_SF 9 +#define SLAVE_OCIMEM 10 +#define SLAVE_PIMEM 11 +#define SLAVE_SERVICE_SNOC 12 +#define SLAVE_PCIE_0 13 +#define SLAVE_PCIE_1 14 +#define SLAVE_PCIE_2 15 +#define SLAVE_PCIE_3 16 +#define SLAVE_QDSS_STM 17 +#define SLAVE_TCU 18 + +#define MASTER_MNOC_HF_MEM_NOC_DISPLAY 0 +#define MASTER_MNOC_SF_MEM_NOC_DISPLAY 1 +#define SLAVE_LLCC_DISPLAY 2 + +#define MASTER_LLCC_DISPLAY 0 +#define SLAVE_EBI_CH0_DISPLAY 1 + +#define MASTER_MDP_PORT0_DISPLAY 0 +#define MASTER_MDP_PORT1_DISPLAY 1 +#define MASTER_ROTATOR_DISPLAY 2 +#define SLAVE_MNOC_SF_MEM_NOC_DISPLAY 3 +#define SLAVE_MNOC_HF_MEM_NOC_DISPLAY 4 + +#endif diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 3177181c4326..d3afea47ade6 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -57,7 +57,7 @@ struct blk_keyslot_manager; * Maximum number of blkcg policies allowed to be registered concurrently. * Defined here to simplify include dependency. */ -#define BLKCG_MAX_POLS 5 +#define BLKCG_MAX_POLS 6 typedef void (rq_end_io_fn)(struct request *, blk_status_t); diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index 8b77d08d4b47..6c9b10d82c80 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -201,8 +201,8 @@ static inline void bpf_cgroup_storage_unset(void) { int i; - for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) { - if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current)) + for (i = BPF_CGROUP_STORAGE_NEST_MAX - 1; i >= 0; i--) { + if (likely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current)) continue; this_cpu_write(bpf_cgroup_storage_info[i].task, NULL); diff --git a/include/linux/bpf.h b/include/linux/bpf.h index f309fc1509f2..e8e2b0393ca9 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -780,6 +780,7 @@ struct bpf_jit_poke_descriptor { void *tailcall_target; void *tailcall_bypass; void *bypass_addr; + void *aux; union { struct { struct bpf_map *map; diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index a9db1eae6796..ae3ac3a2018c 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -134,4 +134,5 @@ BPF_LINK_TYPE(BPF_LINK_TYPE_CGROUP, cgroup) BPF_LINK_TYPE(BPF_LINK_TYPE_ITER, iter) #ifdef CONFIG_NET BPF_LINK_TYPE(BPF_LINK_TYPE_NETNS, netns) +BPF_LINK_TYPE(BPF_LINK_TYPE_XDP, xdp) #endif diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index e774ecc1cd1f..828d08afeee0 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -340,8 +340,8 @@ struct bpf_insn_aux_data { }; u64 map_key_state; /* constant (32 bit) key tracking for maps */ int ctx_field_size; /* the ctx field size for load insn, maybe 0 */ - int sanitize_stack_off; /* stack slot to be cleared */ u32 seen; /* this insn was processed by the verifier at env->pass_cnt */ + bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */ bool zext_dst; /* this insn zero extends dst reg */ u8 alu_state; /* used in combination with alu_limit */ @@ -414,6 +414,7 @@ struct 
bpf_verifier_env { u32 used_map_cnt; /* number of used maps */ u32 used_btf_cnt; /* number of used BTF objects */ u32 id_gen; /* used to generate unique reg IDs */ + bool explore_alu_limits; bool allow_ptr_leaks; bool allow_uninit_stack; bool allow_ptr_to_map_access; diff --git a/include/linux/coresight.h b/include/linux/coresight.h index 85008a65e21f..93a2922b7653 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h @@ -220,6 +220,10 @@ struct coresight_sysfs_link { * @nr_links: number of sysfs links created to other components from this * device. These will appear in the "connections" group. * @has_conns_grp: Have added a "connections" group for sysfs links. + * @feature_csdev_list: List of complex feature programming added to the device. + * @config_csdev_list: List of system configurations added to the device. + * @cscfg_csdev_lock: Protect the lists of configurations and features. + * @active_cscfg_ctxt: Context information for current active system configuration. */ struct coresight_device { struct coresight_platform_data *pdata; @@ -241,6 +245,11 @@ struct coresight_device { int nr_links; bool has_conns_grp; bool ect_enabled; /* true only if associated ect device is enabled */ + /* system configuration and feature lists */ + struct list_head feature_csdev_list; + struct list_head config_csdev_list; + spinlock_t cscfg_csdev_lock; + void *active_cscfg_ctxt; }; /* diff --git a/include/linux/device.h b/include/linux/device.h index 59940f1744c1..65d84b67b024 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -407,6 +407,7 @@ struct dev_links_info { * @em_pd: device's energy model performance domain * @pins: For device pin management. * See Documentation/driver-api/pin-control.rst for details. + * @msi_lock: Lock to protect MSI mask cache and mask register * @msi_list: Hosts MSI descriptors * @msi_domain: The generic MSI domain this device is using. * @numa_node: NUMA node this device is close to. 
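The new @msi_lock documented above serializes updates to the cached MSI mask state and the device mask register. A simplified sketch of the locking pattern it implies (msi_update_mask() is illustrative, not the actual __pci_msi_desc_mask_irq() body; the register write is elided):

/* Sketch only: mask-cache update guarded by the new per-device lock. */
static void msi_update_mask(struct msi_desc *desc, u32 clear, u32 set)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->dev->msi_lock, flags);
	desc->masked &= ~clear;
	desc->masked |= set;
	/* ...write desc->masked back to the device's mask register... */
	raw_spin_unlock_irqrestore(&desc->dev->msi_lock, flags);
}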
@@ -506,6 +507,7 @@ struct device { struct dev_pin_info *pins; #endif #ifdef CONFIG_GENERIC_MSI_IRQ + raw_spinlock_t msi_lock; struct list_head msi_list; #endif #ifdef CONFIG_DMA_OPS diff --git a/include/linux/dfl.h b/include/linux/dfl.h index 6cc10982351a..431636a0dc78 100644 --- a/include/linux/dfl.h +++ b/include/linux/dfl.h @@ -38,6 +38,7 @@ struct dfl_device { int id; u16 type; u16 feature_id; + u8 revision; struct resource mmio_res; int *irqs; unsigned int num_irqs; diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 29dbb603bc91..232daaec56e4 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -758,6 +758,16 @@ ethtool_params_from_link_mode(struct ethtool_link_ksettings *link_ksettings, enum ethtool_link_mode_bit_indices link_mode); /** + * ethtool_get_phc_vclocks - Derive phc vclocks information, and caller + * is responsible to free memory of vclock_index + * @dev: pointer to net_device structure + * @vclock_index: pointer to pointer of vclock index + * + * Return number of phc vclocks + */ +int ethtool_get_phc_vclocks(struct net_device *dev, int **vclock_index); + +/** * ethtool_sprintf - Write formatted string to ethtool string data * @data: Pointer to start of string to update * @fmt: Format of string to write diff --git a/include/linux/filter.h b/include/linux/filter.h index 472f97074da0..83b896044e79 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -73,6 +73,11 @@ struct ctl_table_header; /* unused opcode to mark call to interpreter with arguments */ #define BPF_CALL_ARGS 0xe0 +/* unused opcode to mark speculation barrier for mitigating + * Speculative Store Bypass + */ +#define BPF_NOSPEC 0xc0 + /* As per nm, we expose JITed images as text (code) section for * kallsyms. That way, tools like perf can find it to match * addresses. 
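Returning to ethtool_get_phc_vclocks() documented earlier: the helper allocates the index array on behalf of the caller, so the caller owns and must free it. A hypothetical caller (show_vclocks() is illustrative only):

/* Sketch of the documented contract: caller frees vclock_index. */
static void show_vclocks(struct net_device *dev)
{
	int *vclock_index;
	int i, num;

	num = ethtool_get_phc_vclocks(dev, &vclock_index);
	for (i = 0; i < num; i++)
		pr_info("phc vclock index: %d\n", vclock_index[i]);
	if (num > 0)
		kfree(vclock_index);
}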
@@ -390,6 +395,16 @@ static inline bool insn_is_zext(const struct bpf_insn *insn) .off = 0, \ .imm = 0 }) +/* Speculation barrier */ + +#define BPF_ST_NOSPEC() \ + ((struct bpf_insn) { \ + .code = BPF_ST | BPF_NOSPEC, \ + .dst_reg = 0, \ + .src_reg = 0, \ + .off = 0, \ + .imm = 0 }) + /* Internal classic blocks for direct assignment */ #define __BPF_STMT(CODE, K) \ diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h index 9d1a5c175065..56b426fe020c 100644 --- a/include/linux/firmware/xlnx-zynqmp.h +++ b/include/linux/firmware/xlnx-zynqmp.h @@ -52,6 +52,10 @@ #define ZYNQMP_PM_CAPABILITY_WAKEUP 0x4U #define ZYNQMP_PM_CAPABILITY_UNUSABLE 0x8U +/* Loader commands */ +#define PM_LOAD_PDI 0x701 +#define PDI_SRC_DDR 0xF + /* * Firmware FPGA Manager flags * XILINX_ZYNQMP_PM_FPGA_FULL: FPGA full reconfiguration @@ -411,6 +415,7 @@ int zynqmp_pm_pinctrl_get_config(const u32 pin, const u32 param, u32 *value); int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param, u32 value); +int zynqmp_pm_load_pdi(const u32 src, const u64 address); #else static inline int zynqmp_pm_get_api_version(u32 *version) { @@ -622,6 +627,11 @@ static inline int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param, { return -ENODEV; } + +static inline int zynqmp_pm_load_pdi(const u32 src, const u64 address) +{ + return -ENODEV; +} #endif #endif /* __FIRMWARE_ZYNQMP_H__ */ diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h index ec2cd8bfceb0..474c1f506307 100644 --- a/include/linux/fpga/fpga-mgr.h +++ b/include/linux/fpga/fpga-mgr.h @@ -110,7 +110,7 @@ struct fpga_image_info { * @initial_header_size: Maximum number of bytes that should be passed into write_init * @state: returns an enum value of the FPGA's state * @status: returns status of the FPGA, including reconfiguration error code - * @write_init: prepare the FPGA to receive confuration data + * @write_init: prepare the FPGA to receive configuration data * @write: write count bytes of configuration data to the FPGA * @write_sg: write the scatter list of configuration data to the FPGA * @write_complete: set FPGA to operating state after writing is done diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h index 37e1e8f7f08d..6b54982fc5f3 100644 --- a/include/linux/fs_context.h +++ b/include/linux/fs_context.h @@ -139,6 +139,9 @@ extern int vfs_parse_fs_string(struct fs_context *fc, const char *key, extern int generic_parse_monolithic(struct fs_context *fc, void *data); extern int vfs_get_tree(struct fs_context *fc); extern void put_fs_context(struct fs_context *fc); +extern int vfs_parse_fs_param_source(struct fs_context *fc, + struct fs_parameter *param); +extern void fc_drop_locked(struct fs_context *fc); /* * sget() wrappers to be called from the ->get_tree() op. 
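The BPF_ST_NOSPEC() macro above gives the verifier a way to emit a speculation barrier ahead of a stack spill that is subject to Spectre v4 sanitization. An illustrative instruction sequence (not verifier code, just what such a patched spill looks like):

/*
 * Illustrative only: a barrier followed by the sanitized spill of r1
 * to the stack at fp-8.
 */
struct bpf_insn patch[] = {
	BPF_ST_NOSPEC(),				/* speculation barrier */
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),	/* the store it protects */
};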
diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 8c6e8e996c87..d9a606a9fc64 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -318,14 +318,16 @@ static inline void memcpy_to_page(struct page *page, size_t offset, VM_BUG_ON(offset + len > PAGE_SIZE); memcpy(to + offset, from, len); + flush_dcache_page(page); kunmap_local(to); } static inline void memzero_page(struct page *page, size_t offset, size_t len) { - char *addr = kmap_atomic(page); + char *addr = kmap_local_page(page); memset(addr + offset, 0, len); - kunmap_atomic(addr); + flush_dcache_page(page); + kunmap_local(addr); } #endif /* _LINUX_HIGHMEM_H */ diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index 53aa0343bf69..aaf4f1b4c277 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -41,7 +41,7 @@ struct in_device { unsigned long mr_qri; /* Query Response Interval */ unsigned char mr_qrv; /* Query Robustness Variable */ unsigned char mr_gq_running; - unsigned char mr_ifc_count; + u32 mr_ifc_count; struct timer_list mr_gq_timer; /* general query timer */ struct timer_list mr_ifc_timer; /* interface change timer */ diff --git a/include/linux/intel-ish-client-if.h b/include/linux/intel-ish-client-if.h index 25e2b4e80502..aee8ff4739b1 100644 --- a/include/linux/intel-ish-client-if.h +++ b/include/linux/intel-ish-client-if.h @@ -81,6 +81,8 @@ int ishtp_register_event_cb(struct ishtp_cl_device *device, /* Get the device * from ishtp device instance */ struct device *ishtp_device(struct ishtp_cl_device *cl_device); +/* wait for IPC resume */ +bool ishtp_wait_resume(struct ishtp_device *dev); /* Trace interface for clients */ ishtp_print_log ishtp_trace_callback(struct ishtp_cl_device *cl_device); /* Get device pointer of PCI device for DMA acces */ diff --git a/include/linux/irq.h b/include/linux/irq.h index 8e9a9ae471a6..c8293c817646 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -569,6 +569,7 @@ struct irq_chip { * IRQCHIP_SUPPORTS_NMI: Chip can deliver NMIs, only for root irqchips * IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND: Invokes __enable_irq()/__disable_irq() for wake irqs * in the suspend path if they are in disabled state + * IRQCHIP_AFFINITY_PRE_STARTUP: Default affinity update before startup */ enum { IRQCHIP_SET_TYPE_MASKED = (1 << 0), @@ -581,6 +582,7 @@ enum { IRQCHIP_SUPPORTS_LEVEL_MSI = (1 << 7), IRQCHIP_SUPPORTS_NMI = (1 << 8), IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND = (1 << 9), + IRQCHIP_AFFINITY_PRE_STARTUP = (1 << 10), }; #include <linux/irqdesc.h> diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 5310e217bd74..dd874a1ee862 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -3,6 +3,7 @@ #define _LINUX_KASAN_H #include <linux/bug.h> +#include <linux/kernel.h> #include <linux/static_key.h> #include <linux/types.h> diff --git a/include/linux/kfence.h b/include/linux/kfence.h index a70d1ea03532..3fe6dd8a18c1 100644 --- a/include/linux/kfence.h +++ b/include/linux/kfence.h @@ -51,10 +51,11 @@ extern atomic_t kfence_allocation_gate; static __always_inline bool is_kfence_address(const void *addr) { /* - * The non-NULL check is required in case the __kfence_pool pointer was - * never initialized; keep it in the slow-path after the range-check. + * The __kfence_pool != NULL check is required to deal with the case + * where __kfence_pool == NULL && addr < KFENCE_POOL_SIZE. Keep it in + * the slow-path after the range-check! 
*/ - return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && addr); + return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && __kfence_pool); } /** diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index acee44b9db26..0f06c2287b52 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h @@ -22,14 +22,10 @@ #define MARVELL_PHY_ID_88E1545 0x01410ea0 #define MARVELL_PHY_ID_88E1548P 0x01410ec0 #define MARVELL_PHY_ID_88E3016 0x01410e60 +#define MARVELL_PHY_ID_88X3310 0x002b09a0 #define MARVELL_PHY_ID_88E2110 0x002b09b0 #define MARVELL_PHY_ID_88X2222 0x01410f10 -/* PHY IDs and mask for Alaska 10G PHYs */ -#define MARVELL_PHY_ID_88X33X0_MASK 0xfffffff8 -#define MARVELL_PHY_ID_88X3310 0x002b09a0 -#define MARVELL_PHY_ID_88X3340 0x002b09a8 - /* Marvel 88E1111 in Finisar SFP module with modified PHY ID */ #define MARVELL_PHY_ID_88E1111_FINISAR 0x01ff0cc0 diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h index 07f5ef8fc456..c6786c12b207 100644 --- a/include/linux/mei_cl_bus.h +++ b/include/linux/mei_cl_bus.h @@ -91,12 +91,13 @@ void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv); mei_cldev_driver_register,\ mei_cldev_driver_unregister) -ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length); +ssize_t mei_cldev_send(struct mei_cl_device *cldev, const u8 *buf, + size_t length); ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length); ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf, size_t length); -ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length, - u8 vtag); +ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, const u8 *buf, + size_t length, u8 vtag); ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length, u8 *vtag); ssize_t mei_cldev_recv_nonblock_vtag(struct mei_cl_device *cldev, u8 *buf, @@ -114,6 +115,6 @@ void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data); int mei_cldev_enable(struct mei_cl_device *cldev); int mei_cldev_disable(struct mei_cl_device *cldev); -bool mei_cldev_enabled(struct mei_cl_device *cldev); +bool mei_cldev_enabled(const struct mei_cl_device *cldev); #endif /* _LINUX_MEI_CL_BUS_H */ diff --git a/include/linux/memblock.h b/include/linux/memblock.h index cbf46f56d105..4a53c3ca86bd 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -209,7 +209,7 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type, */ #define for_each_mem_range(i, p_start, p_end) \ __for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE, \ - MEMBLOCK_NONE, p_start, p_end, NULL) + MEMBLOCK_HOTPLUG, p_start, p_end, NULL) /** * for_each_mem_range_rev - reverse iterate through memblock areas from @@ -220,7 +220,7 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type, */ #define for_each_mem_range_rev(i, p_start, p_end) \ __for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, \ - MEMBLOCK_NONE, p_start, p_end, NULL) + MEMBLOCK_HOTPLUG, p_start, p_end, NULL) /** * for_each_reserved_mem_range - iterate over all reserved memblock areas diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index bfe5c486f4ad..24797929d8a1 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -612,12 +612,15 @@ static inline bool mem_cgroup_disabled(void) return !cgroup_subsys_enabled(memory_cgrp_subsys); } -static inline unsigned long 
mem_cgroup_protection(struct mem_cgroup *root, - struct mem_cgroup *memcg, - bool in_low_reclaim) +static inline void mem_cgroup_protection(struct mem_cgroup *root, + struct mem_cgroup *memcg, + unsigned long *min, + unsigned long *low) { + *min = *low = 0; + if (mem_cgroup_disabled()) - return 0; + return; /* * There is no reclaim protection applied to a targeted reclaim. @@ -653,13 +656,10 @@ static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root, * */ if (root == memcg) - return 0; - - if (in_low_reclaim) - return READ_ONCE(memcg->memory.emin); + return; - return max(READ_ONCE(memcg->memory.emin), - READ_ONCE(memcg->memory.elow)); + *min = READ_ONCE(memcg->memory.emin); + *low = READ_ONCE(memcg->memory.elow); } void mem_cgroup_calculate_protection(struct mem_cgroup *root, @@ -1147,11 +1147,12 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm, { } -static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root, - struct mem_cgroup *memcg, - bool in_low_reclaim) +static inline void mem_cgroup_protection(struct mem_cgroup *root, + struct mem_cgroup *memcg, + unsigned long *min, + unsigned long *low) { - return 0; + *min = *low = 0; } static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root, diff --git a/include/linux/mfd/rt5033-private.h b/include/linux/mfd/rt5033-private.h index 2d1895c3efbf..40a0c2dfb80f 100644 --- a/include/linux/mfd/rt5033-private.h +++ b/include/linux/mfd/rt5033-private.h @@ -200,13 +200,13 @@ enum rt5033_reg { #define RT5033_REGULATOR_BUCK_VOLTAGE_MIN 1000000U #define RT5033_REGULATOR_BUCK_VOLTAGE_MAX 3000000U #define RT5033_REGULATOR_BUCK_VOLTAGE_STEP 100000U -#define RT5033_REGULATOR_BUCK_VOLTAGE_STEP_NUM 32 +#define RT5033_REGULATOR_BUCK_VOLTAGE_STEP_NUM 21 /* RT5033 regulator LDO output voltage uV */ #define RT5033_REGULATOR_LDO_VOLTAGE_MIN 1200000U #define RT5033_REGULATOR_LDO_VOLTAGE_MAX 3000000U #define RT5033_REGULATOR_LDO_VOLTAGE_STEP 100000U -#define RT5033_REGULATOR_LDO_VOLTAGE_STEP_NUM 32 +#define RT5033_REGULATOR_LDO_VOLTAGE_STEP_NUM 19 /* RT5033 regulator SAFE LDO output voltage uV */ #define RT5033_REGULATOR_SAFE_LDO_VOLTAGE 4900000U diff --git a/include/linux/mhi.h b/include/linux/mhi.h index 944aa3aa3035..b8ca6943f0b7 100644 --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@ -303,6 +303,7 @@ struct mhi_controller_config { * @rddm_size: RAM dump size that host should allocate for debugging purpose * @sbl_size: SBL image size downloaded through BHIe (optional) * @seg_len: BHIe vector size (optional) + * @reg_len: Length of the MHI MMIO region (required) * @fbc_image: Points to firmware image buffer * @rddm_image: Points to RAM dump buffer * @mhi_chan: Points to the channel configuration table @@ -386,6 +387,7 @@ struct mhi_controller { size_t rddm_size; size_t sbl_size; size_t seg_len; + size_t reg_len; struct image_info *fbc_image; struct image_info *rddm_image; struct mhi_chan *mhi_chan; @@ -719,8 +721,13 @@ void mhi_device_put(struct mhi_device *mhi_dev); * host and device execution environments match and * channels are in a DISABLED state. * @mhi_dev: Device associated with the channels + * @flags: MHI channel flags */ -int mhi_prepare_for_transfer(struct mhi_device *mhi_dev); +int mhi_prepare_for_transfer(struct mhi_device *mhi_dev, + unsigned int flags); + +/* Automatically allocate and queue inbound buffers */ +#define MHI_CH_INBOUND_ALLOC_BUFS BIT(0) /** * mhi_unprepare_from_transfer - Reset UL and DL channels for data transfer. 
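With the new @flags argument to mhi_prepare_for_transfer(), client drivers must now opt in explicitly if they want the core to manage inbound buffers. A minimal sketch of a client probe (my_mhi_probe is a hypothetical driver, assuming a channel that wants core-queued inbound buffers):

/* Hypothetical MHI client probe using the new flags argument. */
static int my_mhi_probe(struct mhi_device *mhi_dev,
			const struct mhi_device_id *id)
{
	/* Ask the MHI core to allocate and queue inbound buffers. */
	return mhi_prepare_for_transfer(mhi_dev, MHI_CH_INBOUND_ALLOC_BUFS);
}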
diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 9b7b7cd3bae9..23dadf7aeba8 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -51,7 +51,6 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping, struct page *newpage, struct page *page); extern int migrate_page_move_mapping(struct address_space *mapping, struct page *newpage, struct page *page, int extra_count); -extern void copy_huge_page(struct page *dst, struct page *src); #else static inline void putback_movable_pages(struct list_head *l) {} @@ -77,10 +76,6 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping, { return -ENOSYS; } - -static inline void copy_huge_page(struct page *dst, struct page *src) -{ -} #endif /* CONFIG_MIGRATION */ #ifdef CONFIG_COMPACTION diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 1efe37466969..25a8be58d289 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -1044,8 +1044,7 @@ void mlx5_unregister_debugfs(void); void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas); void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm); void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas); -int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, - unsigned int *irqn); +int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn); int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); diff --git a/include/linux/mlx5/mlx5_ifc_vdpa.h b/include/linux/mlx5/mlx5_ifc_vdpa.h index 98b56b75c625..1a9c9d94cb59 100644 --- a/include/linux/mlx5/mlx5_ifc_vdpa.h +++ b/include/linux/mlx5/mlx5_ifc_vdpa.h @@ -11,13 +11,15 @@ enum { }; enum { - MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT = 0x1, // do I check this caps? - MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED = 0x2, + MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT = 0, + MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED = 1, }; enum { - MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT = 0, - MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED = 1, + MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT = + BIT(MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT), + MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED = + BIT(MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED), }; struct mlx5_ifc_virtio_q_bits { diff --git a/include/linux/mm.h b/include/linux/mm.h index 57453dba41b9..7ca22e6e694a 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -906,6 +906,7 @@ void __put_page(struct page *page); void put_pages_list(struct list_head *pages); void split_page(struct page *page, unsigned int order); +void copy_huge_page(struct page *dst, struct page *src); /* * Compound pages have a destructor function. 
Provide a diff --git a/include/linux/msi.h b/include/linux/msi.h index 6aff469e511d..e8bdcb83172b 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -233,7 +233,7 @@ void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag); -u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag); +void __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag); void pci_msi_mask_irq(struct irq_data *data); void pci_msi_unmask_irq(struct irq_data *data); diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index 10279c4830ac..ada1296c87d5 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h @@ -196,6 +196,9 @@ struct ip_set_region { u32 elements; /* Number of elements vs timeout */ }; +/* Max range where every element is added/deleted in one step */ +#define IPSET_MAX_RANGE (1<<20) + /* The max revision number supported by any set type + 1 */ #define IPSET_REVISION_MAX 9 diff --git a/include/linux/once.h b/include/linux/once.h index 9225ee6d96c7..ae6f4eb41cbe 100644 --- a/include/linux/once.h +++ b/include/linux/once.h @@ -7,7 +7,7 @@ bool __do_once_start(bool *done, unsigned long *flags); void __do_once_done(bool *done, struct static_key_true *once_key, - unsigned long *flags); + unsigned long *flags, struct module *mod); /* Call a function exactly once. The idea of DO_ONCE() is to perform * a function call such as initialization of random seeds, etc, only @@ -46,7 +46,7 @@ void __do_once_done(bool *done, struct static_key_true *once_key, if (unlikely(___ret)) { \ func(__VA_ARGS__); \ __do_once_done(&___done, &___once_key, \ - &___flags); \ + &___flags, THIS_MODULE); \ } \ } \ ___ret; \ diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index d147480cdefc..e24d2c992b11 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -1397,34 +1397,10 @@ static inline int p4d_clear_huge(p4d_t *p4d) } #endif /* !__PAGETABLE_P4D_FOLDED */ -#ifndef __PAGETABLE_PUD_FOLDED int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot); -int pud_clear_huge(pud_t *pud); -#else -static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot) -{ - return 0; -} -static inline int pud_clear_huge(pud_t *pud) -{ - return 0; -} -#endif /* !__PAGETABLE_PUD_FOLDED */ - -#ifndef __PAGETABLE_PMD_FOLDED int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot); +int pud_clear_huge(pud_t *pud); int pmd_clear_huge(pmd_t *pmd); -#else -static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot) -{ - return 0; -} -static inline int pmd_clear_huge(pmd_t *pmd) -{ - return 0; -} -#endif /* !__PAGETABLE_PMD_FOLDED */ - int p4d_free_pud_page(p4d_t *p4d, unsigned long addr); int pud_free_pmd_page(pud_t *pud, unsigned long addr); int pmd_free_pte_page(pmd_t *pmd, unsigned long addr); diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index 5d2705f1d01c..fc5642431b92 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h @@ -48,6 +48,7 @@ struct pipe_buffer { * @files: number of struct file referring this pipe (protected by ->i_lock) * @r_counter: reader counter * @w_counter: writer counter + * @poll_usage: is this pipe used for epoll, which has crazy wakeups? 
* @fasync_readers: reader side fasync * @fasync_writers: writer side fasync * @bufs: the circular array of pipe buffers @@ -70,6 +71,7 @@ struct pipe_inode_info { unsigned int files; unsigned int r_counter; unsigned int w_counter; + unsigned int poll_usage; struct page *tmp_page; struct fasync_struct *fasync_readers; struct fasync_struct *fasync_writers; diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h index aba237c0b3a2..71fac9237725 100644 --- a/include/linux/ptp_clock_kernel.h +++ b/include/linux/ptp_clock_kernel.h @@ -11,7 +11,10 @@ #include <linux/device.h> #include <linux/pps_kernel.h> #include <linux/ptp_clock.h> +#include <linux/timecounter.h> +#include <linux/skbuff.h> +#define PTP_CLOCK_NAME_LEN 32 /** * struct ptp_clock_request - request PTP clock event * @@ -134,7 +137,7 @@ struct ptp_system_timestamp { struct ptp_clock_info { struct module *owner; - char name[16]; + char name[PTP_CLOCK_NAME_LEN]; s32 max_adj; int n_alarm; int n_ext_ts; @@ -304,6 +307,27 @@ int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay); */ void ptp_cancel_worker_sync(struct ptp_clock *ptp); +/** + * ptp_get_vclocks_index() - get all vclocks index on pclock, and + * caller is responsible to free memory + * of vclock_index + * + * @pclock_index: phc index of ptp pclock. + * @vclock_index: pointer to pointer of vclock index. + * + * return number of vclocks. + */ +int ptp_get_vclocks_index(int pclock_index, int **vclock_index); + +/** + * ptp_convert_timestamp() - convert timestamp to a ptp vclock time + * + * @hwtstamps: skb_shared_hwtstamps structure pointer + * @vclock_index: phc index of ptp vclock. + */ +void ptp_convert_timestamp(struct skb_shared_hwtstamps *hwtstamps, + int vclock_index); + #else static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, struct device *parent) @@ -323,6 +347,11 @@ static inline int ptp_schedule_worker(struct ptp_clock *ptp, { return -EOPNOTSUPP; } static inline void ptp_cancel_worker_sync(struct ptp_clock *ptp) { } +static inline int ptp_get_vclocks_index(int pclock_index, int **vclock_index) +{ return 0; } +static inline void ptp_convert_timestamp(struct skb_shared_hwtstamps *hwtstamps, + int vclock_index) +{ } #endif diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 83fb86133fe1..c976cc6de257 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -291,7 +291,9 @@ static inline int page_referenced(struct page *page, int is_locked, return 0; } -#define try_to_unmap(page, refs) false +static inline void try_to_unmap(struct page *page, enum ttu_flags flags) +{ +} static inline int page_mkclean(struct page *page) { diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index 79d0a1237e6c..80e781c51ddc 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -101,6 +101,10 @@ struct scmi_clk_proto_ops { * to sustained performance level mapping * @est_power_get: gets the estimated power cost for a given performance domain * at a given frequency + * @fast_switch_possible: indicates if fast DVFS switching is possible or not + * for a given device + * @power_scale_mw_get: indicates if the power values provided are in milliWatts + * or in some other (abstract) scale */ struct scmi_perf_proto_ops { int (*limits_set)(const struct scmi_protocol_handle *ph, u32 domain, @@ -153,7 +157,7 @@ struct scmi_power_proto_ops { }; /** - * scmi_sensor_reading - represent a timestamped read + * struct scmi_sensor_reading - represent a timestamped read * * 
Used by @reading_get_timestamped method. * @@ -167,7 +171,7 @@ struct scmi_sensor_reading { }; /** - * scmi_range_attrs - specifies a sensor or axis values' range + * struct scmi_range_attrs - specifies a sensor or axis values' range * @min_range: The minimum value which can be represented by the sensor/axis. * @max_range: The maximum value which can be represented by the sensor/axis. */ @@ -177,7 +181,7 @@ struct scmi_range_attrs { }; /** - * scmi_sensor_axis_info - describes one sensor axes + * struct scmi_sensor_axis_info - describes one sensor axes * @id: The axes ID. * @type: Axes type. Chosen amongst one of @enum scmi_sensor_class. * @scale: Power-of-10 multiplier applied to the axis unit. @@ -205,8 +209,8 @@ struct scmi_sensor_axis_info { }; /** - * scmi_sensor_intervals_info - describes number and type of available update - * intervals + * struct scmi_sensor_intervals_info - describes number and type of available + * update intervals * @segmented: Flag for segmented intervals' representation. When True there * will be exactly 3 intervals in @desc, with each entry * representing a member of a segment in this order: diff --git a/include/linux/scpi_protocol.h b/include/linux/scpi_protocol.h index afbf8037d8db..d2176a56828a 100644 --- a/include/linux/scpi_protocol.h +++ b/include/linux/scpi_protocol.h @@ -51,6 +51,14 @@ struct scpi_sensor_info { * OPP is an index to the list return by @dvfs_get_info * @dvfs_get_info: returns the DVFS capabilities of the given power * domain. It includes the OPP list and the latency information + * @device_domain_id: gets the scpi domain id for a given device + * @get_transition_latency: gets the DVFS transition latency for a given device + * @add_opps_to_device: adds all the OPPs for a given device + * @sensor_get_capability: get the list of capabilities for the sensors + * @sensor_get_info: get the information of the specified sensor + * @sensor_get_value: gets the current value of the sensor + * @device_get_power_state: gets the power state of a power domain + * @device_set_power_state: sets the power state of a power domain */ struct scpi_ops { u32 (*get_version)(void); diff --git a/include/linux/security.h b/include/linux/security.h index 24eda04221e9..5b7288521300 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -120,10 +120,11 @@ enum lockdown_reason { LOCKDOWN_MMIOTRACE, LOCKDOWN_DEBUGFS, LOCKDOWN_XMON_WR, + LOCKDOWN_BPF_WRITE_USER, LOCKDOWN_INTEGRITY_MAX, LOCKDOWN_KCORE, LOCKDOWN_KPROBES, - LOCKDOWN_BPF_READ, + LOCKDOWN_BPF_READ_KERNEL, LOCKDOWN_PERF, LOCKDOWN_TRACEFS, LOCKDOWN_XMON_RW, diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 52d7fb92a69d..c58cc142d23f 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -518,6 +518,25 @@ static inline void uart_unlock_and_check_sysrq(struct uart_port *port) if (sysrq_ch) handle_sysrq(sysrq_ch); } + +static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port, + unsigned long flags) +{ + int sysrq_ch; + + if (!port->has_sysrq) { + spin_unlock_irqrestore(&port->lock, flags); + return; + } + + sysrq_ch = port->sysrq_ch; + port->sysrq_ch = 0; + + spin_unlock_irqrestore(&port->lock, flags); + + if (sysrq_ch) + handle_sysrq(sysrq_ch); +} #else /* CONFIG_MAGIC_SYSRQ_SERIAL */ static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch) { @@ -531,6 +550,11 @@ static inline void uart_unlock_and_check_sysrq(struct uart_port *port) { spin_unlock(&port->lock); } +static inline void 
uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port, + unsigned long flags) +{ + spin_unlock_irqrestore(&port->lock, flags); +} #endif /* CONFIG_MAGIC_SYSRQ_SERIAL */ /* diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index 96f319099744..14ab0c0bc924 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -285,11 +285,45 @@ static inline struct sk_psock *sk_psock(const struct sock *sk) return rcu_dereference_sk_user_data(sk); } +static inline void sk_psock_set_state(struct sk_psock *psock, + enum sk_psock_state_bits bit) +{ + set_bit(bit, &psock->state); +} + +static inline void sk_psock_clear_state(struct sk_psock *psock, + enum sk_psock_state_bits bit) +{ + clear_bit(bit, &psock->state); +} + +static inline bool sk_psock_test_state(const struct sk_psock *psock, + enum sk_psock_state_bits bit) +{ + return test_bit(bit, &psock->state); +} + +static inline void sock_drop(struct sock *sk, struct sk_buff *skb) +{ + sk_drops_add(sk, skb); + kfree_skb(skb); +} + +static inline void drop_sk_msg(struct sk_psock *psock, struct sk_msg *msg) +{ + if (msg->skb) + sock_drop(psock->sk, msg->skb); + kfree(msg); +} + static inline void sk_psock_queue_msg(struct sk_psock *psock, struct sk_msg *msg) { spin_lock_bh(&psock->ingress_lock); - list_add_tail(&msg->list, &psock->ingress_msg); + if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) + list_add_tail(&msg->list, &psock->ingress_msg); + else + drop_sk_msg(psock, msg); spin_unlock_bh(&psock->ingress_lock); } @@ -406,24 +440,6 @@ static inline void sk_psock_restore_proto(struct sock *sk, psock->psock_update_sk_prot(sk, psock, true); } -static inline void sk_psock_set_state(struct sk_psock *psock, - enum sk_psock_state_bits bit) -{ - set_bit(bit, &psock->state); -} - -static inline void sk_psock_clear_state(struct sk_psock *psock, - enum sk_psock_state_bits bit) -{ - clear_bit(bit, &psock->state); -} - -static inline bool sk_psock_test_state(const struct sk_psock *psock, - enum sk_psock_state_bits bit) -{ - return test_bit(bit, &psock->state); -} - static inline struct sk_psock *sk_psock_get(struct sock *sk) { struct sk_psock *psock; diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index d5ae621d66ba..a6f03b36fc4f 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -115,7 +115,9 @@ struct stmmac_axi { #define EST_GCL 1024 struct stmmac_est { + struct mutex lock; int enable; + u32 btr_reserve[2]; u32 btr_offset[2]; u32 btr[2]; u32 ctr[2]; diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index 54269e47ac9a..3ebfea0781f1 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -27,6 +27,7 @@ #define TEE_SHM_USER_MAPPED BIT(4) /* Memory mapped in user space */ #define TEE_SHM_POOL BIT(5) /* Memory allocated from pool */ #define TEE_SHM_KERNEL_MAPPED BIT(6) /* Memory mapped in kernel space */ +#define TEE_SHM_PRIV BIT(7) /* Memory private to TEE driver */ struct device; struct tee_device; @@ -332,6 +333,7 @@ void *tee_get_drvdata(struct tee_device *teedev); * @returns a pointer to 'struct tee_shm' */ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags); +struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size); /** * tee_shm_register() - Register shared memory buffer diff --git a/include/linux/usb/otg-fsm.h b/include/linux/usb/otg-fsm.h index 3aee78dda16d..784659d4dc99 100644 --- a/include/linux/usb/otg-fsm.h +++ b/include/linux/usb/otg-fsm.h @@ -196,6 +196,7 @@ struct otg_fsm { struct mutex lock; u8 *host_req_flag; struct 
delayed_work hnp_polling_work; + bool hnp_work_inited; bool state_changed; }; diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index 3357ac98878d..8cfe49d201dd 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -277,6 +277,17 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent, const struct vdpa_config_ops *config, size_t size, const char *name); +/** + * vdpa_alloc_device - allocate and initialize a vDPA device + * + * @dev_struct: the type of the parent structure + * @member: the name of struct vdpa_device within the @dev_struct + * @parent: the parent device + * @config: the bus operations that are supported by this device + * @name: name of the vdpa device + * + * Return allocated data structure or ERR_PTR upon error + */ #define vdpa_alloc_device(dev_struct, member, parent, config, name) \ container_of(__vdpa_alloc_device( \ parent, config, \ diff --git a/include/linux/virtio.h b/include/linux/virtio.h index b1894e0323fa..41edbc01ffa4 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -110,6 +110,7 @@ struct virtio_device { bool config_enabled; bool config_change_pending; spinlock_t config_lock; + spinlock_t vqs_list_lock; /* Protects VQs list access */ struct device dev; struct virtio_device_id id; const struct virtio_config_ops *config; diff --git a/include/linux/vringh.h b/include/linux/vringh.h index 84db7b8f912f..212892cf9822 100644 --- a/include/linux/vringh.h +++ b/include/linux/vringh.h @@ -14,6 +14,7 @@ #include <linux/virtio_byteorder.h> #include <linux/uio.h> #include <linux/slab.h> +#include <linux/spinlock.h> #if IS_REACHABLE(CONFIG_VHOST_IOTLB) #include <linux/dma-direction.h> #include <linux/vhost_iotlb.h> diff --git a/include/math-emu/op-common.h b/include/math-emu/op-common.h index 143568d64b20..4b57bbba588a 100644 --- a/include/math-emu/op-common.h +++ b/include/math-emu/op-common.h @@ -338,7 +338,7 @@ do { \ FP_SET_EXCEPTION(FP_EX_INVALID | FP_EX_INVALID_ISI); \ break; \ } \ - /* FALLTHRU */ \ + fallthrough; \ \ case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NORMAL): \ case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_ZERO): \ diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index a53e94459ecd..db4312e44d47 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1230,6 +1230,7 @@ struct hci_dev *hci_alloc_dev(void); void hci_free_dev(struct hci_dev *hdev); int hci_register_dev(struct hci_dev *hdev); void hci_unregister_dev(struct hci_dev *hdev); +void hci_cleanup_dev(struct hci_dev *hdev); int hci_suspend_dev(struct hci_dev *hdev); int hci_resume_dev(struct hci_dev *hdev); int hci_reset_dev(struct hci_dev *hdev); diff --git a/include/net/bonding.h b/include/net/bonding.h index 15335732e166..625d9c72dee3 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -201,6 +201,11 @@ struct bond_up_slave { */ #define BOND_LINK_NOCHANGE -1 +struct bond_ipsec { + struct list_head list; + struct xfrm_state *xs; +}; + /* * Here are the locking policies for the two bonding locks: * Get rcu_read_lock when reading or RTNL when writing slave list.
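[Illustration, not part of the diff: the kernel-doc added in the vdpa.h hunk above documents the vdpa_alloc_device() macro. A minimal sketch of the intended call pattern, using the signature shown in the hunk; the my_vdpa structure and my_config_ops table are hypothetical driver names.]

struct my_vdpa {
	struct vdpa_device vdpa;	/* embedded, named by the @member argument */
	void *priv;
};

static const struct vdpa_config_ops my_config_ops;	/* hypothetical ops table */

static struct my_vdpa *my_vdpa_probe(struct device *parent)
{
	/* Allocates sizeof(struct my_vdpa), initializes the embedded
	 * vdpa_device, and container_of() in the macro hands back the
	 * enclosing structure (or an ERR_PTR-encoded error). */
	return vdpa_alloc_device(struct my_vdpa, vdpa, parent,
				 &my_config_ops, "my-vdpa");
}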
@@ -249,7 +254,9 @@ struct bonding { #endif /* CONFIG_DEBUG_FS */ struct rtnl_link_stats64 bond_stats; #ifdef CONFIG_XFRM_OFFLOAD - struct xfrm_state *xs; + struct list_head ipsec_list; + /* protecting ipsec_list */ + spinlock_t ipsec_lock; #endif /* CONFIG_XFRM_OFFLOAD */ }; diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h index 73af4a64a599..40296ed976a9 100644 --- a/include/net/busy_poll.h +++ b/include/net/busy_poll.h @@ -38,7 +38,7 @@ static inline bool net_busy_loop_on(void) static inline bool sk_can_busy_loop(const struct sock *sk) { - return sk->sk_ll_usec && !signal_pending(current); + return READ_ONCE(sk->sk_ll_usec) && !signal_pending(current); } bool sk_busy_loop_end(void *p, unsigned long start_time); diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h deleted file mode 100644 index 552cf68d28d2..000000000000 --- a/include/net/caif/caif_hsi.h +++ /dev/null @@ -1,200 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) ST-Ericsson AB 2010 - * Author: Daniel Martensson / daniel.martensson@stericsson.com - * Dmitry.Tarnyagin / dmitry.tarnyagin@stericsson.com - */ - -#ifndef CAIF_HSI_H_ -#define CAIF_HSI_H_ - -#include <net/caif/caif_layer.h> -#include <net/caif/caif_device.h> -#include <linux/atomic.h> - -/* - * Maximum number of CAIF frames that can reside in the same HSI frame. - */ -#define CFHSI_MAX_PKTS 15 - -/* - * Maximum number of bytes used for the frame that can be embedded in the - * HSI descriptor. - */ -#define CFHSI_MAX_EMB_FRM_SZ 96 - -/* - * Decides if HSI buffers should be prefilled with 0xFF pattern for easier - * debugging. Both TX and RX buffers will be filled before the transfer. - */ -#define CFHSI_DBG_PREFILL 0 - -/* Structure describing a HSI packet descriptor. */ -#pragma pack(1) /* Byte alignment. */ -struct cfhsi_desc { - u8 header; - u8 offset; - u16 cffrm_len[CFHSI_MAX_PKTS]; - u8 emb_frm[CFHSI_MAX_EMB_FRM_SZ]; -}; -#pragma pack() /* Default alignment. */ - -/* Size of the complete HSI packet descriptor. */ -#define CFHSI_DESC_SZ (sizeof(struct cfhsi_desc)) - -/* - * Size of the complete HSI packet descriptor excluding the optional embedded - * CAIF frame. - */ -#define CFHSI_DESC_SHORT_SZ (CFHSI_DESC_SZ - CFHSI_MAX_EMB_FRM_SZ) - -/* - * Maximum bytes transferred in one transfer. - */ -#define CFHSI_MAX_CAIF_FRAME_SZ 4096 - -#define CFHSI_MAX_PAYLOAD_SZ (CFHSI_MAX_PKTS * CFHSI_MAX_CAIF_FRAME_SZ) - -/* Size of the complete HSI TX buffer. */ -#define CFHSI_BUF_SZ_TX (CFHSI_DESC_SZ + CFHSI_MAX_PAYLOAD_SZ) - -/* Size of the complete HSI RX buffer. */ -#define CFHSI_BUF_SZ_RX ((2 * CFHSI_DESC_SZ) + CFHSI_MAX_PAYLOAD_SZ) - -/* Bitmasks for the HSI descriptor. */ -#define CFHSI_PIGGY_DESC (0x01 << 7) - -#define CFHSI_TX_STATE_IDLE 0 -#define CFHSI_TX_STATE_XFER 1 - -#define CFHSI_RX_STATE_DESC 0 -#define CFHSI_RX_STATE_PAYLOAD 1 - -/* Bitmasks for power management. */ -#define CFHSI_WAKE_UP 0 -#define CFHSI_WAKE_UP_ACK 1 -#define CFHSI_WAKE_DOWN_ACK 2 -#define CFHSI_AWAKE 3 -#define CFHSI_WAKELOCK_HELD 4 -#define CFHSI_SHUTDOWN 5 -#define CFHSI_FLUSH_FIFO 6 - -#ifndef CFHSI_INACTIVITY_TOUT -#define CFHSI_INACTIVITY_TOUT (1 * HZ) -#endif /* CFHSI_INACTIVITY_TOUT */ - -#ifndef CFHSI_WAKE_TOUT -#define CFHSI_WAKE_TOUT (3 * HZ) -#endif /* CFHSI_WAKE_TOUT */ - -#ifndef CFHSI_MAX_RX_RETRIES -#define CFHSI_MAX_RX_RETRIES (10 * HZ) -#endif - -/* Structure implemented by the CAIF HSI driver. 
*/ -struct cfhsi_cb_ops { - void (*tx_done_cb) (struct cfhsi_cb_ops *drv); - void (*rx_done_cb) (struct cfhsi_cb_ops *drv); - void (*wake_up_cb) (struct cfhsi_cb_ops *drv); - void (*wake_down_cb) (struct cfhsi_cb_ops *drv); -}; - -/* Structure implemented by HSI device. */ -struct cfhsi_ops { - int (*cfhsi_up) (struct cfhsi_ops *dev); - int (*cfhsi_down) (struct cfhsi_ops *dev); - int (*cfhsi_tx) (u8 *ptr, int len, struct cfhsi_ops *dev); - int (*cfhsi_rx) (u8 *ptr, int len, struct cfhsi_ops *dev); - int (*cfhsi_wake_up) (struct cfhsi_ops *dev); - int (*cfhsi_wake_down) (struct cfhsi_ops *dev); - int (*cfhsi_get_peer_wake) (struct cfhsi_ops *dev, bool *status); - int (*cfhsi_fifo_occupancy) (struct cfhsi_ops *dev, size_t *occupancy); - int (*cfhsi_rx_cancel)(struct cfhsi_ops *dev); - struct cfhsi_cb_ops *cb_ops; -}; - -/* Structure holds status of received CAIF frames processing */ -struct cfhsi_rx_state { - int state; - int nfrms; - int pld_len; - int retries; - bool piggy_desc; -}; - -/* Priority mapping */ -enum { - CFHSI_PRIO_CTL = 0, - CFHSI_PRIO_VI, - CFHSI_PRIO_VO, - CFHSI_PRIO_BEBK, - CFHSI_PRIO_LAST, -}; - -struct cfhsi_config { - u32 inactivity_timeout; - u32 aggregation_timeout; - u32 head_align; - u32 tail_align; - u32 q_high_mark; - u32 q_low_mark; -}; - -/* Structure implemented by CAIF HSI drivers. */ -struct cfhsi { - struct caif_dev_common cfdev; - struct net_device *ndev; - struct platform_device *pdev; - struct sk_buff_head qhead[CFHSI_PRIO_LAST]; - struct cfhsi_cb_ops cb_ops; - struct cfhsi_ops *ops; - int tx_state; - struct cfhsi_rx_state rx_state; - struct cfhsi_config cfg; - int rx_len; - u8 *rx_ptr; - u8 *tx_buf; - u8 *rx_buf; - u8 *rx_flip_buf; - spinlock_t lock; - int flow_off_sent; - struct list_head list; - struct work_struct wake_up_work; - struct work_struct wake_down_work; - struct work_struct out_of_sync_work; - struct workqueue_struct *wq; - wait_queue_head_t wake_up_wait; - wait_queue_head_t wake_down_wait; - wait_queue_head_t flush_fifo_wait; - struct timer_list inactivity_timer; - struct timer_list rx_slowpath_timer; - - /* TX aggregation */ - int aggregation_len; - struct timer_list aggregation_timer; - - unsigned long bits; -}; -extern struct platform_driver cfhsi_driver; - -/** - * enum ifla_caif_hsi - CAIF HSI NetlinkRT parameters. - * @IFLA_CAIF_HSI_INACTIVITY_TOUT: Inactivity timeout before - * taking the HSI wakeline down, in milliseconds. - * When using RT Netlink to create, destroy or configure a CAIF HSI interface, - * enum ifla_caif_hsi is used to specify the configuration attributes. 
- */ -enum ifla_caif_hsi { - __IFLA_CAIF_HSI_UNSPEC, - __IFLA_CAIF_HSI_INACTIVITY_TOUT, - __IFLA_CAIF_HSI_AGGREGATION_TOUT, - __IFLA_CAIF_HSI_HEAD_ALIGN, - __IFLA_CAIF_HSI_TAIL_ALIGN, - __IFLA_CAIF_HSI_QHIGH_WATERMARK, - __IFLA_CAIF_HSI_QLOW_WATERMARK, - __IFLA_CAIF_HSI_MAX -}; - -struct cfhsi_ops *cfhsi_get_ops(void); - -#endif /* CAIF_HSI_H_ */ diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h index 56cb3c38569a..14efa0ded75d 100644 --- a/include/net/dst_metadata.h +++ b/include/net/dst_metadata.h @@ -45,7 +45,9 @@ skb_tunnel_info(const struct sk_buff *skb) return &md_dst->u.tun_info; dst = skb_dst(skb); - if (dst && dst->lwtstate) + if (dst && dst->lwtstate && + (dst->lwtstate->type == LWTUNNEL_ENCAP_IP || + dst->lwtstate->type == LWTUNNEL_ENCAP_IP6)) return lwt_tun_info(dst->lwtstate); return NULL; diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index 69c9eabf8325..1b9d75aedb22 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -293,7 +293,7 @@ static inline bool flow_action_has_entries(const struct flow_action *action) } /** - * flow_action_has_one_action() - check if exactly one action is present + * flow_offload_has_one_action() - check if exactly one action is present * @action: tc filter flow offload action * * Returns true if exactly one action is present. @@ -319,14 +319,12 @@ flow_action_mixed_hw_stats_check(const struct flow_action *action, if (flow_offload_has_one_action(action)) return true; - if (action) { - flow_action_for_each(i, action_entry, action) { - if (i && action_entry->hw_stats != last_hw_stats) { - NL_SET_ERR_MSG_MOD(extack, "Mixing HW stats types for actions is not supported"); - return false; - } - last_hw_stats = action_entry->hw_stats; + flow_action_for_each(i, action_entry, action) { + if (i && action_entry->hw_stats != last_hw_stats) { + NL_SET_ERR_MSG_MOD(extack, "Mixing HW stats types for actions is not supported"); + return false; } + last_hw_stats = action_entry->hw_stats; } return true; } diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index f14149df5a65..0bf09a9bca4e 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -263,9 +263,9 @@ static inline bool ipv6_anycast_destination(const struct dst_entry *dst, int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, int (*output)(struct net *, struct sock *, struct sk_buff *)); -static inline int ip6_skb_dst_mtu(struct sk_buff *skb) +static inline unsigned int ip6_skb_dst_mtu(struct sk_buff *skb) { - int mtu; + unsigned int mtu; struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ? 
inet6_sk(skb->sk) : NULL; diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h index c0f0a13ed818..49aa79c7b278 100644 --- a/include/net/llc_pdu.h +++ b/include/net/llc_pdu.h @@ -15,9 +15,11 @@ #include <linux/if_ether.h> /* Lengths of frame formats */ -#define LLC_PDU_LEN_I 4 /* header and 2 control bytes */ -#define LLC_PDU_LEN_S 4 -#define LLC_PDU_LEN_U 3 /* header and 1 control byte */ +#define LLC_PDU_LEN_I 4 /* header and 2 control bytes */ +#define LLC_PDU_LEN_S 4 +#define LLC_PDU_LEN_U 3 /* header and 1 control byte */ +/* header and 1 control byte and XID info */ +#define LLC_PDU_LEN_U_XID (LLC_PDU_LEN_U + sizeof(struct llc_xid_info)) /* Known SAP addresses */ #define LLC_GLOBAL_SAP 0xFF #define LLC_NULL_SAP 0x00 /* not network-layer visible */ @@ -50,9 +52,10 @@ #define LLC_PDU_TYPE_U_MASK 0x03 /* 8-bit control field */ #define LLC_PDU_TYPE_MASK 0x03 -#define LLC_PDU_TYPE_I 0 /* first bit */ -#define LLC_PDU_TYPE_S 1 /* first two bits */ -#define LLC_PDU_TYPE_U 3 /* first two bits */ +#define LLC_PDU_TYPE_I 0 /* first bit */ +#define LLC_PDU_TYPE_S 1 /* first two bits */ +#define LLC_PDU_TYPE_U 3 /* first two bits */ +#define LLC_PDU_TYPE_U_XID 4 /* private type for detecting XID commands */ #define LLC_PDU_TYPE_IS_I(pdu) \ ((!(pdu->ctrl_1 & LLC_PDU_TYPE_I_MASK)) ? 1 : 0) @@ -230,9 +233,18 @@ static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb) static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type, u8 ssap, u8 dsap, u8 cr) { - const int hlen = type == LLC_PDU_TYPE_U ? 3 : 4; + int hlen = 4; /* default value for I and S types */ struct llc_pdu_un *pdu; + switch (type) { + case LLC_PDU_TYPE_U: + hlen = 3; + break; + case LLC_PDU_TYPE_U_XID: + hlen = 6; + break; + } + skb_push(skb, hlen); skb_reset_network_header(skb); pdu = llc_pdu_un_hdr(skb); @@ -374,7 +386,10 @@ static inline void llc_pdu_init_as_xid_cmd(struct sk_buff *skb, xid_info->fmt_id = LLC_XID_FMT_ID; /* 0x81 */ xid_info->type = svcs_supported; xid_info->rw = rx_window << 1; /* size of receive window */ - skb_put(skb, sizeof(struct llc_xid_info)); + + /* no need to push/put since llc_pdu_header_init() has already + * pushed 3 + 3 bytes + */ } /** diff --git a/include/net/mptcp.h b/include/net/mptcp.h index cb580b06152f..8b5af683a818 100644 --- a/include/net/mptcp.h +++ b/include/net/mptcp.h @@ -105,7 +105,7 @@ bool mptcp_synack_options(const struct request_sock *req, unsigned int *size, bool mptcp_established_options(struct sock *sk, struct sk_buff *skb, unsigned int *size, unsigned int remaining, struct mptcp_out_options *opts); -void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb); +bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb); void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp, struct mptcp_out_options *opts); @@ -227,9 +227,10 @@ static inline bool mptcp_established_options(struct sock *sk, return false; } -static inline void mptcp_incoming_options(struct sock *sk, +static inline bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb) { + return true; } static inline void mptcp_skb_ext_move(struct sk_buff *to, diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h index 09f2efea0b97..13807ea94cd2 100644 --- a/include/net/netfilter/nf_conntrack_core.h +++ b/include/net/netfilter/nf_conntrack_core.h @@ -30,7 +30,6 @@ void nf_conntrack_cleanup_net(struct net *net); void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list); void nf_conntrack_proto_pernet_init(struct net *net); 
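[Illustration, not part of the diff: the llc_pdu.h hunk above makes llc_pdu_header_init() reserve the XID info bytes up front via the private LLC_PDU_TYPE_U_XID type, so llc_pdu_init_as_xid_cmd() fills the PDU in place instead of calling skb_put() past the pushed header. A sketch of the resulting transmit-side sequence, assuming an skb with sufficient headroom and the usual constants from <net/llc_pdu.h>:]

#include <net/llc_pdu.h>

static void send_xid_sketch(struct sk_buff *skb, u8 ssap, u8 dsap)
{
	/* Pushes 6 bytes at once: 3 header bytes + 3 XID info bytes. */
	llc_pdu_header_init(skb, LLC_PDU_TYPE_U_XID, ssap, dsap, LLC_PDU_CMD);
	/* Fills fmt_id/type/rw in the already-reserved space; note that
	 * after this patch there is no skb_put() here anymore. */
	llc_pdu_init_as_xid_cmd(skb, LLC_XID_NULL_CLASS_2, 0);
}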
-void nf_conntrack_proto_pernet_fini(struct net *net); int nf_conntrack_proto_init(void); void nf_conntrack_proto_fini(void); diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h index c3094b83a525..fefd38db95b3 100644 --- a/include/net/netns/conntrack.h +++ b/include/net/netns/conntrack.h @@ -27,9 +27,9 @@ struct nf_tcp_net { u8 tcp_loose; u8 tcp_be_liberal; u8 tcp_max_retrans; + u8 tcp_ignore_invalid_rst; #if IS_ENABLED(CONFIG_NF_FLOW_TABLE) unsigned int offload_timeout; - unsigned int offload_pickup; #endif }; @@ -43,7 +43,6 @@ struct nf_udp_net { unsigned int timeouts[UDP_CT_MAX]; #if IS_ENABLED(CONFIG_NF_FLOW_TABLE) unsigned int offload_timeout; - unsigned int offload_pickup; #endif }; diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h index e946366e8ba5..1f4e1816fd36 100644 --- a/include/net/netns/xfrm.h +++ b/include/net/netns/xfrm.h @@ -75,6 +75,7 @@ struct netns_xfrm { #endif spinlock_t xfrm_state_lock; seqcount_spinlock_t xfrm_state_hash_generation; + seqcount_spinlock_t xfrm_policy_hash_generation; spinlock_t xfrm_policy_lock; struct mutex xfrm_cfg_mutex; diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index ec7823921bd2..298a8d10168b 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -337,6 +337,9 @@ int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts); /** * struct tcf_pkt_info - packet information + * + * @ptr: start of the pkt data + * @nexthdr: offset of the next header */ struct tcf_pkt_info { unsigned char * ptr; @@ -355,6 +358,7 @@ struct tcf_ematch_ops; * @ops: the operations lookup table of the corresponding ematch module * @datalen: length of the ematch specific configuration data * @data: ematch specific data + * @net: the network namespace */ struct tcf_ematch { struct tcf_ematch_ops * ops; diff --git a/include/net/psample.h b/include/net/psample.h index e328c5127757..0509d2d6be67 100644 --- a/include/net/psample.h +++ b/include/net/psample.h @@ -31,6 +31,8 @@ struct psample_group *psample_group_get(struct net *net, u32 group_num); void psample_group_take(struct psample_group *group); void psample_group_put(struct psample_group *group); +struct sk_buff; + #if IS_ENABLED(CONFIG_PSAMPLE) void psample_sample_packet(struct psample_group *group, struct sk_buff *skb, diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index 265fffa33dad..5859e0a16a58 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -360,8 +360,7 @@ enum { #define SCTP_SCOPE_POLICY_MAX SCTP_SCOPE_POLICY_LINK /* Based on IPv4 scoping <draft-stewart-tsvwg-sctp-ipv4-00.txt>, - * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 198.18.0.0/24, - * 192.88.99.0/24. + * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 192.88.99.0/24. * Also, RFC 8.4, non-unicast addresses are not considered valid SCTP * addresses. */ @@ -369,7 +368,6 @@ enum { ((htonl(INADDR_BROADCAST) == a) || \ ipv4_is_multicast(a) || \ ipv4_is_zeronet(a) || \ - ipv4_is_test_198(a) || \ ipv4_is_anycast_6to4(a)) /* Flags used for the bind address copy functions. 
*/ diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 32fc4a309df5..651bba654d77 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -984,6 +984,7 @@ struct sctp_transport { } cacc; struct { + __u32 last_rtx_chunks; __u16 pmtu; __u16 probe_size; __u16 probe_high; @@ -1024,8 +1025,8 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu); void sctp_transport_immediate_rtx(struct sctp_transport *); void sctp_transport_dst_release(struct sctp_transport *t); void sctp_transport_dst_confirm(struct sctp_transport *t); -void sctp_transport_pl_send(struct sctp_transport *t); -void sctp_transport_pl_recv(struct sctp_transport *t); +bool sctp_transport_pl_send(struct sctp_transport *t); +bool sctp_transport_pl_recv(struct sctp_transport *t); /* This is the structure we use to queue packets as they come into diff --git a/include/net/sock.h b/include/net/sock.h index 8bdd80027ffb..f23cb259b0e2 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -316,7 +316,9 @@ struct bpf_local_storage; * @sk_timer: sock cleanup timer * @sk_stamp: time stamp of last packet received * @sk_stamp_seq: lock for accessing sk_stamp on 32 bit architectures only - * @sk_tsflags: SO_TIMESTAMPING socket options + * @sk_tsflags: SO_TIMESTAMPING flags + * @sk_bind_phc: SO_TIMESTAMPING bind PHC index of PTP virtual clock + * for timestamping * @sk_tskey: counter to disambiguate concurrent tstamp requests * @sk_zckey: counter to order MSG_ZEROCOPY notifications * @sk_socket: Identd and reporting IO signals @@ -493,6 +495,7 @@ struct sock { seqlock_t sk_stamp_seq; #endif u16 sk_tsflags; + int sk_bind_phc; u8 sk_shutdown; u32 sk_tskey; atomic_t sk_zckey; @@ -2755,7 +2758,8 @@ void sock_def_readable(struct sock *sk); int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk); void sock_set_timestamp(struct sock *sk, int optname, bool valbool); -int sock_set_timestamping(struct sock *sk, int optname, int val); +int sock_set_timestamping(struct sock *sk, int optname, + struct so_timestamping timestamping); void sock_enable_timestamps(struct sock *sk); void sock_no_linger(struct sock *sk); diff --git a/include/net/tcp.h b/include/net/tcp.h index e668f1bf780d..784d5c3ef1c5 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -686,6 +686,10 @@ static inline u32 __tcp_set_rto(const struct tcp_sock *tp) static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd) { + /* mptcp hooks are only on the slow path */ + if (sk_is_mptcp((struct sock *)tp)) + return; + tp->pred_flags = htonl((tp->tcp_header_len << 26) | ntohl(TCP_FLAG_ACK) | snd_wnd); @@ -1705,7 +1709,6 @@ struct tcp_fastopen_context { struct rcu_head rcu; }; -extern unsigned int sysctl_tcp_fastopen_blackhole_timeout; void tcp_fastopen_active_disable(struct sock *sk); bool tcp_fastopen_active_should_disable(struct sock *sk); void tcp_fastopen_active_disable_ofo_check(struct sock *sk); diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h index e19c2504a14b..1066b1194a5a 100644 --- a/include/soc/tegra/mc.h +++ b/include/soc/tegra/mc.h @@ -237,14 +237,19 @@ unsigned int tegra_mc_get_emem_device_count(struct tegra_mc *mc); #ifdef CONFIG_TEGRA_MC struct tegra_mc *devm_tegra_memory_controller_get(struct device *dev); +int tegra_mc_probe_device(struct tegra_mc *mc, struct device *dev); #else static inline struct tegra_mc * devm_tegra_memory_controller_get(struct device *dev) { return ERR_PTR(-ENODEV); } -#endif -int tegra_mc_probe_device(struct tegra_mc *mc, struct device *dev); +static 
inline int +tegra_mc_probe_device(struct tegra_mc *mc, struct device *dev) +{ + return -ENODEV; +} +#endif #endif /* __SOC_TEGRA_MC_H__ */ diff --git a/include/sound/soc.h b/include/sound/soc.h index 675849d07284..8e6dd8a257c5 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -712,6 +712,12 @@ struct snd_soc_dai_link { /* Do not create a PCM for this DAI link (Backend link) */ unsigned int ignore:1; + /* This flag will reorder stop sequence. By enabling this flag + * DMA controller stop sequence will be invoked first followed by + * CPU DAI driver stop sequence + */ + unsigned int stop_dma_first:1; + #ifdef CONFIG_SND_SOC_TOPOLOGY struct snd_soc_dobj dobj; /* For topology */ #endif diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h index 3ccf591b2374..9f73ed2cf061 100644 --- a/include/trace/events/afs.h +++ b/include/trace/events/afs.h @@ -174,6 +174,34 @@ enum afs_vl_operation { afs_VL_GetCapabilities = 65537, /* AFS Get VL server capabilities */ }; +enum afs_cm_operation { + afs_CB_CallBack = 204, /* AFS break callback promises */ + afs_CB_InitCallBackState = 205, /* AFS initialise callback state */ + afs_CB_Probe = 206, /* AFS probe client */ + afs_CB_GetLock = 207, /* AFS get contents of CM lock table */ + afs_CB_GetCE = 208, /* AFS get cache file description */ + afs_CB_GetXStatsVersion = 209, /* AFS get version of extended statistics */ + afs_CB_GetXStats = 210, /* AFS get contents of extended statistics data */ + afs_CB_InitCallBackState3 = 213, /* AFS initialise callback state, version 3 */ + afs_CB_ProbeUuid = 214, /* AFS check the client hasn't rebooted */ +}; + +enum yfs_cm_operation { + yfs_CB_Probe = 206, /* YFS probe client */ + yfs_CB_GetLock = 207, /* YFS get contents of CM lock table */ + yfs_CB_XStatsVersion = 209, /* YFS get version of extended statistics */ + yfs_CB_GetXStats = 210, /* YFS get contents of extended statistics data */ + yfs_CB_InitCallBackState3 = 213, /* YFS initialise callback state, version 3 */ + yfs_CB_ProbeUuid = 214, /* YFS check the client hasn't rebooted */ + yfs_CB_GetServerPrefs = 215, + yfs_CB_GetCellServDV = 216, + yfs_CB_GetLocalCell = 217, + yfs_CB_GetCacheConfig = 218, + yfs_CB_GetCellByNum = 65537, + yfs_CB_TellMeAboutYourself = 65538, /* get client capabilities */ + yfs_CB_CallBack = 64204, +}; + enum afs_edit_dir_op { afs_edit_dir_create, afs_edit_dir_create_error, @@ -436,6 +464,32 @@ enum afs_cb_break_reason { EM(afs_YFSVL_GetCellName, "YFSVL.GetCellName") \ E_(afs_VL_GetCapabilities, "VL.GetCapabilities") +#define afs_cm_operations \ + EM(afs_CB_CallBack, "CB.CallBack") \ + EM(afs_CB_InitCallBackState, "CB.InitCallBackState") \ + EM(afs_CB_Probe, "CB.Probe") \ + EM(afs_CB_GetLock, "CB.GetLock") \ + EM(afs_CB_GetCE, "CB.GetCE") \ + EM(afs_CB_GetXStatsVersion, "CB.GetXStatsVersion") \ + EM(afs_CB_GetXStats, "CB.GetXStats") \ + EM(afs_CB_InitCallBackState3, "CB.InitCallBackState3") \ + E_(afs_CB_ProbeUuid, "CB.ProbeUuid") + +#define yfs_cm_operations \ + EM(yfs_CB_Probe, "YFSCB.Probe") \ + EM(yfs_CB_GetLock, "YFSCB.GetLock") \ + EM(yfs_CB_XStatsVersion, "YFSCB.XStatsVersion") \ + EM(yfs_CB_GetXStats, "YFSCB.GetXStats") \ + EM(yfs_CB_InitCallBackState3, "YFSCB.InitCallBackState3") \ + EM(yfs_CB_ProbeUuid, "YFSCB.ProbeUuid") \ + EM(yfs_CB_GetServerPrefs, "YFSCB.GetServerPrefs") \ + EM(yfs_CB_GetCellServDV, "YFSCB.GetCellServDV") \ + EM(yfs_CB_GetLocalCell, "YFSCB.GetLocalCell") \ + EM(yfs_CB_GetCacheConfig, "YFSCB.GetCacheConfig") \ + EM(yfs_CB_GetCellByNum, "YFSCB.GetCellByNum") \ + 
EM(yfs_CB_TellMeAboutYourself, "YFSCB.TellMeAboutYourself") \ + E_(yfs_CB_CallBack, "YFSCB.CallBack") + #define afs_edit_dir_ops \ EM(afs_edit_dir_create, "create") \ EM(afs_edit_dir_create_error, "c_fail") \ @@ -569,6 +623,8 @@ afs_server_traces; afs_cell_traces; afs_fs_operations; afs_vl_operations; +afs_cm_operations; +yfs_cm_operations; afs_edit_dir_ops; afs_edit_dir_reasons; afs_eproto_causes; @@ -649,20 +705,21 @@ TRACE_EVENT(afs_cb_call, TP_STRUCT__entry( __field(unsigned int, call ) - __field(const char *, name ) __field(u32, op ) + __field(u16, service_id ) ), TP_fast_assign( __entry->call = call->debug_id; - __entry->name = call->type->name; __entry->op = call->operation_ID; + __entry->service_id = call->service_id; ), - TP_printk("c=%08x %s o=%u", + TP_printk("c=%08x %s", __entry->call, - __entry->name, - __entry->op) + __entry->service_id == 2501 ? + __print_symbolic(__entry->op, yfs_cm_operations) : + __print_symbolic(__entry->op, afs_cm_operations)) ); TRACE_EVENT(afs_call, diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index 390270e00a1d..f160484afc5c 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -48,7 +48,9 @@ {(unsigned long)__GFP_WRITE, "__GFP_WRITE"}, \ {(unsigned long)__GFP_RECLAIM, "__GFP_RECLAIM"}, \ {(unsigned long)__GFP_DIRECT_RECLAIM, "__GFP_DIRECT_RECLAIM"},\ - {(unsigned long)__GFP_KSWAPD_RECLAIM, "__GFP_KSWAPD_RECLAIM"}\ + {(unsigned long)__GFP_KSWAPD_RECLAIM, "__GFP_KSWAPD_RECLAIM"},\ + {(unsigned long)__GFP_ZEROTAGS, "__GFP_ZEROTAGS"}, \ + {(unsigned long)__GFP_SKIP_KASAN_POISON,"__GFP_SKIP_KASAN_POISON"}\ #define show_gfp_flags(flags) \ (flags) ? __print_flags(flags, "|", \ diff --git a/include/trace/events/net.h b/include/trace/events/net.h index 2399073c3afc..78c448c6ab4c 100644 --- a/include/trace/events/net.h +++ b/include/trace/events/net.h @@ -136,7 +136,7 @@ DECLARE_EVENT_CLASS(net_dev_template, __assign_str(name, skb->dev->name); ), - TP_printk("dev=%s skbaddr=%p len=%u", + TP_printk("dev=%s skbaddr=%px len=%u", __get_str(name), __entry->skbaddr, __entry->len) ) diff --git a/include/trace/events/qdisc.h b/include/trace/events/qdisc.h index 330d32d84485..c3006c6b4a87 100644 --- a/include/trace/events/qdisc.h +++ b/include/trace/events/qdisc.h @@ -41,11 +41,37 @@ TRACE_EVENT(qdisc_dequeue, __entry->txq_state = txq->state; ), - TP_printk("dequeue ifindex=%d qdisc handle=0x%X parent=0x%X txq_state=0x%lX packets=%d skbaddr=%p", + TP_printk("dequeue ifindex=%d qdisc handle=0x%X parent=0x%X txq_state=0x%lX packets=%d skbaddr=%px", __entry->ifindex, __entry->handle, __entry->parent, __entry->txq_state, __entry->packets, __entry->skbaddr ) ); +TRACE_EVENT(qdisc_enqueue, + + TP_PROTO(struct Qdisc *qdisc, const struct netdev_queue *txq, struct sk_buff *skb), + + TP_ARGS(qdisc, txq, skb), + + TP_STRUCT__entry( + __field(struct Qdisc *, qdisc) + __field(void *, skbaddr) + __field(int, ifindex) + __field(u32, handle) + __field(u32, parent) + ), + + TP_fast_assign( + __entry->qdisc = qdisc; + __entry->skbaddr = skb; + __entry->ifindex = txq->dev ? 
txq->dev->ifindex : 0; + __entry->handle = qdisc->handle; + __entry->parent = qdisc->parent; + ), + + TP_printk("enqueue ifindex=%d qdisc handle=0x%X parent=0x%X skbaddr=%px", + __entry->ifindex, __entry->handle, __entry->parent, __entry->skbaddr) +); + TRACE_EVENT(qdisc_reset, TP_PROTO(struct Qdisc *q), diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h index c7135c9c37a5..b3b93710eff7 100644 --- a/include/uapi/linux/ethtool_netlink.h +++ b/include/uapi/linux/ethtool_netlink.h @@ -46,6 +46,7 @@ enum { ETHTOOL_MSG_FEC_SET, ETHTOOL_MSG_MODULE_EEPROM_GET, ETHTOOL_MSG_STATS_GET, + ETHTOOL_MSG_PHC_VCLOCKS_GET, /* add new constants above here */ __ETHTOOL_MSG_USER_CNT, @@ -88,6 +89,7 @@ enum { ETHTOOL_MSG_FEC_NTF, ETHTOOL_MSG_MODULE_EEPROM_GET_REPLY, ETHTOOL_MSG_STATS_GET_REPLY, + ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY, /* add new constants above here */ __ETHTOOL_MSG_KERNEL_CNT, @@ -440,6 +442,19 @@ enum { ETHTOOL_A_TSINFO_MAX = (__ETHTOOL_A_TSINFO_CNT - 1) }; +/* PHC VCLOCKS */ + +enum { + ETHTOOL_A_PHC_VCLOCKS_UNSPEC, + ETHTOOL_A_PHC_VCLOCKS_HEADER, /* nest - _A_HEADER_* */ + ETHTOOL_A_PHC_VCLOCKS_NUM, /* u32 */ + ETHTOOL_A_PHC_VCLOCKS_INDEX, /* array, s32 */ + + /* add new constants above here */ + __ETHTOOL_A_PHC_VCLOCKS_CNT, + ETHTOOL_A_PHC_VCLOCKS_MAX = (__ETHTOOL_A_PHC_VCLOCKS_CNT - 1) +}; + /* CABLE TEST */ enum { diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h index e33997b4d750..edc346a77c91 100644 --- a/include/uapi/linux/idxd.h +++ b/include/uapi/linux/idxd.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* SPDX-License-Identifier: LGPL-2.1 WITH Linux-syscall-note */ /* Copyright(c) 2019 Intel Corporation. All rights rsvd. */ #ifndef _USR_IDXD_H_ #define _USR_IDXD_H_ diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h index dc8b72201f6c..00a60695fa53 100644 --- a/include/uapi/linux/neighbour.h +++ b/include/uapi/linux/neighbour.h @@ -66,8 +66,11 @@ enum { #define NUD_NONE 0x00 /* NUD_NOARP & NUD_PERMANENT are pseudostates, they never change - and make no address resolution or NUD. - NUD_PERMANENT also cannot be deleted by garbage collectors. + * and make no address resolution or NUD. + * NUD_PERMANENT also cannot be deleted by garbage collectors. + * When NTF_EXT_LEARNED is set for a bridge fdb entry the different cache entry + * states don't make sense and thus are ignored. Such entries don't age and + * can roam. 
*/ struct nda_cacheinfo { diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h index 7ed0b3d1c00a..fcc61c73a666 100644 --- a/include/uapi/linux/net_tstamp.h +++ b/include/uapi/linux/net_tstamp.h @@ -13,7 +13,7 @@ #include <linux/types.h> #include <linux/socket.h> /* for SO_TIMESTAMPING */ -/* SO_TIMESTAMPING gets an integer bit field comprised of these values */ +/* SO_TIMESTAMPING flags */ enum { SOF_TIMESTAMPING_TX_HARDWARE = (1<<0), SOF_TIMESTAMPING_TX_SOFTWARE = (1<<1), @@ -30,8 +30,9 @@ enum { SOF_TIMESTAMPING_OPT_STATS = (1<<12), SOF_TIMESTAMPING_OPT_PKTINFO = (1<<13), SOF_TIMESTAMPING_OPT_TX_SWHW = (1<<14), + SOF_TIMESTAMPING_BIND_PHC = (1 << 15), - SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_TX_SWHW, + SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_BIND_PHC, SOF_TIMESTAMPING_MASK = (SOF_TIMESTAMPING_LAST - 1) | SOF_TIMESTAMPING_LAST }; @@ -47,6 +48,18 @@ enum { SOF_TIMESTAMPING_TX_ACK) /** + * struct so_timestamping - SO_TIMESTAMPING parameter + * + * @flags: SO_TIMESTAMPING flags + * @bind_phc: Index of PTP virtual clock bound to sock. This is available + * if flag SOF_TIMESTAMPING_BIND_PHC is set. + */ +struct so_timestamping { + int flags; + int bind_phc; +}; + +/** * struct hwtstamp_config - %SIOCGHWTSTAMP and %SIOCSHWTSTAMP parameter * * @flags: no flags defined right now, must be zero for %SIOCSHWTSTAMP diff --git a/include/uapi/linux/netfilter/nfnetlink_hook.h b/include/uapi/linux/netfilter/nfnetlink_hook.h index 912ec60b26b0..bbcd285b22e1 100644 --- a/include/uapi/linux/netfilter/nfnetlink_hook.h +++ b/include/uapi/linux/netfilter/nfnetlink_hook.h @@ -43,6 +43,15 @@ enum nfnl_hook_chain_info_attributes { }; #define NFNLA_HOOK_INFO_MAX (__NFNLA_HOOK_INFO_MAX - 1) +enum nfnl_hook_chain_desc_attributes { + NFNLA_CHAIN_UNSPEC, + NFNLA_CHAIN_TABLE, + NFNLA_CHAIN_FAMILY, + NFNLA_CHAIN_NAME, + __NFNLA_CHAIN_MAX, +}; +#define NFNLA_CHAIN_MAX (__NFNLA_CHAIN_MAX - 1) + /** * enum nfnl_hook_chaintype - chain type * diff --git a/include/uapi/linux/netfilter/nfnetlink_log.h b/include/uapi/linux/netfilter/nfnetlink_log.h index 45c8d3b027e0..0af9c113d665 100644 --- a/include/uapi/linux/netfilter/nfnetlink_log.h +++ b/include/uapi/linux/netfilter/nfnetlink_log.h @@ -61,7 +61,7 @@ enum nfulnl_attr_type { NFULA_HWTYPE, /* hardware type */ NFULA_HWHEADER, /* hardware header */ NFULA_HWLEN, /* hardware header length */ - NFULA_CT, /* nf_conntrack_netlink.h */ + NFULA_CT, /* nfnetlink_conntrack.h */ NFULA_CT_INFO, /* enum ip_conntrack_info */ NFULA_VLAN, /* nested attribute: packet vlan info */ NFULA_L2HDR, /* full L2 header */ diff --git a/include/uapi/linux/netfilter/nfnetlink_queue.h b/include/uapi/linux/netfilter/nfnetlink_queue.h index bcb2cb5d40b9..aed90c4df0c8 100644 --- a/include/uapi/linux/netfilter/nfnetlink_queue.h +++ b/include/uapi/linux/netfilter/nfnetlink_queue.h @@ -51,11 +51,11 @@ enum nfqnl_attr_type { NFQA_IFINDEX_PHYSOUTDEV, /* __u32 ifindex */ NFQA_HWADDR, /* nfqnl_msg_packet_hw */ NFQA_PAYLOAD, /* opaque data payload */ - NFQA_CT, /* nf_conntrack_netlink.h */ + NFQA_CT, /* nfnetlink_conntrack.h */ NFQA_CT_INFO, /* enum ip_conntrack_info */ NFQA_CAP_LEN, /* __u32 length of captured packet */ NFQA_SKB_INFO, /* __u32 skb meta information */ - NFQA_EXP, /* nf_conntrack_netlink.h */ + NFQA_EXP, /* nfnetlink_conntrack.h */ NFQA_UID, /* __u32 sk uid */ NFQA_GID, /* __u32 sk gid */ NFQA_SECCTX, /* security context string */ diff --git a/include/uapi/rdma/irdma-abi.h b/include/uapi/rdma/irdma-abi.h index 26b638a7ad97..a7085e092d34 100644 --- 
a/include/uapi/rdma/irdma-abi.h +++ b/include/uapi/rdma/irdma-abi.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */ +/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB */ /* * Copyright (c) 2006 - 2021 Intel Corporation. All rights reserved. * Copyright (c) 2005 Topspin Communications. All rights reserved. diff --git a/init/Kconfig b/init/Kconfig index bb0d6e6262b1..55f9f7738ebb 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1847,7 +1847,6 @@ config SLUB_DEBUG default y bool "Enable SLUB debugging support" if EXPERT depends on SLUB && SYSFS - select STACKDEPOT if STACKTRACE_SUPPORT help SLUB has extensive debug support features. Disabling these can result in significant savings in code size. This also disables diff --git a/init/main.c b/init/main.c index f5b8246e8aa1..8d97aba78c3a 100644 --- a/init/main.c +++ b/init/main.c @@ -397,6 +397,12 @@ static int __init bootconfig_params(char *param, char *val, return 0; } +static int __init warn_bootconfig(char *str) +{ + /* The 'bootconfig' has been handled by bootconfig_params(). */ + return 0; +} + static void __init setup_boot_config(void) { static char tmp_cmdline[COMMAND_LINE_SIZE] __initdata; @@ -475,9 +481,8 @@ static int __init warn_bootconfig(char *str) pr_warn("WARNING: 'bootconfig' found on the kernel command line but CONFIG_BOOT_CONFIG is not set.\n"); return 0; } -early_param("bootconfig", warn_bootconfig); - #endif +early_param("bootconfig", warn_bootconfig); /* Change NUL term back to "=", to make "param" the whole string. */ static void __init repair_env_string(char *param, char *val) diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 034ad93a1ad7..0a28a8095d3e 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -32,6 +32,8 @@ #include <linux/perf_event.h> #include <linux/extable.h> #include <linux/log2.h> + +#include <asm/barrier.h> #include <asm/unaligned.h> /* Registers */ @@ -1360,11 +1362,13 @@ u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr) } /** - * __bpf_prog_run - run eBPF program on a given context + * ___bpf_prog_run - run eBPF program on a given context * @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers * @insn: is the array of eBPF instructions * * Decode and execute eBPF instructions. + * + * Return: whatever value is in %BPF_R0 at program exit */ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn) { @@ -1377,6 +1381,7 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn) /* Non-UAPI available opcodes. */ [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS, [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL, + [BPF_ST | BPF_NOSPEC] = &&ST_NOSPEC, [BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B, [BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H, [BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W, @@ -1621,7 +1626,21 @@ out: COND_JMP(s, JSGE, >=) COND_JMP(s, JSLE, <=) #undef COND_JMP - /* STX and ST and LDX*/ + /* ST, STX and LDX*/ + ST_NOSPEC: + /* Speculation barrier for mitigating Speculative Store Bypass. + * In case of arm64, we rely on the firmware mitigation as + * controlled via the ssbd kernel parameter. Whenever the + * mitigation is enabled, it works for all of the kernel code + * with no need to provide any additional instructions here. + * In case of x86, we use 'lfence' insn for mitigation. We + * reuse preexisting logic from Spectre v1 mitigation that + * happens to produce the required code on x86 for v4 as well. 
+ */ +#ifdef CONFIG_X86 + barrier_nospec(); +#endif + CONT; #define LDST(SIZEOP, SIZE) \ STX_MEM_##SIZEOP: \ *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \ @@ -1861,6 +1880,9 @@ static void bpf_prog_select_func(struct bpf_prog *fp) * * Try to JIT eBPF program, if JIT is not available, use interpreter. * The BPF program will be executed via BPF_PROG_RUN() macro. + * + * Return: the &fp argument along with &err set to 0 for success or + * a negative errno code on failure */ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err) { @@ -2236,8 +2258,14 @@ static void bpf_prog_free_deferred(struct work_struct *work) #endif if (aux->dst_trampoline) bpf_trampoline_put(aux->dst_trampoline); - for (i = 0; i < aux->func_cnt; i++) + for (i = 0; i < aux->func_cnt; i++) { + /* We can just unlink the subprog poke descriptor table as + * it was originally linked to the main program and is also + * released along with it. + */ + aux->func[i]->aux->poke_tab = NULL; bpf_jit_free(aux->func[i]); + } if (aux->func_cnt) { kfree(aux->func); bpf_prog_unlock_free(aux->prog); diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index 2546dafd6672..fdc20892837c 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -558,7 +558,8 @@ int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx, if (map->map_type == BPF_MAP_TYPE_DEVMAP) { for (i = 0; i < map->max_entries; i++) { - dst = READ_ONCE(dtab->netdev_map[i]); + dst = rcu_dereference_check(dtab->netdev_map[i], + rcu_read_lock_bh_held()); if (!is_valid_dst(dst, xdp, exclude_ifindex)) continue; @@ -654,7 +655,8 @@ int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb, if (map->map_type == BPF_MAP_TYPE_DEVMAP) { for (i = 0; i < map->max_entries; i++) { - dst = READ_ONCE(dtab->netdev_map[i]); + dst = rcu_dereference_check(dtab->netdev_map[i], + rcu_read_lock_bh_held()); if (!dst || dst->dev->ifindex == exclude_ifindex) continue; diff --git a/kernel/bpf/disasm.c b/kernel/bpf/disasm.c index bbfc6bb79240..ca3cd9aaa6ce 100644 --- a/kernel/bpf/disasm.c +++ b/kernel/bpf/disasm.c @@ -206,15 +206,17 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs, verbose(cbs->private_data, "BUG_%02x\n", insn->code); } } else if (class == BPF_ST) { - if (BPF_MODE(insn->code) != BPF_MEM) { + if (BPF_MODE(insn->code) == BPF_MEM) { + verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = %d\n", + insn->code, + bpf_ldst_string[BPF_SIZE(insn->code) >> 3], + insn->dst_reg, + insn->off, insn->imm); + } else if (BPF_MODE(insn->code) == 0xc0 /* BPF_NOSPEC, no UAPI */) { + verbose(cbs->private_data, "(%02x) nospec\n", insn->code); + } else { verbose(cbs->private_data, "BUG_st_%02x\n", insn->code); - return; } - verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = %d\n", - insn->code, - bpf_ldst_string[BPF_SIZE(insn->code) >> 3], - insn->dst_reg, - insn->off, insn->imm); } else if (class == BPF_LDX) { if (BPF_MODE(insn->code) != BPF_MEM) { verbose(cbs->private_data, "BUG_ldx_%02x\n", insn->code); diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 72c58cc516a3..9c011f3a2687 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -1565,8 +1565,8 @@ alloc: /* We cannot do copy_from_user or copy_to_user inside * the rcu_read_lock. Allocate enough space here. 
*/ - keys = kvmalloc(key_size * bucket_size, GFP_USER | __GFP_NOWARN); - values = kvmalloc(value_size * bucket_size, GFP_USER | __GFP_NOWARN); + keys = kvmalloc_array(key_size, bucket_size, GFP_USER | __GFP_NOWARN); + values = kvmalloc_array(value_size, bucket_size, GFP_USER | __GFP_NOWARN); if (!keys || !values) { ret = -ENOMEM; goto after_loop; diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 62cf00383910..55f83ea09dae 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -353,9 +353,15 @@ const struct bpf_func_proto bpf_jiffies64_proto = { #ifdef CONFIG_CGROUPS BPF_CALL_0(bpf_get_current_cgroup_id) { - struct cgroup *cgrp = task_dfl_cgroup(current); + struct cgroup *cgrp; + u64 cgrp_id; - return cgroup_id(cgrp); + rcu_read_lock(); + cgrp = task_dfl_cgroup(current); + cgrp_id = cgroup_id(cgrp); + rcu_read_unlock(); + + return cgrp_id; } const struct bpf_func_proto bpf_get_current_cgroup_id_proto = { @@ -366,13 +372,17 @@ const struct bpf_func_proto bpf_get_current_cgroup_id_proto = { BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level) { - struct cgroup *cgrp = task_dfl_cgroup(current); + struct cgroup *cgrp; struct cgroup *ancestor; + u64 cgrp_id; + rcu_read_lock(); + cgrp = task_dfl_cgroup(current); ancestor = cgroup_ancestor(cgrp, ancestor_level); - if (!ancestor) - return 0; - return cgroup_id(ancestor); + cgrp_id = ancestor ? cgroup_id(ancestor) : 0; + rcu_read_unlock(); + + return cgrp_id; } const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = { @@ -397,8 +407,8 @@ BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags) void *ptr; int i; - for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) { - if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current)) + for (i = BPF_CGROUP_STORAGE_NEST_MAX - 1; i >= 0; i--) { + if (likely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current)) continue; storage = this_cpu_read(bpf_cgroup_storage_info[i].storage[stype]); @@ -1070,12 +1080,12 @@ bpf_base_func_proto(enum bpf_func_id func_id) case BPF_FUNC_probe_read_user: return &bpf_probe_read_user_proto; case BPF_FUNC_probe_read_kernel: - return security_locked_down(LOCKDOWN_BPF_READ) < 0 ? + return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? NULL : &bpf_probe_read_kernel_proto; case BPF_FUNC_probe_read_user_str: return &bpf_probe_read_user_str_proto; case BPF_FUNC_probe_read_kernel_str: - return security_locked_down(LOCKDOWN_BPF_READ) < 0 ? + return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? 
NULL : &bpf_probe_read_kernel_str_proto; case BPF_FUNC_snprintf_btf: return &bpf_snprintf_btf_proto; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index be38bb930bf1..381d3d6f24bc 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -2610,6 +2610,19 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env, cur = env->cur_state->frame[env->cur_state->curframe]; if (value_regno >= 0) reg = &cur->regs[value_regno]; + if (!env->bypass_spec_v4) { + bool sanitize = reg && is_spillable_regtype(reg->type); + + for (i = 0; i < size; i++) { + if (state->stack[spi].slot_type[i] == STACK_INVALID) { + sanitize = true; + break; + } + } + + if (sanitize) + env->insn_aux_data[insn_idx].sanitize_stack_spill = true; + } if (reg && size == BPF_REG_SIZE && register_is_bounded(reg) && !register_is_null(reg) && env->bpf_capable) { @@ -2632,47 +2645,10 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env, verbose(env, "invalid size of register spill\n"); return -EACCES; } - if (state != cur && reg->type == PTR_TO_STACK) { verbose(env, "cannot spill pointers to stack into stack frame of the caller\n"); return -EINVAL; } - - if (!env->bypass_spec_v4) { - bool sanitize = false; - - if (state->stack[spi].slot_type[0] == STACK_SPILL && - register_is_const(&state->stack[spi].spilled_ptr)) - sanitize = true; - for (i = 0; i < BPF_REG_SIZE; i++) - if (state->stack[spi].slot_type[i] == STACK_MISC) { - sanitize = true; - break; - } - if (sanitize) { - int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off; - int soff = (-spi - 1) * BPF_REG_SIZE; - - /* detected reuse of integer stack slot with a pointer - * which means either llvm is reusing stack slot or - * an attacker is trying to exploit CVE-2018-3639 - * (speculative store bypass) - * Have to sanitize that slot with preemptive - * store of zero. - */ - if (*poff && *poff != soff) { - /* disallow programs where single insn stores - * into two different stack slots, since verifier - * cannot sanitize them - */ - verbose(env, - "insn %d cannot access two stack slots fp%d and fp%d", - insn_idx, *poff, soff); - return -EINVAL; - } - *poff = soff; - } - } save_register_state(state, spi, reg); } else { u8 type = STACK_MISC; @@ -3677,6 +3653,8 @@ continue_func: if (tail_call_reachable) for (j = 0; j < frame; j++) subprog[ret_prog[j]].tail_call_reachable = true; + if (subprog[0].tail_call_reachable) + env->prog->aux->tail_call_reachable = true; /* end of for() loop means the last insn of the 'subprog' * was reached. Doesn't matter whether it was JA or EXIT @@ -6559,6 +6537,12 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env, alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0; alu_state |= ptr_is_dst_reg ? BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST; + + /* Limit pruning on unknown scalars to enable deep search for + * potential masking differences from other program paths. 
+ */ + if (!off_is_imm) + env->explore_alu_limits = true; } err = update_alu_sanitation_state(aux, alu_state, alu_limit); @@ -9934,8 +9918,8 @@ next: } /* Returns true if (rold safe implies rcur safe) */ -static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, - struct bpf_id_pair *idmap) +static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold, + struct bpf_reg_state *rcur, struct bpf_id_pair *idmap) { bool equal; @@ -9961,6 +9945,8 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, return false; switch (rold->type) { case SCALAR_VALUE: + if (env->explore_alu_limits) + return false; if (rcur->type == SCALAR_VALUE) { if (!rold->precise && !rcur->precise) return true; @@ -10051,9 +10037,8 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, return false; } -static bool stacksafe(struct bpf_func_state *old, - struct bpf_func_state *cur, - struct bpf_id_pair *idmap) +static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old, + struct bpf_func_state *cur, struct bpf_id_pair *idmap) { int i, spi; @@ -10098,9 +10083,8 @@ static bool stacksafe(struct bpf_func_state *old, continue; if (old->stack[spi].slot_type[0] != STACK_SPILL) continue; - if (!regsafe(&old->stack[spi].spilled_ptr, - &cur->stack[spi].spilled_ptr, - idmap)) + if (!regsafe(env, &old->stack[spi].spilled_ptr, + &cur->stack[spi].spilled_ptr, idmap)) /* when explored and current stack slot are both storing * spilled registers, check that stored pointers types * are the same as well. @@ -10157,10 +10141,11 @@ static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_stat memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch)); for (i = 0; i < MAX_BPF_REG; i++) - if (!regsafe(&old->regs[i], &cur->regs[i], env->idmap_scratch)) + if (!regsafe(env, &old->regs[i], &cur->regs[i], + env->idmap_scratch)) return false; - if (!stacksafe(old, cur, env->idmap_scratch)) + if (!stacksafe(env, old, cur, env->idmap_scratch)) return false; if (!refsafe(old, cur)) @@ -11678,6 +11663,7 @@ static void sanitize_dead_code(struct bpf_verifier_env *env) if (aux_data[i].seen) continue; memcpy(insn + i, &trap, sizeof(trap)); + aux_data[i].zext_dst = false; } } @@ -11904,35 +11890,33 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) for (i = 0; i < insn_cnt; i++, insn++) { bpf_convert_ctx_access_t convert_ctx_access; + bool ctx_access; if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) || insn->code == (BPF_LDX | BPF_MEM | BPF_H) || insn->code == (BPF_LDX | BPF_MEM | BPF_W) || - insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) + insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) { type = BPF_READ; - else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || - insn->code == (BPF_STX | BPF_MEM | BPF_H) || - insn->code == (BPF_STX | BPF_MEM | BPF_W) || - insn->code == (BPF_STX | BPF_MEM | BPF_DW)) + ctx_access = true; + } else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || + insn->code == (BPF_STX | BPF_MEM | BPF_H) || + insn->code == (BPF_STX | BPF_MEM | BPF_W) || + insn->code == (BPF_STX | BPF_MEM | BPF_DW) || + insn->code == (BPF_ST | BPF_MEM | BPF_B) || + insn->code == (BPF_ST | BPF_MEM | BPF_H) || + insn->code == (BPF_ST | BPF_MEM | BPF_W) || + insn->code == (BPF_ST | BPF_MEM | BPF_DW)) { type = BPF_WRITE; - else + ctx_access = BPF_CLASS(insn->code) == BPF_STX; + } else { continue; + } if (type == BPF_WRITE && - env->insn_aux_data[i + delta].sanitize_stack_off) { + env->insn_aux_data[i + delta].sanitize_stack_spill) { struct 
bpf_insn patch[] = { - /* Sanitize suspicious stack slot with zero. - * There are no memory dependencies for this store, - * since it's only using frame pointer and immediate - * constant of zero - */ - BPF_ST_MEM(BPF_DW, BPF_REG_FP, - env->insn_aux_data[i + delta].sanitize_stack_off, - 0), - /* the original STX instruction will immediately - * overwrite the same stack slot with appropriate value - */ *insn, + BPF_ST_NOSPEC(), }; cnt = ARRAY_SIZE(patch); @@ -11946,6 +11930,9 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) continue; } + if (!ctx_access) + continue; + switch (env->insn_aux_data[i + delta].ptr_type) { case PTR_TO_CTX: if (!ops->convert_ctx_access) @@ -12121,33 +12108,19 @@ static int jit_subprogs(struct bpf_verifier_env *env) goto out_free; func[i]->is_func = 1; func[i]->aux->func_idx = i; - /* the btf and func_info will be freed only at prog->aux */ + /* Below members will be freed only at prog->aux */ func[i]->aux->btf = prog->aux->btf; func[i]->aux->func_info = prog->aux->func_info; + func[i]->aux->poke_tab = prog->aux->poke_tab; + func[i]->aux->size_poke_tab = prog->aux->size_poke_tab; for (j = 0; j < prog->aux->size_poke_tab; j++) { - u32 insn_idx = prog->aux->poke_tab[j].insn_idx; - int ret; + struct bpf_jit_poke_descriptor *poke; - if (!(insn_idx >= subprog_start && - insn_idx <= subprog_end)) - continue; - - ret = bpf_jit_add_poke_descriptor(func[i], - &prog->aux->poke_tab[j]); - if (ret < 0) { - verbose(env, "adding tail call poke descriptor failed\n"); - goto out_free; - } - - func[i]->insnsi[insn_idx - subprog_start].imm = ret + 1; - - map_ptr = func[i]->aux->poke_tab[ret].tail_call.map; - ret = map_ptr->ops->map_poke_track(map_ptr, func[i]->aux); - if (ret < 0) { - verbose(env, "tracking tail call prog failed\n"); - goto out_free; - } + poke = &prog->aux->poke_tab[j]; + if (poke->insn_idx < subprog_end && + poke->insn_idx >= subprog_start) + poke->aux = func[i]->aux; } /* Use bpf_prog_F_tag to indicate functions in stack traces. @@ -12178,18 +12151,6 @@ static int jit_subprogs(struct bpf_verifier_env *env) cond_resched(); } - /* Untrack main program's aux structs so that during map_poke_run() - * we will not stumble upon the unfilled poke descriptors; each - * of the main program's poke descs got distributed across subprogs - * and got tracked onto map, so we are sure that none of them will - * be missed after the operation below - */ - for (i = 0; i < prog->aux->size_poke_tab; i++) { - map_ptr = prog->aux->poke_tab[i].tail_call.map; - - map_ptr->ops->map_poke_untrack(map_ptr, prog->aux); - } - /* at this point all bpf functions were successfully JITed * now populate all bpf_calls with correct addresses and * run last pass of JIT @@ -12267,14 +12228,22 @@ static int jit_subprogs(struct bpf_verifier_env *env) bpf_prog_jit_attempt_done(prog); return 0; out_free: + /* We failed JIT'ing, so at this point we need to unregister poke + * descriptors from subprogs, so that kernel is not attempting to + * patch it anymore as we're freeing the subprog JIT memory. + */ + for (i = 0; i < prog->aux->size_poke_tab; i++) { + map_ptr = prog->aux->poke_tab[i].tail_call.map; + map_ptr->ops->map_poke_untrack(map_ptr, prog->aux); + } + /* At this point we're guaranteed that poke descriptors are not + * live anymore. We can just unlink its descriptor table as it's + * released with the main prog. 
+ */ for (i = 0; i < env->subprog_cnt; i++) { if (!func[i]) continue; - - for (j = 0; j < func[i]->aux->size_poke_tab; j++) { - map_ptr = func[i]->aux->poke_tab[j].tail_call.map; - map_ptr->ops->map_poke_untrack(map_ptr, func[i]->aux); - } + func[i]->aux->poke_tab = NULL; bpf_jit_free(func[i]); } kfree(func); @@ -12768,37 +12737,6 @@ static void free_states(struct bpf_verifier_env *env) } } -/* The verifier is using insn_aux_data[] to store temporary data during - * verification and to store information for passes that run after the - * verification like dead code sanitization. do_check_common() for subprogram N - * may analyze many other subprograms. sanitize_insn_aux_data() clears all - * temporary data after do_check_common() finds that subprogram N cannot be - * verified independently. pass_cnt counts the number of times - * do_check_common() was run and insn->aux->seen tells the pass number - * insn_aux_data was touched. These variables are compared to clear temporary - * data from failed pass. For testing and experiments do_check_common() can be - * run multiple times even when prior attempt to verify is unsuccessful. - * - * Note that special handling is needed on !env->bypass_spec_v1 if this is - * ever called outside of error path with subsequent program rejection. - */ -static void sanitize_insn_aux_data(struct bpf_verifier_env *env) -{ - struct bpf_insn *insn = env->prog->insnsi; - struct bpf_insn_aux_data *aux; - int i, class; - - for (i = 0; i < env->prog->len; i++) { - class = BPF_CLASS(insn[i].code); - if (class != BPF_LDX && class != BPF_STX) - continue; - aux = &env->insn_aux_data[i]; - if (aux->seen != env->pass_cnt) - continue; - memset(aux, 0, offsetof(typeof(*aux), orig_idx)); - } -} - static int do_check_common(struct bpf_verifier_env *env, int subprog) { bool pop_log = !(env->log.level & BPF_LOG_LEVEL2); @@ -12875,9 +12813,6 @@ out: if (!ret && pop_log) bpf_vlog_reset(&env->log, 0); free_states(env); - if (ret) - /* clean aux data in case subprog was rejected */ - sanitize_insn_aux_data(env); return ret; } diff --git a/kernel/cfi.c b/kernel/cfi.c index e17a56639766..9594cfd1cf2c 100644 --- a/kernel/cfi.c +++ b/kernel/cfi.c @@ -248,9 +248,9 @@ static inline cfi_check_fn find_shadow_check_fn(unsigned long ptr) { cfi_check_fn fn; - rcu_read_lock_sched(); + rcu_read_lock_sched_notrace(); fn = ptr_to_check_fn(rcu_dereference_sched(cfi_shadow), ptr); - rcu_read_unlock_sched(); + rcu_read_unlock_sched_notrace(); return fn; } @@ -269,11 +269,11 @@ static inline cfi_check_fn find_module_check_fn(unsigned long ptr) cfi_check_fn fn = NULL; struct module *mod; - rcu_read_lock_sched(); + rcu_read_lock_sched_notrace(); mod = __module_address(ptr); if (mod) fn = mod->cfi_check; - rcu_read_unlock_sched(); + rcu_read_unlock_sched_notrace(); return fn; } diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c index ee93b6e89587..de2c432dee20 100644 --- a/kernel/cgroup/cgroup-v1.c +++ b/kernel/cgroup/cgroup-v1.c @@ -911,13 +911,11 @@ int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param) opt = fs_parse(fc, cgroup1_fs_parameters, param, &result); if (opt == -ENOPARAM) { - if (strcmp(param->key, "source") == 0) { - if (fc->source) - return invalf(fc, "Multiple sources not supported"); - fc->source = param->string; - param->string = NULL; - return 0; - } + int ret; + + ret = vfs_parse_fs_param_source(fc, param); + if (ret != -ENOPARAM) + return ret; for_each_subsys(ss, i) { if (strcmp(param->key, ss->legacy_name)) continue; @@ -1223,9 +1221,7 @@ int 
cgroup1_get_tree(struct fs_context *fc) ret = cgroup_do_get_tree(fc); if (!ret && percpu_ref_is_dying(&ctx->root->cgrp.self.refcnt)) { - struct super_block *sb = fc->root->d_sb; - dput(fc->root); - deactivate_locked_super(sb); + fc_drop_locked(fc); ret = 1; } diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c index 7f0e58917432..b264ab5652ba 100644 --- a/kernel/cgroup/rstat.c +++ b/kernel/cgroup/rstat.c @@ -347,19 +347,20 @@ static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu) } static struct cgroup_rstat_cpu * -cgroup_base_stat_cputime_account_begin(struct cgroup *cgrp) +cgroup_base_stat_cputime_account_begin(struct cgroup *cgrp, unsigned long *flags) { struct cgroup_rstat_cpu *rstatc; rstatc = get_cpu_ptr(cgrp->rstat_cpu); - u64_stats_update_begin(&rstatc->bsync); + *flags = u64_stats_update_begin_irqsave(&rstatc->bsync); return rstatc; } static void cgroup_base_stat_cputime_account_end(struct cgroup *cgrp, - struct cgroup_rstat_cpu *rstatc) + struct cgroup_rstat_cpu *rstatc, + unsigned long flags) { - u64_stats_update_end(&rstatc->bsync); + u64_stats_update_end_irqrestore(&rstatc->bsync, flags); cgroup_rstat_updated(cgrp, smp_processor_id()); put_cpu_ptr(rstatc); } @@ -367,18 +368,20 @@ static void cgroup_base_stat_cputime_account_end(struct cgroup *cgrp, void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec) { struct cgroup_rstat_cpu *rstatc; + unsigned long flags; - rstatc = cgroup_base_stat_cputime_account_begin(cgrp); + rstatc = cgroup_base_stat_cputime_account_begin(cgrp, &flags); rstatc->bstat.cputime.sum_exec_runtime += delta_exec; - cgroup_base_stat_cputime_account_end(cgrp, rstatc); + cgroup_base_stat_cputime_account_end(cgrp, rstatc, flags); } void __cgroup_account_cputime_field(struct cgroup *cgrp, enum cpu_usage_stat index, u64 delta_exec) { struct cgroup_rstat_cpu *rstatc; + unsigned long flags; - rstatc = cgroup_base_stat_cputime_account_begin(cgrp); + rstatc = cgroup_base_stat_cputime_account_begin(cgrp, &flags); switch (index) { case CPUTIME_USER: @@ -394,7 +397,7 @@ void __cgroup_account_cputime_field(struct cgroup *cgrp, break; } - cgroup_base_stat_cputime_account_end(cgrp, rstatc); + cgroup_base_stat_cputime_account_end(cgrp, rstatc, flags); } /* diff --git a/kernel/debug/gdbstub.c b/kernel/debug/gdbstub.c index 8372897402f4..b6f28fad4307 100644 --- a/kernel/debug/gdbstub.c +++ b/kernel/debug/gdbstub.c @@ -1045,8 +1045,8 @@ int gdb_serial_stub(struct kgdb_state *ks) gdb_cmd_detachkill(ks); return DBG_PASS_EVENT; } -#endif fallthrough; +#endif case 'C': /* Exception passing */ tmp = gdb_cmd_exception_pass(ks); if (tmp > 0) diff --git a/kernel/dma/ops_helpers.c b/kernel/dma/ops_helpers.c index 910ae69cae77..af4a6ef48ce0 100644 --- a/kernel/dma/ops_helpers.c +++ b/kernel/dma/ops_helpers.c @@ -5,6 +5,13 @@ */ #include <linux/dma-map-ops.h> +static struct page *dma_common_vaddr_to_page(void *cpu_addr) +{ + if (is_vmalloc_addr(cpu_addr)) + return vmalloc_to_page(cpu_addr); + return virt_to_page(cpu_addr); +} + /* * Create scatter-list for the already allocated DMA buffer. 
*/ @@ -12,7 +19,7 @@ int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs) { - struct page *page = virt_to_page(cpu_addr); + struct page *page = dma_common_vaddr_to_page(cpu_addr); int ret; ret = sg_alloc_table(sgt, 1, GFP_KERNEL); @@ -32,6 +39,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, unsigned long user_count = vma_pages(vma); unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; unsigned long off = vma->vm_pgoff; + struct page *page = dma_common_vaddr_to_page(cpu_addr); int ret = -ENXIO; vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs); @@ -43,7 +51,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, return -ENXIO; return remap_pfn_range(vma, vma->vm_start, - page_to_pfn(virt_to_page(cpu_addr)) + vma->vm_pgoff, + page_to_pfn(page) + vma->vm_pgoff, user_count << PAGE_SHIFT, vma->vm_page_prot); #else return -ENXIO; diff --git a/kernel/events/core.c b/kernel/events/core.c index 464917096e73..1cb1f9b8392e 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -11917,6 +11917,37 @@ again: return gctx; } +static bool +perf_check_permission(struct perf_event_attr *attr, struct task_struct *task) +{ + unsigned int ptrace_mode = PTRACE_MODE_READ_REALCREDS; + bool is_capable = perfmon_capable(); + + if (attr->sigtrap) { + /* + * perf_event_attr::sigtrap sends signals to the other task. + * Require the current task to also have CAP_KILL. + */ + rcu_read_lock(); + is_capable &= ns_capable(__task_cred(task)->user_ns, CAP_KILL); + rcu_read_unlock(); + + /* + * If the required capabilities aren't available, checks for + * ptrace permissions: upgrade to ATTACH, since sending signals + * can effectively change the target task. + */ + ptrace_mode = PTRACE_MODE_ATTACH_REALCREDS; + } + + /* + * Preserve ptrace permission check for backwards compatibility. The + * ptrace check also includes checks that the current task and other + * task have matching uids, and is therefore not done here explicitly. + */ + return is_capable || ptrace_may_access(task, ptrace_mode); +} + /** * sys_perf_event_open - open a performance event, associate it to a task/cpu * @@ -12163,15 +12194,13 @@ SYSCALL_DEFINE5(perf_event_open, goto err_file; /* - * Preserve ptrace permission check for backwards compatibility. - * * We must hold exec_update_lock across this and any potential * perf_install_in_context() call for this new event to * serialize against exec() altering our credentials (and the * perf_event_exit_task() that could imply). 
*/ err = -EACCES; - if (!perfmon_capable() && !ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) + if (!perf_check_permission(&attr, task)) goto err_cred; } diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 7f04c7d8296e..a98bcfc4be7b 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -265,8 +265,11 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force) } else { switch (__irq_startup_managed(desc, aff, force)) { case IRQ_STARTUP_NORMAL: + if (d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP) + irq_setup_affinity(desc); ret = __irq_startup(desc); - irq_setup_affinity(desc); + if (!(d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP)) + irq_setup_affinity(desc); break; case IRQ_STARTUP_MANAGED: irq_do_set_affinity(d, aff, false); diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c index c41965e348b5..85df3ca03efe 100644 --- a/kernel/irq/msi.c +++ b/kernel/irq/msi.c @@ -476,11 +476,6 @@ skip_activate: return 0; cleanup: - for_each_msi_vector(desc, i, dev) { - irq_data = irq_domain_get_irq_data(domain, i); - if (irqd_is_activated(irq_data)) - irq_domain_deactivate_irq(irq_data); - } msi_domain_free_irqs(domain, dev); return ret; } @@ -505,7 +500,15 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev) { + struct irq_data *irq_data; struct msi_desc *desc; + int i; + + for_each_msi_vector(desc, i, dev) { + irq_data = irq_domain_get_irq_data(domain, i); + if (irqd_is_activated(irq_data)) + irq_domain_deactivate_irq(irq_data); + } for_each_msi_entry(desc, dev) { /* diff --git a/kernel/irq/timings.c b/kernel/irq/timings.c index d309d6fbf5bd..4d2a702d7aa9 100644 --- a/kernel/irq/timings.c +++ b/kernel/irq/timings.c @@ -453,6 +453,11 @@ static __always_inline void __irq_timings_store(int irq, struct irqt_stat *irqs, */ index = irq_timings_interval_index(interval); + if (index > PREDICTION_BUFFER_SIZE - 1) { + irqs->count = 0; + return; + } + /* * Store the index as an element of the pattern in another * circular array. diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index b5d9bb5202c6..ad0db322ed3b 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -343,7 +343,7 @@ static __always_inline bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter, enum rtmutex_chainwalk chwalk) { - if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEX)) + if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES)) return waiter != NULL; return chwalk == RT_MUTEX_FULL_CHAINWALK; } diff --git a/kernel/rcu/refscale.c b/kernel/rcu/refscale.c index 313d4547cbc7..d998a76fb542 100644 --- a/kernel/rcu/refscale.c +++ b/kernel/rcu/refscale.c @@ -487,13 +487,13 @@ ref_scale_reader(void *arg) s64 duration; VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: task started", me); - set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids)); + WARN_ON_ONCE(set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids))); set_user_nice(current, MAX_NICE); atomic_inc(&n_init); if (holdoff) schedule_timeout_interruptible(holdoff * HZ); repeat: - VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: waiting to start next experiment on cpu %d", me, smp_processor_id()); + VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: waiting to start next experiment on cpu %d", me, raw_smp_processor_id()); // Wait for signal that this reader can start. wait_event(rt->wq, (atomic_read(&nreaders_exp) && smp_load_acquire(&rt->start_reader)) || @@ -503,7 +503,7 @@ repeat: goto end; // Make sure that the CPU is affinitized appropriately during testing. 
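A short review note on the torture-test hunks around this point: checking the return value of set_cpus_allowed_ptr() matters because the requested CPU can be offline, and the debug prints switch to raw_smp_processor_id() because they run in preemptible context, where smp_processor_id() would trigger a debug splat and the reported CPU is only advisory anyway. A minimal userspace analogue of the same pin-then-verify pattern (pthread affinity API, a sketch rather than the kernel interface):

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

/* Pin the calling thread to one CPU, then verify the placement took
 * effect rather than assuming it, mirroring the WARN_ON_ONCE() these
 * hunks add around set_cpus_allowed_ptr(). */
static int pin_and_verify(int cpu)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	/* Can fail, e.g. if the CPU is offline; do not ignore it. */
	if (pthread_setaffinity_np(pthread_self(), sizeof(set), &set))
		return -1;
	/* With a single-CPU mask the answer is stable; with a wider
	 * mask it is only a hint, just as raw_smp_processor_id() is
	 * in preemptible kernel code. */
	if (sched_getcpu() != cpu) {
		fprintf(stderr, "wanted CPU %d, on %d\n", cpu, sched_getcpu());
		return -1;
	}
	return 0;
}

int main(void)
{
	return pin_and_verify(0) ? 1 : 0;
}

The quoted WARN_ON_ONCE() comparison that follows applies exactly this check in-kernel.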
- WARN_ON_ONCE(smp_processor_id() != me); + WARN_ON_ONCE(raw_smp_processor_id() != me); WRITE_ONCE(rt->start_reader, 0); if (!atomic_dec_return(&n_started)) diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index 03a118d1c003..8536c55df514 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -953,10 +953,9 @@ static bool trc_inspect_reader(struct task_struct *t, void *arg) in_qs = likely(!t->trc_reader_nesting); } - // Mark as checked. Because this is called from the grace-period - // kthread, also remove the task from the holdout list. + // Mark as checked so that the grace-period kthread will + // remove it from the holdout list. t->trc_reader_checked = true; - trc_del_holdout(t); if (in_qs) return true; // Already in quiescent state, done!!! @@ -983,7 +982,6 @@ static void trc_wait_for_one_reader(struct task_struct *t, // The current task had better be in a quiescent state. if (t == current) { t->trc_reader_checked = true; - trc_del_holdout(t); WARN_ON_ONCE(t->trc_reader_nesting); return; } diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h index 3f937b20814f..6c76988cc019 100644 --- a/kernel/rcu/tree_stall.h +++ b/kernel/rcu/tree_stall.h @@ -795,9 +795,9 @@ void show_rcu_gp_kthreads(void) jr = j - data_race(rcu_state.gp_req_activity); js = j - data_race(rcu_state.gp_start); jw = j - data_race(rcu_state.gp_wake_time); - pr_info("%s: wait state: %s(%d) ->state: %#lx ->rt_priority %u delta ->gp_start %lu ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_max %lu ->gp_flags %#x\n", + pr_info("%s: wait state: %s(%d) ->state: %#x ->rt_priority %u delta ->gp_start %lu ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_max %lu ->gp_flags %#x\n", rcu_state.name, gp_state_getname(rcu_state.gp_state), - rcu_state.gp_state, t ? t->__state : 0x1ffffL, t ? t->rt_priority : 0xffU, + rcu_state.gp_state, t ? t->__state : 0x1ffff, t ? t->rt_priority : 0xffU, js, ja, jr, jw, (long)data_race(rcu_state.gp_wake_seq), (long)data_race(rcu_state.gp_seq), (long)data_race(rcu_get_root()->gp_seq_needed), diff --git a/kernel/scftorture.c b/kernel/scftorture.c index 2377cbb32474..29e8fc5d91a7 100644 --- a/kernel/scftorture.c +++ b/kernel/scftorture.c @@ -405,15 +405,15 @@ static int scftorture_invoker(void *arg) VERBOSE_SCFTORTOUT("scftorture_invoker %d: task started", scfp->cpu); cpu = scfp->cpu % nr_cpu_ids; - set_cpus_allowed_ptr(current, cpumask_of(cpu)); + WARN_ON_ONCE(set_cpus_allowed_ptr(current, cpumask_of(cpu))); set_user_nice(current, MAX_NICE); if (holdoff) schedule_timeout_interruptible(holdoff * HZ); - VERBOSE_SCFTORTOUT("scftorture_invoker %d: Waiting for all SCF torturers from cpu %d", scfp->cpu, smp_processor_id()); + VERBOSE_SCFTORTOUT("scftorture_invoker %d: Waiting for all SCF torturers from cpu %d", scfp->cpu, raw_smp_processor_id()); // Make sure that the CPU is affinitized appropriately during testing. 
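A quick aside before the scftorture affinity check continues below: the rtmutex.c one-liner in the previous chunk (CONFIG_DEBUG_RT_MUTEX corrected to CONFIG_DEBUG_RT_MUTEXES) illustrates a classic failure mode, since IS_ENABLED() on a misspelled Kconfig symbol compiles cleanly and silently evaluates to 0. A standalone sketch of the mechanism, simplified from the kernel's kconfig.h (the surrounding program is illustrative, not kernel code):

#include <stdio.h>

/* Simplified from include/linux/kconfig.h: a symbol #defined to 1
 * pastes into __ARG_PLACEHOLDER_1, which injects an extra argument so
 * the second-argument selector yields 1; any other token, including a
 * typo, falls through to 0. */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)
#define IS_ENABLED(option) __is_defined(option)

#define CONFIG_DEBUG_RT_MUTEXES 1	/* the real option */

int main(void)
{
	printf("%d\n", IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES));	/* 1 */
	/* The misspelled symbol quietly yields 0 instead of a build
	 * error, which is the bug the rtmutex.c hunk fixes. */
	printf("%d\n", IS_ENABLED(CONFIG_DEBUG_RT_MUTEX));	/* 0 */
	return 0;
}

The scftorture lines that follow perform the same pin-then-verify affinity check sketched earlier.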
- curcpu = smp_processor_id(); + curcpu = raw_smp_processor_id(); WARN_ONCE(curcpu != scfp->cpu % nr_cpu_ids, "%s: Wanted CPU %d, running on %d, nr_cpu_ids = %d\n", __func__, scfp->cpu, curcpu, nr_cpu_ids); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 2d9ff40f4661..20ffcc044134 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1981,12 +1981,18 @@ void deactivate_task(struct rq *rq, struct task_struct *p, int flags) dequeue_task(rq, p, flags); } -/* - * __normal_prio - return the priority that is based on the static prio - */ -static inline int __normal_prio(struct task_struct *p) +static inline int __normal_prio(int policy, int rt_prio, int nice) { - return p->static_prio; + int prio; + + if (dl_policy(policy)) + prio = MAX_DL_PRIO - 1; + else if (rt_policy(policy)) + prio = MAX_RT_PRIO - 1 - rt_prio; + else + prio = NICE_TO_PRIO(nice); + + return prio; } /* @@ -1998,15 +2004,7 @@ static inline int __normal_prio(struct task_struct *p) */ static inline int normal_prio(struct task_struct *p) { - int prio; - - if (task_has_dl_policy(p)) - prio = MAX_DL_PRIO-1; - else if (task_has_rt_policy(p)) - prio = MAX_RT_PRIO-1 - p->rt_priority; - else - prio = __normal_prio(p); - return prio; + return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio)); } /* @@ -4099,7 +4097,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p) } else if (PRIO_TO_NICE(p->static_prio) < 0) p->static_prio = NICE_TO_PRIO(0); - p->prio = p->normal_prio = __normal_prio(p); + p->prio = p->normal_prio = p->static_prio; set_load_weight(p, false); /* @@ -6341,6 +6339,18 @@ int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flag } EXPORT_SYMBOL(default_wake_function); +static void __setscheduler_prio(struct task_struct *p, int prio) +{ + if (dl_prio(prio)) + p->sched_class = &dl_sched_class; + else if (rt_prio(prio)) + p->sched_class = &rt_sched_class; + else + p->sched_class = &fair_sched_class; + + p->prio = prio; +} + #ifdef CONFIG_RT_MUTEXES static inline int __rt_effective_prio(struct task_struct *pi_task, int prio) @@ -6456,22 +6466,19 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) } else { p->dl.pi_se = &p->dl; } - p->sched_class = &dl_sched_class; } else if (rt_prio(prio)) { if (dl_prio(oldprio)) p->dl.pi_se = &p->dl; if (oldprio < prio) queue_flag |= ENQUEUE_HEAD; - p->sched_class = &rt_sched_class; } else { if (dl_prio(oldprio)) p->dl.pi_se = &p->dl; if (rt_prio(oldprio)) p->rt.timeout = 0; - p->sched_class = &fair_sched_class; } - p->prio = prio; + __setscheduler_prio(p, prio); if (queued) enqueue_task(rq, p, queue_flag); @@ -6824,35 +6831,6 @@ static void __setscheduler_params(struct task_struct *p, set_load_weight(p, true); } -/* Actually do priority change: must hold pi & rq lock. */ -static void __setscheduler(struct rq *rq, struct task_struct *p, - const struct sched_attr *attr, bool keep_boost) -{ - /* - * If params can't change scheduling class changes aren't allowed - * either. - */ - if (attr->sched_flags & SCHED_FLAG_KEEP_PARAMS) - return; - - __setscheduler_params(p, attr); - - /* - * Keep a potential priority boosting if called from - * sched_setscheduler(). 
- */ - p->prio = normal_prio(p); - if (keep_boost) - p->prio = rt_effective_prio(p, p->prio); - - if (dl_prio(p->prio)) - p->sched_class = &dl_sched_class; - else if (rt_prio(p->prio)) - p->sched_class = &rt_sched_class; - else - p->sched_class = &fair_sched_class; -} - /* * Check the target process has a UID that matches the current process's: */ @@ -6873,10 +6851,8 @@ static int __sched_setscheduler(struct task_struct *p, const struct sched_attr *attr, bool user, bool pi) { - int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 : - MAX_RT_PRIO - 1 - attr->sched_priority; - int retval, oldprio, oldpolicy = -1, queued, running; - int new_effective_prio, policy = attr->sched_policy; + int oldpolicy = -1, policy = attr->sched_policy; + int retval, oldprio, newprio, queued, running; const struct sched_class *prev_class; struct callback_head *head; struct rq_flags rf; @@ -7074,6 +7050,7 @@ change: p->sched_reset_on_fork = reset_on_fork; oldprio = p->prio; + newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice); if (pi) { /* * Take priority boosted tasks into account. If the new @@ -7082,8 +7059,8 @@ change: * the runqueue. This will be done when the task deboost * itself. */ - new_effective_prio = rt_effective_prio(p, newprio); - if (new_effective_prio == oldprio) + newprio = rt_effective_prio(p, newprio); + if (newprio == oldprio) queue_flags &= ~DEQUEUE_MOVE; } @@ -7096,7 +7073,10 @@ change: prev_class = p->sched_class; - __setscheduler(rq, p, attr, pi); + if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) { + __setscheduler_params(p, attr); + __setscheduler_prio(p, newprio); + } __setscheduler_uclamp(p, attr); if (queued) { diff --git a/kernel/seccomp.c b/kernel/seccomp.c index 057e17f3215d..6469eca8078c 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -602,7 +602,7 @@ static inline void seccomp_sync_threads(unsigned long flags) smp_store_release(&thread->seccomp.filter, caller->seccomp.filter); atomic_set(&thread->seccomp.filter_count, - atomic_read(&thread->seccomp.filter_count)); + atomic_read(&caller->seccomp.filter_count)); /* * Don't let an unprivileged task work around diff --git a/kernel/smpboot.c b/kernel/smpboot.c index e4163042c4d6..cf6acab78538 100644 --- a/kernel/smpboot.c +++ b/kernel/smpboot.c @@ -47,7 +47,7 @@ void __init idle_thread_set_boot_cpu(void) * * Creates the thread if it does not exist. */ -static inline void idle_init(unsigned int cpu) +static __always_inline void idle_init(unsigned int cpu) { struct task_struct *tsk = per_cpu(idle_threads, cpu); diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index 29a5e54e6e10..517be7fd175e 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c @@ -991,6 +991,11 @@ static void posix_cpu_timer_rearm(struct k_itimer *timer) if (!p) goto out; + /* Protect timer list r/w in arm_timer() */ + sighand = lock_task_sighand(p, &flags); + if (unlikely(sighand == NULL)) + goto out; + /* * Fetch the current sample and update the timer's expiry time. */ @@ -1001,11 +1006,6 @@ static void posix_cpu_timer_rearm(struct k_itimer *timer) bump_cpu_timer(timer, now); - /* Protect timer list r/w in arm_timer() */ - sighand = lock_task_sighand(p, &flags); - if (unlikely(sighand == NULL)) - goto out; - /* * Now re-arm for the new expiry time. 
*/ diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 3fadb58fc9d7..e3d2c23c413d 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -207,6 +207,7 @@ struct timer_base { unsigned int cpu; bool next_expiry_recalc; bool is_idle; + bool timers_pending; DECLARE_BITMAP(pending_map, WHEEL_SIZE); struct hlist_head vectors[WHEEL_SIZE]; } ____cacheline_aligned; @@ -595,6 +596,7 @@ static void enqueue_timer(struct timer_base *base, struct timer_list *timer, * can reevaluate the wheel: */ base->next_expiry = bucket_expiry; + base->timers_pending = true; base->next_expiry_recalc = false; trigger_dyntick_cpu(base, timer); } @@ -1263,8 +1265,10 @@ static inline void timer_base_unlock_expiry(struct timer_base *base) static void timer_sync_wait_running(struct timer_base *base) { if (atomic_read(&base->timer_waiters)) { + raw_spin_unlock_irq(&base->lock); spin_unlock(&base->expiry_lock); spin_lock(&base->expiry_lock); + raw_spin_lock_irq(&base->lock); } } @@ -1455,14 +1459,14 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head) if (timer->flags & TIMER_IRQSAFE) { raw_spin_unlock(&base->lock); call_timer_fn(timer, fn, baseclk); - base->running_timer = NULL; raw_spin_lock(&base->lock); + base->running_timer = NULL; } else { raw_spin_unlock_irq(&base->lock); call_timer_fn(timer, fn, baseclk); + raw_spin_lock_irq(&base->lock); base->running_timer = NULL; timer_sync_wait_running(base); - raw_spin_lock_irq(&base->lock); } } } @@ -1582,6 +1586,7 @@ static unsigned long __next_timer_interrupt(struct timer_base *base) } base->next_expiry_recalc = false; + base->timers_pending = !(next == base->clk + NEXT_TIMER_MAX_DELTA); return next; } @@ -1633,7 +1638,6 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem) struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); u64 expires = KTIME_MAX; unsigned long nextevt; - bool is_max_delta; /* * Pretend that there is no timer pending if the cpu is offline. @@ -1646,7 +1650,6 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem) if (base->next_expiry_recalc) base->next_expiry = __next_timer_interrupt(base); nextevt = base->next_expiry; - is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA); /* * We have a fresh next event. Check whether we can forward the @@ -1664,7 +1667,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem) expires = basem; base->is_idle = false; } else { - if (!is_max_delta) + if (base->timers_pending) expires = basem + (u64)(nextevt - basej) * TICK_NSEC; /* * If we expect to sleep more than a tick, mark the base idle. 
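The new timers_pending flag in the timer.c hunks above replaces the old is_max_delta computation: instead of re-deriving "no timer queued" from nextevt == clk + NEXT_TIMER_MAX_DELTA at the call site, the base now remembers whether anything is enqueued. A self-contained sketch of the resulting expiry calculation (the tick rate and the KTIME_MAX stand-in are assumptions for the sketch, not the kernel's definitions):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TICK_NSEC 1000000ULL	/* assumes a 1000 Hz tick */
#define KTIME_MAX UINT64_MAX	/* stand-in for the kernel constant */

/* Mirrors the get_next_timer_interrupt() logic after this patch: an
 * already-due timer fires at basem, a pending timer converts its
 * jiffies delta to nanoseconds, and no pending timer means "sleep
 * forever" (KTIME_MAX). */
static uint64_t next_event_ns(uint64_t basej, uint64_t basem,
			      uint64_t nextevt, bool timers_pending)
{
	if (timers_pending && nextevt <= basej)
		return basem;			/* already due: fire now */
	if (timers_pending)
		return basem + (nextevt - basej) * TICK_NSEC;
	return KTIME_MAX;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)next_event_ns(1000, 0, 1010, true));
	printf("%llu\n", (unsigned long long)next_event_ns(1000, 0, 1010, false));
	return 0;
}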
@@ -1947,6 +1950,7 @@ int timers_prepare_cpu(unsigned int cpu) base = per_cpu_ptr(&timer_bases[b], cpu); base->clk = jiffies; base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA; + base->timers_pending = false; base->is_idle = false; } return 0; diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index d567b1717c4c..3ee23f4d437f 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -219,6 +219,11 @@ config DYNAMIC_FTRACE_WITH_DIRECT_CALLS depends on DYNAMIC_FTRACE_WITH_REGS depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS +config DYNAMIC_FTRACE_WITH_ARGS + def_bool y + depends on DYNAMIC_FTRACE + depends on HAVE_DYNAMIC_FTRACE_WITH_ARGS + config FUNCTION_PROFILER bool "Kernel function profiler" depends on FUNCTION_TRACER diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index b4916ef388ad..fdd14072fc3b 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -990,28 +990,29 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_get_numa_node_id_proto; case BPF_FUNC_perf_event_read: return &bpf_perf_event_read_proto; - case BPF_FUNC_probe_write_user: - return bpf_get_probe_write_proto(); case BPF_FUNC_current_task_under_cgroup: return &bpf_current_task_under_cgroup_proto; case BPF_FUNC_get_prandom_u32: return &bpf_get_prandom_u32_proto; + case BPF_FUNC_probe_write_user: + return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ? + NULL : bpf_get_probe_write_proto(); case BPF_FUNC_probe_read_user: return &bpf_probe_read_user_proto; case BPF_FUNC_probe_read_kernel: - return security_locked_down(LOCKDOWN_BPF_READ) < 0 ? + return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? NULL : &bpf_probe_read_kernel_proto; case BPF_FUNC_probe_read_user_str: return &bpf_probe_read_user_str_proto; case BPF_FUNC_probe_read_kernel_str: - return security_locked_down(LOCKDOWN_BPF_READ) < 0 ? + return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? NULL : &bpf_probe_read_kernel_str_proto; #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE case BPF_FUNC_probe_read: - return security_locked_down(LOCKDOWN_BPF_READ) < 0 ? + return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? NULL : &bpf_probe_read_compat_proto; case BPF_FUNC_probe_read_str: - return security_locked_down(LOCKDOWN_BPF_READ) < 0 ? + return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? NULL : &bpf_probe_read_compat_str_proto; #endif #ifdef CONFIG_CGROUPS diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index e6fb3e6e1ffc..7b180f61e6d3 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -5985,7 +5985,8 @@ ftrace_graph_release(struct inode *inode, struct file *file) * infrastructure to do the synchronization, thus we must do it * ourselves. 
*/ - synchronize_rcu_tasks_rude(); + if (old_hash != EMPTY_HASH) + synchronize_rcu_tasks_rude(); free_ftrace_hash(old_hash); } @@ -7544,7 +7545,7 @@ int ftrace_is_dead(void) */ int register_ftrace_function(struct ftrace_ops *ops) { - int ret = -1; + int ret; ftrace_ops_init(ops); diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index d1463eac11a3..e592d1df6f88 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -3880,10 +3880,30 @@ static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) if (unlikely(!head)) return true; - return reader->read == rb_page_commit(reader) && - (commit == reader || - (commit == head && - head->read == rb_page_commit(commit))); + /* Reader should exhaust content in reader page */ + if (reader->read != rb_page_commit(reader)) + return false; + + /* + * If writers are committing on the reader page and all committed + * content has been read, the ring buffer is empty. + */ + if (commit == reader) + return true; + + /* + * If writers are committing on a page other than the reader page + * and the head page, there should always be content to read. + */ + if (commit != head) + return false; + + /* + * Writers are committing on the head page, so we just need to + * check whether there is committed data; the reader will swap + * the reader page with the head page when it needs to read data. + */ + return rb_page_commit(commit) == 0; } /** diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index f8b80b5bab71..a1adb29ef5c1 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -2897,14 +2897,26 @@ int tracepoint_printk_sysctl(struct ctl_table *table, int write, void trace_event_buffer_commit(struct trace_event_buffer *fbuffer) { + enum event_trigger_type tt = ETT_NONE; + struct trace_event_file *file = fbuffer->trace_file; + + if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event, + fbuffer->entry, &tt)) + goto discard; + if (static_key_false(&tracepoint_printk_key.key)) output_printk(fbuffer); if (static_branch_unlikely(&trace_event_exports_enabled)) ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT); + + trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer, + fbuffer->event, fbuffer->trace_ctx, fbuffer->regs); + +discard: + if (tt) + event_triggers_post_call(file, tt); + } EXPORT_SYMBOL_GPL(trace_event_buffer_commit); @@ -5609,6 +5621,10 @@ static const char readme_msg[] = "\t [:name=histname1]\n" "\t [:<handler>.<action>]\n" "\t [if <filter>]\n\n" + "\t Note, special fields can be used as well:\n" + "\t common_timestamp - to record current timestamp\n" + "\t common_cpu - to record the CPU the event happened on\n" + "\n" "\t When a matching event is hit, an entry is added to a hash\n" "\t table using the key(s) and value(s) named, and the value of a\n" "\t sum called 'hitcount' is incremented.
Keys and values\n" @@ -9131,8 +9147,10 @@ static int trace_array_create_dir(struct trace_array *tr) return -EINVAL; ret = event_trace_add_tracer(tr->dir, tr); - if (ret) + if (ret) { tracefs_remove(tr->dir); + return ret; + } init_tracer_tracefs(tr, tr->dir); __update_tracer_options(tr); diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index a180abf76d4e..4a0e693000c6 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -1389,38 +1389,6 @@ event_trigger_unlock_commit(struct trace_event_file *file, event_triggers_post_call(file, tt); } -/** - * event_trigger_unlock_commit_regs - handle triggers and finish event commit - * @file: The file pointer associated with the event - * @buffer: The ring buffer that the event is being written to - * @event: The event meta data in the ring buffer - * @entry: The event itself - * @trace_ctx: The tracing context flags. - * - * This is a helper function to handle triggers that require data - * from the event itself. It also tests the event against filters and - * if the event is soft disabled and should be discarded. - * - * Same as event_trigger_unlock_commit() but calls - * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit(). - */ -static inline void -event_trigger_unlock_commit_regs(struct trace_event_file *file, - struct trace_buffer *buffer, - struct ring_buffer_event *event, - void *entry, unsigned int trace_ctx, - struct pt_regs *regs) -{ - enum event_trigger_type tt = ETT_NONE; - - if (!__event_trigger_test_discard(file, buffer, event, entry, &tt)) - trace_buffer_unlock_commit_regs(file->tr, buffer, event, - trace_ctx, regs); - - if (tt) - event_triggers_post_call(file, tt); -} - #define FILTER_PRED_INVALID ((unsigned short)-1) #define FILTER_PRED_IS_RIGHT (1 << 15) #define FILTER_PRED_FOLD (1 << 15) diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 0207aeed31e6..a48aa2a2875b 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -65,7 +65,8 @@ C(INVALID_SORT_MODIFIER,"Invalid sort modifier"), \ C(EMPTY_SORT_FIELD, "Empty sort field"), \ C(TOO_MANY_SORT_FIELDS, "Too many sort fields (Max = 2)"), \ - C(INVALID_SORT_FIELD, "Sort field must be a key or a val"), + C(INVALID_SORT_FIELD, "Sort field must be a key or a val"), \ + C(INVALID_STR_OPERAND, "String type can not be an operand in expression"), #undef C #define C(a, b) HIST_ERR_##a @@ -1111,7 +1112,7 @@ static const char *hist_field_name(struct hist_field *field, field->flags & HIST_FIELD_FL_ALIAS) field_name = hist_field_name(field->operands[0], ++level); else if (field->flags & HIST_FIELD_FL_CPU) - field_name = "cpu"; + field_name = "common_cpu"; else if (field->flags & HIST_FIELD_FL_EXPR || field->flags & HIST_FIELD_FL_VAR_REF) { if (field->system) { @@ -1689,7 +1690,9 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data, if (WARN_ON_ONCE(!field)) goto out; - if (is_string_field(field)) { + /* Pointers to strings are just pointers and dangerous to dereference */ + if (is_string_field(field) && + (field->filter_type != FILTER_PTR_STRING)) { flags |= HIST_FIELD_FL_STRING; hist_field->size = MAX_FILTER_STR_VAL; @@ -1989,14 +1992,24 @@ parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file, hist_data->enable_timestamps = true; if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS) hist_data->attrs->ts_in_usecs = true; - } else if (strcmp(field_name, "cpu") == 0) + } else if (strcmp(field_name, "common_cpu") == 0) *flags |= HIST_FIELD_FL_CPU; else 
{ field = trace_find_event_field(file->event_call, field_name); if (!field || !field->size) { - hist_err(tr, HIST_ERR_FIELD_NOT_FOUND, errpos(field_name)); - field = ERR_PTR(-EINVAL); - goto out; + /* + * For backward compatibility, if field_name + * was "cpu", then we treat this the same as + * common_cpu. + */ + if (strcmp(field_name, "cpu") == 0) { + *flags |= HIST_FIELD_FL_CPU; + } else { + hist_err(tr, HIST_ERR_FIELD_NOT_FOUND, + errpos(field_name)); + field = ERR_PTR(-EINVAL); + goto out; + } } } out: @@ -2144,6 +2157,13 @@ static struct hist_field *parse_unary(struct hist_trigger_data *hist_data, ret = PTR_ERR(operand1); goto free; } + if (operand1->flags & HIST_FIELD_FL_STRING) { + /* String type can not be the operand of unary operator. */ + hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(str)); + destroy_hist_field(operand1, 0); + ret = -EINVAL; + goto free; + } expr->flags |= operand1->flags & (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS); @@ -2245,6 +2265,11 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data, operand1 = NULL; goto free; } + if (operand1->flags & HIST_FIELD_FL_STRING) { + hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(operand1_str)); + ret = -EINVAL; + goto free; + } /* rest of string could be another expression e.g. b+c in a+b+c */ operand_flags = 0; @@ -2254,6 +2279,11 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data, operand2 = NULL; goto free; } + if (operand2->flags & HIST_FIELD_FL_STRING) { + hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(str)); + ret = -EINVAL; + goto free; + } ret = check_expr_operands(file->tr, operand1, operand2); if (ret) @@ -2275,6 +2305,10 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data, expr->operands[0] = operand1; expr->operands[1] = operand2; + + /* The operand sizes should be the same, so just pick one */ + expr->size = operand1->size; + expr->operator = field_op; expr->name = expr_str(expr, 0); expr->type = kstrdup(operand1->type, GFP_KERNEL); @@ -3396,6 +3430,8 @@ trace_action_create_field_var(struct hist_trigger_data *hist_data, event = data->match_data.event; } + if (!event) + goto free; /* * At this point, we're looking at a field on another * event. 
Because we can't modify a hist trigger on @@ -4495,8 +4531,6 @@ static inline void add_to_key(char *compound_key, void *key, field = key_field->field; if (field->filter_type == FILTER_DYN_STRING) size = *(u32 *)(rec + field->offset) >> 16; - else if (field->filter_type == FILTER_PTR_STRING) - size = strlen(key); else if (field->filter_type == FILTER_STATIC_STRING) size = field->size; @@ -5085,7 +5119,7 @@ static void hist_field_print(struct seq_file *m, struct hist_field *hist_field) seq_printf(m, "%s=", hist_field->var.name); if (hist_field->flags & HIST_FIELD_FL_CPU) - seq_puts(m, "cpu"); + seq_puts(m, "common_cpu"); else if (field_name) { if (hist_field->flags & HIST_FIELD_FL_VAR_REF || hist_field->flags & HIST_FIELD_FL_ALIAS) diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c index 2ac75eb6aa86..9315fc03e303 100644 --- a/kernel/trace/trace_events_synth.c +++ b/kernel/trace/trace_events_synth.c @@ -893,15 +893,13 @@ static struct synth_event *alloc_synth_event(const char *name, int n_fields, dyn_event_init(&event->devent, &synth_event_ops); for (i = 0, j = 0; i < n_fields; i++) { + fields[i]->field_pos = i; event->fields[i] = fields[i]; - if (fields[i]->is_dynamic) { - event->dynamic_fields[j] = fields[i]; - event->dynamic_fields[j]->field_pos = i; + if (fields[i]->is_dynamic) event->dynamic_fields[j++] = fields[i]; - event->n_dynamic_fields++; - } } + event->n_dynamic_fields = j; event->n_fields = n_fields; out: return event; diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c index a6c0cdaf4b87..14f46aae1981 100644 --- a/kernel/trace/trace_hwlat.c +++ b/kernel/trace/trace_hwlat.c @@ -327,7 +327,7 @@ static void move_to_next_cpu(void) get_online_cpus(); cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask); - next_cpu = cpumask_next(smp_processor_id(), current_mask); + next_cpu = cpumask_next(raw_smp_processor_id(), current_mask); put_online_cpus(); if (next_cpu >= nr_cpu_ids) diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c index a7e3c24dee13..b61eefe5ccf5 100644 --- a/kernel/trace/trace_osnoise.c +++ b/kernel/trace/trace_osnoise.c @@ -253,6 +253,7 @@ static struct osnoise_data { */ static bool osnoise_busy; +#ifdef CONFIG_PREEMPT_RT /* * Print the osnoise header info. 
*/ @@ -261,6 +262,35 @@ static void print_osnoise_headers(struct seq_file *s) if (osnoise_data.tainted) seq_puts(s, "# osnoise is tainted!\n"); + seq_puts(s, "# _-------=> irqs-off\n"); + seq_puts(s, "# / _------=> need-resched\n"); + seq_puts(s, "# | / _-----=> need-resched-lazy\n"); + seq_puts(s, "# || / _----=> hardirq/softirq\n"); + seq_puts(s, "# ||| / _---=> preempt-depth\n"); + seq_puts(s, "# |||| / _--=> preempt-lazy-depth\n"); + seq_puts(s, "# ||||| / _-=> migrate-disable\n"); + + seq_puts(s, "# |||||| / "); + seq_puts(s, " MAX\n"); + + seq_puts(s, "# ||||| / "); + seq_puts(s, " SINGLE Interference counters:\n"); + + seq_puts(s, "# ||||||| RUNTIME "); + seq_puts(s, " NOISE %% OF CPU NOISE +-----------------------------+\n"); + + seq_puts(s, "# TASK-PID CPU# ||||||| TIMESTAMP IN US "); + seq_puts(s, " IN US AVAILABLE IN US HW NMI IRQ SIRQ THREAD\n"); + + seq_puts(s, "# | | | ||||||| | | "); + seq_puts(s, " | | | | | | | |\n"); +} +#else /* CONFIG_PREEMPT_RT */ +static void print_osnoise_headers(struct seq_file *s) +{ + if (osnoise_data.tainted) + seq_puts(s, "# osnoise is tainted!\n"); + seq_puts(s, "# _-----=> irqs-off\n"); seq_puts(s, "# / _----=> need-resched\n"); seq_puts(s, "# | / _---=> hardirq/softirq\n"); @@ -279,6 +309,7 @@ static void print_osnoise_headers(struct seq_file *s) seq_puts(s, "# | | | |||| | | "); seq_puts(s, " | | | | | | | |\n"); } +#endif /* CONFIG_PREEMPT_RT */ /* * osnoise_taint - report an osnoise error. @@ -323,6 +354,24 @@ static void trace_osnoise_sample(struct osnoise_sample *sample) /* * Print the timerlat header info. */ +#ifdef CONFIG_PREEMPT_RT +static void print_timerlat_headers(struct seq_file *s) +{ + seq_puts(s, "# _-------=> irqs-off\n"); + seq_puts(s, "# / _------=> need-resched\n"); + seq_puts(s, "# | / _-----=> need-resched-lazy\n"); + seq_puts(s, "# || / _----=> hardirq/softirq\n"); + seq_puts(s, "# ||| / _---=> preempt-depth\n"); + seq_puts(s, "# |||| / _--=> preempt-lazy-depth\n"); + seq_puts(s, "# ||||| / _-=> migrate-disable\n"); + seq_puts(s, "# |||||| /\n"); + seq_puts(s, "# ||||||| ACTIVATION\n"); + seq_puts(s, "# TASK-PID CPU# ||||||| TIMESTAMP ID "); + seq_puts(s, " CONTEXT LATENCY\n"); + seq_puts(s, "# | | | ||||||| | | "); + seq_puts(s, " | |\n"); +} +#else /* CONFIG_PREEMPT_RT */ static void print_timerlat_headers(struct seq_file *s) { seq_puts(s, "# _-----=> irqs-off\n"); @@ -336,6 +385,7 @@ static void print_timerlat_headers(struct seq_file *s) seq_puts(s, "# | | | |||| | | "); seq_puts(s, " | |\n"); } +#endif /* CONFIG_PREEMPT_RT */ /* * Record a timerlat_sample into the tracer buffer. @@ -1025,9 +1075,13 @@ diff_osn_sample_stats(struct osnoise_variables *osn_var, struct osnoise_sample * /* * osnoise_stop_tracing - Stop tracing and the tracer.
*/ -static void osnoise_stop_tracing(void) +static __always_inline void osnoise_stop_tracing(void) { struct trace_array *tr = osnoise_trace; + + trace_array_printk_buf(tr->array_buffer.buffer, _THIS_IP_, + "stop tracing hit on cpu %d\n", smp_processor_id()); + tracer_tracing_off(tr); } diff --git a/kernel/trace/trace_synth.h b/kernel/trace/trace_synth.h index 6e146b959dcd..4007fe95cf42 100644 --- a/kernel/trace/trace_synth.h +++ b/kernel/trace/trace_synth.h @@ -14,10 +14,10 @@ struct synth_field { char *name; size_t size; unsigned int offset; + unsigned int field_pos; bool is_signed; bool is_string; bool is_dynamic; - bool field_pos; }; struct synth_event { diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index 976bf8ce8039..efd14c79fab4 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c @@ -15,12 +15,57 @@ #include <linux/sched/task.h> #include <linux/static_key.h> +enum tp_func_state { + TP_FUNC_0, + TP_FUNC_1, + TP_FUNC_2, + TP_FUNC_N, +}; + extern tracepoint_ptr_t __start___tracepoints_ptrs[]; extern tracepoint_ptr_t __stop___tracepoints_ptrs[]; DEFINE_SRCU(tracepoint_srcu); EXPORT_SYMBOL_GPL(tracepoint_srcu); +enum tp_transition_sync { + TP_TRANSITION_SYNC_1_0_1, + TP_TRANSITION_SYNC_N_2_1, + + _NR_TP_TRANSITION_SYNC, +}; + +struct tp_transition_snapshot { + unsigned long rcu; + unsigned long srcu; + bool ongoing; +}; + +/* Protected by tracepoints_mutex */ +static struct tp_transition_snapshot tp_transition_snapshot[_NR_TP_TRANSITION_SYNC]; + +static void tp_rcu_get_state(enum tp_transition_sync sync) +{ + struct tp_transition_snapshot *snapshot = &tp_transition_snapshot[sync]; + + /* Keep the latest get_state snapshot. */ + snapshot->rcu = get_state_synchronize_rcu(); + snapshot->srcu = start_poll_synchronize_srcu(&tracepoint_srcu); + snapshot->ongoing = true; +} + +static void tp_rcu_cond_sync(enum tp_transition_sync sync) +{ + struct tp_transition_snapshot *snapshot = &tp_transition_snapshot[sync]; + + if (!snapshot->ongoing) + return; + cond_synchronize_rcu(snapshot->rcu); + if (!poll_state_synchronize_srcu(&tracepoint_srcu, snapshot->srcu)) + synchronize_srcu(&tracepoint_srcu); + snapshot->ongoing = false; +} + /* Set to 1 to enable tracepoint debug output */ static const int tracepoint_debug; @@ -246,26 +291,29 @@ static void *func_remove(struct tracepoint_func **funcs, return old; } -static void tracepoint_update_call(struct tracepoint *tp, struct tracepoint_func *tp_funcs, bool sync) +/* + * Count the number of functions (enum tp_func_state) in a tp_funcs array. + */ +static enum tp_func_state nr_func_state(const struct tracepoint_func *tp_funcs) +{ + if (!tp_funcs) + return TP_FUNC_0; + if (!tp_funcs[1].func) + return TP_FUNC_1; + if (!tp_funcs[2].func) + return TP_FUNC_2; + return TP_FUNC_N; /* 3 or more */ +} + +static void tracepoint_update_call(struct tracepoint *tp, struct tracepoint_func *tp_funcs) { void *func = tp->iterator; /* Synthetic events do not have static call sites */ if (!tp->static_call_key) return; - - if (!tp_funcs[1].func) { + if (nr_func_state(tp_funcs) == TP_FUNC_1) func = tp_funcs[0].func; - /* - * If going from the iterator back to a single caller, - * we need to synchronize with __DO_TRACE to make sure - * that the data passed to the callback is the one that - * belongs to that callback. - */ - if (sync) - tracepoint_synchronize_unregister(); - } - __static_call_update(tp->static_call_key, tp->static_call_tramp, func); } @@ -299,9 +347,41 @@ static int tracepoint_add_func(struct tracepoint *tp, * a pointer to it. 
This array is referenced by __DO_TRACE from * include/linux/tracepoint.h using rcu_dereference_sched(). */ - rcu_assign_pointer(tp->funcs, tp_funcs); - tracepoint_update_call(tp, tp_funcs, false); - static_key_enable(&tp->key); + switch (nr_func_state(tp_funcs)) { + case TP_FUNC_1: /* 0->1 */ + /* + * Make sure new static func never uses old data after a + * 1->0->1 transition sequence. + */ + tp_rcu_cond_sync(TP_TRANSITION_SYNC_1_0_1); + /* Set static call to first function */ + tracepoint_update_call(tp, tp_funcs); + /* Both iterator and static call handle NULL tp->funcs */ + rcu_assign_pointer(tp->funcs, tp_funcs); + static_key_enable(&tp->key); + break; + case TP_FUNC_2: /* 1->2 */ + /* Set iterator static call */ + tracepoint_update_call(tp, tp_funcs); + /* + * Iterator callback installed before updating tp->funcs. + * Requires ordering between RCU assign/dereference and + * static call update/call. + */ + fallthrough; + case TP_FUNC_N: /* N->N+1 (N>1) */ + rcu_assign_pointer(tp->funcs, tp_funcs); + /* + * Make sure static func never uses incorrect data after a + * N->...->2->1 (N>1) transition sequence. + */ + if (tp_funcs[0].data != old[0].data) + tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1); + break; + default: + WARN_ON_ONCE(1); + break; + } release_probes(old); return 0; @@ -328,17 +408,52 @@ static int tracepoint_remove_func(struct tracepoint *tp, /* Failed allocating new tp_funcs, replaced func with stub */ return 0; - if (!tp_funcs) { + switch (nr_func_state(tp_funcs)) { + case TP_FUNC_0: /* 1->0 */ /* Removed last function */ if (tp->unregfunc && static_key_enabled(&tp->key)) tp->unregfunc(); static_key_disable(&tp->key); + /* Set iterator static call */ + tracepoint_update_call(tp, tp_funcs); + /* Both iterator and static call handle NULL tp->funcs */ + rcu_assign_pointer(tp->funcs, NULL); + /* + * Make sure new static func never uses old data after a + * 1->0->1 transition sequence. + */ + tp_rcu_get_state(TP_TRANSITION_SYNC_1_0_1); + break; + case TP_FUNC_1: /* 2->1 */ rcu_assign_pointer(tp->funcs, tp_funcs); - } else { + /* + * Make sure static func never uses incorrect data after a + * N->...->2->1 (N>2) transition sequence. If the first + * element's data has changed, then force the synchronization + * to prevent current readers that have loaded the old data + * from calling the new function. + */ + if (tp_funcs[0].data != old[0].data) + tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1); + tp_rcu_cond_sync(TP_TRANSITION_SYNC_N_2_1); + /* Set static call to first function */ + tracepoint_update_call(tp, tp_funcs); + break; + case TP_FUNC_2: /* N->N-1 (N>2) */ + fallthrough; + case TP_FUNC_N: rcu_assign_pointer(tp->funcs, tp_funcs); - tracepoint_update_call(tp, tp_funcs, - tp_funcs[0].func != old[0].func); + /* + * Make sure static func never uses incorrect data after a + * N->...->2->1 (N>2) transition sequence. 
+ */ + if (tp_funcs[0].data != old[0].data) + tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1); + break; + default: + WARN_ON_ONCE(1); + break; } release_probes(old); return 0; diff --git a/kernel/ucount.c b/kernel/ucount.c index 87799e2379bd..bb51849e6375 100644 --- a/kernel/ucount.c +++ b/kernel/ucount.c @@ -58,14 +58,17 @@ static struct ctl_table_root set_root = { .permissions = set_permissions, }; -#define UCOUNT_ENTRY(name) \ - { \ - .procname = name, \ - .maxlen = sizeof(int), \ - .mode = 0644, \ - .proc_handler = proc_dointvec_minmax, \ - .extra1 = SYSCTL_ZERO, \ - .extra2 = SYSCTL_INT_MAX, \ +static long ue_zero = 0; +static long ue_int_max = INT_MAX; + +#define UCOUNT_ENTRY(name) \ + { \ + .procname = name, \ + .maxlen = sizeof(long), \ + .mode = 0644, \ + .proc_handler = proc_doulongvec_minmax, \ + .extra1 = &ue_zero, \ + .extra2 = &ue_int_max, \ } static struct ctl_table user_table[] = { UCOUNT_ENTRY("max_user_namespaces"), @@ -160,6 +163,7 @@ struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid) { struct hlist_head *hashent = ucounts_hashentry(ns, uid); struct ucounts *ucounts, *new; + long overflow; spin_lock_irq(&ucounts_lock); ucounts = find_ucounts(ns, uid, hashent); @@ -184,8 +188,12 @@ struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid) return new; } } + overflow = atomic_add_negative(1, &ucounts->count); spin_unlock_irq(&ucounts_lock); - ucounts = get_ucounts(ucounts); + if (overflow) { + put_ucounts(ucounts); + return NULL; + } return ucounts; } @@ -193,8 +201,7 @@ void put_ucounts(struct ucounts *ucounts) { unsigned long flags; - if (atomic_dec_and_test(&ucounts->count)) { - spin_lock_irqsave(&ucounts_lock, flags); + if (atomic_dec_and_lock_irqsave(&ucounts->count, &ucounts_lock, flags)) { hlist_del_init(&ucounts->node); spin_unlock_irqrestore(&ucounts_lock, flags); kfree(ucounts); diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 50142fc08902..f148eacda55a 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -3676,15 +3676,21 @@ static void pwq_unbound_release_workfn(struct work_struct *work) unbound_release_work); struct workqueue_struct *wq = pwq->wq; struct worker_pool *pool = pwq->pool; - bool is_last; + bool is_last = false; - if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND))) - return; + /* + * when @pwq is not linked, it doesn't hold any reference to the + * @wq, and @wq is invalid to access. 
+ */ + if (!list_empty(&pwq->pwqs_node)) { + if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND))) + return; - mutex_lock(&wq->mutex); - list_del_rcu(&pwq->pwqs_node); - is_last = list_empty(&wq->pwqs); - mutex_unlock(&wq->mutex); + mutex_lock(&wq->mutex); + list_del_rcu(&pwq->pwqs_node); + is_last = list_empty(&wq->pwqs); + mutex_unlock(&wq->mutex); + } mutex_lock(&wq_pool_mutex); put_unbound_pool(pool); diff --git a/lib/Kconfig b/lib/Kconfig index d241fe476fda..5c9c0687f76d 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -683,9 +683,6 @@ config PARMAN config OBJAGG tristate "objagg" if COMPILE_TEST -config STRING_SELFTEST - tristate "Test string functions" - endmenu config GENERIC_IOREMAP diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 831212722924..5ddd575159fb 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -2180,6 +2180,9 @@ config ASYNC_RAID6_TEST config TEST_HEXDUMP tristate "Test functions located in the hexdump module at runtime" +config STRING_SELFTEST + tristate "Test string functions at runtime" + config TEST_STRING_HELPERS tristate "Test functions located in the string_helpers module at runtime" diff --git a/lib/devmem_is_allowed.c b/lib/devmem_is_allowed.c index c0d67c541849..60be9e24bd57 100644 --- a/lib/devmem_is_allowed.c +++ b/lib/devmem_is_allowed.c @@ -19,7 +19,7 @@ */ int devmem_is_allowed(unsigned long pfn) { - if (iomem_is_exclusive(pfn << PAGE_SHIFT)) + if (iomem_is_exclusive(PFN_PHYS(pfn))) return 0; if (!page_is_ram(pfn)) return 1; diff --git a/lib/once.c b/lib/once.c index 8b7d6235217e..59149bf3bfb4 100644 --- a/lib/once.c +++ b/lib/once.c @@ -3,10 +3,12 @@ #include <linux/spinlock.h> #include <linux/once.h> #include <linux/random.h> +#include <linux/module.h> struct once_work { struct work_struct work; struct static_key_true *key; + struct module *module; }; static void once_deferred(struct work_struct *w) @@ -16,10 +18,11 @@ static void once_deferred(struct work_struct *w) work = container_of(w, struct once_work, work); BUG_ON(!static_key_enabled(work->key)); static_branch_disable(work->key); + module_put(work->module); kfree(work); } -static void once_disable_jump(struct static_key_true *key) +static void once_disable_jump(struct static_key_true *key, struct module *mod) { struct once_work *w; @@ -29,6 +32,8 @@ static void once_disable_jump(struct static_key_true *key) INIT_WORK(&w->work, once_deferred); w->key = key; + w->module = mod; + __module_get(mod); schedule_work(&w->work); } @@ -53,11 +58,11 @@ bool __do_once_start(bool *done, unsigned long *flags) EXPORT_SYMBOL(__do_once_start); void __do_once_done(bool *done, struct static_key_true *once_key, - unsigned long *flags) + unsigned long *flags, struct module *mod) __releases(once_lock) { *done = true; spin_unlock_irqrestore(&once_lock, *flags); - once_disable_jump(once_key); + once_disable_jump(once_key, mod); } EXPORT_SYMBOL(__do_once_done); diff --git a/lib/test_hmm.c b/lib/test_hmm.c index 8c55c4723692..c259842f6d44 100644 --- a/lib/test_hmm.c +++ b/lib/test_hmm.c @@ -628,10 +628,8 @@ static int dmirror_check_atomic(struct dmirror *dmirror, unsigned long start, for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) { void *entry; - struct page *page; entry = xa_load(&dmirror->pt, pfn); - page = xa_untag_pointer(entry); if (xa_pointer_tag(entry) == DPT_XA_TAG_ATOMIC) return -EPERM; } diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 271f2ca862c8..f5561ea7d90a 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -398,12 +398,12 @@ static void cgwb_release_workfn(struct 
work_struct *work) blkcg_unpin_online(blkcg); fprop_local_destroy_percpu(&wb->memcg_completions); - percpu_ref_exit(&wb->refcnt); spin_lock_irq(&cgwb_lock); list_del(&wb->offline_node); spin_unlock_irq(&cgwb_lock); + percpu_ref_exit(&wb->refcnt); wb_exit(wb); WARN_ON_ONCE(!list_empty(&wb->b_attached)); kfree_rcu(wb, rcu); @@ -1558,9 +1558,12 @@ long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start, gup_flags |= FOLL_WRITE; /* - * See check_vma_flags(): Will return -EFAULT on incompatible mappings - * or with insufficient permissions. + * We want to report -EINVAL instead of -EFAULT for any permission + * problems or incompatible mappings. */ + if (check_vma_flags(vma, gup_flags)) + return -EINVAL; + return __get_user_pages(mm, start, nr_pages, gup_flags, NULL, NULL, locked); } diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 924553aa8f78..8ea35ba6699f 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2476,7 +2476,7 @@ void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma, if (!rc) { /* * This indicates there is an entry in the reserve map - * added by alloc_huge_page. We know it was added + * not added by alloc_huge_page. We know it was added * before the alloc_huge_page call, otherwise * HPageRestoreReserve would be set on the page. * Remove the entry so that a subsequent allocation @@ -4660,7 +4660,9 @@ retry_avoidcopy: spin_unlock(ptl); mmu_notifier_invalidate_range_end(&range); out_release_all: - restore_reserve_on_error(h, vma, haddr, new_page); + /* No restore in case of successful pagetable update (Break COW) */ + if (new_page != old_page) + restore_reserve_on_error(h, vma, haddr, new_page); put_page(new_page); out_release_old: put_page(old_page); @@ -4776,7 +4778,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm, pte_t new_pte; spinlock_t *ptl; unsigned long haddr = address & huge_page_mask(h); - bool new_page = false; + bool new_page, new_pagecache_page = false; /* * Currently, we are forced to kill the process in the event the @@ -4799,6 +4801,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm, goto out; retry: + new_page = false; page = find_lock_page(mapping, idx); if (!page) { /* Check for page in userfault range */ @@ -4842,6 +4845,7 @@ retry: goto retry; goto out; } + new_pagecache_page = true; } else { lock_page(page); if (unlikely(anon_vma_prepare(vma))) { @@ -4926,7 +4930,9 @@ backout: spin_unlock(ptl); backout_unlocked: unlock_page(page); - restore_reserve_on_error(h, vma, haddr, page); + /* restore reserve for newly allocated pages not in page cache */ + if (new_page && !new_pagecache_page) + restore_reserve_on_error(h, vma, haddr, page); put_page(page); goto out; } @@ -5135,6 +5141,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, int ret = -ENOMEM; struct page *page; int writable; + bool new_pagecache_page = false; if (is_continue) { ret = -EFAULT; @@ -5228,6 +5235,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, ret = huge_add_to_page_cache(page, mapping, idx); if (ret) goto out_release_nounlock; + new_pagecache_page = true; } ptl = huge_pte_lockptr(h, dst_mm, dst_pte); @@ -5291,7 +5299,8 @@ out_release_unlock: if (vm_shared || is_continue) unlock_page(page); out_release_nounlock: - restore_reserve_on_error(h, dst_vma, dst_addr, page); + if (!new_pagecache_page) + restore_reserve_on_error(h, dst_vma, dst_addr, page); put_page(page); goto out; } @@ -5440,8 +5449,9 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, continue; } - refs = min3(pages_per_huge_page(h) 
- pfn_offset, - (vma->vm_end - vaddr) >> PAGE_SHIFT, remainder); + /* vaddr may not be aligned to PAGE_SIZE */ + refs = min3(pages_per_huge_page(h) - pfn_offset, remainder, + (vma->vm_end - ALIGN_DOWN(vaddr, PAGE_SIZE)) >> PAGE_SHIFT); if (pages || vmas) record_subpages_vmas(mem_map_offset(page, pfn_offset), diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index 98e3059bfea4..d739cdd1621a 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -9,6 +9,7 @@ #ifdef CONFIG_KASAN_HW_TAGS #include <linux/static_key.h> +#include "../slab.h" DECLARE_STATIC_KEY_FALSE(kasan_flag_stacktrace); extern bool kasan_flag_async __ro_after_init; @@ -387,6 +388,17 @@ static inline void kasan_unpoison(const void *addr, size_t size, bool init) if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK)) return; + /* + * Explicitly initialize the memory with the precise object size to + * avoid overwriting the SLAB redzone. This disables initialization in + * the arch code and may thus lead to performance penalty. The penalty + * is accepted since SLAB redzones aren't enabled in production builds. + */ + if (__slub_debug_enabled() && + init && ((unsigned long)size & KASAN_GRANULE_MASK)) { + init = false; + memzero_explicit((void *)addr, size); + } size = round_up(size, KASAN_GRANULE_SIZE); hw_set_mem_tag_range((void *)addr, size, tag, init); diff --git a/mm/kfence/core.c b/mm/kfence/core.c index d7666ace9d2e..575c685aa642 100644 --- a/mm/kfence/core.c +++ b/mm/kfence/core.c @@ -734,6 +734,22 @@ void kfence_shutdown_cache(struct kmem_cache *s) void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) { /* + * Perform size check before switching kfence_allocation_gate, so that + * we don't disable KFENCE without making an allocation. + */ + if (size > PAGE_SIZE) + return NULL; + + /* + * Skip allocations from non-default zones, including DMA. We cannot + * guarantee that pages in the KFENCE pool will have the requested + * properties (e.g. reside in DMAable memory). + */ + if ((flags & GFP_ZONEMASK) || + (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) + return NULL; + + /* * allocation_gate only needs to become non-zero, so it doesn't make * sense to continue writing to it and pay the associated contention * cost, in case we have a large number of concurrent allocations. 
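The two early returns added to __kfence_alloc() above are about ordering: requests the pool can never satisfy must be rejected before the one-per-interval sampling gate is consumed, otherwise an oversized or DMA-zone allocation burns the sample for nothing; the next hunk accordingly removes the old size check that sat after the gate. A userspace sketch of that gate pattern (the gate reset, the pool, and all names here are illustrative, not kfence internals):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define SLOT_SIZE 4096		/* PAGE_SIZE stand-in */

static atomic_int allocation_gate;	/* reset to 0 periodically elsewhere */
static char slot[SLOT_SIZE];

/* Cheap compatibility checks come first; only a request that could
 * actually be served is allowed to consume the sampling gate. */
static void *sampled_alloc(size_t size, bool needs_special_zone)
{
	if (size > SLOT_SIZE)		/* cannot be guarded at all */
		return NULL;
	if (needs_special_zone)		/* pool pages would not qualify */
		return NULL;

	/* First caller after each reset wins the sample; everyone else
	 * takes the normal allocation path (NULL here). */
	if (atomic_fetch_add(&allocation_gate, 1) > 0)
		return NULL;

	return slot;			/* the guarded allocation */
}

int main(void)
{
	printf("%p\n", sampled_alloc(128, false));	/* sampled */
	printf("%p\n", sampled_alloc(128, false));	/* gate used up */
	printf("%p\n", sampled_alloc(8192, false));	/* too large */
	return 0;
}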
@@ -757,9 +773,6 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) if (!READ_ONCE(kfence_enabled)) return NULL; - if (size > PAGE_SIZE) - return NULL; - return kfence_guarded_alloc(s, size, flags); } diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c index 7f24b9bcb2ec..942cbc16ad26 100644 --- a/mm/kfence/kfence_test.c +++ b/mm/kfence/kfence_test.c @@ -852,7 +852,7 @@ static void kfence_test_exit(void) tracepoint_synchronize_unregister(); } -late_initcall(kfence_test_init); +late_initcall_sync(kfence_test_init); module_exit(kfence_test_exit); MODULE_LICENSE("GPL v2"); diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 228a2fbe0657..73d46d16d575 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -290,7 +290,7 @@ static void hex_dump_object(struct seq_file *seq, warn_or_seq_printf(seq, " hex dump (first %zu bytes):\n", len); kasan_disable_current(); warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE, - HEX_GROUP_SIZE, ptr, len, HEX_ASCII); + HEX_GROUP_SIZE, kasan_reset_tag((void *)ptr), len, HEX_ASCII); kasan_enable_current(); } @@ -1171,7 +1171,7 @@ static bool update_checksum(struct kmemleak_object *object) kasan_disable_current(); kcsan_disable_current(); - object->checksum = crc32(0, (void *)object->pointer, object->size); + object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size); kasan_enable_current(); kcsan_enable_current(); @@ -1246,7 +1246,7 @@ static void scan_block(void *_start, void *_end, break; kasan_disable_current(); - pointer = *ptr; + pointer = *(unsigned long *)kasan_reset_tag((void *)ptr); kasan_enable_current(); untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer); diff --git a/mm/madvise.c b/mm/madvise.c index 6d3d348b17f4..5c065bc8b5f6 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -862,10 +862,12 @@ static long madvise_populate(struct vm_area_struct *vma, switch (pages) { case -EINTR: return -EINTR; - case -EFAULT: /* Incompatible mappings / permissions. */ + case -EINVAL: /* Incompatible mappings / permissions. 
*/ return -EINVAL; case -EHWPOISON: return -EHWPOISON; + case -EFAULT: /* VM_FAULT_SIGBUS or VM_FAULT_SIGSEGV */ + return -EFAULT; default: pr_warn_once("%s: unhandled return value: %ld\n", __func__, pages); diff --git a/mm/memblock.c b/mm/memblock.c index 0041ff62c584..de7b553baa50 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -947,7 +947,8 @@ static bool should_skip_region(struct memblock_type *type, return true; /* skip hotpluggable memory regions if needed */ - if (movable_node_is_enabled() && memblock_is_hotpluggable(m)) + if (movable_node_is_enabled() && memblock_is_hotpluggable(m) && + !(flags & MEMBLOCK_HOTPLUG)) return true; /* if we want mirror memory skip non-mirror memory regions */ diff --git a/mm/memcontrol.c b/mm/memcontrol.c index ae1f5d0cb581..702a81dfe72d 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3106,13 +3106,15 @@ void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat, stock->cached_pgdat = pgdat; } else if (stock->cached_pgdat != pgdat) { /* Flush the existing cached vmstat data */ + struct pglist_data *oldpg = stock->cached_pgdat; + if (stock->nr_slab_reclaimable_b) { - mod_objcg_mlstate(objcg, pgdat, NR_SLAB_RECLAIMABLE_B, + mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B, stock->nr_slab_reclaimable_b); stock->nr_slab_reclaimable_b = 0; } if (stock->nr_slab_unreclaimable_b) { - mod_objcg_mlstate(objcg, pgdat, NR_SLAB_UNRECLAIMABLE_B, + mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B, stock->nr_slab_unreclaimable_b); stock->nr_slab_unreclaimable_b = 0; } @@ -3574,7 +3576,8 @@ static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) unsigned long val; if (mem_cgroup_is_root(memcg)) { - cgroup_rstat_flush(memcg->css.cgroup); + /* mem_cgroup_threshold() calls here from irqsafe context */ + cgroup_rstat_flush_irqsafe(memcg->css.cgroup); val = memcg_page_state(memcg, NR_FILE_PAGES) + memcg_page_state(memcg, NR_ANON_MAPPED); if (swap) diff --git a/mm/memory-failure.c b/mm/memory-failure.c index eefd823deb67..470400cc7513 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1146,7 +1146,7 @@ static int __get_hwpoison_page(struct page *page) * unexpected races caused by taking a page refcount. */ if (!HWPoisonHandlable(head)) - return 0; + return -EBUSY; if (PageTransHuge(head)) { /* @@ -1199,9 +1199,15 @@ try_again: } goto out; } else if (ret == -EBUSY) { - /* We raced with freeing huge page to buddy, retry. */ - if (pass++ < 3) + /* + * We raced with (possibly temporary) unhandlable + * page, retry. 
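+ * (shake_page() below nudges the page off transient per-CPU state,
+ * e.g. the LRU pagevecs, so it has a chance of becoming handlable
+ * by the time we try again.)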
+ */ + if (pass++ < 3) { + shake_page(p, 1); goto try_again; + } + ret = -EIO; goto out; } } diff --git a/mm/memory.c b/mm/memory.c index 747a01d495f2..25fc46e87214 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4026,8 +4026,17 @@ vm_fault_t finish_fault(struct vm_fault *vmf) return ret; } - if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) + if (vmf->prealloc_pte) { + vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); + if (likely(pmd_none(*vmf->pmd))) { + mm_inc_nr_ptes(vma->vm_mm); + pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); + vmf->prealloc_pte = NULL; + } + spin_unlock(vmf->ptl); + } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) { return VM_FAULT_OOM; + } } /* See comment in handle_pte_fault() */ diff --git a/mm/migrate.c b/mm/migrate.c index 23cbd9de030b..7e240437e7d9 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -537,54 +537,6 @@ int migrate_huge_page_move_mapping(struct address_space *mapping, } /* - * Gigantic pages are so large that we do not guarantee that page++ pointer - * arithmetic will work across the entire page. We need something more - * specialized. - */ -static void __copy_gigantic_page(struct page *dst, struct page *src, - int nr_pages) -{ - int i; - struct page *dst_base = dst; - struct page *src_base = src; - - for (i = 0; i < nr_pages; ) { - cond_resched(); - copy_highpage(dst, src); - - i++; - dst = mem_map_next(dst, dst_base, i); - src = mem_map_next(src, src_base, i); - } -} - -void copy_huge_page(struct page *dst, struct page *src) -{ - int i; - int nr_pages; - - if (PageHuge(src)) { - /* hugetlbfs page */ - struct hstate *h = page_hstate(src); - nr_pages = pages_per_huge_page(h); - - if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) { - __copy_gigantic_page(dst, src, nr_pages); - return; - } - } else { - /* thp page */ - BUG_ON(!PageTransHuge(src)); - nr_pages = thp_nr_pages(src); - } - - for (i = 0; i < nr_pages; i++) { - cond_resched(); - copy_highpage(dst + i, src + i); - } -} - -/* * Copy the page to its new location */ void migrate_page_states(struct page *newpage, struct page *page) @@ -2116,7 +2068,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, LIST_HEAD(migratepages); new_page_t *new; bool compound; - unsigned int nr_pages = thp_nr_pages(page); + int nr_pages = thp_nr_pages(page); /* * PTE mapped THP or HugeTLB page can't reach here so the page could diff --git a/mm/mmap_lock.c b/mm/mmap_lock.c index f5852a058ce0..1854850b4b89 100644 --- a/mm/mmap_lock.c +++ b/mm/mmap_lock.c @@ -156,14 +156,14 @@ static inline void put_memcg_path_buf(void) #define TRACE_MMAP_LOCK_EVENT(type, mm, ...) \ do { \ const char *memcg_path; \ - preempt_disable(); \ + local_lock(&memcg_paths.lock); \ memcg_path = get_mm_memcg_path(mm); \ trace_mmap_lock_##type(mm, \ memcg_path != NULL ? 
memcg_path : "", \ ##__VA_ARGS__); \ if (likely(memcg_path != NULL)) \ put_memcg_path_buf(); \ - preempt_enable(); \ + local_unlock(&memcg_paths.lock); \ } while (0) #else /* !CONFIG_MEMCG */ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 3b97e17806be..eeb3a9cb36bb 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -840,21 +840,24 @@ void init_mem_debugging_and_hardening(void) } #endif - if (_init_on_alloc_enabled_early) { - if (page_poisoning_requested) - pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, " - "will take precedence over init_on_alloc\n"); - else - static_branch_enable(&init_on_alloc); - } - if (_init_on_free_enabled_early) { - if (page_poisoning_requested) - pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, " - "will take precedence over init_on_free\n"); - else - static_branch_enable(&init_on_free); + if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) && + page_poisoning_requested) { + pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, " + "will take precedence over init_on_alloc and init_on_free\n"); + _init_on_alloc_enabled_early = false; + _init_on_free_enabled_early = false; } + if (_init_on_alloc_enabled_early) + static_branch_enable(&init_on_alloc); + else + static_branch_disable(&init_on_alloc); + + if (_init_on_free_enabled_early) + static_branch_enable(&init_on_free); + else + static_branch_disable(&init_on_free); + #ifdef CONFIG_DEBUG_PAGEALLOC if (!debug_pagealloc_enabled()) return; @@ -3450,19 +3453,10 @@ void free_unref_page_list(struct list_head *list) * comment in free_unref_page. */ migratetype = get_pcppage_migratetype(page); - if (unlikely(migratetype >= MIGRATE_PCPTYPES)) { - if (unlikely(is_migrate_isolate(migratetype))) { - list_del(&page->lru); - free_one_page(page_zone(page), page, pfn, 0, - migratetype, FPI_NONE); - continue; - } - - /* - * Non-isolated types over MIGRATE_PCPTYPES get added - * to the MIGRATE_MOVABLE pcp list. - */ - set_pcppage_migratetype(page, MIGRATE_MOVABLE); + if (unlikely(is_migrate_isolate(migratetype))) { + list_del(&page->lru); + free_one_page(page_zone(page), page, pfn, 0, migratetype, FPI_NONE); + continue; } set_page_private(page, pfn); @@ -3472,7 +3466,15 @@ void free_unref_page_list(struct list_head *list) list_for_each_entry_safe(page, next, list, lru) { pfn = page_private(page); set_page_private(page, 0); + + /* + * Non-isolated types over MIGRATE_PCPTYPES get added + * to the MIGRATE_MOVABLE pcp list. + */ migratetype = get_pcppage_migratetype(page); + if (unlikely(migratetype >= MIGRATE_PCPTYPES)) + migratetype = MIGRATE_MOVABLE; + trace_mm_page_free_batched(page); free_unref_page_commit(page, pfn, migratetype, 0); @@ -3820,7 +3822,7 @@ static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) #endif /* CONFIG_FAIL_PAGE_ALLOC */ -static noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) +noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) { return __should_fail_alloc_page(gfp_mask, order); } @@ -5221,9 +5223,6 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, unsigned int alloc_flags = ALLOC_WMARK_LOW; int nr_populated = 0, nr_account = 0; - if (unlikely(nr_pages <= 0)) - return 0; - /* * Skip populated array elements to determine if any pages need * to be allocated before disabling IRQs. @@ -5231,19 +5230,35 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, while (page_array && nr_populated < nr_pages && page_array[nr_populated]) nr_populated++; + /* No pages requested? 
*/ + if (unlikely(nr_pages <= 0)) + goto out; + /* Already populated array? */ if (unlikely(page_array && nr_pages - nr_populated == 0)) - return nr_populated; + goto out; /* Use the single page allocator for one page. */ if (nr_pages - nr_populated == 1) goto failed; +#ifdef CONFIG_PAGE_OWNER + /* + * PAGE_OWNER may recurse into the allocator to allocate space to + * save the stack with pagesets.lock held. Releasing/reacquiring + * removes much of the performance benefit of bulk allocation so + * force the caller to allocate one page at a time as it'll have + * similar performance to added complexity to the bulk allocator. + */ + if (static_branch_unlikely(&page_owner_inited)) + goto failed; +#endif + /* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */ gfp &= gfp_allowed_mask; alloc_gfp = gfp; if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags)) - return 0; + goto out; gfp = alloc_gfp; /* Find an allowed local zone that meets the low watermark. */ @@ -5311,6 +5326,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account); zone_statistics(ac.preferred_zoneref->zone, zone, nr_account); +out: return nr_populated; failed_irq: @@ -5326,7 +5342,7 @@ failed: nr_populated++; } - return nr_populated; + goto out; } EXPORT_SYMBOL_GPL(__alloc_pages_bulk); diff --git a/mm/rmap.c b/mm/rmap.c index 795f9d5f8386..b9eb5c12f3fe 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1440,21 +1440,20 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, /* * If the page is mlock()d, we cannot swap it out. */ - if (!(flags & TTU_IGNORE_MLOCK)) { - if (vma->vm_flags & VM_LOCKED) { - /* PTE-mapped THP are never marked as mlocked */ - if (!PageTransCompound(page) || - (PageHead(page) && !PageDoubleMap(page))) { - /* - * Holding pte lock, we do *not* need - * mmap_lock here - */ - mlock_vma_page(page); - } - ret = false; - page_vma_mapped_walk_done(&pvmw); - break; - } + if (!(flags & TTU_IGNORE_MLOCK) && + (vma->vm_flags & VM_LOCKED)) { + /* + * PTE-mapped THP are never marked as mlocked: so do + * not set it on a DoubleMap THP, nor on an Anon THP + * (which may still be PTE-mapped after DoubleMap was + * cleared). But stop unmapping even in those cases. + */ + if (!PageTransCompound(page) || (PageHead(page) && + !PageDoubleMap(page) && !PageAnon(page))) + mlock_vma_page(page); + page_vma_mapped_walk_done(&pvmw); + ret = false; + break; } /* Unexpected PMD-mapped THP? */ @@ -1986,8 +1985,10 @@ static bool page_mlock_one(struct page *page, struct vm_area_struct *vma, */ if (vma->vm_flags & VM_LOCKED) { /* - * PTE-mapped THP are never marked as mlocked, but - * this function is never called when PageDoubleMap(). + * PTE-mapped THP are never marked as mlocked; but + * this function is never called on a DoubleMap THP, + * nor on an Anon THP (which may still be PTE-mapped + * after DoubleMap was cleared). 
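+ * (Anon THPs cannot reach this walk at all any more: page_mlock()
+ * below now bails out early on PageTransCompound() && PageAnon()
+ * pages before starting the rmap walk.)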
*/ mlock_vma_page(page); /* @@ -2022,6 +2023,10 @@ void page_mlock(struct page *page) VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page); VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page); + /* Anon THP are only marked as mlocked when singly mapped */ + if (PageTransCompound(page) && PageAnon(page)) + return; + rmap_walk(page, &rwc); } diff --git a/mm/secretmem.c b/mm/secretmem.c index f77d25467a14..030f02ddc7c1 100644 --- a/mm/secretmem.c +++ b/mm/secretmem.c @@ -152,6 +152,7 @@ static void secretmem_freepage(struct page *page) } const struct address_space_operations secretmem_aops = { + .set_page_dirty = __set_page_dirty_no_writeback, .freepage = secretmem_freepage, .migratepage = secretmem_migratepage, .isolate_page = secretmem_isolate_page, diff --git a/mm/shmem.c b/mm/shmem.c index 70d9ce294bb4..dacda7463d54 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1696,8 +1696,7 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index, struct address_space *mapping = inode->i_mapping; struct shmem_inode_info *info = SHMEM_I(inode); struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL; - struct swap_info_struct *si; - struct page *page = NULL; + struct page *page; swp_entry_t swap; int error; @@ -1705,12 +1704,6 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index, swap = radix_to_swp_entry(*pagep); *pagep = NULL; - /* Prevent swapoff from happening to us. */ - si = get_swap_device(swap); - if (!si) { - error = EINVAL; - goto failed; - } /* Look it up and read it in.. */ page = lookup_swap_cache(swap, NULL, 0); if (!page) { @@ -1772,8 +1765,6 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index, swap_free(swap); *pagep = page; - if (si) - put_swap_device(si); return 0; failed: if (!shmem_confirm_swap(mapping, index, swap)) @@ -1784,9 +1775,6 @@ unlock: put_page(page); } - if (si) - put_swap_device(si); - return error; } diff --git a/mm/slab.h b/mm/slab.h index 67e06637ff2e..58c01a34e5b8 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -216,10 +216,18 @@ DECLARE_STATIC_KEY_FALSE(slub_debug_enabled); #endif extern void print_tracking(struct kmem_cache *s, void *object); long validate_slab_cache(struct kmem_cache *s); +static inline bool __slub_debug_enabled(void) +{ + return static_branch_unlikely(&slub_debug_enabled); +} #else static inline void print_tracking(struct kmem_cache *s, void *object) { } +static inline bool __slub_debug_enabled(void) +{ + return false; +} #endif /* @@ -229,11 +237,10 @@ static inline void print_tracking(struct kmem_cache *s, void *object) */ static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags) { -#ifdef CONFIG_SLUB_DEBUG - VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS)); - if (static_branch_unlikely(&slub_debug_enabled)) + if (IS_ENABLED(CONFIG_SLUB_DEBUG)) + VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS)); + if (__slub_debug_enabled()) return s->flags & flags; -#endif return false; } @@ -339,7 +346,7 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s_orig, continue; page = virt_to_head_page(p[i]); - objcgs = page_objcgs(page); + objcgs = page_objcgs_check(page); if (!objcgs) continue; diff --git a/mm/slub.c b/mm/slub.c index dc863c1ea324..f77d8cd79ef7 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -26,7 +26,6 @@ #include <linux/cpuset.h> #include <linux/mempolicy.h> #include <linux/ctype.h> -#include <linux/stackdepot.h> #include <linux/debugobjects.h> #include <linux/kallsyms.h> #include <linux/kfence.h> @@ -120,25 +119,11 @@ */ #ifdef CONFIG_SLUB_DEBUG - #ifdef CONFIG_SLUB_DEBUG_ON 
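/*
 * The key's compile-time default tracks CONFIG_SLUB_DEBUG_ON, so
 * "debugging on from boot" needs no runtime flip; setup_slub_debug()
 * further down still enables or disables it when a slub_debug=
 * option is parsed.
 */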
DEFINE_STATIC_KEY_TRUE(slub_debug_enabled); #else DEFINE_STATIC_KEY_FALSE(slub_debug_enabled); #endif - -static inline bool __slub_debug_enabled(void) -{ - return static_branch_unlikely(&slub_debug_enabled); -} - -#else /* CONFIG_SLUB_DEBUG */ - -static inline bool __slub_debug_enabled(void) -{ - return false; -} - #endif /* CONFIG_SLUB_DEBUG */ static inline bool kmem_cache_debug(struct kmem_cache *s) @@ -221,8 +206,8 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s) #define TRACK_ADDRS_COUNT 16 struct track { unsigned long addr; /* Called from address */ -#ifdef CONFIG_STACKDEPOT - depot_stack_handle_t handle; +#ifdef CONFIG_STACKTRACE + unsigned long addrs[TRACK_ADDRS_COUNT]; /* Called from address */ #endif int cpu; /* Was running on cpu */ int pid; /* Pid context */ @@ -591,8 +576,8 @@ static void print_section(char *level, char *text, u8 *addr, unsigned int length) { metadata_access_enable(); - print_hex_dump(level, kasan_reset_tag(text), DUMP_PREFIX_ADDRESS, - 16, 1, addr, length, 1); + print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, + 16, 1, kasan_reset_tag((void *)addr), length, 1); metadata_access_disable(); } @@ -626,27 +611,22 @@ static struct track *get_track(struct kmem_cache *s, void *object, return kasan_reset_tag(p + alloc); } -#ifdef CONFIG_STACKDEPOT -static depot_stack_handle_t save_stack_depot_trace(gfp_t flags) -{ - unsigned long entries[TRACK_ADDRS_COUNT]; - depot_stack_handle_t handle; - unsigned int nr_entries; - - nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 4); - handle = stack_depot_save(entries, nr_entries, flags); - return handle; -} -#endif - static void set_track(struct kmem_cache *s, void *object, enum track_item alloc, unsigned long addr) { struct track *p = get_track(s, object, alloc); if (addr) { -#ifdef CONFIG_STACKDEPOT - p->handle = save_stack_depot_trace(GFP_NOWAIT); +#ifdef CONFIG_STACKTRACE + unsigned int nr_entries; + + metadata_access_enable(); + nr_entries = stack_trace_save(kasan_reset_tag(p->addrs), + TRACK_ADDRS_COUNT, 3); + metadata_access_disable(); + + if (nr_entries < TRACK_ADDRS_COUNT) + p->addrs[nr_entries] = 0; #endif p->addr = addr; p->cpu = smp_processor_id(); @@ -673,19 +653,14 @@ static void print_track(const char *s, struct track *t, unsigned long pr_time) pr_err("%s in %pS age=%lu cpu=%u pid=%d\n", s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid); -#ifdef CONFIG_STACKDEPOT +#ifdef CONFIG_STACKTRACE { - depot_stack_handle_t handle; - unsigned long *entries; - unsigned int nr_entries; - - handle = READ_ONCE(t->handle); - if (!handle) { - pr_err("object allocation/free stack trace missing\n"); - } else { - nr_entries = stack_depot_fetch(handle, &entries); - stack_trace_print(entries, nr_entries, 0); - } + int i; + for (i = 0; i < TRACK_ADDRS_COUNT; i++) + if (t->addrs[i]) + pr_err("\t%pS\n", (void *)t->addrs[i]); + else + break; } #endif } @@ -1425,12 +1400,13 @@ check_slabs: static int __init setup_slub_debug(char *str) { slab_flags_t flags; + slab_flags_t global_flags; char *saved_str; char *slab_list; bool global_slub_debug_changed = false; bool slab_list_specified = false; - slub_debug = DEBUG_DEFAULT_FLAGS; + global_flags = DEBUG_DEFAULT_FLAGS; if (*str++ != '=' || !*str) /* * No options specified. Switch on full debugging. 
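The slub.c revert above goes back to recording each alloc/free trace inline in struct track: a fixed array of TRACK_ADDRS_COUNT entries, zero-terminated when the captured trace is shorter. A userspace analogue of that store/print convention, with glibc's backtrace() standing in for stack_trace_save(); only the 16-entry size mirrors the kernel, the rest is illustrative:

#include <execinfo.h>
#include <stdio.h>

#define TRACK_ADDRS_COUNT 16

static void *addrs[TRACK_ADDRS_COUNT];

static void set_track(void)
{
	int nr = backtrace(addrs, TRACK_ADDRS_COUNT);

	/* zero-terminate short traces so readers know where they end */
	if (nr < TRACK_ADDRS_COUNT)
		addrs[nr] = NULL;
}

static void print_track(void)
{
	/* stop at the first empty slot, as slub's print_track() does */
	for (int i = 0; i < TRACK_ADDRS_COUNT && addrs[i]; i++)
		printf("\t%p\n", addrs[i]);
}

int main(void)
{
	set_track();
	print_track();
	return 0;
}

Unlike stackdepot, which deduplicates identical traces behind a handle, the inline array costs TRACK_ADDRS_COUNT words per tracked object but never has to allocate at save time.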
@@ -1442,7 +1418,7 @@ static int __init setup_slub_debug(char *str) str = parse_slub_debug_flags(str, &flags, &slab_list, true); if (!slab_list) { - slub_debug = flags; + global_flags = flags; global_slub_debug_changed = true; } else { slab_list_specified = true; @@ -1451,16 +1427,18 @@ static int __init setup_slub_debug(char *str) /* * For backwards compatibility, a single list of flags with list of - * slabs means debugging is only enabled for those slabs, so the global - * slub_debug should be 0. We can extended that to multiple lists as + * slabs means debugging is only changed for those slabs, so the global + * slub_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending + * on CONFIG_SLUB_DEBUG_ON). We can extended that to multiple lists as * long as there is no option specifying flags without a slab list. */ if (slab_list_specified) { if (!global_slub_debug_changed) - slub_debug = 0; + global_flags = slub_debug; slub_debug_string = saved_str; } out: + slub_debug = global_flags; if (slub_debug != 0 || slub_debug_string) static_branch_enable(&slub_debug_enabled); else @@ -3261,6 +3239,16 @@ struct detached_freelist { struct kmem_cache *s; }; +static inline void free_nonslab_page(struct page *page, void *object) +{ + unsigned int order = compound_order(page); + + VM_BUG_ON_PAGE(!PageCompound(page), page); + kfree_hook(object); + mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, -(PAGE_SIZE << order)); + __free_pages(page, order); +} + /* * This function progressively scans the array with free objects (with * a limited look ahead) and extract objects belonging to the same @@ -3297,9 +3285,7 @@ int build_detached_freelist(struct kmem_cache *s, size_t size, if (!s) { /* Handle kalloc'ed objects */ if (unlikely(!PageSlab(page))) { - BUG_ON(!PageCompound(page)); - kfree_hook(object); - __free_pages(page, compound_order(page)); + free_nonslab_page(page, object); p[size] = NULL; /* mark object processed */ return size; } @@ -4059,26 +4045,18 @@ void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page) objp = fixup_red_left(s, objp); trackp = get_track(s, objp, TRACK_ALLOC); kpp->kp_ret = (void *)trackp->addr; -#ifdef CONFIG_STACKDEPOT - { - depot_stack_handle_t handle; - unsigned long *entries; - unsigned int nr_entries; - - handle = READ_ONCE(trackp->handle); - if (handle) { - nr_entries = stack_depot_fetch(handle, &entries); - for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++) - kpp->kp_stack[i] = (void *)entries[i]; - } +#ifdef CONFIG_STACKTRACE + for (i = 0; i < KS_ADDRS_COUNT && i < TRACK_ADDRS_COUNT; i++) { + kpp->kp_stack[i] = (void *)trackp->addrs[i]; + if (!kpp->kp_stack[i]) + break; + } - trackp = get_track(s, objp, TRACK_FREE); - handle = READ_ONCE(trackp->handle); - if (handle) { - nr_entries = stack_depot_fetch(handle, &entries); - for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++) - kpp->kp_free_stack[i] = (void *)entries[i]; - } + trackp = get_track(s, objp, TRACK_FREE); + for (i = 0; i < KS_ADDRS_COUNT && i < TRACK_ADDRS_COUNT; i++) { + kpp->kp_free_stack[i] = (void *)trackp->addrs[i]; + if (!kpp->kp_free_stack[i]) + break; } #endif #endif @@ -4283,13 +4261,7 @@ void kfree(const void *x) page = virt_to_head_page(x); if (unlikely(!PageSlab(page))) { - unsigned int order = compound_order(page); - - BUG_ON(!PageCompound(page)); - kfree_hook(object); - mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, - -(PAGE_SIZE << order)); - __free_pages(page, order); + free_nonslab_page(page, object); return; } slab_free(page->slab_cache, page, 
object, NULL, 1, _RET_IP_); diff --git a/mm/swap_state.c b/mm/swap_state.c index c56aa9ac050d..bc7cee6b2ec5 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -628,13 +628,6 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask, if (!mask) goto skip; - /* Test swap type to make sure the dereference is safe */ - if (likely(si->flags & (SWP_BLKDEV | SWP_FS_OPS))) { - struct inode *inode = si->swap_file->f_mapping->host; - if (inode_read_congested(inode)) - goto skip; - } - do_poll = false; /* Read a page_cluster sized and aligned cluster around offset. */ start_offset = offset & ~mask; diff --git a/mm/util.c b/mm/util.c index 99c6cc77de9e..9043d03750a7 100644 --- a/mm/util.c +++ b/mm/util.c @@ -731,6 +731,16 @@ int __page_mapcount(struct page *page) } EXPORT_SYMBOL_GPL(__page_mapcount); +void copy_huge_page(struct page *dst, struct page *src) +{ + unsigned i, nr = compound_nr(src); + + for (i = 0; i < nr; i++) { + cond_resched(); + copy_highpage(nth_page(dst, i), nth_page(src, i)); + } +} + int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; int sysctl_overcommit_ratio __read_mostly = 50; unsigned long sysctl_overcommit_kbytes __read_mostly; diff --git a/mm/vmscan.c b/mm/vmscan.c index 4620df62f0ff..eeae2f6bc532 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -100,9 +100,12 @@ struct scan_control { unsigned int may_swap:1; /* - * Cgroups are not reclaimed below their configured memory.low, - * unless we threaten to OOM. If any cgroups are skipped due to - * memory.low and nothing was reclaimed, go back for memory.low. + * Cgroup memory below memory.low is protected as long as we + * don't threaten to OOM. If any cgroup is reclaimed at + * reduced force or passed over entirely due to its memory.low + * setting (memcg_low_skipped), and nothing is reclaimed as a + * result, then go back for one more cycle that reclaims the protected + * memory (memcg_low_reclaim) to avert OOM. */ unsigned int memcg_low_reclaim:1; unsigned int memcg_low_skipped:1; @@ -2537,15 +2540,14 @@ out: for_each_evictable_lru(lru) { int file = is_file_lru(lru); unsigned long lruvec_size; + unsigned long low, min; unsigned long scan; - unsigned long protection; lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx); - protection = mem_cgroup_protection(sc->target_mem_cgroup, - memcg, - sc->memcg_low_reclaim); + mem_cgroup_protection(sc->target_mem_cgroup, memcg, + &min, &low); - if (protection) { + if (min || low) { /* * Scale a cgroup's reclaim pressure by proportioning * its current usage to its memory.low or memory.min @@ -2576,6 +2578,15 @@ out: * hard protection. 
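* (With the hunk below, the first reclaim pass protects up to
* memory.low: when low > min and sc->memcg_low_reclaim is not yet
* set, memcg_low_skipped is raised so that one more pass can fall
* back to reclaiming down to memory.min before declaring OOM.)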
*/ unsigned long cgroup_size = mem_cgroup_size(memcg); + unsigned long protection; + + /* memory.low scaling, make sure we retry before OOM */ + if (!sc->memcg_low_reclaim && low > min) { + protection = low; + sc->memcg_low_skipped = 1; + } else { + protection = min; + } /* Avoid TOCTOU with earlier protection check */ cgroup_size = max(cgroup_size, protection); @@ -4413,11 +4424,13 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in .may_swap = 1, .reclaim_idx = gfp_zone(gfp_mask), }; + unsigned long pflags; trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order, sc.gfp_mask); cond_resched(); + psi_memstall_enter(&pflags); fs_reclaim_acquire(sc.gfp_mask); /* * We need to be able to allocate from the reserves for RECLAIM_UNMAP @@ -4442,6 +4455,7 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in current->flags &= ~PF_SWAPWRITE; memalloc_noreclaim_restore(noreclaim_flag); fs_reclaim_release(sc.gfp_mask); + psi_memstall_leave(&pflags); trace_mm_vmscan_node_reclaim_end(sc.nr_reclaimed); diff --git a/net/802/garp.c b/net/802/garp.c index 400bd857e5f5..f6012f8e59f0 100644 --- a/net/802/garp.c +++ b/net/802/garp.c @@ -203,6 +203,19 @@ static void garp_attr_destroy(struct garp_applicant *app, struct garp_attr *attr kfree(attr); } +static void garp_attr_destroy_all(struct garp_applicant *app) +{ + struct rb_node *node, *next; + struct garp_attr *attr; + + for (node = rb_first(&app->gid); + next = node ? rb_next(node) : NULL, node != NULL; + node = next) { + attr = rb_entry(node, struct garp_attr, node); + garp_attr_destroy(app, attr); + } +} + static int garp_pdu_init(struct garp_applicant *app) { struct sk_buff *skb; @@ -609,6 +622,7 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl spin_lock_bh(&app->lock); garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU); + garp_attr_destroy_all(app); garp_pdu_queue(app); spin_unlock_bh(&app->lock); diff --git a/net/802/mrp.c b/net/802/mrp.c index bea6e43d45a0..35e04cc5390c 100644 --- a/net/802/mrp.c +++ b/net/802/mrp.c @@ -292,6 +292,19 @@ static void mrp_attr_destroy(struct mrp_applicant *app, struct mrp_attr *attr) kfree(attr); } +static void mrp_attr_destroy_all(struct mrp_applicant *app) +{ + struct rb_node *node, *next; + struct mrp_attr *attr; + + for (node = rb_first(&app->mad); + next = node ? rb_next(node) : NULL, node != NULL; + node = next) { + attr = rb_entry(node, struct mrp_attr, node); + mrp_attr_destroy(app, attr); + } +} + static int mrp_pdu_init(struct mrp_applicant *app) { struct sk_buff *skb; @@ -895,6 +908,7 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl) spin_lock_bh(&app->lock); mrp_mad_event(app, MRP_EVENT_TX); + mrp_attr_destroy_all(app); mrp_pdu_queue(app); spin_unlock_bh(&app->lock); diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 2560ed2f144d..e1a545c8a69f 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -3996,14 +3996,10 @@ EXPORT_SYMBOL(hci_register_dev); /* Unregister HCI device */ void hci_unregister_dev(struct hci_dev *hdev) { - int id; - BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); hci_dev_set_flag(hdev, HCI_UNREGISTER); - id = hdev->id; - write_lock(&hci_dev_list_lock); list_del(&hdev->list); write_unlock(&hci_dev_list_lock); @@ -4038,7 +4034,14 @@ void hci_unregister_dev(struct hci_dev *hdev) } device_del(&hdev->dev); + /* Actual cleanup is deferred until hci_cleanup_dev(). 
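+ * hci_unregister_dev() itself now only drops its reference below;
+ * bt_host_release() (see the hci_sysfs.c hunk further down) calls
+ * hci_cleanup_dev() once the final hci_dev_put() lands.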
*/ + hci_dev_put(hdev); +} +EXPORT_SYMBOL(hci_unregister_dev); +/* Cleanup HCI device */ +void hci_cleanup_dev(struct hci_dev *hdev) +{ debugfs_remove_recursive(hdev->debugfs); kfree_const(hdev->hw_info); kfree_const(hdev->fw_info); @@ -4063,11 +4066,8 @@ void hci_unregister_dev(struct hci_dev *hdev) hci_blocked_keys_clear(hdev); hci_dev_unlock(hdev); - hci_dev_put(hdev); - - ida_simple_remove(&hci_index_ida, id); + ida_simple_remove(&hci_index_ida, hdev->id); } -EXPORT_SYMBOL(hci_unregister_dev); /* Suspend HCI device */ int hci_suspend_dev(struct hci_dev *hdev) diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index b04a5a02ecf3..f1128c2134f0 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -59,6 +59,17 @@ struct hci_pinfo { char comm[TASK_COMM_LEN]; }; +static struct hci_dev *hci_hdev_from_sock(struct sock *sk) +{ + struct hci_dev *hdev = hci_pi(sk)->hdev; + + if (!hdev) + return ERR_PTR(-EBADFD); + if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) + return ERR_PTR(-EPIPE); + return hdev; +} + void hci_sock_set_flag(struct sock *sk, int nr) { set_bit(nr, &hci_pi(sk)->flags); @@ -759,19 +770,13 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event) if (event == HCI_DEV_UNREG) { struct sock *sk; - /* Detach sockets from device */ + /* Wake up sockets using this dead device */ read_lock(&hci_sk_list.lock); sk_for_each(sk, &hci_sk_list.head) { - lock_sock(sk); if (hci_pi(sk)->hdev == hdev) { - hci_pi(sk)->hdev = NULL; sk->sk_err = EPIPE; - sk->sk_state = BT_OPEN; sk->sk_state_change(sk); - - hci_dev_put(hdev); } - release_sock(sk); } read_unlock(&hci_sk_list.lock); } @@ -930,10 +935,10 @@ static int hci_sock_reject_list_del(struct hci_dev *hdev, void __user *arg) static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg) { - struct hci_dev *hdev = hci_pi(sk)->hdev; + struct hci_dev *hdev = hci_hdev_from_sock(sk); - if (!hdev) - return -EBADFD; + if (IS_ERR(hdev)) + return PTR_ERR(hdev); if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) return -EBUSY; @@ -1103,6 +1108,18 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, lock_sock(sk); + /* Allow detaching from dead device and attaching to alive device, if + * the caller wants to re-bind (instead of close) this socket in + * response to hci_sock_dev_event(HCI_DEV_UNREG) notification. 
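+ * (hci_sock_dev_event(HCI_DEV_UNREG) above no longer detaches the
+ * socket itself; it only sets sk_err and wakes the socket up, so
+ * the actual detach has to happen here, under lock_sock().)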
+ */ + hdev = hci_pi(sk)->hdev; + if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) { + hci_pi(sk)->hdev = NULL; + sk->sk_state = BT_OPEN; + hci_dev_put(hdev); + } + hdev = NULL; + if (sk->sk_state == BT_BOUND) { err = -EALREADY; goto done; @@ -1379,9 +1396,9 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, lock_sock(sk); - hdev = hci_pi(sk)->hdev; - if (!hdev) { - err = -EBADFD; + hdev = hci_hdev_from_sock(sk); + if (IS_ERR(hdev)) { + err = PTR_ERR(hdev); goto done; } @@ -1743,9 +1760,9 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg, goto done; } - hdev = hci_pi(sk)->hdev; - if (!hdev) { - err = -EBADFD; + hdev = hci_hdev_from_sock(sk); + if (IS_ERR(hdev)) { + err = PTR_ERR(hdev); goto done; } diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c index 9874844a95a9..b69d88b88d2e 100644 --- a/net/bluetooth/hci_sysfs.c +++ b/net/bluetooth/hci_sysfs.c @@ -83,6 +83,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn) static void bt_host_release(struct device *dev) { struct hci_dev *hdev = to_hci_dev(dev); + + if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) + hci_cleanup_dev(hdev); kfree(hdev); module_put(THIS_MODULE); } diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index aa47af349ba8..caa16bf30fb5 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -7,6 +7,7 @@ #include <linux/vmalloc.h> #include <linux/etherdevice.h> #include <linux/filter.h> +#include <linux/rcupdate_trace.h> #include <linux/sched/signal.h> #include <net/bpf_sk_storage.h> #include <net/sock.h> @@ -701,6 +702,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, void *data; int ret; + if (prog->expected_attach_type == BPF_XDP_DEVMAP || + prog->expected_attach_type == BPF_XDP_CPUMAP) + return -EINVAL; if (kattr->test.ctx_in || kattr->test.ctx_out) return -EINVAL; @@ -948,7 +952,10 @@ int bpf_prog_test_run_syscall(struct bpf_prog *prog, goto out; } } + + rcu_read_lock_trace(); retval = bpf_prog_run_pin_on_cpu(prog, ctx); + rcu_read_unlock_trace(); if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) { err = -EFAULT; diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 2b862cffc03a..5dee30966ed3 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -780,7 +780,7 @@ int br_fdb_replay(const struct net_device *br_dev, const struct net_device *dev, struct net_device *dst_dev; dst_dev = dst ? dst->dev : br->dev; - if (dst_dev != br_dev && dst_dev != dev) + if (dst_dev && dst_dev != dev) continue; err = br_fdb_replay_one(nb, fdb, dst_dev, action, ctx); @@ -1019,7 +1019,8 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source, static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br, struct net_bridge_port *p, const unsigned char *addr, - u16 nlh_flags, u16 vid, struct nlattr *nfea_tb[]) + u16 nlh_flags, u16 vid, struct nlattr *nfea_tb[], + struct netlink_ext_ack *extack) { int err = 0; @@ -1038,6 +1039,11 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br, rcu_read_unlock(); local_bh_enable(); } else if (ndm->ndm_flags & NTF_EXT_LEARNED) { + if (!p && !(ndm->ndm_state & NUD_PERMANENT)) { + NL_SET_ERR_MSG_MOD(extack, + "FDB entry towards bridge must be permanent"); + return -EINVAL; + } err = br_fdb_external_learn_add(br, p, addr, vid, true); } else { spin_lock_bh(&br->hash_lock); @@ -1110,9 +1116,11 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], } /* VID was specified, so use it. 
*/ - err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid, nfea_tb); + err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid, nfea_tb, + extack); } else { - err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0, nfea_tb); + err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0, nfea_tb, + extack); if (err || !vg || !vg->num_vlans) goto out; @@ -1124,7 +1132,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], if (!br_vlan_should_use(v)) continue; err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid, - nfea_tb); + nfea_tb, extack); if (err) goto out; } @@ -1281,6 +1289,10 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p, if (swdev_notify) flags |= BIT(BR_FDB_ADDED_BY_USER); + + if (!p) + flags |= BIT(BR_FDB_LOCAL); + fdb = fdb_create(br, p, addr, vid, flags); if (!fdb) { err = -ENOMEM; @@ -1307,6 +1319,9 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p, if (swdev_notify) set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags); + if (!p) + set_bit(BR_FDB_LOCAL, &fdb->flags); + if (modified) fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify); } diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index f7d2f472ae24..14cd6ef96111 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -562,7 +562,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev, struct net_bridge_port *p; int err = 0; unsigned br_hr, dev_hr; - bool changed_addr; + bool changed_addr, fdb_synced = false; /* Don't allow bridging non-ethernet like devices. */ if ((dev->flags & IFF_LOOPBACK) || @@ -616,6 +616,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev, err = dev_set_allmulti(dev, 1); if (err) { + br_multicast_del_port(p); kfree(p); /* kobject not yet init'd, manually free */ goto err1; } @@ -652,6 +653,19 @@ int br_add_if(struct net_bridge *br, struct net_device *dev, list_add_rcu(&p->list, &br->port_list); nbp_update_port_count(br); + if (!br_promisc_port(p) && (p->dev->priv_flags & IFF_UNICAST_FLT)) { + /* When updating the port count we also update all ports' + * promiscuous mode. + * A port leaving promiscuous mode normally gets the bridge's + * fdb synced to the unicast filter (if supported), however, + * `br_port_clear_promisc` does not distinguish between + * non-promiscuous ports and *new* ports, so we need to + * sync explicitly here. 
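+ * If the sync fails we only log an error and continue; the err7
+ * unwind path below calls br_fdb_unsync_static() when fdb_synced
+ * is set.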
+ */ + fdb_synced = br_fdb_sync_static(br, p) == 0; + if (!fdb_synced) + netdev_err(dev, "failed to sync bridge static fdb addresses to this port\n"); + } netdev_update_features(br->dev); @@ -701,6 +715,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev, return 0; err7: + if (fdb_synced) + br_fdb_unsync_static(br, p); list_del_rcu(&p->list); br_fdb_delete_by_port(br, p, 0, 1); nbp_update_port_count(br); @@ -714,6 +730,7 @@ err4: err3: sysfs_remove_link(br->ifobj, p->dev->name); err2: + br_multicast_del_port(p); kobject_put(&p->kobj); dev_set_allmulti(dev, -1); err1: diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 53c3a9d80d9c..d0434dc8c03b 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -3264,7 +3264,9 @@ static void br_multicast_pim(struct net_bridge *br, pim_hdr_type(pimhdr) != PIM_TYPE_HELLO) return; + spin_lock(&br->multicast_lock); br_ip4_multicast_mark_router(br, port); + spin_unlock(&br->multicast_lock); } static int br_ip4_multicast_mrd_rcv(struct net_bridge *br, @@ -3275,7 +3277,9 @@ static int br_ip4_multicast_mrd_rcv(struct net_bridge *br, igmp_hdr(skb)->type != IGMP_MRDISC_ADV) return -ENOMSG; + spin_lock(&br->multicast_lock); br_ip4_multicast_mark_router(br, port); + spin_unlock(&br->multicast_lock); return 0; } @@ -3343,7 +3347,9 @@ static void br_ip6_multicast_mrd_rcv(struct net_bridge *br, if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV) return; + spin_lock(&br->multicast_lock); br_ip6_multicast_mark_router(br, port); + spin_unlock(&br->multicast_lock); } static int br_multicast_ipv6_rcv(struct net_bridge *br, diff --git a/net/bridge/netfilter/nf_conntrack_bridge.c b/net/bridge/netfilter/nf_conntrack_bridge.c index 8d033a75a766..fdbed3158555 100644 --- a/net/bridge/netfilter/nf_conntrack_bridge.c +++ b/net/bridge/netfilter/nf_conntrack_bridge.c @@ -88,6 +88,12 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk, skb = ip_fraglist_next(&iter); } + + if (!err) + return 0; + + kfree_skb_list(iter.frag); + return err; } slow_path: diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index 647554c9813b..e12fd3cad619 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -539,7 +539,8 @@ static int caif_seqpkt_sendmsg(struct socket *sock, struct msghdr *msg, goto err; ret = -EINVAL; - if (unlikely(msg->msg_iter.iov->iov_base == NULL)) + if (unlikely(msg->msg_iter.nr_segs == 0) || + unlikely(msg->msg_iter.iov->iov_base == NULL)) goto err; noblock = msg->msg_flags & MSG_DONTWAIT; diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c index c3946c355882..bdc95bd7a851 100644 --- a/net/can/j1939/transport.c +++ b/net/can/j1939/transport.c @@ -1075,11 +1075,16 @@ static bool j1939_session_deactivate_locked(struct j1939_session *session) static bool j1939_session_deactivate(struct j1939_session *session) { + struct j1939_priv *priv = session->priv; bool active; - j1939_session_list_lock(session->priv); + j1939_session_list_lock(priv); + /* This function should be called with a session ref-count of at + * least 2. 
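+ * One reference is held by the session list and one by the caller,
+ * so deactivating (which may drop the list's reference) cannot free
+ * the session under us; priv is sampled up front for the same reason.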
+ */ + WARN_ON_ONCE(kref_read(&session->kref) < 2); active = j1939_session_deactivate_locked(session); - j1939_session_list_unlock(session->priv); + j1939_session_list_unlock(priv); return active; } @@ -1869,7 +1874,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session, if (!session->transmission) j1939_tp_schedule_txtimer(session, 0); } else { - j1939_tp_set_rxtimeout(session, 250); + j1939_tp_set_rxtimeout(session, 750); } session->last_cmd = 0xff; consume_skb(se_skb); diff --git a/net/can/raw.c b/net/can/raw.c index ed4fcb7ab0c3..cd5a49380116 100644 --- a/net/can/raw.c +++ b/net/can/raw.c @@ -546,10 +546,18 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, return -EFAULT; } + rtnl_lock(); lock_sock(sk); - if (ro->bound && ro->ifindex) + if (ro->bound && ro->ifindex) { dev = dev_get_by_index(sock_net(sk), ro->ifindex); + if (!dev) { + if (count > 1) + kfree(filter); + err = -ENODEV; + goto out_fil; + } + } if (ro->bound) { /* (try to) register the new filters */ @@ -588,6 +596,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, dev_put(dev); release_sock(sk); + rtnl_unlock(); break; @@ -600,10 +609,16 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, err_mask &= CAN_ERR_MASK; + rtnl_lock(); lock_sock(sk); - if (ro->bound && ro->ifindex) + if (ro->bound && ro->ifindex) { dev = dev_get_by_index(sock_net(sk), ro->ifindex); + if (!dev) { + err = -ENODEV; + goto out_err; + } + } /* remove current error mask */ if (ro->bound) { @@ -627,6 +642,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, dev_put(dev); release_sock(sk); + rtnl_unlock(); break; diff --git a/net/core/dev.c b/net/core/dev.c index c253c2aafe97..8f1a47ad6781 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -131,6 +131,7 @@ #include <trace/events/napi.h> #include <trace/events/net.h> #include <trace/events/skb.h> +#include <trace/events/qdisc.h> #include <linux/inetdevice.h> #include <linux/cpu_rmap.h> #include <linux/static_key.h> @@ -3844,6 +3845,18 @@ static void qdisc_pkt_len_init(struct sk_buff *skb) } } +static int dev_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *q, + struct sk_buff **to_free, + struct netdev_queue *txq) +{ + int rc; + + rc = q->enqueue(skb, q, to_free) & NET_XMIT_MASK; + if (rc == NET_XMIT_SUCCESS) + trace_qdisc_enqueue(q, txq, skb); + return rc; +} + static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, struct net_device *dev, struct netdev_queue *txq) @@ -3862,8 +3875,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, * of q->seqlock to protect from racing with requeuing. 
*/ if (unlikely(!nolock_qdisc_is_empty(q))) { - rc = q->enqueue(skb, q, &to_free) & - NET_XMIT_MASK; + rc = dev_qdisc_enqueue(skb, q, &to_free, txq); __qdisc_run(q); qdisc_run_end(q); @@ -3879,7 +3891,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, return NET_XMIT_SUCCESS; } - rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK; + rc = dev_qdisc_enqueue(skb, q, &to_free, txq); qdisc_run(q); no_lock_out: @@ -3923,7 +3935,7 @@ no_lock_out: qdisc_run_end(q); rc = NET_XMIT_SUCCESS; } else { - rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK; + rc = dev_qdisc_enqueue(skb, q, &to_free, txq); if (qdisc_run_begin(q)) { if (unlikely(contended)) { spin_unlock(&q->busylock); @@ -6008,6 +6020,19 @@ static void gro_list_prepare(const struct list_head *head, diffs = memcmp(skb_mac_header(p), skb_mac_header(skb), maclen); + + diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb); +#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT) + if (!diffs) { + struct tc_skb_ext *skb_ext = skb_ext_find(skb, TC_SKB_EXT); + struct tc_skb_ext *p_ext = skb_ext_find(p, TC_SKB_EXT); + + diffs |= (!!p_ext) ^ (!!skb_ext); + if (!diffs && unlikely(skb_ext)) + diffs |= p_ext->chain ^ skb_ext->chain; + } +#endif + NAPI_GRO_CB(p)->same_flow = !diffs; } } @@ -6221,6 +6246,8 @@ static gro_result_t napi_skb_finish(struct napi_struct *napi, case GRO_MERGED_FREE: if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) napi_skb_free_stolen_head(skb); + else if (skb->fclone != SKB_FCLONE_UNAVAILABLE) + __kfree_skb(skb); else __kfree_skb_defer(skb); break; @@ -6270,6 +6297,7 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) skb_shinfo(skb)->gso_type = 0; skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); skb_ext_reset(skb); + nf_reset_ct(skb); napi->skb = skb; } @@ -9684,14 +9712,17 @@ int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) struct net_device *dev; int err, fd; + rtnl_lock(); dev = dev_get_by_index(net, attr->link_create.target_ifindex); - if (!dev) + if (!dev) { + rtnl_unlock(); return -EINVAL; + } link = kzalloc(sizeof(*link), GFP_USER); if (!link) { err = -ENOMEM; - goto out_put_dev; + goto unlock; } bpf_link_init(&link->link, BPF_LINK_TYPE_XDP, &bpf_xdp_link_lops, prog); @@ -9701,14 +9732,14 @@ int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) err = bpf_link_prime(&link->link, &link_primer); if (err) { kfree(link); - goto out_put_dev; + goto unlock; } - rtnl_lock(); err = dev_xdp_attach_link(dev, NULL, link); rtnl_unlock(); if (err) { + link->dev = NULL; bpf_link_cleanup(&link_primer); goto out_put_dev; } @@ -9718,6 +9749,9 @@ int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) dev_put(dev); return fd; +unlock: + rtnl_unlock(); + out_put_dev: dev_put(dev); return err; diff --git a/net/core/devlink.c b/net/core/devlink.c index 8fdd04f00fd7..85032626de24 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -9328,18 +9328,10 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port, switch (attrs->flavour) { case DEVLINK_PORT_FLAVOUR_PHYSICAL: - case DEVLINK_PORT_FLAVOUR_VIRTUAL: n = snprintf(name, len, "p%u", attrs->phys.port_number); if (n < len && attrs->split) n += snprintf(name + n, len - n, "s%u", attrs->phys.split_subport_number); - if (!attrs->split) - n = snprintf(name, len, "p%u", attrs->phys.port_number); - else - n = snprintf(name, len, "p%us%u", - attrs->phys.port_number, - attrs->phys.split_subport_number); - break; case 
DEVLINK_PORT_FLAVOUR_CPU: case DEVLINK_PORT_FLAVOUR_DSA: @@ -9381,6 +9373,8 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port, n = snprintf(name, len, "pf%usf%u", attrs->pci_sf.pf, attrs->pci_sf.sf); break; + case DEVLINK_PORT_FLAVOUR_VIRTUAL: + return -EOPNOTSUPP; } if (n >= len) diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 2aadbfc5193b..4b2415d34873 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -1504,7 +1504,7 @@ __be32 flow_get_u32_dst(const struct flow_keys *flow) } EXPORT_SYMBOL(flow_get_u32_dst); -/* Sort the source and destination IP (and the ports if the IP are the same), +/* Sort the source and destination IP and the ports, * to have consistent hash within the two directions */ static inline void __flow_hash_consistentify(struct flow_keys *keys) @@ -1515,11 +1515,11 @@ static inline void __flow_hash_consistentify(struct flow_keys *keys) case FLOW_DISSECTOR_KEY_IPV4_ADDRS: addr_diff = (__force u32)keys->addrs.v4addrs.dst - (__force u32)keys->addrs.v4addrs.src; - if ((addr_diff < 0) || - (addr_diff == 0 && - ((__force u16)keys->ports.dst < - (__force u16)keys->ports.src))) { + if (addr_diff < 0) swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst); + + if ((__force u16)keys->ports.dst < + (__force u16)keys->ports.src) { swap(keys->ports.src, keys->ports.dst); } break; @@ -1527,13 +1527,13 @@ static inline void __flow_hash_consistentify(struct flow_keys *keys) addr_diff = memcmp(&keys->addrs.v6addrs.dst, &keys->addrs.v6addrs.src, sizeof(keys->addrs.v6addrs.dst)); - if ((addr_diff < 0) || - (addr_diff == 0 && - ((__force u16)keys->ports.dst < - (__force u16)keys->ports.src))) { + if (addr_diff < 0) { for (i = 0; i < 4; i++) swap(keys->addrs.v6addrs.src.s6_addr32[i], keys->addrs.v6addrs.dst.s6_addr32[i]); + } + if ((__force u16)keys->ports.dst < + (__force u16)keys->ports.src) { swap(keys->ports.src, keys->ports.dst); } break; diff --git a/net/core/link_watch.c b/net/core/link_watch.c index 75431ca9300f..1a455847da54 100644 --- a/net/core/link_watch.c +++ b/net/core/link_watch.c @@ -158,7 +158,7 @@ static void linkwatch_do_dev(struct net_device *dev) clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state); rfc2863_policy(dev); - if (dev->flags & IFF_UP && netif_device_present(dev)) { + if (dev->flags & IFF_UP) { if (netif_carrier_ok(dev)) dev_activate(dev); else @@ -204,7 +204,8 @@ static void __linkwatch_run_queue(int urgent_only) dev = list_first_entry(&wrk, struct net_device, link_watch_list); list_del_init(&dev->link_watch_list); - if (urgent_only && !linkwatch_urgent_event(dev)) { + if (!netif_device_present(dev) || + (urgent_only && !linkwatch_urgent_event(dev))) { list_add_tail(&dev->link_watch_list, &lweventlist); continue; } diff --git a/net/core/page_pool.c b/net/core/page_pool.c index 5e4eb45b139c..8ab7b402244c 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -634,7 +634,15 @@ bool page_pool_return_skb_page(struct page *page) struct page_pool *pp; page = compound_head(page); - if (unlikely(page->pp_magic != PP_SIGNATURE)) + + /* page->pp_magic is OR'ed with PP_SIGNATURE after the allocation + * in order to preserve any existing bits, such as bit 0 for the + * head page of compound page and bit 1 for pfmemalloc page, so + * mask those bits for freeing side when doing below checking, + * and page_is_pfmemalloc() is checked in __page_pool_put_page() + * to avoid recycling the pfmemalloc page. 
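+ * Concretely, a page with both low bits set reads back as
+ * pp_magic == PP_SIGNATURE | 0x3, and masking with ~0x3UL restores
+ * the plain signature for the check below.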
*/ + if (unlikely((page->pp_magic & ~0x3UL) != PP_SIGNATURE)) return false; pp = page->pp; diff --git a/net/core/skbuff.c index 12aabcda6db2..fc7942c0dddc 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -663,7 +663,7 @@ static void skb_release_data(struct sk_buff *skb) if (skb->cloned && atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1, &shinfo->dataref)) - return; + goto exit; skb_zcopy_clear(skb, true); @@ -674,6 +674,17 @@ static void skb_release_data(struct sk_buff *skb) kfree_skb_list(shinfo->frag_list); skb_free_head(skb); +exit: + /* When we clone an SKB we copy the recycling bit. The pp_recycle + * bit is only set on the head though, so in order to avoid races + * while trying to recycle fragments on __skb_frag_unref() we need + * to make one SKB responsible for triggering the recycle path. + * So disable the recycling bit if an SKB is cloned and we have + * additional references to the fragmented part of the SKB. + * Eventually the last SKB will have the recycling bit set and its + * dataref set to 0, which will trigger the recycling. + */ + skb->pp_recycle = 0; } /* @@ -943,6 +954,7 @@ void __kfree_skb_defer(struct sk_buff *skb) void napi_skb_free_stolen_head(struct sk_buff *skb) { + nf_reset_ct(skb); skb_dst_drop(skb); skb_ext_put(skb); napi_skb_cache_put(skb); @@ -3010,8 +3022,11 @@ skb_zerocopy_headlen(const struct sk_buff *from) if (!from->head_frag || skb_headlen(from) < L1_CACHE_BYTES || - skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) + skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) { hlen = skb_headlen(from); + if (!hlen) + hlen = from->len; + } if (skb_has_frag_list(from)) hlen = from->len; diff --git a/net/core/skmsg.c index 9b6160a191f8..2d6249b28928 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -508,10 +508,8 @@ static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb, if (skb_linearize(skb)) return -EAGAIN; num_sge = skb_to_sgvec(skb, msg->sg.data, 0, skb->len); - if (unlikely(num_sge < 0)) { - kfree(msg); + if (unlikely(num_sge < 0)) return num_sge; - } copied = skb->len; msg->sg.start = 0; @@ -530,6 +528,7 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb) { struct sock *sk = psock->sk; struct sk_msg *msg; + int err; /* If we are receiving on the same sock skb->sk is already assigned, * skip memory accounting and owner transition seeing it already set @@ -548,7 +547,10 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb) * into user buffers.
*/ skb_set_owner_r(skb, sk); - return sk_psock_skb_ingress_enqueue(skb, psock, sk, msg); + err = sk_psock_skb_ingress_enqueue(skb, psock, sk, msg); + if (err < 0) + kfree(msg); + return err; } /* Puts an skb on the ingress queue of the socket already assigned to the @@ -559,12 +561,16 @@ static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb { struct sk_msg *msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC); struct sock *sk = psock->sk; + int err; if (unlikely(!msg)) return -EAGAIN; sk_msg_init(msg); skb_set_owner_r(skb, sk); - return sk_psock_skb_ingress_enqueue(skb, psock, sk, msg); + err = sk_psock_skb_ingress_enqueue(skb, psock, sk, msg); + if (err < 0) + kfree(msg); + return err; } static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb, @@ -578,29 +584,42 @@ static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb, return sk_psock_skb_ingress(psock, skb); } -static void sock_drop(struct sock *sk, struct sk_buff *skb) +static void sk_psock_skb_state(struct sk_psock *psock, + struct sk_psock_work_state *state, + struct sk_buff *skb, + int len, int off) { - sk_drops_add(sk, skb); - kfree_skb(skb); + spin_lock_bh(&psock->ingress_lock); + if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) { + state->skb = skb; + state->len = len; + state->off = off; + } else { + sock_drop(psock->sk, skb); + } + spin_unlock_bh(&psock->ingress_lock); } static void sk_psock_backlog(struct work_struct *work) { struct sk_psock *psock = container_of(work, struct sk_psock, work); struct sk_psock_work_state *state = &psock->work_state; - struct sk_buff *skb; + struct sk_buff *skb = NULL; bool ingress; u32 len, off; int ret; mutex_lock(&psock->work_mutex); - if (state->skb) { + if (unlikely(state->skb)) { + spin_lock_bh(&psock->ingress_lock); skb = state->skb; len = state->len; off = state->off; state->skb = NULL; - goto start; + spin_unlock_bh(&psock->ingress_lock); } + if (skb) + goto start; while ((skb = skb_dequeue(&psock->ingress_skb))) { len = skb->len; @@ -615,9 +634,8 @@ start: len, ingress); if (ret <= 0) { if (ret == -EAGAIN) { - state->skb = skb; - state->len = len; - state->off = off; + sk_psock_skb_state(psock, state, skb, + len, off); goto end; } /* Hard errors break pipe and stop xmit. */ @@ -716,6 +734,11 @@ static void __sk_psock_zap_ingress(struct sk_psock *psock) skb_bpf_redirect_clear(skb); sock_drop(psock->sk, skb); } + kfree_skb(psock->work_state.skb); + /* We null the skb here to ensure that calls to sk_psock_backlog + * do not pick up the free'd skb. 
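+ * (sk_psock_backlog() only picks up and clears work_state.skb
+ * under the same ingress_lock, so the worker and this teardown
+ * path cannot both end up owning the skb.)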
+ */ + psock->work_state.skb = NULL; __sk_psock_purge_ingress_msg(psock); } @@ -767,8 +790,6 @@ static void sk_psock_destroy(struct work_struct *work) void sk_psock_drop(struct sock *sk, struct sk_psock *psock) { - sk_psock_stop(psock, false); - write_lock_bh(&sk->sk_callback_lock); sk_psock_restore_proto(sk, psock); rcu_assign_sk_user_data(sk, NULL); @@ -778,6 +799,8 @@ void sk_psock_drop(struct sock *sk, struct sk_psock *psock) sk_psock_stop_verdict(sk, psock); write_unlock_bh(&sk->sk_callback_lock); + sk_psock_stop(psock, false); + INIT_RCU_WORK(&psock->rwork, sk_psock_destroy); queue_rcu_work(system_wq, &psock->rwork); } diff --git a/net/core/sock.c b/net/core/sock.c index ba1c0f75cd45..a3eea6e0b30a 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -139,6 +139,8 @@ #include <net/tcp.h> #include <net/busy_poll.h> +#include <linux/ethtool.h> + static DEFINE_MUTEX(proto_list_mutex); static LIST_HEAD(proto_list); @@ -810,8 +812,47 @@ void sock_set_timestamp(struct sock *sk, int optname, bool valbool) } } -int sock_set_timestamping(struct sock *sk, int optname, int val) +static int sock_timestamping_bind_phc(struct sock *sk, int phc_index) +{ + struct net *net = sock_net(sk); + struct net_device *dev = NULL; + bool match = false; + int *vclock_index; + int i, num; + + if (sk->sk_bound_dev_if) + dev = dev_get_by_index(net, sk->sk_bound_dev_if); + + if (!dev) { + pr_err("%s: sock not bind to device\n", __func__); + return -EOPNOTSUPP; + } + + num = ethtool_get_phc_vclocks(dev, &vclock_index); + for (i = 0; i < num; i++) { + if (*(vclock_index + i) == phc_index) { + match = true; + break; + } + } + + if (num > 0) + kfree(vclock_index); + + if (!match) + return -EINVAL; + + sk->sk_bind_phc = phc_index; + + return 0; +} + +int sock_set_timestamping(struct sock *sk, int optname, + struct so_timestamping timestamping) { + int val = timestamping.flags; + int ret; + if (val & ~SOF_TIMESTAMPING_MASK) return -EINVAL; @@ -832,6 +873,12 @@ int sock_set_timestamping(struct sock *sk, int optname, int val) !(val & SOF_TIMESTAMPING_OPT_TSONLY)) return -EINVAL; + if (val & SOF_TIMESTAMPING_BIND_PHC) { + ret = sock_timestamping_bind_phc(sk, timestamping.bind_phc); + if (ret) + return ret; + } + sk->sk_tsflags = val; sock_valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW); @@ -907,6 +954,7 @@ EXPORT_SYMBOL(sock_set_mark); int sock_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, unsigned int optlen) { + struct so_timestamping timestamping; struct sock_txtime sk_txtime; struct sock *sk = sock->sk; int val; @@ -1068,12 +1116,22 @@ set_sndbuf: case SO_TIMESTAMP_NEW: case SO_TIMESTAMPNS_OLD: case SO_TIMESTAMPNS_NEW: - sock_set_timestamp(sk, valbool, optname); + sock_set_timestamp(sk, optname, valbool); break; case SO_TIMESTAMPING_NEW: case SO_TIMESTAMPING_OLD: - ret = sock_set_timestamping(sk, optname, val); + if (optlen == sizeof(timestamping)) { + if (copy_from_sockptr(×tamping, optval, + sizeof(timestamping))) { + ret = -EFAULT; + break; + } + } else { + memset(×tamping, 0, sizeof(timestamping)); + timestamping.flags = val; + } + ret = sock_set_timestamping(sk, optname, timestamping); break; case SO_RCVLOWAT: @@ -1201,7 +1259,7 @@ set_sndbuf: if (val < 0) ret = -EINVAL; else - sk->sk_ll_usec = val; + WRITE_ONCE(sk->sk_ll_usec, val); } break; case SO_PREFER_BUSY_POLL: @@ -1348,6 +1406,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, struct __kernel_old_timeval tm; struct __kernel_sock_timeval stm; struct sock_txtime txtime; + struct so_timestamping 
timestamping; } v; int lv = sizeof(int); @@ -1451,7 +1510,9 @@ int sock_getsockopt(struct socket *sock, int level, int optname, break; case SO_TIMESTAMPING_OLD: - v.val = sk->sk_tsflags; + lv = sizeof(v.timestamping); + v.timestamping.flags = sk->sk_tsflags; + v.timestamping.bind_phc = sk->sk_bind_phc; break; case SO_RCVTIMEO_OLD: diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index 9cc9d1ee6cdb..c5c1d2b8045e 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h @@ -41,9 +41,9 @@ extern bool dccp_debug; #define dccp_pr_debug_cat(format, a...) DCCP_PRINTK(dccp_debug, format, ##a) #define dccp_debug(fmt, a...) dccp_pr_debug_cat(KERN_DEBUG fmt, ##a) #else -#define dccp_pr_debug(format, a...) -#define dccp_pr_debug_cat(format, a...) -#define dccp_debug(format, a...) +#define dccp_pr_debug(format, a...) do {} while (0) +#define dccp_pr_debug_cat(format, a...) do {} while (0) +#define dccp_debug(format, a...) do {} while (0) #endif extern struct inet_hashinfo dccp_hashinfo; diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index 5dbd45dc35ad..dc92a67baea3 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c @@ -816,7 +816,7 @@ static int dn_auto_bind(struct socket *sock) static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation) { struct dn_scp *scp = DN_SK(sk); - DEFINE_WAIT(wait); + DEFINE_WAIT_FUNC(wait, woken_wake_function); int err; if (scp->state != DN_CR) @@ -826,11 +826,11 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation) scp->segsize_loc = dst_metric_advmss(__sk_dst_get(sk)); dn_send_conn_conf(sk, allocation); - prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); + add_wait_queue(sk_sleep(sk), &wait); for(;;) { release_sock(sk); if (scp->state == DN_CC) - *timeo = schedule_timeout(*timeo); + *timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, *timeo); lock_sock(sk); err = 0; if (scp->state == DN_RUN) @@ -844,9 +844,8 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation) err = -EAGAIN; if (!*timeo) break; - prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); } - finish_wait(sk_sleep(sk), &wait); + remove_wait_queue(sk_sleep(sk), &wait); if (err == 0) { sk->sk_socket->state = SS_CONNECTED; } else if (scp->state != DN_CC) { @@ -858,7 +857,7 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation) static int dn_wait_run(struct sock *sk, long *timeo) { struct dn_scp *scp = DN_SK(sk); - DEFINE_WAIT(wait); + DEFINE_WAIT_FUNC(wait, woken_wake_function); int err = 0; if (scp->state == DN_RUN) @@ -867,11 +866,11 @@ static int dn_wait_run(struct sock *sk, long *timeo) if (!*timeo) return -EALREADY; - prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); + add_wait_queue(sk_sleep(sk), &wait); for(;;) { release_sock(sk); if (scp->state == DN_CI || scp->state == DN_CC) - *timeo = schedule_timeout(*timeo); + *timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, *timeo); lock_sock(sk); err = 0; if (scp->state == DN_RUN) @@ -885,9 +884,8 @@ static int dn_wait_run(struct sock *sk, long *timeo) err = -ETIMEDOUT; if (!*timeo) break; - prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); } - finish_wait(sk_sleep(sk), &wait); + remove_wait_queue(sk_sleep(sk), &wait); out: if (err == 0) { sk->sk_socket->state = SS_CONNECTED; @@ -1032,16 +1030,16 @@ static void dn_user_copy(struct sk_buff *skb, struct optdata_dn *opt) static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo) { - DEFINE_WAIT(wait); + DEFINE_WAIT_FUNC(wait, woken_wake_function); struct sk_buff *skb = NULL; 
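/*
 * Hedged sketch of the pattern the decnet conversions above adopt: with
 * DEFINE_WAIT() + prepare_to_wait() + schedule_timeout(), a wakeup that
 * arrives while the task is off the wait queue between loop iterations can
 * be lost. woken_wake_function() records the wakeup in the wait entry
 * itself, so wait_woken() returns immediately instead of sleeping. Every
 * name except the wait/wake API below is illustrative.
 */
#include <linux/wait.h>
#include <linux/sched/signal.h>

static long example_wait_for_flag(struct wait_queue_head *wq, bool *flag,
				  long timeo)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(wq, &wait);
	while (!READ_ONCE(*flag) && timeo && !signal_pending(current)) {
		/* Sleeps, but returns at once if a wakeup already arrived. */
		timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
	}
	remove_wait_queue(wq, &wait);

	return timeo;	/* remaining time, 0 on timeout */
}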
int err = 0; - prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); + add_wait_queue(sk_sleep(sk), &wait); for(;;) { release_sock(sk); skb = skb_dequeue(&sk->sk_receive_queue); if (skb == NULL) { - *timeo = schedule_timeout(*timeo); + *timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, *timeo); skb = skb_dequeue(&sk->sk_receive_queue); } lock_sock(sk); @@ -1056,9 +1054,8 @@ static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo) err = -EAGAIN; if (!*timeo) break; - prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); } - finish_wait(sk_sleep(sk), &wait); + remove_wait_queue(sk_sleep(sk), &wait); return skb == NULL ? ERR_PTR(err) : skb; } diff --git a/net/dsa/slave.c b/net/dsa/slave.c index ffbba1e71551..23be8e01026b 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -1808,6 +1808,7 @@ void dsa_slave_setup_tagger(struct net_device *slave) struct dsa_slave_priv *p = netdev_priv(slave); const struct dsa_port *cpu_dp = dp->cpu_dp; struct net_device *master = cpu_dp->master; + const struct dsa_switch *ds = dp->ds; slave->needed_headroom = cpu_dp->tag_ops->needed_headroom; slave->needed_tailroom = cpu_dp->tag_ops->needed_tailroom; @@ -1819,6 +1820,14 @@ void dsa_slave_setup_tagger(struct net_device *slave) slave->needed_tailroom += master->needed_tailroom; p->xmit = cpu_dp->tag_ops->xmit; + + slave->features = master->vlan_features | NETIF_F_HW_TC; + if (ds->ops->port_vlan_add && ds->ops->port_vlan_del) + slave->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + slave->hw_features |= NETIF_F_HW_TC; + slave->features |= NETIF_F_LLTX; + if (slave->needed_tailroom) + slave->features &= ~(NETIF_F_SG | NETIF_F_FRAGLIST); } static struct lock_class_key dsa_slave_netdev_xmit_lock_key; @@ -1881,11 +1890,6 @@ int dsa_slave_create(struct dsa_port *port) if (slave_dev == NULL) return -ENOMEM; - slave_dev->features = master->vlan_features | NETIF_F_HW_TC; - if (ds->ops->port_vlan_add && ds->ops->port_vlan_del) - slave_dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; - slave_dev->hw_features |= NETIF_F_HW_TC; - slave_dev->features |= NETIF_F_LLTX; slave_dev->ethtool_ops = &dsa_slave_ethtool_ops; if (!is_zero_ether_addr(port->mac)) ether_addr_copy(slave_dev->dev_addr, port->mac); @@ -2287,8 +2291,8 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb, static void dsa_fdb_offload_notify(struct dsa_switchdev_event_work *switchdev_work) { + struct switchdev_notifier_fdb_info info = {}; struct dsa_switch *ds = switchdev_work->ds; - struct switchdev_notifier_fdb_info info; struct dsa_port *dp; if (!dsa_is_user_port(ds, switchdev_work->port)) diff --git a/net/dsa/switch.c b/net/dsa/switch.c index af71b8638098..5ece05dfd8f2 100644 --- a/net/dsa/switch.c +++ b/net/dsa/switch.c @@ -113,11 +113,11 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds, int err, port; if (dst->index == info->tree_index && ds->index == info->sw_index && - ds->ops->port_bridge_join) + ds->ops->port_bridge_leave) ds->ops->port_bridge_leave(ds, info->port, info->br); if ((dst->index != info->tree_index || ds->index != info->sw_index) && - ds->ops->crosschip_bridge_join) + ds->ops->crosschip_bridge_leave) ds->ops->crosschip_bridge_leave(ds, info->tree_index, info->sw_index, info->port, info->br); @@ -427,7 +427,7 @@ static int dsa_switch_lag_join(struct dsa_switch *ds, info->port, info->lag, info->info); - return 0; + return -EOPNOTSUPP; } static int dsa_switch_lag_leave(struct dsa_switch *ds, @@ -440,7 +440,7 @@ static int dsa_switch_lag_leave(struct dsa_switch *ds, return ds->ops->crosschip_lag_leave(ds, 
info->sw_index, info->port, info->lag); - return 0; + return -EOPNOTSUPP; } static int dsa_switch_mdb_add(struct dsa_switch *ds, diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c index 53565f48934c..a201ccf2435d 100644 --- a/net/dsa/tag_ksz.c +++ b/net/dsa/tag_ksz.c @@ -53,6 +53,9 @@ static struct sk_buff *ksz8795_xmit(struct sk_buff *skb, struct net_device *dev) u8 *tag; u8 *addr; + if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb)) + return NULL; + /* Tag encoding */ tag = skb_put(skb, KSZ_INGRESS_TAG_LEN); addr = skb_mac_header(skb); @@ -114,6 +117,9 @@ static struct sk_buff *ksz9477_xmit(struct sk_buff *skb, u8 *addr; u16 val; + if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb)) + return NULL; + /* Tag encoding */ tag = skb_put(skb, KSZ9477_INGRESS_TAG_LEN); addr = skb_mac_header(skb); @@ -164,6 +170,9 @@ static struct sk_buff *ksz9893_xmit(struct sk_buff *skb, u8 *addr; u8 *tag; + if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb)) + return NULL; + /* Tag encoding */ tag = skb_put(skb, KSZ_INGRESS_TAG_LEN); addr = skb_mac_header(skb); diff --git a/net/ethtool/Makefile b/net/ethtool/Makefile index 723c9a8a8cdf..0a19470efbfb 100644 --- a/net/ethtool/Makefile +++ b/net/ethtool/Makefile @@ -7,4 +7,4 @@ obj-$(CONFIG_ETHTOOL_NETLINK) += ethtool_nl.o ethtool_nl-y := netlink.o bitset.o strset.o linkinfo.o linkmodes.o \ linkstate.o debug.o wol.o features.o privflags.o rings.o \ channels.o coalesce.o pause.o eee.o tsinfo.o cabletest.o \ - tunnels.o fec.o eeprom.o stats.o + tunnels.o fec.o eeprom.o stats.o phc_vclocks.o diff --git a/net/ethtool/common.c b/net/ethtool/common.c index f9dcbad84788..c63e0739dc6a 100644 --- a/net/ethtool/common.c +++ b/net/ethtool/common.c @@ -4,6 +4,7 @@ #include <linux/net_tstamp.h> #include <linux/phy.h> #include <linux/rtnetlink.h> +#include <linux/ptp_clock_kernel.h> #include "common.h" @@ -397,6 +398,7 @@ const char sof_timestamping_names[][ETH_GSTRING_LEN] = { [const_ilog2(SOF_TIMESTAMPING_OPT_STATS)] = "option-stats", [const_ilog2(SOF_TIMESTAMPING_OPT_PKTINFO)] = "option-pktinfo", [const_ilog2(SOF_TIMESTAMPING_OPT_TX_SWHW)] = "option-tx-swhw", + [const_ilog2(SOF_TIMESTAMPING_BIND_PHC)] = "bind-phc", }; static_assert(ARRAY_SIZE(sof_timestamping_names) == __SOF_TIMESTAMPING_CNT); @@ -554,6 +556,18 @@ int __ethtool_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) return 0; } +int ethtool_get_phc_vclocks(struct net_device *dev, int **vclock_index) +{ + struct ethtool_ts_info info = { }; + int num = 0; + + if (!__ethtool_get_ts_info(dev, &info)) + num = ptp_get_vclocks_index(info.phc_index, vclock_index); + + return num; +} +EXPORT_SYMBOL(ethtool_get_phc_vclocks); + const struct ethtool_phy_ops *ethtool_phy_ops; void ethtool_set_ethtool_phy_ops(const struct ethtool_phy_ops *ops) diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c index a7346346114f..73e0f5b626bf 100644 --- a/net/ethtool/netlink.c +++ b/net/ethtool/netlink.c @@ -248,6 +248,7 @@ ethnl_default_requests[__ETHTOOL_MSG_USER_CNT] = { [ETHTOOL_MSG_TSINFO_GET] = &ethnl_tsinfo_request_ops, [ETHTOOL_MSG_MODULE_EEPROM_GET] = &ethnl_module_eeprom_request_ops, [ETHTOOL_MSG_STATS_GET] = &ethnl_stats_request_ops, + [ETHTOOL_MSG_PHC_VCLOCKS_GET] = &ethnl_phc_vclocks_request_ops, }; static struct ethnl_dump_ctx *ethnl_dump_context(struct netlink_callback *cb) @@ -958,6 +959,15 @@ static const struct genl_ops ethtool_genl_ops[] = { .policy = ethnl_stats_get_policy, .maxattr = ARRAY_SIZE(ethnl_stats_get_policy) - 1, }, + { + .cmd = ETHTOOL_MSG_PHC_VCLOCKS_GET,
+ .doit = ethnl_default_doit, + .start = ethnl_default_start, + .dumpit = ethnl_default_dumpit, + .done = ethnl_default_done, + .policy = ethnl_phc_vclocks_get_policy, + .maxattr = ARRAY_SIZE(ethnl_phc_vclocks_get_policy) - 1, + }, }; static const struct genl_multicast_group ethtool_nl_mcgrps[] = { diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h index 3e25a47fd482..3fc395c86702 100644 --- a/net/ethtool/netlink.h +++ b/net/ethtool/netlink.h @@ -347,6 +347,7 @@ extern const struct ethnl_request_ops ethnl_tsinfo_request_ops; extern const struct ethnl_request_ops ethnl_fec_request_ops; extern const struct ethnl_request_ops ethnl_module_eeprom_request_ops; extern const struct ethnl_request_ops ethnl_stats_request_ops; +extern const struct ethnl_request_ops ethnl_phc_vclocks_request_ops; extern const struct nla_policy ethnl_header_policy[ETHTOOL_A_HEADER_FLAGS + 1]; extern const struct nla_policy ethnl_header_policy_stats[ETHTOOL_A_HEADER_FLAGS + 1]; @@ -382,6 +383,7 @@ extern const struct nla_policy ethnl_fec_get_policy[ETHTOOL_A_FEC_HEADER + 1]; extern const struct nla_policy ethnl_fec_set_policy[ETHTOOL_A_FEC_AUTO + 1]; extern const struct nla_policy ethnl_module_eeprom_get_policy[ETHTOOL_A_MODULE_EEPROM_I2C_ADDRESS + 1]; extern const struct nla_policy ethnl_stats_get_policy[ETHTOOL_A_STATS_GROUPS + 1]; +extern const struct nla_policy ethnl_phc_vclocks_get_policy[ETHTOOL_A_PHC_VCLOCKS_HEADER + 1]; int ethnl_set_linkinfo(struct sk_buff *skb, struct genl_info *info); int ethnl_set_linkmodes(struct sk_buff *skb, struct genl_info *info); diff --git a/net/ethtool/phc_vclocks.c b/net/ethtool/phc_vclocks.c new file mode 100644 index 000000000000..637b2f5297d5 --- /dev/null +++ b/net/ethtool/phc_vclocks.c @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2021 NXP + */ +#include "netlink.h" +#include "common.h" + +struct phc_vclocks_req_info { + struct ethnl_req_info base; +}; + +struct phc_vclocks_reply_data { + struct ethnl_reply_data base; + int num; + int *index; +}; + +#define PHC_VCLOCKS_REPDATA(__reply_base) \ + container_of(__reply_base, struct phc_vclocks_reply_data, base) + +const struct nla_policy ethnl_phc_vclocks_get_policy[] = { + [ETHTOOL_A_PHC_VCLOCKS_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy), +}; + +static int phc_vclocks_prepare_data(const struct ethnl_req_info *req_base, + struct ethnl_reply_data *reply_base, + struct genl_info *info) +{ + struct phc_vclocks_reply_data *data = PHC_VCLOCKS_REPDATA(reply_base); + struct net_device *dev = reply_base->dev; + int ret; + + ret = ethnl_ops_begin(dev); + if (ret < 0) + return ret; + data->num = ethtool_get_phc_vclocks(dev, &data->index); + ethnl_ops_complete(dev); + + return ret; +} + +static int phc_vclocks_reply_size(const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + const struct phc_vclocks_reply_data *data = + PHC_VCLOCKS_REPDATA(reply_base); + int len = 0; + + if (data->num > 0) { + len += nla_total_size(sizeof(u32)); + len += nla_total_size(sizeof(s32) * data->num); + } + + return len; +} + +static int phc_vclocks_fill_reply(struct sk_buff *skb, + const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + const struct phc_vclocks_reply_data *data = + PHC_VCLOCKS_REPDATA(reply_base); + + if (data->num <= 0) + return 0; + + if (nla_put_u32(skb, ETHTOOL_A_PHC_VCLOCKS_NUM, data->num) || + nla_put(skb, ETHTOOL_A_PHC_VCLOCKS_INDEX, + sizeof(s32) * data->num, data->index)) + return -EMSGSIZE; + + return 0; +} + +static void 
phc_vclocks_cleanup_data(struct ethnl_reply_data *reply_base) +{ + const struct phc_vclocks_reply_data *data = + PHC_VCLOCKS_REPDATA(reply_base); + + kfree(data->index); +} + +const struct ethnl_request_ops ethnl_phc_vclocks_request_ops = { + .request_cmd = ETHTOOL_MSG_PHC_VCLOCKS_GET, + .reply_cmd = ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY, + .hdr_attr = ETHTOOL_A_PHC_VCLOCKS_HEADER, + .req_info_size = sizeof(struct phc_vclocks_req_info), + .reply_data_size = sizeof(struct phc_vclocks_reply_data), + + .prepare_data = phc_vclocks_prepare_data, + .reply_size = phc_vclocks_reply_size, + .fill_reply = phc_vclocks_fill_reply, + .cleanup_data = phc_vclocks_cleanup_data, +}; diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c index a45a0401adc5..c25f7617770c 100644 --- a/net/ieee802154/socket.c +++ b/net/ieee802154/socket.c @@ -984,6 +984,11 @@ static const struct proto_ops ieee802154_dgram_ops = { .sendpage = sock_no_sendpage, }; +static void ieee802154_sock_destruct(struct sock *sk) +{ + skb_queue_purge(&sk->sk_receive_queue); +} + /* Create a socket. Initialise the socket, blank the addresses * set the state. */ @@ -1024,7 +1029,7 @@ static int ieee802154_create(struct net *net, struct socket *sock, sock->ops = ops; sock_init_data(sock, sk); - /* FIXME: sk->sk_destruct */ + sk->sk_destruct = ieee802154_sock_destruct; sk->sk_family = PF_IEEE802154; /* Checksums on by default */ diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index a933bd6345b1..9fe13e4f5d08 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -1376,7 +1376,7 @@ static void nl_fib_input(struct sk_buff *skb) portid = NETLINK_CB(skb).portid; /* netlink portid */ NETLINK_CB(skb).portid = 0; /* from kernel */ NETLINK_CB(skb).dst_group = 0; /* unicast */ - netlink_unicast(net->ipv4.fibnl, skb, portid, MSG_DONTWAIT); + nlmsg_unicast(net->ipv4.fibnl, skb, portid); } static int __net_init nl_fib_lookup_init(struct net *net) diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 6b3c558a4f23..00576bae183d 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -803,10 +803,17 @@ static void igmp_gq_timer_expire(struct timer_list *t) static void igmp_ifc_timer_expire(struct timer_list *t) { struct in_device *in_dev = from_timer(in_dev, t, mr_ifc_timer); + u32 mr_ifc_count; igmpv3_send_cr(in_dev); - if (in_dev->mr_ifc_count) { - in_dev->mr_ifc_count--; +restart: + mr_ifc_count = READ_ONCE(in_dev->mr_ifc_count); + + if (mr_ifc_count) { + if (cmpxchg(&in_dev->mr_ifc_count, + mr_ifc_count, + mr_ifc_count - 1) != mr_ifc_count) + goto restart; igmp_ifc_start_timer(in_dev, unsolicited_report_interval(in_dev)); } @@ -818,7 +825,7 @@ static void igmp_ifc_event(struct in_device *in_dev) struct net *net = dev_net(in_dev->dev); if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) return; - in_dev->mr_ifc_count = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; + WRITE_ONCE(in_dev->mr_ifc_count, in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv); igmp_ifc_start_timer(in_dev, 1); } @@ -957,7 +964,7 @@ static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb, in_dev->mr_qri; } /* cancel the interface change timer */ - in_dev->mr_ifc_count = 0; + WRITE_ONCE(in_dev->mr_ifc_count, 0); if (del_timer(&in_dev->mr_ifc_timer)) __in_dev_put(in_dev); /* clear deleted report items */ @@ -1724,7 +1731,7 @@ void ip_mc_down(struct in_device *in_dev) igmp_group_dropped(pmc); #ifdef CONFIG_IP_MULTICAST - in_dev->mr_ifc_count = 0; + WRITE_ONCE(in_dev->mr_ifc_count, 0); if (del_timer(&in_dev->mr_ifc_timer)) __in_dev_put(in_dev); 
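/*
 * Hedged sketch of the lockless decrement used in igmp_ifc_timer_expire()
 * above: the timer decrements mr_ifc_count while igmp_ifc_event() and
 * igmp_heard_query() may overwrite it concurrently, so a plain load/store
 * pair could lose one of the writes. cmpxchg() only commits the decrement
 * if the value is still the one that was read, retrying otherwise. The
 * helper name is illustrative.
 */
#include <linux/types.h>
#include <linux/atomic.h>

static void example_dec_if_nonzero(u32 *counter)
{
	u32 old;

	do {
		old = READ_ONCE(*counter);
		if (!old)
			return;		/* nothing left to decrement */
		/* Retry when another writer changed the counter under us. */
	} while (cmpxchg(counter, old, old - 1) != old);
}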
in_dev->mr_gq_running = 0; @@ -1941,7 +1948,7 @@ static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode, pmc->sfmode = MCAST_INCLUDE; #ifdef CONFIG_IP_MULTICAST pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; - in_dev->mr_ifc_count = pmc->crcount; + WRITE_ONCE(in_dev->mr_ifc_count, pmc->crcount); for (psf = pmc->sources; psf; psf = psf->sf_next) psf->sf_crcount = 0; igmp_ifc_event(pmc->interface); @@ -2120,7 +2127,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode, /* else no filters; keep old mode for reports */ pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; - in_dev->mr_ifc_count = pmc->crcount; + WRITE_ONCE(in_dev->mr_ifc_count, pmc->crcount); for (psf = pmc->sources; psf; psf = psf->sf_next) psf->sf_crcount = 0; igmp_ifc_event(in_dev); diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index e65f4ef024a4..ef7897226f08 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -580,10 +580,7 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, nlmsg_free(rep); goto out; } - err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid, - MSG_DONTWAIT); - if (err > 0) - err = 0; + err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid); out: if (sk) diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index f6cc26de5ed3..be75b409445c 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -317,7 +317,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev) } dev->needed_headroom = t_hlen + hlen; - mtu -= t_hlen; + mtu -= t_hlen + (dev->type == ARPHRD_ETHER ? dev->hard_header_len : 0); if (mtu < IPV4_MIN_MTU) mtu = IPV4_MIN_MTU; @@ -348,6 +348,9 @@ static struct ip_tunnel *ip_tunnel_create(struct net *net, t_hlen = nt->hlen + sizeof(struct iphdr); dev->min_mtu = ETH_MIN_MTU; dev->max_mtu = IP_MAX_MTU - t_hlen; + if (dev->type == ARPHRD_ETHER) + dev->max_mtu -= dev->hard_header_len; + ip_tunnel_add(itn, nt); return nt; @@ -387,7 +390,7 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb, tunnel->i_seqno = ntohl(tpi->seq) + 1; } - skb_reset_network_header(skb); + skb_set_network_header(skb, (tunnel->dev->type == ARPHRD_ETHER) ? ETH_HLEN : 0); err = IP_ECN_decapsulate(iph, skb); if (unlikely(err)) { @@ -489,11 +492,14 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb, tunnel_hlen = md ? tunnel_hlen : tunnel->hlen; pkt_size = skb->len - tunnel_hlen; + pkt_size -= dev->type == ARPHRD_ETHER ? dev->hard_header_len : 0; - if (df) + if (df) { mtu = dst_mtu(&rt->dst) - (sizeof(struct iphdr) + tunnel_hlen); - else + mtu -= dev->type == ARPHRD_ETHER ? dev->hard_header_len : 0; + } else { mtu = skb_valid_dst(skb) ? 
dst_mtu(skb_dst(skb)) : dev->mtu; + } if (skb_valid_dst(skb)) skb_dst_update_pmtu_no_confirm(skb, mtu); @@ -972,6 +978,9 @@ int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict) int t_hlen = tunnel->hlen + sizeof(struct iphdr); int max_mtu = IP_MAX_MTU - t_hlen; + if (dev->type == ARPHRD_ETHER) + max_mtu -= dev->hard_header_len; + if (new_mtu < ETH_MIN_MTU) return -EINVAL; @@ -1149,6 +1158,9 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], if (tb[IFLA_MTU]) { unsigned int max = IP_MAX_MTU - (nt->hlen + sizeof(struct iphdr)); + if (dev->type == ARPHRD_ETHER) + max -= dev->hard_header_len; + mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU, max); } diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 7b12a40dd465..2dda856ca260 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -2119,7 +2119,7 @@ int ip_mr_input(struct sk_buff *skb) raw_rcv(mroute_sk, skb); return 0; } - } + } } /* already under rcu_read_lock() */ diff --git a/net/ipv4/raw_diag.c b/net/ipv4/raw_diag.c index 1b5b8af27aaf..ccacbde30a2c 100644 --- a/net/ipv4/raw_diag.c +++ b/net/ipv4/raw_diag.c @@ -119,11 +119,8 @@ static int raw_diag_dump_one(struct netlink_callback *cb, return err; } - err = netlink_unicast(net->diag_nlsk, rep, - NETLINK_CB(in_skb).portid, - MSG_DONTWAIT); - if (err > 0) - err = 0; + err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid); + return err; } diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index d5ab5f243640..8cb44040ec68 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1375,6 +1375,9 @@ new_segment: } pfrag->offset += copy; } else { + if (!sk_wmem_schedule(sk, copy)) + goto wait_for_space; + err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg); if (err == -EMSGSIZE || err == -EEXIST) { tcp_mark_push(tp, skb); diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c index 6ea3dc2e4219..6274462b86b4 100644 --- a/net/ipv4/tcp_bbr.c +++ b/net/ipv4/tcp_bbr.c @@ -1041,7 +1041,7 @@ static void bbr_init(struct sock *sk) bbr->prior_cwnd = 0; tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; bbr->rtt_cnt = 0; - bbr->next_rtt_delivered = 0; + bbr->next_rtt_delivered = tp->delivered; bbr->prev_ca_state = TCP_CA_Open; bbr->packet_conservation = 0; diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c index f26916a62f25..d3e9386b493e 100644 --- a/net/ipv4/tcp_bpf.c +++ b/net/ipv4/tcp_bpf.c @@ -503,7 +503,7 @@ static int __init tcp_bpf_v4_build_proto(void) tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV4], &tcp_prot); return 0; } -core_initcall(tcp_bpf_v4_build_proto); +late_initcall(tcp_bpf_v4_build_proto); static int tcp_bpf_assert_proto_ops(struct proto *ops) { diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c index 47c32604d38f..25fa4c01a17f 100644 --- a/net/ipv4/tcp_fastopen.c +++ b/net/ipv4/tcp_fastopen.c @@ -507,8 +507,18 @@ void tcp_fastopen_active_disable(struct sock *sk) { struct net *net = sock_net(sk); + if (!sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout) + return; + + /* Paired with READ_ONCE() in tcp_fastopen_active_should_disable() */ + WRITE_ONCE(net->ipv4.tfo_active_disable_stamp, jiffies); + + /* Paired with smp_rmb() in tcp_fastopen_active_should_disable(). + * We want net->ipv4.tfo_active_disable_stamp to be updated first. 
+ */ + smp_mb__before_atomic(); atomic_inc(&net->ipv4.tfo_active_disable_times); - net->ipv4.tfo_active_disable_stamp = jiffies; + NET_INC_STATS(net, LINUX_MIB_TCPFASTOPENBLACKHOLE); } @@ -519,17 +529,27 @@ void tcp_fastopen_active_disable(struct sock *sk) bool tcp_fastopen_active_should_disable(struct sock *sk) { unsigned int tfo_bh_timeout = sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout; - int tfo_da_times = atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times); unsigned long timeout; + int tfo_da_times; int multiplier; + if (!tfo_bh_timeout) + return false; + + tfo_da_times = atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times); if (!tfo_da_times) return false; + /* Paired with smp_mb__before_atomic() in tcp_fastopen_active_disable() */ + smp_rmb(); + /* Limit timeout to max: 2^6 * initial timeout */ multiplier = 1 << min(tfo_da_times - 1, 6); - timeout = multiplier * tfo_bh_timeout * HZ; - if (time_before(jiffies, sock_net(sk)->ipv4.tfo_active_disable_stamp + timeout)) + + /* Paired with the WRITE_ONCE() in tcp_fastopen_active_disable(). */ + timeout = READ_ONCE(sock_net(sk)->ipv4.tfo_active_disable_stamp) + + multiplier * tfo_bh_timeout * HZ; + if (time_before(jiffies, timeout)) return true; /* Mark check bit so we can check for successful active TFO diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index e6ca5a1f3b59..149ceb5c94ff 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4247,6 +4247,9 @@ void tcp_reset(struct sock *sk, struct sk_buff *skb) { trace_tcp_receive_reset(sk); + /* mptcp can't tell us to ignore reset pkts, + * so just ignore the return value of mptcp_incoming_options(). + */ if (sk_is_mptcp(sk)) mptcp_incoming_options(sk, skb); @@ -4941,8 +4944,13 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) bool fragstolen; int eaten; - if (sk_is_mptcp(sk)) - mptcp_incoming_options(sk, skb); + /* If a subflow has been reset, the packet should not continue + * to be processed, drop the packet. + */ + if (sk_is_mptcp(sk) && !mptcp_incoming_options(sk, skb)) { + __kfree_skb(skb); + return; + } if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) { __kfree_skb(skb); @@ -5922,8 +5930,8 @@ void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb) tp->snd_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk)); tp->snd_cwnd_stamp = tcp_jiffies32; - icsk->icsk_ca_initialized = 0; bpf_skops_established(sk, bpf_op, skb); + /* Initialize congestion control unless BPF initialized it already: */ if (!icsk->icsk_ca_initialized) tcp_init_congestion_control(sk); tcp_init_buffer_space(sk); @@ -6523,8 +6531,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) case TCP_CLOSING: case TCP_LAST_ACK: if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { - if (sk_is_mptcp(sk)) - mptcp_incoming_options(sk, skb); + /* If a subflow has been reset, the packet should not + * continue to be processed, drop the packet. 
+ */ + if (sk_is_mptcp(sk) && !mptcp_incoming_options(sk, skb)) + goto discard; break; } fallthrough; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index e66ad6bfe808..a692626c19e4 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -342,7 +342,7 @@ void tcp_v4_mtu_reduced(struct sock *sk) if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) return; - mtu = tcp_sk(sk)->mtu_info; + mtu = READ_ONCE(tcp_sk(sk)->mtu_info); dst = inet_csk_update_pmtu(sk, mtu); if (!dst) return; @@ -546,7 +546,7 @@ int tcp_v4_err(struct sk_buff *skb, u32 info) if (sk->sk_state == TCP_LISTEN) goto out; - tp->mtu_info = info; + WRITE_ONCE(tp->mtu_info, info); if (!sock_owned_by_user(sk)) { tcp_v4_mtu_reduced(sk); } else { @@ -2965,7 +2965,7 @@ static int __net_init tcp_sk_init(struct net *net) net->ipv4.sysctl_tcp_comp_sack_nr = 44; net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE; spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock); - net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 60 * 60; + net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 0; atomic_set(&net->ipv4.tfo_active_disable_times, 0); /* Reno is always built in */ diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c index e09147ac9a99..fc61cd3fea65 100644 --- a/net/ipv4/tcp_offload.c +++ b/net/ipv4/tcp_offload.c @@ -298,6 +298,9 @@ int tcp_gro_complete(struct sk_buff *skb) if (th->cwr) skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; + if (skb->encapsulation) + skb->inner_transport_header = skb->transport_header; + return 0; } EXPORT_SYMBOL(tcp_gro_complete); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index bde781f46b41..29553fce8502 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1732,6 +1732,7 @@ int tcp_mtu_to_mss(struct sock *sk, int pmtu) return __tcp_mtu_to_mss(sk, pmtu) - (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr)); } +EXPORT_SYMBOL(tcp_mtu_to_mss); /* Inverse of above */ int tcp_mss_to_mtu(struct sock *sk, int mss) diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 62682807b4b2..1a742b710e54 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -645,10 +645,12 @@ static struct sock *__udp4_lib_err_encap(struct net *net, const struct iphdr *iph, struct udphdr *uh, struct udp_table *udptable, + struct sock *sk, struct sk_buff *skb, u32 info) { + int (*lookup)(struct sock *sk, struct sk_buff *skb); int network_offset, transport_offset; - struct sock *sk; + struct udp_sock *up; network_offset = skb_network_offset(skb); transport_offset = skb_transport_offset(skb); @@ -659,18 +661,28 @@ static struct sock *__udp4_lib_err_encap(struct net *net, /* Transport header needs to point to the UDP header */ skb_set_transport_header(skb, iph->ihl << 2); + if (sk) { + up = udp_sk(sk); + + lookup = READ_ONCE(up->encap_err_lookup); + if (lookup && lookup(sk, skb)) + sk = NULL; + + goto out; + } + sk = __udp4_lib_lookup(net, iph->daddr, uh->source, iph->saddr, uh->dest, skb->dev->ifindex, 0, udptable, NULL); if (sk) { - int (*lookup)(struct sock *sk, struct sk_buff *skb); - struct udp_sock *up = udp_sk(sk); + up = udp_sk(sk); lookup = READ_ONCE(up->encap_err_lookup); if (!lookup || lookup(sk, skb)) sk = NULL; } +out: if (!sk) sk = ERR_PTR(__udp4_lib_err_encap_no_sk(skb, info)); @@ -707,15 +719,16 @@ int __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable) sk = __udp4_lib_lookup(net, iph->daddr, uh->dest, iph->saddr, uh->source, skb->dev->ifindex, inet_sdif(skb), udptable, NULL); + if (!sk || udp_sk(sk)->encap_type) { /* No socket for error: try tunnels before discarding */ - 
sk = ERR_PTR(-ENOENT); if (static_branch_unlikely(&udp_encap_needed_key)) { - sk = __udp4_lib_err_encap(net, iph, uh, udptable, skb, + sk = __udp4_lib_err_encap(net, iph, uh, udptable, sk, skb, info); if (!sk) return 0; - } + } else + sk = ERR_PTR(-ENOENT); if (IS_ERR(sk)) { __ICMP_INC_STATS(net, ICMP_MIB_INERRORS); @@ -1102,7 +1115,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) } ipcm_init_sk(&ipc, inet); - ipc.gso_size = up->gso_size; + ipc.gso_size = READ_ONCE(up->gso_size); if (msg->msg_controllen) { err = udp_cmsg_send(sk, msg, &ipc.gso_size); @@ -2695,7 +2708,7 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname, case UDP_SEGMENT: if (val < 0 || val > USHRT_MAX) return -EINVAL; - up->gso_size = val; + WRITE_ONCE(up->gso_size, val); break; case UDP_GRO: @@ -2790,7 +2803,7 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname, break; case UDP_SEGMENT: - val = up->gso_size; + val = READ_ONCE(up->gso_size); break; case UDP_GRO: diff --git a/net/ipv4/udp_bpf.c b/net/ipv4/udp_bpf.c index 45b8782aec0c..9f5a5cdc38e6 100644 --- a/net/ipv4/udp_bpf.c +++ b/net/ipv4/udp_bpf.c @@ -134,7 +134,7 @@ static int __init udp_bpf_v4_build_proto(void) udp_bpf_rebuild_protos(&udp_bpf_prots[UDP_BPF_IPV4], &udp_prot); return 0; } -core_initcall(udp_bpf_v4_build_proto); +late_initcall(udp_bpf_v4_build_proto); int udp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore) { diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c index b2cee9a307d4..1ed8c4d78e5c 100644 --- a/net/ipv4/udp_diag.c +++ b/net/ipv4/udp_diag.c @@ -77,10 +77,8 @@ static int udp_dump_one(struct udp_table *tbl, kfree_skb(rep); goto out; } - err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid, - MSG_DONTWAIT); - if (err > 0) - err = 0; + err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid); + out: if (sk) sock_put(sk); diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 54e06b88af69..1380a6b6f4ff 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -525,8 +525,10 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb, if ((!sk && (skb->dev->features & NETIF_F_GRO_UDP_FWD)) || (sk && udp_sk(sk)->gro_enabled) || NAPI_GRO_CB(skb)->is_flist) - pp = call_gro_receive(udp_gro_receive_segment, head, skb); - return pp; + return call_gro_receive(udp_gro_receive_segment, head, skb); + + /* no GRO, be sure flush the current packet */ + goto out; } if (NAPI_GRO_CB(skb)->encap_mark || @@ -622,6 +624,10 @@ static int udp_gro_complete_segment(struct sk_buff *skb) skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4; + + if (skb->encapsulation) + skb->inner_transport_header = skb->transport_header; + return 0; } diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 984050f35c61..8e6ca9ad6812 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -60,10 +60,38 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff * { struct dst_entry *dst = skb_dst(skb); struct net_device *dev = dst->dev; + unsigned int hh_len = LL_RESERVED_SPACE(dev); + int delta = hh_len - skb_headroom(skb); const struct in6_addr *nexthop; struct neighbour *neigh; int ret; + /* Be paranoid, rather than too clever. 
*/ + if (unlikely(delta > 0) && dev->header_ops) { + /* pskb_expand_head() might crash, if skb is shared */ + if (skb_shared(skb)) { + struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); + + if (likely(nskb)) { + if (skb->sk) + skb_set_owner_w(nskb, skb->sk); + consume_skb(skb); + } else { + kfree_skb(skb); + } + skb = nskb; + } + if (skb && + pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) { + kfree_skb(skb); + skb = NULL; + } + if (!skb) { + IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS); + return -ENOMEM; + } + } + if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) { struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); @@ -479,7 +507,9 @@ int ip6_forward(struct sk_buff *skb) if (skb_warn_if_lro(skb)) goto drop; - if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) { + if (!net->ipv6.devconf_all->disable_policy && + !idev->cnf.disable_policy && + !xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) { __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS); goto drop; } @@ -519,9 +549,10 @@ int ip6_forward(struct sk_buff *skb) if (net->ipv6.devconf_all->proxy_ndp && pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) { int proxied = ip6_forward_proxy_check(skb); - if (proxied > 0) + if (proxied > 0) { + hdr->hop_limit--; return ip6_input(skb); - else if (proxied < 0) { + } else if (proxied < 0) { __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS); goto drop; } diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 7b756a7dc036..b6ddf23d3833 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -3769,7 +3769,7 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg, err = PTR_ERR(rt->fib6_metrics); /* Do not leave garbage there. */ rt->fib6_metrics = (struct dst_metrics *)&dst_default_metrics; - goto out; + goto out_free; } if (cfg->fc_flags & RTF_ADDRCONF) diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 578ab6305c3f..0ce52d46e4f8 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -348,11 +348,20 @@ failure: static void tcp_v6_mtu_reduced(struct sock *sk) { struct dst_entry *dst; + u32 mtu; if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) return; - dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info); + mtu = READ_ONCE(tcp_sk(sk)->mtu_info); + + /* Drop requests trying to increase our current mss. + * Check done in __ip6_rt_update_pmtu() is too late. + */ + if (tcp_mtu_to_mss(sk, mtu) >= tcp_sk(sk)->mss_cache) + return; + + dst = inet6_csk_update_pmtu(sk, mtu); if (!dst) return; @@ -433,6 +442,8 @@ static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, } if (type == ICMPV6_PKT_TOOBIG) { + u32 mtu = ntohl(info); + /* We are not interested in TCP_LISTEN and open_requests * (SYN-ACKs send out by Linux are always <576bytes so * they should go through unfragmented). @@ -443,7 +454,11 @@ static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, if (!ip6_sk_accept_pmtu(sk)) goto out; - tp->mtu_info = ntohl(info); + if (mtu < IPV6_MIN_MTU) + goto out; + + WRITE_ONCE(tp->mtu_info, mtu); + if (!sock_owned_by_user(sk)) tcp_v6_mtu_reduced(sk); else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, @@ -540,7 +555,7 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst, opt = ireq->ipv6_opt; if (!opt) opt = rcu_dereference(np->opt); - err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, + err = ip6_xmit(sk, skb, fl6, skb->mark ? 
: sk->sk_mark, opt, tclass, sk->sk_priority); rcu_read_unlock(); err = net_xmit_eval(err); diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 368972dbd919..c5e15e94bb00 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -502,12 +502,14 @@ static struct sock *__udp6_lib_err_encap(struct net *net, const struct ipv6hdr *hdr, int offset, struct udphdr *uh, struct udp_table *udptable, + struct sock *sk, struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, __be32 info) { + int (*lookup)(struct sock *sk, struct sk_buff *skb); int network_offset, transport_offset; - struct sock *sk; + struct udp_sock *up; network_offset = skb_network_offset(skb); transport_offset = skb_transport_offset(skb); @@ -518,18 +520,28 @@ static struct sock *__udp6_lib_err_encap(struct net *net, /* Transport header needs to point to the UDP header */ skb_set_transport_header(skb, offset); + if (sk) { + up = udp_sk(sk); + + lookup = READ_ONCE(up->encap_err_lookup); + if (lookup && lookup(sk, skb)) + sk = NULL; + + goto out; + } + sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source, &hdr->saddr, uh->dest, inet6_iif(skb), 0, udptable, skb); if (sk) { - int (*lookup)(struct sock *sk, struct sk_buff *skb); - struct udp_sock *up = udp_sk(sk); + up = udp_sk(sk); lookup = READ_ONCE(up->encap_err_lookup); if (!lookup || lookup(sk, skb)) sk = NULL; } +out: if (!sk) { sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code, offset, info)); @@ -558,16 +570,17 @@ int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt, sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source, inet6_iif(skb), inet6_sdif(skb), udptable, NULL); + if (!sk || udp_sk(sk)->encap_type) { /* No socket for error: try tunnels before discarding */ - sk = ERR_PTR(-ENOENT); if (static_branch_unlikely(&udpv6_encap_needed_key)) { sk = __udp6_lib_err_encap(net, hdr, offset, uh, - udptable, skb, + udptable, sk, skb, opt, type, code, info); if (!sk) return 0; - } + } else + sk = ERR_PTR(-ENOENT); if (IS_ERR(sk)) { __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), @@ -1296,7 +1309,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) int (*getfrag)(void *, char *, int, int, int, struct sk_buff *); ipcm6_init(&ipc6); - ipc6.gso_size = up->gso_size; + ipc6.gso_size = READ_ONCE(up->gso_size); ipc6.sockc.tsflags = sk->sk_tsflags; ipc6.sockc.mark = sk->sk_mark; diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c index 57fa27c1cdf9..d0d280077721 100644 --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c @@ -49,7 +49,7 @@ static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); struct xfrm_state *x = dst->xfrm; - int mtu; + unsigned int mtu; bool toobig; #ifdef CONFIG_NETFILTER diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index 349c6ac3313f..e6795d5a546a 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c @@ -1635,14 +1635,16 @@ struct iucv_message_pending { u8 iptype; u32 ipmsgid; u32 iptrgcls; - union { - u32 iprmmsg1_u32; - u8 iprmmsg1[4]; - } ln1msg1; - union { - u32 ipbfln1f; - u8 iprmmsg2[4]; - } ln1msg2; + struct { + union { + u32 iprmmsg1_u32; + u8 iprmmsg1[4]; + } ln1msg1; + union { + u32 ipbfln1f; + u8 iprmmsg2[4]; + } ln1msg2; + } rmmsg; u32 res1[3]; u32 ipbfln2f; u8 ippollfg; @@ -1660,10 +1662,10 @@ static void iucv_message_pending(struct iucv_irq_data *data) msg.id = imp->ipmsgid; msg.class = imp->iptrgcls; if (imp->ipflags1 & IUCV_IPRMDATA) { - memcpy(msg.rmmsg, imp->ln1msg1.iprmmsg1, 8); + memcpy(msg.rmmsg, &imp->rmmsg, 8); 
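/*
 * Hedged sketch of why the iucv change above wraps the two 4-byte unions
 * in one named struct: memcpy()ing 8 bytes out of a 4-byte member is an
 * out-of-bounds access as far as the C object model (and FORTIFY_SOURCE)
 * is concerned, even when the neighbouring field happens to be adjacent
 * in memory. Grouping the fields gives the copy a single 8-byte object.
 * The types below are illustrative.
 */
#include <linux/types.h>
#include <linux/string.h>

struct example_hdr {
	struct {
		u32 word0;
		u32 word1;
	} payload;			/* one 8-byte object */
};

static void example_copy(u8 dst[8], const struct example_hdr *hdr)
{
	/* In bounds: sizeof(hdr->payload) == 8, unlike &hdr->payload.word0. */
	memcpy(dst, &hdr->payload, sizeof(hdr->payload));
}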
msg.length = 8; } else - msg.length = imp->ln1msg2.ipbfln1f; + msg.length = imp->rmmsg.ln1msg2.ipbfln1f; msg.reply_size = imp->ipbfln2f; path->handler->message_pending(path, &msg); } diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 7180979114e4..ac5cadd02cfa 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -98,8 +98,16 @@ static inline u8 llc_ui_header_len(struct sock *sk, struct sockaddr_llc *addr) { u8 rc = LLC_PDU_LEN_U; - if (addr->sllc_test || addr->sllc_xid) + if (addr->sllc_test) rc = LLC_PDU_LEN_U; + else if (addr->sllc_xid) + /* We need to expand header to sizeof(struct llc_xid_info) + * since llc_pdu_init_as_xid_cmd() sets 4,5,6 bytes of LLC header + * as XID PDU. In llc_ui_sendmsg() we reserved header size and then + * filled all other space with user data. If we won't reserve this + * bytes, llc_pdu_init_as_xid_cmd() will overwrite user data + */ + rc = LLC_PDU_LEN_U_XID; else if (sk->sk_type == SOCK_STREAM) rc = LLC_PDU_LEN_I; return rc; diff --git a/net/llc/llc_s_ac.c b/net/llc/llc_s_ac.c index b554f26c68ee..79d1cef8f15a 100644 --- a/net/llc/llc_s_ac.c +++ b/net/llc/llc_s_ac.c @@ -79,7 +79,7 @@ int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb) struct llc_sap_state_ev *ev = llc_sap_ev(skb); int rc; - llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap, + llc_pdu_header_init(skb, LLC_PDU_TYPE_U_XID, ev->saddr.lsap, ev->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_xid_cmd(skb, LLC_XID_NULL_CLASS_2, 0); rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac); diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 84cc7733ea66..4e6f11e63df3 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -152,6 +152,8 @@ static int ieee80211_change_iface(struct wiphy *wiphy, struct vif_params *params) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; int ret; ret = ieee80211_if_change_type(sdata, type); @@ -162,7 +164,24 @@ static int ieee80211_change_iface(struct wiphy *wiphy, RCU_INIT_POINTER(sdata->u.vlan.sta, NULL); ieee80211_check_fast_rx_iface(sdata); } else if (type == NL80211_IFTYPE_STATION && params->use_4addr >= 0) { + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + + if (params->use_4addr == ifmgd->use_4addr) + return 0; + sdata->u.mgd.use_4addr = params->use_4addr; + if (!ifmgd->associated) + return 0; + + mutex_lock(&local->sta_mtx); + sta = sta_info_get(sdata, ifmgd->bssid); + if (sta) + drv_sta_set_4addr(local, sdata, &sta->sta, + params->use_4addr); + mutex_unlock(&local->sta_mtx); + + if (params->use_4addr) + ieee80211_send_4addr_nullfunc(local, sdata); } if (sdata->vif.type == NL80211_IFTYPE_MONITOR) { diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 22549b95d1aa..30ce6d2ec7ce 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -2201,6 +2201,8 @@ void ieee80211_dynamic_ps_timer(struct timer_list *t); void ieee80211_send_nullfunc(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, bool powersave); +void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata); void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata, struct ieee80211_hdr *hdr, bool ack, u16 tx_time); diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 05f4c3c72619..fcae76ddd586 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -260,6 +260,8 @@ static void ieee80211_restart_work(struct work_struct *work) 
flush_work(&local->radar_detected_work); rtnl_lock(); + /* we might do interface manipulations, so need both */ + wiphy_lock(local->hw.wiphy); WARN(test_bit(SCAN_HW_SCANNING, &local->scanning), "%s called with hardware scan in progress\n", __func__); diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index a00f11a33699..c0ea3b1aa9e1 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -1095,8 +1095,8 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local, ieee80211_tx_skb(sdata, skb); } -static void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local, - struct ieee80211_sub_if_data *sdata) +void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata) { struct sk_buff *skb; struct ieee80211_hdr *nullfunc; diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 771921c057e8..2563473b5cf1 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -730,7 +730,8 @@ ieee80211_make_monitor_skb(struct ieee80211_local *local, * Need to make a copy and possibly remove radiotap header * and FCS from the original. */ - skb = skb_copy_expand(*origskb, needed_headroom, 0, GFP_ATOMIC); + skb = skb_copy_expand(*origskb, needed_headroom + NET_SKB_PAD, + 0, GFP_ATOMIC); if (!skb) return NULL; diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index e96981144358..8509778ff31f 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1147,6 +1147,29 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx, return queued; } +static void +ieee80211_aggr_check(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, + struct sk_buff *skb) +{ + struct rate_control_ref *ref = sdata->local->rate_ctrl; + u16 tid; + + if (!ref || !(ref->ops->capa & RATE_CTRL_CAPA_AMPDU_TRIGGER)) + return; + + if (!sta || !sta->sta.ht_cap.ht_supported || + !sta->sta.wme || skb_get_queue_mapping(skb) == IEEE80211_AC_VO || + skb->protocol == sdata->control_port_protocol) + return; + + tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; + if (likely(sta->ampdu_mlme.tid_tx[tid])) + return; + + ieee80211_start_tx_ba_session(&sta->sta, tid, 0); +} + /* * initialises @tx * pass %NULL for the station if unknown, a valid pointer if known @@ -1160,6 +1183,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata, struct ieee80211_local *local = sdata->local; struct ieee80211_hdr *hdr; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + bool aggr_check = false; int tid; memset(tx, 0, sizeof(*tx)); @@ -1188,8 +1212,10 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata, } else if (tx->sdata->control_port_protocol == tx->skb->protocol) { tx->sta = sta_info_get_bss(sdata, hdr->addr1); } - if (!tx->sta && !is_multicast_ether_addr(hdr->addr1)) + if (!tx->sta && !is_multicast_ether_addr(hdr->addr1)) { tx->sta = sta_info_get(sdata, hdr->addr1); + aggr_check = true; + } } if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) && @@ -1199,8 +1225,12 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata, struct tid_ampdu_tx *tid_tx; tid = ieee80211_get_tid(hdr); - tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]); + if (!tid_tx && aggr_check) { + ieee80211_aggr_check(sdata, tx->sta, skb); + tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]); + } + if (tid_tx) { bool queued; @@ -4120,29 +4150,6 @@ void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac) } EXPORT_SYMBOL(ieee80211_txq_schedule_start); -static void -ieee80211_aggr_check(struct ieee80211_sub_if_data *sdata, - struct sta_info *sta, - struct sk_buff *skb) -{ - 
struct rate_control_ref *ref = sdata->local->rate_ctrl; - u16 tid; - - if (!ref || !(ref->ops->capa & RATE_CTRL_CAPA_AMPDU_TRIGGER)) - return; - - if (!sta || !sta->sta.ht_cap.ht_supported || - !sta->sta.wme || skb_get_queue_mapping(skb) == IEEE80211_AC_VO || - skb->protocol == sdata->control_port_protocol) - return; - - tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; - if (likely(sta->ampdu_mlme.tid_tx[tid])) - return; - - ieee80211_start_tx_ba_session(&sta->sta, tid, 0); -} - void __ieee80211_subif_start_xmit(struct sk_buff *skb, struct net_device *dev, u32 info_flags, diff --git a/net/mptcp/mib.c b/net/mptcp/mib.c index 52ea2517e856..ff2cc0e3273d 100644 --- a/net/mptcp/mib.c +++ b/net/mptcp/mib.c @@ -44,6 +44,7 @@ static const struct snmp_mib mptcp_snmp_list[] = { SNMP_MIB_ITEM("RmSubflow", MPTCP_MIB_RMSUBFLOW), SNMP_MIB_ITEM("MPPrioTx", MPTCP_MIB_MPPRIOTX), SNMP_MIB_ITEM("MPPrioRx", MPTCP_MIB_MPPRIORX), + SNMP_MIB_ITEM("RcvPruned", MPTCP_MIB_RCVPRUNED), SNMP_MIB_SENTINEL }; diff --git a/net/mptcp/mib.h b/net/mptcp/mib.h index 193466c9b549..0663cb12b448 100644 --- a/net/mptcp/mib.h +++ b/net/mptcp/mib.h @@ -37,6 +37,7 @@ enum linux_mptcp_mib_field { MPTCP_MIB_RMSUBFLOW, /* Remove a subflow */ MPTCP_MIB_MPPRIOTX, /* Transmit a MP_PRIO */ MPTCP_MIB_MPPRIORX, /* Received a MP_PRIO */ + MPTCP_MIB_RCVPRUNED, /* Incoming packet dropped due to memory limit */ __MPTCP_MIB_MAX }; diff --git a/net/mptcp/mptcp_diag.c b/net/mptcp/mptcp_diag.c index 8f88ddeab6a2..f48eb6315bbb 100644 --- a/net/mptcp/mptcp_diag.c +++ b/net/mptcp/mptcp_diag.c @@ -57,10 +57,8 @@ static int mptcp_diag_dump_one(struct netlink_callback *cb, kfree_skb(rep); goto out; } - err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid, - MSG_DONTWAIT); - if (err > 0) - err = 0; + err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid); + out: sock_put(sk); diff --git a/net/mptcp/options.c b/net/mptcp/options.c index b5850afea343..7adcbc1f7d49 100644 --- a/net/mptcp/options.c +++ b/net/mptcp/options.c @@ -885,20 +885,16 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk, return subflow->mp_capable; } - if (mp_opt->dss && mp_opt->use_ack) { + if ((mp_opt->dss && mp_opt->use_ack) || + (mp_opt->add_addr && !mp_opt->echo)) { /* subflows are fully established as soon as we get any - * additional ack. + * additional ack, including ADD_ADDR. */ subflow->fully_established = 1; WRITE_ONCE(msk->fully_established, true); goto fully_established; } - if (mp_opt->add_addr) { - WRITE_ONCE(msk->fully_established, true); - return true; - } - /* If the first established packet does not contain MP_CAPABLE + data * then fallback to TCP. Fallback scenarios requires a reset for * MP_JOIN subflows. @@ -1035,7 +1031,8 @@ static bool add_addr_hmac_valid(struct mptcp_sock *msk, return hmac == mp_opt->ahmac; } -void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb) +/* Return false if a subflow has been reset, else return true */ +bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb) { struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); struct mptcp_sock *msk = mptcp_sk(subflow->conn); @@ -1053,12 +1050,16 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb) __mptcp_check_push(subflow->conn, sk); __mptcp_data_acked(subflow->conn); mptcp_data_unlock(subflow->conn); - return; + return true; } mptcp_get_options(sk, skb, &mp_opt); + + /* The subflow can be in close state only if check_fully_established() + * just sent a reset. 
If so, tell the caller to ignore the current packet. + */ if (!check_fully_established(msk, sk, subflow, skb, &mp_opt)) - return; + return sk->sk_state != TCP_CLOSE; if (mp_opt.fastclose && msk->local_key == mp_opt.rcvr_key) { @@ -1100,7 +1101,7 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb) } if (!mp_opt.dss) - return; + return true; /* we can't wait for recvmsg() to update the ack_seq, otherwise * monodirectional flows will stuck @@ -1119,12 +1120,12 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb) schedule_work(&msk->work)) sock_hold(subflow->conn); - return; + return true; } mpext = skb_ext_add(skb, SKB_EXT_MPTCP); if (!mpext) - return; + return true; memset(mpext, 0, sizeof(*mpext)); @@ -1153,6 +1154,8 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb) if (mpext->csum_reqd) mpext->csum = mp_opt.csum; } + + return true; } static void mptcp_set_rwin(const struct tcp_sock *tp) diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c index d2591ebf01d9..7b3794459783 100644 --- a/net/mptcp/pm_netlink.c +++ b/net/mptcp/pm_netlink.c @@ -27,7 +27,6 @@ struct mptcp_pm_addr_entry { struct mptcp_addr_info addr; u8 flags; int ifindex; - struct rcu_head rcu; struct socket *lsk; }; @@ -1136,36 +1135,12 @@ next: return 0; } -struct addr_entry_release_work { - struct rcu_work rwork; - struct mptcp_pm_addr_entry *entry; -}; - -static void mptcp_pm_release_addr_entry(struct work_struct *work) +/* caller must ensure the RCU grace period is already elapsed */ +static void __mptcp_pm_release_addr_entry(struct mptcp_pm_addr_entry *entry) { - struct addr_entry_release_work *w; - struct mptcp_pm_addr_entry *entry; - - w = container_of(to_rcu_work(work), struct addr_entry_release_work, rwork); - entry = w->entry; - if (entry) { - if (entry->lsk) - sock_release(entry->lsk); - kfree(entry); - } - kfree(w); -} - -static void mptcp_pm_free_addr_entry(struct mptcp_pm_addr_entry *entry) -{ - struct addr_entry_release_work *w; - - w = kmalloc(sizeof(*w), GFP_ATOMIC); - if (w) { - INIT_RCU_WORK(&w->rwork, mptcp_pm_release_addr_entry); - w->entry = entry; - queue_rcu_work(system_wq, &w->rwork); - } + if (entry->lsk) + sock_release(entry->lsk); + kfree(entry); } static int mptcp_nl_remove_id_zero_address(struct net *net, @@ -1245,7 +1220,8 @@ static int mptcp_nl_cmd_del_addr(struct sk_buff *skb, struct genl_info *info) spin_unlock_bh(&pernet->lock); mptcp_nl_remove_subflow_and_signal_addr(sock_net(skb->sk), &entry->addr); - mptcp_pm_free_addr_entry(entry); + synchronize_rcu(); + __mptcp_pm_release_addr_entry(entry); return ret; } @@ -1298,6 +1274,7 @@ static void mptcp_nl_remove_addrs_list(struct net *net, } } +/* caller must ensure the RCU grace period is already elapsed */ static void __flush_addrs(struct list_head *list) { while (!list_empty(list)) { @@ -1306,7 +1283,7 @@ static void __flush_addrs(struct list_head *list) cur = list_entry(list->next, struct mptcp_pm_addr_entry, list); list_del_rcu(&cur->list); - mptcp_pm_free_addr_entry(cur); + __mptcp_pm_release_addr_entry(cur); } } @@ -1330,6 +1307,7 @@ static int mptcp_nl_cmd_flush_addrs(struct sk_buff *skb, struct genl_info *info) bitmap_zero(pernet->id_bitmap, MAX_ADDR_ID + 1); spin_unlock_bh(&pernet->lock); mptcp_nl_remove_addrs_list(sock_net(skb->sk), &free_list); + synchronize_rcu(); __flush_addrs(&free_list); return 0; } @@ -1940,7 +1918,8 @@ static void __net_exit pm_nl_exit_net(struct list_head *net_list) struct pm_nl_pernet *pernet = net_generic(net, pm_nl_pernet_id); /* net is removed from 
namespace list, can't race with - * other modifiers + * other modifiers, also netns core already waited for a + * RCU grace period. */ __flush_addrs(&pernet->local_addr_list); } diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 7a5afa8c6866..a88924947815 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -474,7 +474,7 @@ static void mptcp_cleanup_rbuf(struct mptcp_sock *msk) bool cleanup, rx_empty; cleanup = (space > 0) && (space >= (old_space << 1)); - rx_empty = !atomic_read(&sk->sk_rmem_alloc); + rx_empty = !__mptcp_rmem(sk); mptcp_for_each_subflow(msk, subflow) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); @@ -720,8 +720,10 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk) sk_rbuf = ssk_rbuf; /* over limit? can't append more skbs to msk, Also, no need to wake-up*/ - if (atomic_read(&sk->sk_rmem_alloc) > sk_rbuf) + if (__mptcp_rmem(sk) > sk_rbuf) { + MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED); return; + } /* Wake-up the reader only for in-sequence data */ mptcp_data_lock(sk); @@ -1754,7 +1756,7 @@ static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk, if (!(flags & MSG_PEEK)) { /* we will bulk release the skb memory later */ skb->destructor = NULL; - msk->rmem_released += skb->truesize; + WRITE_ONCE(msk->rmem_released, msk->rmem_released + skb->truesize); __skb_unlink(skb, &msk->receive_queue); __kfree_skb(skb); } @@ -1873,7 +1875,7 @@ static void __mptcp_update_rmem(struct sock *sk) atomic_sub(msk->rmem_released, &sk->sk_rmem_alloc); sk_mem_uncharge(sk, msk->rmem_released); - msk->rmem_released = 0; + WRITE_ONCE(msk->rmem_released, 0); } static void __mptcp_splice_receive_queue(struct sock *sk) @@ -2380,7 +2382,7 @@ static int __mptcp_init_sock(struct sock *sk) msk->out_of_order_queue = RB_ROOT; msk->first_pending = NULL; msk->wmem_reserved = 0; - msk->rmem_released = 0; + WRITE_ONCE(msk->rmem_released, 0); msk->tx_pending_data = 0; msk->first = NULL; diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index 426ed80fe72f..0f0c026c5f8b 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -296,9 +296,17 @@ static inline struct mptcp_sock *mptcp_sk(const struct sock *sk) return (struct mptcp_sock *)sk; } +/* the msk socket don't use the backlog, also account for the bulk + * free memory + */ +static inline int __mptcp_rmem(const struct sock *sk) +{ + return atomic_read(&sk->sk_rmem_alloc) - READ_ONCE(mptcp_sk(sk)->rmem_released); +} + static inline int __mptcp_space(const struct sock *sk) { - return tcp_space(sk) + READ_ONCE(mptcp_sk(sk)->rmem_released); + return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) - __mptcp_rmem(sk)); } static inline struct mptcp_data_frag *mptcp_send_head(const struct sock *sk) diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c index 092d1f635d27..8c03afac5ca0 100644 --- a/net/mptcp/sockopt.c +++ b/net/mptcp/sockopt.c @@ -157,19 +157,7 @@ static int mptcp_setsockopt_sol_socket_tstamp(struct mptcp_sock *msk, int optnam struct sock *ssk = mptcp_subflow_tcp_sock(subflow); bool slow = lock_sock_fast(ssk); - switch (optname) { - case SO_TIMESTAMP_OLD: - case SO_TIMESTAMP_NEW: - case SO_TIMESTAMPNS_OLD: - case SO_TIMESTAMPNS_NEW: - sock_set_timestamp(sk, optname, !!val); - break; - case SO_TIMESTAMPING_NEW: - case SO_TIMESTAMPING_OLD: - sock_set_timestamping(sk, optname, val); - break; - } - + sock_set_timestamp(sk, optname, !!val); unlock_sock_fast(ssk, slow); } @@ -178,7 +166,8 @@ static int mptcp_setsockopt_sol_socket_tstamp(struct mptcp_sock *msk, int optnam } static int 
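/*
 * Hedged sketch of the RCU release pattern the pm_netlink changes above
 * switch to: unlink the entry under the lock, wait one grace period with
 * synchronize_rcu(), then free it directly, replacing the previous
 * deferred free through a workqueue. list_del_rcu()/synchronize_rcu() are
 * the real APIs; the entry type and helper are illustrative.
 */
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_entry {
	struct list_head list;
};

static void example_remove(spinlock_t *lock, struct example_entry *entry)
{
	spin_lock_bh(lock);
	list_del_rcu(&entry->list);	/* readers may still see the entry */
	spin_unlock_bh(lock);

	synchronize_rcu();		/* wait out all current RCU readers */
	kfree(entry);			/* now no reader can hold a reference */
}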
mptcp_setsockopt_sol_socket_int(struct mptcp_sock *msk, int optname, - sockptr_t optval, unsigned int optlen) + sockptr_t optval, + unsigned int optlen) { int val, ret; @@ -205,14 +194,56 @@ static int mptcp_setsockopt_sol_socket_int(struct mptcp_sock *msk, int optname, case SO_TIMESTAMP_NEW: case SO_TIMESTAMPNS_OLD: case SO_TIMESTAMPNS_NEW: - case SO_TIMESTAMPING_OLD: - case SO_TIMESTAMPING_NEW: return mptcp_setsockopt_sol_socket_tstamp(msk, optname, val); } return -ENOPROTOOPT; } +static int mptcp_setsockopt_sol_socket_timestamping(struct mptcp_sock *msk, + int optname, + sockptr_t optval, + unsigned int optlen) +{ + struct mptcp_subflow_context *subflow; + struct sock *sk = (struct sock *)msk; + struct so_timestamping timestamping; + int ret; + + if (optlen == sizeof(timestamping)) { + if (copy_from_sockptr(×tamping, optval, + sizeof(timestamping))) + return -EFAULT; + } else if (optlen == sizeof(int)) { + memset(×tamping, 0, sizeof(timestamping)); + + if (copy_from_sockptr(×tamping.flags, optval, sizeof(int))) + return -EFAULT; + } else { + return -EINVAL; + } + + ret = sock_setsockopt(sk->sk_socket, SOL_SOCKET, optname, + KERNEL_SOCKPTR(×tamping), + sizeof(timestamping)); + if (ret) + return ret; + + lock_sock(sk); + + mptcp_for_each_subflow(msk, subflow) { + struct sock *ssk = mptcp_subflow_tcp_sock(subflow); + bool slow = lock_sock_fast(ssk); + + sock_set_timestamping(sk, optname, timestamping); + unlock_sock_fast(ssk, slow); + } + + release_sock(sk); + + return 0; +} + static int mptcp_setsockopt_sol_socket_linger(struct mptcp_sock *msk, sockptr_t optval, unsigned int optlen) { @@ -299,9 +330,12 @@ static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname, case SO_TIMESTAMP_NEW: case SO_TIMESTAMPNS_OLD: case SO_TIMESTAMPNS_NEW: + return mptcp_setsockopt_sol_socket_int(msk, optname, optval, + optlen); case SO_TIMESTAMPING_OLD: case SO_TIMESTAMPING_NEW: - return mptcp_setsockopt_sol_socket_int(msk, optname, optval, optlen); + return mptcp_setsockopt_sol_socket_timestamping(msk, optname, + optval, optlen); case SO_LINGER: return mptcp_setsockopt_sol_socket_linger(msk, optval, optlen); case SO_RCVLOWAT: diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index 66d0b1893d26..966f777d35ce 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -214,11 +214,6 @@ again: ntohs(inet_sk(sk_listener)->inet_sport), ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport)); if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) { - sock_put((struct sock *)subflow_req->msk); - mptcp_token_destroy_request(req); - tcp_request_sock_ops.destructor(req); - subflow_req->msk = NULL; - subflow_req->mp_join = 0; SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTSYNRX); return -EPERM; } @@ -230,6 +225,8 @@ again: if (unlikely(req->syncookie)) { if (mptcp_can_accept_new_subflow(subflow_req->msk)) subflow_init_req_cookie_join_save(subflow_req, skb); + else + return -EPERM; } pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token, @@ -269,9 +266,7 @@ int mptcp_subflow_init_cookie_req(struct request_sock *req, if (!mptcp_token_join_cookie_init_state(subflow_req, skb)) return -EINVAL; - if (mptcp_can_accept_new_subflow(subflow_req->msk)) - subflow_req->mp_join = 1; - + subflow_req->mp_join = 1; subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1; } diff --git a/net/mptcp/syncookies.c b/net/mptcp/syncookies.c index abe0fd099746..37127781aee9 100644 --- a/net/mptcp/syncookies.c +++ b/net/mptcp/syncookies.c @@ -37,7 +37,21 @@ static spinlock_t 
join_entry_locks[COOKIE_JOIN_SLOTS] __cacheline_aligned_in_smp static u32 mptcp_join_entry_hash(struct sk_buff *skb, struct net *net) { - u32 i = skb_get_hash(skb) ^ net_hash_mix(net); + static u32 mptcp_join_hash_secret __read_mostly; + struct tcphdr *th = tcp_hdr(skb); + u32 seq, i; + + net_get_random_once(&mptcp_join_hash_secret, + sizeof(mptcp_join_hash_secret)); + + if (th->syn) + seq = TCP_SKB_CB(skb)->seq; + else + seq = TCP_SKB_CB(skb)->seq - 1; + + i = jhash_3words(seq, net_hash_mix(net), + (__force __u32)th->source << 16 | (__force __u32)th->dest, + mptcp_join_hash_secret); return i % ARRAY_SIZE(join_entries); } diff --git a/net/ncsi/Kconfig b/net/ncsi/Kconfig index 93309081f5a4..ea1dd32b6b1f 100644 --- a/net/ncsi/Kconfig +++ b/net/ncsi/Kconfig @@ -17,3 +17,9 @@ config NCSI_OEM_CMD_GET_MAC help This allows to get MAC address from NCSI firmware and set them back to controller. +config NCSI_OEM_CMD_KEEP_PHY + bool "Keep PHY Link up" + depends on NET_NCSI + help + This allows keeping the PHY link up and prevents any channel resets + during host load. diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h index cbbb0de4750a..0b6cfd3b31e0 100644 --- a/net/ncsi/internal.h +++ b/net/ncsi/internal.h @@ -78,6 +78,9 @@ enum { /* OEM Vendor Manufacture ID */ #define NCSI_OEM_MFR_MLX_ID 0x8119 #define NCSI_OEM_MFR_BCM_ID 0x113d +#define NCSI_OEM_MFR_INTEL_ID 0x157 +/* Intel specific OEM command */ +#define NCSI_OEM_INTEL_CMD_KEEP_PHY 0x20 /* CMD ID for Keep PHY up */ /* Broadcom specific OEM Command */ #define NCSI_OEM_BCM_CMD_GMA 0x01 /* CMD ID for Get MAC */ /* Mellanox specific OEM Command */ @@ -86,6 +89,7 @@ enum { #define NCSI_OEM_MLX_CMD_SMAF 0x01 /* CMD ID for Set MC Affinity */ #define NCSI_OEM_MLX_CMD_SMAF_PARAM 0x07 /* Parameter for SMAF */ /* OEM Command payload lengths*/ +#define NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN 7 #define NCSI_OEM_BCM_CMD_GMA_LEN 12 #define NCSI_OEM_MLX_CMD_GMA_LEN 8 #define NCSI_OEM_MLX_CMD_SMAF_LEN 60 @@ -271,6 +275,7 @@ enum { ncsi_dev_state_probe_mlx_gma, ncsi_dev_state_probe_mlx_smaf, ncsi_dev_state_probe_cis, + ncsi_dev_state_probe_keep_phy, ncsi_dev_state_probe_gvi, ncsi_dev_state_probe_gc, ncsi_dev_state_probe_gls, diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c index ca04b6df1341..89c7742cd72e 100644 --- a/net/ncsi/ncsi-manage.c +++ b/net/ncsi/ncsi-manage.c @@ -689,6 +689,35 @@ static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc, return 0; } +#if IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY) + +static int ncsi_oem_keep_phy_intel(struct ncsi_cmd_arg *nca) +{ + unsigned char data[NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN]; + int ret = 0; + + nca->payload = NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN; + + memset(data, 0, NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN); + *(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_INTEL_ID); + + data[4] = NCSI_OEM_INTEL_CMD_KEEP_PHY; + + /* PHY Link up attribute */ + data[6] = 0x1; + + nca->data = data; + + ret = ncsi_xmit_cmd(nca); + if (ret) + netdev_err(nca->ndp->ndev.dev, + "NCSI: Failed to transmit cmd 0x%x during configure\n", + nca->type); + return ret; +} + +#endif #if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC) /* NCSI OEM Command APIs */ @@ -700,7 +729,7 @@ static int ncsi_oem_gma_handler_bcm(struct ncsi_cmd_arg *nca) nca->payload = NCSI_OEM_BCM_CMD_GMA_LEN; memset(data, 0, NCSI_OEM_BCM_CMD_GMA_LEN); - *(unsigned int *)data = ntohl(NCSI_OEM_MFR_BCM_ID); + *(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_BCM_ID); data[5] = NCSI_OEM_BCM_CMD_GMA; nca->data = data; @@ -724,7 +753,7 @@ static int
ncsi_oem_gma_handler_mlx(struct ncsi_cmd_arg *nca) nca->payload = NCSI_OEM_MLX_CMD_GMA_LEN; memset(&u, 0, sizeof(u)); - u.data_u32[0] = ntohl(NCSI_OEM_MFR_MLX_ID); + u.data_u32[0] = ntohl((__force __be32)NCSI_OEM_MFR_MLX_ID); u.data_u8[5] = NCSI_OEM_MLX_CMD_GMA; u.data_u8[6] = NCSI_OEM_MLX_CMD_GMA_PARAM; @@ -747,7 +776,7 @@ static int ncsi_oem_smaf_mlx(struct ncsi_cmd_arg *nca) int ret = 0; memset(&u, 0, sizeof(u)); - u.data_u32[0] = ntohl(NCSI_OEM_MFR_MLX_ID); + u.data_u32[0] = ntohl((__force __be32)NCSI_OEM_MFR_MLX_ID); u.data_u8[5] = NCSI_OEM_MLX_CMD_SMAF; u.data_u8[6] = NCSI_OEM_MLX_CMD_SMAF_PARAM; memcpy(&u.data_u8[MLX_SMAF_MAC_ADDR_OFFSET], @@ -1392,7 +1421,23 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp) } nd->state = ncsi_dev_state_probe_gvi; + if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY)) + nd->state = ncsi_dev_state_probe_keep_phy; + break; +#if IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY) + case ncsi_dev_state_probe_keep_phy: + ndp->pending_req_num = 1; + + nca.type = NCSI_PKT_CMD_OEM; + nca.package = ndp->active_package->id; + nca.channel = 0; + ret = ncsi_oem_keep_phy_intel(&nca); + if (ret) + goto error; + + nd->state = ncsi_dev_state_probe_gvi; break; +#endif /* CONFIG_NCSI_OEM_CMD_KEEP_PHY */ case ncsi_dev_state_probe_gvi: case ncsi_dev_state_probe_gc: case ncsi_dev_state_probe_gls: diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c index 888ccc2d4e34..d48374894817 100644 --- a/net/ncsi/ncsi-rsp.c +++ b/net/ncsi/ncsi-rsp.c @@ -403,7 +403,7 @@ static int ncsi_rsp_handler_ev(struct ncsi_request *nr) /* Update to VLAN mode */ cmd = (struct ncsi_cmd_ev_pkt *)skb_network_header(nr->cmd); ncm->enable = 1; - ncm->data[0] = ntohl(cmd->mode); + ncm->data[0] = ntohl((__force __be32)cmd->mode); return 0; } @@ -699,12 +699,19 @@ static int ncsi_rsp_handler_oem_bcm(struct ncsi_request *nr) return 0; } +/* Response handler for Intel card */ +static int ncsi_rsp_handler_oem_intel(struct ncsi_request *nr) +{ + return 0; +} + static struct ncsi_rsp_oem_handler { unsigned int mfr_id; int (*handler)(struct ncsi_request *nr); } ncsi_rsp_oem_handlers[] = { { NCSI_OEM_MFR_MLX_ID, ncsi_rsp_handler_oem_mlx }, - { NCSI_OEM_MFR_BCM_ID, ncsi_rsp_handler_oem_bcm } + { NCSI_OEM_MFR_BCM_ID, ncsi_rsp_handler_oem_bcm }, + { NCSI_OEM_MFR_INTEL_ID, ncsi_rsp_handler_oem_intel } }; /* Response handler for OEM command */ diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c index d1bef23fd4f5..dd30c03d5a23 100644 --- a/net/netfilter/ipset/ip_set_hash_ip.c +++ b/net/netfilter/ipset/ip_set_hash_ip.c @@ -132,8 +132,11 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[], ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); if (ret) return ret; - if (ip > ip_to) + if (ip > ip_to) { + if (ip_to == 0) + return -IPSET_ERR_HASH_ELEM; swap(ip, ip_to); + } } else if (tb[IPSET_ATTR_CIDR]) { u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); @@ -144,6 +147,10 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[], hosts = h->netmask == 32 ? 
1 : 2 << (32 - h->netmask - 1); + /* 64bit division is not allowed on 32bit */ + if (((u64)ip_to - ip + 1) >> (32 - h->netmask) > IPSET_MAX_RANGE) + return -ERANGE; + if (retried) { ip = ntohl(h->next.ip); e.ip = htonl(ip); diff --git a/net/netfilter/ipset/ip_set_hash_ipmark.c b/net/netfilter/ipset/ip_set_hash_ipmark.c index 18346d18aa16..153de3457423 100644 --- a/net/netfilter/ipset/ip_set_hash_ipmark.c +++ b/net/netfilter/ipset/ip_set_hash_ipmark.c @@ -121,6 +121,8 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[], e.mark = ntohl(nla_get_be32(tb[IPSET_ATTR_MARK])); e.mark &= h->markmask; + if (e.mark == 0 && e.ip == 0) + return -IPSET_ERR_HASH_ELEM; if (adt == IPSET_TEST || !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR])) { @@ -133,8 +135,11 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[], ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); if (ret) return ret; - if (ip > ip_to) + if (ip > ip_to) { + if (e.mark == 0 && ip_to == 0) + return -IPSET_ERR_HASH_ELEM; swap(ip, ip_to); + } } else if (tb[IPSET_ATTR_CIDR]) { u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); @@ -143,6 +148,9 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[], ip_set_mask_from_to(ip, ip_to, cidr); } + if (((u64)ip_to - ip + 1) > IPSET_MAX_RANGE) + return -ERANGE; + if (retried) ip = ntohl(h->next.ip); for (; ip <= ip_to; ip++) { diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c index e1ca11196515..7303138e46be 100644 --- a/net/netfilter/ipset/ip_set_hash_ipport.c +++ b/net/netfilter/ipset/ip_set_hash_ipport.c @@ -173,6 +173,9 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[], swap(port, port_to); } + if (((u64)ip_to - ip + 1)*(port_to - port + 1) > IPSET_MAX_RANGE) + return -ERANGE; + if (retried) ip = ntohl(h->next.ip); for (; ip <= ip_to; ip++) { diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c index ab179e064597..334fb1ad0e86 100644 --- a/net/netfilter/ipset/ip_set_hash_ipportip.c +++ b/net/netfilter/ipset/ip_set_hash_ipportip.c @@ -180,6 +180,9 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[], swap(port, port_to); } + if (((u64)ip_to - ip + 1)*(port_to - port + 1) > IPSET_MAX_RANGE) + return -ERANGE; + if (retried) ip = ntohl(h->next.ip); for (; ip <= ip_to; ip++) { diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c index 8f075b44cf64..7df94f437f60 100644 --- a/net/netfilter/ipset/ip_set_hash_ipportnet.c +++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c @@ -253,6 +253,9 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[], swap(port, port_to); } + if (((u64)ip_to - ip + 1)*(port_to - port + 1) > IPSET_MAX_RANGE) + return -ERANGE; + ip2_to = ip2_from; if (tb[IPSET_ATTR_IP2_TO]) { ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to); diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c index c1a11f041ac6..1422739d9aa2 100644 --- a/net/netfilter/ipset/ip_set_hash_net.c +++ b/net/netfilter/ipset/ip_set_hash_net.c @@ -140,7 +140,7 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[], ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_net4_elem e = { .cidr = HOST_MASK }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); - u32 ip = 0, ip_to = 0; + u32 ip = 0, ip_to = 0, ipn, n = 0; int ret; if (tb[IPSET_ATTR_LINENO]) @@ -188,6 +188,15 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[], if (ip + UINT_MAX == ip_to) return -IPSET_ERR_HASH_RANGE; 
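/* Editorial note (not part of the patch): the counting loop added just below
 * is the range guard this series applies across the hash:net* set types. It
 * walks the requested range once, carving [ip, ip_to] into the same CIDR
 * blocks the insertion loop will later create, counts them in n, and rejects
 * the request with -ERANGE once n exceeds IPSET_MAX_RANGE, so a single
 * netlink request cannot queue an unbounded amount of work. */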
} + ipn = ip; + do { + ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr); + n++; + } while (ipn++ < ip_to); + + if (n > IPSET_MAX_RANGE) + return -ERANGE; + if (retried) ip = ntohl(h->next.ip); do { diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c index ddd51c2e1cb3..9810f5bf63f5 100644 --- a/net/netfilter/ipset/ip_set_hash_netiface.c +++ b/net/netfilter/ipset/ip_set_hash_netiface.c @@ -202,7 +202,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[], ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_netiface4_elem e = { .cidr = HOST_MASK, .elem = 1 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); - u32 ip = 0, ip_to = 0; + u32 ip = 0, ip_to = 0, ipn, n = 0; int ret; if (tb[IPSET_ATTR_LINENO]) @@ -256,6 +256,14 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[], } else { ip_set_mask_from_to(ip, ip_to, e.cidr); } + ipn = ip; + do { + ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr); + n++; + } while (ipn++ < ip_to); + + if (n > IPSET_MAX_RANGE) + return -ERANGE; if (retried) ip = ntohl(h->next.ip); diff --git a/net/netfilter/ipset/ip_set_hash_netnet.c b/net/netfilter/ipset/ip_set_hash_netnet.c index 6532f0505e66..3d09eefe998a 100644 --- a/net/netfilter/ipset/ip_set_hash_netnet.c +++ b/net/netfilter/ipset/ip_set_hash_netnet.c @@ -168,7 +168,8 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[], struct hash_netnet4_elem e = { }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); u32 ip = 0, ip_to = 0; - u32 ip2 = 0, ip2_from = 0, ip2_to = 0; + u32 ip2 = 0, ip2_from = 0, ip2_to = 0, ipn; + u64 n = 0, m = 0; int ret; if (tb[IPSET_ATTR_LINENO]) @@ -244,6 +245,19 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[], } else { ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]); } + ipn = ip; + do { + ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr[0]); + n++; + } while (ipn++ < ip_to); + ipn = ip2_from; + do { + ipn = ip_set_range_to_cidr(ipn, ip2_to, &e.cidr[1]); + m++; + } while (ipn++ < ip2_to); + + if (n*m > IPSET_MAX_RANGE) + return -ERANGE; if (retried) { ip = ntohl(h->next.ip[0]); diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c index ec1564a1cb5a..09cf72eb37f8 100644 --- a/net/netfilter/ipset/ip_set_hash_netport.c +++ b/net/netfilter/ipset/ip_set_hash_netport.c @@ -158,7 +158,8 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[], ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_netport4_elem e = { .cidr = HOST_MASK - 1 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); - u32 port, port_to, p = 0, ip = 0, ip_to = 0; + u32 port, port_to, p = 0, ip = 0, ip_to = 0, ipn; + u64 n = 0; bool with_ports = false; u8 cidr; int ret; @@ -235,6 +236,14 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[], } else { ip_set_mask_from_to(ip, ip_to, e.cidr + 1); } + ipn = ip; + do { + ipn = ip_set_range_to_cidr(ipn, ip_to, &cidr); + n++; + } while (ipn++ < ip_to); + + if (n*(port_to - port + 1) > IPSET_MAX_RANGE) + return -ERANGE; if (retried) { ip = ntohl(h->next.ip); diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c index 0e91d1e82f1c..19bcdb3141f6 100644 --- a/net/netfilter/ipset/ip_set_hash_netportnet.c +++ b/net/netfilter/ipset/ip_set_hash_netportnet.c @@ -182,7 +182,8 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[], struct hash_netportnet4_elem e = { }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); u32 ip = 0, ip_to = 0, p = 0, port, port_to; - u32 ip2_from = 0, ip2_to = 
0, ip2; + u32 ip2_from = 0, ip2_to = 0, ip2, ipn; + u64 n = 0, m = 0; bool with_ports = false; int ret; @@ -284,6 +285,19 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[], } else { ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]); } + ipn = ip; + do { + ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr[0]); + n++; + } while (ipn++ < ip_to); + ipn = ip2_from; + do { + ipn = ip_set_range_to_cidr(ipn, ip2_to, &e.cidr[1]); + m++; + } while (ipn++ < ip2_to); + + if (n*m*(port_to - port + 1) > IPSET_MAX_RANGE) + return -ERANGE; if (retried) { ip = ntohl(h->next.ip[0]); diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 96ba19fc8155..d31dbccbe7bd 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -66,22 +66,17 @@ EXPORT_SYMBOL_GPL(nf_conntrack_hash); struct conntrack_gc_work { struct delayed_work dwork; - u32 last_bucket; + u32 next_bucket; bool exiting; bool early_drop; - long next_gc_run; }; static __read_mostly struct kmem_cache *nf_conntrack_cachep; static DEFINE_SPINLOCK(nf_conntrack_locks_all_lock); static __read_mostly bool nf_conntrack_locks_all; -/* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */ -#define GC_MAX_BUCKETS_DIV 128u -/* upper bound of full table scan */ -#define GC_MAX_SCAN_JIFFIES (16u * HZ) -/* desired ratio of entries found to be expired */ -#define GC_EVICT_RATIO 50u +#define GC_SCAN_INTERVAL (120u * HZ) +#define GC_SCAN_MAX_DURATION msecs_to_jiffies(10) static struct conntrack_gc_work conntrack_gc_work; @@ -149,7 +144,15 @@ static void nf_conntrack_all_lock(void) spin_lock(&nf_conntrack_locks_all_lock); - nf_conntrack_locks_all = true; + /* For nf_conntrack_locks_all, only the latest time when another + * CPU will see an update is controlled, by the "release" of the + * spin_lock below. + * The earliest time is not controlled, and thus KCSAN could detect + * a race when nf_conntrack_lock() reads the variable. + * WRITE_ONCE() is used to ensure the compiler will not + * optimize the write.
+ */ + WRITE_ONCE(nf_conntrack_locks_all, true); for (i = 0; i < CONNTRACK_LOCKS; i++) { spin_lock(&nf_conntrack_locks[i]); @@ -662,8 +665,13 @@ bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report) return false; tstamp = nf_conn_tstamp_find(ct); - if (tstamp && tstamp->stop == 0) + if (tstamp) { + s32 timeout = ct->timeout - nfct_time_stamp; + tstamp->stop = ktime_get_real_ns(); + if (timeout < 0) + tstamp->stop -= jiffies_to_nsecs(-timeout); + } if (nf_conntrack_event_report(IPCT_DESTROY, ct, portid, report) < 0) { @@ -1350,17 +1358,13 @@ static bool gc_worker_can_early_drop(const struct nf_conn *ct) static void gc_worker(struct work_struct *work) { - unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u); - unsigned int i, goal, buckets = 0, expired_count = 0; - unsigned int nf_conntrack_max95 = 0; + unsigned long end_time = jiffies + GC_SCAN_MAX_DURATION; + unsigned int i, hashsz, nf_conntrack_max95 = 0; + unsigned long next_run = GC_SCAN_INTERVAL; struct conntrack_gc_work *gc_work; - unsigned int ratio, scanned = 0; - unsigned long next_run; - gc_work = container_of(work, struct conntrack_gc_work, dwork.work); - goal = nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV; - i = gc_work->last_bucket; + i = gc_work->next_bucket; if (gc_work->early_drop) nf_conntrack_max95 = nf_conntrack_max / 100u * 95u; @@ -1368,15 +1372,15 @@ static void gc_worker(struct work_struct *work) struct nf_conntrack_tuple_hash *h; struct hlist_nulls_head *ct_hash; struct hlist_nulls_node *n; - unsigned int hashsz; struct nf_conn *tmp; - i++; rcu_read_lock(); nf_conntrack_get_ht(&ct_hash, &hashsz); - if (i >= hashsz) - i = 0; + if (i >= hashsz) { + rcu_read_unlock(); + break; + } hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) { struct nf_conntrack_net *cnet; @@ -1384,7 +1388,6 @@ static void gc_worker(struct work_struct *work) tmp = nf_ct_tuplehash_to_ctrack(h); - scanned++; if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) { nf_ct_offload_timeout(tmp); continue; @@ -1392,7 +1395,6 @@ static void gc_worker(struct work_struct *work) if (nf_ct_is_expired(tmp)) { nf_ct_gc_expired(tmp); - expired_count++; continue; } @@ -1425,7 +1427,14 @@ static void gc_worker(struct work_struct *work) */ rcu_read_unlock(); cond_resched(); - } while (++buckets < goal); + i++; + + if (time_after(jiffies, end_time) && i < hashsz) { + gc_work->next_bucket = i; + next_run = 0; + break; + } + } while (i < hashsz); if (gc_work->exiting) return; @@ -1436,40 +1445,17 @@ static void gc_worker(struct work_struct *work) * * This worker is only here to reap expired entries when system went * idle after a busy period. - * - * The heuristics below are supposed to balance conflicting goals: - * - * 1. Minimize time until we notice a stale entry - * 2. Maximize scan intervals to not waste cycles - * - * Normally, expire ratio will be close to 0. - * - * As soon as a sizeable fraction of the entries have expired - * increase scan frequency. */ - ratio = scanned ? 
expired_count * 100 / scanned : 0; - if (ratio > GC_EVICT_RATIO) { - gc_work->next_gc_run = min_interval; - } else { - unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV; - - BUILD_BUG_ON((GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV) == 0); - - gc_work->next_gc_run += min_interval; - if (gc_work->next_gc_run > max) - gc_work->next_gc_run = max; + if (next_run) { + gc_work->early_drop = false; + gc_work->next_bucket = 0; } - - next_run = gc_work->next_gc_run; - gc_work->last_bucket = i; - gc_work->early_drop = false; queue_delayed_work(system_power_efficient_wq, &gc_work->dwork, next_run); } static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work) { INIT_DEFERRABLE_WORK(&gc_work->dwork, gc_worker); - gc_work->next_gc_run = HZ; gc_work->exiting = false; } @@ -2457,7 +2443,6 @@ i_see_dead_people: } list_for_each_entry(net, net_exit_list, exit_list) { - nf_conntrack_proto_pernet_fini(net); nf_conntrack_ecache_pernet_fini(net); nf_conntrack_expect_pernet_fini(net); free_percpu(net->ct.stat); diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 4e1a9dba7077..e81af33b233b 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -218,6 +218,7 @@ static int ctnetlink_dump_helpinfo(struct sk_buff *skb, if (!help) return 0; + rcu_read_lock(); helper = rcu_dereference(help->helper); if (!helper) goto out; @@ -233,9 +234,11 @@ static int ctnetlink_dump_helpinfo(struct sk_buff *skb, nla_nest_end(skb, nest_helper); out: + rcu_read_unlock(); return 0; nla_put_failure: + rcu_read_unlock(); return -1; } diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c index 55647409a9be..8f7a9837349c 100644 --- a/net/netfilter/nf_conntrack_proto.c +++ b/net/netfilter/nf_conntrack_proto.c @@ -697,13 +697,6 @@ void nf_conntrack_proto_pernet_init(struct net *net) #endif } -void nf_conntrack_proto_pernet_fini(struct net *net) -{ -#ifdef CONFIG_NF_CT_PROTO_GRE - nf_ct_gre_keymap_flush(net); -#endif -} - module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint, &nf_conntrack_htable_size, 0600); diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c index db11e403d818..728eeb0aea87 100644 --- a/net/netfilter/nf_conntrack_proto_gre.c +++ b/net/netfilter/nf_conntrack_proto_gre.c @@ -55,19 +55,6 @@ static inline struct nf_gre_net *gre_pernet(struct net *net) return &net->ct.nf_ct_proto.gre; } -void nf_ct_gre_keymap_flush(struct net *net) -{ - struct nf_gre_net *net_gre = gre_pernet(net); - struct nf_ct_gre_keymap *km, *tmp; - - spin_lock_bh(&keymap_lock); - list_for_each_entry_safe(km, tmp, &net_gre->keymap_list, list) { - list_del_rcu(&km->list); - kfree_rcu(km, rcu); - } - spin_unlock_bh(&keymap_lock); -} - static inline int gre_key_cmpfn(const struct nf_ct_gre_keymap *km, const struct nf_conntrack_tuple *t) { diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index f7e8baf59b51..af5115e127cf 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -823,6 +823,22 @@ static noinline bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb, return true; } +static bool tcp_can_early_drop(const struct nf_conn *ct) +{ + switch (ct->proto.tcp.state) { + case TCP_CONNTRACK_FIN_WAIT: + case TCP_CONNTRACK_LAST_ACK: + case TCP_CONNTRACK_TIME_WAIT: + case TCP_CONNTRACK_CLOSE: + case TCP_CONNTRACK_CLOSE_WAIT: + return true; + default: + break; + } + + return false; +} + /* 
Returns verdict for packet, or -1 for invalid. */ int nf_conntrack_tcp_packet(struct nf_conn *ct, struct sk_buff *skb, @@ -1030,10 +1046,30 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct, if (index != TCP_RST_SET) break; - if (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET) { + /* If we are closing, tuple might have been re-used already. + * last_index, last_ack, and all other ct fields used for + * sequence/window validation are outdated in that case. + * + * As the conntrack can already be expired by GC under pressure, + * just skip validation checks. + */ + if (tcp_can_early_drop(ct)) + goto in_window; + + /* td_maxack might be outdated if we let a SYN through earlier */ + if ((ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET) && + ct->proto.tcp.last_index != TCP_SYN_SET) { u32 seq = ntohl(th->seq); - if (before(seq, ct->proto.tcp.seen[!dir].td_maxack)) { + /* If we are not in established state and SEQ=0 this is most + * likely an answer to a SYN we let go through above (last_index + * can be updated due to out-of-order ACKs). + */ + if (seq == 0 && !nf_conntrack_tcp_established(ct)) + break; + + if (before(seq, ct->proto.tcp.seen[!dir].td_maxack) && + !tn->tcp_ignore_invalid_rst) { /* Invalid RST */ spin_unlock_bh(&ct->lock); nf_ct_l4proto_log_invalid(skb, ct, state, "invalid rst"); @@ -1134,6 +1170,16 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct, nf_ct_kill_acct(ct, ctinfo, skb); return NF_ACCEPT; } + + if (index == TCP_SYN_SET && old_state == TCP_CONNTRACK_SYN_SENT) { + /* do not renew timeout on SYN retransmit. + * + * Else port reuse by client or NAT middlebox can keep + * entry alive indefinitely (including nat info). + */ + return NF_ACCEPT; + } + /* ESTABLISHED without SEEN_REPLY, i.e. mid-connection * pickup with loose=1. Avoid large ESTABLISHED timeout. */ @@ -1155,22 +1201,6 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct, return NF_ACCEPT; } -static bool tcp_can_early_drop(const struct nf_conn *ct) -{ - switch (ct->proto.tcp.state) { - case TCP_CONNTRACK_FIN_WAIT: - case TCP_CONNTRACK_LAST_ACK: - case TCP_CONNTRACK_TIME_WAIT: - case TCP_CONNTRACK_CLOSE: - case TCP_CONNTRACK_CLOSE_WAIT: - return true; - default: - break; - } - - return false; -} - #if IS_ENABLED(CONFIG_NF_CT_NETLINK) #include <linux/netfilter/nfnetlink.h> @@ -1437,6 +1467,9 @@ void nf_conntrack_tcp_init_net(struct net *net) */ tn->tcp_be_liberal = 0; + /* If it's non-zero, we turn off RST sequence number check */ + tn->tcp_ignore_invalid_rst = 0; + /* Max number of the retransmitted packets without receiving an (acceptable) * ACK from the destination. If this number is reached, a shorter timer * will be started. 
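(Editor's aside: below is a minimal userspace sketch, not kernel code, of the RST policy the hunks above introduce; the helpers rst_is_invalid() and seq_before() are invented for illustration, and the SYN/last_index and SEQ=0 special cases from the patch are omitted. An RST whose sequence number precedes the peer's highest acknowledged data is flagged invalid unless the new nf_conntrack_tcp_ignore_invalid_rst toggle is set.)

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wrap-safe "a precedes b" on the 32-bit sequence space, like the kernel's before(). */
static bool seq_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

/* Decide whether an incoming RST fails the sequence check. */
static bool rst_is_invalid(uint32_t rst_seq, uint32_t td_maxack,
			   bool maxack_set, bool ignore_invalid_rst)
{
	if (!maxack_set)
		return false;	/* no baseline recorded yet, accept */
	if (ignore_invalid_rst)
		return false;	/* the sysctl turns the check off */
	return seq_before(rst_seq, td_maxack);
}

int main(void)
{
	/* A stale RST is rejected by default but passes with the toggle set. */
	printf("%d\n", rst_is_invalid(1000, 5000, true, false)); /* prints 1 */
	printf("%d\n", rst_is_invalid(1000, 5000, true, true));  /* prints 0 */
	return 0;
}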
@@ -1445,7 +1478,6 @@ void nf_conntrack_tcp_init_net(struct net *net) #if IS_ENABLED(CONFIG_NF_FLOW_TABLE) tn->offload_timeout = 30 * HZ; - tn->offload_pickup = 120 * HZ; #endif } diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c index 698fee49e732..f8e3c0d2602f 100644 --- a/net/netfilter/nf_conntrack_proto_udp.c +++ b/net/netfilter/nf_conntrack_proto_udp.c @@ -271,7 +271,6 @@ void nf_conntrack_udp_init_net(struct net *net) #if IS_ENABLED(CONFIG_NF_FLOW_TABLE) un->offload_timeout = 30 * HZ; - un->offload_pickup = 30 * HZ; #endif } diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index f57a951c9b5e..e84b499b7bfa 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -575,16 +575,15 @@ enum nf_ct_sysctl_index { NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_UNACK, #if IS_ENABLED(CONFIG_NF_FLOW_TABLE) NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD, - NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD_PICKUP, #endif NF_SYSCTL_CT_PROTO_TCP_LOOSE, NF_SYSCTL_CT_PROTO_TCP_LIBERAL, + NF_SYSCTL_CT_PROTO_TCP_IGNORE_INVALID_RST, NF_SYSCTL_CT_PROTO_TCP_MAX_RETRANS, NF_SYSCTL_CT_PROTO_TIMEOUT_UDP, NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM, #if IS_ENABLED(CONFIG_NF_FLOW_TABLE) NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD, - NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD_PICKUP, #endif NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP, NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6, @@ -775,12 +774,6 @@ static struct ctl_table nf_ct_sysctl_table[] = { .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, - [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD_PICKUP] = { - .procname = "nf_flowtable_tcp_pickup", - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = proc_dointvec_jiffies, - }, #endif [NF_SYSCTL_CT_PROTO_TCP_LOOSE] = { .procname = "nf_conntrack_tcp_loose", @@ -798,6 +791,14 @@ static struct ctl_table nf_ct_sysctl_table[] = { .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE, }, + [NF_SYSCTL_CT_PROTO_TCP_IGNORE_INVALID_RST] = { + .procname = "nf_conntrack_tcp_ignore_invalid_rst", + .maxlen = sizeof(u8), + .mode = 0644, + .proc_handler = proc_dou8vec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, [NF_SYSCTL_CT_PROTO_TCP_MAX_RETRANS] = { .procname = "nf_conntrack_tcp_max_retrans", .maxlen = sizeof(u8), @@ -823,12 +824,6 @@ static struct ctl_table nf_ct_sysctl_table[] = { .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, - [NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD_PICKUP] = { - .procname = "nf_flowtable_udp_pickup", - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = proc_dointvec_jiffies, - }, #endif [NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP] = { .procname = "nf_conntrack_icmp_timeout", @@ -1004,11 +999,11 @@ static void nf_conntrack_standalone_init_tcp_sysctl(struct net *net, XASSIGN(LOOSE, &tn->tcp_loose); XASSIGN(LIBERAL, &tn->tcp_be_liberal); XASSIGN(MAX_RETRANS, &tn->tcp_max_retrans); + XASSIGN(IGNORE_INVALID_RST, &tn->tcp_ignore_invalid_rst); #undef XASSIGN #if IS_ENABLED(CONFIG_NF_FLOW_TABLE) table[NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD].data = &tn->offload_timeout; - table[NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD_PICKUP].data = &tn->offload_pickup; #endif } @@ -1101,7 +1096,6 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net) table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM].data = &un->timeouts[UDP_CT_REPLIED]; #if IS_ENABLED(CONFIG_NF_FLOW_TABLE) table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD].data = &un->offload_timeout; - table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD_PICKUP].data = &un->offload_pickup; 
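/* Editorial note: with the nf_flowtable_tcp_pickup and nf_flowtable_udp_pickup
 * sysctls removed here, the pickup interval is no longer tunable. As the
 * nf_flow_table_core.c hunk below shows, the conntrack timeout restored when a
 * flow leaves the offload path is now derived from the protocol's regular
 * timeout (TCP established / UDP replied) minus offload_timeout, clamped to
 * zero. */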
#endif nf_conntrack_standalone_init_tcp_sysctl(net, table); diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c index 1e50908b1b7e..8788b519255e 100644 --- a/net/netfilter/nf_flow_table_core.c +++ b/net/netfilter/nf_flow_table_core.c @@ -183,7 +183,7 @@ static void flow_offload_fixup_ct_timeout(struct nf_conn *ct) const struct nf_conntrack_l4proto *l4proto; struct net *net = nf_ct_net(ct); int l4num = nf_ct_protonum(ct); - unsigned int timeout; + s32 timeout; l4proto = nf_ct_l4proto_find(l4num); if (!l4proto) @@ -192,15 +192,20 @@ static void flow_offload_fixup_ct_timeout(struct nf_conn *ct) if (l4num == IPPROTO_TCP) { struct nf_tcp_net *tn = nf_tcp_pernet(net); - timeout = tn->offload_pickup; + timeout = tn->timeouts[TCP_CONNTRACK_ESTABLISHED]; + timeout -= tn->offload_timeout; } else if (l4num == IPPROTO_UDP) { struct nf_udp_net *tn = nf_udp_pernet(net); - timeout = tn->offload_pickup; + timeout = tn->timeouts[UDP_CT_REPLIED]; + timeout -= tn->offload_timeout; } else { return; } + if (timeout < 0) + timeout = 0; + if (nf_flow_timeout_delta(ct->timeout) > (__s32)timeout) ct->timeout = nfct_time_stamp + timeout; } @@ -331,7 +336,11 @@ EXPORT_SYMBOL_GPL(flow_offload_add); void flow_offload_refresh(struct nf_flowtable *flow_table, struct flow_offload *flow) { - flow->timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow); + u32 timeout; + + timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow); + if (READ_ONCE(flow->timeout) != timeout) + WRITE_ONCE(flow->timeout, timeout); if (likely(!nf_flowtable_hw_offload(flow_table))) return; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 390d4466567f..081437dd75b7 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -3446,7 +3446,8 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info, return 0; err_destroy_flow_rule: - nft_flow_rule_destroy(flow); + if (flow) + nft_flow_rule_destroy(flow); err_release_rule: nf_tables_rule_release(&ctx, rule); err_release_expr: @@ -8444,6 +8445,16 @@ static int nf_tables_commit_audit_alloc(struct list_head *adl, return 0; } +static void nf_tables_commit_audit_free(struct list_head *adl) +{ + struct nft_audit_data *adp, *adn; + + list_for_each_entry_safe(adp, adn, adl, list) { + list_del(&adp->list); + kfree(adp); + } +} + static void nf_tables_commit_audit_collect(struct list_head *adl, struct nft_table *table, u32 op) { @@ -8508,6 +8519,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) ret = nf_tables_commit_audit_alloc(&adl, trans->ctx.table); if (ret) { nf_tables_commit_chain_prepare_cancel(net); + nf_tables_commit_audit_free(&adl); return ret; } if (trans->msg_type == NFT_MSG_NEWRULE || @@ -8517,6 +8529,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) ret = nf_tables_commit_chain_prepare(net, chain); if (ret < 0) { nf_tables_commit_chain_prepare_cancel(net); + nf_tables_commit_audit_free(&adl); return ret; } } diff --git a/net/netfilter/nfnetlink_hook.c b/net/netfilter/nfnetlink_hook.c index 50b4e3c9347a..f554e2ea32ee 100644 --- a/net/netfilter/nfnetlink_hook.c +++ b/net/netfilter/nfnetlink_hook.c @@ -89,11 +89,15 @@ static int nfnl_hook_put_nft_chain_info(struct sk_buff *nlskb, if (!nest2) goto cancel_nest; - ret = nla_put_string(nlskb, NFTA_CHAIN_TABLE, chain->table->name); + ret = nla_put_string(nlskb, NFNLA_CHAIN_TABLE, chain->table->name); if (ret) goto cancel_nest; - ret = nla_put_string(nlskb, NFTA_CHAIN_NAME, 
chain->name); + ret = nla_put_string(nlskb, NFNLA_CHAIN_NAME, chain->name); + if (ret) + goto cancel_nest; + + ret = nla_put_u8(nlskb, NFNLA_CHAIN_FAMILY, chain->table->family); if (ret) goto cancel_nest; @@ -109,18 +113,19 @@ cancel_nest: static int nfnl_hook_dump_one(struct sk_buff *nlskb, const struct nfnl_dump_hook_data *ctx, const struct nf_hook_ops *ops, - unsigned int seq) + int family, unsigned int seq) { u16 event = nfnl_msg_type(NFNL_SUBSYS_HOOK, NFNL_MSG_HOOK_GET); unsigned int portid = NETLINK_CB(nlskb).portid; struct nlmsghdr *nlh; int ret = -EMSGSIZE; + u32 hooknum; #ifdef CONFIG_KALLSYMS char sym[KSYM_SYMBOL_LEN]; char *module_name; #endif nlh = nfnl_msg_put(nlskb, portid, seq, event, - NLM_F_MULTI, ops->pf, NFNETLINK_V0, 0); + NLM_F_MULTI, family, NFNETLINK_V0, 0); if (!nlh) goto nla_put_failure; @@ -135,6 +140,7 @@ static int nfnl_hook_dump_one(struct sk_buff *nlskb, if (module_name) { char *end; + *module_name = '\0'; module_name += 2; end = strchr(module_name, ']'); if (end) { @@ -151,7 +157,12 @@ static int nfnl_hook_dump_one(struct sk_buff *nlskb, goto nla_put_failure; #endif - ret = nla_put_be32(nlskb, NFNLA_HOOK_HOOKNUM, htonl(ops->hooknum)); + if (ops->pf == NFPROTO_INET && ops->hooknum == NF_INET_INGRESS) + hooknum = NF_NETDEV_INGRESS; + else + hooknum = ops->hooknum; + + ret = nla_put_be32(nlskb, NFNLA_HOOK_HOOKNUM, htonl(hooknum)); if (ret) goto nla_put_failure; @@ -174,7 +185,9 @@ static const struct nf_hook_entries * nfnl_hook_entries_head(u8 pf, unsigned int hook, struct net *net, const char *dev) { const struct nf_hook_entries *hook_head = NULL; +#ifdef CONFIG_NETFILTER_INGRESS struct net_device *netdev; +#endif switch (pf) { case NFPROTO_IPV4: @@ -257,7 +270,8 @@ static int nfnl_hook_dump(struct sk_buff *nlskb, ops = nf_hook_entries_get_hook_ops(e); for (; i < e->num_hook_entries; i++) { - err = nfnl_hook_dump_one(nlskb, ctx, ops[i], cb->seq); + err = nfnl_hook_dump_one(nlskb, ctx, ops[i], family, + cb->nlh->nlmsg_seq); if (err) break; } diff --git a/net/netfilter/nft_last.c b/net/netfilter/nft_last.c index 913ac45167f2..304e33cbed9b 100644 --- a/net/netfilter/nft_last.c +++ b/net/netfilter/nft_last.c @@ -23,15 +23,21 @@ static int nft_last_init(const struct nft_ctx *ctx, const struct nft_expr *expr, { struct nft_last_priv *priv = nft_expr_priv(expr); u64 last_jiffies; + u32 last_set = 0; int err; - if (tb[NFTA_LAST_MSECS]) { + if (tb[NFTA_LAST_SET]) { + last_set = ntohl(nla_get_be32(tb[NFTA_LAST_SET])); + if (last_set == 1) + priv->last_set = 1; + } + + if (last_set && tb[NFTA_LAST_MSECS]) { err = nf_msecs_to_jiffies64(tb[NFTA_LAST_MSECS], &last_jiffies); if (err < 0) return err; - priv->last_jiffies = jiffies + (unsigned long)last_jiffies; - priv->last_set = 1; + priv->last_jiffies = jiffies - (unsigned long)last_jiffies; } return 0; @@ -42,24 +48,30 @@ static void nft_last_eval(const struct nft_expr *expr, { struct nft_last_priv *priv = nft_expr_priv(expr); - priv->last_jiffies = jiffies; - priv->last_set = 1; + if (READ_ONCE(priv->last_jiffies) != jiffies) + WRITE_ONCE(priv->last_jiffies, jiffies); + if (READ_ONCE(priv->last_set) == 0) + WRITE_ONCE(priv->last_set, 1); } static int nft_last_dump(struct sk_buff *skb, const struct nft_expr *expr) { struct nft_last_priv *priv = nft_expr_priv(expr); + unsigned long last_jiffies = READ_ONCE(priv->last_jiffies); + u32 last_set = READ_ONCE(priv->last_set); __be64 msecs; - if (time_before(jiffies, priv->last_jiffies)) - priv->last_set = 0; + if (time_before(jiffies, last_jiffies)) { + WRITE_ONCE(priv->last_set, 
0); + last_set = 0; + } - if (priv->last_set) - msecs = nf_jiffies64_to_msecs(jiffies - priv->last_jiffies); + if (last_set) + msecs = nf_jiffies64_to_msecs(jiffies - last_jiffies); else msecs = 0; - if (nla_put_be32(skb, NFTA_LAST_SET, htonl(priv->last_set)) || + if (nla_put_be32(skb, NFTA_LAST_SET, htonl(last_set)) || nla_put_be64(skb, NFTA_LAST_MSECS, msecs, NFTA_LAST_PAD)) goto nla_put_failure; diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c index 0840c635b752..be1595d6979d 100644 --- a/net/netfilter/nft_nat.c +++ b/net/netfilter/nft_nat.c @@ -201,7 +201,9 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, alen = sizeof_field(struct nf_nat_range, min_addr.ip6); break; default: - return -EAFNOSUPPORT; + if (tb[NFTA_NAT_REG_ADDR_MIN]) + return -EAFNOSUPPORT; + break; } priv->family = family; diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index d233ac4a91b6..380f95aacdec 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -2471,7 +2471,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err, nlmsg_end(skb, rep); - netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT); + nlmsg_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid); } EXPORT_SYMBOL(netlink_ack); diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c index 9115f8a7dd45..a8da88db7893 100644 --- a/net/netrom/nr_timer.c +++ b/net/netrom/nr_timer.c @@ -121,11 +121,9 @@ static void nr_heartbeat_expiry(struct timer_list *t) is accepted() it isn't 'dead' so doesn't get removed. */ if (sock_flag(sk, SOCK_DESTROY) || (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) { - sock_hold(sk); bh_unlock_sock(sk); nr_destroy_socket(sk); - sock_put(sk); - return; + goto out; } break; @@ -146,6 +144,8 @@ static void nr_heartbeat_expiry(struct timer_list *t) nr_start_heartbeat(sk); bh_unlock_sock(sk); +out: + sock_put(sk); } static void nr_t2timer_expiry(struct timer_list *t) @@ -159,6 +159,7 @@ static void nr_t2timer_expiry(struct timer_list *t) nr_enquiry_response(sk); } bh_unlock_sock(sk); + sock_put(sk); } static void nr_t4timer_expiry(struct timer_list *t) @@ -169,6 +170,7 @@ static void nr_t4timer_expiry(struct timer_list *t) bh_lock_sock(sk); nr_sk(sk)->condition &= ~NR_COND_PEER_RX_BUSY; bh_unlock_sock(sk); + sock_put(sk); } static void nr_idletimer_expiry(struct timer_list *t) @@ -197,6 +199,7 @@ static void nr_idletimer_expiry(struct timer_list *t) sock_set_flag(sk, SOCK_DEAD); } bh_unlock_sock(sk); + sock_put(sk); } static void nr_t1timer_expiry(struct timer_list *t) @@ -209,8 +212,7 @@ static void nr_t1timer_expiry(struct timer_list *t) case NR_STATE_1: if (nr->n2count == nr->n2) { nr_disconnect(sk, ETIMEDOUT); - bh_unlock_sock(sk); - return; + goto out; } else { nr->n2count++; nr_write_internal(sk, NR_CONNREQ); @@ -220,8 +222,7 @@ static void nr_t1timer_expiry(struct timer_list *t) case NR_STATE_2: if (nr->n2count == nr->n2) { nr_disconnect(sk, ETIMEDOUT); - bh_unlock_sock(sk); - return; + goto out; } else { nr->n2count++; nr_write_internal(sk, NR_DISCREQ); @@ -231,8 +232,7 @@ static void nr_t1timer_expiry(struct timer_list *t) case NR_STATE_3: if (nr->n2count == nr->n2) { nr_disconnect(sk, ETIMEDOUT); - bh_unlock_sock(sk); - return; + goto out; } else { nr->n2count++; nr_requeue_frames(sk); @@ -241,5 +241,7 @@ static void nr_t1timer_expiry(struct timer_list *t) } nr_start_t1timer(sk); +out: bh_unlock_sock(sk); + sock_put(sk); } diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 
e586424d8b04..9713035b89e3 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -293,14 +293,14 @@ static bool icmp6hdr_ok(struct sk_buff *skb) } /** - * Parse vlan tag from vlan header. + * parse_vlan_tag - Parse vlan tag from vlan header. * @skb: skb containing frame to parse * @key_vh: pointer to parsed vlan tag * @untag_vlan: should the vlan header be removed from the frame * - * Returns ERROR on memory error. - * Returns 0 if it encounters a non-vlan or incomplete packet. - * Returns 1 after successfully parsing vlan tag. + * Return: ERROR on memory error. + * %0 if it encounters a non-vlan or incomplete packet. + * %1 after successfully parsing vlan tag. */ static int parse_vlan_tag(struct sk_buff *skb, struct vlan_head *key_vh, bool untag_vlan) @@ -532,6 +532,7 @@ static int parse_nsh(struct sk_buff *skb, struct sw_flow_key *key) * L3 header * @key: output flow key * + * Return: %0 if successful, otherwise a negative errno value. */ static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key) { @@ -748,8 +749,6 @@ static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key) * * The caller must ensure that skb->len >= ETH_HLEN. * - * Returns 0 if successful, otherwise a negative errno value. - * * Initializes @skb header fields as follows: * * - skb->mac_header: the L2 header. @@ -764,6 +763,8 @@ static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key) * * - skb->protocol: the type of the data starting at skb->network_header. * Equals to key->eth.type. + * + * Return: %0 if successful, otherwise a negative errno value. */ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key) { diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c index c89c8da99f1a..d4a2db0b2299 100644 --- a/net/openvswitch/flow_table.c +++ b/net/openvswitch/flow_table.c @@ -670,13 +670,13 @@ static bool cmp_key(const struct sw_flow_key *key1, { const long *cp1 = (const long *)((const u8 *)key1 + key_start); const long *cp2 = (const long *)((const u8 *)key2 + key_start); - long diffs = 0; int i; for (i = key_start; i < key_end; i += sizeof(long)) - diffs |= *cp1++ ^ *cp2++; + if (*cp1++ ^ *cp2++) + return false; - return diffs == 0; + return true; } static bool flow_cmp_masked_key(const struct sw_flow *flow, diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c index 88deb5b41429..cf2ce5812489 100644 --- a/net/openvswitch/vport.c +++ b/net/openvswitch/vport.c @@ -507,6 +507,7 @@ void ovs_vport_send(struct vport *vport, struct sk_buff *skb, u8 mac_proto) } skb->dev = vport->dev; + skb->tstamp = 0; vport->ops->send(skb); return; diff --git a/net/qrtr/mhi.c b/net/qrtr/mhi.c index fa611678af05..c609cb724c25 100644 --- a/net/qrtr/mhi.c +++ b/net/qrtr/mhi.c @@ -15,6 +15,7 @@ struct qrtr_mhi_dev { struct qrtr_endpoint ep; struct mhi_device *mhi_dev; struct device *dev; + struct completion ready; }; /* From MHI to QRTR */ @@ -50,6 +51,10 @@ static int qcom_mhi_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb) struct qrtr_mhi_dev *qdev = container_of(ep, struct qrtr_mhi_dev, ep); int rc; + rc = wait_for_completion_interruptible(&qdev->ready); + if (rc) + goto free_skb; + if (skb->sk) sock_hold(skb->sk); @@ -79,7 +84,7 @@ static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev, int rc; /* start channels */ - rc = mhi_prepare_for_transfer(mhi_dev); + rc = mhi_prepare_for_transfer(mhi_dev, MHI_CH_INBOUND_ALLOC_BUFS); if (rc) return rc; @@ -96,6 +101,15 @@ static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev, if (rc) 
return rc; + /* start channels */ + rc = mhi_prepare_for_transfer(mhi_dev, MHI_CH_INBOUND_ALLOC_BUFS); + if (rc) { + qrtr_endpoint_unregister(&qdev->ep); + dev_set_drvdata(&mhi_dev->dev, NULL); + return rc; + } + + complete_all(&qdev->ready); dev_dbg(qdev->dev, "Qualcomm MHI QRTR driver probed\n"); return 0; diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c index e6f4a6202f82..171b7f3be6ef 100644 --- a/net/qrtr/qrtr.c +++ b/net/qrtr/qrtr.c @@ -518,8 +518,10 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len) if (!ipc) goto err; - if (sock_queue_rcv_skb(&ipc->sk, skb)) + if (sock_queue_rcv_skb(&ipc->sk, skb)) { + qrtr_port_put(ipc); goto err; + } qrtr_port_put(ipc); } @@ -839,6 +841,8 @@ static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb, ipc = qrtr_port_lookup(to->sq_port); if (!ipc || &ipc->sk == skb->sk) { /* do not send to self */ + if (ipc) + qrtr_port_put(ipc); kfree_skb(skb); return -ENODEV; } diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c index 9b6ffff72f2d..28c1b0022178 100644 --- a/net/rds/ib_frmr.c +++ b/net/rds/ib_frmr.c @@ -131,9 +131,9 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr) cpu_relax(); } - ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_len, + ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_dma_len, &off, PAGE_SIZE); - if (unlikely(ret != ibmr->sg_len)) + if (unlikely(ret != ibmr->sg_dma_len)) return ret < 0 ? ret : -EINVAL; if (cmpxchg(&frmr->fr_state, diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c index a656baa321fe..1b4b3514c94f 100644 --- a/net/sched/act_ct.c +++ b/net/sched/act_ct.c @@ -322,11 +322,22 @@ err_alloc: static void tcf_ct_flow_table_cleanup_work(struct work_struct *work) { + struct flow_block_cb *block_cb, *tmp_cb; struct tcf_ct_flow_table *ct_ft; + struct flow_block *block; ct_ft = container_of(to_rcu_work(work), struct tcf_ct_flow_table, rwork); nf_flow_table_free(&ct_ft->nf_ft); + + /* Remove any remaining callbacks before cleanup */ + block = &ct_ft->nf_ft.flow_block; + down_write(&ct_ft->nf_ft.flow_block_lock); + list_for_each_entry_safe(block_cb, tmp_cb, &block->cb_list, list) { + list_del(&block_cb->list); + flow_block_cb_free(block_cb); + } + up_write(&ct_ft->nf_ft.flow_block_lock); kfree(ct_ft); module_put(THIS_MODULE); @@ -1026,7 +1037,8 @@ do_nat: /* This will take care of sending queued events * even if the connection is already confirmed. 
*/ - nf_conntrack_confirm(skb); + if (nf_conntrack_confirm(skb) != NF_ACCEPT) + goto drop; } if (!skip_add) diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 7153c67f641e..2ef4cd2c848b 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -273,6 +273,9 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a, goto out; } + /* All mirred/redirected skbs should clear previous ct info */ + nf_reset_ct(skb2); + want_ingress = tcf_mirred_act_wants_ingress(m_eaction); expects_nh = want_ingress || !m_mac_header_xmit; diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c index 81a1c67335be..8d17a543cc9f 100644 --- a/net/sched/act_skbmod.c +++ b/net/sched/act_skbmod.c @@ -6,6 +6,7 @@ */ #include <linux/module.h> +#include <linux/if_arp.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/skbuff.h> @@ -33,6 +34,13 @@ static int tcf_skbmod_act(struct sk_buff *skb, const struct tc_action *a, tcf_lastuse_update(&d->tcf_tm); bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb); + action = READ_ONCE(d->tcf_action); + if (unlikely(action == TC_ACT_SHOT)) + goto drop; + + if (!skb->dev || skb->dev->type != ARPHRD_ETHER) + return action; + /* XXX: if you are going to edit more fields beyond ethernet header * (example when you add IP header replacement or vlan swap) * then MAX_EDIT_LEN needs to change appropriately @@ -41,10 +49,6 @@ static int tcf_skbmod_act(struct sk_buff *skb, const struct tc_action *a, if (unlikely(err)) /* best policy is to drop on the floor */ goto drop; - action = READ_ONCE(d->tcf_action); - if (unlikely(action == TC_ACT_SHOT)) - goto drop; - p = rcu_dereference_bh(d->skbmod_p); flags = p->flags; if (flags & SKBMOD_F_DMAC) diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index d73b5c5514a9..e3e79e9bd706 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -2904,7 +2904,7 @@ replay: break; case RTM_GETCHAIN: err = tc_chain_notify(chain, skb, n->nlmsg_seq, - n->nlmsg_seq, n->nlmsg_type, true); + n->nlmsg_flags, n->nlmsg_type, true); if (err < 0) NL_SET_ERR_MSG(extack, "Failed to send chain notify message"); break; diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c index 5b274534264c..e9a8a2c86bbd 100644 --- a/net/sched/cls_tcindex.c +++ b/net/sched/cls_tcindex.c @@ -278,6 +278,8 @@ static int tcindex_filter_result_init(struct tcindex_filter_result *r, TCA_TCINDEX_POLICE); } +static void tcindex_free_perfect_hash(struct tcindex_data *cp); + static void tcindex_partial_destroy_work(struct work_struct *work) { struct tcindex_data *p = container_of(to_rcu_work(work), @@ -285,7 +287,8 @@ static void tcindex_partial_destroy_work(struct work_struct *work) rwork); rtnl_lock(); - kfree(p->perfect); + if (p->perfect) + tcindex_free_perfect_hash(p); kfree(p); rtnl_unlock(); } diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index 951542843cab..28af8b1e1bb1 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c @@ -720,7 +720,7 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb, skip_hash: if (flow_override) flow_hash = flow_override - 1; - else if (use_skbhash) + else if (use_skbhash && (flow_mode & CAKE_FLOW_FLOWS)) flow_hash = skb->hash; if (host_override) { dsthost_hash = host_override - 1; diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index d9ac60ffe927..a8dd06c74e31 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -913,7 +913,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, /* seqlock has the same 
scope of busylock, for NOLOCK qdisc */ spin_lock_init(&sch->seqlock); - lockdep_set_class(&sch->busylock, + lockdep_set_class(&sch->seqlock, dev->qdisc_tx_busylock ?: &qdisc_tx_busylock); seqcount_init(&sch->running); diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index 66fe2b82af9a..9c79374457a0 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -564,7 +564,7 @@ static struct sk_buff *taprio_dequeue_soft(struct Qdisc *sch) /* if there's no entry, it means that the schedule didn't * start yet, so force all gates to be open, this is in * accordance to IEEE 802.1Qbv-2015 Section 8.6.9.4.5 - * "AdminGateSates" + * "AdminGateStates" */ gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN; @@ -1739,8 +1739,6 @@ static void taprio_attach(struct Qdisc *sch) if (FULL_OFFLOAD_IS_ENABLED(q->flags)) { qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; old = dev_graft_qdisc(qdisc->dev_queue, qdisc); - if (ntx < dev->real_num_tx_queues) - qdisc_hash_add(qdisc, false); } else { old = dev_graft_qdisc(qdisc->dev_queue, sch); qdisc_refcount_inc(sch); diff --git a/net/sctp/auth.c b/net/sctp/auth.c index 6f8319b828b0..db6b7373d16c 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c @@ -857,12 +857,18 @@ int sctp_auth_set_key(struct sctp_endpoint *ep, memcpy(key->data, &auth_key->sca_key[0], auth_key->sca_keylength); cur_key->key = key; - if (replace) { - list_del_init(&shkey->key_list); - sctp_auth_shkey_release(shkey); + if (!replace) { + list_add(&cur_key->key_list, sh_keys); + return 0; } + + list_del_init(&shkey->key_list); + sctp_auth_shkey_release(shkey); list_add(&cur_key->key_list, sh_keys); + if (asoc && asoc->active_key_id == auth_key->sca_keynumber) + sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL); + return 0; } diff --git a/net/sctp/diag.c b/net/sctp/diag.c index 493fc01e5d2b..760b367644c1 100644 --- a/net/sctp/diag.c +++ b/net/sctp/diag.c @@ -284,10 +284,8 @@ static int sctp_tsp_dump_one(struct sctp_transport *tsp, void *p) goto out; } - err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid, - MSG_DONTWAIT); - if (err > 0) - err = 0; + err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid); + out: return err; } diff --git a/net/sctp/input.c b/net/sctp/input.c index eb3c2a34a31c..5ef86fdb1176 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -1203,7 +1203,7 @@ static struct sctp_association *__sctp_rcv_asconf_lookup( if (unlikely(!af)) return NULL; - if (af->from_addr_param(&paddr, param, peer_port, 0)) + if (!af->from_addr_param(&paddr, param, peer_port, 0)) return NULL; return __sctp_lookup_association(net, laddr, &paddr, transportp); diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index e48dd909dee5..470dbdc27d58 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -100,8 +100,9 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev, list_for_each_entry_safe(addr, temp, &net->sctp.local_addr_list, list) { if (addr->a.sa.sa_family == AF_INET6 && - ipv6_addr_equal(&addr->a.v6.sin6_addr, - &ifa->addr)) { + ipv6_addr_equal(&addr->a.v6.sin6_addr, + &ifa->addr) && + addr->a.v6.sin6_scope_id == ifa->idev->dev->ifindex) { sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_DEL); found = 1; addr->valid = 0; diff --git a/net/sctp/output.c b/net/sctp/output.c index 9032ce60d50e..4dfb5ea82b05 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -104,8 +104,8 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag, if (asoc->param_flags & SPP_PMTUD_ENABLE) sctp_assoc_sync_pmtu(asoc); } else if 
(!sctp_transport_pl_enabled(tp) && - !sctp_transport_pmtu_check(tp)) { - if (asoc->param_flags & SPP_PMTUD_ENABLE) + asoc->param_flags & SPP_PMTUD_ENABLE) { + if (!sctp_transport_pmtu_check(tp)) sctp_assoc_sync_pmtu(asoc); } diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 3c1fbf38f4f7..ec0f52567c16 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -398,7 +398,8 @@ static enum sctp_scope sctp_v4_scope(union sctp_addr *addr) retval = SCTP_SCOPE_LINK; } else if (ipv4_is_private_10(addr->v4.sin_addr.s_addr) || ipv4_is_private_172(addr->v4.sin_addr.s_addr) || - ipv4_is_private_192(addr->v4.sin_addr.s_addr)) { + ipv4_is_private_192(addr->v4.sin_addr.s_addr) || + ipv4_is_test_198(addr->v4.sin_addr.s_addr)) { retval = SCTP_SCOPE_PRIVATE; } else { retval = SCTP_SCOPE_GLOBAL; diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 6c08e5048d38..b8fa8f1a7277 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -1163,7 +1163,7 @@ struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc, const struct sctp_transport *transport, __u32 probe_size) { - struct sctp_sender_hb_info hbinfo; + struct sctp_sender_hb_info hbinfo = {}; struct sctp_chunk *retval; retval = sctp_make_control(asoc, SCTP_CID_HEARTBEAT, 0, diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 09a8f23ec709..32df65f68c12 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -1109,12 +1109,12 @@ enum sctp_disposition sctp_sf_send_probe(struct net *net, if (!sctp_transport_pl_enabled(transport)) return SCTP_DISPOSITION_CONSUME; - sctp_transport_pl_send(transport); - - reply = sctp_make_heartbeat(asoc, transport, transport->pl.probe_size); - if (!reply) - return SCTP_DISPOSITION_NOMEM; - sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); + if (sctp_transport_pl_send(transport)) { + reply = sctp_make_heartbeat(asoc, transport, transport->pl.probe_size); + if (!reply) + return SCTP_DISPOSITION_NOMEM; + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); + } sctp_add_cmd_sf(commands, SCTP_CMD_PROBE_TIMER_UPDATE, SCTP_TRANSPORT(transport)); @@ -1274,8 +1274,7 @@ enum sctp_disposition sctp_sf_backbeat_8_3(struct net *net, !sctp_transport_pl_enabled(link)) return SCTP_DISPOSITION_DISCARD; - sctp_transport_pl_recv(link); - if (link->pl.state == SCTP_PL_COMPLETE) + if (sctp_transport_pl_recv(link)) return SCTP_DISPOSITION_CONSUME; return sctp_sf_send_probe(net, ep, asoc, type, link, commands); diff --git a/net/sctp/socket.c b/net/sctp/socket.c index e64e01f61b11..6b937bfd4751 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4577,6 +4577,10 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, } if (optlen > 0) { + /* Trim it to the biggest size sctp sockopt may need if necessary */ + optlen = min_t(unsigned int, optlen, + PAGE_ALIGN(USHRT_MAX + + sizeof(__u16) * sizeof(struct sctp_reset_streams))); kopt = memdup_sockptr(optval, optlen); if (IS_ERR(kopt)) return PTR_ERR(kopt); diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 5f23804f21c7..a3d3ca6dd63d 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -258,16 +258,13 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk) sctp_transport_pl_update(transport); } -void sctp_transport_pl_send(struct sctp_transport *t) +bool sctp_transport_pl_send(struct sctp_transport *t) { - pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n", - __func__, t, t->pl.state, t->pl.pmtu, 
t->pl.probe_size, t->pl.probe_high); - - if (t->pl.probe_count < SCTP_MAX_PROBES) { - t->pl.probe_count++; - return; - } + if (t->pl.probe_count < SCTP_MAX_PROBES) + goto out; + t->pl.last_rtx_chunks = t->asoc->rtx_data_chunks; + t->pl.probe_count = 0; if (t->pl.state == SCTP_PL_BASE) { if (t->pl.probe_size == SCTP_BASE_PLPMTU) { /* BASE_PLPMTU Confirmation Failed */ t->pl.state = SCTP_PL_ERROR; /* Base -> Error */ @@ -299,14 +296,27 @@ void sctp_transport_pl_send(struct sctp_transport *t) sctp_assoc_sync_pmtu(t->asoc); } } - t->pl.probe_count = 1; + +out: + if (t->pl.state == SCTP_PL_COMPLETE && t->pl.raise_count < 30 && + !t->pl.probe_count && t->pl.last_rtx_chunks == t->asoc->rtx_data_chunks) { + t->pl.raise_count++; + return false; + } + + pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n", + __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high); + + t->pl.probe_count++; + return true; } -void sctp_transport_pl_recv(struct sctp_transport *t) +bool sctp_transport_pl_recv(struct sctp_transport *t) { pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n", __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high); + t->pl.last_rtx_chunks = t->asoc->rtx_data_chunks; t->pl.pmtu = t->pl.probe_size; t->pl.probe_count = 0; if (t->pl.state == SCTP_PL_BASE) { @@ -323,7 +333,7 @@ void sctp_transport_pl_recv(struct sctp_transport *t) if (!t->pl.probe_high) { t->pl.probe_size = min(t->pl.probe_size + SCTP_PL_BIG_STEP, SCTP_MAX_PLPMTU); - return; + return false; } t->pl.probe_size += SCTP_PL_MIN_STEP; if (t->pl.probe_size >= t->pl.probe_high) { @@ -335,11 +345,13 @@ void sctp_transport_pl_recv(struct sctp_transport *t) t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); sctp_assoc_sync_pmtu(t->asoc); } - } else if (t->pl.state == SCTP_PL_COMPLETE && ++t->pl.raise_count == 30) { + } else if (t->pl.state == SCTP_PL_COMPLETE && t->pl.raise_count == 30) { /* Raise probe_size again after 30 * interval in Search Complete */ t->pl.state = SCTP_PL_SEARCH; /* Search Complete -> Search */ t->pl.probe_size += SCTP_PL_MIN_STEP; } + + return t->pl.state == SCTP_PL_COMPLETE; } static bool sctp_transport_pl_toobig(struct sctp_transport *t, u32 pmtu) diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 898389611ae8..c038efc23ce3 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -795,7 +795,7 @@ static int smc_connect_rdma(struct smc_sock *smc, reason_code = SMC_CLC_DECL_NOSRVLINK; goto connect_abort; } - smc->conn.lnk = link; + smc_switch_link_and_count(&smc->conn, link); } /* create send buffer and rmb */ diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index cd0d7c908b2a..c160ff50c053 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -917,8 +917,8 @@ static int smc_switch_cursor(struct smc_sock *smc, struct smc_cdc_tx_pend *pend, return rc; } -static void smc_switch_link_and_count(struct smc_connection *conn, - struct smc_link *to_lnk) +void smc_switch_link_and_count(struct smc_connection *conn, + struct smc_link *to_lnk) { atomic_dec(&conn->lnk->conn_cnt); conn->lnk = to_lnk; diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h index 6d6fd1397c87..c043ecdca5c4 100644 --- a/net/smc/smc_core.h +++ b/net/smc/smc_core.h @@ -97,6 +97,7 @@ struct smc_link { unsigned long *wr_tx_mask; /* bit mask of used indexes */ u32 wr_tx_cnt; /* number of WR send buffers */ wait_queue_head_t wr_tx_wait; /* wait for free WR send buf */ + atomic_t wr_tx_refcnt; /* tx refs to link */ struct smc_wr_buf *wr_rx_bufs; /* WR 
recv payload buffers */ struct ib_recv_wr *wr_rx_ibs; /* WR recv meta data */ @@ -109,6 +110,7 @@ struct smc_link { struct ib_reg_wr wr_reg; /* WR register memory region */ wait_queue_head_t wr_reg_wait; /* wait for wr_reg result */ + atomic_t wr_reg_refcnt; /* reg refs to link */ enum smc_wr_reg_state wr_reg_state; /* state of wr_reg request */ u8 gid[SMC_GID_SIZE];/* gid matching used vlan id*/ @@ -444,6 +446,8 @@ void smc_core_exit(void); int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk, u8 link_idx, struct smc_init_info *ini); void smcr_link_clear(struct smc_link *lnk, bool log); +void smc_switch_link_and_count(struct smc_connection *conn, + struct smc_link *to_lnk); int smcr_buf_map_lgr(struct smc_link *lnk); int smcr_buf_reg_lgr(struct smc_link *lnk); void smcr_lgr_set_type(struct smc_link_group *lgr, enum smc_lgr_type new_type); diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c index 273eaf1bfe49..2e7560eba981 100644 --- a/net/smc/smc_llc.c +++ b/net/smc/smc_llc.c @@ -888,6 +888,7 @@ int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry) if (!rc) goto out; out_clear_lnk: + lnk_new->state = SMC_LNK_INACTIVE; smcr_link_clear(lnk_new, false); out_reject: smc_llc_cli_add_link_reject(qentry); @@ -1184,6 +1185,7 @@ int smc_llc_srv_add_link(struct smc_link *link) goto out_err; return 0; out_err: + link_new->state = SMC_LNK_INACTIVE; smcr_link_clear(link_new, false); return rc; } @@ -1286,10 +1288,8 @@ static void smc_llc_process_cli_delete_link(struct smc_link_group *lgr) del_llc->reason = 0; smc_llc_send_message(lnk, &qentry->msg); /* response */ - if (smc_link_downing(&lnk_del->state)) { - if (smc_switch_conns(lgr, lnk_del, false)) - smc_wr_tx_wait_no_pending_sends(lnk_del); - } + if (smc_link_downing(&lnk_del->state)) + smc_switch_conns(lgr, lnk_del, false); smcr_link_clear(lnk_del, true); active_links = smc_llc_active_link_count(lgr); @@ -1805,8 +1805,6 @@ void smc_llc_link_clear(struct smc_link *link, bool log) link->smcibdev->ibdev->name, link->ibport); complete(&link->llc_testlink_resp); cancel_delayed_work_sync(&link->llc_testlink_wrk); - smc_wr_wakeup_reg_wait(link); - smc_wr_wakeup_tx_wait(link); } /* register a new rtoken at the remote peer (for all links) */ diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c index 289025cd545a..c79361dfcdfb 100644 --- a/net/smc/smc_tx.c +++ b/net/smc/smc_tx.c @@ -496,7 +496,7 @@ static int smc_tx_rdma_writes(struct smc_connection *conn, /* Wakeup sndbuf consumers from any context (IRQ or process) * since there is more data to transmit; usable snd_wnd as max transmit */ -static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn) +static int _smcr_tx_sndbuf_nonempty(struct smc_connection *conn) { struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags; struct smc_link *link = conn->lnk; @@ -550,6 +550,22 @@ out_unlock: return rc; } +static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn) +{ + struct smc_link *link = conn->lnk; + int rc = -ENOLINK; + + if (!link) + return rc; + + atomic_inc(&link->wr_tx_refcnt); + if (smc_link_usable(link)) + rc = _smcr_tx_sndbuf_nonempty(conn); + if (atomic_dec_and_test(&link->wr_tx_refcnt)) + wake_up_all(&link->wr_tx_wait); + return rc; +} + static int smcd_tx_sndbuf_nonempty(struct smc_connection *conn) { struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags; diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c index cbc73a7e4d59..a419e9af36b9 100644 --- a/net/smc/smc_wr.c +++ b/net/smc/smc_wr.c @@ -322,9 +322,12 @@ int 
smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr) if (rc) return rc; + atomic_inc(&link->wr_reg_refcnt); rc = wait_event_interruptible_timeout(link->wr_reg_wait, (link->wr_reg_state != POSTED), SMC_WR_REG_MR_WAIT_TIME); + if (atomic_dec_and_test(&link->wr_reg_refcnt)) + wake_up_all(&link->wr_reg_wait); if (!rc) { /* timeout - terminate link */ smcr_link_down_cond_sched(link); @@ -566,10 +569,15 @@ void smc_wr_free_link(struct smc_link *lnk) return; ibdev = lnk->smcibdev->ibdev; + smc_wr_wakeup_reg_wait(lnk); + smc_wr_wakeup_tx_wait(lnk); + if (smc_wr_tx_wait_no_pending_sends(lnk)) memset(lnk->wr_tx_mask, 0, BITS_TO_LONGS(SMC_WR_BUF_CNT) * sizeof(*lnk->wr_tx_mask)); + wait_event(lnk->wr_reg_wait, (!atomic_read(&lnk->wr_reg_refcnt))); + wait_event(lnk->wr_tx_wait, (!atomic_read(&lnk->wr_tx_refcnt))); if (lnk->wr_rx_dma_addr) { ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr, @@ -728,7 +736,9 @@ int smc_wr_create_link(struct smc_link *lnk) memset(lnk->wr_tx_mask, 0, BITS_TO_LONGS(SMC_WR_BUF_CNT) * sizeof(*lnk->wr_tx_mask)); init_waitqueue_head(&lnk->wr_tx_wait); + atomic_set(&lnk->wr_tx_refcnt, 0); init_waitqueue_head(&lnk->wr_reg_wait); + atomic_set(&lnk->wr_reg_refcnt, 0); return rc; dma_unmap: diff --git a/net/socket.c b/net/socket.c index bd9233da2497..0b2dad3bdf7f 100644 --- a/net/socket.c +++ b/net/socket.c @@ -104,6 +104,7 @@ #include <linux/sockios.h> #include <net/busy_poll.h> #include <linux/errqueue.h> +#include <linux/ptp_clock_kernel.h> #ifdef CONFIG_NET_RX_BUSY_POLL unsigned int sysctl_net_busy_read __read_mostly; @@ -873,12 +874,18 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, empty = 0; if (shhwtstamps && (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) && - !skb_is_swtx_tstamp(skb, false_tstamp) && - ktime_to_timespec64_cond(shhwtstamps->hwtstamp, tss.ts + 2)) { - empty = 0; - if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_PKTINFO) && - !skb_is_err_queue(skb)) - put_ts_pktinfo(msg, skb); + !skb_is_swtx_tstamp(skb, false_tstamp)) { + if (sk->sk_tsflags & SOF_TIMESTAMPING_BIND_PHC) + ptp_convert_timestamp(shhwtstamps, sk->sk_bind_phc); + + if (ktime_to_timespec64_cond(shhwtstamps->hwtstamp, + tss.ts + 2)) { + empty = 0; + + if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_PKTINFO) && + !skb_is_err_queue(skb)) + put_ts_pktinfo(msg, skb); + } } if (!empty) { if (sock_flag(sk, SOCK_TSTAMP_NEW)) diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c index e5c43d4d5a75..c9391d38de85 100644 --- a/net/tipc/crypto.c +++ b/net/tipc/crypto.c @@ -898,16 +898,10 @@ static int tipc_aead_decrypt(struct net *net, struct tipc_aead *aead, if (unlikely(!aead)) return -ENOKEY; - /* Cow skb data if needed */ - if (likely(!skb_cloned(skb) && - (!skb_is_nonlinear(skb) || !skb_has_frag_list(skb)))) { - nsg = 1 + skb_shinfo(skb)->nr_frags; - } else { - nsg = skb_cow_data(skb, 0, &unused); - if (unlikely(nsg < 0)) { - pr_err("RX: skb_cow_data() returned %d\n", nsg); - return nsg; - } + nsg = skb_cow_data(skb, 0, &unused); + if (unlikely(nsg < 0)) { + pr_err("RX: skb_cow_data() returned %d\n", nsg); + return nsg; } /* Allocate memory for the AEAD operation */ diff --git a/net/tipc/link.c b/net/tipc/link.c index cf586840caeb..1b7a487c8841 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -913,7 +913,7 @@ static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr) skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0, dnode, l->addr, dport, 0, 0); if (!skb) - return -ENOMEM; + return -ENOBUFS; msg_set_dest_droppable(buf_msg(skb), true); TIPC_SKB_CB(skb)->chain_imp = 
msg_importance(hdr); skb_queue_tail(&l->wakeupq, skb); @@ -1031,7 +1031,7 @@ void tipc_link_reset(struct tipc_link *l) * * Consumes the buffer chain. * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted - * Return: 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS or -ENOMEM + * Return: 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS */ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list, struct sk_buff_head *xmitq) @@ -1089,7 +1089,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list, if (!_skb) { kfree_skb(skb); __skb_queue_purge(list); - return -ENOMEM; + return -ENOBUFS; } __skb_queue_tail(transmq, skb); tipc_link_set_skb_retransmit_time(skb, l); diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 34a97ea36cc8..8754bd885169 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -158,6 +158,7 @@ static void tipc_sk_remove(struct tipc_sock *tsk); static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz); static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz); static void tipc_sk_push_backlog(struct tipc_sock *tsk, bool nagle_ack); +static int tipc_wait_for_connect(struct socket *sock, long *timeo_p); static const struct proto_ops packet_ops; static const struct proto_ops stream_ops; @@ -1515,8 +1516,13 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen) rc = 0; } - if (unlikely(syn && !rc)) + if (unlikely(syn && !rc)) { tipc_set_sk_state(sk, TIPC_CONNECTING); + if (dlen && timeout) { + timeout = msecs_to_jiffies(timeout); + tipc_wait_for_connect(sock, &timeout); + } + } return rc ? rc : dlen; } @@ -1564,7 +1570,7 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen) return -EMSGSIZE; /* Handle implicit connection setup */ - if (unlikely(dest)) { + if (unlikely(dest && sk->sk_state == TIPC_OPEN)) { rc = __tipc_sendmsg(sock, m, dlen); if (dlen && dlen == rc) { tsk->peer_caps = tipc_node_get_capabilities(net, dnode); @@ -2646,7 +2652,7 @@ static int tipc_listen(struct socket *sock, int len) static int tipc_wait_for_accept(struct socket *sock, long timeo) { struct sock *sk = sock->sk; - DEFINE_WAIT(wait); + DEFINE_WAIT_FUNC(wait, woken_wake_function); int err; /* True wake-one mechanism for incoming connections: only @@ -2655,12 +2661,12 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo) * anymore, the common case will execute the loop only once. */ for (;;) { - prepare_to_wait_exclusive(sk_sleep(sk), &wait, - TASK_INTERRUPTIBLE); if (timeo && skb_queue_empty(&sk->sk_receive_queue)) { + add_wait_queue(sk_sleep(sk), &wait); release_sock(sk); - timeo = schedule_timeout(timeo); + timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo); lock_sock(sk); + remove_wait_queue(sk_sleep(sk), &wait); } err = 0; if (!skb_queue_empty(&sk->sk_receive_queue)) @@ -2672,7 +2678,6 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo) if (signal_pending(current)) break; } - finish_wait(sk_sleep(sk), &wait); return err; } @@ -2689,9 +2694,10 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags, bool kern) { struct sock *new_sk, *sk = sock->sk; - struct sk_buff *buf; struct tipc_sock *new_tsock; + struct msghdr m = {NULL,}; struct tipc_msg *msg; + struct sk_buff *buf; long timeo; int res; @@ -2737,19 +2743,17 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags, } /* - * Respond to 'SYN-' by discarding it & returning 'ACK'-. 
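The tipc_wait_for_accept() rewrite above swaps the open-coded prepare_to_wait_exclusive()/schedule_timeout() sequence for wait_woken(), whose WQ_FLAG_WOKEN bookkeeping closes the window in which a wakeup delivered between the queue check and the sleep could be lost. A minimal sketch of the same pattern, assuming a hypothetical data_ready() predicate standing in for the real condition:

#include <linux/wait.h>
#include <linux/sched/signal.h>

static int wait_for_data(wait_queue_head_t *wq, long timeo)
{
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
        int err = 0;

        add_wait_queue(wq, &wait);
        while (!data_ready()) {                 /* hypothetical predicate */
                if (!timeo) {
                        err = -EAGAIN;
                        break;
                }
                if (signal_pending(current)) {
                        err = -ERESTARTSYS;
                        break;
                }
                /* A wakeup that lands before this call sets WQ_FLAG_WOKEN,
                 * so wait_woken() returns immediately instead of sleeping.
                 */
                timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
        }
        remove_wait_queue(wq, &wait);
        return err;
}

The design point is that the condition is re-checked before every sleep and the woken flag is consulted inside wait_woken(), so no separate prepare/finish bracketing is needed.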
- Respond to 'SYN+' by queuing it on new socket. + Respond to 'SYN-' by discarding it & returning 'ACK'. + Respond to 'SYN+' by queuing it on new socket & returning 'ACK'. */ if (!msg_data_sz(msg)) { - struct msghdr m = {NULL,}; - tsk_advance_rx_queue(sk); - __tipc_sendstream(new_sock, &m, 0); } else { __skb_dequeue(&sk->sk_receive_queue); __skb_queue_head(&new_sk->sk_receive_queue, buf); skb_set_owner_r(buf, new_sk); } + __tipc_sendstream(new_sock, &m, 0); release_sock(new_sk); exit: release_sock(sk); diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 23c92ad15c61..ba7ced947e51 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -1526,6 +1526,53 @@ out: return err; } +static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb) +{ + scm->fp = scm_fp_dup(UNIXCB(skb).fp); + + /* + * Garbage collection of unix sockets starts by selecting a set of + * candidate sockets which have reference only from being in flight + * (total_refs == inflight_refs). This condition is checked once during + * the candidate collection phase, and candidates are marked as such, so + * that non-candidates can later be ignored. While inflight_refs is + * protected by unix_gc_lock, total_refs (file count) is not, hence this + * is an instantaneous decision. + * + * Once a candidate, however, the socket must not be reinstalled into a + * file descriptor while the garbage collection is in progress. + * + * If the above conditions are met, then the directed graph of + * candidates (*) does not change while unix_gc_lock is held. + * + * Any operation that changes the file count through file descriptors + * (dup, close, sendmsg) does not change the graph since candidates are + * not installed in fds. + * + * Dequeuing a candidate via recvmsg would install it into an fd, but + * that takes unix_gc_lock to decrement the inflight count, so it's + * serialized with garbage collection. + * + * MSG_PEEK is special in that it does not change the inflight count, + * yet does install the socket into an fd. The following lock/unlock + * pair is to ensure serialization with garbage collection. It must be + * done between incrementing the file count and installing the file into + * an fd. + * + * If garbage collection starts after the barrier provided by the + * lock/unlock, then it will see the elevated refcount and not mark this + * as a candidate. If a garbage collection is already in progress + * before the file count was incremented, then the lock/unlock pair will + * ensure that garbage collection is finished before progressing to + * installing the fd. + * + * (*) A -> B where B is on the queue of A or B is on the queue of C + * which is on the queue of listening socket A. + */ + spin_lock(&unix_gc_lock); + spin_unlock(&unix_gc_lock); +} + static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds) { int err = 0; @@ -2175,7 +2222,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, sk_peek_offset_fwd(sk, size); if (UNIXCB(skb).fp) - scm.fp = scm_fp_dup(UNIXCB(skb).fp); + unix_peek_fds(&scm, skb); } err = (flags & MSG_TRUNC) ? skb->len - skip : size; @@ -2418,7 +2465,7 @@ unlock: /* It is questionable, see note in unix_dgram_recvmsg.
*/ if (UNIXCB(skb).fp) - scm.fp = scm_fp_dup(UNIXCB(skb).fp); + unix_peek_fds(&scm, skb); sk_peek_offset_fwd(sk, chunk); diff --git a/net/unix/diag.c b/net/unix/diag.c index 9ff64f9df1f3..7e7d7f45685a 100644 --- a/net/unix/diag.c +++ b/net/unix/diag.c @@ -295,10 +295,8 @@ again: goto again; } - err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid, - MSG_DONTWAIT); - if (err > 0) - err = 0; + err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid); + out: if (sk) sock_put(sk); diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c index e0c2c992ad9c..4f7c99dfd16c 100644 --- a/net/vmw_vsock/virtio_transport.c +++ b/net/vmw_vsock/virtio_transport.c @@ -357,11 +357,14 @@ static void virtio_vsock_event_fill(struct virtio_vsock *vsock) static void virtio_vsock_reset_sock(struct sock *sk) { - lock_sock(sk); + /* vmci_transport.c doesn't take sk_lock here either. At least we're + * under vsock_table_lock so the sock cannot disappear while we're + * executing. + */ + sk->sk_state = TCP_CLOSE; sk->sk_err = ECONNRESET; sk_error_report(sk); - release_sock(sk); } static void virtio_vsock_update_guest_cid(struct virtio_vsock *vsock) diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c index 169ba8b72a63..081e7ae93cb1 100644 --- a/net/vmw_vsock/virtio_transport_common.c +++ b/net/vmw_vsock/virtio_transport_common.c @@ -1079,6 +1079,9 @@ virtio_transport_recv_connected(struct sock *sk, virtio_transport_recv_enqueue(vsk, pkt); sk->sk_data_ready(sk); return err; + case VIRTIO_VSOCK_OP_CREDIT_REQUEST: + virtio_transport_send_credit_update(vsk); + break; case VIRTIO_VSOCK_OP_CREDIT_UPDATE: sk->sk_write_space(sk); break; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 50eb405b0690..16c88beea48b 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -2351,7 +2351,10 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, goto nla_put_failure; for (band = state->band_start; - band < NUM_NL80211_BANDS; band++) { + band < (state->split ? + NUM_NL80211_BANDS : + NL80211_BAND_60GHZ + 1); + band++) { struct ieee80211_supported_band *sband; /* omit higher bands for ancient software */ diff --git a/net/wireless/scan.c b/net/wireless/scan.c index f03c7ac8e184..7897b1478c3c 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -1754,16 +1754,14 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev, * be grouped with this beacon for updates ... 
*/ if (!cfg80211_combine_bsses(rdev, new)) { - kfree(new); + bss_ref_put(rdev, new); goto drop; } } if (rdev->bss_entries >= bss_entries_limit && !cfg80211_bss_expire_oldest(rdev)) { - if (!list_empty(&new->hidden_list)) - list_del(&new->hidden_list); - kfree(new); + bss_ref_put(rdev, new); goto drop; } diff --git a/net/xfrm/xfrm_compat.c b/net/xfrm/xfrm_compat.c index a20aec9d7393..2bf269390163 100644 --- a/net/xfrm/xfrm_compat.c +++ b/net/xfrm/xfrm_compat.c @@ -298,8 +298,16 @@ static int xfrm_xlate64(struct sk_buff *dst, const struct nlmsghdr *nlh_src) len = nlmsg_attrlen(nlh_src, xfrm_msg_min[type]); nla_for_each_attr(nla, attrs, len, remaining) { - int err = xfrm_xlate64_attr(dst, nla); + int err; + switch (type) { + case XFRM_MSG_NEWSPDINFO: + err = xfrm_nla_cpy(dst, nla, nla_len(nla)); + break; + default: + err = xfrm_xlate64_attr(dst, nla); + break; + } if (err) return err; } @@ -341,7 +349,8 @@ static int xfrm_alloc_compat(struct sk_buff *skb, const struct nlmsghdr *nlh_src /* Calculates len of translated 64-bit message. */ static size_t xfrm_user_rcv_calculate_len64(const struct nlmsghdr *src, - struct nlattr *attrs[XFRMA_MAX+1]) + struct nlattr *attrs[XFRMA_MAX + 1], + int maxtype) { size_t len = nlmsg_len(src); @@ -358,10 +367,20 @@ static size_t xfrm_user_rcv_calculate_len64(const struct nlmsghdr *src, case XFRM_MSG_POLEXPIRE: len += 8; break; + case XFRM_MSG_NEWSPDINFO: + /* attributes are xfrm_spdattr_type_t, not xfrm_attr_type_t */ + return len; default: break; } + /* Unexpected for anything but XFRM_MSG_NEWSPDINFO; please + * correct both 64=>32-bit and 32=>64-bit translators to copy + * new attributes. + */ + if (WARN_ON_ONCE(maxtype)) + return len; + if (attrs[XFRMA_SA]) len += 4; if (attrs[XFRMA_POLICY]) @@ -440,7 +459,8 @@ static int xfrm_xlate32_attr(void *dst, const struct nlattr *nla, static int xfrm_xlate32(struct nlmsghdr *dst, const struct nlmsghdr *src, struct nlattr *attrs[XFRMA_MAX+1], - size_t size, u8 type, struct netlink_ext_ack *extack) + size_t size, u8 type, int maxtype, + struct netlink_ext_ack *extack) { size_t pos; int i; @@ -520,6 +540,25 @@ static int xfrm_xlate32(struct nlmsghdr *dst, const struct nlmsghdr *src, } pos = dst->nlmsg_len; + if (maxtype) { + /* attributes are xfrm_spdattr_type_t, not xfrm_attr_type_t */ + WARN_ON_ONCE(src->nlmsg_type != XFRM_MSG_NEWSPDINFO); + + for (i = 1; i <= maxtype; i++) { + int err; + + if (!attrs[i]) + continue; + + /* just copy - no need for translation */ + err = xfrm_attr_cpy32(dst, &pos, attrs[i], size, + nla_len(attrs[i]), nla_len(attrs[i])); + if (err) + return err; + } + return 0; + } + for (i = 1; i < XFRMA_MAX + 1; i++) { int err; @@ -564,7 +603,7 @@ static struct nlmsghdr *xfrm_user_rcv_msg_compat(const struct nlmsghdr *h32, if (err < 0) return ERR_PTR(err); - len = xfrm_user_rcv_calculate_len64(h32, attrs); + len = xfrm_user_rcv_calculate_len64(h32, attrs, maxtype); /* The message doesn't need translation */ if (len == nlmsg_len(h32)) return NULL; @@ -574,7 +613,7 @@ static struct nlmsghdr *xfrm_user_rcv_msg_compat(const struct nlmsghdr *h32, if (!h64) return ERR_PTR(-ENOMEM); - err = xfrm_xlate32(h64, h32, attrs, len, type, extack); + err = xfrm_xlate32(h64, h32, attrs, len, type, maxtype, extack); if (err < 0) { kvfree(h64); return ERR_PTR(err); } diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c index 2e8afe078d61..cb40ff0ff28d 100644 --- a/net/xfrm/xfrm_ipcomp.c +++ b/net/xfrm/xfrm_ipcomp.c @@ -241,7 +241,7 @@ static void ipcomp_free_tfms(struct crypto_comp * __percpu *tfms) break; } -
WARN_ON(!pos); + WARN_ON(list_entry_is_head(pos, &ipcomp_tfms_list, list)); if (--pos->users) return; diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 827d84255021..7f881f5a5897 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -155,7 +155,6 @@ static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1] __read_mostly; static struct kmem_cache *xfrm_dst_cache __ro_after_init; -static __read_mostly seqcount_mutex_t xfrm_policy_hash_generation; static struct rhashtable xfrm_policy_inexact_table; static const struct rhashtable_params xfrm_pol_inexact_params; @@ -585,7 +584,7 @@ static void xfrm_bydst_resize(struct net *net, int dir) return; spin_lock_bh(&net->xfrm.xfrm_policy_lock); - write_seqcount_begin(&xfrm_policy_hash_generation); + write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation); odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table, lockdep_is_held(&net->xfrm.xfrm_policy_lock)); @@ -596,7 +595,7 @@ static void xfrm_bydst_resize(struct net *net, int dir) rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst); net->xfrm.policy_bydst[dir].hmask = nhashmask; - write_seqcount_end(&xfrm_policy_hash_generation); + write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation); spin_unlock_bh(&net->xfrm.xfrm_policy_lock); synchronize_rcu(); @@ -1245,7 +1244,7 @@ static void xfrm_hash_rebuild(struct work_struct *work) } while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq)); spin_lock_bh(&net->xfrm.xfrm_policy_lock); - write_seqcount_begin(&xfrm_policy_hash_generation); + write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation); /* make sure that we can insert the indirect policies again before * we start with destructive action. @@ -1354,7 +1353,7 @@ static void xfrm_hash_rebuild(struct work_struct *work) out_unlock: __xfrm_policy_inexact_flush(net); - write_seqcount_end(&xfrm_policy_hash_generation); + write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation); spin_unlock_bh(&net->xfrm.xfrm_policy_lock); mutex_unlock(&hash_resize_mutex); @@ -2091,15 +2090,12 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type, if (unlikely(!daddr || !saddr)) return NULL; - retry: - sequence = read_seqcount_begin(&xfrm_policy_hash_generation); rcu_read_lock(); - - chain = policy_hash_direct(net, daddr, saddr, family, dir); - if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence)) { - rcu_read_unlock(); - goto retry; - } + retry: + do { + sequence = read_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation); + chain = policy_hash_direct(net, daddr, saddr, family, dir); + } while (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence)); ret = NULL; hlist_for_each_entry_rcu(pol, chain, bydst) { @@ -2130,15 +2126,11 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type, } skip_inexact: - if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence)) { - rcu_read_unlock(); + if (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence)) goto retry; - } - if (ret && !xfrm_pol_hold_rcu(ret)) { - rcu_read_unlock(); + if (ret && !xfrm_pol_hold_rcu(ret)) goto retry; - } fail: rcu_read_unlock(); @@ -4089,6 +4081,7 @@ static int __net_init xfrm_net_init(struct net *net) /* Initialize the per-net locks here */ spin_lock_init(&net->xfrm.xfrm_state_lock); spin_lock_init(&net->xfrm.xfrm_policy_lock); + seqcount_spinlock_init(&net->xfrm.xfrm_policy_hash_generation, &net->xfrm.xfrm_policy_lock); mutex_init(&net->xfrm.xfrm_cfg_mutex); rv = 
xfrm_statistics_init(net); @@ -4133,7 +4126,6 @@ void __init xfrm_init(void) { register_pernet_subsys(&xfrm_net_ops); xfrm_dev_init(); - seqcount_mutex_init(&xfrm_policy_hash_generation, &hash_resize_mutex); xfrm_input_init(); #ifdef CONFIG_XFRM_ESPINTCP diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index b47d613409b7..7aff641c717d 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -2811,6 +2811,16 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, err = link->doit(skb, nlh, attrs); + /* We need to free skb allocated in xfrm_alloc_compat() before + * returning from this function, because consume_skb() won't take + * care of frag_list since netlink destructor sets + * skb->head to NULL. (see netlink_skb_destructor()) + */ + if (skb_has_frag_list(skb)) { + kfree_skb(skb_shinfo(skb)->frag_list); + skb_shinfo(skb)->frag_list = NULL; + } + err: kvfree(nlh64); return err; diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 520434ea966f..036998d11ded 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -331,6 +331,7 @@ $(obj)/%.o: $(src)/%.c -Wno-gnu-variable-sized-type-not-at-end \ -Wno-address-of-packed-member -Wno-tautological-compare \ -Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \ + -fno-asynchronous-unwind-tables \ -I$(srctree)/samples/bpf/ -include asm_goto_workaround.h \ -O2 -emit-llvm -Xclang -disable-llvm-passes -c $< -o - | \ $(OPT) -O2 -mtriple=bpf-pc-linux | $(LLVM_DIS) | \ diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c index 53e300f860bb..33d0bdebbed8 100644 --- a/samples/bpf/xdpsock_user.c +++ b/samples/bpf/xdpsock_user.c @@ -96,6 +96,7 @@ static int opt_xsk_frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE; static int opt_timeout = 1000; static bool opt_need_wakeup = true; static u32 opt_num_xsks = 1; +static u32 prog_id; static bool opt_busy_poll; static bool opt_reduced_cap; @@ -461,6 +462,23 @@ static void *poller(void *arg) return NULL; } +static void remove_xdp_program(void) +{ + u32 curr_prog_id = 0; + + if (bpf_get_link_xdp_id(opt_ifindex, &curr_prog_id, opt_xdp_flags)) { + printf("bpf_get_link_xdp_id failed\n"); + exit(EXIT_FAILURE); + } + + if (prog_id == curr_prog_id) + bpf_set_link_xdp_fd(opt_ifindex, -1, opt_xdp_flags); + else if (!curr_prog_id) + printf("couldn't find a prog id on a given interface\n"); + else + printf("program on interface changed, not removing\n"); +} + static void int_exit(int sig) { benchmark_done = true; @@ -471,6 +489,9 @@ static void __exit_with_error(int error, const char *file, const char *func, { fprintf(stderr, "%s:%s:%i: errno: %d/\"%s\"\n", file, func, line, error, strerror(error)); + + if (opt_num_xsks > 1) + remove_xdp_program(); exit(EXIT_FAILURE); } @@ -490,6 +511,9 @@ static void xdpsock_cleanup(void) if (write(sock, &cmd, sizeof(int)) < 0) exit_with_error(errno); } + + if (opt_num_xsks > 1) + remove_xdp_program(); } static void swap_mac_addresses(void *data) @@ -857,6 +881,10 @@ static struct xsk_socket_info *xsk_configure_socket(struct xsk_umem_info *umem, if (ret) exit_with_error(-ret); + ret = bpf_get_link_xdp_id(opt_ifindex, &prog_id, opt_xdp_flags); + if (ret) + exit_with_error(-ret); + xsk->app_stats.rx_empty_polls = 0; xsk->app_stats.fill_fail_polls = 0; xsk->app_stats.copy_tx_sendtos = 0; diff --git a/samples/mei/mei-amt-version.c b/samples/mei/mei-amt-version.c index ad3e56042f96..867debd3b912 100644 --- a/samples/mei/mei-amt-version.c +++ b/samples/mei/mei-amt-version.c @@ -154,31 +154,52 @@ err: static ssize_t mei_recv_msg(struct mei
*me, unsigned char *buffer, ssize_t len, unsigned long timeout) { + struct timeval tv; + fd_set set; ssize_t rc; + tv.tv_sec = timeout / 1000; + tv.tv_usec = (timeout % 1000) * 1000000; + mei_msg(me, "call read length = %zd\n", len); + FD_ZERO(&set); + FD_SET(me->fd, &set); + rc = select(me->fd + 1, &set, NULL, NULL, &tv); + if (rc > 0 && FD_ISSET(me->fd, &set)) { + mei_msg(me, "have reply\n"); + } else if (rc == 0) { + rc = -1; + mei_err(me, "read failed on timeout\n"); + goto out; + } else { /* rc < 0 */ + rc = errno; + mei_err(me, "read failed on select with status %zd %s\n", + rc, strerror(errno)); + goto out; + } + rc = read(me->fd, buffer, len); if (rc < 0) { mei_err(me, "read failed with status %zd %s\n", rc, strerror(errno)); - mei_deinit(me); - } else { - mei_msg(me, "read succeeded with result %zd\n", rc); + goto out; } + + mei_msg(me, "read succeeded with result %zd\n", rc); + +out: + if (rc < 0) + mei_deinit(me); + return rc; } static ssize_t mei_send_msg(struct mei *me, const unsigned char *buffer, ssize_t len, unsigned long timeout) { - struct timeval tv; ssize_t written; ssize_t rc; - fd_set set; - - tv.tv_sec = timeout / 1000; - tv.tv_usec = (timeout % 1000) * 1000000; mei_msg(me, "call write length = %zd\n", len); @@ -189,19 +210,7 @@ static ssize_t mei_send_msg(struct mei *me, const unsigned char *buffer, written, strerror(errno)); goto out; } - - FD_ZERO(&set); - FD_SET(me->fd, &set); - rc = select(me->fd + 1 , &set, NULL, NULL, &tv); - if (rc > 0 && FD_ISSET(me->fd, &set)) { - mei_msg(me, "write success\n"); - } else if (rc == 0) { - mei_err(me, "write failed on timeout with status\n"); - goto out; - } else { /* rc < 0 */ - mei_err(me, "write failed on select with status %zd\n", rc); - goto out; - } + mei_msg(me, "write success\n"); rc = written; out: diff --git a/scripts/Makefile.build b/scripts/Makefile.build index 10b2f2380d6f..02197cb8e3a7 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -386,7 +386,7 @@ ifeq ($(CONFIG_LTO_CLANG) $(CONFIG_MODVERSIONS),y y) cmd_update_lto_symversions = \ rm -f $@.symversions \ $(foreach n, $(filter-out FORCE,$^), \ - $(if $(wildcard $(n).symversions), \ + $(if $(shell test -s $(n).symversions && echo y), \ ; cat $(n).symversions >> $@.symversions)) else cmd_update_lto_symversions = echo >/dev/null diff --git a/scripts/checkversion.pl b/scripts/checkversion.pl index f67b125c5269..94cd49eff605 100755 --- a/scripts/checkversion.pl +++ b/scripts/checkversion.pl @@ -1,10 +1,10 @@ #! /usr/bin/env perl # SPDX-License-Identifier: GPL-2.0 # -# checkversion find uses of LINUX_VERSION_CODE or KERNEL_VERSION -# without including <linux/version.h>, or cases of -# including <linux/version.h> that don't need it. -# Copyright (C) 2003, Randy Dunlap <rdunlap@xenotime.net> +# checkversion finds uses of all macros in <linux/version.h> +# where the source files do not #include <linux/version.h>; or cases +# of including <linux/version.h> where it is not needed. +# Copyright (C) 2003, Randy Dunlap <rdunlap@infradead.org> use strict; @@ -13,7 +13,8 @@ $| = 1; my $debugging; foreach my $file (@ARGV) { - next if $file =~ "include/linux/version\.h"; + next if $file =~ "include/generated/uapi/linux/version\.h"; + next if $file =~ "usr/include/linux/version\.h"; # Open this file. open( my $f, '<', $file ) or die "Can't open $file: $!\n"; @@ -41,8 +42,11 @@ foreach my $file (@ARGV) { $iLinuxVersion = $. 
if m/^\s*#\s*include\s*<linux\/version\.h>/o; } - # Look for uses: LINUX_VERSION_CODE, KERNEL_VERSION, UTS_RELEASE - if (($_ =~ /LINUX_VERSION_CODE/) || ($_ =~ /\WKERNEL_VERSION/)) { + # Look for uses: LINUX_VERSION_CODE, KERNEL_VERSION, + # LINUX_VERSION_MAJOR, LINUX_VERSION_PATCHLEVEL, LINUX_VERSION_SUBLEVEL + if (($_ =~ /LINUX_VERSION_CODE/) || ($_ =~ /\WKERNEL_VERSION/) || + ($_ =~ /LINUX_VERSION_MAJOR/) || ($_ =~ /LINUX_VERSION_PATCHLEVEL/) || + ($_ =~ /LINUX_VERSION_SUBLEVEL/)) { $fUseVersion = 1; last if $iLinuxVersion; } diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl index c17e48020ec3..8f6b13ae46bf 100755 --- a/scripts/recordmcount.pl +++ b/scripts/recordmcount.pl @@ -173,39 +173,6 @@ my $mcount_regex; # Find the call site to mcount (return offset) my $mcount_adjust; # Address adjustment to mcount offset my $alignment; # The .align value to use for $mcount_section my $section_type; # Section header plus possible alignment command -my $can_use_local = 0; # If we can use local function references - -# Shut up recordmcount if user has older objcopy -my $quiet_recordmcount = ".tmp_quiet_recordmcount"; -my $print_warning = 1; -$print_warning = 0 if ( -f $quiet_recordmcount); - -## -# check_objcopy - whether objcopy supports --globalize-symbols -# -# --globalize-symbols came out in 2.17, we must test the version -# of objcopy, and if it is less than 2.17, then we can not -# record local functions. -sub check_objcopy -{ - open (IN, "$objcopy --version |") or die "error running $objcopy"; - while (<IN>) { - if (/objcopy.*\s(\d+)\.(\d+)/) { - $can_use_local = 1 if ($1 > 2 || ($1 == 2 && $2 >= 17)); - last; - } - } - close (IN); - - if (!$can_use_local && $print_warning) { - print STDERR "WARNING: could not find objcopy version or version " . - "is less than 2.17.\n" . - "\tLocal function references are disabled.\n"; - open (QUIET, ">$quiet_recordmcount"); - printf QUIET "Disables the warning from recordmcount.pl\n"; - close QUIET; - } -} if ($arch =~ /(x86(_64)?)|(i386)/) { if ($bits == 64) { @@ -434,8 +401,6 @@ if ($filename =~ m,^(.*)(\.\S),) { my $mcount_s = $dirname . "/.tmp_mc_" . $prefix . ".s"; my $mcount_o = $dirname . "/.tmp_mc_" . $prefix . ".o"; -check_objcopy(); - # # Step 1: find all the local (static functions) and weak symbols. # 't' is local, 'w/W' is weak @@ -473,11 +438,6 @@ sub update_funcs # is this function static? If so, note this fact. if (defined $locals{$ref_func}) { - - # only use locals if objcopy supports globalize-symbols - if (!$can_use_local) { - return; - } $convert{$ref_func} = 1; } diff --git a/scripts/setlocalversion b/scripts/setlocalversion index 151f04971faa..6b54e46a0f12 100755 --- a/scripts/setlocalversion +++ b/scripts/setlocalversion @@ -131,11 +131,14 @@ res="${res}${CONFIG_LOCALVERSION}${LOCALVERSION}" if test "$CONFIG_LOCALVERSION_AUTO" = "y"; then # full scm version string res="$res$(scm_version)" -elif [ -z "${LOCALVERSION}" ]; then - # append a plus sign if the repository is not in a clean - # annotated or signed tagged state (as git describe only - # looks at signed or annotated tags - git tag -a/-s) and - # LOCALVERSION= is not specified +elif [ "${LOCALVERSION+set}" != "set" ]; then + # If the variable LOCALVERSION is not set, append a plus + # sign if the repository is not in a clean annotated or + # signed tagged state (as git describe only looks at signed + # or annotated tags - git tag -a/-s). + # + # If the variable LOCALVERSION is set (including being set + # to an empty string), we don't want to append a plus sign. 
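The setlocalversion hunk above hinges on the shell's unset-versus-empty distinction: "${LOCALVERSION+set}" expands to "set" whenever the variable is set at all, even to an empty string, so an explicitly empty LOCALVERSION suppresses the plus sign while an unset one still allows it. For an exported variable, C's getenv() exposes the same three-way split; a standalone userspace sketch (not kernel code, and only an analogy for the shell logic):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        const char *v = getenv("LOCALVERSION");

        if (!v)
                printf("unset: a '+' may be appended for a dirty tree\n");
        else if (!*v)
                printf("set but empty: the '+' suffix is suppressed\n");
        else
                printf("set: \"%s\" is appended verbatim\n", v);
        return 0;
}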
scm=$(scm_version --short) res="$res${scm:++}" fi diff --git a/scripts/spdxcheck-test.sh b/scripts/spdxcheck-test.sh index cfea6a0d1cc0..cb76324756bd 100644 --- a/scripts/spdxcheck-test.sh +++ b/scripts/spdxcheck-test.sh @@ -1,12 +1,10 @@ #!/bin/sh -for PYTHON in python2 python3; do - # run check on a text and a binary file - for FILE in Makefile Documentation/logo.gif; do - $PYTHON scripts/spdxcheck.py $FILE - $PYTHON scripts/spdxcheck.py - < $FILE - done - - # run check on complete tree to catch any other issues - $PYTHON scripts/spdxcheck.py > /dev/null +# run check on a text and a binary file +for FILE in Makefile Documentation/logo.gif; do + python3 scripts/spdxcheck.py $FILE + python3 scripts/spdxcheck.py - < $FILE done + +# run check on complete tree to catch any other issues +python3 scripts/spdxcheck.py > /dev/null diff --git a/scripts/spdxcheck.py b/scripts/spdxcheck.py index 3e784cf9f401..ebd06ae642c9 100755 --- a/scripts/spdxcheck.py +++ b/scripts/spdxcheck.py @@ -44,7 +44,7 @@ def read_spdxdata(repo): continue exception = None - for l in open(el.path).readlines(): + for l in open(el.path, encoding="utf-8").readlines(): if l.startswith('Valid-License-Identifier:'): lid = l.split(':')[1].strip().upper() if lid in spdx.licenses: diff --git a/scripts/tracing/draw_functrace.py b/scripts/tracing/draw_functrace.py index 74f8aadfd4cb..7011fbe003ff 100755 --- a/scripts/tracing/draw_functrace.py +++ b/scripts/tracing/draw_functrace.py @@ -17,7 +17,7 @@ Usage: $ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func Wait some times but not too much, the script is a bit slow. Break the pipe (Ctrl + Z) - $ scripts/draw_functrace.py < raw_trace_func > draw_functrace + $ scripts/tracing/draw_functrace.py < ~/raw_trace_func > draw_functrace Then you have your drawn trace in draw_functrace """ @@ -103,10 +103,10 @@ def parseLine(line): line = line.strip() if line.startswith("#"): raise CommentLineException - m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line) + m = re.match("[^]]+?\\] +([a-z.]+) +([0-9.]+): (\\w+) <-(\\w+)", line) if m is None: raise BrokenLineException - return (m.group(1), m.group(2), m.group(3)) + return (m.group(2), m.group(3), m.group(4)) def main(): diff --git a/security/security.c b/security/security.c index 09533cbb7221..9ffa9e9c5c55 100644 --- a/security/security.c +++ b/security/security.c @@ -58,10 +58,11 @@ const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1] = { [LOCKDOWN_MMIOTRACE] = "unsafe mmio", [LOCKDOWN_DEBUGFS] = "debugfs access", [LOCKDOWN_XMON_WR] = "xmon write access", + [LOCKDOWN_BPF_WRITE_USER] = "use of bpf to write user RAM", [LOCKDOWN_INTEGRITY_MAX] = "integrity", [LOCKDOWN_KCORE] = "/proc/kcore access", [LOCKDOWN_KPROBES] = "use of kprobes", - [LOCKDOWN_BPF_READ] = "use of bpf to read kernel RAM", + [LOCKDOWN_BPF_READ_KERNEL] = "use of bpf to read kernel RAM", [LOCKDOWN_PERF] = "unsafe use of perf", [LOCKDOWN_TRACEFS] = "use of tracefs", [LOCKDOWN_XMON_RW] = "xmon read and write access", diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c index defc5ef35c66..0ae1b718194a 100644 --- a/security/selinux/ss/policydb.c +++ b/security/selinux/ss/policydb.c @@ -874,7 +874,7 @@ int policydb_load_isids(struct policydb *p, struct sidtab *s) rc = sidtab_init(s); if (rc) { pr_err("SELinux: out of memory on SID table init\n"); - goto out; + return rc; } head = p->ocontexts[OCON_ISID]; @@ -885,7 +885,7 @@ int policydb_load_isids(struct policydb *p, struct sidtab *s) if (sid == SECSID_NULL) { pr_err("SELinux: 
SID 0 was assigned a context.\n"); sidtab_destroy(s); - goto out; + return -EINVAL; } /* Ignore initial SIDs unused by this kernel. */ @@ -897,12 +897,10 @@ int policydb_load_isids(struct policydb *p, struct sidtab *s) pr_err("SELinux: unable to load initial SID %s.\n", name); sidtab_destroy(s); - goto out; + return rc; } } - rc = 0; -out: - return rc; + return 0; } int policydb_class_isvalid(struct policydb *p, unsigned int class) diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c index 83b79edfa52d..439a358ecfe9 100644 --- a/sound/core/memalloc.c +++ b/sound/core/memalloc.c @@ -215,7 +215,7 @@ static int snd_dma_continuous_mmap(struct snd_dma_buffer *dmab, struct vm_area_struct *area) { return remap_pfn_range(area, area->vm_start, - dmab->addr >> PAGE_SHIFT, + page_to_pfn(virt_to_page(dmab->area)), area->vm_end - area->vm_start, area->vm_page_prot); } diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 14e32825c339..71323d807dbf 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -246,12 +246,21 @@ static bool hw_support_mmap(struct snd_pcm_substream *substream) if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_MMAP)) return false; - if (substream->ops->mmap || - (substream->dma_buffer.dev.type != SNDRV_DMA_TYPE_DEV && - substream->dma_buffer.dev.type != SNDRV_DMA_TYPE_DEV_UC)) + if (substream->ops->mmap || substream->ops->page) return true; - return dma_can_mmap(substream->dma_buffer.dev.dev); + switch (substream->dma_buffer.dev.type) { + case SNDRV_DMA_TYPE_UNKNOWN: + /* we can't know the device, so just assume that the driver does + * everything right + */ + return true; + case SNDRV_DMA_TYPE_CONTINUOUS: + case SNDRV_DMA_TYPE_VMALLOC: + return true; + default: + return dma_can_mmap(substream->dma_buffer.dev.dev); + } } static int constrain_mask_params(struct snd_pcm_substream *substream, @@ -3063,9 +3072,14 @@ static int snd_pcm_ioctl_sync_ptr_compat(struct snd_pcm_substream *substream, boundary = 0x7fffffff; snd_pcm_stream_lock_irq(substream); /* FIXME: we should consider the boundary for the sync from app */ - if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL)) - control->appl_ptr = scontrol.appl_ptr; - else + if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL)) { + err = pcm_lib_apply_appl_ptr(substream, + scontrol.appl_ptr); + if (err < 0) { + snd_pcm_stream_unlock_irq(substream); + return err; + } + } else scontrol.appl_ptr = control->appl_ptr % boundary; if (!(sflags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN)) control->avail_min = scontrol.avail_min; @@ -3664,6 +3678,8 @@ static vm_fault_t snd_pcm_mmap_data_fault(struct vm_fault *vmf) return VM_FAULT_SIGBUS; if (substream->ops->page) page = substream->ops->page(substream, offset); + else if (!snd_pcm_get_dma_buf(substream)) + page = virt_to_page(runtime->dma_area + offset); else page = snd_sgbuf_get_page(snd_pcm_get_dma_buf(substream), offset); if (!page) diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c index b9c2ce2b8d5a..84d78630463e 100644 --- a/sound/core/seq/seq_ports.c +++ b/sound/core/seq/seq_ports.c @@ -514,10 +514,11 @@ static int check_and_subscribe_port(struct snd_seq_client *client, return err; } -static void delete_and_unsubscribe_port(struct snd_seq_client *client, - struct snd_seq_client_port *port, - struct snd_seq_subscribers *subs, - bool is_src, bool ack) +/* called with grp->list_mutex held */ +static void __delete_and_unsubscribe_port(struct snd_seq_client *client, + struct snd_seq_client_port *port, + struct snd_seq_subscribers *subs, + bool is_src, bool ack) { struct 
snd_seq_port_subs_info *grp; struct list_head *list; @@ -525,7 +526,6 @@ static void delete_and_unsubscribe_port(struct snd_seq_client *client, grp = is_src ? &port->c_src : &port->c_dest; list = is_src ? &subs->src_list : &subs->dest_list; - down_write(&grp->list_mutex); write_lock_irq(&grp->list_lock); empty = list_empty(list); if (!empty) @@ -535,6 +535,18 @@ static void delete_and_unsubscribe_port(struct snd_seq_client *client, if (!empty) unsubscribe_port(client, port, grp, &subs->info, ack); +} + +static void delete_and_unsubscribe_port(struct snd_seq_client *client, + struct snd_seq_client_port *port, + struct snd_seq_subscribers *subs, + bool is_src, bool ack) +{ + struct snd_seq_port_subs_info *grp; + + grp = is_src ? &port->c_src : &port->c_dest; + down_write(&grp->list_mutex); + __delete_and_unsubscribe_port(client, port, subs, is_src, ack); up_write(&grp->list_mutex); } @@ -590,27 +602,30 @@ int snd_seq_port_disconnect(struct snd_seq_client *connector, struct snd_seq_client_port *dest_port, struct snd_seq_port_subscribe *info) { - struct snd_seq_port_subs_info *src = &src_port->c_src; + struct snd_seq_port_subs_info *dest = &dest_port->c_dest; struct snd_seq_subscribers *subs; int err = -ENOENT; - down_write(&src->list_mutex); + /* always start from deleting the dest port for avoiding concurrent + * deletions + */ + down_write(&dest->list_mutex); /* look for the connection */ - list_for_each_entry(subs, &src->list_head, src_list) { + list_for_each_entry(subs, &dest->list_head, dest_list) { if (match_subs_info(info, &subs->info)) { - atomic_dec(&subs->ref_count); /* mark as not ready */ + __delete_and_unsubscribe_port(dest_client, dest_port, + subs, false, + connector->number != dest_client->number); err = 0; break; } } - up_write(&src->list_mutex); + up_write(&dest->list_mutex); if (err < 0) return err; delete_and_unsubscribe_port(src_client, src_port, subs, true, connector->number != src_client->number); - delete_and_unsubscribe_port(dest_client, dest_port, subs, false, - connector->number != dest_client->number); kfree(subs); return 0; } diff --git a/sound/firewire/oxfw/oxfw-stream.c b/sound/firewire/oxfw/oxfw-stream.c index 0ef242fdd3bc..fff18b5d4e05 100644 --- a/sound/firewire/oxfw/oxfw-stream.c +++ b/sound/firewire/oxfw/oxfw-stream.c @@ -153,7 +153,7 @@ static int init_stream(struct snd_oxfw *oxfw, struct amdtp_stream *stream) struct cmp_connection *conn; enum cmp_direction c_dir; enum amdtp_stream_direction s_dir; - unsigned int flags = CIP_UNAWARE_SYT; + unsigned int flags = 0; int err; if (!(oxfw->quirks & SND_OXFW_QUIRK_BLOCKING_TRANSMISSION)) @@ -161,6 +161,13 @@ static int init_stream(struct snd_oxfw *oxfw, struct amdtp_stream *stream) else flags |= CIP_BLOCKING; + // OXFW 970/971 has no function to generate playback timing according to the sequence + // of value in syt field, thus the packet should include NO_INFO value in the field. + // However, some models just ignore data blocks in packet with NO_INFO for audio data + // processing. 
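The snd_seq_port_disconnect() rework a little further up follows the kernel's double-underscore locking convention: __delete_and_unsubscribe_port() requires that grp->list_mutex already be held, while the plain-named wrapper acquires it, which lets the disconnect path do the lookup and the removal inside a single critical section. A generic sketch of the convention, with hypothetical my_obj names and a mutex standing in for the rwsem the sequencer code actually uses:

#include <linux/mutex.h>
#include <linux/list.h>

struct my_obj {
        struct mutex lock;
        struct list_head items;
};

/* Caller must already hold obj->lock. */
static void __my_obj_remove(struct my_obj *obj, struct list_head *node)
{
        lockdep_assert_held(&obj->lock);
        list_del_init(node);
}

/* Locking wrapper for callers that do not hold obj->lock. */
static void my_obj_remove(struct my_obj *obj, struct list_head *node)
{
        mutex_lock(&obj->lock);
        __my_obj_remove(obj, node);
        mutex_unlock(&obj->lock);
}

Callers that already hold the lock (here, the disconnect path scanning the subscriber list) use the __ variant directly; everyone else goes through the wrapper, so the lock is taken exactly once.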
+ if (!(oxfw->quirks & SND_OXFW_QUIRK_IGNORE_NO_INFO_PACKET)) + flags |= CIP_UNAWARE_SYT; + if (stream == &oxfw->tx_stream) { conn = &oxfw->out_conn; c_dir = CMP_OUTPUT; diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c index 84971d78d152..cb5b5e3a481b 100644 --- a/sound/firewire/oxfw/oxfw.c +++ b/sound/firewire/oxfw/oxfw.c @@ -159,8 +159,10 @@ static int detect_quirks(struct snd_oxfw *oxfw, const struct ieee1394_device_id return snd_oxfw_scs1x_add(oxfw); } - if (entry->vendor_id == OUI_APOGEE && entry->model_id == MODEL_DUET_FW) - oxfw->quirks |= SND_OXFW_QUIRK_BLOCKING_TRANSMISSION; + if (entry->vendor_id == OUI_APOGEE && entry->model_id == MODEL_DUET_FW) { + oxfw->quirks |= SND_OXFW_QUIRK_BLOCKING_TRANSMISSION | + SND_OXFW_QUIRK_IGNORE_NO_INFO_PACKET; + } /* * TASCAM FireOne has physical control and requires a pair of additional diff --git a/sound/firewire/oxfw/oxfw.h b/sound/firewire/oxfw/oxfw.h index ee47abcb0c90..c13034f6c2ca 100644 --- a/sound/firewire/oxfw/oxfw.h +++ b/sound/firewire/oxfw/oxfw.h @@ -42,6 +42,11 @@ enum snd_oxfw_quirk { SND_OXFW_QUIRK_BLOCKING_TRANSMISSION = 0x04, // Stanton SCS1.d and SCS1.m support unique transaction. SND_OXFW_QUIRK_SCS_TRANSACTION = 0x08, + // Apogee Duet FireWire ignores data blocks in packet with NO_INFO for audio data + // processing, while output level meter moves. Any value in syt field of packet takes + // the device to process audio data even if the value is invalid in a point of + // IEC 61883-1/6. + SND_OXFW_QUIRK_IGNORE_NO_INFO_PACKET = 0x10, }; /* This is an arbitrary number for convinience. */ diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c index d8be146793ee..c9d0ba353463 100644 --- a/sound/hda/intel-dsp-config.c +++ b/sound/hda/intel-dsp-config.c @@ -319,6 +319,10 @@ static const struct config_entry config_table[] = { .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC, .device = 0x4b55, }, + { + .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC, + .device = 0x4b58, + }, #endif /* Alder Lake */ diff --git a/sound/isa/sb/sb16_csp.c b/sound/isa/sb/sb16_csp.c index 5bbe6695689d..7ad8c5f7b664 100644 --- a/sound/isa/sb/sb16_csp.c +++ b/sound/isa/sb/sb16_csp.c @@ -816,6 +816,7 @@ static int snd_sb_csp_start(struct snd_sb_csp * p, int sample_width, int channel mixR = snd_sbmixer_read(p->chip, SB_DSP4_PCM_DEV + 1); snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL & 0x7); snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR & 0x7); + spin_unlock_irqrestore(&p->chip->mixer_lock, flags); spin_lock(&p->chip->reg_lock); set_mode_register(p->chip, 0xc0); /* c0 = STOP */ @@ -855,6 +856,7 @@ static int snd_sb_csp_start(struct snd_sb_csp * p, int sample_width, int channel spin_unlock(&p->chip->reg_lock); /* restore PCM volume */ + spin_lock_irqsave(&p->chip->mixer_lock, flags); snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL); snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR); spin_unlock_irqrestore(&p->chip->mixer_lock, flags); @@ -880,6 +882,7 @@ static int snd_sb_csp_stop(struct snd_sb_csp * p) mixR = snd_sbmixer_read(p->chip, SB_DSP4_PCM_DEV + 1); snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL & 0x7); snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR & 0x7); + spin_unlock_irqrestore(&p->chip->mixer_lock, flags); spin_lock(&p->chip->reg_lock); if (p->running & SNDRV_SB_CSP_ST_QSOUND) { @@ -894,6 +897,7 @@ static int snd_sb_csp_stop(struct snd_sb_csp * p) spin_unlock(&p->chip->reg_lock); /* restore PCM volume */ + spin_lock_irqsave(&p->chip->mixer_lock, flags); snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL); 
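The sb16_csp hunks around here unnest the two spinlocks: the mixer state is snapshotted under mixer_lock (taken with IRQs disabled), that lock is dropped before reg_lock is acquired for the DSP work, and mixer_lock is re-taken only to restore the snapshot. The shape of the fix, reduced to placeholders; read_a(), write_a() and poke_b() are hypothetical accessors for state guarded by each lock:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(a_lock);         /* plays the role of mixer_lock */
static DEFINE_SPINLOCK(b_lock);         /* plays the role of reg_lock */

static void duck_work_restore(void)
{
        unsigned long flags;
        int saved;

        spin_lock_irqsave(&a_lock, flags);
        saved = read_a();               /* snapshot state guarded by A */
        write_a(saved & 0x7);           /* duck the volume */
        spin_unlock_irqrestore(&a_lock, flags); /* drop A before taking B */

        spin_lock(&b_lock);
        poke_b();                       /* program the DSP under B alone */
        spin_unlock(&b_lock);

        spin_lock_irqsave(&a_lock, flags);
        write_a(saved);                 /* restore the snapshot */
        spin_unlock_irqrestore(&a_lock, flags);
}

Holding only one spinlock at a time avoids the nested A-then-B ordering entirely, at the cost of the guarded state being briefly visible in its ducked form.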
snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR); spin_unlock_irqrestore(&p->chip->mixer_lock, flags); diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index e97d00585e8e..481d8f8d3396 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c @@ -3460,7 +3460,7 @@ static int cap_put_caller(struct snd_kcontrol *kcontrol, struct hda_gen_spec *spec = codec->spec; const struct hda_input_mux *imux; struct nid_path *path; - int i, adc_idx, err = 0; + int i, adc_idx, ret, err = 0; imux = &spec->input_mux; adc_idx = kcontrol->id.index; @@ -3470,9 +3470,13 @@ static int cap_put_caller(struct snd_kcontrol *kcontrol, if (!path || !path->ctls[type]) continue; kcontrol->private_value = path->ctls[type]; - err = func(kcontrol, ucontrol); - if (err < 0) + ret = func(kcontrol, ucontrol); + if (ret < 0) { + err = ret; break; + } + if (ret > 0) + err = 1; } mutex_unlock(&codec->control_mutex); if (err >= 0 && spec->cap_sync_hook) diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 0322b289505e..0062c18b646a 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -883,10 +883,11 @@ static unsigned int azx_get_pos_skl(struct azx *chip, struct azx_dev *azx_dev) return azx_get_pos_posbuf(chip, azx_dev); } -static void azx_shutdown_chip(struct azx *chip) +static void __azx_shutdown_chip(struct azx *chip, bool skip_link_reset) { azx_stop_chip(chip); - azx_enter_link_reset(chip); + if (!skip_link_reset) + azx_enter_link_reset(chip); azx_clear_irq_pending(chip); display_power(chip, false); } @@ -895,6 +896,11 @@ static void azx_shutdown_chip(struct azx *chip) static DEFINE_MUTEX(card_list_lock); static LIST_HEAD(card_list); +static void azx_shutdown_chip(struct azx *chip) +{ + __azx_shutdown_chip(chip, false); +} + static void azx_add_card_list(struct azx *chip) { struct hda_intel *hda = container_of(chip, struct hda_intel, chip); @@ -2385,7 +2391,7 @@ static void azx_shutdown(struct pci_dev *pci) return; chip = card->private_data; if (chip && chip->running) - azx_shutdown_chip(chip); + __azx_shutdown_chip(chip, true); } /* PCI IDs */ diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 4b2cc8cb55c4..e143e69d8184 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -1940,6 +1940,8 @@ static int hdmi_add_cvt(struct hda_codec *codec, hda_nid_t cvt_nid) static const struct snd_pci_quirk force_connect_list[] = { SND_PCI_QUIRK(0x103c, 0x870f, "HP", 1), SND_PCI_QUIRK(0x103c, 0x871a, "HP", 1), + SND_PCI_QUIRK(0x1462, 0xec94, "MS-7C94", 1), + SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", 1), {} }; diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 1389cfd5e0db..7ad689f991e7 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -6658,6 +6658,7 @@ enum { ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP, ALC623_FIXUP_LENOVO_THINKSTATION_P340, ALC255_FIXUP_ACER_HEADPHONE_AND_MIC, + ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST, }; static const struct hda_fixup alc269_fixups[] = { @@ -8242,6 +8243,12 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC255_FIXUP_XIAOMI_HEADSET_MIC }, + [ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc269_fixup_limit_int_mic_boost, + .chained = true, + .chain_id = ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF, + }, }; static const struct snd_pci_quirk alc269_fixup_tbl[] = { @@ -8274,9 +8281,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1025, 
0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x129c, "Acer SWIFT SF314-55", ALC256_FIXUP_ACER_HEADSET_MIC), + SND_PCI_QUIRK(0x1025, 0x1300, "Acer SWIFT SF314-56", ALC256_FIXUP_ACER_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x132a, "Acer TravelMate B114-21", ALC233_FIXUP_ACER_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC), + SND_PCI_QUIRK(0x1025, 0x142b, "Acer Swift SF314-42", ALC255_FIXUP_ACER_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1025, 0x1430, "Acer TravelMate B311R-31", ALC256_FIXUP_ACER_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1025, 0x1466, "Acer Aspire A515-56", ALC255_FIXUP_ACER_HEADPHONE_AND_MIC), SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), @@ -8330,6 +8339,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x0a2e, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC), SND_PCI_QUIRK(0x1028, 0x0a30, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC), SND_PCI_QUIRK(0x1028, 0x0a58, "Dell", ALC255_FIXUP_DELL_HEADSET_MIC), + SND_PCI_QUIRK(0x1028, 0x0a61, "Dell XPS 15 9510", ALC289_FIXUP_DUAL_SPK), SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), @@ -8429,13 +8439,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x87f4, "HP", ALC287_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP), + SND_PCI_QUIRK(0x103c, 0x8805, "HP ProBook 650 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x880d, "HP EliteBook 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8846, "HP EliteBook 850 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8847, "HP EliteBook x360 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x884b, "HP EliteBook 840 Aero G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x884c, "HP EliteBook 840 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED), - SND_PCI_QUIRK(0x103c, 0x8862, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), - SND_PCI_QUIRK(0x103c, 0x8863, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), + SND_PCI_QUIRK(0x103c, 0x8862, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x103c, 0x8863, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x103c, 0x886d, "HP ZBook Fury 17.3 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x8870, "HP ZBook Fury 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x8873, "HP ZBook Studio 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT), @@ -8463,6 +8474,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x1043, 0x1740, "ASUS UX430UA", ALC295_FIXUP_ASUS_DACS), SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK), + SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK), SND_PCI_QUIRK(0x1043, 0x1881, "ASUS 
Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS), SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC), @@ -8626,6 +8638,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x3151, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC), + SND_PCI_QUIRK(0x17aa, 0x31af, "ThinkCentre Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340), SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME), SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF), SND_PCI_QUIRK(0x17aa, 0x3843, "Yoga 9i", ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP), diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c index a5c1a2c4eae4..773a136161f1 100644 --- a/sound/pci/hda/patch_via.c +++ b/sound/pci/hda/patch_via.c @@ -1041,6 +1041,7 @@ static const struct hda_fixup via_fixups[] = { }; static const struct snd_pci_quirk vt2002p_fixups[] = { + SND_PCI_QUIRK(0x1043, 0x13f7, "Asus B23E", VIA_FIXUP_POWER_SAVE), SND_PCI_QUIRK(0x1043, 0x1487, "Asus G75", VIA_FIXUP_ASUS_G75), SND_PCI_QUIRK(0x1043, 0x8532, "Asus X202E", VIA_FIXUP_INTMIC_BOOST), SND_PCI_QUIRK_VENDOR(0x1558, "Clevo", VIA_FIXUP_POWER_SAVE), diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig index 8a13462e1a63..5dcf77af07af 100644 --- a/sound/soc/Kconfig +++ b/sound/soc/Kconfig @@ -36,6 +36,7 @@ config SND_SOC_COMPRESS config SND_SOC_TOPOLOGY bool + select SND_DYNAMIC_MINORS config SND_SOC_TOPOLOGY_KUNIT_TEST tristate "KUnit tests for SoC topology" diff --git a/sound/soc/amd/acp-da7219-max98357a.c b/sound/soc/amd/acp-da7219-max98357a.c index 84e3906abd4f..3c60c5f96dcb 100644 --- a/sound/soc/amd/acp-da7219-max98357a.c +++ b/sound/soc/amd/acp-da7219-max98357a.c @@ -525,6 +525,7 @@ static struct snd_soc_dai_link cz_dai_7219_98357[] = { | SND_SOC_DAIFMT_CBM_CFM, .init = cz_da7219_init, .dpcm_playback = 1, + .stop_dma_first = 1, .ops = &cz_da7219_play_ops, SND_SOC_DAILINK_REG(designware1, dlgs, platform), }, @@ -534,6 +535,7 @@ static struct snd_soc_dai_link cz_dai_7219_98357[] = { .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM, .dpcm_capture = 1, + .stop_dma_first = 1, .ops = &cz_da7219_cap_ops, SND_SOC_DAILINK_REG(designware2, dlgs, platform), }, @@ -543,6 +545,7 @@ static struct snd_soc_dai_link cz_dai_7219_98357[] = { .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM, .dpcm_playback = 1, + .stop_dma_first = 1, .ops = &cz_max_play_ops, SND_SOC_DAILINK_REG(designware3, mx, platform), }, @@ -553,6 +556,7 @@ static struct snd_soc_dai_link cz_dai_7219_98357[] = { .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM, .dpcm_capture = 1, + .stop_dma_first = 1, .ops = &cz_dmic0_cap_ops, SND_SOC_DAILINK_REG(designware3, adau, platform), }, @@ -563,6 +567,7 @@ static struct snd_soc_dai_link cz_dai_7219_98357[] = { .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM, .dpcm_capture = 1, + .stop_dma_first = 1, .ops = &cz_dmic1_cap_ops, SND_SOC_DAILINK_REG(designware2, adau, platform), }, @@ -576,6 +581,7 @@ static struct snd_soc_dai_link cz_dai_5682_98357[] = { | SND_SOC_DAIFMT_CBM_CFM, .init = cz_rt5682_init, .dpcm_playback = 1, + .stop_dma_first = 1, .ops = &cz_rt5682_play_ops, SND_SOC_DAILINK_REG(designware1, rt5682, platform), }, @@ -585,6 +591,7 
@@ static struct snd_soc_dai_link cz_dai_5682_98357[] = { .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM, .dpcm_capture = 1, + .stop_dma_first = 1, .ops = &cz_rt5682_cap_ops, SND_SOC_DAILINK_REG(designware2, rt5682, platform), }, @@ -594,6 +601,7 @@ static struct snd_soc_dai_link cz_dai_5682_98357[] = { .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM, .dpcm_playback = 1, + .stop_dma_first = 1, .ops = &cz_rt5682_max_play_ops, SND_SOC_DAILINK_REG(designware3, mx, platform), }, @@ -604,6 +612,7 @@ static struct snd_soc_dai_link cz_dai_5682_98357[] = { .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM, .dpcm_capture = 1, + .stop_dma_first = 1, .ops = &cz_rt5682_dmic0_cap_ops, SND_SOC_DAILINK_REG(designware3, adau, platform), }, @@ -614,6 +623,7 @@ static struct snd_soc_dai_link cz_dai_5682_98357[] = { .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM, .dpcm_capture = 1, + .stop_dma_first = 1, .ops = &cz_rt5682_dmic1_cap_ops, SND_SOC_DAILINK_REG(designware2, adau, platform), }, diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c index 143155a840ac..cc1ce6f22caa 100644 --- a/sound/soc/amd/acp-pcm-dma.c +++ b/sound/soc/amd/acp-pcm-dma.c @@ -969,7 +969,7 @@ static int acp_dma_hw_params(struct snd_soc_component *component, acp_set_sram_bank_state(rtd->acp_mmio, 0, true); /* Save for runtime private data */ - rtd->dma_addr = substream->dma_buffer.addr; + rtd->dma_addr = runtime->dma_addr; rtd->order = get_order(size); /* Fill the page table entries in ACP SRAM */ diff --git a/sound/soc/amd/raven/acp3x-pcm-dma.c b/sound/soc/amd/raven/acp3x-pcm-dma.c index 8148b0d22e88..597d7c4b2a6b 100644 --- a/sound/soc/amd/raven/acp3x-pcm-dma.c +++ b/sound/soc/amd/raven/acp3x-pcm-dma.c @@ -286,7 +286,7 @@ static int acp3x_dma_hw_params(struct snd_soc_component *component, pr_err("pinfo failed\n"); } size = params_buffer_bytes(params); - rtd->dma_addr = substream->dma_buffer.addr; + rtd->dma_addr = substream->runtime->dma_addr; rtd->num_pages = (PAGE_ALIGN(size) >> PAGE_SHIFT); config_acp3x_dma(rtd, substream->stream); return 0; diff --git a/sound/soc/amd/renoir/acp3x-pdm-dma.c b/sound/soc/amd/renoir/acp3x-pdm-dma.c index bd20622b0933..0391c28dd078 100644 --- a/sound/soc/amd/renoir/acp3x-pdm-dma.c +++ b/sound/soc/amd/renoir/acp3x-pdm-dma.c @@ -242,7 +242,7 @@ static int acp_pdm_dma_hw_params(struct snd_soc_component *component, return -EINVAL; size = params_buffer_bytes(params); period_bytes = params_period_bytes(params); - rtd->dma_addr = substream->dma_buffer.addr; + rtd->dma_addr = substream->runtime->dma_addr; rtd->num_pages = (PAGE_ALIGN(size) >> PAGE_SHIFT); config_acp_dma(rtd, substream->stream); init_pdm_ring_buffer(MEM_WINDOW_START, size, period_bytes, diff --git a/sound/soc/amd/renoir/rn-pci-acp3x.c b/sound/soc/amd/renoir/rn-pci-acp3x.c index 19438da5dfa5..7b8040e812a1 100644 --- a/sound/soc/amd/renoir/rn-pci-acp3x.c +++ b/sound/soc/amd/renoir/rn-pci-acp3x.c @@ -382,6 +382,8 @@ static const struct dev_pm_ops rn_acp_pm = { .runtime_resume = snd_rn_acp_resume, .suspend = snd_rn_acp_suspend, .resume = snd_rn_acp_resume, + .restore = snd_rn_acp_resume, + .poweroff = snd_rn_acp_suspend, }; static void snd_rn_acp_remove(struct pci_dev *pci) diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig index 9e3e4db3d75d..9ff1600ca823 100644 --- a/sound/soc/codecs/Kconfig +++ b/sound/soc/codecs/Kconfig @@ -1343,7 +1343,7 @@ config SND_SOC_SSM2305 high-efficiency mono Class-D 
audio power amplifiers. config SND_SOC_SSM2518 - tristate + tristate "Analog Devices SSM2518 Class-D Amplifier" depends on I2C config SND_SOC_SSM2602 @@ -1575,7 +1575,9 @@ config SND_SOC_WCD934X Qualcomm SoCs like SDM845. config SND_SOC_WCD938X + depends on SND_SOC_WCD938X_SDW tristate + depends on SOUNDWIRE || !SOUNDWIRE config SND_SOC_WCD938X_SDW tristate "WCD9380/WCD9385 Codec - SDW" @@ -1831,11 +1833,6 @@ config SND_SOC_ZL38060 which consists of a Digital Signal Processor (DSP), several Digital Audio Interfaces (DAIs), analog outputs, and a block of 14 GPIOs. -config SND_SOC_ZX_AUD96P22 - tristate "ZTE ZX AUD96P22 CODEC" - depends on I2C - select REGMAP_I2C - # Amp config SND_SOC_LM4857 tristate diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile index d656b1405473..8dcea2c4604a 100644 --- a/sound/soc/codecs/Makefile +++ b/sound/soc/codecs/Makefile @@ -585,7 +585,10 @@ obj-$(CONFIG_SND_SOC_WCD_MBHC) += snd-soc-wcd-mbhc.o obj-$(CONFIG_SND_SOC_WCD9335) += snd-soc-wcd9335.o obj-$(CONFIG_SND_SOC_WCD934X) += snd-soc-wcd934x.o obj-$(CONFIG_SND_SOC_WCD938X) += snd-soc-wcd938x.o -obj-$(CONFIG_SND_SOC_WCD938X_SDW) += snd-soc-wcd938x-sdw.o +ifdef CONFIG_SND_SOC_WCD938X_SDW +# avoid link failure by forcing sdw code built-in when needed +obj-$(CONFIG_SND_SOC_WCD938X) += snd-soc-wcd938x-sdw.o +endif obj-$(CONFIG_SND_SOC_WL1273) += snd-soc-wl1273.o obj-$(CONFIG_SND_SOC_WM0010) += snd-soc-wm0010.o obj-$(CONFIG_SND_SOC_WM1250_EV1) += snd-soc-wm1250-ev1.o diff --git a/sound/soc/codecs/cs42l42.c b/sound/soc/codecs/cs42l42.c index eff013f295be..99c022be94a6 100644 --- a/sound/soc/codecs/cs42l42.c +++ b/sound/soc/codecs/cs42l42.c @@ -405,7 +405,7 @@ static const struct regmap_config cs42l42_regmap = { .use_single_write = true, }; -static DECLARE_TLV_DB_SCALE(adc_tlv, -9600, 100, false); +static DECLARE_TLV_DB_SCALE(adc_tlv, -9700, 100, true); static DECLARE_TLV_DB_SCALE(mixer_tlv, -6300, 100, true); static const char * const cs42l42_hpf_freq_text[] = { @@ -425,34 +425,23 @@ static SOC_ENUM_SINGLE_DECL(cs42l42_wnf3_freq_enum, CS42L42_ADC_WNF_HPF_CTL, CS42L42_ADC_WNF_CF_SHIFT, cs42l42_wnf3_freq_text); -static const char * const cs42l42_wnf05_freq_text[] = { - "280Hz", "315Hz", "350Hz", "385Hz", - "420Hz", "455Hz", "490Hz", "525Hz" -}; - -static SOC_ENUM_SINGLE_DECL(cs42l42_wnf05_freq_enum, CS42L42_ADC_WNF_HPF_CTL, - CS42L42_ADC_WNF_CF_SHIFT, - cs42l42_wnf05_freq_text); - static const struct snd_kcontrol_new cs42l42_snd_controls[] = { /* ADC Volume and Filter Controls */ SOC_SINGLE("ADC Notch Switch", CS42L42_ADC_CTL, - CS42L42_ADC_NOTCH_DIS_SHIFT, true, false), + CS42L42_ADC_NOTCH_DIS_SHIFT, true, true), SOC_SINGLE("ADC Weak Force Switch", CS42L42_ADC_CTL, CS42L42_ADC_FORCE_WEAK_VCM_SHIFT, true, false), SOC_SINGLE("ADC Invert Switch", CS42L42_ADC_CTL, CS42L42_ADC_INV_SHIFT, true, false), SOC_SINGLE("ADC Boost Switch", CS42L42_ADC_CTL, CS42L42_ADC_DIG_BOOST_SHIFT, true, false), - SOC_SINGLE_SX_TLV("ADC Volume", CS42L42_ADC_VOLUME, - CS42L42_ADC_VOL_SHIFT, 0xA0, 0x6C, adc_tlv), + SOC_SINGLE_S8_TLV("ADC Volume", CS42L42_ADC_VOLUME, -97, 12, adc_tlv), SOC_SINGLE("ADC WNF Switch", CS42L42_ADC_WNF_HPF_CTL, CS42L42_ADC_WNF_EN_SHIFT, true, false), SOC_SINGLE("ADC HPF Switch", CS42L42_ADC_WNF_HPF_CTL, CS42L42_ADC_HPF_EN_SHIFT, true, false), SOC_ENUM("HPF Corner Freq", cs42l42_hpf_freq_enum), SOC_ENUM("WNF 3dB Freq", cs42l42_wnf3_freq_enum), - SOC_ENUM("WNF 05dB Freq", cs42l42_wnf05_freq_enum), /* DAC Volume and Filter Controls */ SOC_SINGLE("DACA Invert Switch", CS42L42_DAC_CTL1, @@ -471,8 +460,8 @@ 
static const struct snd_soc_dapm_widget cs42l42_dapm_widgets[] = { SND_SOC_DAPM_OUTPUT("HP"), SND_SOC_DAPM_DAC("DAC", NULL, CS42L42_PWR_CTL1, CS42L42_HP_PDN_SHIFT, 1), SND_SOC_DAPM_MIXER("MIXER", CS42L42_PWR_CTL1, CS42L42_MIXER_PDN_SHIFT, 1, NULL, 0), - SND_SOC_DAPM_AIF_IN("SDIN1", NULL, 0, CS42L42_ASP_RX_DAI0_EN, CS42L42_ASP_RX0_CH1_SHIFT, 0), - SND_SOC_DAPM_AIF_IN("SDIN2", NULL, 1, CS42L42_ASP_RX_DAI0_EN, CS42L42_ASP_RX0_CH2_SHIFT, 0), + SND_SOC_DAPM_AIF_IN("SDIN1", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_IN("SDIN2", NULL, 1, SND_SOC_NOPM, 0, 0), /* Playback Requirements */ SND_SOC_DAPM_SUPPLY("ASP DAI0", CS42L42_PWR_CTL1, CS42L42_ASP_DAI_PDN_SHIFT, 1, NULL, 0), @@ -630,6 +619,8 @@ static int cs42l42_pll_config(struct snd_soc_component *component) for (i = 0; i < ARRAY_SIZE(pll_ratio_table); i++) { if (pll_ratio_table[i].sclk == clk) { + cs42l42->pll_config = i; + /* Configure the internal sample rate */ snd_soc_component_update_bits(component, CS42L42_MCLK_CTL, CS42L42_INTERNAL_FS_MASK, @@ -638,14 +629,9 @@ static int cs42l42_pll_config(struct snd_soc_component *component) (pll_ratio_table[i].mclk_int != 24000000)) << CS42L42_INTERNAL_FS_SHIFT); - /* Set the MCLK src (PLL or SCLK) and the divide - * ratio - */ + snd_soc_component_update_bits(component, CS42L42_MCLK_SRC_SEL, - CS42L42_MCLK_SRC_SEL_MASK | CS42L42_MCLKDIV_MASK, - (pll_ratio_table[i].mclk_src_sel - << CS42L42_MCLK_SRC_SEL_SHIFT) | (pll_ratio_table[i].mclk_div << CS42L42_MCLKDIV_SHIFT)); /* Set up the LRCLK */ @@ -681,15 +667,6 @@ static int cs42l42_pll_config(struct snd_soc_component *component) CS42L42_FSYNC_PULSE_WIDTH_MASK, CS42L42_FRAC1_VAL(fsync - 1) << CS42L42_FSYNC_PULSE_WIDTH_SHIFT); - snd_soc_component_update_bits(component, - CS42L42_ASP_FRM_CFG, - CS42L42_ASP_5050_MASK, - CS42L42_ASP_5050_MASK); - /* Set the frame delay to 1.0 SCLK clocks */ - snd_soc_component_update_bits(component, CS42L42_ASP_FRM_CFG, - CS42L42_ASP_FSD_MASK, - CS42L42_ASP_FSD_1_0 << - CS42L42_ASP_FSD_SHIFT); /* Set the sample rates (96k or lower) */ snd_soc_component_update_bits(component, CS42L42_FS_RATE_EN, CS42L42_FS_EN_MASK, @@ -789,7 +766,18 @@ static int cs42l42_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) /* interface format */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: - case SND_SOC_DAIFMT_LEFT_J: + /* + * 5050 mode, frame starts on falling edge of LRCLK, + * frame delayed by 1.0 SCLKs + */ + snd_soc_component_update_bits(component, + CS42L42_ASP_FRM_CFG, + CS42L42_ASP_STP_MASK | + CS42L42_ASP_5050_MASK | + CS42L42_ASP_FSD_MASK, + CS42L42_ASP_5050_MASK | + (CS42L42_ASP_FSD_1_0 << + CS42L42_ASP_FSD_SHIFT)); break; default: return -EINVAL; @@ -819,6 +807,25 @@ static int cs42l42_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) return 0; } +static int cs42l42_dai_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) +{ + struct snd_soc_component *component = dai->component; + struct cs42l42_private *cs42l42 = snd_soc_component_get_drvdata(component); + + /* + * Sample rates < 44.1 kHz would produce an out-of-range SCLK with + * a standard I2S frame. If the machine driver sets SCLK it must be + * legal. 
+ */ + if (cs42l42->sclk) + return 0; + + /* Machine driver has not set a SCLK, limit bottom end to 44.1 kHz */ + return snd_pcm_hw_constraint_minmax(substream->runtime, + SNDRV_PCM_HW_PARAM_RATE, + 44100, 192000); +} + static int cs42l42_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) @@ -832,6 +839,10 @@ static int cs42l42_pcm_hw_params(struct snd_pcm_substream *substream, cs42l42->srate = params_rate(params); cs42l42->bclk = snd_soc_params_to_bclk(params); + /* I2S frame always has 2 channels even for mono audio */ + if (channels == 1) + cs42l42->bclk *= 2; + switch(substream->stream) { case SNDRV_PCM_STREAM_CAPTURE: if (channels == 2) { @@ -855,6 +866,17 @@ static int cs42l42_pcm_hw_params(struct snd_pcm_substream *substream, snd_soc_component_update_bits(component, CS42L42_ASP_RX_DAI0_CH2_AP_RES, CS42L42_ASP_RX_CH_AP_MASK | CS42L42_ASP_RX_CH_RES_MASK, val); + + /* Channel B comes from the last active channel */ + snd_soc_component_update_bits(component, CS42L42_SP_RX_CH_SEL, + CS42L42_SP_RX_CHB_SEL_MASK, + (channels - 1) << CS42L42_SP_RX_CHB_SEL_SHIFT); + + /* Both LRCLK slots must be enabled */ + snd_soc_component_update_bits(component, CS42L42_ASP_RX_DAI0_EN, + CS42L42_ASP_RX0_CH_EN_MASK, + BIT(CS42L42_ASP_RX0_CH1_SHIFT) | + BIT(CS42L42_ASP_RX0_CH2_SHIFT)); break; default: break; @@ -900,13 +922,21 @@ static int cs42l42_mute_stream(struct snd_soc_dai *dai, int mute, int stream) */ regmap_multi_reg_write(cs42l42->regmap, cs42l42_to_osc_seq, ARRAY_SIZE(cs42l42_to_osc_seq)); + + /* Must disconnect PLL before stopping it */ + snd_soc_component_update_bits(component, + CS42L42_MCLK_SRC_SEL, + CS42L42_MCLK_SRC_SEL_MASK, + 0); + usleep_range(100, 200); + snd_soc_component_update_bits(component, CS42L42_PLL_CTL1, CS42L42_PLL_START_MASK, 0); } } else { if (!cs42l42->stream_use) { /* SCLK must be running before codec unmute */ - if ((cs42l42->bclk < 11289600) && (cs42l42->sclk < 11289600)) { + if (pll_ratio_table[cs42l42->pll_config].mclk_src_sel) { snd_soc_component_update_bits(component, CS42L42_PLL_CTL1, CS42L42_PLL_START_MASK, 1); @@ -927,6 +957,12 @@ static int cs42l42_mute_stream(struct snd_soc_dai *dai, int mute, int stream) CS42L42_PLL_LOCK_TIMEOUT_US); if (ret < 0) dev_warn(component->dev, "PLL failed to lock: %d\n", ret); + + /* PLL must be running to drive glitchless switch logic */ + snd_soc_component_update_bits(component, + CS42L42_MCLK_SRC_SEL, + CS42L42_MCLK_SRC_SEL_MASK, + CS42L42_MCLK_SRC_SEL_MASK); } /* Mark SCLK as present, turn off internal oscillator */ @@ -960,8 +996,8 @@ static int cs42l42_mute_stream(struct snd_soc_dai *dai, int mute, int stream) SNDRV_PCM_FMTBIT_S24_LE |\ SNDRV_PCM_FMTBIT_S32_LE ) - static const struct snd_soc_dai_ops cs42l42_ops = { + .startup = cs42l42_dai_startup, .hw_params = cs42l42_pcm_hw_params, .set_fmt = cs42l42_set_dai_fmt, .set_sysclk = cs42l42_set_sysclk, diff --git a/sound/soc/codecs/cs42l42.h b/sound/soc/codecs/cs42l42.h index 206b3c81d3e0..8734f6828f3e 100644 --- a/sound/soc/codecs/cs42l42.h +++ b/sound/soc/codecs/cs42l42.h @@ -653,6 +653,8 @@ /* Page 0x25 Audio Port Registers */ #define CS42L42_SP_RX_CH_SEL (CS42L42_PAGE_25 + 0x01) +#define CS42L42_SP_RX_CHB_SEL_SHIFT 2 +#define CS42L42_SP_RX_CHB_SEL_MASK (3 << CS42L42_SP_RX_CHB_SEL_SHIFT) #define CS42L42_SP_RX_ISOC_CTL (CS42L42_PAGE_25 + 0x02) #define CS42L42_SP_RX_RSYNC_SHIFT 6 @@ -775,6 +777,7 @@ struct cs42l42_private { struct gpio_desc *reset_gpio; struct completion pdn_done; struct snd_soc_jack *jack; + int 
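/*
 * Editorial note -- not part of the patch: the pll_config member below
 * caches the index into pll_ratio_table chosen by cs42l42_pll_config(),
 * so cs42l42_mute_stream() can test pll_ratio_table[i].mclk_src_sel
 * instead of re-deriving "is the PLL in use?" from the bclk/sclk
 * values. The hw_params hunk above doubles the computed bit clock for
 * mono streams because an I2S frame always carries two channel slots;
 * assuming 48 kHz mono S16, snd_soc_params_to_bclk() yields
 * 48000 * 1 * 16 = 768000, while the wire really runs at 1536000
 * (worked example only).
 */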
pll_config; int bclk; u32 sclk; u32 srate; diff --git a/sound/soc/codecs/nau8824.c b/sound/soc/codecs/nau8824.c index 15bd8335f667..db88be48c998 100644 --- a/sound/soc/codecs/nau8824.c +++ b/sound/soc/codecs/nau8824.c @@ -828,36 +828,6 @@ static void nau8824_int_status_clear_all(struct regmap *regmap) } } -static void nau8824_dapm_disable_pin(struct nau8824 *nau8824, const char *pin) -{ - struct snd_soc_dapm_context *dapm = nau8824->dapm; - const char *prefix = dapm->component->name_prefix; - char prefixed_pin[80]; - - if (prefix) { - snprintf(prefixed_pin, sizeof(prefixed_pin), "%s %s", - prefix, pin); - snd_soc_dapm_disable_pin(dapm, prefixed_pin); - } else { - snd_soc_dapm_disable_pin(dapm, pin); - } -} - -static void nau8824_dapm_enable_pin(struct nau8824 *nau8824, const char *pin) -{ - struct snd_soc_dapm_context *dapm = nau8824->dapm; - const char *prefix = dapm->component->name_prefix; - char prefixed_pin[80]; - - if (prefix) { - snprintf(prefixed_pin, sizeof(prefixed_pin), "%s %s", - prefix, pin); - snd_soc_dapm_force_enable_pin(dapm, prefixed_pin); - } else { - snd_soc_dapm_force_enable_pin(dapm, pin); - } -} - static void nau8824_eject_jack(struct nau8824 *nau8824) { struct snd_soc_dapm_context *dapm = nau8824->dapm; @@ -866,8 +836,8 @@ static void nau8824_eject_jack(struct nau8824 *nau8824) /* Clear all interruption status */ nau8824_int_status_clear_all(regmap); - nau8824_dapm_disable_pin(nau8824, "SAR"); - nau8824_dapm_disable_pin(nau8824, "MICBIAS"); + snd_soc_dapm_disable_pin(dapm, "SAR"); + snd_soc_dapm_disable_pin(dapm, "MICBIAS"); snd_soc_dapm_sync(dapm); /* Enable the insertion interruption, disable the ejection @@ -897,8 +867,8 @@ static void nau8824_jdet_work(struct work_struct *work) struct regmap *regmap = nau8824->regmap; int adc_value, event = 0, event_mask = 0; - nau8824_dapm_enable_pin(nau8824, "MICBIAS"); - nau8824_dapm_enable_pin(nau8824, "SAR"); + snd_soc_dapm_enable_pin(dapm, "MICBIAS"); + snd_soc_dapm_enable_pin(dapm, "SAR"); snd_soc_dapm_sync(dapm); msleep(100); @@ -909,8 +879,8 @@ static void nau8824_jdet_work(struct work_struct *work) if (adc_value < HEADSET_SARADC_THD) { event |= SND_JACK_HEADPHONE; - nau8824_dapm_disable_pin(nau8824, "SAR"); - nau8824_dapm_disable_pin(nau8824, "MICBIAS"); + snd_soc_dapm_disable_pin(dapm, "SAR"); + snd_soc_dapm_disable_pin(dapm, "MICBIAS"); snd_soc_dapm_sync(dapm); } else { event |= SND_JACK_HEADSET; diff --git a/sound/soc/codecs/rt5631.c b/sound/soc/codecs/rt5631.c index 3000bc128b5b..38356ea2bd6e 100644 --- a/sound/soc/codecs/rt5631.c +++ b/sound/soc/codecs/rt5631.c @@ -1695,6 +1695,8 @@ static const struct regmap_config rt5631_regmap_config = { .reg_defaults = rt5631_reg, .num_reg_defaults = ARRAY_SIZE(rt5631_reg), .cache_type = REGCACHE_RBTREE, + .use_single_read = true, + .use_single_write = true, }; static int rt5631_i2c_probe(struct i2c_client *i2c, diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c index e4c91571abae..51ecaa2abcd1 100644 --- a/sound/soc/codecs/rt5682.c +++ b/sound/soc/codecs/rt5682.c @@ -44,6 +44,7 @@ static const struct reg_sequence patch_list[] = { {RT5682_I2C_CTRL, 0x000f}, {RT5682_PLL2_INTERNAL, 0x8266}, {RT5682_SAR_IL_CMD_3, 0x8365}, + {RT5682_SAR_IL_CMD_6, 0x0180}, }; void rt5682_apply_patch_list(struct rt5682_priv *rt5682, struct device *dev) @@ -973,10 +974,14 @@ int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert) rt5682_enable_push_button_irq(component, false); snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1, RT5682_TRIG_JD_MASK, 
RT5682_TRIG_JD_LOW); - if (!snd_soc_dapm_get_pin_status(dapm, "MICBIAS")) + if (!snd_soc_dapm_get_pin_status(dapm, "MICBIAS") && + !snd_soc_dapm_get_pin_status(dapm, "PLL1") && + !snd_soc_dapm_get_pin_status(dapm, "PLL2B")) snd_soc_component_update_bits(component, RT5682_PWR_ANLG_1, RT5682_PWR_MB, 0); - if (!snd_soc_dapm_get_pin_status(dapm, "Vref2")) + if (!snd_soc_dapm_get_pin_status(dapm, "Vref2") && + !snd_soc_dapm_get_pin_status(dapm, "PLL1") && + !snd_soc_dapm_get_pin_status(dapm, "PLL2B")) snd_soc_component_update_bits(component, RT5682_PWR_ANLG_1, RT5682_PWR_VREF2, 0); snd_soc_component_update_bits(component, RT5682_PWR_ANLG_3, diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c index 51870d50f419..52d2c968b5c0 100644 --- a/sound/soc/codecs/tlv320aic31xx.c +++ b/sound/soc/codecs/tlv320aic31xx.c @@ -35,6 +35,9 @@ #include "tlv320aic31xx.h" +static int aic31xx_set_jack(struct snd_soc_component *component, + struct snd_soc_jack *jack, void *data); + static const struct reg_default aic31xx_reg_defaults[] = { { AIC31XX_CLKMUX, 0x00 }, { AIC31XX_PLLPR, 0x11 }, @@ -1256,6 +1259,13 @@ static int aic31xx_power_on(struct snd_soc_component *component) return ret; } + /* + * The jack detection configuration is in the same register + * that is used to report jack detect status so is volatile + * and not covered by the cache sync, restore it separately. + */ + aic31xx_set_jack(component, aic31xx->jack, NULL); + return 0; } @@ -1604,6 +1614,8 @@ static int aic31xx_i2c_probe(struct i2c_client *i2c, ret); return ret; } + regcache_cache_only(aic31xx->regmap, true); + aic31xx->dev = &i2c->dev; aic31xx->irq = i2c->irq; diff --git a/sound/soc/codecs/tlv320aic31xx.h b/sound/soc/codecs/tlv320aic31xx.h index 81952984613d..2513922a0292 100644 --- a/sound/soc/codecs/tlv320aic31xx.h +++ b/sound/soc/codecs/tlv320aic31xx.h @@ -151,8 +151,8 @@ struct aic31xx_pdata { #define AIC31XX_WORD_LEN_24BITS 0x02 #define AIC31XX_WORD_LEN_32BITS 0x03 #define AIC31XX_IFACE1_MASTER_MASK GENMASK(3, 2) -#define AIC31XX_BCLK_MASTER BIT(2) -#define AIC31XX_WCLK_MASTER BIT(3) +#define AIC31XX_BCLK_MASTER BIT(3) +#define AIC31XX_WCLK_MASTER BIT(2) /* AIC31XX_DATA_OFFSET */ #define AIC31XX_DATA_OFFSET_MASK GENMASK(7, 0) diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c index c63b717040ed..2e9175b37dc9 100644 --- a/sound/soc/codecs/tlv320aic32x4.c +++ b/sound/soc/codecs/tlv320aic32x4.c @@ -250,8 +250,8 @@ static DECLARE_TLV_DB_SCALE(tlv_pcm, -6350, 50, 0); static DECLARE_TLV_DB_SCALE(tlv_driver_gain, -600, 100, 0); /* -12dB min, 0.5dB steps */ static DECLARE_TLV_DB_SCALE(tlv_adc_vol, -1200, 50, 0); - -static DECLARE_TLV_DB_LINEAR(tlv_spk_vol, TLV_DB_GAIN_MUTE, 0); +/* -6dB min, 1dB steps */ +static DECLARE_TLV_DB_SCALE(tlv_tas_driver_gain, -5850, 50, 0); static DECLARE_TLV_DB_SCALE(tlv_amp_vol, 0, 600, 1); static const char * const lo_cm_text[] = { @@ -682,11 +682,20 @@ static int aic32x4_set_dosr(struct snd_soc_component *component, u16 dosr) static int aic32x4_set_processing_blocks(struct snd_soc_component *component, u8 r_block, u8 p_block) { - if (r_block > 18 || p_block > 25) - return -EINVAL; + struct aic32x4_priv *aic32x4 = snd_soc_component_get_drvdata(component); + + if (aic32x4->type == AIC32X4_TYPE_TAS2505) { + if (r_block || p_block > 3) + return -EINVAL; - snd_soc_component_write(component, AIC32X4_ADCSPB, r_block); - snd_soc_component_write(component, AIC32X4_DACSPB, p_block); + snd_soc_component_write(component, AIC32X4_DACSPB, p_block); + } else { /* AIC32x4 
*/ + if (r_block > 18 || p_block > 25) + return -EINVAL; + + snd_soc_component_write(component, AIC32X4_ADCSPB, r_block); + snd_soc_component_write(component, AIC32X4_DACSPB, p_block); + } return 0; } @@ -695,6 +704,7 @@ static int aic32x4_setup_clocks(struct snd_soc_component *component, unsigned int sample_rate, unsigned int channels, unsigned int bit_depth) { + struct aic32x4_priv *aic32x4 = snd_soc_component_get_drvdata(component); u8 aosr; u16 dosr; u8 adc_resource_class, dac_resource_class; @@ -721,19 +731,28 @@ static int aic32x4_setup_clocks(struct snd_soc_component *component, adc_resource_class = 6; dac_resource_class = 8; dosr_increment = 8; - aic32x4_set_processing_blocks(component, 1, 1); + if (aic32x4->type == AIC32X4_TYPE_TAS2505) + aic32x4_set_processing_blocks(component, 0, 1); + else + aic32x4_set_processing_blocks(component, 1, 1); } else if (sample_rate <= 96000) { aosr = 64; adc_resource_class = 6; dac_resource_class = 8; dosr_increment = 4; - aic32x4_set_processing_blocks(component, 1, 9); + if (aic32x4->type == AIC32X4_TYPE_TAS2505) + aic32x4_set_processing_blocks(component, 0, 1); + else + aic32x4_set_processing_blocks(component, 1, 9); } else if (sample_rate == 192000) { aosr = 32; adc_resource_class = 3; dac_resource_class = 4; dosr_increment = 2; - aic32x4_set_processing_blocks(component, 13, 19); + if (aic32x4->type == AIC32X4_TYPE_TAS2505) + aic32x4_set_processing_blocks(component, 0, 1); + else + aic32x4_set_processing_blocks(component, 13, 19); } else { dev_err(component->dev, "Sampling rate not supported\n"); return -EINVAL; @@ -1063,21 +1082,20 @@ static const struct snd_soc_component_driver soc_component_dev_aic32x4 = { }; static const struct snd_kcontrol_new aic32x4_tas2505_snd_controls[] = { - SOC_DOUBLE_R_S_TLV("PCM Playback Volume", AIC32X4_LDACVOL, - AIC32X4_LDACVOL, 0, -0x7f, 0x30, 7, 0, tlv_pcm), + SOC_SINGLE_S8_TLV("PCM Playback Volume", + AIC32X4_LDACVOL, -0x7f, 0x30, tlv_pcm), SOC_ENUM("DAC Playback PowerTune Switch", l_ptm_enum), - SOC_DOUBLE_R_S_TLV("HP Driver Playback Volume", AIC32X4_HPLGAIN, - AIC32X4_HPLGAIN, 0, -0x6, 0x1d, 5, 0, - tlv_driver_gain), - SOC_DOUBLE_R("HP DAC Playback Switch", AIC32X4_HPLGAIN, - AIC32X4_HPLGAIN, 6, 0x01, 1), - SOC_SINGLE("Auto-mute Switch", AIC32X4_DACMUTE, 4, 7, 0), + SOC_SINGLE_TLV("HP Driver Gain Volume", + AIC32X4_HPLGAIN, 0, 0x74, 1, tlv_tas_driver_gain), + SOC_SINGLE("HP DAC Playback Switch", AIC32X4_HPLGAIN, 6, 1, 1), - SOC_SINGLE_RANGE_TLV("Speaker Driver Playback Volume", TAS2505_SPKVOL1, - 0, 0, 117, 1, tlv_spk_vol), - SOC_SINGLE_TLV("Speaker Amplifier Playback Volume", TAS2505_SPKVOL2, - 4, 5, 0, tlv_amp_vol), + SOC_SINGLE_TLV("Speaker Driver Playback Volume", + TAS2505_SPKVOL1, 0, 0x74, 1, tlv_tas_driver_gain), + SOC_SINGLE_TLV("Speaker Amplifier Playback Volume", + TAS2505_SPKVOL2, 4, 5, 0, tlv_amp_vol), + + SOC_SINGLE("Auto-mute Switch", AIC32X4_DACMUTE, 4, 7, 0), }; static const struct snd_kcontrol_new hp_output_mixer_controls[] = { diff --git a/sound/soc/codecs/wcd938x.c b/sound/soc/codecs/wcd938x.c index 78b76eceff8f..2fcc97370be2 100644 --- a/sound/soc/codecs/wcd938x.c +++ b/sound/soc/codecs/wcd938x.c @@ -3317,13 +3317,6 @@ static int wcd938x_soc_codec_probe(struct snd_soc_component *component) (WCD938X_DIGITAL_INTR_LEVEL_0 + i), 0); } - ret = wcd938x_irq_init(wcd938x, component->dev); - if (ret) { - dev_err(component->dev, "%s: IRQ init failed: %d\n", - __func__, ret); - return ret; - } - wcd938x->hphr_pdm_wd_int = regmap_irq_get_virq(wcd938x->irq_chip, WCD938X_IRQ_HPHR_PDM_WD_INT); 
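/*
 * Editorial note -- not part of the patch: wcd938x_irq_init() is
 * removed from the component probe above and re-added in
 * wcd938x_bind() below, after the regmap is created but before the
 * SoundWire rx/tx sub-devices get their slave_irq pointers.
 * Previously slave_irq was copied from wcd938x->virq before the IRQ
 * domain existed, leaving the sub-devices with an unset virq.
 */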
wcd938x->hphl_pdm_wd_int = regmap_irq_get_virq(wcd938x->irq_chip, @@ -3553,7 +3546,6 @@ static int wcd938x_bind(struct device *dev) } wcd938x->sdw_priv[AIF1_PB] = dev_get_drvdata(wcd938x->rxdev); wcd938x->sdw_priv[AIF1_PB]->wcd938x = wcd938x; - wcd938x->sdw_priv[AIF1_PB]->slave_irq = wcd938x->virq; wcd938x->txdev = wcd938x_sdw_device_get(wcd938x->txnode); if (!wcd938x->txdev) { @@ -3562,7 +3554,6 @@ static int wcd938x_bind(struct device *dev) } wcd938x->sdw_priv[AIF1_CAP] = dev_get_drvdata(wcd938x->txdev); wcd938x->sdw_priv[AIF1_CAP]->wcd938x = wcd938x; - wcd938x->sdw_priv[AIF1_CAP]->slave_irq = wcd938x->virq; wcd938x->tx_sdw_dev = dev_to_sdw_dev(wcd938x->txdev); if (!wcd938x->tx_sdw_dev) { dev_err(dev, "could not get txslave with matching of dev\n"); @@ -3595,6 +3586,15 @@ static int wcd938x_bind(struct device *dev) return PTR_ERR(wcd938x->regmap); } + ret = wcd938x_irq_init(wcd938x, dev); + if (ret) { + dev_err(dev, "%s: IRQ init failed: %d\n", __func__, ret); + return ret; + } + + wcd938x->sdw_priv[AIF1_PB]->slave_irq = wcd938x->virq; + wcd938x->sdw_priv[AIF1_CAP]->slave_irq = wcd938x->virq; + ret = wcd938x_set_micbias_data(wcd938x); if (ret < 0) { dev_err(dev, "%s: bad micbias pdata\n", __func__); diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c index 37aa020f23f6..fe15cbc7bcaf 100644 --- a/sound/soc/codecs/wm_adsp.c +++ b/sound/soc/codecs/wm_adsp.c @@ -282,6 +282,7 @@ /* * HALO_CCM_CORE_CONTROL */ +#define HALO_CORE_RESET 0x00000200 #define HALO_CORE_EN 0x00000001 /* @@ -746,7 +747,6 @@ static void wm_adsp2_init_debugfs(struct wm_adsp *dsp, static void wm_adsp2_cleanup_debugfs(struct wm_adsp *dsp) { wm_adsp_debugfs_clear(dsp); - debugfs_remove_recursive(dsp->debugfs_root); } #else static inline void wm_adsp2_init_debugfs(struct wm_adsp *dsp, @@ -1213,7 +1213,7 @@ static int wm_coeff_tlv_get(struct snd_kcontrol *kctl, mutex_lock(&ctl->dsp->pwr_lock); - ret = wm_coeff_read_ctrl_raw(ctl, ctl->cache, size); + ret = wm_coeff_read_ctrl(ctl, ctl->cache, size); if (!ret && copy_to_user(bytes, ctl->cache, size)) ret = -EFAULT; @@ -3333,7 +3333,8 @@ static int wm_halo_start_core(struct wm_adsp *dsp) { return regmap_update_bits(dsp->regmap, dsp->base + HALO_CCM_CORE_CONTROL, - HALO_CORE_EN, HALO_CORE_EN); + HALO_CORE_RESET | HALO_CORE_EN, + HALO_CORE_RESET | HALO_CORE_EN); } static void wm_halo_stop_core(struct wm_adsp *dsp) diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c index 4124aa2fc247..905c7965f653 100644 --- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c +++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c @@ -127,7 +127,7 @@ static void sst_fill_alloc_params(struct snd_pcm_substream *substream, snd_pcm_uframes_t period_size; ssize_t periodbytes; ssize_t buffer_bytes = snd_pcm_lib_buffer_bytes(substream); - u32 buffer_addr = virt_to_phys(substream->dma_buffer.area); + u32 buffer_addr = virt_to_phys(substream->runtime->dma_area); channels = substream->runtime->channels; period_size = substream->runtime->period_size; @@ -233,7 +233,6 @@ static int sst_platform_alloc_stream(struct snd_pcm_substream *substream, /* set codec params and inform SST driver the same */ sst_fill_pcm_params(substream, &param); sst_fill_alloc_params(substream, &alloc_params); - substream->runtime->dma_area = substream->dma_buffer.area; str_params.sparams = param; str_params.aparams = alloc_params; str_params.codec = SST_CODEC_TYPE_PCM; diff --git a/sound/soc/intel/boards/sof_da7219_max98373.c b/sound/soc/intel/boards/sof_da7219_max98373.c index
896251d742fe..b7b3b0bf994a 100644 --- a/sound/soc/intel/boards/sof_da7219_max98373.c +++ b/sound/soc/intel/boards/sof_da7219_max98373.c @@ -404,7 +404,7 @@ static int audio_probe(struct platform_device *pdev) return -ENOMEM; /* By default dais[0] is configured for max98373 */ - if (!strcmp(pdev->name, "sof_da7219_max98360a")) { + if (!strcmp(pdev->name, "sof_da7219_mx98360a")) { dais[0] = (struct snd_soc_dai_link) { .name = "SSP1-Codec", .id = 0, diff --git a/sound/soc/intel/boards/sof_sdw_max98373.c b/sound/soc/intel/boards/sof_sdw_max98373.c index 0e7ed906b341..25daef910aee 100644 --- a/sound/soc/intel/boards/sof_sdw_max98373.c +++ b/sound/soc/intel/boards/sof_sdw_max98373.c @@ -55,43 +55,68 @@ static int spk_init(struct snd_soc_pcm_runtime *rtd) return ret; } -static int max98373_sdw_trigger(struct snd_pcm_substream *substream, int cmd) +static int mx8373_enable_spk_pin(struct snd_pcm_substream *substream, bool enable) { + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); + struct snd_soc_dai *codec_dai; + struct snd_soc_dai *cpu_dai; int ret; + int j; - switch (cmd) { - case SNDRV_PCM_TRIGGER_START: - case SNDRV_PCM_TRIGGER_RESUME: - case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: - /* enable max98373 first */ - ret = max_98373_trigger(substream, cmd); - if (ret < 0) - break; - - ret = sdw_trigger(substream, cmd); - break; - case SNDRV_PCM_TRIGGER_STOP: - case SNDRV_PCM_TRIGGER_SUSPEND: - case SNDRV_PCM_TRIGGER_PAUSE_PUSH: - ret = sdw_trigger(substream, cmd); - if (ret < 0) - break; - - ret = max_98373_trigger(substream, cmd); - break; - default: - ret = -EINVAL; - break; + /* set spk pin by playback only */ + if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) + return 0; + + cpu_dai = asoc_rtd_to_cpu(rtd, 0); + for_each_rtd_codec_dais(rtd, j, codec_dai) { + struct snd_soc_dapm_context *dapm = + snd_soc_component_get_dapm(cpu_dai->component); + char pin_name[16]; + + snprintf(pin_name, ARRAY_SIZE(pin_name), "%s Spk", + codec_dai->component->name_prefix); + + if (enable) + ret = snd_soc_dapm_enable_pin(dapm, pin_name); + else + ret = snd_soc_dapm_disable_pin(dapm, pin_name); + + if (!ret) + snd_soc_dapm_sync(dapm); } - return ret; + return 0; +} + +static int mx8373_sdw_prepare(struct snd_pcm_substream *substream) +{ + int ret = 0; + + /* according to soc_pcm_prepare dai link prepare is called first */ + ret = sdw_prepare(substream); + if (ret < 0) + return ret; + + return mx8373_enable_spk_pin(substream, true); +} + +static int mx8373_sdw_hw_free(struct snd_pcm_substream *substream) +{ + int ret = 0; + + /* according to soc_pcm_hw_free dai link free is called first */ + ret = sdw_hw_free(substream); + if (ret < 0) + return ret; + + return mx8373_enable_spk_pin(substream, false); } static const struct snd_soc_ops max_98373_sdw_ops = { .startup = sdw_startup, - .prepare = sdw_prepare, - .trigger = max98373_sdw_trigger, - .hw_free = sdw_hw_free, + .prepare = mx8373_sdw_prepare, + .trigger = sdw_trigger, + .hw_free = mx8373_sdw_hw_free, .shutdown = sdw_shutdown, }; diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c index c2a5933bfcfc..700a18561a94 100644 --- a/sound/soc/kirkwood/kirkwood-dma.c +++ b/sound/soc/kirkwood/kirkwood-dma.c @@ -104,8 +104,6 @@ static int kirkwood_dma_open(struct snd_soc_component *component, int err; struct snd_pcm_runtime *runtime = substream->runtime; struct kirkwood_dma_data *priv = kirkwood_priv(substream); - const struct mbus_dram_target_info *dram; - unsigned long addr; snd_soc_set_runtime_hwparams(substream, 
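/*
 * Editorial note -- not part of the patch: the MBus window setup is
 * leaving this open() callback because substream->dma_buffer.addr is
 * no longer guaranteed to be the buffer the stream actually uses once
 * the core manages buffer allocation; the windows are instead
 * programmed in the new hw_params() callback below, where
 * substream->runtime->dma_addr is final. The same
 * dma_buffer.addr -> runtime->dma_addr substitution recurs in the AMD
 * ACP, uniphier and xlnx hunks elsewhere in this series.
 */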
&kirkwood_dma_snd_hw); @@ -142,20 +140,14 @@ static int kirkwood_dma_open(struct snd_soc_component *component, writel((unsigned int)-1, priv->io + KIRKWOOD_ERR_MASK); } - dram = mv_mbus_dram_info(); - addr = substream->dma_buffer.addr; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { if (priv->substream_play) return -EBUSY; priv->substream_play = substream; - kirkwood_dma_conf_mbus_windows(priv->io, - KIRKWOOD_PLAYBACK_WIN, addr, dram); } else { if (priv->substream_rec) return -EBUSY; priv->substream_rec = substream; - kirkwood_dma_conf_mbus_windows(priv->io, - KIRKWOOD_RECORD_WIN, addr, dram); } return 0; @@ -182,6 +174,23 @@ static int kirkwood_dma_close(struct snd_soc_component *component, return 0; } +static int kirkwood_dma_hw_params(struct snd_soc_component *component, + struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params) +{ + struct kirkwood_dma_data *priv = kirkwood_priv(substream); + const struct mbus_dram_target_info *dram = mv_mbus_dram_info(); + unsigned long addr = substream->runtime->dma_addr; + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + kirkwood_dma_conf_mbus_windows(priv->io, + KIRKWOOD_PLAYBACK_WIN, addr, dram); + else + kirkwood_dma_conf_mbus_windows(priv->io, + KIRKWOOD_RECORD_WIN, addr, dram); + return 0; +} + static int kirkwood_dma_prepare(struct snd_soc_component *component, struct snd_pcm_substream *substream) { @@ -246,6 +255,7 @@ const struct snd_soc_component_driver kirkwood_soc_component = { .name = DRV_NAME, .open = kirkwood_dma_open, .close = kirkwood_dma_close, + .hw_params = kirkwood_dma_hw_params, .prepare = kirkwood_dma_prepare, .pointer = kirkwood_dma_pointer, .pcm_construct = kirkwood_dma_new, diff --git a/sound/soc/mediatek/mt8183/mt8183-dai-adda.c b/sound/soc/mediatek/mt8183/mt8183-dai-adda.c index 2b758a18c2ea..5b8a274419ed 100644 --- a/sound/soc/mediatek/mt8183/mt8183-dai-adda.c +++ b/sound/soc/mediatek/mt8183/mt8183-dai-adda.c @@ -341,6 +341,7 @@ static int set_mtkaif_rx(struct mtk_base_afe *afe) case MT8183_MTKAIF_PROTOCOL_1: regmap_write(afe->regmap, AFE_AUD_PAD_TOP, 0x31); regmap_write(afe->regmap, AFE_ADDA_MTKAIF_CFG0, 0x0); + break; default: break; } diff --git a/sound/soc/soc-component.c b/sound/soc/soc-component.c index 3a5e84e16a87..c8dfd0de30e4 100644 --- a/sound/soc/soc-component.c +++ b/sound/soc/soc-component.c @@ -148,86 +148,75 @@ int snd_soc_component_set_bias_level(struct snd_soc_component *component, return soc_component_ret(component, ret); } -static int soc_component_pin(struct snd_soc_component *component, - const char *pin, - int (*pin_func)(struct snd_soc_dapm_context *dapm, - const char *pin)) -{ - struct snd_soc_dapm_context *dapm = - snd_soc_component_get_dapm(component); - char *full_name; - int ret; - - if (!component->name_prefix) { - ret = pin_func(dapm, pin); - goto end; - } - - full_name = kasprintf(GFP_KERNEL, "%s %s", component->name_prefix, pin); - if (!full_name) { - ret = -ENOMEM; - goto end; - } - - ret = pin_func(dapm, full_name); - kfree(full_name); -end: - return soc_component_ret(component, ret); -} - int snd_soc_component_enable_pin(struct snd_soc_component *component, const char *pin) { - return soc_component_pin(component, pin, snd_soc_dapm_enable_pin); + struct snd_soc_dapm_context *dapm = + snd_soc_component_get_dapm(component); + return snd_soc_dapm_enable_pin(dapm, pin); } EXPORT_SYMBOL_GPL(snd_soc_component_enable_pin); int snd_soc_component_enable_pin_unlocked(struct snd_soc_component *component, const char *pin) { - return soc_component_pin(component, pin, 
snd_soc_dapm_enable_pin_unlocked); + struct snd_soc_dapm_context *dapm = + snd_soc_component_get_dapm(component); + return snd_soc_dapm_enable_pin_unlocked(dapm, pin); } EXPORT_SYMBOL_GPL(snd_soc_component_enable_pin_unlocked); int snd_soc_component_disable_pin(struct snd_soc_component *component, const char *pin) { - return soc_component_pin(component, pin, snd_soc_dapm_disable_pin); + struct snd_soc_dapm_context *dapm = + snd_soc_component_get_dapm(component); + return snd_soc_dapm_disable_pin(dapm, pin); } EXPORT_SYMBOL_GPL(snd_soc_component_disable_pin); int snd_soc_component_disable_pin_unlocked(struct snd_soc_component *component, const char *pin) { - return soc_component_pin(component, pin, snd_soc_dapm_disable_pin_unlocked); + struct snd_soc_dapm_context *dapm = + snd_soc_component_get_dapm(component); + return snd_soc_dapm_disable_pin_unlocked(dapm, pin); } EXPORT_SYMBOL_GPL(snd_soc_component_disable_pin_unlocked); int snd_soc_component_nc_pin(struct snd_soc_component *component, const char *pin) { - return soc_component_pin(component, pin, snd_soc_dapm_nc_pin); + struct snd_soc_dapm_context *dapm = + snd_soc_component_get_dapm(component); + return snd_soc_dapm_nc_pin(dapm, pin); } EXPORT_SYMBOL_GPL(snd_soc_component_nc_pin); int snd_soc_component_nc_pin_unlocked(struct snd_soc_component *component, const char *pin) { - return soc_component_pin(component, pin, snd_soc_dapm_nc_pin_unlocked); + struct snd_soc_dapm_context *dapm = + snd_soc_component_get_dapm(component); + return snd_soc_dapm_nc_pin_unlocked(dapm, pin); } EXPORT_SYMBOL_GPL(snd_soc_component_nc_pin_unlocked); int snd_soc_component_get_pin_status(struct snd_soc_component *component, const char *pin) { - return soc_component_pin(component, pin, snd_soc_dapm_get_pin_status); + struct snd_soc_dapm_context *dapm = + snd_soc_component_get_dapm(component); + return snd_soc_dapm_get_pin_status(dapm, pin); } EXPORT_SYMBOL_GPL(snd_soc_component_get_pin_status); int snd_soc_component_force_enable_pin(struct snd_soc_component *component, const char *pin) { - return soc_component_pin(component, pin, snd_soc_dapm_force_enable_pin); + struct snd_soc_dapm_context *dapm = + snd_soc_component_get_dapm(component); + return snd_soc_dapm_force_enable_pin(dapm, pin); } EXPORT_SYMBOL_GPL(snd_soc_component_force_enable_pin); @@ -235,7 +224,9 @@ int snd_soc_component_force_enable_pin_unlocked( struct snd_soc_component *component, const char *pin) { - return soc_component_pin(component, pin, snd_soc_dapm_force_enable_pin_unlocked); + struct snd_soc_dapm_context *dapm = + snd_soc_component_get_dapm(component); + return snd_soc_dapm_force_enable_pin_unlocked(dapm, pin); } EXPORT_SYMBOL_GPL(snd_soc_component_force_enable_pin_unlocked); diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index 46513bb97904..d1c570ca21ea 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c @@ -1015,6 +1015,7 @@ out: static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); int ret = -EINVAL, _ret = 0; int rollback = 0; @@ -1055,14 +1056,23 @@ start_err: case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: - ret = snd_soc_pcm_dai_trigger(substream, cmd, rollback); - if (ret < 0) - break; + if (rtd->dai_link->stop_dma_first) { + ret = snd_soc_pcm_component_trigger(substream, cmd, rollback); + if (ret < 0) + break; - ret = snd_soc_pcm_component_trigger(substream, cmd, rollback); - if (ret < 0) - break; + ret = 
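/*
 * Editorial note -- not part of the patch: on the
 * stop/suspend/pause-push path the default order is DAI first, then
 * component (DMA). Links that set dai_link->stop_dma_first get the
 * reverse order, which the AMD ACP machine drivers earlier in this
 * series rely on to halt the DMA engine before the DAI shuts down.
 * A machine driver opts in per link; minimal sketch, other required
 * fields elided:
 *
 *   static struct snd_soc_dai_link link = {
 *           .name           = "Playback",
 *           .dpcm_playback  = 1,
 *           .stop_dma_first = 1,
 *   };
 */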
snd_soc_pcm_dai_trigger(substream, cmd, rollback); + if (ret < 0) + break; + } else { + ret = snd_soc_pcm_dai_trigger(substream, cmd, rollback); + if (ret < 0) + break; + ret = snd_soc_pcm_component_trigger(substream, cmd, rollback); + if (ret < 0) + break; + } ret = snd_soc_link_trigger(substream, cmd, rollback); break; } diff --git a/sound/soc/sof/intel/Kconfig b/sound/soc/sof/intel/Kconfig index 4bce89b5ea40..4447f515e8b1 100644 --- a/sound/soc/sof/intel/Kconfig +++ b/sound/soc/sof/intel/Kconfig @@ -278,6 +278,8 @@ config SND_SOC_SOF_HDA config SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE tristate + select SOUNDWIRE_INTEL if SND_SOC_SOF_INTEL_SOUNDWIRE + select SND_INTEL_SOUNDWIRE_ACPI if SND_SOC_SOF_INTEL_SOUNDWIRE config SND_SOC_SOF_INTEL_SOUNDWIRE tristate "SOF support for SoundWire" @@ -285,8 +287,6 @@ config SND_SOC_SOF_INTEL_SOUNDWIRE depends on SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE depends on ACPI && SOUNDWIRE depends on !(SOUNDWIRE=m && SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE=y) - select SOUNDWIRE_INTEL - select SND_INTEL_SOUNDWIRE_ACPI help This adds support for SoundWire with Sound Open Firmware for Intel(R) platforms. diff --git a/sound/soc/sof/intel/hda-ipc.c b/sound/soc/sof/intel/hda-ipc.c index c91aa951df22..acfeca42604c 100644 --- a/sound/soc/sof/intel/hda-ipc.c +++ b/sound/soc/sof/intel/hda-ipc.c @@ -107,8 +107,8 @@ void hda_dsp_ipc_get_reply(struct snd_sof_dev *sdev) } else { /* reply correct size ? */ if (reply.hdr.size != msg->reply_size && - /* getter payload is never known upfront */ - !(reply.hdr.cmd & SOF_IPC_GLB_PROBE)) { + /* getter payload is never known upfront */ + ((reply.hdr.cmd & SOF_GLB_TYPE_MASK) != SOF_IPC_GLB_PROBE)) { dev_err(sdev->dev, "error: reply expected %zu got %u bytes\n", msg->reply_size, reply.hdr.size); ret = -EINVAL; diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c index e1e368ff2b12..891e6e1b9121 100644 --- a/sound/soc/sof/intel/hda.c +++ b/sound/soc/sof/intel/hda.c @@ -187,12 +187,16 @@ static int hda_sdw_probe(struct snd_sof_dev *sdev) int hda_sdw_startup(struct snd_sof_dev *sdev) { struct sof_intel_hda_dev *hdev; + struct snd_sof_pdata *pdata = sdev->pdata; hdev = sdev->pdata->hw_pdata; if (!hdev->sdw) return 0; + if (pdata->machine && !pdata->machine->mach_params.link_mask) + return 0; + return sdw_intel_startup(hdev->sdw); } @@ -1002,6 +1006,14 @@ static int hda_generic_machine_select(struct snd_sof_dev *sdev) hda_mach->mach_params.dmic_num = dmic_num; pdata->machine = hda_mach; pdata->tplg_filename = tplg_filename; + + if (codec_num == 2) { + /* + * Prevent SoundWire links from starting when an external + * HDaudio codec is used + */ + hda_mach->mach_params.link_mask = 0; + } } } diff --git a/sound/soc/sof/intel/pci-tgl.c b/sound/soc/sof/intel/pci-tgl.c index a00262184efa..d04ce84fe7cc 100644 --- a/sound/soc/sof/intel/pci-tgl.c +++ b/sound/soc/sof/intel/pci-tgl.c @@ -89,6 +89,7 @@ static const struct sof_dev_desc adls_desc = { static const struct sof_dev_desc adl_desc = { .machines = snd_soc_acpi_intel_adl_machines, .alt_machines = snd_soc_acpi_intel_adl_sdw_machines, + .use_acpi_target_states = true, .resindex_lpe_base = 0, .resindex_pcicfg_base = -1, .resindex_imr_base = -1, diff --git a/sound/soc/tegra/tegra_pcm.c b/sound/soc/tegra/tegra_pcm.c index 573374b89b10..d3276b4595af 100644 --- a/sound/soc/tegra/tegra_pcm.c +++ b/sound/soc/tegra/tegra_pcm.c @@ -213,19 +213,19 @@ snd_pcm_uframes_t tegra_pcm_pointer(struct snd_soc_component *component, } EXPORT_SYMBOL_GPL(tegra_pcm_pointer); -static int 
tegra_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream, +static int tegra_pcm_preallocate_dma_buffer(struct device *dev, struct snd_pcm *pcm, int stream, size_t size) { struct snd_pcm_substream *substream = pcm->streams[stream].substream; struct snd_dma_buffer *buf = &substream->dma_buffer; - buf->area = dma_alloc_wc(pcm->card->dev, size, &buf->addr, GFP_KERNEL); + buf->area = dma_alloc_wc(dev, size, &buf->addr, GFP_KERNEL); if (!buf->area) return -ENOMEM; buf->private_data = NULL; buf->dev.type = SNDRV_DMA_TYPE_DEV; - buf->dev.dev = pcm->card->dev; + buf->dev.dev = dev; buf->bytes = size; return 0; @@ -244,31 +244,28 @@ static void tegra_pcm_deallocate_dma_buffer(struct snd_pcm *pcm, int stream) if (!buf->area) return; - dma_free_wc(pcm->card->dev, buf->bytes, buf->area, buf->addr); + dma_free_wc(buf->dev.dev, buf->bytes, buf->area, buf->addr); buf->area = NULL; } -static int tegra_pcm_dma_allocate(struct snd_soc_pcm_runtime *rtd, +static int tegra_pcm_dma_allocate(struct device *dev, struct snd_soc_pcm_runtime *rtd, size_t size) { - struct snd_card *card = rtd->card->snd_card; struct snd_pcm *pcm = rtd->pcm; int ret; - ret = dma_set_mask_and_coherent(card->dev, DMA_BIT_MASK(32)); + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); if (ret < 0) return ret; if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) { - ret = tegra_pcm_preallocate_dma_buffer(pcm, - SNDRV_PCM_STREAM_PLAYBACK, size); + ret = tegra_pcm_preallocate_dma_buffer(dev, pcm, SNDRV_PCM_STREAM_PLAYBACK, size); if (ret) goto err; } if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) { - ret = tegra_pcm_preallocate_dma_buffer(pcm, - SNDRV_PCM_STREAM_CAPTURE, size); + ret = tegra_pcm_preallocate_dma_buffer(dev, pcm, SNDRV_PCM_STREAM_CAPTURE, size); if (ret) goto err_free_play; } @@ -284,7 +281,16 @@ err: int tegra_pcm_construct(struct snd_soc_component *component, struct snd_soc_pcm_runtime *rtd) { - return tegra_pcm_dma_allocate(rtd, tegra_pcm_hardware.buffer_bytes_max); + struct device *dev = component->dev; + + /* + * Fallback for backwards-compatibility with older device trees that + * have the iommus property in the virtual, top-level "sound" node. + */ + if (!of_get_property(dev->of_node, "iommus", NULL)) + dev = rtd->card->snd_card->dev; + + return tegra_pcm_dma_allocate(dev, rtd, tegra_pcm_hardware.buffer_bytes_max); } EXPORT_SYMBOL_GPL(tegra_pcm_construct); diff --git a/sound/soc/ti/j721e-evm.c b/sound/soc/ti/j721e-evm.c index a7c0484d44ec..265bbc5a2f96 100644 --- a/sound/soc/ti/j721e-evm.c +++ b/sound/soc/ti/j721e-evm.c @@ -197,7 +197,7 @@ static int j721e_configure_refclk(struct j721e_priv *priv, return ret; } - if (priv->hsdiv_rates[domain->parent_clk_id] != scki) { + if (domain->parent_clk_id == -1 || priv->hsdiv_rates[domain->parent_clk_id] != scki) { dev_dbg(priv->dev, "%s configuration for %u Hz: %s, %dxFS (SCKI: %u Hz)\n", audio_domain == J721E_AUDIO_DOMAIN_CPB ? 
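/*
 * Editorial note -- not part of the patch: domain->parent_clk_id is -1
 * while no parent clock has been picked yet, so the added "== -1" test
 * above short-circuits before priv->hsdiv_rates[domain->parent_clk_id]
 * is evaluated with a negative index, avoiding an out-of-bounds read
 * and forcing the first configuration pass through.
 */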
"CPB" : "IVI", @@ -278,23 +278,29 @@ static int j721e_audio_startup(struct snd_pcm_substream *substream) j721e_rule_rate, &priv->rate_range, SNDRV_PCM_HW_PARAM_RATE, -1); - mutex_unlock(&priv->mutex); if (ret) - return ret; + goto out; /* Reset TDM slots to 32 */ ret = snd_soc_dai_set_tdm_slot(cpu_dai, 0x3, 0x3, 2, 32); if (ret && ret != -ENOTSUPP) - return ret; + goto out; for_each_rtd_codec_dais(rtd, i, codec_dai) { ret = snd_soc_dai_set_tdm_slot(codec_dai, 0x3, 0x3, 2, 32); if (ret && ret != -ENOTSUPP) - return ret; + goto out; } - return 0; + if (ret == -ENOTSUPP) + ret = 0; +out: + if (ret) + domain->active--; + mutex_unlock(&priv->mutex); + + return ret; } static int j721e_audio_hw_params(struct snd_pcm_substream *substream, diff --git a/sound/soc/uniphier/aio-dma.c b/sound/soc/uniphier/aio-dma.c index 3c1628a3a1ac..3d9736e7381f 100644 --- a/sound/soc/uniphier/aio-dma.c +++ b/sound/soc/uniphier/aio-dma.c @@ -198,7 +198,7 @@ static int uniphier_aiodma_mmap(struct snd_soc_component *component, vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); return remap_pfn_range(vma, vma->vm_start, - substream->dma_buffer.addr >> PAGE_SHIFT, + substream->runtime->dma_addr >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot); } diff --git a/sound/soc/xilinx/xlnx_formatter_pcm.c b/sound/soc/xilinx/xlnx_formatter_pcm.c index 1d59fb668c77..91afea9d5de6 100644 --- a/sound/soc/xilinx/xlnx_formatter_pcm.c +++ b/sound/soc/xilinx/xlnx_formatter_pcm.c @@ -452,8 +452,8 @@ static int xlnx_formatter_pcm_hw_params(struct snd_soc_component *component, stream_data->buffer_size = size; - low = lower_32_bits(substream->dma_buffer.addr); - high = upper_32_bits(substream->dma_buffer.addr); + low = lower_32_bits(runtime->dma_addr); + high = upper_32_bits(runtime->dma_addr); writel(low, stream_data->mmio + XLNX_AUD_BUFF_ADDR_LSB); writel(high, stream_data->mmio + XLNX_AUD_BUFF_ADDR_MSB); diff --git a/sound/usb/card.c b/sound/usb/card.c index 2f6a62416c05..a1f8c3a026f5 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c @@ -907,7 +907,7 @@ static void usb_audio_disconnect(struct usb_interface *intf) } } - if (chip->quirk_type & QUIRK_SETUP_DISABLE_AUTOSUSPEND) + if (chip->quirk_type == QUIRK_SETUP_DISABLE_AUTOSUSPEND) usb_enable_autosuspend(interface_to_usbdev(intf)); chip->num_interfaces--; diff --git a/sound/usb/clock.c b/sound/usb/clock.c index 52de52288e10..14456f61539e 100644 --- a/sound/usb/clock.c +++ b/sound/usb/clock.c @@ -324,6 +324,12 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip, sources[ret - 1], visited, validate); if (ret > 0) { + /* + * For Samsung USBC Headset (AKG), setting clock selector again + * will result in incorrect default clock setting problems + */ + if (chip->usb_id == USB_ID(0x04e8, 0xa051)) + return ret; err = uac_clock_selector_set_val(chip, entity_id, cur); if (err < 0) return err; diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 30b3e128e28d..9b713b4a5ec4 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -1816,6 +1816,15 @@ static void get_connector_control_name(struct usb_mixer_interface *mixer, strlcat(name, " - Output Jack", name_size); } +/* get connector value to "wake up" the USB audio */ +static int connector_mixer_resume(struct usb_mixer_elem_list *list) +{ + struct usb_mixer_elem_info *cval = mixer_elem_list_to_info(list); + + get_connector_value(cval, NULL, NULL); + return 0; +} + /* Build a mixer control for a UAC connector control (jack-detect) */ static void build_connector_control(struct usb_mixer_interface *mixer, 
const struct usbmix_name_map *imap, @@ -1833,6 +1842,10 @@ static void build_connector_control(struct usb_mixer_interface *mixer, if (!cval) return; snd_usb_mixer_elem_init_std(&cval->head, mixer, term->id); + + /* set up a specific resume callback */ + cval->head.resume = connector_mixer_resume; + /* * UAC2: The first byte from reading the UAC2_TE_CONNECTOR control returns the * number of channels connected. @@ -3295,7 +3308,15 @@ static void snd_usb_mixer_dump_cval(struct snd_info_buffer *buffer, { struct usb_mixer_elem_info *cval = mixer_elem_list_to_info(list); static const char * const val_types[] = { - "BOOLEAN", "INV_BOOLEAN", "S8", "U8", "S16", "U16", "S32", "U32", + [USB_MIXER_BOOLEAN] = "BOOLEAN", + [USB_MIXER_INV_BOOLEAN] = "INV_BOOLEAN", + [USB_MIXER_S8] = "S8", + [USB_MIXER_U8] = "U8", + [USB_MIXER_S16] = "S16", + [USB_MIXER_U16] = "U16", + [USB_MIXER_S32] = "S32", + [USB_MIXER_U32] = "U32", + [USB_MIXER_BESPOKEN] = "BESPOKEN", }; snd_iprintf(buffer, " Info: id=%i, control=%i, cmask=0x%x, " "channels=%i, type=\"%s\"\n", cval->head.id, @@ -3634,23 +3655,15 @@ static int restore_mixer_value(struct usb_mixer_elem_list *list) return 0; } -static int default_mixer_resume(struct usb_mixer_elem_list *list) -{ - struct usb_mixer_elem_info *cval = mixer_elem_list_to_info(list); - - /* get connector value to "wake up" the USB audio */ - if (cval->val_type == USB_MIXER_BOOLEAN && cval->channels == 1) - get_connector_value(cval, NULL, NULL); - - return 0; -} - static int default_mixer_reset_resume(struct usb_mixer_elem_list *list) { - int err = default_mixer_resume(list); + int err; - if (err < 0) - return err; + if (list->resume) { + err = list->resume(list); + if (err < 0) + return err; + } return restore_mixer_value(list); } @@ -3689,7 +3702,7 @@ void snd_usb_mixer_elem_init_std(struct usb_mixer_elem_list *list, list->id = unitid; list->dump = snd_usb_mixer_dump_cval; #ifdef CONFIG_PM - list->resume = default_mixer_resume; + list->resume = NULL; list->reset_resume = default_mixer_reset_resume; #endif } diff --git a/sound/usb/mixer_scarlett_gen2.c b/sound/usb/mixer_scarlett_gen2.c index f9d698a37153..3d5848d5481b 100644 --- a/sound/usb/mixer_scarlett_gen2.c +++ b/sound/usb/mixer_scarlett_gen2.c @@ -228,7 +228,7 @@ enum { }; static const char *const scarlett2_dim_mute_names[SCARLETT2_DIM_MUTE_COUNT] = { - "Mute", "Dim" + "Mute Playback Switch", "Dim Playback Switch" }; /* Description of each hardware port type: @@ -1856,9 +1856,15 @@ static int scarlett2_mute_ctl_get(struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol) { struct usb_mixer_elem_info *elem = kctl->private_data; - struct scarlett2_data *private = elem->head.mixer->private_data; + struct usb_mixer_interface *mixer = elem->head.mixer; + struct scarlett2_data *private = mixer->private_data; int index = line_out_remap(private, elem->control); + mutex_lock(&private->data_mutex); + if (private->vol_updated) + scarlett2_update_volumes(mixer); + mutex_unlock(&private->data_mutex); + ucontrol->value.integer.value[0] = private->mute_switch[index]; return 0; } @@ -1955,10 +1961,12 @@ static void scarlett2_vol_ctl_set_writable(struct usb_mixer_interface *mixer, ~SNDRV_CTL_ELEM_ACCESS_WRITE; } - /* Notify of write bit change */ - snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_INFO, + /* Notify of write bit and possible value change */ + snd_ctl_notify(card, + SNDRV_CTL_EVENT_MASK_VALUE | SNDRV_CTL_EVENT_MASK_INFO, &private->vol_ctls[index]->id); - snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_INFO, + snd_ctl_notify(card, + 
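/*
 * Editorial note -- not part of the patch: these scarlett2 hunks widen
 * the notifications from SNDRV_CTL_EVENT_MASK_INFO alone to
 * INFO | VALUE (or plain VALUE in the dim/mute put case). When the
 * hardware flips a control between read-only and writable its cached
 * value can change at the same moment, and user-space mixers typically
 * re-read a control only on a VALUE event.
 */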
SNDRV_CTL_EVENT_MASK_VALUE | SNDRV_CTL_EVENT_MASK_INFO, &private->mute_ctls[index]->id); } @@ -2530,14 +2538,18 @@ static int scarlett2_add_direct_monitor_ctl(struct usb_mixer_interface *mixer) { struct scarlett2_data *private = mixer->private_data; const struct scarlett2_device_info *info = private->info; + const char *s; if (!info->direct_monitor) return 0; + s = info->direct_monitor == 1 + ? "Direct Monitor Playback Switch" + : "Direct Monitor Playback Enum"; + return scarlett2_add_new_ctl( mixer, &scarlett2_direct_monitor_ctl[info->direct_monitor - 1], - 0, 1, "Direct Monitor Playback Switch", - &private->direct_monitor_ctl); + 0, 1, s, &private->direct_monitor_ctl); } /*** Speaker Switching Control ***/ @@ -2589,7 +2601,9 @@ static int scarlett2_speaker_switch_enable(struct usb_mixer_interface *mixer) /* disable the line out SW/HW switch */ scarlett2_sw_hw_ctl_ro(private, i); - snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_INFO, + snd_ctl_notify(card, + SNDRV_CTL_EVENT_MASK_VALUE | + SNDRV_CTL_EVENT_MASK_INFO, &private->sw_hw_ctls[i]->id); } @@ -2913,7 +2927,7 @@ static int scarlett2_dim_mute_ctl_put(struct snd_kcontrol *kctl, if (private->vol_sw_hw_switch[line_index]) { private->mute_switch[line_index] = val; snd_ctl_notify(mixer->chip->card, - SNDRV_CTL_EVENT_MASK_INFO, + SNDRV_CTL_EVENT_MASK_VALUE, &private->mute_ctls[i]->id); } } @@ -3455,7 +3469,7 @@ static int scarlett2_add_msd_ctl(struct usb_mixer_interface *mixer) /* Add MSD control */ return scarlett2_add_new_ctl(mixer, &scarlett2_msd_ctl, - 0, 1, "MSD Mode", NULL); + 0, 1, "MSD Mode Switch", NULL); } /*** Cleanup/Suspend Callbacks ***/ diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 8b8bee3c3dd6..326d1b0ea5e6 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -1897,6 +1897,10 @@ static const struct registration_quirk registration_quirks[] = { REG_QUIRK_ENTRY(0x0951, 0x16d8, 2), /* Kingston HyperX AMP */ REG_QUIRK_ENTRY(0x0951, 0x16ed, 2), /* Kingston HyperX Cloud Alpha S */ REG_QUIRK_ENTRY(0x0951, 0x16ea, 2), /* Kingston HyperX Cloud Flight S */ + REG_QUIRK_ENTRY(0x0ecb, 0x1f46, 2), /* JBL Quantum 600 */ + REG_QUIRK_ENTRY(0x0ecb, 0x2039, 2), /* JBL Quantum 400 */ + REG_QUIRK_ENTRY(0x0ecb, 0x203c, 2), /* JBL Quantum 600 */ + REG_QUIRK_ENTRY(0x0ecb, 0x203e, 2), /* JBL Quantum 800 */ { 0 } /* terminator */ }; diff --git a/tools/arch/arm64/include/uapi/asm/unistd.h b/tools/arch/arm64/include/uapi/asm/unistd.h index f83a70e07df8..ce2ee8f1e361 100644 --- a/tools/arch/arm64/include/uapi/asm/unistd.h +++ b/tools/arch/arm64/include/uapi/asm/unistd.h @@ -20,5 +20,6 @@ #define __ARCH_WANT_SET_GET_RLIMIT #define __ARCH_WANT_TIME32_SYSCALLS #define __ARCH_WANT_SYS_CLONE3 +#define __ARCH_WANT_MEMFD_SECRET #include <asm-generic/unistd.h> diff --git a/tools/bpf/Makefile b/tools/bpf/Makefile index 39bb322707b4..b11cfc86a3d0 100644 --- a/tools/bpf/Makefile +++ b/tools/bpf/Makefile @@ -97,7 +97,7 @@ clean: bpftool_clean runqslower_clean resolve_btfids_clean $(Q)$(RM) -- $(OUTPUT)FEATURE-DUMP.bpf $(Q)$(RM) -r -- $(OUTPUT)feature -install: $(PROGS) bpftool_install runqslower_install +install: $(PROGS) bpftool_install $(call QUIET_INSTALL, bpf_jit_disasm) $(Q)$(INSTALL) -m 0755 -d $(DESTDIR)$(prefix)/bin $(Q)$(INSTALL) $(OUTPUT)bpf_jit_disasm $(DESTDIR)$(prefix)/bin/bpf_jit_disasm @@ -118,9 +118,6 @@ bpftool_clean: runqslower: $(call descend,runqslower) -runqslower_install: - $(call descend,runqslower,install) - runqslower_clean: $(call descend,runqslower,clean) @@ -131,5 +128,5 @@ resolve_btfids_clean: $(call 
descend,resolve_btfids,clean) .PHONY: all install clean bpftool bpftool_install bpftool_clean \ - runqslower runqslower_install runqslower_clean \ + runqslower runqslower_clean \ resolve_btfids resolve_btfids_clean diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c index 1828bba19020..dc6daa193557 100644 --- a/tools/bpf/bpftool/common.c +++ b/tools/bpf/bpftool/common.c @@ -222,6 +222,11 @@ int mount_bpffs_for_pin(const char *name) int err = 0; file = malloc(strlen(name) + 1); + if (!file) { + p_err("mem alloc failed"); + return -1; + } + strcpy(file, name); dir = dirname(file); diff --git a/tools/bpf/bpftool/jit_disasm.c b/tools/bpf/bpftool/jit_disasm.c index e7e7eee9f172..24734f2249d6 100644 --- a/tools/bpf/bpftool/jit_disasm.c +++ b/tools/bpf/bpftool/jit_disasm.c @@ -43,11 +43,13 @@ static int fprintf_json(void *out, const char *fmt, ...) { va_list ap; char *s; + int err; va_start(ap, fmt); - if (vasprintf(&s, fmt, ap) < 0) - return -1; + err = vasprintf(&s, fmt, ap); va_end(ap); + if (err < 0) + return -1; if (!oper_count) { int i; diff --git a/tools/bpf/runqslower/runqslower.bpf.c b/tools/bpf/runqslower/runqslower.bpf.c index 645530ca7e98..ab9353f2fd46 100644 --- a/tools/bpf/runqslower/runqslower.bpf.c +++ b/tools/bpf/runqslower/runqslower.bpf.c @@ -74,7 +74,7 @@ int handle__sched_switch(u64 *ctx) u32 pid; /* ivcsw: treat like an enqueue event and store timestamp */ - if (prev->state == TASK_RUNNING) + if (prev->__state == TASK_RUNNING) trace_enqueue(prev); pid = next->pid; diff --git a/tools/include/linux/kconfig.h b/tools/include/linux/kconfig.h index 1555a0c4f345..13b86bd3b746 100644 --- a/tools/include/linux/kconfig.h +++ b/tools/include/linux/kconfig.h @@ -4,12 +4,6 @@ /* CONFIG_CC_VERSION_TEXT (Do not delete this comment. See help in Kconfig) */ -#ifdef CONFIG_CPU_BIG_ENDIAN -#define __BIG_ENDIAN 4321 -#else -#define __LITTLE_ENDIAN 1234 -#endif - #define __ARG_PLACEHOLDER_1 0, #define __take_second_arg(__ignored, val, ...) 
val diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h index f211961ce1da..a9d6fcd95f42 100644 --- a/tools/include/uapi/asm-generic/unistd.h +++ b/tools/include/uapi/asm-generic/unistd.h @@ -873,8 +873,13 @@ __SYSCALL(__NR_landlock_add_rule, sys_landlock_add_rule) #define __NR_landlock_restrict_self 446 __SYSCALL(__NR_landlock_restrict_self, sys_landlock_restrict_self) +#ifdef __ARCH_WANT_MEMFD_SECRET +#define __NR_memfd_secret 447 +__SYSCALL(__NR_memfd_secret, sys_memfd_secret) +#endif + #undef __NR_syscalls -#define __NR_syscalls 447 +#define __NR_syscalls 448 /* * 32 bit systems traditionally used different diff --git a/tools/io_uring/io_uring-cp.c b/tools/io_uring/io_uring-cp.c index 81461813ec62..d9bd6f5f8f46 100644 --- a/tools/io_uring/io_uring-cp.c +++ b/tools/io_uring/io_uring-cp.c @@ -131,8 +131,7 @@ static int copy_file(struct io_uring *ring, off_t insize) writes = reads = offset = 0; while (insize || write_left) { - unsigned long had_reads; - int got_comp; + int had_reads, got_comp; /* * Queue up as many reads as we can @@ -174,8 +173,13 @@ static int copy_file(struct io_uring *ring, off_t insize) if (!got_comp) { ret = io_uring_wait_cqe(ring, &cqe); got_comp = 1; - } else + } else { ret = io_uring_peek_cqe(ring, &cqe); + if (ret == -EAGAIN) { + cqe = NULL; + ret = 0; + } + } if (ret < 0) { fprintf(stderr, "io_uring_peek_cqe: %s\n", strerror(-ret)); @@ -194,7 +198,7 @@ static int copy_file(struct io_uring *ring, off_t insize) fprintf(stderr, "cqe failed: %s\n", strerror(-cqe->res)); return 1; - } else if ((size_t) cqe->res != data->iov.iov_len) { + } else if (cqe->res != data->iov.iov_len) { /* Short read/write, adjust and requeue */ data->iov.iov_base += cqe->res; data->iov.iov_len -= cqe->res; @@ -221,6 +225,25 @@ static int copy_file(struct io_uring *ring, off_t insize) } } + /* wait out pending writes */ + while (writes) { + struct io_data *data; + + ret = io_uring_wait_cqe(ring, &cqe); + if (ret) { + fprintf(stderr, "wait_cqe=%d\n", ret); + return 1; + } + if (cqe->res < 0) { + fprintf(stderr, "write res=%d\n", cqe->res); + return 1; + } + data = io_uring_cqe_get_data(cqe); + free(data); + writes--; + io_uring_cqe_seen(ring, cqe); + } + return 0; } diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index b46760b93bb4..7ff3d5ce44f9 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -804,6 +804,7 @@ static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf) btf->nr_types = 0; btf->start_id = 1; btf->start_str_off = 0; + btf->fd = -1; if (base_btf) { btf->base_btf = base_btf; @@ -832,8 +833,6 @@ static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf) if (err) goto done; - btf->fd = -1; - done: if (err) { btf__free(btf); diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 1e04ce724240..6f5e2757bb3c 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -10136,7 +10136,7 @@ int bpf_link__unpin(struct bpf_link *link) err = unlink(link->pin_path); if (err != 0) - return libbpf_err_errno(err); + return -errno; pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path); zfree(&link->pin_path); @@ -11197,7 +11197,7 @@ int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms) cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms); if (cnt < 0) - return libbpf_err_errno(cnt); + return -errno; for (i = 0; i < cnt; i++) { struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr; diff --git a/tools/lib/bpf/libbpf_probes.c 
b/tools/lib/bpf/libbpf_probes.c index ecaae2927ab8..cd8c703dde71 100644 --- a/tools/lib/bpf/libbpf_probes.c +++ b/tools/lib/bpf/libbpf_probes.c @@ -75,6 +75,9 @@ probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns, case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: xattr.expected_attach_type = BPF_CGROUP_INET4_CONNECT; break; + case BPF_PROG_TYPE_CGROUP_SOCKOPT: + xattr.expected_attach_type = BPF_CGROUP_GETSOCKOPT; + break; case BPF_PROG_TYPE_SK_LOOKUP: xattr.expected_attach_type = BPF_SK_LOOKUP; break; @@ -104,7 +107,6 @@ probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns, case BPF_PROG_TYPE_SK_REUSEPORT: case BPF_PROG_TYPE_FLOW_DISSECTOR: case BPF_PROG_TYPE_CGROUP_SYSCTL: - case BPF_PROG_TYPE_CGROUP_SOCKOPT: case BPF_PROG_TYPE_TRACING: case BPF_PROG_TYPE_STRUCT_OPS: case BPF_PROG_TYPE_EXT: diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl index af973e400053..f6b57799c1ea 100644 --- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl +++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl @@ -368,6 +368,7 @@ 444 common landlock_create_ruleset sys_landlock_create_ruleset 445 common landlock_add_rule sys_landlock_add_rule 446 common landlock_restrict_self sys_landlock_restrict_self +447 common memfd_secret sys_memfd_secret # # Due to a historical design error, certain syscalls are numbered differently diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c index 5d6f583e2cd3..c88c61e7f8cc 100644 --- a/tools/perf/builtin-inject.c +++ b/tools/perf/builtin-inject.c @@ -361,9 +361,10 @@ static struct dso *findnew_dso(int pid, int tid, const char *filename, dso = machine__findnew_dso_id(machine, filename, id); } - if (dso) + if (dso) { + nsinfo__put(dso->nsinfo); dso->nsinfo = nsi; - else + } else nsinfo__put(nsi); thread__put(thread); @@ -992,8 +993,10 @@ int cmd_inject(int argc, const char **argv) data.path = inject.input_name; inject.session = perf_session__new(&data, inject.output.is_pipe, &inject.tool); - if (IS_ERR(inject.session)) - return PTR_ERR(inject.session); + if (IS_ERR(inject.session)) { + ret = PTR_ERR(inject.session); + goto out_close_output; + } if (zstd_init(&(inject.session->zstd_data), 0) < 0) pr_warning("Decompression initialization failed.\n"); @@ -1035,6 +1038,8 @@ int cmd_inject(int argc, const char **argv) out_delete: zstd_fini(&(inject.session->zstd_data)); perf_session__delete(inject.session); +out_close_output: + perf_data__close(&inject.output); free(inject.itrace_synth_opts.vm_tm_corr_args); return ret; } diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 6386af6a2612..dc0364f671b9 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -1175,6 +1175,8 @@ int cmd_report(int argc, const char **argv) .annotation_opts = annotation__default_options, .skip_empty = true, }; + char *sort_order_help = sort_help("sort by key(s):"); + char *field_order_help = sort_help("output field(s): overhead period sample "); const struct option options[] = { OPT_STRING('i', "input", &input_name, "file", "input file name"), @@ -1209,9 +1211,9 @@ int cmd_report(int argc, const char **argv) OPT_BOOLEAN(0, "header-only", &report.header_only, "Show only data header."), OPT_STRING('s', "sort", &sort_order, "key[,key2...]", - sort_help("sort by key(s):")), + sort_order_help), OPT_STRING('F', "fields", &field_order, "key[,keys...]", - sort_help("output field(s): overhead period sample ")), + field_order_help), OPT_BOOLEAN(0, 
"show-cpu-utilization", &symbol_conf.show_cpu_utilization, "Show sample percentage for different cpu modes"), OPT_BOOLEAN_FLAG(0, "showcpuutilization", &symbol_conf.show_cpu_utilization, @@ -1344,11 +1346,11 @@ int cmd_report(int argc, const char **argv) char sort_tmp[128]; if (ret < 0) - return ret; + goto exit; ret = perf_config(report__config, &report); if (ret) - return ret; + goto exit; argc = parse_options(argc, argv, options, report_usage, 0); if (argc) { @@ -1362,8 +1364,10 @@ int cmd_report(int argc, const char **argv) report.symbol_filter_str = argv[0]; } - if (annotate_check_args(&report.annotation_opts) < 0) - return -EINVAL; + if (annotate_check_args(&report.annotation_opts) < 0) { + ret = -EINVAL; + goto exit; + } if (report.mmaps_mode) report.tasks_mode = true; @@ -1377,12 +1381,14 @@ int cmd_report(int argc, const char **argv) if (symbol_conf.vmlinux_name && access(symbol_conf.vmlinux_name, R_OK)) { pr_err("Invalid file: %s\n", symbol_conf.vmlinux_name); - return -EINVAL; + ret = -EINVAL; + goto exit; } if (symbol_conf.kallsyms_name && access(symbol_conf.kallsyms_name, R_OK)) { pr_err("Invalid file: %s\n", symbol_conf.kallsyms_name); - return -EINVAL; + ret = -EINVAL; + goto exit; } if (report.inverted_callchain) @@ -1406,12 +1412,14 @@ int cmd_report(int argc, const char **argv) repeat: session = perf_session__new(&data, false, &report.tool); - if (IS_ERR(session)) - return PTR_ERR(session); + if (IS_ERR(session)) { + ret = PTR_ERR(session); + goto exit; + } ret = evswitch__init(&report.evswitch, session->evlist, stderr); if (ret) - return ret; + goto exit; if (zstd_init(&(session->zstd_data), 0) < 0) pr_warning("Decompression initialization failed. Reported data may be incomplete.\n"); @@ -1646,5 +1654,8 @@ error: zstd_fini(&(session->zstd_data)); perf_session__delete(session); +exit: + free(sort_order_help); + free(field_order_help); return ret; } diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index 954ce2f594e9..1ff10d4bccf3 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -670,7 +670,7 @@ static void create_tasks(struct perf_sched *sched) err = pthread_attr_init(&attr); BUG_ON(err); err = pthread_attr_setstacksize(&attr, - (size_t) max(16 * 1024, PTHREAD_STACK_MIN)); + (size_t) max(16 * 1024, (int)PTHREAD_STACK_MIN)); BUG_ON(err); err = pthread_mutex_lock(&sched->start_work_mutex); BUG_ON(err); @@ -3335,6 +3335,16 @@ static void setup_sorting(struct perf_sched *sched, const struct option *options sort_dimension__add("pid", &sched->cmp_pid); } +static bool schedstat_events_exposed(void) +{ + /* + * Select "sched:sched_stat_wait" event to check + * whether schedstat tracepoints are exposed. + */ + return IS_ERR(trace_event__tp_format("sched", "sched_stat_wait")) ? + false : true; +} + static int __cmd_record(int argc, const char **argv) { unsigned int rec_argc, i, j; @@ -3346,21 +3356,33 @@ static int __cmd_record(int argc, const char **argv) "-m", "1024", "-c", "1", "-e", "sched:sched_switch", - "-e", "sched:sched_stat_wait", - "-e", "sched:sched_stat_sleep", - "-e", "sched:sched_stat_iowait", "-e", "sched:sched_stat_runtime", "-e", "sched:sched_process_fork", "-e", "sched:sched_wakeup_new", "-e", "sched:sched_migrate_task", }; + + /* + * The tracepoints trace_sched_stat_{wait, sleep, iowait} + * are not exposed to user if CONFIG_SCHEDSTATS is not set, + * to prevent "perf sched record" execution failure, determine + * whether to record schedstat events according to actual situation. 
+ */ + const char * const schedstat_args[] = { + "-e", "sched:sched_stat_wait", + "-e", "sched:sched_stat_sleep", + "-e", "sched:sched_stat_iowait", + }; + unsigned int schedstat_argc = schedstat_events_exposed() ? + ARRAY_SIZE(schedstat_args) : 0; + struct tep_event *waking_event; /* * +2 for either "-e", "sched:sched_wakeup" or * "-e", "sched:sched_waking" */ - rec_argc = ARRAY_SIZE(record_args) + 2 + argc - 1; + rec_argc = ARRAY_SIZE(record_args) + 2 + schedstat_argc + argc - 1; rec_argv = calloc(rec_argc + 1, sizeof(char *)); if (rec_argv == NULL) @@ -3376,6 +3398,9 @@ static int __cmd_record(int argc, const char **argv) else rec_argv[i++] = strdup("sched:sched_wakeup"); + for (j = 0; j < schedstat_argc; j++) + rec_argv[i++] = strdup(schedstat_args[j]); + for (j = 1; j < (unsigned int)argc; j++, i++) rec_argv[i] = argv[j]; diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index 8c03a9862872..064da7f3618d 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -2601,6 +2601,12 @@ static void perf_script__exit_per_event_dump_stats(struct perf_script *script) } } +static void perf_script__exit(struct perf_script *script) +{ + perf_thread_map__put(script->threads); + perf_cpu_map__put(script->cpus); +} + static int __cmd_script(struct perf_script *script) { int ret; @@ -4143,8 +4149,10 @@ out_delete: zfree(&script.ptime_range); } + zstd_fini(&(session->zstd_data)); evlist__free_stats(session->evlist); perf_session__delete(session); + perf_script__exit(&script); if (script_started) cleanup_scripting(); diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index d25cb8088e8c..634375937db9 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -2445,9 +2445,6 @@ int cmd_stat(int argc, const char **argv) evlist__check_cpu_maps(evsel_list); - if (perf_pmu__has_hybrid()) - stat_config.no_merge = true; - /* * Initialize thread_map with comm names, * so we could print it out on output. 
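The builtin-sched.c hunk above replaces three unconditional "-e sched:sched_stat_*" record arguments with a probe-before-record pattern: schedstat_events_exposed() asks the kernel whether the schedstat tracepoints exist (they are hidden when CONFIG_SCHEDSTATS is not set), and the extra arguments are appended only when that probe succeeds. Below is a minimal standalone sketch of the same idea; the hard-coded tracefs mount point and the tracepoint_exposed() helper are assumptions of this sketch, since the patch itself resolves the mount at runtime and goes through trace_event__tp_format() instead.

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <sys/stat.h>

/*
 * Probe for a tracepoint by stat()ing its tracefs format file.
 * The hard-coded mount point is an assumption of this sketch;
 * perf discovers the tracefs mount point at runtime.
 */
static bool tracepoint_exposed(const char *sys, const char *name)
{
	char path[PATH_MAX];
	struct stat st;

	snprintf(path, sizeof(path),
		 "/sys/kernel/tracing/events/%s/%s/format", sys, name);
	return stat(path, &st) == 0;
}

int main(void)
{
	/* Extra record arguments, appended only when exposed. */
	static const char * const schedstat_args[] = {
		"-e", "sched:sched_stat_wait",
		"-e", "sched:sched_stat_sleep",
		"-e", "sched:sched_stat_iowait",
	};
	unsigned int nr = tracepoint_exposed("sched", "sched_stat_wait") ?
		sizeof(schedstat_args) / sizeof(schedstat_args[0]) : 0;

	printf("appending %u schedstat record arguments\n", nr);
	return 0;
}

Probing one representative tracepoint is sufficient because all three sched_stat events are gated by the same CONFIG_SCHEDSTATS option, which is the same shortcut the patch takes with "sched:sched_stat_wait".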
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index 7ec18ff57fc4..9c265fa96011 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -2266,6 +2266,14 @@ static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sam return augmented_args; } +static void syscall__exit(struct syscall *sc) +{ + if (!sc) + return; + + free(sc->arg_fmt); +} + static int trace__sys_enter(struct trace *trace, struct evsel *evsel, union perf_event *event __maybe_unused, struct perf_sample *sample) @@ -3095,6 +3103,21 @@ static struct evsel *evsel__new_pgfault(u64 config) return evsel; } +static void evlist__free_syscall_tp_fields(struct evlist *evlist) +{ + struct evsel *evsel; + + evlist__for_each_entry(evlist, evsel) { + struct evsel_trace *et = evsel->priv; + + if (!et || !evsel->tp_format || strcmp(evsel->tp_format->system, "syscalls")) + continue; + + free(et->fmt); + free(et); + } +} + static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample) { const u32 type = event->header.type; @@ -4130,7 +4153,7 @@ out_disable: out_delete_evlist: trace__symbols__exit(trace); - + evlist__free_syscall_tp_fields(evlist); evlist__delete(evlist); cgroup__put(trace->cgroup); trace->evlist = NULL; @@ -4636,6 +4659,9 @@ do_concat: err = parse_events_option(&o, lists[0], 0); } out: + free(strace_groups_dir); + free(lists[0]); + free(lists[1]); if (sep) *sep = ','; @@ -4701,6 +4727,21 @@ out: return err; } +static void trace__exit(struct trace *trace) +{ + int i; + + strlist__delete(trace->ev_qualifier); + free(trace->ev_qualifier_ids.entries); + if (trace->syscalls.table) { + for (i = 0; i <= trace->sctbl->syscalls.max_id; i++) + syscall__exit(&trace->syscalls.table[i]); + free(trace->syscalls.table); + } + syscalltbl__delete(trace->sctbl); + zfree(&trace->perfconfig_events); +} + int cmd_trace(int argc, const char **argv) { const char *trace_usage[] = { @@ -5135,6 +5176,6 @@ out_close: if (output_name != NULL) fclose(trace.output); out: - zfree(&trace.perfconfig_events); + trace__exit(&trace); return err; } diff --git a/tools/perf/tests/bpf.c b/tools/perf/tests/bpf.c index 33bda9c26542..dbf5f5215abe 100644 --- a/tools/perf/tests/bpf.c +++ b/tools/perf/tests/bpf.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <errno.h> #include <stdio.h> +#include <stdlib.h> #include <sys/epoll.h> #include <sys/types.h> #include <sys/stat.h> @@ -276,6 +277,7 @@ static int __test__bpf(int idx) } out: + free(obj_buf); bpf__clear(); return ret; } diff --git a/tools/perf/tests/event_update.c b/tools/perf/tests/event_update.c index 656218179222..44a50527f9d9 100644 --- a/tools/perf/tests/event_update.c +++ b/tools/perf/tests/event_update.c @@ -88,6 +88,7 @@ int test__event_update(struct test *test __maybe_unused, int subtest __maybe_unu struct evsel *evsel; struct event_name tmp; struct evlist *evlist = evlist__new_default(); + char *unit = strdup("KRAVA"); TEST_ASSERT_VAL("failed to get evlist", evlist); @@ -98,7 +99,7 @@ int test__event_update(struct test *test __maybe_unused, int subtest __maybe_unu perf_evlist__id_add(&evlist->core, &evsel->core, 0, 0, 123); - evsel->unit = strdup("KRAVA"); + evsel->unit = unit; TEST_ASSERT_VAL("failed to synthesize attr update unit", !perf_event__synthesize_event_update_unit(NULL, evsel, process_event_unit)); @@ -118,6 +119,7 @@ int test__event_update(struct test *test __maybe_unused, int subtest __maybe_unu TEST_ASSERT_VAL("failed to synthesize attr update cpus", 
!perf_event__synthesize_event_update_cpus(&tmp.tool, evsel, process_event_cpus)); - perf_cpu_map__put(evsel->core.own_cpus); + free(unit); + evlist__delete(evlist); return 0; } diff --git a/tools/perf/tests/evsel-roundtrip-name.c b/tools/perf/tests/evsel-roundtrip-name.c index 5ebf56331904..4e09f0a312af 100644 --- a/tools/perf/tests/evsel-roundtrip-name.c +++ b/tools/perf/tests/evsel-roundtrip-name.c @@ -5,6 +5,7 @@ #include "tests.h" #include "debug.h" #include "pmu.h" +#include "pmu-hybrid.h" #include <errno.h> #include <linux/kernel.h> @@ -102,7 +103,7 @@ int test__perf_evsel__roundtrip_name_test(struct test *test __maybe_unused, int { int err = 0, ret = 0; - if (perf_pmu__has_hybrid()) + if (perf_pmu__has_hybrid() && perf_pmu__hybrid_mounted("cpu_atom")) return perf_evsel__name_array_test(evsel__hw_names, 2); err = perf_evsel__name_array_test(evsel__hw_names, 1); diff --git a/tools/perf/tests/maps.c b/tools/perf/tests/maps.c index edcbc70ff9d6..1ac72919fa35 100644 --- a/tools/perf/tests/maps.c +++ b/tools/perf/tests/maps.c @@ -116,5 +116,7 @@ int test__maps__merge_in(struct test *t __maybe_unused, int subtest __maybe_unus ret = check_maps(merged3, ARRAY_SIZE(merged3), &maps); TEST_ASSERT_VAL("merge check failed", !ret); + + maps__exit(&maps); return TEST_OK; } diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c index 56a7b6a14195..8d4866739255 100644 --- a/tools/perf/tests/parse-events.c +++ b/tools/perf/tests/parse-events.c @@ -6,6 +6,7 @@ #include "tests.h" #include "debug.h" #include "pmu.h" +#include "pmu-hybrid.h" #include <dirent.h> #include <errno.h> #include <sys/types.h> @@ -1596,6 +1597,13 @@ static int test__hybrid_raw1(struct evlist *evlist) { struct evsel *evsel = evlist__first(evlist); + if (!perf_pmu__hybrid_mounted("cpu_atom")) { + TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type); + TEST_ASSERT_VAL("wrong config", 0x1a == evsel->core.attr.config); + return 0; + } + TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries); TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type); TEST_ASSERT_VAL("wrong config", 0x1a == evsel->core.attr.config); @@ -1620,13 +1628,9 @@ static int test__hybrid_cache_event(struct evlist *evlist) { struct evsel *evsel = evlist__first(evlist); - TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries); + TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries); TEST_ASSERT_VAL("wrong type", PERF_TYPE_HW_CACHE == evsel->core.attr.type); TEST_ASSERT_VAL("wrong config", 0x2 == (evsel->core.attr.config & 0xffffffff)); - - evsel = evsel__next(evsel); - TEST_ASSERT_VAL("wrong type", PERF_TYPE_HW_CACHE == evsel->core.attr.type); - TEST_ASSERT_VAL("wrong config", 0x10002 == (evsel->core.attr.config & 0xffffffff)); return 0; } @@ -2028,7 +2032,7 @@ static struct evlist_test test__hybrid_events[] = { .id = 7, }, { - .name = "cpu_core/LLC-loads/,cpu_atom/LLC-load-misses/", + .name = "cpu_core/LLC-loads/", .check = test__hybrid_cache_event, .id = 8, }, diff --git a/tools/perf/tests/perf-time-to-tsc.c b/tools/perf/tests/perf-time-to-tsc.c index 85d75b9b25a1..7c56bc1f4cff 100644 --- a/tools/perf/tests/perf-time-to-tsc.c +++ b/tools/perf/tests/perf-time-to-tsc.c @@ -21,6 +21,7 @@ #include "mmap.h" #include "tests.h" #include "pmu.h" +#include "pmu-hybrid.h" #define CHECK__(x) { \ while ((x) < 0) { \ @@ -93,7 +94,7 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, 
int subtest __maybe * For hybrid "cycles:u", it creates two events. * Init the second evsel here. */ - if (perf_pmu__has_hybrid()) { + if (perf_pmu__has_hybrid() && perf_pmu__hybrid_mounted("cpu_atom")) { evsel = evsel__next(evsel); evsel->core.attr.comm = 1; evsel->core.attr.disabled = 1; diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c index ec4e3b21b831..b5efe675b321 100644 --- a/tools/perf/tests/topology.c +++ b/tools/perf/tests/topology.c @@ -61,6 +61,7 @@ static int session_write_header(char *path) TEST_ASSERT_VAL("failed to write header", !perf_session__write_header(session, session->evlist, data.file.fd, true)); + evlist__delete(session->evlist); perf_session__delete(session); return 0; diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c index 32ad92d3e454..bc1f64873c8f 100644 --- a/tools/perf/util/cs-etm.c +++ b/tools/perf/util/cs-etm.c @@ -2434,6 +2434,22 @@ static int cs_etm__process_event(struct perf_session *session, return 0; } +static void dump_queued_data(struct cs_etm_auxtrace *etm, + struct perf_record_auxtrace *event) +{ + struct auxtrace_buffer *buf; + unsigned int i; + /* + * Find all buffers with same reference in the queues and dump them. + * This is because the queues can contain multiple entries of the same + * buffer that were split on aux records. + */ + for (i = 0; i < etm->queues.nr_queues; ++i) + list_for_each_entry(buf, &etm->queues.queue_array[i].head, list) + if (buf->reference == event->reference) + cs_etm__dump_event(etm, buf); +} + static int cs_etm__process_auxtrace_event(struct perf_session *session, union perf_event *event, struct perf_tool *tool __maybe_unused) @@ -2466,7 +2482,8 @@ static int cs_etm__process_auxtrace_event(struct perf_session *session, cs_etm__dump_event(etm, buffer); auxtrace_buffer__put_data(buffer); } - } + } else if (dump_trace) + dump_queued_data(etm, &event->auxtrace); return 0; } @@ -2683,6 +2700,172 @@ static u64 *cs_etm__create_meta_blk(u64 *buff_in, int *buff_in_offset, return metadata; } +/** + * Puts a fragment of an auxtrace buffer into the auxtrace queues based + * on the bounds of aux_event, if it matches with the buffer that's at + * file_offset. + * + * Normally, whole auxtrace buffers would be added to the queue. But we + * want to reset the decoder for every PERF_RECORD_AUX event, and the decoder + * is reset across each buffer, so splitting the buffers up in advance has + * the same effect. + */ +static int cs_etm__queue_aux_fragment(struct perf_session *session, off_t file_offset, size_t sz, + struct perf_record_aux *aux_event, struct perf_sample *sample) +{ + int err; + char buf[PERF_SAMPLE_MAX_SIZE]; + union perf_event *auxtrace_event_union; + struct perf_record_auxtrace *auxtrace_event; + union perf_event auxtrace_fragment; + __u64 aux_offset, aux_size; + + struct cs_etm_auxtrace *etm = container_of(session->auxtrace, + struct cs_etm_auxtrace, + auxtrace); + + /* + * There should be a PERF_RECORD_AUXTRACE event at the file_offset that we got + * from looping through the auxtrace index. + */ + err = perf_session__peek_event(session, file_offset, buf, + PERF_SAMPLE_MAX_SIZE, &auxtrace_event_union, NULL); + if (err) + return err; + auxtrace_event = &auxtrace_event_union->auxtrace; + if (auxtrace_event->header.type != PERF_RECORD_AUXTRACE) + return -EINVAL; + + if (auxtrace_event->header.size < sizeof(struct perf_record_auxtrace) || + auxtrace_event->header.size != sz) { + return -EINVAL; + } + + /* + * In per-thread mode, CPU is set to -1, but TID will be set instead. 
See + * auxtrace_mmap_params__set_idx(). Return 'not found' if neither CPU nor TID match. + */ + if ((auxtrace_event->cpu == (__u32) -1 && auxtrace_event->tid != sample->tid) || + auxtrace_event->cpu != sample->cpu) + return 1; + + if (aux_event->flags & PERF_AUX_FLAG_OVERWRITE) { + /* + * Clamp size in snapshot mode. The buffer size is clamped in + * __auxtrace_mmap__read() for snapshots, so the aux record size doesn't reflect + * the buffer size. + */ + aux_size = min(aux_event->aux_size, auxtrace_event->size); + + /* + * In this mode, the head also points to the end of the buffer so aux_offset + * needs to have the size subtracted so it points to the beginning as in normal mode + */ + aux_offset = aux_event->aux_offset - aux_size; + } else { + aux_size = aux_event->aux_size; + aux_offset = aux_event->aux_offset; + } + + if (aux_offset >= auxtrace_event->offset && + aux_offset + aux_size <= auxtrace_event->offset + auxtrace_event->size) { + /* + * If this AUX event was inside this buffer somewhere, create a new auxtrace event + * based on the sizes of the aux event, and queue that fragment. + */ + auxtrace_fragment.auxtrace = *auxtrace_event; + auxtrace_fragment.auxtrace.size = aux_size; + auxtrace_fragment.auxtrace.offset = aux_offset; + file_offset += aux_offset - auxtrace_event->offset + auxtrace_event->header.size; + + pr_debug3("CS ETM: Queue buffer size: %#"PRI_lx64" offset: %#"PRI_lx64 + " tid: %d cpu: %d\n", aux_size, aux_offset, sample->tid, sample->cpu); + return auxtrace_queues__add_event(&etm->queues, session, &auxtrace_fragment, + file_offset, NULL); + } + + /* Wasn't inside this buffer, but there were no parse errors. 1 == 'not found' */ + return 1; +} + +static int cs_etm__queue_aux_records_cb(struct perf_session *session, union perf_event *event, + u64 offset __maybe_unused, void *data __maybe_unused) +{ + struct perf_sample sample; + int ret; + struct auxtrace_index_entry *ent; + struct auxtrace_index *auxtrace_index; + struct evsel *evsel; + size_t i; + + /* Don't care about any other events, we're only queuing buffers for AUX events */ + if (event->header.type != PERF_RECORD_AUX) + return 0; + + if (event->header.size < sizeof(struct perf_record_aux)) + return -EINVAL; + + /* Truncated Aux records can have 0 size and shouldn't result in anything being queued. */ + if (!event->aux.aux_size) + return 0; + + /* + * Parse the sample, we need the sample_id_all data that comes after the event so that the + * CPU or PID can be matched to an AUXTRACE buffer's CPU or PID. + */ + evsel = evlist__event2evsel(session->evlist, event); + if (!evsel) + return -EINVAL; + ret = evsel__parse_sample(evsel, event, &sample); + if (ret) + return ret; + + /* + * Loop through the auxtrace index to find the buffer that matches up with this aux event. + */ + list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) { + for (i = 0; i < auxtrace_index->nr; i++) { + ent = &auxtrace_index->entries[i]; + ret = cs_etm__queue_aux_fragment(session, ent->file_offset, + ent->sz, &event->aux, &sample); + /* + * Stop search on error or successful values. Continue search on + * 1 ('not found') + */ + if (ret != 1) + return ret; + } + } + + /* + * Couldn't find the buffer corresponding to this aux record, something went wrong. Warn but + * don't exit with an error because it will still be possible to decode other aux records. 
+ */ + pr_err("CS ETM: Couldn't find auxtrace buffer for aux_offset: %#"PRI_lx64 + " tid: %d cpu: %d\n", event->aux.aux_offset, sample.tid, sample.cpu); + return 0; +} + +static int cs_etm__queue_aux_records(struct perf_session *session) +{ + struct auxtrace_index *index = list_first_entry_or_null(&session->auxtrace_index, + struct auxtrace_index, list); + if (index && index->nr > 0) + return perf_session__peek_events(session, session->header.data_offset, + session->header.data_size, + cs_etm__queue_aux_records_cb, NULL); + + /* + * We would get here if there are no entries in the index (either no auxtrace + * buffers or no index at all). Fail silently as there is the possibility of + * queueing them in cs_etm__process_auxtrace_event() if etm->data_queued is still + * false. + * + * In that scenario, buffers will not be split by AUX records. + */ + return 0; +} + int cs_etm__process_auxtrace_info(union perf_event *event, struct perf_session *session) { @@ -2876,14 +3059,13 @@ int cs_etm__process_auxtrace_info(union perf_event *event, if (dump_trace) { cs_etm__print_auxtrace_info(auxtrace_info->priv, num_cpu); - return 0; } err = cs_etm__synth_events(etm, session); if (err) goto err_delete_thread; - err = auxtrace_queues__process_index(&etm->queues, session); + err = cs_etm__queue_aux_records(session); if (err) goto err_delete_thread; diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c index a9c102e8e3c0..f5d260b1df4d 100644 --- a/tools/perf/util/data.c +++ b/tools/perf/util/data.c @@ -20,7 +20,7 @@ static void close_dir(struct perf_data_file *files, int nr) { - while (--nr >= 1) { + while (--nr >= 0) { close(files[nr].fd); zfree(&files[nr].path); } diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c index d786cf6b0cfa..ee15db2be2f4 100644 --- a/tools/perf/util/dso.c +++ b/tools/perf/util/dso.c @@ -1154,8 +1154,10 @@ struct map *dso__new_map(const char *name) struct map *map = NULL; struct dso *dso = dso__new(name); - if (dso) + if (dso) { map = map__new2(0, dso); + dso__put(dso); + } return map; } diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c index 7d2ba8419b0c..609ca1671501 100644 --- a/tools/perf/util/dwarf-aux.c +++ b/tools/perf/util/dwarf-aux.c @@ -113,14 +113,14 @@ static Dwarf_Line *cu_getsrc_die(Dwarf_Die *cu_die, Dwarf_Addr addr) * * Find a line number and file name for @addr in @cu_die. 
*/ -int cu_find_lineinfo(Dwarf_Die *cu_die, unsigned long addr, - const char **fname, int *lineno) +int cu_find_lineinfo(Dwarf_Die *cu_die, Dwarf_Addr addr, + const char **fname, int *lineno) { Dwarf_Line *line; Dwarf_Die die_mem; Dwarf_Addr faddr; - if (die_find_realfunc(cu_die, (Dwarf_Addr)addr, &die_mem) + if (die_find_realfunc(cu_die, addr, &die_mem) && die_entrypc(&die_mem, &faddr) == 0 && faddr == addr) { *fname = dwarf_decl_file(&die_mem); @@ -128,7 +128,7 @@ int cu_find_lineinfo(Dwarf_Die *cu_die, unsigned long addr, goto out; } - line = cu_getsrc_die(cu_die, (Dwarf_Addr)addr); + line = cu_getsrc_die(cu_die, addr); if (line && dwarf_lineno(line, lineno) == 0) { *fname = dwarf_linesrc(line, NULL, NULL); if (!*fname) diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h index cb99646843a9..7ee0fa19b5c4 100644 --- a/tools/perf/util/dwarf-aux.h +++ b/tools/perf/util/dwarf-aux.h @@ -19,7 +19,7 @@ const char *cu_find_realpath(Dwarf_Die *cu_die, const char *fname); const char *cu_get_comp_dir(Dwarf_Die *cu_die); /* Get a line number and file name for given address */ -int cu_find_lineinfo(Dwarf_Die *cudie, unsigned long addr, +int cu_find_lineinfo(Dwarf_Die *cudie, Dwarf_Addr addr, const char **fname, int *lineno); /* Walk on functions at given address */ diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c index ebc5e9ad35db..cec2e6cad8aa 100644 --- a/tools/perf/util/env.c +++ b/tools/perf/util/env.c @@ -186,10 +186,12 @@ void perf_env__exit(struct perf_env *env) zfree(&env->cpuid); zfree(&env->cmdline); zfree(&env->cmdline_argv); + zfree(&env->sibling_dies); zfree(&env->sibling_cores); zfree(&env->sibling_threads); zfree(&env->pmu_mappings); zfree(&env->cpu); + zfree(&env->cpu_pmu_caps); zfree(&env->numa_map); for (i = 0; i < env->nr_numa_nodes; i++) diff --git a/tools/perf/util/lzma.c b/tools/perf/util/lzma.c index 39062df02629..51424cdc3b68 100644 --- a/tools/perf/util/lzma.c +++ b/tools/perf/util/lzma.c @@ -69,7 +69,7 @@ int lzma_decompress_to_file(const char *input, int output_fd) if (ferror(infile)) { pr_err("lzma: read error: %s\n", strerror(errno)); - goto err_fclose; + goto err_lzma_end; } if (feof(infile)) @@ -83,7 +83,7 @@ int lzma_decompress_to_file(const char *input, int output_fd) if (writen(output_fd, buf_out, write_size) != write_size) { pr_err("lzma: write error: %s\n", strerror(errno)); - goto err_fclose; + goto err_lzma_end; } strm.next_out = buf_out; @@ -95,11 +95,13 @@ int lzma_decompress_to_file(const char *input, int output_fd) break; pr_err("lzma: failed %s\n", lzma_strerror(ret)); - goto err_fclose; + goto err_lzma_end; } } err = 0; +err_lzma_end: + lzma_end(&strm); err_fclose: fclose(infile); return err; diff --git a/tools/perf/util/pfm.c b/tools/perf/util/pfm.c index dd9ed56e0504..756295dedccc 100644 --- a/tools/perf/util/pfm.c +++ b/tools/perf/util/pfm.c @@ -99,7 +99,7 @@ int parse_libpfm_events_option(const struct option *opt, const char *str, grp_leader = evsel; if (grp_evt > -1) { - evsel->leader = grp_leader; + evsel__set_leader(evsel, grp_leader); grp_leader->core.nr_members++; grp_evt++; } diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index 44b90d638ad5..fc683bc41715 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c @@ -742,9 +742,13 @@ struct pmu_events_map *__weak pmu_events_map__find(void) return perf_pmu__find_map(NULL); } -static bool perf_pmu__valid_suffix(char *pmu_name, char *tok) +/* + * Suffix must be in form tok_{digits}, or tok{digits}, or same as pmu_name + * to be valid. 
+ */ +static bool perf_pmu__valid_suffix(const char *pmu_name, char *tok) { - char *p; + const char *p; if (strncmp(pmu_name, tok, strlen(tok))) return false; @@ -753,12 +757,16 @@ static bool perf_pmu__valid_suffix(char *pmu_name, char *tok) if (*p == 0) return true; - if (*p != '_') - return false; + if (*p == '_') + ++p; - ++p; - if (*p == 0 || !isdigit(*p)) - return false; + /* Ensure we end in a number */ + while (1) { + if (!isdigit(*p)) + return false; + if (*(++p) == 0) + break; + } return true; } @@ -789,12 +797,19 @@ bool pmu_uncore_alias_match(const char *pmu_name, const char *name) * match "socket" in "socketX_pmunameY" and then "pmuname" in * "pmunameY". */ - for (; tok; name += strlen(tok), tok = strtok_r(NULL, ",", &tmp)) { + while (1) { + char *next_tok = strtok_r(NULL, ",", &tmp); + name = strstr(name, tok); - if (!name || !perf_pmu__valid_suffix((char *)name, tok)) { + if (!name || + (!next_tok && !perf_pmu__valid_suffix(name, tok))) { res = false; goto out; } + if (!next_tok) + break; + tok = next_tok; + name += strlen(tok); } res = true; @@ -950,6 +965,13 @@ static struct perf_pmu *pmu_lookup(const char *name) LIST_HEAD(format); LIST_HEAD(aliases); __u32 type; + bool is_hybrid = perf_pmu__hybrid_mounted(name); + + /* + * Check pmu name for hybrid and the pmu may be invalid in sysfs + */ + if (!strncmp(name, "cpu_", 4) && !is_hybrid) + return NULL; /* * The pmu data we store & need consists of the pmu @@ -978,7 +1000,7 @@ static struct perf_pmu *pmu_lookup(const char *name) pmu->is_uncore = pmu_is_uncore(name); if (pmu->is_uncore) pmu->id = pmu_id(name); - pmu->is_hybrid = perf_pmu__hybrid_mounted(name); + pmu->is_hybrid = is_hybrid; pmu->max_precise = pmu_max_precise(name); pmu_add_cpu_aliases(&aliases, pmu); pmu_add_sys_aliases(&aliases, pmu); diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index c14e1d228e56..b2a02c9ab8ea 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -179,8 +179,10 @@ struct map *get_target_map(const char *target, struct nsinfo *nsi, bool user) struct map *map; map = dso__new_map(target); - if (map && map->dso) + if (map && map->dso) { + nsinfo__put(map->dso->nsinfo); map->dso->nsinfo = nsinfo__get(nsi); + } return map; } else { return kernel_get_module_map(target); @@ -237,8 +239,8 @@ static void clear_probe_trace_events(struct probe_trace_event *tevs, int ntevs) clear_probe_trace_event(tevs + i); } -static bool kprobe_blacklist__listed(unsigned long address); -static bool kprobe_warn_out_range(const char *symbol, unsigned long address) +static bool kprobe_blacklist__listed(u64 address); +static bool kprobe_warn_out_range(const char *symbol, u64 address) { struct map *map; bool ret = false; @@ -398,8 +400,7 @@ static int find_alternative_probe_point(struct debuginfo *dinfo, pr_debug("Symbol %s address found : %" PRIx64 "\n", pp->function, address); - ret = debuginfo__find_probe_point(dinfo, (unsigned long)address, - result); + ret = debuginfo__find_probe_point(dinfo, address, result); if (ret <= 0) ret = (!ret) ? 
-ENOENT : ret; else { @@ -587,7 +588,7 @@ static void debuginfo_cache__exit(void) } -static int get_text_start_address(const char *exec, unsigned long *address, +static int get_text_start_address(const char *exec, u64 *address, struct nsinfo *nsi) { Elf *elf; @@ -632,7 +633,7 @@ static int find_perf_probe_point_from_dwarf(struct probe_trace_point *tp, bool is_kprobe) { struct debuginfo *dinfo = NULL; - unsigned long stext = 0; + u64 stext = 0; u64 addr = tp->address; int ret = -ENOENT; @@ -660,8 +661,7 @@ static int find_perf_probe_point_from_dwarf(struct probe_trace_point *tp, dinfo = debuginfo_cache__open(tp->module, verbose <= 0); if (dinfo) - ret = debuginfo__find_probe_point(dinfo, - (unsigned long)addr, pp); + ret = debuginfo__find_probe_point(dinfo, addr, pp); else ret = -ENOENT; @@ -676,7 +676,7 @@ error: /* Adjust symbol name and address */ static int post_process_probe_trace_point(struct probe_trace_point *tp, - struct map *map, unsigned long offs) + struct map *map, u64 offs) { struct symbol *sym; u64 addr = tp->address - offs; @@ -719,7 +719,7 @@ post_process_offline_probe_trace_events(struct probe_trace_event *tevs, int ntevs, const char *pathname) { struct map *map; - unsigned long stext = 0; + u64 stext = 0; int i, ret = 0; /* Prepare a map for offline binary */ @@ -745,7 +745,7 @@ static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs, struct nsinfo *nsi) { int i, ret = 0; - unsigned long stext = 0; + u64 stext = 0; if (!exec) return 0; @@ -790,7 +790,7 @@ post_process_module_probe_trace_events(struct probe_trace_event *tevs, mod_name = find_module_name(module); for (i = 0; i < ntevs; i++) { ret = post_process_probe_trace_point(&tevs[i].point, - map, (unsigned long)text_offs); + map, text_offs); if (ret < 0) break; tevs[i].point.module = @@ -1534,7 +1534,7 @@ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev) * so tmp[1] should always valid (but could be '\0'). */ if (tmp && !strncmp(tmp, "0x", 2)) { - pp->abs_address = strtoul(pp->function, &tmp, 0); + pp->abs_address = strtoull(pp->function, &tmp, 0); if (*tmp != '\0') { semantic_error("Invalid absolute address.\n"); return -EINVAL; @@ -1909,7 +1909,7 @@ int parse_probe_trace_command(const char *cmd, struct probe_trace_event *tev) argv[i] = NULL; argc -= 1; } else - tp->address = strtoul(fmt1_str, NULL, 0); + tp->address = strtoull(fmt1_str, NULL, 0); } else { /* Only the symbol-based probe has offset */ tp->symbol = strdup(fmt1_str); @@ -2155,7 +2155,7 @@ synthesize_uprobe_trace_def(struct probe_trace_point *tp, struct strbuf *buf) return -EINVAL; /* Use the tp->address for uprobes */ - err = strbuf_addf(buf, "%s:0x%lx", tp->module, tp->address); + err = strbuf_addf(buf, "%s:0x%" PRIx64, tp->module, tp->address); if (err >= 0 && tp->ref_ctr_offset) { if (!uprobe_ref_ctr_is_supported()) @@ -2170,7 +2170,7 @@ synthesize_kprobe_trace_def(struct probe_trace_point *tp, struct strbuf *buf) { if (!strncmp(tp->symbol, "0x", 2)) { /* Absolute address. See try_to_find_absolute_address() */ - return strbuf_addf(buf, "%s%s0x%lx", tp->module ?: "", + return strbuf_addf(buf, "%s%s0x%" PRIx64, tp->module ?: "", tp->module ? 
":" : "", tp->address); } else { return strbuf_addf(buf, "%s%s%s+%lu", tp->module ?: "", @@ -2269,7 +2269,7 @@ static int convert_to_perf_probe_point(struct probe_trace_point *tp, pp->function = strdup(tp->symbol); pp->offset = tp->offset; } else { - ret = e_snprintf(buf, 128, "0x%" PRIx64, (u64)tp->address); + ret = e_snprintf(buf, 128, "0x%" PRIx64, tp->address); if (ret < 0) return ret; pp->function = strdup(buf); @@ -2450,8 +2450,8 @@ void clear_probe_trace_event(struct probe_trace_event *tev) struct kprobe_blacklist_node { struct list_head list; - unsigned long start; - unsigned long end; + u64 start; + u64 end; char *symbol; }; @@ -2496,7 +2496,7 @@ static int kprobe_blacklist__load(struct list_head *blacklist) } INIT_LIST_HEAD(&node->list); list_add_tail(&node->list, blacklist); - if (sscanf(buf, "0x%lx-0x%lx", &node->start, &node->end) != 2) { + if (sscanf(buf, "0x%" PRIx64 "-0x%" PRIx64, &node->start, &node->end) != 2) { ret = -EINVAL; break; } @@ -2512,7 +2512,7 @@ static int kprobe_blacklist__load(struct list_head *blacklist) ret = -ENOMEM; break; } - pr_debug2("Blacklist: 0x%lx-0x%lx, %s\n", + pr_debug2("Blacklist: 0x%" PRIx64 "-0x%" PRIx64 ", %s\n", node->start, node->end, node->symbol); ret++; } @@ -2524,8 +2524,7 @@ static int kprobe_blacklist__load(struct list_head *blacklist) } static struct kprobe_blacklist_node * -kprobe_blacklist__find_by_address(struct list_head *blacklist, - unsigned long address) +kprobe_blacklist__find_by_address(struct list_head *blacklist, u64 address) { struct kprobe_blacklist_node *node; @@ -2553,7 +2552,7 @@ static void kprobe_blacklist__release(void) kprobe_blacklist__delete(&kprobe_blacklist); } -static bool kprobe_blacklist__listed(unsigned long address) +static bool kprobe_blacklist__listed(u64 address) { return !!kprobe_blacklist__find_by_address(&kprobe_blacklist, address); } @@ -3221,7 +3220,7 @@ static int try_to_find_absolute_address(struct perf_probe_event *pev, * In __add_probe_trace_events, a NULL symbol is interpreted as * invalid. 
*/ - if (asprintf(&tp->symbol, "0x%lx", tp->address) < 0) + if (asprintf(&tp->symbol, "0x%" PRIx64, tp->address) < 0) goto errout; /* For kprobe, check range */ @@ -3232,7 +3231,7 @@ static int try_to_find_absolute_address(struct perf_probe_event *pev, goto errout; } - if (asprintf(&tp->realname, "abs_%lx", tp->address) < 0) + if (asprintf(&tp->realname, "abs_%" PRIx64, tp->address) < 0) goto errout; if (pev->target) { diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h index 65769d7949a3..8ad5b1579f1d 100644 --- a/tools/perf/util/probe-event.h +++ b/tools/perf/util/probe-event.h @@ -33,7 +33,7 @@ struct probe_trace_point { char *module; /* Module name */ unsigned long offset; /* Offset from symbol */ unsigned long ref_ctr_offset; /* SDT reference counter offset */ - unsigned long address; /* Actual address of the trace point */ + u64 address; /* Actual address of the trace point */ bool retprobe; /* Return probe flag */ }; @@ -70,7 +70,7 @@ struct perf_probe_point { bool retprobe; /* Return probe flag */ char *lazy_line; /* Lazy matching pattern */ unsigned long offset; /* Offset from function entry */ - unsigned long abs_address; /* Absolute address of the point */ + u64 abs_address; /* Absolute address of the point */ }; /* Perf probe probing argument field chain */ diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c index f9a6cbcd6415..3d50de3217d5 100644 --- a/tools/perf/util/probe-file.c +++ b/tools/perf/util/probe-file.c @@ -377,11 +377,11 @@ int probe_file__del_events(int fd, struct strfilter *filter) ret = probe_file__get_events(fd, filter, namelist); if (ret < 0) - return ret; + goto out; ret = probe_file__del_strlist(fd, namelist); +out: strlist__delete(namelist); - return ret; } diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index 02ef0d78053b..50d861a80f57 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -668,7 +668,7 @@ static int convert_to_trace_point(Dwarf_Die *sp_die, Dwfl_Module *mod, } tp->offset = (unsigned long)(paddr - eaddr); - tp->address = (unsigned long)paddr; + tp->address = paddr; tp->symbol = strdup(symbol); if (!tp->symbol) return -ENOMEM; @@ -1707,7 +1707,7 @@ int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs, } /* Reverse search */ -int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr, +int debuginfo__find_probe_point(struct debuginfo *dbg, u64 addr, struct perf_probe_point *ppt) { Dwarf_Die cudie, spdie, indie; @@ -1720,14 +1720,14 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr, addr += baseaddr; /* Find cu die */ if (!dwarf_addrdie(dbg->dbg, (Dwarf_Addr)addr, &cudie)) { - pr_warning("Failed to find debug information for address %lx\n", + pr_warning("Failed to find debug information for address %" PRIx64 "\n", addr); ret = -EINVAL; goto end; } /* Find a corresponding line (filename and lineno) */ - cu_find_lineinfo(&cudie, addr, &fname, &lineno); + cu_find_lineinfo(&cudie, (Dwarf_Addr)addr, &fname, &lineno); /* Don't care whether it failed or not */ /* Find a corresponding function (name, baseline and baseaddr) */ @@ -1742,7 +1742,7 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr, } fname = dwarf_decl_file(&spdie); - if (addr == (unsigned long)baseaddr) { + if (addr == baseaddr) { /* Function entry - Relative line number is 0 */ lineno = baseline; goto post; @@ -1788,7 +1788,7 @@ post: if (lineno) ppt->line = lineno - baseline; else if (basefunc) { - 
ppt->offset = addr - (unsigned long)baseaddr; + ppt->offset = addr - baseaddr; func = basefunc; } @@ -1828,8 +1828,7 @@ static int line_range_add_line(const char *src, unsigned int lineno, } static int line_range_walk_cb(const char *fname, int lineno, - Dwarf_Addr addr __maybe_unused, - void *data) + Dwarf_Addr addr, void *data) { struct line_finder *lf = data; const char *__fname; diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h index 2febb5875678..8bc1c80d3c1c 100644 --- a/tools/perf/util/probe-finder.h +++ b/tools/perf/util/probe-finder.h @@ -46,7 +46,7 @@ int debuginfo__find_trace_events(struct debuginfo *dbg, struct probe_trace_event **tevs); /* Find a perf_probe_point from debuginfo */ -int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr, +int debuginfo__find_probe_point(struct debuginfo *dbg, u64 addr, struct perf_probe_point *ppt); int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs, diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index e9c929a39973..51f727402912 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -306,6 +306,7 @@ void perf_session__delete(struct perf_session *session) evlist__delete(session->evlist); perf_data__close(session->data); } + trace_event__cleanup(&session->tevent); free(session); } diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 88ce47f2547e..568a88c001c6 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -3370,7 +3370,7 @@ static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int add_key(sb, s[i].name, llen); } -const char *sort_help(const char *prefix) +char *sort_help(const char *prefix) { struct strbuf sb; char *s; diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h index 87a092645aa7..b67c469aba79 100644 --- a/tools/perf/util/sort.h +++ b/tools/perf/util/sort.h @@ -302,7 +302,7 @@ void reset_output_field(void); void sort__setup_elide(FILE *fp); void perf_hpp__set_elide(int idx, bool elide); -const char *sort_help(const char *prefix); +char *sort_help(const char *prefix); int report_parse_ignore_callees_opt(const struct option *opt, const char *arg, int unset); diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c index 83a2bc02df15..588601000f3f 100644 --- a/tools/perf/util/stat-display.c +++ b/tools/perf/util/stat-display.c @@ -596,6 +596,18 @@ static void collect_all_aliases(struct perf_stat_config *config, struct evsel *c } } +static bool is_uncore(struct evsel *evsel) +{ + struct perf_pmu *pmu = evsel__find_pmu(evsel); + + return pmu && pmu->is_uncore; +} + +static bool hybrid_uniquify(struct evsel *evsel) +{ + return perf_pmu__has_hybrid() && !is_uncore(evsel); +} + static bool collect_data(struct perf_stat_config *config, struct evsel *counter, void (*cb)(struct perf_stat_config *config, struct evsel *counter, void *data, bool first), @@ -604,7 +616,7 @@ static bool collect_data(struct perf_stat_config *config, struct evsel *counter, if (counter->merged_stat) return false; cb(config, counter, data, true); - if (config->no_merge) + if (config->no_merge || hybrid_uniquify(counter)) uniquify_event_name(counter); else if (counter->auto_merge_stats) collect_all_aliases(config, counter, cb, data); diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py index be8d8d4a4e08..6276ce0c0196 100755 --- a/tools/testing/kunit/kunit.py +++ b/tools/testing/kunit/kunit.py @@ -12,6 +12,8 @@ import sys import os import time +assert sys.version_info 
>= (3, 7), "Python version is too old" + from collections import namedtuple from enum import Enum, auto diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py index 90bc007f1f93..2c6f916ccbaf 100644 --- a/tools/testing/kunit/kunit_kernel.py +++ b/tools/testing/kunit/kunit_kernel.py @@ -6,15 +6,13 @@ # Author: Felix Guo <felixguoxiuping@gmail.com> # Author: Brendan Higgins <brendanhiggins@google.com> -from __future__ import annotations import importlib.util import logging import subprocess import os import shutil import signal -from typing import Iterator -from typing import Optional +from typing import Iterator, Optional, Tuple from contextlib import ExitStack @@ -208,7 +206,7 @@ def get_source_tree_ops(arch: str, cross_compile: Optional[str]) -> LinuxSourceT raise ConfigError(arch + ' is not a valid arch') def get_source_tree_ops_from_qemu_config(config_path: str, - cross_compile: Optional[str]) -> tuple[ + cross_compile: Optional[str]) -> Tuple[ str, LinuxSourceTreeOperations]: # The module name/path has very little to do with where the actual file # exists (I learned this through experimentation and could not find it diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py index c3c524b79db8..b88db3f51dc5 100644 --- a/tools/testing/kunit/kunit_parser.py +++ b/tools/testing/kunit/kunit_parser.py @@ -338,9 +338,11 @@ def bubble_up_suite_errors(test_suites: Iterable[TestSuite]) -> TestStatus: def parse_test_result(lines: LineStream) -> TestResult: consume_non_diagnostic(lines) if not lines or not parse_tap_header(lines): - return TestResult(TestStatus.NO_TESTS, [], lines) + return TestResult(TestStatus.FAILURE_TO_PARSE_TESTS, [], lines) expected_test_suite_num = parse_test_plan(lines) - if not expected_test_suite_num: + if expected_test_suite_num == 0: + return TestResult(TestStatus.NO_TESTS, [], lines) + elif expected_test_suite_num is None: return TestResult(TestStatus.FAILURE_TO_PARSE_TESTS, [], lines) test_suites = [] for i in range(1, expected_test_suite_num + 1): diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py index bdae0e5f6197..75045aa0f8a1 100755 --- a/tools/testing/kunit/kunit_tool_test.py +++ b/tools/testing/kunit/kunit_tool_test.py @@ -157,8 +157,18 @@ class KUnitParserTest(unittest.TestCase): kunit_parser.TestStatus.FAILURE, result.status) + def test_no_header(self): + empty_log = test_data_path('test_is_test_passed-no_tests_run_no_header.log') + with open(empty_log) as file: + result = kunit_parser.parse_run_tests( + kunit_parser.extract_tap_lines(file.readlines())) + self.assertEqual(0, len(result.suites)) + self.assertEqual( + kunit_parser.TestStatus.FAILURE_TO_PARSE_TESTS, + result.status) + def test_no_tests(self): - empty_log = test_data_path('test_is_test_passed-no_tests_run.log') + empty_log = test_data_path('test_is_test_passed-no_tests_run_with_header.log') with open(empty_log) as file: result = kunit_parser.parse_run_tests( kunit_parser.extract_tap_lines(file.readlines())) @@ -173,7 +183,7 @@ class KUnitParserTest(unittest.TestCase): with open(crash_log) as file: result = kunit_parser.parse_run_tests( kunit_parser.extract_tap_lines(file.readlines())) - print_mock.assert_any_call(StrContains('no tests run!')) + print_mock.assert_any_call(StrContains('could not parse test results!')) print_mock.stop() file.close() @@ -309,7 +319,7 @@ class KUnitJsonTest(unittest.TestCase): result["sub_groups"][1]["test_cases"][0]) def test_no_tests_json(self): - result = 
self._json_for('test_is_test_passed-no_tests_run.log') + result = self._json_for('test_is_test_passed-no_tests_run_with_header.log') self.assertEqual(0, len(result['sub_groups'])) class StrContains(str): diff --git a/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run.log b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_no_header.log index ba69f5c94b75..ba69f5c94b75 100644 --- a/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run.log +++ b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_no_header.log diff --git a/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_with_header.log b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_with_header.log new file mode 100644 index 000000000000..5f48ee659d40 --- /dev/null +++ b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_with_header.log @@ -0,0 +1,2 @@ +TAP version 14 +1..0 diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c index 54f367cbadae..b1bff5fb0f65 100644 --- a/tools/testing/nvdimm/test/nfit.c +++ b/tools/testing/nvdimm/test/nfit.c @@ -434,7 +434,7 @@ static int nd_intel_test_finish_query(struct nfit_test *t, dev_dbg(dev, "%s: transition out verify\n", __func__); fw->state = FW_STATE_UPDATED; fw->missed_activate = false; - /* fall through */ + fallthrough; case FW_STATE_UPDATED: nd_cmd->status = 0; /* bogus test version */ diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c index ee27d68d2a1c..b5940e6ca67c 100644 --- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c +++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c @@ -715,6 +715,8 @@ out: bpf_object__close(obj); } +#include "tailcall_bpf2bpf4.skel.h" + /* test_tailcall_bpf2bpf_4 checks that tailcall counter is correctly preserved * across tailcalls combined with bpf2bpf calls. for making sure that tailcall * counter behaves correctly, bpf program will go through following flow: @@ -727,10 +729,15 @@ out: * the loop begins. At the end of the test make sure that the global counter is * equal to 31, because tailcall counter includes the first two tailcalls * whereas global counter is incremented only on loop presented on flow above. + * + * The noise parameter is used to insert bpf_map_lookup calls into the logic + * to force the verifier to patch instructions. This allows us to ensure jump + * logic remains correct with instruction movement.
*/ -static void test_tailcall_bpf2bpf_4(void) +static void test_tailcall_bpf2bpf_4(bool noise) { - int err, map_fd, prog_fd, main_fd, data_fd, i, val; + int err, map_fd, prog_fd, main_fd, data_fd, i; + struct tailcall_bpf2bpf4__bss val; struct bpf_map *prog_array, *data_map; struct bpf_program *prog; struct bpf_object *obj; @@ -774,11 +781,6 @@ static void test_tailcall_bpf2bpf_4(void) goto out; } - err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0, - &duration, &retval, NULL); - CHECK(err || retval != sizeof(pkt_v4) * 3, "tailcall", "err %d errno %d retval %d\n", - err, errno, retval); - data_map = bpf_object__find_map_by_name(obj, "tailcall.bss"); if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map))) return; @@ -788,9 +790,21 @@ static void test_tailcall_bpf2bpf_4(void) return; i = 0; + val.noise = noise; + val.count = 0; + err = bpf_map_update_elem(data_fd, &i, &val, BPF_ANY); + if (CHECK_FAIL(err)) + goto out; + + err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0, + &duration, &retval, NULL); + CHECK(err || retval != sizeof(pkt_v4) * 3, "tailcall", "err %d errno %d retval %d\n", + err, errno, retval); + + i = 0; err = bpf_map_lookup_elem(data_fd, &i, &val); - CHECK(err || val != 31, "tailcall count", "err %d errno %d count %d\n", - err, errno, val); + CHECK(err || val.count != 31, "tailcall count", "err %d errno %d count %d\n", + err, errno, val.count); out: bpf_object__close(obj); @@ -815,5 +829,7 @@ void test_tailcalls(void) if (test__start_subtest("tailcall_bpf2bpf_3")) test_tailcall_bpf2bpf_3(); if (test__start_subtest("tailcall_bpf2bpf_4")) - test_tailcall_bpf2bpf_4(); + test_tailcall_bpf2bpf_4(false); + if (test__start_subtest("tailcall_bpf2bpf_5")) + test_tailcall_bpf2bpf_4(true); } diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c index 77df6d4db895..e89368a50b97 100644 --- a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c +++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c @@ -3,6 +3,13 @@ #include <bpf/bpf_helpers.h> struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(__u32)); +} nop_table SEC(".maps"); + +struct { __uint(type, BPF_MAP_TYPE_PROG_ARRAY); __uint(max_entries, 3); __uint(key_size, sizeof(__u32)); @@ -10,10 +17,21 @@ struct { } jmp_table SEC(".maps"); int count = 0; +int noise = 0; + +__always_inline int subprog_noise(void) +{ + __u32 key = 0; + + bpf_map_lookup_elem(&nop_table, &key); + return 0; +} __noinline int subprog_tail_2(struct __sk_buff *skb) { + if (noise) + subprog_noise(); bpf_tail_call_static(skb, &jmp_table, 2); return skb->len * 3; } diff --git a/tools/testing/selftests/bpf/verifier/dead_code.c b/tools/testing/selftests/bpf/verifier/dead_code.c index 2c8935b3e65d..ee454327e5c6 100644 --- a/tools/testing/selftests/bpf/verifier/dead_code.c +++ b/tools/testing/selftests/bpf/verifier/dead_code.c @@ -159,3 +159,15 @@ .result = ACCEPT, .retval = 2, }, +{ + "dead code: zero extension", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -4), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, diff --git a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c index a3e593ddfafc..2debba4e8a3a 100644 --- a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c +++ 
b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c @@ -1,4 +1,233 @@ { + "map access: known scalar += value_ptr unknown vs const", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, len)), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), + BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 4), + BPF_MOV64_IMM(BPF_REG_1, 6), + BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_MOV64_IMM(BPF_REG_1, 3), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_16b = { 5 }, + .fixup_map_array_48b = { 8 }, + .result_unpriv = REJECT, + .errstr_unpriv = "R1 tried to add from different maps, paths or scalars", + .result = ACCEPT, + .retval = 1, +}, +{ + "map access: known scalar += value_ptr const vs unknown", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, len)), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), + BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 2), + BPF_MOV64_IMM(BPF_REG_1, 3), + BPF_JMP_IMM(BPF_JA, 0, 0, 3), + BPF_MOV64_IMM(BPF_REG_1, 6), + BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_16b = { 5 }, + .fixup_map_array_48b = { 8 }, + .result_unpriv = REJECT, + .errstr_unpriv = "R1 tried to add from different maps, paths or scalars", + .result = ACCEPT, + .retval = 1, +}, +{ + "map access: known scalar += value_ptr const vs const (ne)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, len)), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), + BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 2), + BPF_MOV64_IMM(BPF_REG_1, 3), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_MOV64_IMM(BPF_REG_1, 5), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_16b = { 5 }, + .fixup_map_array_48b = { 8 }, + .result_unpriv = REJECT, + .errstr_unpriv = "R1 tried to add from different maps, paths or scalars", + .result = ACCEPT, + .retval = 1, +}, +{ + "map access: known scalar += value_ptr const vs const (eq)", + .insns = { + BPF_LDX_MEM(BPF_W, 
BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, len)), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), + BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 2), + BPF_MOV64_IMM(BPF_REG_1, 5), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_MOV64_IMM(BPF_REG_1, 5), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_16b = { 5 }, + .fixup_map_array_48b = { 8 }, + .result = ACCEPT, + .retval = 1, +}, +{ + "map access: known scalar += value_ptr unknown vs unknown (eq)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, len)), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), + BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 4), + BPF_MOV64_IMM(BPF_REG_1, 6), + BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7), + BPF_JMP_IMM(BPF_JA, 0, 0, 3), + BPF_MOV64_IMM(BPF_REG_1, 6), + BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_16b = { 5 }, + .fixup_map_array_48b = { 8 }, + .result = ACCEPT, + .retval = 1, +}, +{ + "map access: known scalar += value_ptr unknown vs unknown (lt)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, len)), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), + BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 4), + BPF_MOV64_IMM(BPF_REG_1, 6), + BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x3), + BPF_JMP_IMM(BPF_JA, 0, 0, 3), + BPF_MOV64_IMM(BPF_REG_1, 6), + BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_16b = { 5 }, + .fixup_map_array_48b = { 8 }, + .result_unpriv = REJECT, + .errstr_unpriv = "R1 tried to add from different maps, paths or scalars", + .result = ACCEPT, + .retval = 1, +}, +{ + "map access: known scalar += value_ptr unknown vs unknown (gt)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, len)), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + 
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), + BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 4), + BPF_MOV64_IMM(BPF_REG_1, 6), + BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7), + BPF_JMP_IMM(BPF_JA, 0, 0, 3), + BPF_MOV64_IMM(BPF_REG_1, 6), + BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x3), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_16b = { 5 }, + .fixup_map_array_48b = { 8 }, + .result_unpriv = REJECT, + .errstr_unpriv = "R1 tried to add from different maps, paths or scalars", + .result = ACCEPT, + .retval = 1, +}, +{ "map access: known scalar += value_ptr from different maps", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, diff --git a/tools/testing/selftests/filesystems/binderfs/binderfs_test.c b/tools/testing/selftests/filesystems/binderfs/binderfs_test.c index 477cbb042f5b..0315955ff0f4 100644 --- a/tools/testing/selftests/filesystems/binderfs/binderfs_test.c +++ b/tools/testing/selftests/filesystems/binderfs/binderfs_test.c @@ -62,6 +62,9 @@ static int __do_binderfs_test(struct __test_metadata *_metadata) struct binder_version version = { 0 }; char binderfs_mntpt[] = P_tmpdir "/binderfs_XXXXXX", device_path[sizeof(P_tmpdir "/binderfs_XXXXXX/") + BINDERFS_MAX_NAME]; + static const char * const binder_features[] = { + "oneway_spam_detection", + }; change_mountns(_metadata); @@ -150,6 +153,20 @@ static int __do_binderfs_test(struct __test_metadata *_metadata) } /* success: binder-control device removal failed as expected */ + + for (int i = 0; i < ARRAY_SIZE(binder_features); i++) { + snprintf(device_path, sizeof(device_path), "%s/features/%s", + binderfs_mntpt, binder_features[i]); + fd = open(device_path, O_CLOEXEC | O_RDONLY); + EXPECT_GE(fd, 0) { + TH_LOG("%s - Failed to open binder feature: %s", + strerror(errno), binder_features[i]); + goto umount; + } + close(fd); + } + + /* success: binder feature files found */ result = 0; umount: diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore index 06a351b4f93b..0709af0144c8 100644 --- a/tools/testing/selftests/kvm/.gitignore +++ b/tools/testing/selftests/kvm/.gitignore @@ -38,6 +38,7 @@ /x86_64/xen_vmcall_test /x86_64/xss_msr_test /x86_64/vmx_pmu_msrs_test +/access_tracking_perf_test /demand_paging_test /dirty_log_test /dirty_log_perf_test diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index b853be2ae3c6..5832f510a16c 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -71,6 +71,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/tsc_msrs_test TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_msrs_test TEST_GEN_PROGS_x86_64 += x86_64/xen_shinfo_test TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test +TEST_GEN_PROGS_x86_64 += access_tracking_perf_test TEST_GEN_PROGS_x86_64 += demand_paging_test TEST_GEN_PROGS_x86_64 += dirty_log_test TEST_GEN_PROGS_x86_64 += dirty_log_perf_test diff --git a/tools/testing/selftests/kvm/aarch64/get-reg-list.c b/tools/testing/selftests/kvm/aarch64/get-reg-list.c index a16c8f05366c..cc898181faab 100644 --- a/tools/testing/selftests/kvm/aarch64/get-reg-list.c +++ 
b/tools/testing/selftests/kvm/aarch64/get-reg-list.c @@ -1019,7 +1019,8 @@ static __u64 sve_rejects_set[] = { #define VREGS_SUBLIST \ { "vregs", .regs = vregs, .regs_n = ARRAY_SIZE(vregs), } #define PMU_SUBLIST \ - { "pmu", .regs = pmu_regs, .regs_n = ARRAY_SIZE(pmu_regs), } + { "pmu", .capability = KVM_CAP_ARM_PMU_V3, .feature = KVM_ARM_VCPU_PMU_V3, \ + .regs = pmu_regs, .regs_n = ARRAY_SIZE(pmu_regs), } #define SVE_SUBLIST \ { "sve", .capability = KVM_CAP_ARM_SVE, .feature = KVM_ARM_VCPU_SVE, .finalize = true, \ .regs = sve_regs, .regs_n = ARRAY_SIZE(sve_regs), \ diff --git a/tools/testing/selftests/kvm/access_tracking_perf_test.c b/tools/testing/selftests/kvm/access_tracking_perf_test.c new file mode 100644 index 000000000000..e2baa187a21e --- /dev/null +++ b/tools/testing/selftests/kvm/access_tracking_perf_test.c @@ -0,0 +1,429 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * access_tracking_perf_test + * + * Copyright (C) 2021, Google, Inc. + * + * This test measures the performance effects of KVM's access tracking. + * Access tracking is driven by the MMU notifiers test_young, clear_young, and + * clear_flush_young. These notifiers do not have a direct userspace API, + * however, the clear_young notifier can be triggered by marking pages as idle + * in /sys/kernel/mm/page_idle/bitmap. This test leverages that mechanism to + * enable access tracking on guest memory. + * + * To measure performance this test runs a VM with a configurable number of + * vCPUs that each touch every page in disjoint regions of memory. Performance + * is measured as the time it takes all vCPUs to finish touching their + * predefined region. + * + * Note that a deterministic correctness test of access tracking is not possible + * using page_idle as it exists today. This is for a few reasons: + * + * 1. page_idle only issues clear_young notifiers, which lack a TLB flush. This + * means subsequent guest accesses are not guaranteed to see page table + * updates made by KVM until some time in the future. + * + * 2. page_idle only operates on LRU pages. Newly allocated pages are not + * immediately added to LRU lists. Instead they are held in a "pagevec", + * which is drained to LRU lists some time in the future. There is no + * userspace API to force this drain to occur. + * + * These limitations are worked around in this test by using a large enough + * region of memory for each vCPU such that the number of translations cached in + * the TLB and the number of pages held in pagevecs are a small fraction of the + * overall workload. If either of those conditions does not hold, this test + * will fail rather than silently pass. + */ +#include <inttypes.h> +#include <limits.h> +#include <pthread.h> +#include <sys/mman.h> +#include <sys/types.h> +#include <sys/stat.h> + +#include "kvm_util.h" +#include "test_util.h" +#include "perf_test_util.h" +#include "guest_modes.h" + +/* Global variable used to synchronize all of the vCPU threads. */ +static int iteration = -1; + +/* Defines what vCPU threads should do during a given iteration. */ +static enum { + /* Run the vCPU to access all its memory. */ + ITERATION_ACCESS_MEMORY, + /* Mark the vCPU's memory idle in page_idle. */ + ITERATION_MARK_IDLE, +} iteration_work; + +/* Set to true when vCPU threads should exit. */ +static bool done; + +/* The iteration that was last completed by each vCPU. */ +static int vcpu_last_completed_iteration[KVM_MAX_VCPUS]; + +/* Whether to overlap the regions of memory vCPUs access.
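+ * (When enabled via -o, all vCPUs share a single region; only vCPU 0 then
+ * performs the page_idle marking for it, as mark_vcpu_memory_idle() below
+ * returns early for every other vcpu_id.)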
*/ +static bool overlap_memory_access; + +struct test_params { + /* The backing source for the region of memory. */ + enum vm_mem_backing_src_type backing_src; + + /* The amount of memory to allocate for each vCPU. */ + uint64_t vcpu_memory_bytes; + + /* The number of vCPUs to create in the VM. */ + int vcpus; +}; + +static uint64_t pread_uint64(int fd, const char *filename, uint64_t index) +{ + uint64_t value; + off_t offset = index * sizeof(value); + + TEST_ASSERT(pread(fd, &value, sizeof(value), offset) == sizeof(value), + "pread from %s offset 0x%" PRIx64 " failed!", + filename, offset); + + return value; + +} + +#define PAGEMAP_PRESENT (1ULL << 63) +#define PAGEMAP_PFN_MASK ((1ULL << 55) - 1) + +static uint64_t lookup_pfn(int pagemap_fd, struct kvm_vm *vm, uint64_t gva) +{ + uint64_t hva = (uint64_t) addr_gva2hva(vm, gva); + uint64_t entry; + uint64_t pfn; + + entry = pread_uint64(pagemap_fd, "pagemap", hva / getpagesize()); + if (!(entry & PAGEMAP_PRESENT)) + return 0; + + pfn = entry & PAGEMAP_PFN_MASK; + if (!pfn) { + print_skip("Looking up PFNs requires CAP_SYS_ADMIN"); + exit(KSFT_SKIP); + } + + return pfn; +} + +static bool is_page_idle(int page_idle_fd, uint64_t pfn) +{ + uint64_t bits = pread_uint64(page_idle_fd, "page_idle", pfn / 64); + + return !!((bits >> (pfn % 64)) & 1); +} + +static void mark_page_idle(int page_idle_fd, uint64_t pfn) +{ + uint64_t bits = 1ULL << (pfn % 64); + + TEST_ASSERT(pwrite(page_idle_fd, &bits, 8, 8 * (pfn / 64)) == 8, + "Set page_idle bits for PFN 0x%" PRIx64, pfn); +} + +static void mark_vcpu_memory_idle(struct kvm_vm *vm, int vcpu_id) +{ + uint64_t base_gva = perf_test_args.vcpu_args[vcpu_id].gva; + uint64_t pages = perf_test_args.vcpu_args[vcpu_id].pages; + uint64_t page; + uint64_t still_idle = 0; + uint64_t no_pfn = 0; + int page_idle_fd; + int pagemap_fd; + + /* If vCPUs are using an overlapping region, let vCPU 0 mark it idle. */ + if (overlap_memory_access && vcpu_id) + return; + + page_idle_fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR); + TEST_ASSERT(page_idle_fd > 0, "Failed to open page_idle."); + + pagemap_fd = open("/proc/self/pagemap", O_RDONLY); + TEST_ASSERT(pagemap_fd > 0, "Failed to open pagemap."); + + for (page = 0; page < pages; page++) { + uint64_t gva = base_gva + page * perf_test_args.guest_page_size; + uint64_t pfn = lookup_pfn(pagemap_fd, vm, gva); + + if (!pfn) { + no_pfn++; + continue; + } + + if (is_page_idle(page_idle_fd, pfn)) { + still_idle++; + continue; + } + + mark_page_idle(page_idle_fd, pfn); + } + + /* + * Assumption: Less than 1% of pages are going to be swapped out from + * under us during this test. + */ + TEST_ASSERT(no_pfn < pages / 100, + "vCPU %d: No PFN for %" PRIu64 " out of %" PRIu64 " pages.", + vcpu_id, no_pfn, pages); + + /* + * Test that at least 90% of memory has been marked idle (the rest might + * not be marked idle because the pages have not yet made it to an LRU + * list or the translations are still cached in the TLB). 90% is + * arbitrary; high enough that we ensure most memory access went through + * access tracking but low enough as to not make the test too brittle + * over time and across architectures. 
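+	 * As a worked example under the defaults (1G per vCPU and, in most
+	 * guest modes, 4K pages), pages = 262144, so the assertion below
+	 * tolerates up to 26213 pages that still look idle.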
+ */ + TEST_ASSERT(still_idle < pages / 10, + "vCPU%d: Too many pages still idle (%"PRIu64 " out of %" + PRIu64 ").\n", + vcpu_id, still_idle, pages); + + close(page_idle_fd); + close(pagemap_fd); +} + +static void assert_ucall(struct kvm_vm *vm, uint32_t vcpu_id, + uint64_t expected_ucall) +{ + struct ucall uc; + uint64_t actual_ucall = get_ucall(vm, vcpu_id, &uc); + + TEST_ASSERT(expected_ucall == actual_ucall, + "Guest exited unexpectedly (expected ucall %" PRIu64 + ", got %" PRIu64 ")", + expected_ucall, actual_ucall); +} + +static bool spin_wait_for_next_iteration(int *current_iteration) +{ + int last_iteration = *current_iteration; + + do { + if (READ_ONCE(done)) + return false; + + *current_iteration = READ_ONCE(iteration); + } while (last_iteration == *current_iteration); + + return true; +} + +static void *vcpu_thread_main(void *arg) +{ + struct perf_test_vcpu_args *vcpu_args = arg; + struct kvm_vm *vm = perf_test_args.vm; + int vcpu_id = vcpu_args->vcpu_id; + int current_iteration = -1; + + vcpu_args_set(vm, vcpu_id, 1, vcpu_id); + + while (spin_wait_for_next_iteration(¤t_iteration)) { + switch (READ_ONCE(iteration_work)) { + case ITERATION_ACCESS_MEMORY: + vcpu_run(vm, vcpu_id); + assert_ucall(vm, vcpu_id, UCALL_SYNC); + break; + case ITERATION_MARK_IDLE: + mark_vcpu_memory_idle(vm, vcpu_id); + break; + }; + + vcpu_last_completed_iteration[vcpu_id] = current_iteration; + } + + return NULL; +} + +static void spin_wait_for_vcpu(int vcpu_id, int target_iteration) +{ + while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id]) != + target_iteration) { + continue; + } +} + +/* The type of memory accesses to perform in the VM. */ +enum access_type { + ACCESS_READ, + ACCESS_WRITE, +}; + +static void run_iteration(struct kvm_vm *vm, int vcpus, const char *description) +{ + struct timespec ts_start; + struct timespec ts_elapsed; + int next_iteration; + int vcpu_id; + + /* Kick off the vCPUs by incrementing iteration. */ + next_iteration = ++iteration; + + clock_gettime(CLOCK_MONOTONIC, &ts_start); + + /* Wait for all vCPUs to finish the iteration. */ + for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) + spin_wait_for_vcpu(vcpu_id, next_iteration); + + ts_elapsed = timespec_elapsed(ts_start); + pr_info("%-30s: %ld.%09lds\n", + description, ts_elapsed.tv_sec, ts_elapsed.tv_nsec); +} + +static void access_memory(struct kvm_vm *vm, int vcpus, enum access_type access, + const char *description) +{ + perf_test_args.wr_fract = (access == ACCESS_READ) ? INT_MAX : 1; + sync_global_to_guest(vm, perf_test_args); + iteration_work = ITERATION_ACCESS_MEMORY; + run_iteration(vm, vcpus, description); +} + +static void mark_memory_idle(struct kvm_vm *vm, int vcpus) +{ + /* + * Even though this parallelizes the work across vCPUs, this is still a + * very slow operation because page_idle forces the test to mark one pfn + * at a time and the clear_young notifier serializes on the KVM MMU + * lock. 
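+	 *
+	 * (For reference, the page_idle protocol used by mark_page_idle() and
+	 * is_page_idle() above: each u64 in the bitmap covers 64 PFNs, writing
+	 * a set bit marks the corresponding page idle, and a bit that later
+	 * reads back as still set means no tracked access touched the page in
+	 * between.)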
+ */ + pr_debug("Marking VM memory idle (slow)...\n"); + iteration_work = ITERATION_MARK_IDLE; + run_iteration(vm, vcpus, "Mark memory idle"); +} + +static pthread_t *create_vcpu_threads(int vcpus) +{ + pthread_t *vcpu_threads; + int i; + + vcpu_threads = malloc(vcpus * sizeof(vcpu_threads[0])); + TEST_ASSERT(vcpu_threads, "Failed to allocate vcpu_threads."); + + for (i = 0; i < vcpus; i++) { + vcpu_last_completed_iteration[i] = iteration; + pthread_create(&vcpu_threads[i], NULL, vcpu_thread_main, + &perf_test_args.vcpu_args[i]); + } + + return vcpu_threads; +} + +static void terminate_vcpu_threads(pthread_t *vcpu_threads, int vcpus) +{ + int i; + + /* Set done to signal the vCPU threads to exit */ + done = true; + + for (i = 0; i < vcpus; i++) + pthread_join(vcpu_threads[i], NULL); +} + +static void run_test(enum vm_guest_mode mode, void *arg) +{ + struct test_params *params = arg; + struct kvm_vm *vm; + pthread_t *vcpu_threads; + int vcpus = params->vcpus; + + vm = perf_test_create_vm(mode, vcpus, params->vcpu_memory_bytes, + params->backing_src); + + perf_test_setup_vcpus(vm, vcpus, params->vcpu_memory_bytes, + !overlap_memory_access); + + vcpu_threads = create_vcpu_threads(vcpus); + + pr_info("\n"); + access_memory(vm, vcpus, ACCESS_WRITE, "Populating memory"); + + /* As a control, read and write to the populated memory first. */ + access_memory(vm, vcpus, ACCESS_WRITE, "Writing to populated memory"); + access_memory(vm, vcpus, ACCESS_READ, "Reading from populated memory"); + + /* Repeat on memory that has been marked as idle. */ + mark_memory_idle(vm, vcpus); + access_memory(vm, vcpus, ACCESS_WRITE, "Writing to idle memory"); + mark_memory_idle(vm, vcpus); + access_memory(vm, vcpus, ACCESS_READ, "Reading from idle memory"); + + terminate_vcpu_threads(vcpu_threads, vcpus); + free(vcpu_threads); + perf_test_destroy_vm(vm); +} + +static void help(char *name) +{ + puts(""); + printf("usage: %s [-h] [-m mode] [-b vcpu_bytes] [-v vcpus] [-o] [-s mem_type]\n", + name); + puts(""); + printf(" -h: Display this help message."); + guest_modes_help(); + printf(" -b: specify the size of the memory region which should be\n" + " dirtied by each vCPU. e.g. 
10M or 3G.\n" + " (default: 1G)\n"); + printf(" -v: specify the number of vCPUs to run.\n"); + printf(" -o: Overlap guest memory accesses instead of partitioning\n" + " them into a separate region of memory for each vCPU.\n"); + printf(" -s: specify the type of memory that should be used to\n" + " back the guest data region.\n\n"); + backing_src_help(); + puts(""); + exit(0); +} + +int main(int argc, char *argv[]) +{ + struct test_params params = { + .backing_src = VM_MEM_SRC_ANONYMOUS, + .vcpu_memory_bytes = DEFAULT_PER_VCPU_MEM_SIZE, + .vcpus = 1, + }; + int page_idle_fd; + int opt; + + guest_modes_append_default(); + + while ((opt = getopt(argc, argv, "hm:b:v:os:")) != -1) { + switch (opt) { + case 'm': + guest_modes_cmdline(optarg); + break; + case 'b': + params.vcpu_memory_bytes = parse_size(optarg); + break; + case 'v': + params.vcpus = atoi(optarg); + break; + case 'o': + overlap_memory_access = true; + break; + case 's': + params.backing_src = parse_backing_src_type(optarg); + break; + case 'h': + default: + help(argv[0]); + break; + } + } + + page_idle_fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR); + if (page_idle_fd < 0) { + print_skip("CONFIG_IDLE_PAGE_TRACKING is not enabled"); + exit(KSFT_SKIP); + } + close(page_idle_fd); + + for_each_guest_mode(run_test, ¶ms); + + return 0; +} diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c index 04a2641261be..80cbd3a748c0 100644 --- a/tools/testing/selftests/kvm/dirty_log_perf_test.c +++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c @@ -312,6 +312,7 @@ int main(int argc, char *argv[]) break; case 'o': p.partition_vcpu_memory_access = false; + break; case 's': p.backing_src = parse_backing_src_type(optarg); break; diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h index 615ab254899d..010b59b13917 100644 --- a/tools/testing/selftests/kvm/include/kvm_util.h +++ b/tools/testing/selftests/kvm/include/kvm_util.h @@ -45,6 +45,7 @@ enum vm_guest_mode { VM_MODE_P40V48_64K, VM_MODE_PXXV48_4K, /* For 48bits VA but ANY bits PA */ VM_MODE_P47V64_4K, + VM_MODE_P44V64_4K, NUM_VM_MODES, }; @@ -62,7 +63,7 @@ enum vm_guest_mode { #elif defined(__s390x__) -#define VM_MODE_DEFAULT VM_MODE_P47V64_4K +#define VM_MODE_DEFAULT VM_MODE_P44V64_4K #define MIN_PAGE_SHIFT 12U #define ptes_per_page(page_size) ((page_size) / 16) diff --git a/tools/testing/selftests/kvm/include/x86_64/hyperv.h b/tools/testing/selftests/kvm/include/x86_64/hyperv.h index 412eaee7884a..b66910702c0a 100644 --- a/tools/testing/selftests/kvm/include/x86_64/hyperv.h +++ b/tools/testing/selftests/kvm/include/x86_64/hyperv.h @@ -117,7 +117,7 @@ #define HV_X64_GUEST_DEBUGGING_AVAILABLE BIT(1) #define HV_X64_PERF_MONITOR_AVAILABLE BIT(2) #define HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE BIT(3) -#define HV_X64_HYPERCALL_PARAMS_XMM_AVAILABLE BIT(4) +#define HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE BIT(4) #define HV_X64_GUEST_IDLE_STATE_AVAILABLE BIT(5) #define HV_FEATURE_FREQUENCY_MSRS_AVAILABLE BIT(8) #define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE BIT(10) @@ -182,4 +182,7 @@ #define HV_STATUS_INVALID_CONNECTION_ID 18 #define HV_STATUS_INSUFFICIENT_BUFFERS 19 +/* hypercall options */ +#define HV_HYPERCALL_FAST_BIT BIT(16) + #endif /* !SELFTEST_KVM_HYPERV_H */ diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c index 9f49f6caafe5..632b74d6b3ca 100644 --- a/tools/testing/selftests/kvm/lib/aarch64/processor.c 
+++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c @@ -401,7 +401,7 @@ unexpected_exception: void vm_init_descriptor_tables(struct kvm_vm *vm) { vm->handlers = vm_vaddr_alloc(vm, sizeof(struct handlers), - vm->page_size, 0, 0); + vm->page_size); *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers; } diff --git a/tools/testing/selftests/kvm/lib/guest_modes.c b/tools/testing/selftests/kvm/lib/guest_modes.c index 25bff307c71f..c330f414ef96 100644 --- a/tools/testing/selftests/kvm/lib/guest_modes.c +++ b/tools/testing/selftests/kvm/lib/guest_modes.c @@ -22,6 +22,22 @@ void guest_modes_append_default(void) } } #endif +#ifdef __s390x__ + { + int kvm_fd, vm_fd; + struct kvm_s390_vm_cpu_processor info; + + kvm_fd = open_kvm_dev_path_or_exit(); + vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0); + kvm_device_access(vm_fd, KVM_S390_VM_CPU_MODEL, + KVM_S390_VM_CPU_PROCESSOR, &info, false); + close(vm_fd); + close(kvm_fd); + /* Starting with z13 we have 47bits of physical address */ + if (info.ibc >= 0x30) + guest_mode_append(VM_MODE_P47V64_4K, true, true); + } +#endif } void for_each_guest_mode(void (*func)(enum vm_guest_mode, void *), void *arg) diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index 5b56b57b3c20..10a8ed691c66 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -176,6 +176,7 @@ const char *vm_guest_mode_string(uint32_t i) [VM_MODE_P40V48_64K] = "PA-bits:40, VA-bits:48, 64K pages", [VM_MODE_PXXV48_4K] = "PA-bits:ANY, VA-bits:48, 4K pages", [VM_MODE_P47V64_4K] = "PA-bits:47, VA-bits:64, 4K pages", + [VM_MODE_P44V64_4K] = "PA-bits:44, VA-bits:64, 4K pages", }; _Static_assert(sizeof(strings)/sizeof(char *) == NUM_VM_MODES, "Missing new mode strings?"); @@ -194,6 +195,7 @@ const struct vm_guest_mode_params vm_guest_mode_params[] = { { 40, 48, 0x10000, 16 }, { 0, 0, 0x1000, 12 }, { 47, 64, 0x1000, 12 }, + { 44, 64, 0x1000, 12 }, }; _Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES, "Missing new mode params?"); @@ -282,6 +284,9 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm) case VM_MODE_P47V64_4K: vm->pgtable_levels = 5; break; + case VM_MODE_P44V64_4K: + vm->pgtable_levels = 5; + break; default: TEST_FAIL("Unknown guest mode, mode: 0x%x", mode); } diff --git a/tools/testing/selftests/kvm/set_memory_region_test.c b/tools/testing/selftests/kvm/set_memory_region_test.c index 85b18bb8f762..72a1c9b4882c 100644 --- a/tools/testing/selftests/kvm/set_memory_region_test.c +++ b/tools/testing/selftests/kvm/set_memory_region_test.c @@ -377,7 +377,8 @@ static void test_add_max_memory_regions(void) (max_mem_slots - 1), MEM_REGION_SIZE >> 10); mem = mmap(NULL, (size_t)max_mem_slots * MEM_REGION_SIZE + alignment, - PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0); TEST_ASSERT(mem != MAP_FAILED, "Failed to mmap() host"); mem_aligned = (void *)(((size_t) mem + alignment - 1) & ~(alignment - 1)); diff --git a/tools/testing/selftests/kvm/steal_time.c b/tools/testing/selftests/kvm/steal_time.c index b0031f2d38fd..ecec30865a74 100644 --- a/tools/testing/selftests/kvm/steal_time.c +++ b/tools/testing/selftests/kvm/steal_time.c @@ -320,7 +320,7 @@ int main(int ac, char **av) run_delay = get_run_delay(); pthread_create(&thread, &attr, do_steal_time, NULL); do - pthread_yield(); + sched_yield(); while 
(get_run_delay() - run_delay < MIN_RUN_DELAY_NS); pthread_join(thread, NULL); run_delay = get_run_delay() - run_delay; diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_clock.c b/tools/testing/selftests/kvm/x86_64/hyperv_clock.c index bab10ae787b6..e0b2bb1339b1 100644 --- a/tools/testing/selftests/kvm/x86_64/hyperv_clock.c +++ b/tools/testing/selftests/kvm/x86_64/hyperv_clock.c @@ -215,7 +215,7 @@ int main(void) vcpu_set_hv_cpuid(vm, VCPU_ID); tsc_page_gva = vm_vaddr_alloc_page(vm); - memset(addr_gpa2hva(vm, tsc_page_gva), 0x0, getpagesize()); + memset(addr_gva2hva(vm, tsc_page_gva), 0x0, getpagesize()); TEST_ASSERT((addr_gva2gpa(vm, tsc_page_gva) & (getpagesize() - 1)) == 0, "TSC page has to be page aligned\n"); vcpu_args_set(vm, VCPU_ID, 2, tsc_page_gva, addr_gva2gpa(vm, tsc_page_gva)); diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_features.c b/tools/testing/selftests/kvm/x86_64/hyperv_features.c index 42bd658f52a8..91d88aaa9899 100644 --- a/tools/testing/selftests/kvm/x86_64/hyperv_features.c +++ b/tools/testing/selftests/kvm/x86_64/hyperv_features.c @@ -47,6 +47,7 @@ static void do_wrmsr(u32 idx, u64 val) } static int nr_gp; +static int nr_ud; static inline u64 hypercall(u64 control, vm_vaddr_t input_address, vm_vaddr_t output_address) @@ -80,6 +81,12 @@ static void guest_gp_handler(struct ex_regs *regs) regs->rip = (uint64_t)&wrmsr_end; } +static void guest_ud_handler(struct ex_regs *regs) +{ + nr_ud++; + regs->rip += 3; +} + struct msr_data { uint32_t idx; bool available; @@ -90,6 +97,7 @@ struct msr_data { struct hcall_data { uint64_t control; uint64_t expect; + bool ud_expected; }; static void guest_msr(struct msr_data *msr) @@ -117,13 +125,26 @@ static void guest_msr(struct msr_data *msr) static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall) { int i = 0; + u64 res, input, output; wrmsr(HV_X64_MSR_GUEST_OS_ID, LINUX_OS_ID); wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa); while (hcall->control) { - GUEST_ASSERT(hypercall(hcall->control, pgs_gpa, - pgs_gpa + 4096) == hcall->expect); + nr_ud = 0; + if (!(hcall->control & HV_HYPERCALL_FAST_BIT)) { + input = pgs_gpa; + output = pgs_gpa + 4096; + } else { + input = output = 0; + } + + res = hypercall(hcall->control, input, output); + if (hcall->ud_expected) + GUEST_ASSERT(nr_ud == 1); + else + GUEST_ASSERT(res == hcall->expect); + GUEST_SYNC(i++); } @@ -552,8 +573,18 @@ static void guest_test_hcalls_access(struct kvm_vm *vm, struct hcall_data *hcall recomm.ebx = 0xfff; hcall->expect = HV_STATUS_SUCCESS; break; - case 17: + /* XMM fast hypercall */ + hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT; + hcall->ud_expected = true; + break; + case 18: + feat.edx |= HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE; + hcall->ud_expected = false; + hcall->expect = HV_STATUS_SUCCESS; + break; + + case 19: /* END */ hcall->control = 0; break; @@ -615,7 +646,7 @@ int main(void) vm_init_descriptor_tables(vm); vcpu_init_descriptor_tables(vm, VCPU_ID); - vm_handle_exception(vm, GP_VECTOR, guest_gp_handler); + vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler); pr_info("Testing access to Hyper-V specific MSRs\n"); guest_test_msrs_access(vm, addr_gva2hva(vm, msr_gva), @@ -625,6 +656,10 @@ int main(void) /* Test hypercalls */ vm = vm_create_default(VCPU_ID, 0, guest_hcall); + vm_init_descriptor_tables(vm); + vcpu_init_descriptor_tables(vm, VCPU_ID); + vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler); + /* Hypercall input/output */ hcall_page = vm_vaddr_alloc_pages(vm, 2); memset(addr_gva2hva(vm, 
hcall_page), 0x0, 2 * getpagesize()); diff --git a/tools/testing/selftests/kvm/x86_64/mmu_role_test.c b/tools/testing/selftests/kvm/x86_64/mmu_role_test.c index 523371cf8e8f..da2325fcad87 100644 --- a/tools/testing/selftests/kvm/x86_64/mmu_role_test.c +++ b/tools/testing/selftests/kvm/x86_64/mmu_role_test.c @@ -71,7 +71,7 @@ static void mmu_role_test(u32 *cpuid_reg, u32 evil_cpuid_val) /* Set up a #PF handler to eat the RSVD #PF and signal all done! */ vm_init_descriptor_tables(vm); vcpu_init_descriptor_tables(vm, VCPU_ID); - vm_handle_exception(vm, PF_VECTOR, guest_pf_handler); + vm_install_exception_handler(vm, PF_VECTOR, guest_pf_handler); r = _vcpu_run(vm, VCPU_ID); TEST_ASSERT(r == 0, "vcpu_run failed: %d\n", r); diff --git a/tools/testing/selftests/kvm/x86_64/smm_test.c b/tools/testing/selftests/kvm/x86_64/smm_test.c index c1f831803ad2..d0fe2fdce58c 100644 --- a/tools/testing/selftests/kvm/x86_64/smm_test.c +++ b/tools/testing/selftests/kvm/x86_64/smm_test.c @@ -53,15 +53,28 @@ static inline void sync_with_host(uint64_t phase) : "+a" (phase)); } -void self_smi(void) +static void self_smi(void) { x2apic_write_reg(APIC_ICR, APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_SMI); } -void guest_code(void *arg) +static void l2_guest_code(void) { + sync_with_host(8); + + sync_with_host(10); + + vmcall(); +} + +static void guest_code(void *arg) +{ + #define L2_GUEST_STACK_SIZE 64 + unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; uint64_t apicbase = rdmsr(MSR_IA32_APICBASE); + struct svm_test_data *svm = arg; + struct vmx_pages *vmx_pages = arg; sync_with_host(1); @@ -74,21 +87,50 @@ void guest_code(void *arg) sync_with_host(4); if (arg) { - if (cpu_has_svm()) - generic_svm_setup(arg, NULL, NULL); - else - GUEST_ASSERT(prepare_for_vmx_operation(arg)); + if (cpu_has_svm()) { + generic_svm_setup(svm, l2_guest_code, + &l2_guest_stack[L2_GUEST_STACK_SIZE]); + } else { + GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages)); + GUEST_ASSERT(load_vmcs(vmx_pages)); + prepare_vmcs(vmx_pages, l2_guest_code, + &l2_guest_stack[L2_GUEST_STACK_SIZE]); + } sync_with_host(5); self_smi(); sync_with_host(7); + + if (cpu_has_svm()) { + run_guest(svm->vmcb, svm->vmcb_gpa); + svm->vmcb->save.rip += 3; + run_guest(svm->vmcb, svm->vmcb_gpa); + } else { + vmlaunch(); + vmresume(); + } + + /* Stages 8-11 are eaten by SMM (SMRAM_STAGE reported instead) */ + sync_with_host(12); } sync_with_host(DONE); } +void inject_smi(struct kvm_vm *vm) +{ + struct kvm_vcpu_events events; + + vcpu_events_get(vm, VCPU_ID, &events); + + events.smi.pending = 1; + events.flags |= KVM_VCPUEVENT_VALID_SMM; + + vcpu_events_set(vm, VCPU_ID, &events); +} + int main(int argc, char *argv[]) { vm_vaddr_t nested_gva = 0; @@ -147,6 +189,22 @@ int main(int argc, char *argv[]) "Unexpected stage: #%x, got %x", stage, stage_reported); + /* + * Enter SMM during L2 execution and check that we correctly + * return from it. Do not perform save/restore while in SMM yet. + */ + if (stage == 8) { + inject_smi(vm); + continue; + } + + /* + * Perform save/restore while the guest is in SMM triggered + * during L2 execution. 
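+	 * (l2_guest_code above syncs stages 8 and 10 precisely for these two
+	 * SMI injections; while in SMM the guest reports SMRAM_STAGE instead
+	 * of the normal stage numbers, which is why stages 8-11 never show up
+	 * here.)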
+ */ + if (stage == 10) + inject_smi(vm); + state = vcpu_save_state(vm, VCPU_ID); kvm_vm_release(vm); kvm_vm_restart(vm, O_RDWR); diff --git a/tools/testing/selftests/lkdtm/config b/tools/testing/selftests/lkdtm/config index 013446e87f1f..38edea25631b 100644 --- a/tools/testing/selftests/lkdtm/config +++ b/tools/testing/selftests/lkdtm/config @@ -6,3 +6,5 @@ CONFIG_HARDENED_USERCOPY=y # CONFIG_HARDENED_USERCOPY_FALLBACK is not set CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT=y CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y +CONFIG_UBSAN_BOUNDS=y +CONFIG_UBSAN_TRAP=y diff --git a/tools/testing/selftests/lkdtm/tests.txt b/tools/testing/selftests/lkdtm/tests.txt index 846cfd508d3c..09f7bfa383cc 100644 --- a/tools/testing/selftests/lkdtm/tests.txt +++ b/tools/testing/selftests/lkdtm/tests.txt @@ -7,6 +7,7 @@ EXCEPTION #EXHAUST_STACK Corrupts memory on failure #CORRUPT_STACK Crashes entire system on success #CORRUPT_STACK_STRONG Crashes entire system on success +ARRAY_BOUNDS CORRUPT_LIST_ADD list_add corruption CORRUPT_LIST_DEL list_del corruption STACK_GUARD_PAGE_LEADING @@ -72,4 +73,6 @@ USERCOPY_KERNEL STACKLEAK_ERASING OK: the rest of the thread stack is properly erased CFI_FORWARD_PROTO FORTIFIED_STRSCPY +FORTIFIED_OBJECT +FORTIFIED_SUBOBJECT PPC_SLB_MULTIHIT Recovered diff --git a/tools/testing/selftests/memory-hotplug/mem-on-off-test.sh b/tools/testing/selftests/memory-hotplug/mem-on-off-test.sh index b37585e6aa38..46a97f318f58 100755 --- a/tools/testing/selftests/memory-hotplug/mem-on-off-test.sh +++ b/tools/testing/selftests/memory-hotplug/mem-on-off-test.sh @@ -282,7 +282,9 @@ done # echo $error > $NOTIFIER_ERR_INJECT_DIR/actions/MEM_GOING_OFFLINE/error for memory in `hotpluggable_online_memory`; do - offline_memory_expect_fail $memory + if [ $((RANDOM % 100)) -lt $ratio ]; then + offline_memory_expect_fail $memory + fi done echo 0 > $NOTIFIER_ERR_INJECT_DIR/actions/MEM_GOING_OFFLINE/error diff --git a/tools/testing/selftests/net/icmp_redirect.sh b/tools/testing/selftests/net/icmp_redirect.sh index c19ecc6a8614..ecbf57f264ed 100755 --- a/tools/testing/selftests/net/icmp_redirect.sh +++ b/tools/testing/selftests/net/icmp_redirect.sh @@ -313,9 +313,10 @@ check_exception() fi log_test $? 
0 "IPv4: ${desc}" - if [ "$with_redirect" = "yes" ]; then + # No PMTU info for test "redirect" and "mtu exception plus redirect" + if [ "$with_redirect" = "yes" ] && [ "$desc" != "redirect exception plus mtu" ]; then ip -netns h1 -6 ro get ${H1_VRF_ARG} ${H2_N2_IP6} | \ - grep -q "${H2_N2_IP6} from :: via ${R2_LLADDR} dev br0.*${mtu}" + grep -v "mtu" | grep -q "${H2_N2_IP6} .*via ${R2_LLADDR} dev br0" elif [ -n "${mtu}" ]; then ip -netns h1 -6 ro get ${H1_VRF_ARG} ${H2_N2_IP6} | \ grep -q "${mtu}" diff --git a/tools/testing/selftests/net/ipsec.c b/tools/testing/selftests/net/ipsec.c index f23438d512c5..3d7dde2c321b 100644 --- a/tools/testing/selftests/net/ipsec.c +++ b/tools/testing/selftests/net/ipsec.c @@ -484,13 +484,16 @@ enum desc_type { MONITOR_ACQUIRE, EXPIRE_STATE, EXPIRE_POLICY, + SPDINFO_ATTRS, }; const char *desc_name[] = { "create tunnel", "alloc spi", "monitor acquire", "expire state", - "expire policy" + "expire policy", + "spdinfo attributes", + "" }; struct xfrm_desc { enum desc_type type; @@ -1593,6 +1596,155 @@ out_close: return ret; } +static int xfrm_spdinfo_set_thresh(int xfrm_sock, uint32_t *seq, + unsigned thresh4_l, unsigned thresh4_r, + unsigned thresh6_l, unsigned thresh6_r, + bool add_bad_attr) + +{ + struct { + struct nlmsghdr nh; + union { + uint32_t unused; + int error; + }; + char attrbuf[MAX_PAYLOAD]; + } req; + struct xfrmu_spdhthresh thresh; + + memset(&req, 0, sizeof(req)); + req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.unused)); + req.nh.nlmsg_type = XFRM_MSG_NEWSPDINFO; + req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK; + req.nh.nlmsg_seq = (*seq)++; + + thresh.lbits = thresh4_l; + thresh.rbits = thresh4_r; + if (rtattr_pack(&req.nh, sizeof(req), XFRMA_SPD_IPV4_HTHRESH, &thresh, sizeof(thresh))) + return -1; + + thresh.lbits = thresh6_l; + thresh.rbits = thresh6_r; + if (rtattr_pack(&req.nh, sizeof(req), XFRMA_SPD_IPV6_HTHRESH, &thresh, sizeof(thresh))) + return -1; + + if (add_bad_attr) { + BUILD_BUG_ON(XFRMA_IF_ID <= XFRMA_SPD_MAX + 1); + if (rtattr_pack(&req.nh, sizeof(req), XFRMA_IF_ID, NULL, 0)) { + pr_err("adding attribute failed: no space"); + return -1; + } + } + + if (send(xfrm_sock, &req, req.nh.nlmsg_len, 0) < 0) { + pr_err("send()"); + return -1; + } + + if (recv(xfrm_sock, &req, sizeof(req), 0) < 0) { + pr_err("recv()"); + return -1; + } else if (req.nh.nlmsg_type != NLMSG_ERROR) { + printk("expected NLMSG_ERROR, got %d", (int)req.nh.nlmsg_type); + return -1; + } + + if (req.error) { + printk("NLMSG_ERROR: %d: %s", req.error, strerror(-req.error)); + return -1; + } + + return 0; +} + +static int xfrm_spdinfo_attrs(int xfrm_sock, uint32_t *seq) +{ + struct { + struct nlmsghdr nh; + union { + uint32_t unused; + int error; + }; + char attrbuf[MAX_PAYLOAD]; + } req; + + if (xfrm_spdinfo_set_thresh(xfrm_sock, seq, 32, 31, 120, 16, false)) { + pr_err("Can't set SPD HTHRESH"); + return KSFT_FAIL; + } + + memset(&req, 0, sizeof(req)); + + req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.unused)); + req.nh.nlmsg_type = XFRM_MSG_GETSPDINFO; + req.nh.nlmsg_flags = NLM_F_REQUEST; + req.nh.nlmsg_seq = (*seq)++; + if (send(xfrm_sock, &req, req.nh.nlmsg_len, 0) < 0) { + pr_err("send()"); + return KSFT_FAIL; + } + + if (recv(xfrm_sock, &req, sizeof(req), 0) < 0) { + pr_err("recv()"); + return KSFT_FAIL; + } else if (req.nh.nlmsg_type == XFRM_MSG_NEWSPDINFO) { + size_t len = NLMSG_PAYLOAD(&req.nh, sizeof(req.unused)); + struct rtattr *attr = (void *)req.attrbuf; + int got_thresh = 0; + + for (; RTA_OK(attr, len); attr = RTA_NEXT(attr, len)) { + if (attr->rta_type == 
XFRMA_SPD_IPV4_HTHRESH) { + struct xfrmu_spdhthresh *t = RTA_DATA(attr); + + got_thresh++; + if (t->lbits != 32 || t->rbits != 31) { + pr_err("thresh differ: %u, %u", + t->lbits, t->rbits); + return KSFT_FAIL; + } + } + if (attr->rta_type == XFRMA_SPD_IPV6_HTHRESH) { + struct xfrmu_spdhthresh *t = RTA_DATA(attr); + + got_thresh++; + if (t->lbits != 120 || t->rbits != 16) { + pr_err("thresh differ: %u, %u", + t->lbits, t->rbits); + return KSFT_FAIL; + } + } + } + if (got_thresh != 2) { + pr_err("only %d thresh returned by XFRM_MSG_GETSPDINFO", got_thresh); + return KSFT_FAIL; + } + } else if (req.nh.nlmsg_type != NLMSG_ERROR) { + printk("expected NLMSG_ERROR, got %d", (int)req.nh.nlmsg_type); + return KSFT_FAIL; + } else { + printk("NLMSG_ERROR: %d: %s", req.error, strerror(-req.error)); + return -1; + } + + /* Restore the default */ + if (xfrm_spdinfo_set_thresh(xfrm_sock, seq, 32, 32, 128, 128, false)) { + pr_err("Can't restore SPD HTHRESH"); + return KSFT_FAIL; + } + + /* + * At this moment xfrm uses nlmsg_parse_deprecated(), which + * implies NL_VALIDATE_LIBERAL - ignoring attributes with + * (type > maxtype). nla_parse_deprecated_strict() would enforce + * it. Or even stricter nla_parse(). + * Right now it's expected to be ignored, not to fail. + */ + if (xfrm_spdinfo_set_thresh(xfrm_sock, seq, 32, 32, 128, 128, true)) + return KSFT_PASS; + + return KSFT_PASS; +} + static int child_serv(int xfrm_sock, uint32_t *seq, unsigned int nr, int cmd_fd, void *buf, struct xfrm_desc *desc) { @@ -1717,6 +1869,9 @@ static int child_f(unsigned int nr, int test_desc_fd, int cmd_fd, void *buf) case EXPIRE_POLICY: ret = xfrm_expire_policy(xfrm_sock, &seq, nr, &desc); break; + case SPDINFO_ATTRS: + ret = xfrm_spdinfo_attrs(xfrm_sock, &seq); + break; default: printk("Unknown desc type %d", desc.type); exit(KSFT_FAIL); @@ -1994,8 +2149,10 @@ static int write_proto_plan(int fd, int proto) * sizeof(xfrm_user_polexpire) = 168 | sizeof(xfrm_user_polexpire) = 176 * * Check the structures affected by the UABI difference. + * Also, check translation for xfrm_set_spdinfo: it has its own attributes + * which need to be correctly copied, but not translated.
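+ * (This new SPDINFO_ATTRS desc is also why compat_plan below goes from 4
+ * to 5: the four original structure checks plus this attribute check.)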
*/ -const unsigned int compat_plan = 4; +const unsigned int compat_plan = 5; static int write_compat_struct_tests(int test_desc_fd) { struct xfrm_desc desc = {}; @@ -2019,6 +2176,10 @@ static int write_compat_struct_tests(int test_desc_fd) if (__write_desc(test_desc_fd, &desc)) return -1; + desc.type = SPDINFO_ATTRS; + if (__write_desc(test_desc_fd, &desc)) + return -1; + return 0; } diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh index 9a191c1a5de8..f02f4de2f3a0 100755 --- a/tools/testing/selftests/net/mptcp/mptcp_join.sh +++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh @@ -1409,7 +1409,7 @@ syncookies_tests() ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow ip netns exec $ns2 ./pm_nl_ctl add 10.0.2.2 flags subflow run_tests $ns1 $ns2 10.0.1.1 - chk_join_nr "subflows limited by server w cookies" 2 2 1 + chk_join_nr "subflows limited by server w cookies" 2 1 1 # test signal address with cookies reset_with_cookies diff --git a/tools/testing/selftests/net/nettest.c b/tools/testing/selftests/net/nettest.c index 6365c7fd1262..bd6288302094 100644 --- a/tools/testing/selftests/net/nettest.c +++ b/tools/testing/selftests/net/nettest.c @@ -11,9 +11,11 @@ #include <sys/socket.h> #include <sys/wait.h> #include <linux/tcp.h> +#include <linux/udp.h> #include <arpa/inet.h> #include <net/if.h> #include <netinet/in.h> +#include <netinet/ip.h> #include <netdb.h> #include <fcntl.h> #include <libgen.h> @@ -27,6 +29,10 @@ #include <time.h> #include <errno.h> +#include <linux/xfrm.h> +#include <linux/ipsec.h> +#include <linux/pfkeyv2.h> + #ifndef IPV6_UNICAST_IF #define IPV6_UNICAST_IF 76 #endif @@ -114,6 +120,9 @@ struct sock_args { struct in_addr in; struct in6_addr in6; } expected_raddr; + + /* ESP in UDP encap test */ + int use_xfrm; }; static int server_mode; @@ -1346,6 +1355,41 @@ static int bind_socket(int sd, struct sock_args *args) return 0; } +static int config_xfrm_policy(int sd, struct sock_args *args) +{ + struct xfrm_userpolicy_info policy = {}; + int type = UDP_ENCAP_ESPINUDP; + int xfrm_af = IP_XFRM_POLICY; + int level = SOL_IP; + + if (args->type != SOCK_DGRAM) { + log_error("Invalid socket type. 
Only DGRAM can be used for XFRM\n"); + return 1; + } + + policy.action = XFRM_POLICY_ALLOW; + policy.sel.family = args->version; + if (args->version == AF_INET6) { + xfrm_af = IPV6_XFRM_POLICY; + level = SOL_IPV6; + } + + policy.dir = XFRM_POLICY_OUT; + if (setsockopt(sd, level, xfrm_af, &policy, sizeof(policy)) < 0) + return 1; + + policy.dir = XFRM_POLICY_IN; + if (setsockopt(sd, level, xfrm_af, &policy, sizeof(policy)) < 0) + return 1; + + if (setsockopt(sd, IPPROTO_UDP, UDP_ENCAP, &type, sizeof(type)) < 0) { + log_err_errno("Failed to set xfrm encap"); + return 1; + } + + return 0; +} + static int lsock_init(struct sock_args *args) { long flags; @@ -1389,6 +1433,11 @@ static int lsock_init(struct sock_args *args) if (fcntl(sd, F_SETFD, FD_CLOEXEC) < 0) log_err_errno("Failed to set close-on-exec flag"); + if (args->use_xfrm && config_xfrm_policy(sd, args)) { + log_err_errno("Failed to set xfrm policy"); + goto err; + } + out: return sd; @@ -1772,7 +1821,7 @@ static int ipc_parent(int cpid, int fd, struct sock_args *args) return client_status; } -#define GETOPT_STR "sr:l:c:p:t:g:P:DRn:M:X:m:d:I:BN:O:SCi6L:0:1:2:3:Fbq" +#define GETOPT_STR "sr:l:c:p:t:g:P:DRn:M:X:m:d:I:BN:O:SCi6xL:0:1:2:3:Fbq" static void print_usage(char *prog) { @@ -1795,6 +1844,7 @@ static void print_usage(char *prog) " -D|R datagram (D) / raw (R) socket (default stream)\n" " -l addr local address to bind to in server mode\n" " -c addr local address to bind to in client mode\n" + " -x configure XFRM policy on socket\n" "\n" " -d dev bind socket to given device name\n" " -I dev bind socket to given device name - server mode\n" @@ -1966,6 +2016,9 @@ int main(int argc, char *argv[]) case 'q': quiet = 1; break; + case 'x': + args.use_xfrm = 1; + break; default: print_usage(argv[0]); return 1; diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh index 64cd2e23c568..543ad7513a8e 100755 --- a/tools/testing/selftests/net/pmtu.sh +++ b/tools/testing/selftests/net/pmtu.sh @@ -118,6 +118,16 @@ # below for IPv6 doesn't apply here, because, on IPv4, administrative MTU # changes alone won't affect PMTU # +# - pmtu_vti4_udp_exception +# Same as pmtu_vti4_exception, but using ESP-in-UDP +# +# - pmtu_vti4_udp_routed_exception +# Set up vti tunnel on top of veth connected through routing namespace and +# add xfrm states and policies with ESP-in-UDP encapsulation. Check that +# route exception is not created if link layer MTU is not exceeded, then +# lower MTU on second part of routed environment and check that exception +# is created with the expected PMTU. +# # - pmtu_vti6_exception # Set up vti6 tunnel on top of veth, with xfrm states and policies, in two # namespaces with matching endpoints. Check that route exception is @@ -125,6 +135,13 @@ # decrease and increase MTU of tunnel, checking that route exception PMTU # changes accordingly # +# - pmtu_vti6_udp_exception +# Same as pmtu_vti6_exception, but using ESP-in-UDP +# +# - pmtu_vti6_udp_routed_exception +# Same as pmtu_vti6_udp_exception, but with routing between vti +# endpoints +# # - pmtu_vti4_default_mtu # Set up vti4 tunnel on top of veth, in two namespaces with matching # endpoints.
Check that MTU assigned to vti interface is the MTU of the @@ -224,6 +241,10 @@ tests=" pmtu_ipv6_ipv6_exception IPv6 over IPv6: PMTU exceptions 1 pmtu_vti6_exception vti6: PMTU exceptions 0 pmtu_vti4_exception vti4: PMTU exceptions 0 + pmtu_vti6_udp_exception vti6: PMTU exceptions (ESP-in-UDP) 0 + pmtu_vti4_udp_exception vti4: PMTU exceptions (ESP-in-UDP) 0 + pmtu_vti6_udp_routed_exception vti6: PMTU exceptions, routed (ESP-in-UDP) 0 + pmtu_vti4_udp_routed_exception vti4: PMTU exceptions, routed (ESP-in-UDP) 0 pmtu_vti4_default_mtu vti4: default MTU assignment 0 pmtu_vti6_default_mtu vti6: default MTU assignment 0 pmtu_vti4_link_add_mtu vti4: MTU setting on link creation 0 @@ -246,7 +267,6 @@ ns_b="ip netns exec ${NS_B}" ns_c="ip netns exec ${NS_C}" ns_r1="ip netns exec ${NS_R1}" ns_r2="ip netns exec ${NS_R2}" - # Addressing and routing for tests with routers: four network segments, with # index SEGMENT between 1 and 4, a common prefix (PREFIX4 or PREFIX6) and an # identifier ID, which is 1 for hosts (A and B), 2 for routers (R1 and R2). @@ -279,7 +299,6 @@ routes=" A ${prefix6}:${b_r2}::1 ${prefix6}:${a_r2}::2 B default ${prefix6}:${b_r1}::2 " - USE_NH="no" # ns family nh id destination gateway nexthops=" @@ -326,6 +345,7 @@ dummy6_mask="64" err_buf= tcpdump_pids= +nettest_pids= err() { err_buf="${err_buf}${1} @@ -548,6 +568,14 @@ setup_vti6() { setup_vti 6 ${veth6_a_addr} ${veth6_b_addr} ${tunnel6_a_addr} ${tunnel6_b_addr} ${tunnel6_mask} } +setup_vti4routed() { + setup_vti 4 ${prefix4}.${a_r1}.1 ${prefix4}.${b_r1}.1 ${tunnel4_a_addr} ${tunnel4_b_addr} ${tunnel4_mask} +} + +setup_vti6routed() { + setup_vti 6 ${prefix6}:${a_r1}::1 ${prefix6}:${b_r1}::1 ${tunnel6_a_addr} ${tunnel6_b_addr} ${tunnel6_mask} +} + setup_vxlan_or_geneve() { type="${1}" a_addr="${2}" @@ -619,18 +647,36 @@ setup_xfrm() { proto=${1} veth_a_addr="${2}" veth_b_addr="${3}" + encap=${4} - run_cmd ${ns_a} ip -${proto} xfrm state add src ${veth_a_addr} dst ${veth_b_addr} spi 0x1000 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel || return 1 - run_cmd ${ns_a} ip -${proto} xfrm state add src ${veth_b_addr} dst ${veth_a_addr} spi 0x1001 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel + run_cmd ${ns_a} ip -${proto} xfrm state add src ${veth_a_addr} dst ${veth_b_addr} spi 0x1000 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel ${encap} || return 1 + run_cmd ${ns_a} ip -${proto} xfrm state add src ${veth_b_addr} dst ${veth_a_addr} spi 0x1001 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel ${encap} run_cmd ${ns_a} ip -${proto} xfrm policy add dir out mark 10 tmpl src ${veth_a_addr} dst ${veth_b_addr} proto esp mode tunnel run_cmd ${ns_a} ip -${proto} xfrm policy add dir in mark 10 tmpl src ${veth_b_addr} dst ${veth_a_addr} proto esp mode tunnel - run_cmd ${ns_b} ip -${proto} xfrm state add src ${veth_a_addr} dst ${veth_b_addr} spi 0x1000 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel - run_cmd ${ns_b} ip -${proto} xfrm state add src ${veth_b_addr} dst ${veth_a_addr} spi 0x1001 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel + run_cmd ${ns_b} ip -${proto} xfrm state add src ${veth_a_addr} dst ${veth_b_addr} spi 0x1000 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel ${encap} + run_cmd ${ns_b} ip -${proto} xfrm 
state add src ${veth_b_addr} dst ${veth_a_addr} spi 0x1001 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel ${encap} run_cmd ${ns_b} ip -${proto} xfrm policy add dir out mark 10 tmpl src ${veth_b_addr} dst ${veth_a_addr} proto esp mode tunnel run_cmd ${ns_b} ip -${proto} xfrm policy add dir in mark 10 tmpl src ${veth_a_addr} dst ${veth_b_addr} proto esp mode tunnel } +setup_nettest_xfrm() { + which nettest >/dev/null + if [ $? -ne 0 ]; then + echo "'nettest' command not found; skipping tests" + return 1 + fi + + [ ${1} -eq 6 ] && proto="-6" || proto="" + port=${2} + + run_cmd ${ns_a} nettest ${proto} -q -D -s -x -p ${port} -t 5 & + nettest_pids="${nettest_pids} $!" + + run_cmd ${ns_b} nettest ${proto} -q -D -s -x -p ${port} -t 5 & + nettest_pids="${nettest_pids} $!" +} + setup_xfrm4() { setup_xfrm 4 ${veth4_a_addr} ${veth4_b_addr} } @@ -639,6 +685,26 @@ setup_xfrm6() { setup_xfrm 6 ${veth6_a_addr} ${veth6_b_addr} } +setup_xfrm4udp() { + setup_xfrm 4 ${veth4_a_addr} ${veth4_b_addr} "encap espinudp 4500 4500 0.0.0.0" + setup_nettest_xfrm 4 4500 +} + +setup_xfrm6udp() { + setup_xfrm 6 ${veth6_a_addr} ${veth6_b_addr} "encap espinudp 4500 4500 0.0.0.0" + setup_nettest_xfrm 6 4500 +} + +setup_xfrm4udprouted() { + setup_xfrm 4 ${prefix4}.${a_r1}.1 ${prefix4}.${b_r1}.1 "encap espinudp 4500 4500 0.0.0.0" + setup_nettest_xfrm 4 4500 +} + +setup_xfrm6udprouted() { + setup_xfrm 6 ${prefix6}:${a_r1}::1 ${prefix6}:${b_r1}::1 "encap espinudp 4500 4500 0.0.0.0" + setup_nettest_xfrm 6 4500 +} + setup_routing_old() { for i in ${routes}; do [ "${ns}" = "" ] && ns="${i}" && continue @@ -823,6 +889,11 @@ cleanup() { done tcpdump_pids= + for pid in ${nettest_pids}; do + kill ${pid} + done + nettest_pids= + for n in ${NS_A} ${NS_B} ${NS_C} ${NS_R1} ${NS_R2}; do ip netns del ${n} 2> /dev/null done @@ -1432,6 +1503,135 @@ test_pmtu_vti6_exception() { return ${fail} } +test_pmtu_vti4_udp_exception() { + setup namespaces veth vti4 xfrm4udp || return $ksft_skip + trace "${ns_a}" veth_a "${ns_b}" veth_b \ + "${ns_a}" vti4_a "${ns_b}" vti4_b + + veth_mtu=1500 + vti_mtu=$((veth_mtu - 20)) + + # UDP SPI SN IV ICV pad length next header + esp_payload_rfc4106=$((vti_mtu - 8 - 4 - 4 - 8 - 16 - 1 - 1)) + ping_payload=$((esp_payload_rfc4106 - 28)) + + mtu "${ns_a}" veth_a ${veth_mtu} + mtu "${ns_b}" veth_b ${veth_mtu} + mtu "${ns_a}" vti4_a ${vti_mtu} + mtu "${ns_b}" vti4_b ${vti_mtu} + + # Send DF packet without exceeding link layer MTU, check that no + # exception is created + run_cmd ${ns_a} ping -q -M want -i 0.1 -w 1 -s ${ping_payload} ${tunnel4_b_addr} + pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel4_b_addr})" + check_pmtu_value "" "${pmtu}" "sending packet smaller than PMTU (IP payload length ${esp_payload_rfc4106})" || return 1 + + # Now exceed link layer MTU by one byte, check that exception is created + # with the right PMTU value + run_cmd ${ns_a} ping -q -M want -i 0.1 -w 1 -s $((ping_payload + 1)) ${tunnel4_b_addr} + pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel4_b_addr})" + check_pmtu_value "${esp_payload_rfc4106}" "${pmtu}" "exceeding PMTU (IP payload length $((esp_payload_rfc4106 + 1)))" +} + +test_pmtu_vti6_udp_exception() { + setup namespaces veth vti6 xfrm6udp || return $ksft_skip + trace "${ns_a}" veth_a "${ns_b}" veth_b \ + "${ns_a}" vti6_a "${ns_b}" vti6_b + fail=0 + + # Create route exception by exceeding link layer MTU + mtu "${ns_a}" veth_a 4000 + mtu "${ns_b}" veth_b 4000 + mtu "${ns_a}" vti6_a 5000 + mtu "${ns_b}" vti6_b 5000 
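+	# A 60000-byte ping cannot fit within the 4000-byte link MTU, so this
+	# is guaranteed to exceed the link layer MTU and create the exception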
+ run_cmd ${ns_a} ${ping6} -q -i 0.1 -w 1 -s 60000 ${tunnel6_b_addr} + + # Check that exception was created + pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel6_b_addr})" + check_pmtu_value any "${pmtu}" "creating tunnel exceeding link layer MTU" || return 1 + + # Decrease tunnel MTU, check for PMTU decrease in route exception + mtu "${ns_a}" vti6_a 3000 + pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel6_b_addr})" + check_pmtu_value "3000" "${pmtu}" "decreasing tunnel MTU" || fail=1 + + # Increase tunnel MTU, check for PMTU increase in route exception + mtu "${ns_a}" vti6_a 9000 + pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel6_b_addr})" + check_pmtu_value "9000" "${pmtu}" "increasing tunnel MTU" || fail=1 + + return ${fail} +} + +test_pmtu_vti4_udp_routed_exception() { + setup namespaces routing vti4routed xfrm4udprouted || return $ksft_skip + trace "${ns_a}" veth_A-R1 "${ns_b}" veth_B-R1 \ + "${ns_a}" vti4_a "${ns_b}" vti4_b + + veth_mtu=1500 + vti_mtu=$((veth_mtu - 20)) + + # UDP SPI SN IV ICV pad length next header + esp_payload_rfc4106=$((vti_mtu - 8 - 4 - 4 - 8 - 16 - 1 - 1)) + ping_payload=$((esp_payload_rfc4106 - 28)) + + mtu "${ns_a}" veth_A-R1 ${veth_mtu} + mtu "${ns_r1}" veth_R1-A ${veth_mtu} + mtu "${ns_b}" veth_B-R1 ${veth_mtu} + mtu "${ns_r1}" veth_R1-B ${veth_mtu} + + mtu "${ns_a}" vti4_a ${vti_mtu} + mtu "${ns_b}" vti4_b ${vti_mtu} + + # Send DF packet without exceeding link layer MTU, check that no + # exception is created + run_cmd ${ns_a} ping -q -M want -i 0.1 -w 1 -s ${ping_payload} ${tunnel4_b_addr} + pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel4_b_addr})" + check_pmtu_value "" "${pmtu}" "sending packet smaller than PMTU (IP payload length ${esp_payload_rfc4106})" || return 1 + + # Now decrease link layer MTU by 8 bytes on R1, check that exception is created + # with the right PMTU value + mtu "${ns_r1}" veth_R1-B $((veth_mtu - 8)) + run_cmd ${ns_a} ping -q -M want -i 0.1 -w 1 -s $((ping_payload)) ${tunnel4_b_addr} + pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel4_b_addr})" + check_pmtu_value "$((esp_payload_rfc4106 - 8))" "${pmtu}" "exceeding PMTU (IP payload length $((esp_payload_rfc4106)))" +} + +test_pmtu_vti6_udp_routed_exception() { + setup namespaces routing vti6routed xfrm6udprouted || return $ksft_skip + trace "${ns_a}" veth_A-R1 "${ns_b}" veth_B-R1 \ + "${ns_a}" vti6_a "${ns_b}" vti6_b + + veth_mtu=1500 + vti_mtu=$((veth_mtu - 40)) + + # UDP SPI SN IV ICV pad length next header + esp_payload_rfc4106=$((vti_mtu - 8 - 4 - 4 - 8 - 16 - 1 - 1)) + ping_payload=$((esp_payload_rfc4106 - 48)) + + mtu "${ns_a}" veth_A-R1 ${veth_mtu} + mtu "${ns_r1}" veth_R1-A ${veth_mtu} + mtu "${ns_b}" veth_B-R1 ${veth_mtu} + mtu "${ns_r1}" veth_R1-B ${veth_mtu} + + # mtu "${ns_a}" vti6_a ${vti_mtu} + # mtu "${ns_b}" vti6_b ${vti_mtu} + + run_cmd ${ns_a} ${ping6} -q -M want -i 0.1 -w 1 -s ${ping_payload} ${tunnel6_b_addr} + + # Check that exception was not created + pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel6_b_addr})" + check_pmtu_value "" "${pmtu}" "sending packet smaller than PMTU (IP payload length ${esp_payload_rfc4106})" || return 1 + + # Now decrease link layer MTU by 8 bytes on R1, check that exception is created + # with the right PMTU value + mtu "${ns_r1}" veth_R1-B $((veth_mtu - 8)) + run_cmd ${ns_a} ${ping6} -q -M want -i 0.1 -w 1 -s $((ping_payload)) ${tunnel6_b_addr} + pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel6_b_addr})" + check_pmtu_value "$((esp_payload_rfc4106 - 
8))" "${pmtu}" "exceeding PMTU (IP payload length $((esp_payload_rfc4106)))" + +} + test_pmtu_vti4_default_mtu() { setup namespaces veth vti4 || return $ksft_skip diff --git a/tools/testing/selftests/net/timestamping.c b/tools/testing/selftests/net/timestamping.c index 21091be70688..aee631c5284e 100644 --- a/tools/testing/selftests/net/timestamping.c +++ b/tools/testing/selftests/net/timestamping.c @@ -47,7 +47,7 @@ static void usage(const char *error) { if (error) printf("invalid option: %s\n", error); - printf("timestamping interface option*\n\n" + printf("timestamping <interface> [bind_phc_index] [option]*\n\n" "Options:\n" " IP_MULTICAST_LOOP - looping outgoing multicasts\n" " SO_TIMESTAMP - normal software time stamping, ms resolution\n" @@ -58,6 +58,7 @@ static void usage(const char *error) " SOF_TIMESTAMPING_RX_SOFTWARE - software fallback for incoming packets\n" " SOF_TIMESTAMPING_SOFTWARE - request reporting of software time stamps\n" " SOF_TIMESTAMPING_RAW_HARDWARE - request reporting of raw HW time stamps\n" + " SOF_TIMESTAMPING_BIND_PHC - request to bind a PHC of PTP vclock\n" " SIOCGSTAMP - check last socket time stamp\n" " SIOCGSTAMPNS - more accurate socket time stamp\n" " PTPV2 - use PTPv2 messages\n"); @@ -311,7 +312,6 @@ static void recvpacket(int sock, int recvmsg_flags, int main(int argc, char **argv) { - int so_timestamping_flags = 0; int so_timestamp = 0; int so_timestampns = 0; int siocgstamp = 0; @@ -325,6 +325,8 @@ int main(int argc, char **argv) struct ifreq device; struct ifreq hwtstamp; struct hwtstamp_config hwconfig, hwconfig_requested; + struct so_timestamping so_timestamping_get = { 0, -1 }; + struct so_timestamping so_timestamping = { 0, -1 }; struct sockaddr_in addr; struct ip_mreq imr; struct in_addr iaddr; @@ -342,7 +344,12 @@ int main(int argc, char **argv) exit(1); } - for (i = 2; i < argc; i++) { + if (argc >= 3 && sscanf(argv[2], "%d", &so_timestamping.bind_phc) == 1) + val = 3; + else + val = 2; + + for (i = val; i < argc; i++) { if (!strcasecmp(argv[i], "SO_TIMESTAMP")) so_timestamp = 1; else if (!strcasecmp(argv[i], "SO_TIMESTAMPNS")) @@ -356,17 +363,19 @@ int main(int argc, char **argv) else if (!strcasecmp(argv[i], "PTPV2")) ptpv2 = 1; else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_TX_HARDWARE")) - so_timestamping_flags |= SOF_TIMESTAMPING_TX_HARDWARE; + so_timestamping.flags |= SOF_TIMESTAMPING_TX_HARDWARE; else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_TX_SOFTWARE")) - so_timestamping_flags |= SOF_TIMESTAMPING_TX_SOFTWARE; + so_timestamping.flags |= SOF_TIMESTAMPING_TX_SOFTWARE; else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_RX_HARDWARE")) - so_timestamping_flags |= SOF_TIMESTAMPING_RX_HARDWARE; + so_timestamping.flags |= SOF_TIMESTAMPING_RX_HARDWARE; else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_RX_SOFTWARE")) - so_timestamping_flags |= SOF_TIMESTAMPING_RX_SOFTWARE; + so_timestamping.flags |= SOF_TIMESTAMPING_RX_SOFTWARE; else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_SOFTWARE")) - so_timestamping_flags |= SOF_TIMESTAMPING_SOFTWARE; + so_timestamping.flags |= SOF_TIMESTAMPING_SOFTWARE; else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_RAW_HARDWARE")) - so_timestamping_flags |= SOF_TIMESTAMPING_RAW_HARDWARE; + so_timestamping.flags |= SOF_TIMESTAMPING_RAW_HARDWARE; + else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_BIND_PHC")) + so_timestamping.flags |= SOF_TIMESTAMPING_BIND_PHC; else usage(argv[i]); } @@ -385,10 +394,10 @@ int main(int argc, char **argv) hwtstamp.ifr_data = (void *)&hwconfig; memset(&hwconfig, 0, sizeof(hwconfig)); 
diff --git a/tools/testing/selftests/net/timestamping.c b/tools/testing/selftests/net/timestamping.c
index 21091be70688..aee631c5284e 100644
--- a/tools/testing/selftests/net/timestamping.c
+++ b/tools/testing/selftests/net/timestamping.c
@@ -47,7 +47,7 @@ static void usage(const char *error)
 {
 	if (error)
 		printf("invalid option: %s\n", error);
-	printf("timestamping interface option*\n\n"
+	printf("timestamping <interface> [bind_phc_index] [option]*\n\n"
 	       "Options:\n"
 	       "  IP_MULTICAST_LOOP - looping outgoing multicasts\n"
 	       "  SO_TIMESTAMP - normal software time stamping, ms resolution\n"
@@ -58,6 +58,7 @@ static void usage(const char *error)
 	       "  SOF_TIMESTAMPING_RX_SOFTWARE - software fallback for incoming packets\n"
 	       "  SOF_TIMESTAMPING_SOFTWARE - request reporting of software time stamps\n"
 	       "  SOF_TIMESTAMPING_RAW_HARDWARE - request reporting of raw HW time stamps\n"
+	       "  SOF_TIMESTAMPING_BIND_PHC - request to bind a PHC of PTP vclock\n"
 	       "  SIOCGSTAMP - check last socket time stamp\n"
 	       "  SIOCGSTAMPNS - more accurate socket time stamp\n"
 	       "  PTPV2 - use PTPv2 messages\n");
@@ -311,7 +312,6 @@ static void recvpacket(int sock, int recvmsg_flags,
 
 int main(int argc, char **argv)
 {
-	int so_timestamping_flags = 0;
 	int so_timestamp = 0;
 	int so_timestampns = 0;
 	int siocgstamp = 0;
@@ -325,6 +325,8 @@ int main(int argc, char **argv)
 	struct ifreq device;
 	struct ifreq hwtstamp;
 	struct hwtstamp_config hwconfig, hwconfig_requested;
+	struct so_timestamping so_timestamping_get = { 0, -1 };
+	struct so_timestamping so_timestamping = { 0, -1 };
 	struct sockaddr_in addr;
 	struct ip_mreq imr;
 	struct in_addr iaddr;
@@ -342,7 +344,12 @@ int main(int argc, char **argv)
 		exit(1);
 	}
 
-	for (i = 2; i < argc; i++) {
+	if (argc >= 3 && sscanf(argv[2], "%d", &so_timestamping.bind_phc) == 1)
+		val = 3;
+	else
+		val = 2;
+
+	for (i = val; i < argc; i++) {
 		if (!strcasecmp(argv[i], "SO_TIMESTAMP"))
 			so_timestamp = 1;
 		else if (!strcasecmp(argv[i], "SO_TIMESTAMPNS"))
@@ -356,17 +363,19 @@ int main(int argc, char **argv)
 		else if (!strcasecmp(argv[i], "PTPV2"))
 			ptpv2 = 1;
 		else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_TX_HARDWARE"))
-			so_timestamping_flags |= SOF_TIMESTAMPING_TX_HARDWARE;
+			so_timestamping.flags |= SOF_TIMESTAMPING_TX_HARDWARE;
 		else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_TX_SOFTWARE"))
-			so_timestamping_flags |= SOF_TIMESTAMPING_TX_SOFTWARE;
+			so_timestamping.flags |= SOF_TIMESTAMPING_TX_SOFTWARE;
 		else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_RX_HARDWARE"))
-			so_timestamping_flags |= SOF_TIMESTAMPING_RX_HARDWARE;
+			so_timestamping.flags |= SOF_TIMESTAMPING_RX_HARDWARE;
 		else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_RX_SOFTWARE"))
-			so_timestamping_flags |= SOF_TIMESTAMPING_RX_SOFTWARE;
+			so_timestamping.flags |= SOF_TIMESTAMPING_RX_SOFTWARE;
 		else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_SOFTWARE"))
-			so_timestamping_flags |= SOF_TIMESTAMPING_SOFTWARE;
+			so_timestamping.flags |= SOF_TIMESTAMPING_SOFTWARE;
 		else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_RAW_HARDWARE"))
-			so_timestamping_flags |= SOF_TIMESTAMPING_RAW_HARDWARE;
+			so_timestamping.flags |= SOF_TIMESTAMPING_RAW_HARDWARE;
+		else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_BIND_PHC"))
+			so_timestamping.flags |= SOF_TIMESTAMPING_BIND_PHC;
 		else
 			usage(argv[i]);
 	}
@@ -385,10 +394,10 @@ int main(int argc, char **argv)
 	hwtstamp.ifr_data = (void *)&hwconfig;
 	memset(&hwconfig, 0, sizeof(hwconfig));
 	hwconfig.tx_type =
-		(so_timestamping_flags & SOF_TIMESTAMPING_TX_HARDWARE) ?
+		(so_timestamping.flags & SOF_TIMESTAMPING_TX_HARDWARE) ?
 		HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
 	hwconfig.rx_filter =
-		(so_timestamping_flags & SOF_TIMESTAMPING_RX_HARDWARE) ?
+		(so_timestamping.flags & SOF_TIMESTAMPING_RX_HARDWARE) ?
 		ptpv2 ? HWTSTAMP_FILTER_PTP_V2_L4_SYNC :
 		HWTSTAMP_FILTER_PTP_V1_L4_SYNC : HWTSTAMP_FILTER_NONE;
 	hwconfig_requested = hwconfig;
@@ -413,6 +422,9 @@ int main(int argc, char **argv)
 		 sizeof(struct sockaddr_in)) < 0)
 		bail("bind");
 
+	if (setsockopt(sock, SOL_SOCKET, SO_BINDTODEVICE, interface, if_len))
+		bail("bind device");
+
 	/* set multicast group for outgoing packets */
 	inet_aton("224.0.1.130", &iaddr); /* alternate PTP domain 1 */
 	addr.sin_addr = iaddr;
@@ -444,10 +456,9 @@ int main(int argc, char **argv)
 		       &enabled, sizeof(enabled)) < 0)
 		bail("setsockopt SO_TIMESTAMPNS");
 
-	if (so_timestamping_flags &&
-		setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING,
-			   &so_timestamping_flags,
-			   sizeof(so_timestamping_flags)) < 0)
+	if (so_timestamping.flags &&
+	    setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING, &so_timestamping,
+		       sizeof(so_timestamping)) < 0)
 		bail("setsockopt SO_TIMESTAMPING");
 
 	/* request IP_PKTINFO for debugging purposes */
@@ -468,14 +479,18 @@ int main(int argc, char **argv)
 	else
 		printf("SO_TIMESTAMPNS %d\n", val);
 
-	if (getsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING, &val, &len) < 0) {
+	len = sizeof(so_timestamping_get);
+	if (getsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING, &so_timestamping_get,
+		       &len) < 0) {
 		printf("%s: %s\n", "getsockopt SO_TIMESTAMPING",
 		       strerror(errno));
 	} else {
-		printf("SO_TIMESTAMPING %d\n", val);
-		if (val != so_timestamping_flags)
-			printf("   not the expected value %d\n",
-			       so_timestamping_flags);
+		printf("SO_TIMESTAMPING flags %d, bind phc %d\n",
+		       so_timestamping_get.flags, so_timestamping_get.bind_phc);
+		if (so_timestamping_get.flags != so_timestamping.flags ||
+		    so_timestamping_get.bind_phc != so_timestamping.bind_phc)
+			printf("   not expected, flags %d, bind phc %d\n",
+			       so_timestamping.flags, so_timestamping.bind_phc);
 	}
 
 	/* send packets forever every five seconds */
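Outside the selftest, an application requests PHC-bound timestamping the same way the patched tool does: fill a struct so_timestamping and pass it to SO_TIMESTAMPING in a single setsockopt() call. A minimal sketch, assuming a 5.14+ linux/net_tstamp.h, an already-created socket, and a vclock index obtained elsewhere::

    #include <sys/socket.h>
    #include <linux/net_tstamp.h>

    /* Request raw hardware timestamps taken from the PTP vclock with
     * index phc_index rather than from the physical clock. */
    static int bind_timestamping_to_phc(int sock, int phc_index)
    {
            struct so_timestamping ts = {
                    .flags = SOF_TIMESTAMPING_TX_HARDWARE |
                             SOF_TIMESTAMPING_RX_HARDWARE |
                             SOF_TIMESTAMPING_RAW_HARDWARE |
                             SOF_TIMESTAMPING_BIND_PHC,
                    .bind_phc = phc_index,
            };

            return setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING,
                              &ts, sizeof(ts));
    }

As the diff shows, the old int-sized flags argument remains accepted; the kernel tells the two forms apart by the option length.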
diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
index cd6430b39982..8748199ac109 100644
--- a/tools/testing/selftests/netfilter/Makefile
+++ b/tools/testing/selftests/netfilter/Makefile
@@ -5,7 +5,7 @@ TEST_PROGS := nft_trans_stress.sh nft_fib.sh nft_nat.sh bridge_brouter.sh \
 	conntrack_icmp_related.sh nft_flowtable.sh ipvs.sh \
 	nft_concat_range.sh nft_conntrack_helper.sh \
 	nft_queue.sh nft_meta.sh nf_nat_edemux.sh \
-	ipip-conntrack-mtu.sh
+	ipip-conntrack-mtu.sh conntrack_tcp_unreplied.sh
 
 LDLIBS = -lmnl
 TEST_GEN_FILES =  nf-queue
diff --git a/tools/testing/selftests/netfilter/conntrack_tcp_unreplied.sh b/tools/testing/selftests/netfilter/conntrack_tcp_unreplied.sh
new file mode 100755
index 000000000000..e7d7bf13cff5
--- /dev/null
+++ b/tools/testing/selftests/netfilter/conntrack_tcp_unreplied.sh
@@ -0,0 +1,167 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Check that UNREPLIED tcp conntrack will eventually timeout.
+#
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+ret=0
+
+waittime=20
+sfx=$(mktemp -u "XXXXXXXX")
+ns1="ns1-$sfx"
+ns2="ns2-$sfx"
+
+nft --version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+	echo "SKIP: Could not run test without nft tool"
+	exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+	echo "SKIP: Could not run test without ip tool"
+	exit $ksft_skip
+fi
+
+cleanup() {
+	ip netns pids $ns1 | xargs kill 2>/dev/null
+	ip netns pids $ns2 | xargs kill 2>/dev/null
+
+	ip netns del $ns1
+	ip netns del $ns2
+}
+
+ipv4() {
+	echo -n 192.168.$1.2
+}
+
+check_counter()
+{
+	ns=$1
+	name=$2
+	expect=$3
+	local lret=0
+
+	cnt=$(ip netns exec $ns2 nft list counter inet filter "$name" | grep -q "$expect")
+	if [ $? -ne 0 ]; then
+		echo "ERROR: counter $name in $ns2 has unexpected value (expected $expect)" 1>&2
+		ip netns exec $ns2 nft list counter inet filter "$name" 1>&2
+		lret=1
+	fi
+
+	return $lret
+}
+
+# Create test namespaces
+ip netns add $ns1 || exit 1
+
+trap cleanup EXIT
+
+ip netns add $ns2 || exit 1
+
+# Connect the namespace to the host using a veth pair
+ip -net $ns1 link add name veth1 type veth peer name veth2
+ip -net $ns1 link set netns $ns2 dev veth2
+
+ip -net $ns1 link set up dev lo
+ip -net $ns2 link set up dev lo
+ip -net $ns1 link set up dev veth1
+ip -net $ns2 link set up dev veth2
+
+ip -net $ns2 addr add 10.11.11.2/24 dev veth2
+ip -net $ns2 route add default via 10.11.11.1
+
+ip netns exec $ns2 sysctl -q net.ipv4.conf.veth2.forwarding=1
+
+# add a rule inside NS so we enable conntrack
+ip netns exec $ns1 iptables -A INPUT -m state --state established,related -j ACCEPT
+
+ip -net $ns1 addr add 10.11.11.1/24 dev veth1
+ip -net $ns1 route add 10.99.99.99 via 10.11.11.2
+
+# Check connectivity works
+ip netns exec $ns1 ping -q -c 2 10.11.11.2 >/dev/null || exit 1
+
+ip netns exec $ns2 nc -l -p 8080 < /dev/null &
+
+# however, conntrack entries are there
+
+ip netns exec $ns2 nft -f - <<EOF
+table inet filter {
+	counter connreq { }
+	counter redir { }
+	chain input {
+		type filter hook input priority 0; policy accept;
+		ct state new tcp flags syn ip daddr 10.99.99.99 tcp dport 80 counter name "connreq" accept
+		ct state new ct status dnat tcp dport 8080 counter name "redir" accept
+	}
+}
+EOF
+if [ $? -ne 0 ]; then
+	echo "ERROR: Could not load nft rules"
+	exit 1
+fi
+
+ip netns exec $ns2 sysctl -q net.netfilter.nf_conntrack_tcp_timeout_syn_sent=10
+
+echo "INFO: connect $ns1 -> $ns2 to the virtual ip"
+ip netns exec $ns1 bash -c 'while true ; do
+	nc -p 60000 10.99.99.99 80
+	sleep 1
+	done' &
+
+sleep 1
+
+ip netns exec $ns2 nft -f - <<EOF
+table inet nat {
+	chain prerouting {
+		type nat hook prerouting priority 0; policy accept;
+		ip daddr 10.99.99.99 tcp dport 80 redirect to :8080
+	}
+}
+EOF
+if [ $? -ne 0 ]; then
+	echo "ERROR: Could not load nat redirect"
+	exit 1
+fi
+
+count=$(ip netns exec $ns2 conntrack -L -p tcp --dport 80 2>/dev/null | wc -l)
+if [ $count -eq 0 ]; then
+	echo "ERROR: $ns2 did not pick up tcp connection from peer"
+	exit 1
+fi
+
+echo "INFO: NAT redirect added in ns $ns2, waiting for $waittime seconds for nat to take effect"
+for i in $(seq 1 $waittime); do
+	echo -n "."
+
+	sleep 1
+
+	count=$(ip netns exec $ns2 conntrack -L -p tcp --reply-port-src 8080 2>/dev/null | wc -l)
+	if [ $count -gt 0 ]; then
+		echo
+		echo "PASS: redirection took effect after $i seconds"
+		break
+	fi
+
+	m=$((i%20))
+	if [ $m -eq 0 ]; then
+		echo " waited for $i seconds"
+	fi
+done
+
+expect="packets 1 bytes 60"
+check_counter "$ns2" "redir" "$expect"
+if [ $? -ne 0 ]; then
+	ret=1
+fi
+
+if [ $ret -eq 0 ];then
+	echo "PASS: redirection counter has expected values"
+else
+	echo "ERROR: no tcp connection was redirected"
+fi
+
+exit $ret
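What makes the wait loop converge: the NAT prerouting hook only rewrites the first packet of a NEW connection, so SYN retransmits that still match the stale pre-redirect conntrack entry for 10.99.99.99:80 bypass the redirect entirely. The script therefore lowers nf_conntrack_tcp_timeout_syn_sent to 10 seconds so the UNREPLIED entry can expire inside the 20-second window; once it does, the next SYN from the fixed source port 60000 creates a fresh entry, is redirected to :8080, and bumps the "redir" counter exactly once, which is what the final "packets 1 bytes 60" check asserts.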
diff --git a/tools/testing/selftests/sgx/sigstruct.c b/tools/testing/selftests/sgx/sigstruct.c
index dee7a3d6c5a5..92bbc5a15c39 100644
--- a/tools/testing/selftests/sgx/sigstruct.c
+++ b/tools/testing/selftests/sgx/sigstruct.c
@@ -55,10 +55,27 @@ static bool alloc_q1q2_ctx(const uint8_t *s, const uint8_t *m,
 	return true;
 }
 
+static void reverse_bytes(void *data, int length)
+{
+	int i = 0;
+	int j = length - 1;
+	uint8_t temp;
+	uint8_t *ptr = data;
+
+	while (i < j) {
+		temp = ptr[i];
+		ptr[i] = ptr[j];
+		ptr[j] = temp;
+		i++;
+		j--;
+	}
+}
+
 static bool calc_q1q2(const uint8_t *s, const uint8_t *m, uint8_t *q1,
 		      uint8_t *q2)
 {
 	struct q1q2_ctx ctx;
+	int len;
 
 	if (!alloc_q1q2_ctx(s, m, &ctx)) {
 		fprintf(stderr, "Not enough memory for Q1Q2 calculation\n");
@@ -89,8 +106,10 @@ static bool calc_q1q2(const uint8_t *s, const uint8_t *m, uint8_t *q1,
 		goto out;
 	}
 
-	BN_bn2bin(ctx.q1, q1);
-	BN_bn2bin(ctx.q2, q2);
+	len = BN_bn2bin(ctx.q1, q1);
+	reverse_bytes(q1, len);
+	len = BN_bn2bin(ctx.q2, q2);
+	reverse_bytes(q2, len);
 
 	free_q1q2_ctx(&ctx);
 	return true;
@@ -152,22 +171,6 @@ static RSA *gen_sign_key(void)
 	return key;
 }
 
-static void reverse_bytes(void *data, int length)
-{
-	int i = 0;
-	int j = length - 1;
-	uint8_t temp;
-	uint8_t *ptr = data;
-
-	while (i < j) {
-		temp = ptr[i];
-		ptr[i] = ptr[j];
-		ptr[j] = temp;
-		i++;
-		j--;
-	}
-}
-
 enum mrtags {
 	MRECREATE = 0x0045544145524345,
 	MREADD = 0x0000000044444145,
@@ -367,8 +370,6 @@ bool encl_measure(struct encl *encl)
 	/* BE -> LE */
 	reverse_bytes(sigstruct->signature, SGX_MODULUS_SIZE);
 	reverse_bytes(sigstruct->modulus, SGX_MODULUS_SIZE);
-	reverse_bytes(sigstruct->q1, SGX_MODULUS_SIZE);
-	reverse_bytes(sigstruct->q2, SGX_MODULUS_SIZE);
 
 	EVP_MD_CTX_destroy(ctx);
 	RSA_free(key);
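The reordering in calc_q1q2() matters because BN_bn2bin() writes only the significant bytes of a bignum, most significant byte first, and returns that count. Worked example: if q1 happens to fit in 383 bytes, the old code left the last byte of the 384-byte buffer untouched, and the later reverse_bytes(sigstruct->q1, SGX_MODULUS_SIZE) rotated that stale byte into the least-significant position while shifting every valid byte off by one. Reversing exactly the len bytes returned by BN_bn2bin() yields the little-endian layout SIGSTRUCT expects, with the remaining high-order bytes left as zero padding (provided the destination buffer was zero-initialized beforehand).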
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
index e363bdaff59d..2ea438e6b8b1 100644
--- a/tools/testing/selftests/vm/userfaultfd.c
+++ b/tools/testing/selftests/vm/userfaultfd.c
@@ -210,8 +210,10 @@ static void anon_release_pages(char *rel_area)
 
 static void anon_allocate_area(void **alloc_area)
 {
-	if (posix_memalign(alloc_area, page_size, nr_pages * page_size))
-		err("posix_memalign() failed");
+	*alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,
+			   MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+	if (*alloc_area == MAP_FAILED)
+		err("mmap of anonymous memory failed");
 }
 
 static void noop_alias_mapping(__u64 *start, size_t len, unsigned long offset)
diff --git a/tools/virtio/Makefile b/tools/virtio/Makefile
index b587b9a7a124..0d7bbe49359d 100644
--- a/tools/virtio/Makefile
+++ b/tools/virtio/Makefile
@@ -4,7 +4,8 @@ test: virtio_test vringh_test
 virtio_test: virtio_ring.o virtio_test.o
 vringh_test: vringh_test.o vringh.o virtio_ring.o
 
-CFLAGS += -g -O2 -Werror -Wall -I. -I../include/ -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE -include ../../include/linux/kconfig.h
+CFLAGS += -g -O2 -Werror -Wno-maybe-uninitialized -Wall -I. -I../include/ -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE -include ../../include/linux/kconfig.h
+LDFLAGS += -lpthread
 vpath %.c ../../drivers/virtio ../../drivers/vhost
 mod:
 	${MAKE} -C `pwd`/../.. M=`pwd`/vhost_test V=${V}
diff --git a/tools/virtio/linux/spinlock.h b/tools/virtio/linux/spinlock.h
new file mode 100644
index 000000000000..028e3cdcc5d3
--- /dev/null
+++ b/tools/virtio/linux/spinlock.h
@@ -0,0 +1,56 @@
+#ifndef SPINLOCK_H_STUB
+#define SPINLOCK_H_STUB
+
+#include <pthread.h>
+
+typedef pthread_spinlock_t spinlock_t;
+
+static inline void spin_lock_init(spinlock_t *lock)
+{
+	int r = pthread_spin_init(lock, 0);
+	assert(!r);
+}
+
+static inline void spin_lock(spinlock_t *lock)
+{
+	int ret = pthread_spin_lock(lock);
+	assert(!ret);
+}
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+	int ret = pthread_spin_unlock(lock);
+	assert(!ret);
+}
+
+static inline void spin_lock_bh(spinlock_t *lock)
+{
+	spin_lock(lock);
+}
+
+static inline void spin_unlock_bh(spinlock_t *lock)
+{
+	spin_unlock(lock);
+}
+
+static inline void spin_lock_irq(spinlock_t *lock)
+{
+	spin_lock(lock);
+}
+
+static inline void spin_unlock_irq(spinlock_t *lock)
+{
+	spin_unlock(lock);
+}
+
+static inline void spin_lock_irqsave(spinlock_t *lock, unsigned long f)
+{
+	spin_lock(lock);
+}
+
+static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long f)
+{
+	spin_unlock(lock);
+}
+
+#endif
diff --git a/tools/virtio/linux/virtio.h b/tools/virtio/linux/virtio.h
index 5d90254ddae4..363b98228301 100644
--- a/tools/virtio/linux/virtio.h
+++ b/tools/virtio/linux/virtio.h
@@ -3,6 +3,7 @@
 #define LINUX_VIRTIO_H
 #include <linux/scatterlist.h>
 #include <linux/kernel.h>
+#include <linux/spinlock.h>
 
 struct device {
 	void *parent;
@@ -12,6 +13,7 @@ struct virtio_device {
 	struct device dev;
 	u64 features;
 	struct list_head vqs;
+	spinlock_t vqs_list_lock;
 };
 
 struct virtqueue {
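The stub maps every kernel spinlock variant onto the same pthread spinlock, since user space has no bottom halves or interrupts to mask. A small usage sketch under the stub's assumptions (the header relies on assert() being declared by the including file, so a standalone user pulls in <assert.h> itself; the names below are illustrative)::

    #include <assert.h>
    #include <linux/spinlock.h>  /* resolves to this stub in tools/virtio builds */

    static spinlock_t vqs_lock;
    static int vq_count;

    static void add_vq(void)
    {
            /* behaves as plain pthread_spin_lock()/unlock() underneath */
            spin_lock_irqsave(&vqs_lock, 0);
            vq_count++;
            spin_unlock_irqrestore(&vqs_lock, 0);
    }

    int main(void)
    {
            spin_lock_init(&vqs_lock);
            add_vq();
            assert(vq_count == 1);
            return 0;
    }

Such a test has to link against libpthread, which is why the Makefile change above adds -lpthread to LDFLAGS.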
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index f08f5e82460b..0be80c213f7f 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -186,7 +186,6 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
 		    coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
 			r = kvm_io_bus_unregister_dev(kvm, zone->pio ?
 					KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);
-			kvm_iodevice_destructor(&dev->dev);
 
 			/*
 			 * On failure, unregister destroys all devices on the
@@ -196,6 +195,7 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
 			 */
 			if (r)
 				break;
+			kvm_iodevice_destructor(&dev->dev);
 		}
 	}
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 7d95126cda9e..b50dbe269f4b 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -892,6 +892,8 @@ static void kvm_destroy_vm_debugfs(struct kvm *kvm)
 
 static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
 {
+	static DEFINE_MUTEX(kvm_debugfs_lock);
+	struct dentry *dent;
 	char dir_name[ITOA_MAX_LEN * 2];
 	struct kvm_stat_data *stat_data;
 	const struct _kvm_stats_desc *pdesc;
@@ -903,8 +905,20 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
 		return 0;
 
 	snprintf(dir_name, sizeof(dir_name), "%d-%d", task_pid_nr(current), fd);
-	kvm->debugfs_dentry = debugfs_create_dir(dir_name, kvm_debugfs_dir);
+	mutex_lock(&kvm_debugfs_lock);
+	dent = debugfs_lookup(dir_name, kvm_debugfs_dir);
+	if (dent) {
+		pr_warn_ratelimited("KVM: debugfs: duplicate directory %s\n", dir_name);
+		dput(dent);
+		mutex_unlock(&kvm_debugfs_lock);
+		return 0;
+	}
+	dent = debugfs_create_dir(dir_name, kvm_debugfs_dir);
+	mutex_unlock(&kvm_debugfs_lock);
+	if (IS_ERR(dent))
+		return 0;
+	kvm->debugfs_dentry = dent;
 	kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
 					 sizeof(*kvm->debugfs_stat_data),
 					 GFP_KERNEL_ACCOUNT);
@@ -935,7 +949,7 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
 		stat_data->kvm = kvm;
 		stat_data->desc = pdesc;
 		stat_data->kind = KVM_STAT_VCPU;
-		kvm->debugfs_stat_data[i] = stat_data;
+		kvm->debugfs_stat_data[i + kvm_vm_stats_header.num_desc] = stat_data;
 		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
 				    kvm->debugfs_dentry, stat_data,
 				    &stat_fops_per_vm);
@@ -3110,6 +3124,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
 				++vcpu->stat.generic.halt_poll_invalid;
 				goto out;
 			}
+			cpu_relax();
 			poll_end = cur = ktime_get();
 		} while (kvm_vcpu_can_poll(cur, stop));
 	}
@@ -4390,6 +4405,16 @@ struct compat_kvm_dirty_log {
 	};
 };
 
+struct compat_kvm_clear_dirty_log {
+	__u32 slot;
+	__u32 num_pages;
+	__u64 first_page;
+	union {
+		compat_uptr_t dirty_bitmap; /* one bit per page */
+		__u64 padding2;
+	};
+};
+
 static long kvm_vm_compat_ioctl(struct file *filp,
 			   unsigned int ioctl, unsigned long arg)
 {
@@ -4399,6 +4424,24 @@ static long kvm_vm_compat_ioctl(struct file *filp,
 	if (kvm->mm != current->mm)
 		return -EIO;
 	switch (ioctl) {
+#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
+	case KVM_CLEAR_DIRTY_LOG: {
+		struct compat_kvm_clear_dirty_log compat_log;
+		struct kvm_clear_dirty_log log;
+
+		if (copy_from_user(&compat_log, (void __user *)arg,
+				   sizeof(compat_log)))
+			return -EFAULT;
+		log.slot = compat_log.slot;
+		log.num_pages = compat_log.num_pages;
+		log.first_page = compat_log.first_page;
+		log.padding2 = compat_log.padding2;
+		log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
+
+		r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
+		break;
+	}
+#endif
 	case KVM_GET_DIRTY_LOG: {
 		struct compat_kvm_dirty_log compat_log;
 		struct kvm_dirty_log log;
@@ -5172,7 +5215,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
 	}
 
 	add_uevent_var(env, "PID=%d", kvm->userspace_pid);
-	if (!IS_ERR_OR_NULL(kvm->debugfs_dentry)) {
+	if (kvm->debugfs_dentry) {
 		char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT);
 
 		if (p) {