Diffstat (limited to 'drivers')
-rw-r--r--  drivers/accessibility/speakup/spk_ttyio.c | 37
-rw-r--r--  drivers/clk/imx/Kconfig | 4
-rw-r--r--  drivers/clk/renesas/r9a06g032-clocks.c | 2
-rw-r--r--  drivers/firmware/xilinx/zynqmp.c | 2
-rw-r--r--  drivers/fpga/Kconfig | 1
-rw-r--r--  drivers/gpio/gpio-arizona.c | 1
-rw-r--r--  drivers/gpio/gpio-dwapb.c | 2
-rw-r--r--  drivers/gpio/gpio-eic-sprd.c | 2
-rw-r--r--  drivers/gpio/gpio-mvebu.c | 16
-rw-r--r--  drivers/gpio/gpio-zynq.c | 4
-rw-r--r--  drivers/gpio/gpiolib.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 106
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 7
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu10.h | 14
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c | 103
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c | 9
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 24
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 7
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_breadcrumbs.c | 168
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h | 6
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context.c | 15
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context_types.h | 23
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.c | 7
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_mocs.c | 21
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rps.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/shmem_utils.c | 9
-rw-r--r--  drivers/gpu/drm/i915/i915_request.h | 6
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem.c | 4
-rw-r--r--  drivers/gpu/drm/mxsfb/mxsfb_kms.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/sdi.c | 10
-rw-r--r--  drivers/gpu/drm/panel/panel-sony-acx565akm.c | 2
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_lvds.c | 2
-rw-r--r--  drivers/gpu/drm/tegra/drm.c | 2
-rw-r--r--  drivers/gpu/drm/tegra/output.c | 1
-rw-r--r--  drivers/gpu/drm/tegra/sor.c | 76
-rw-r--r--  drivers/i2c/busses/Kconfig | 1
-rw-r--r--  drivers/i2c/busses/i2c-imx.c | 44
-rw-r--r--  drivers/i2c/busses/i2c-mlxbf.c | 12
-rw-r--r--  drivers/i2c/busses/i2c-qcom-cci.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-qup.c | 3
-rw-r--r--  drivers/idle/intel_idle.c | 28
-rw-r--r--  drivers/infiniband/core/cache.c | 3
-rw-r--r--  drivers/infiniband/core/cm.c | 2
-rw-r--r--  drivers/infiniband/hw/efa/efa_verbs.c | 4
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.c | 9
-rw-r--r--  drivers/input/joystick/xpad.c | 2
-rw-r--r--  drivers/input/misc/soc_button_array.c | 1
-rw-r--r--  drivers/input/serio/i8042-x86ia64io.h | 4
-rw-r--r--  drivers/input/serio/i8042.c | 3
-rw-r--r--  drivers/input/touchscreen/atmel_mxt_ts.c | 4
-rw-r--r--  drivers/iommu/amd/amd_iommu_types.h | 2
-rw-r--r--  drivers/md/dm-cache-target.c | 4
-rw-r--r--  drivers/md/dm-integrity.c | 4
-rw-r--r--  drivers/md/dm-table.c | 11
-rw-r--r--  drivers/md/dm-writecache.c | 6
-rw-r--r--  drivers/md/dm.c | 29
-rw-r--r--  drivers/media/cec/usb/pulse8/pulse8-cec.c | 52
-rw-r--r--  drivers/media/common/videobuf2/videobuf2-core.c | 11
-rw-r--r--  drivers/media/rc/mtk-cir.c | 9
-rw-r--r--  drivers/media/test-drivers/vidtv/vidtv_channel.c | 4
-rw-r--r--  drivers/media/test-drivers/vidtv/vidtv_psi.h | 8
-rw-r--r--  drivers/media/test-drivers/vidtv/vidtv_s302m.c | 4
-rw-r--r--  drivers/media/test-drivers/vidtv/vidtv_ts.h | 2
-rw-r--r--  drivers/misc/habanalabs/common/device.c | 16
-rw-r--r--  drivers/misc/habanalabs/common/memory.c | 1
-rw-r--r--  drivers/misc/mei/Kconfig | 10
-rw-r--r--  drivers/misc/mei/Makefile | 3
-rw-r--r--  drivers/misc/mei/hw-virtio.c | 874
-rw-r--r--  drivers/mmc/core/block.c | 2
-rw-r--r--  drivers/mmc/host/mtk-sd.c | 39
-rw-r--r--  drivers/mmc/host/sdhci-of-arasan.c | 3
-rw-r--r--  drivers/mmc/host/tmio_mmc_core.c | 6
-rw-r--r--  drivers/mtd/nand/raw/ams-delta.c | 4
-rw-r--r--  drivers/mtd/nand/raw/au1550nd.c | 4
-rw-r--r--  drivers/mtd/nand/raw/gpio.c | 4
-rw-r--r--  drivers/mtd/nand/raw/mpc5121_nfc.c | 4
-rw-r--r--  drivers/mtd/nand/raw/orion_nand.c | 4
-rw-r--r--  drivers/mtd/nand/raw/pasemi_nand.c | 4
-rw-r--r--  drivers/mtd/nand/raw/plat_nand.c | 4
-rw-r--r--  drivers/mtd/nand/raw/socrates_nand.c | 4
-rw-r--r--  drivers/mtd/nand/raw/xway_nand.c | 4
-rw-r--r--  drivers/net/bonding/bond_options.c | 22
-rw-r--r--  drivers/net/can/softing/softing_main.c | 9
-rw-r--r--  drivers/net/dsa/ocelot/felix.c | 7
-rw-r--r--  drivers/net/dsa/ocelot/felix_vsc9959.c | 1
-rw-r--r--  drivers/net/dsa/ocelot/seville_vsc9953.c | 1
-rw-r--r--  drivers/net/ethernet/agere/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/cadence/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c | 6
-rw-r--r--  drivers/net/ethernet/faraday/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/freescale/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c | 1
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_ethtool.c | 10
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_hw.h | 10
-rw-r--r--  drivers/net/ethernet/freescale/fman/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 8
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 27
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 31
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 5
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 37
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 24
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_main.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 40
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/microchip/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 9
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_vsc7514.c | 1
-rw-r--r--  drivers/net/ethernet/netronome/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 6
-rw-r--r--  drivers/net/ethernet/nxp/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/rocker/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c | 9
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 51
-rw-r--r--  drivers/net/ethernet/ti/cpsw_priv.c | 3
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac_main.c | 9
-rw-r--r--  drivers/net/geneve.c | 20
-rw-r--r--  drivers/net/ipa/gsi_trans.c | 7
-rw-r--r--  drivers/net/netdevsim/bpf.c | 15
-rw-r--r--  drivers/net/netdevsim/netdevsim.h | 1
-rw-r--r--  drivers/net/vrf.c | 10
-rw-r--r--  drivers/pinctrl/aspeed/pinctrl-aspeed.c | 74
-rw-r--r--  drivers/pinctrl/aspeed/pinmux-aspeed.h | 7
-rw-r--r--  drivers/pinctrl/intel/pinctrl-baytrail.c | 8
-rw-r--r--  drivers/pinctrl/intel/pinctrl-intel.c | 4
-rw-r--r--  drivers/pinctrl/intel/pinctrl-jasperlake.c | 452
-rw-r--r--  drivers/pinctrl/intel/pinctrl-merrifield.c | 8
-rw-r--r--  drivers/pinctrl/pinctrl-amd.c | 7
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 39
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c | 29
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.c | 2
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_ctl.c | 2
-rw-r--r--  drivers/scsi/storvsc_drv.c | 9
-rw-r--r--  drivers/thunderbolt/icm.c | 10
-rw-r--r--  drivers/tty/tty_io.c | 7
-rw-r--r--  drivers/tty/tty_jobctrl.c | 44
-rw-r--r--  drivers/usb/cdns3/core.c | 29
-rw-r--r--  drivers/usb/cdns3/gadget.c | 1
-rw-r--r--  drivers/usb/gadget/function/f_fs.c | 6
-rw-r--r--  drivers/usb/host/ohci-omap.c | 4
-rw-r--r--  drivers/usb/serial/ch341.c | 5
-rw-r--r--  drivers/usb/serial/kl5kusb105.c | 10
-rw-r--r--  drivers/usb/serial/option.c | 10
-rw-r--r--  drivers/usb/storage/scsiglue.c | 2
-rw-r--r--  drivers/usb/storage/uas.c | 19
-rw-r--r--  drivers/usb/storage/usb.c | 5
163 files changed, 1507 insertions, 1820 deletions
diff --git a/drivers/accessibility/speakup/spk_ttyio.c b/drivers/accessibility/speakup/spk_ttyio.c
index 669392f31d4e..6284aff434a1 100644
--- a/drivers/accessibility/speakup/spk_ttyio.c
+++ b/drivers/accessibility/speakup/spk_ttyio.c
@@ -47,27 +47,20 @@ static int spk_ttyio_ldisc_open(struct tty_struct *tty)
{
struct spk_ldisc_data *ldisc_data;
+ if (tty != speakup_tty)
+ /* Somebody tried to use this line discipline outside speakup */
+ return -ENODEV;
+
if (!tty->ops->write)
return -EOPNOTSUPP;
- mutex_lock(&speakup_tty_mutex);
- if (speakup_tty) {
- mutex_unlock(&speakup_tty_mutex);
- return -EBUSY;
- }
- speakup_tty = tty;
-
ldisc_data = kmalloc(sizeof(*ldisc_data), GFP_KERNEL);
- if (!ldisc_data) {
- speakup_tty = NULL;
- mutex_unlock(&speakup_tty_mutex);
+ if (!ldisc_data)
return -ENOMEM;
- }
init_completion(&ldisc_data->completion);
ldisc_data->buf_free = true;
- speakup_tty->disc_data = ldisc_data;
- mutex_unlock(&speakup_tty_mutex);
+ tty->disc_data = ldisc_data;
return 0;
}
@@ -191,9 +184,25 @@ static int spk_ttyio_initialise_ldisc(struct spk_synth *synth)
tty_unlock(tty);
+ mutex_lock(&speakup_tty_mutex);
+ speakup_tty = tty;
ret = tty_set_ldisc(tty, N_SPEAKUP);
if (ret)
- pr_err("speakup: Failed to set N_SPEAKUP on tty\n");
+ speakup_tty = NULL;
+ mutex_unlock(&speakup_tty_mutex);
+
+ if (!ret)
+ /* Success */
+ return 0;
+
+ pr_err("speakup: Failed to set N_SPEAKUP on tty\n");
+
+ tty_lock(tty);
+ if (tty->ops->close)
+ tty->ops->close(tty, NULL);
+ tty_unlock(tty);
+
+ tty_kclose(tty);
return ret;
}
diff --git a/drivers/clk/imx/Kconfig b/drivers/clk/imx/Kconfig
index 3b393cb07295..3061896503f3 100644
--- a/drivers/clk/imx/Kconfig
+++ b/drivers/clk/imx/Kconfig
@@ -5,8 +5,8 @@ config MXC_CLK
depends on ARCH_MXC || COMPILE_TEST
config MXC_CLK_SCU
- tristate "IMX SCU clock"
- depends on ARCH_MXC || COMPILE_TEST
+ tristate
+ depends on ARCH_MXC
depends on IMX_SCU && HAVE_ARM_SMCCC
config CLK_IMX1
diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c
index d900f6bf53d0..892e91b92f2c 100644
--- a/drivers/clk/renesas/r9a06g032-clocks.c
+++ b/drivers/clk/renesas/r9a06g032-clocks.c
@@ -55,7 +55,7 @@ struct r9a06g032_clkdesc {
u16 sel, g1, r1, g2, r2;
} dual;
};
-} __packed;
+};
#define I_GATE(_clk, _rst, _rdy, _midle, _scon, _mirack, _mistat) \
{ .gate = _clk, .reset = _rst, \
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index d08ac824c993..fd95edeb702b 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -29,7 +29,7 @@
#define PM_API_FEATURE_CHECK_MAX_ORDER 7
static bool feature_check_enabled;
-DEFINE_HASHTABLE(pm_api_features_map, PM_API_FEATURE_CHECK_MAX_ORDER);
+static DEFINE_HASHTABLE(pm_api_features_map, PM_API_FEATURE_CHECK_MAX_ORDER);
/**
* struct pm_api_feature_data - PM API Feature data
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 7cd5a29fc437..5645226ca3ce 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -142,6 +142,7 @@ config FPGA_DFL
tristate "FPGA Device Feature List (DFL) support"
select FPGA_BRIDGE
select FPGA_REGION
+ depends on HAS_IOMEM
help
Device Feature List (DFL) defines a feature list structure that
creates a linked list of feature headers within the MMIO space
diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c
index 5bda38e0780f..2bc173c352ce 100644
--- a/drivers/gpio/gpio-arizona.c
+++ b/drivers/gpio/gpio-arizona.c
@@ -192,6 +192,7 @@ static int arizona_gpio_probe(struct platform_device *pdev)
ret = devm_gpiochip_add_data(&pdev->dev, &arizona_gpio->gpio_chip,
arizona_gpio);
if (ret < 0) {
+ pm_runtime_disable(&pdev->dev);
dev_err(&pdev->dev, "Could not register gpiochip, %d\n",
ret);
return ret;
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 2a9046c0fb16..4275c18a097a 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -724,6 +724,8 @@ static int dwapb_gpio_probe(struct platform_device *pdev)
return err;
}
+ platform_set_drvdata(pdev, gpio);
+
return 0;
}
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index ad61daf6c212..865ab2b34fdd 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -598,7 +598,7 @@ static int sprd_eic_probe(struct platform_device *pdev)
*/
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
if (!res)
- continue;
+ break;
sprd_eic->base[i] = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(sprd_eic->base[i]))
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 433e2c3f3fd5..2f245594a90a 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -1197,6 +1197,13 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
devm_gpiochip_add_data(&pdev->dev, &mvchip->chip, mvchip);
+ /* Some MVEBU SoCs have simple PWM support for GPIO lines */
+ if (IS_ENABLED(CONFIG_PWM)) {
+ err = mvebu_pwm_probe(pdev, mvchip, id);
+ if (err)
+ return err;
+ }
+
/* Some gpio controllers do not provide irq support */
if (!have_irqs)
return 0;
@@ -1206,7 +1213,8 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
if (!mvchip->domain) {
dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n",
mvchip->chip.label);
- return -ENODEV;
+ err = -ENODEV;
+ goto err_pwm;
}
err = irq_alloc_domain_generic_chips(
@@ -1254,14 +1262,12 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
mvchip);
}
- /* Some MVEBU SoCs have simple PWM support for GPIO lines */
- if (IS_ENABLED(CONFIG_PWM))
- return mvebu_pwm_probe(pdev, mvchip, id);
-
return 0;
err_domain:
irq_domain_remove(mvchip->domain);
+err_pwm:
+ pwmchip_remove(&mvchip->mvpwm->chip);
return err;
}
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index 0b5a17ab996f..3521c1dc3ac0 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -574,7 +574,7 @@ static int zynq_gpio_irq_reqres(struct irq_data *d)
struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
int ret;
- ret = pm_runtime_get_sync(chip->parent);
+ ret = pm_runtime_resume_and_get(chip->parent);
if (ret < 0)
return ret;
@@ -942,7 +942,7 @@ static int zynq_gpio_probe(struct platform_device *pdev)
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- ret = pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0)
goto err_pm_dis;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 089ddcaa9bc6..6e3c4d7a7d14 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1806,6 +1806,11 @@ EXPORT_SYMBOL_GPL(gpiochip_generic_request);
*/
void gpiochip_generic_free(struct gpio_chip *gc, unsigned offset)
{
+#ifdef CONFIG_PINCTRL
+ if (list_empty(&gc->gpiodev->pin_ranges))
+ return;
+#endif
+
pinctrl_gpio_free(gc->gpiodev->base + offset);
}
EXPORT_SYMBOL_GPL(gpiochip_generic_free);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 957934926b24..1b56dbc1f304 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -459,6 +459,7 @@ amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_bo *bo;
struct amdgpu_bo_param bp;
+ struct drm_gem_object *gobj;
int ret;
memset(&bp, 0, sizeof(bp));
@@ -469,17 +470,20 @@ amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
bp.type = ttm_bo_type_sg;
bp.resv = resv;
dma_resv_lock(resv, NULL);
- ret = amdgpu_bo_create(adev, &bp, &bo);
+ ret = amdgpu_gem_object_create(adev, dma_buf->size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_CPU,
+ 0, ttm_bo_type_sg, resv, &gobj);
if (ret)
goto error;
+ bo = gem_to_amdgpu_bo(gobj);
bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
if (dma_buf->ops != &amdgpu_dmabuf_ops)
bo->prime_shared_count = 1;
dma_resv_unlock(resv);
- return &bo->tbo.base;
+ return gobj;
error:
dma_resv_unlock(resv);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 7e8265da9f25..e8c76bd8c501 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -66,26 +66,12 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
bp.type = type;
bp.resv = resv;
bp.preferred_domain = initial_domain;
-retry:
bp.flags = flags;
bp.domain = initial_domain;
r = amdgpu_bo_create(adev, &bp, &bo);
- if (r) {
- if (r != -ERESTARTSYS) {
- if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
- flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- goto retry;
- }
-
- if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
- initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
- goto retry;
- }
- DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
- size, initial_domain, alignment, r);
- }
+ if (r)
return r;
- }
+
*obj = &bo->tbo.base;
return 0;
@@ -225,7 +211,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
uint64_t size = args->in.bo_size;
struct dma_resv *resv = NULL;
struct drm_gem_object *gobj;
- uint32_t handle;
+ uint32_t handle, initial_domain;
int r;
/* reject invalid gem flags */
@@ -269,9 +255,28 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
resv = vm->root.base.bo->tbo.base.resv;
}
+retry:
+ initial_domain = (u32)(0xffffffff & args->in.domains);
r = amdgpu_gem_object_create(adev, size, args->in.alignment,
- (u32)(0xffffffff & args->in.domains),
+ initial_domain,
flags, ttm_bo_type_device, resv, &gobj);
+ if (r) {
+ if (r != -ERESTARTSYS) {
+ if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
+ flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ goto retry;
+ }
+
+ if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
+ initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
+ goto retry;
+ }
+ DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
+ size, initial_domain, args->in.alignment, r);
+ }
+ return r;
+ }
+
if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
if (!r) {
struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 36604d751d62..3e4892b7b7d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -499,6 +499,9 @@ void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev)
else
size = amdgpu_gmc_get_vbios_fb_size(adev);
+ if (adev->mman.keep_stolen_vga_memory)
+ size = max(size, (unsigned)AMDGPU_VBIOS_VGA_ALLOCATION);
+
/* set to 0 if the pre-OS buffer uses up most of vram */
if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
size = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 4e36551ab50b..82cd8e55595a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1172,7 +1172,7 @@ static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
con->dir, &con->disable_ras_err_cnt_harvest);
}
-void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
+static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
struct ras_fs_if *head)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
@@ -1194,7 +1194,6 @@ void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
{
-#if defined(CONFIG_DEBUG_FS)
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_manager *obj;
struct ras_fs_if fs_info;
@@ -1203,7 +1202,7 @@ void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
* it won't be called in resume path, no need to check
* suspend and gpu reset status
*/
- if (!con)
+ if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
return;
amdgpu_ras_debugfs_create_ctrl_node(adev);
@@ -1217,10 +1216,9 @@ void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
amdgpu_ras_debugfs_create(adev, &fs_info);
}
}
-#endif
}
-void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
+static void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
struct ras_common_if *head)
{
struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
@@ -1234,7 +1232,6 @@ void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
{
-#if defined(CONFIG_DEBUG_FS)
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_manager *obj, *tmp;
@@ -1243,7 +1240,6 @@ static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
}
con->dir = NULL;
-#endif
}
/* debugfs end */
@@ -1291,7 +1287,8 @@ static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
{
- amdgpu_ras_debugfs_remove_all(adev);
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ amdgpu_ras_debugfs_remove_all(adev);
amdgpu_ras_sysfs_remove_all(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 6b8d7bb83bb3..ec398ed7deb8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -607,14 +607,8 @@ int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
struct ras_common_if *head);
-void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
- struct ras_fs_if *head);
-
void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev);
-void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
- struct ras_common_if *head);
-
int amdgpu_ras_error_query(struct amdgpu_device *adev,
struct ras_query_if *info);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 9f3952723c63..2a485052e3ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -186,7 +186,7 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
if (err)
goto out;
- err = sdma_v5_2_init_inst_ctx(&adev->sdma.instance[0]);
+ err = sdma_v5_2_init_inst_ctx(&adev->sdma.instance[i]);
if (err)
goto out;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index e074f7ed388c..b5f8f3d731cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -1011,6 +1011,11 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp);
+ /* Stall DPG before WPTR/RPTR reset */
+ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
+ UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
+ ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+
/* set the write pointer delay */
WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);
@@ -1033,6 +1038,10 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
lower_32_bits(ring->wptr));
+ /* Unstall DPG */
+ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
+ 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+
return 0;
}
@@ -1556,8 +1565,14 @@ static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,
UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
+ /* Stall DPG before WPTR/RPTR reset */
+ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
+ UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
+ ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+
/* Restore */
ring = &adev->vcn.inst[inst_idx].ring_enc[0];
+ ring->wptr = 0;
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
@@ -1565,14 +1580,16 @@ static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
ring = &adev->vcn.inst[inst_idx].ring_enc[1];
+ ring->wptr = 0;
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
- WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
- RREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2) & 0x7FFFFFFF);
+ /* Unstall DPG */
+ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
+ 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS,
UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
@@ -1630,10 +1647,6 @@ static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
- WREG32_SOC15(VCN, ring->me, mmUVD_SCRATCH2,
- lower_32_bits(ring->wptr) | 0x80000000);
-
if (ring->use_doorbell) {
adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 222f1df1a6b6..8cc51cec988a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -1736,6 +1736,7 @@ static int kfd_ioctl_import_dmabuf(struct file *filep,
}
mutex_unlock(&p->mutex);
+ dma_buf_put(dmabuf);
args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
@@ -1745,6 +1746,7 @@ err_free:
amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem, NULL);
err_unlock:
mutex_unlock(&p->mutex);
+ dma_buf_put(dmabuf);
return r;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 9b6809f309f4..0f7749e9424d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1058,9 +1058,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
goto error;
}
- /* Update the actual used number of crtc */
- adev->mode_info.num_crtc = adev->dm.display_indexes_num;
-
/* create fake encoders for MST */
dm_dp_create_fake_mst_encoders(adev);
@@ -3251,6 +3248,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
enum dc_connection_type new_connection_type = dc_connection_none;
const struct dc_plane_cap *plane;
+ dm->display_indexes_num = dm->dc->caps.max_streams;
+ /* Update the actual used number of crtc */
+ adev->mode_info.num_crtc = adev->dm.display_indexes_num;
+
link_cnt = dm->dc->caps.max_links;
if (amdgpu_dm_mode_config_init(dm->adev)) {
DRM_ERROR("DM: Failed to initialize mode config\n");
@@ -3312,8 +3313,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
goto fail;
}
- dm->display_indexes_num = dm->dc->caps.max_streams;
-
/* loops over all connectors on the board */
for (i = 0; i < link_cnt; i++) {
struct dc_link *link = NULL;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 2f8fee05547a..6b431db146cd 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -163,8 +163,17 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
new_clocks->dppclk_khz = 100000;
}
- if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
- if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
+ /*
+ * Temporarily ignore the 0 cases for disp and dpp clks.
+ * We may have a new feature that requires 0 clks in the future.
+ */
+ if (new_clocks->dppclk_khz == 0 || new_clocks->dispclk_khz == 0) {
+ new_clocks->dppclk_khz = clk_mgr_base->clks.dppclk_khz;
+ new_clocks->dispclk_khz = clk_mgr_base->clks.dispclk_khz;
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr_base->clks.dppclk_khz)) {
+ if (clk_mgr_base->clks.dppclk_khz > new_clocks->dppclk_khz)
dpp_clock_lowered = true;
clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
update_dppclk = true;
@@ -570,7 +579,7 @@ static struct clk_bw_params rn_bw_params = {
};
-static struct wm_table ddr4_wm_table = {
+static struct wm_table ddr4_wm_table_gs = {
.entries = {
{
.wm_inst = WM_A,
@@ -607,7 +616,7 @@ static struct wm_table ddr4_wm_table = {
}
};
-static struct wm_table lpddr4_wm_table = {
+static struct wm_table lpddr4_wm_table_gs = {
.entries = {
{
.wm_inst = WM_A,
@@ -681,6 +690,80 @@ static struct wm_table lpddr4_wm_table_with_disabled_ppt = {
}
};
+static struct wm_table ddr4_wm_table_rn = {
+ .entries = {
+ {
+ .wm_inst = WM_A,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 9.09,
+ .sr_enter_plus_exit_time_us = 10.14,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_B,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 10.12,
+ .sr_enter_plus_exit_time_us = 11.48,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_C,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 10.12,
+ .sr_enter_plus_exit_time_us = 11.48,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_D,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 10.12,
+ .sr_enter_plus_exit_time_us = 11.48,
+ .valid = true,
+ },
+ }
+};
+
+static struct wm_table lpddr4_wm_table_rn = {
+ .entries = {
+ {
+ .wm_inst = WM_A,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.65333,
+ .sr_exit_time_us = 7.32,
+ .sr_enter_plus_exit_time_us = 8.38,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_B,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.65333,
+ .sr_exit_time_us = 9.82,
+ .sr_enter_plus_exit_time_us = 11.196,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_C,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.65333,
+ .sr_exit_time_us = 9.89,
+ .sr_enter_plus_exit_time_us = 11.24,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_D,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.65333,
+ .sr_exit_time_us = 9.748,
+ .sr_enter_plus_exit_time_us = 11.102,
+ .valid = true,
+ },
+ }
+};
+
static unsigned int find_dcfclk_for_voltage(struct dpm_clocks *clock_table, unsigned int voltage)
{
int i;
@@ -762,6 +845,11 @@ void rn_clk_mgr_construct(
struct dc_debug_options *debug = &ctx->dc->debug;
struct dpm_clocks clock_table = { 0 };
enum pp_smu_status status = 0;
+ int is_green_sardine = 0;
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ is_green_sardine = ASICREV_IS_GREEN_SARDINE(ctx->asic_id.hw_internal_rev);
+#endif
clk_mgr->base.ctx = ctx;
clk_mgr->base.funcs = &dcn21_funcs;
@@ -802,10 +890,16 @@ void rn_clk_mgr_construct(
if (clk_mgr->periodic_retraining_disabled) {
rn_bw_params.wm_table = lpddr4_wm_table_with_disabled_ppt;
} else {
- rn_bw_params.wm_table = lpddr4_wm_table;
+ if (is_green_sardine)
+ rn_bw_params.wm_table = lpddr4_wm_table_gs;
+ else
+ rn_bw_params.wm_table = lpddr4_wm_table_rn;
}
} else {
- rn_bw_params.wm_table = ddr4_wm_table;
+ if (is_green_sardine)
+ rn_bw_params.wm_table = ddr4_wm_table_gs;
+ else
+ rn_bw_params.wm_table = ddr4_wm_table_rn;
}
/* Saved clocks configured at boot for debug purposes */
rn_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index fec87a2e210c..5b0cedfa824a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -3394,10 +3394,13 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
{
uint32_t bits_per_channel = 0;
uint32_t kbps;
+ struct fixed31_32 link_bw_kbps;
if (timing->flags.DSC) {
- kbps = (timing->pix_clk_100hz * timing->dsc_cfg.bits_per_pixel);
- kbps = kbps / 160 + ((kbps % 160) ? 1 : 0);
+ link_bw_kbps = dc_fixpt_from_int(timing->pix_clk_100hz);
+ link_bw_kbps = dc_fixpt_div_int(link_bw_kbps, 160);
+ link_bw_kbps = dc_fixpt_mul_int(link_bw_kbps, timing->dsc_cfg.bits_per_pixel);
+ kbps = dc_fixpt_ceil(link_bw_kbps);
return kbps;
}
diff --git a/drivers/gpu/drm/amd/pm/inc/smu10.h b/drivers/gpu/drm/amd/pm/inc/smu10.h
index b96520528240..9e837a5014c5 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu10.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu10.h
@@ -136,14 +136,12 @@
#define FEATURE_CORE_CSTATES_MASK (1 << FEATURE_CORE_CSTATES_BIT)
/* Workload bits */
-#define WORKLOAD_DEFAULT_BIT 0
-#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 1
-#define WORKLOAD_PPLIB_POWER_SAVING_BIT 2
-#define WORKLOAD_PPLIB_VIDEO_BIT 3
-#define WORKLOAD_PPLIB_VR_BIT 4
-#define WORKLOAD_PPLIB_COMPUTE_BIT 5
-#define WORKLOAD_PPLIB_CUSTOM_BIT 6
-#define WORKLOAD_PPLIB_COUNT 7
+#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 0
+#define WORKLOAD_PPLIB_VIDEO_BIT 2
+#define WORKLOAD_PPLIB_VR_BIT 3
+#define WORKLOAD_PPLIB_COMPUTE_BIT 4
+#define WORKLOAD_PPLIB_CUSTOM_BIT 5
+#define WORKLOAD_PPLIB_COUNT 6
typedef struct {
/* MP1_EXT_SCRATCH0 */
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
index 719597c5d27d..6606511891e3 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
@@ -24,6 +24,8 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/pci.h>
+
#include <drm/amdgpu_drm.h>
#include "processpptables.h"
#include <atom-types.h>
@@ -984,6 +986,8 @@ static int init_thermal_controller(
struct pp_hwmgr *hwmgr,
const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table)
{
+ struct amdgpu_device *adev = hwmgr->adev;
+
hwmgr->thermal_controller.ucType =
powerplay_table->sThermalController.ucType;
hwmgr->thermal_controller.ucI2cLine =
@@ -1008,7 +1012,104 @@ static int init_thermal_controller(
ATOM_PP_THERMALCONTROLLER_NONE != hwmgr->thermal_controller.ucType,
PHM_PlatformCaps_ThermalController);
- hwmgr->thermal_controller.use_hw_fan_control = 1;
+ if (powerplay_table->usTableSize >= sizeof(ATOM_PPLIB_POWERPLAYTABLE3)) {
+ const ATOM_PPLIB_POWERPLAYTABLE3 *powerplay_table3 =
+ (const ATOM_PPLIB_POWERPLAYTABLE3 *)powerplay_table;
+
+ if (0 == le16_to_cpu(powerplay_table3->usFanTableOffset)) {
+ hwmgr->thermal_controller.use_hw_fan_control = 1;
+ return 0;
+ } else {
+ const ATOM_PPLIB_FANTABLE *fan_table =
+ (const ATOM_PPLIB_FANTABLE *)(((unsigned long)powerplay_table) +
+ le16_to_cpu(powerplay_table3->usFanTableOffset));
+
+ if (1 <= fan_table->ucFanTableFormat) {
+ hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst =
+ fan_table->ucTHyst;
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMin =
+ le16_to_cpu(fan_table->usTMin);
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMed =
+ le16_to_cpu(fan_table->usTMed);
+ hwmgr->thermal_controller.advanceFanControlParameters.usTHigh =
+ le16_to_cpu(fan_table->usTHigh);
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin =
+ le16_to_cpu(fan_table->usPWMMin);
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed =
+ le16_to_cpu(fan_table->usPWMMed);
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh =
+ le16_to_cpu(fan_table->usPWMHigh);
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMax = 10900;
+ hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay = 100000;
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ }
+
+ if (2 <= fan_table->ucFanTableFormat) {
+ const ATOM_PPLIB_FANTABLE2 *fan_table2 =
+ (const ATOM_PPLIB_FANTABLE2 *)(((unsigned long)powerplay_table) +
+ le16_to_cpu(powerplay_table3->usFanTableOffset));
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMax =
+ le16_to_cpu(fan_table2->usTMax);
+ }
+
+ if (3 <= fan_table->ucFanTableFormat) {
+ const ATOM_PPLIB_FANTABLE3 *fan_table3 =
+ (const ATOM_PPLIB_FANTABLE3 *) (((unsigned long)powerplay_table) +
+ le16_to_cpu(powerplay_table3->usFanTableOffset));
+
+ hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode =
+ fan_table3->ucFanControlMode;
+
+ if ((3 == fan_table->ucFanTableFormat) &&
+ (0x67B1 == adev->pdev->device))
+ hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM =
+ 47;
+ else
+ hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM =
+ le16_to_cpu(fan_table3->usFanPWMMax);
+
+ hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity =
+ 4836;
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
+ le16_to_cpu(fan_table3->usFanOutputSensitivity);
+ }
+
+ if (6 <= fan_table->ucFanTableFormat) {
+ const ATOM_PPLIB_FANTABLE4 *fan_table4 =
+ (const ATOM_PPLIB_FANTABLE4 *)(((unsigned long)powerplay_table) +
+ le16_to_cpu(powerplay_table3->usFanTableOffset));
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_FanSpeedInTableIsRPM);
+
+ hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM =
+ le16_to_cpu(fan_table4->usFanRPMMax);
+ }
+
+ if (7 <= fan_table->ucFanTableFormat) {
+ const ATOM_PPLIB_FANTABLE5 *fan_table5 =
+ (const ATOM_PPLIB_FANTABLE5 *)(((unsigned long)powerplay_table) +
+ le16_to_cpu(powerplay_table3->usFanTableOffset));
+
+ if (0x67A2 == adev->pdev->device ||
+ 0x67A9 == adev->pdev->device ||
+ 0x67B9 == adev->pdev->device) {
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_GeminiRegulatorFanControlSupport);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanCurrentLow =
+ le16_to_cpu(fan_table5->usFanCurrentLow);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanCurrentHigh =
+ le16_to_cpu(fan_table5->usFanCurrentHigh);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMLow =
+ le16_to_cpu(fan_table5->usFanRPMLow);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMHigh =
+ le16_to_cpu(fan_table5->usFanRPMHigh);
+ }
+ }
+ }
+ }
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
index cf60f3992303..e6f40ee9f313 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
@@ -1297,15 +1297,9 @@ static int conv_power_profile_to_pplib_workload(int power_profile)
int pplib_workload = 0;
switch (power_profile) {
- case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT:
- pplib_workload = WORKLOAD_DEFAULT_BIT;
- break;
case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
break;
- case PP_SMC_POWER_PROFILE_POWERSAVING:
- pplib_workload = WORKLOAD_PPLIB_POWER_SAVING_BIT;
- break;
case PP_SMC_POWER_PROFILE_VIDEO:
pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
break;
@@ -1315,6 +1309,9 @@ static int conv_power_profile_to_pplib_workload(int power_profile)
case PP_SMC_POWER_PROFILE_COMPUTE:
pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
break;
+ case PP_SMC_POWER_PROFILE_CUSTOM:
+ pplib_workload = WORKLOAD_PPLIB_CUSTOM_BIT;
+ break;
}
return pplib_workload;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 895d89bea7fa..cf7c4f0e0a0b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -217,7 +217,7 @@ static struct cmn2asic_mapping sienna_cichlid_workload_map[PP_SMC_POWER_PROFILE_
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT),
- WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_CUSTOM_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
};
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 2380759ddf48..6db96fa1df09 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -1164,7 +1164,12 @@ int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
if (ret)
return ret;
- crystal_clock_freq = amdgpu_asic_get_xclk(adev);
+ /*
+ * crystal_clock_freq div by 4 is required since the fan control
+ * module refers to 25MHz
+ */
+
+ crystal_clock_freq = amdgpu_asic_get_xclk(adev) / 4;
tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 99e682563d47..aabf09f89cad 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -18021,16 +18021,6 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915)
if (!HAS_GMCH(i915))
sanitize_watermarks(i915);
- /*
- * Force all active planes to recompute their states. So that on
- * mode_setcrtc after probe, all the intel_plane_state variables
- * are already calculated and there is no assert_plane warnings
- * during bootup.
- */
- ret = intel_initial_commit(dev);
- if (ret)
- drm_dbg_kms(&i915->drm, "Initial commit in probe failed.\n");
-
return 0;
}
@@ -18039,11 +18029,21 @@ int intel_modeset_init(struct drm_i915_private *i915)
{
int ret;
- intel_overlay_setup(i915);
-
if (!HAS_DISPLAY(i915))
return 0;
+ /*
+ * Force all active planes to recompute their states. So that on
+ * mode_setcrtc after probe, all the intel_plane_state variables
+ * are already calculated and there is no assert_plane warnings
+ * during bootup.
+ */
+ ret = intel_initial_commit(&i915->drm);
+ if (ret)
+ drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);
+
+ intel_overlay_setup(i915);
+
ret = intel_fbdev_init(&i915->drm);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index bf1e9cf1c0f3..9bc59fd2f95f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -573,7 +573,7 @@ static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
return 0;
}
/* Also take into account max slice width */
- min_slice_count = min_t(u8, min_slice_count,
+ min_slice_count = max_t(u8, min_slice_count,
DIV_ROUND_UP(mode_hdisplay,
max_slice_width));
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 1904e6e5ea64..b07dc1156a0e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -3097,7 +3097,7 @@ static void retire_requests(struct intel_timeline *tl, struct i915_request *end)
break;
}
-static void eb_request_add(struct i915_execbuffer *eb)
+static int eb_request_add(struct i915_execbuffer *eb, int err)
{
struct i915_request *rq = eb->request;
struct intel_timeline * const tl = i915_request_timeline(rq);
@@ -3118,6 +3118,7 @@ static void eb_request_add(struct i915_execbuffer *eb)
/* Serialise with context_close via the add_to_timeline */
i915_request_set_error_once(rq, -ENOENT);
__i915_request_skip(rq);
+ err = -ENOENT; /* override any transient errors */
}
__i915_request_queue(rq, &attr);
@@ -3127,6 +3128,8 @@ static void eb_request_add(struct i915_execbuffer *eb)
retire_requests(tl, prev);
mutex_unlock(&tl->mutex);
+
+ return err;
}
static const i915_user_extension_fn execbuf_extensions[] = {
@@ -3332,7 +3335,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
err = eb_submit(&eb, batch);
err_request:
i915_request_get(eb.request);
- eb_request_add(&eb);
+ err = eb_request_add(&eb, err);
if (eb.fences)
signal_fence_array(&eb);
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index cf6e05ea4d8f..a24cc1ff08a0 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -101,18 +101,37 @@ static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
intel_gt_pm_put_async(b->irq_engine->gt);
}
+static void intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
+{
+ spin_lock(&b->irq_lock);
+ if (b->irq_armed)
+ __intel_breadcrumbs_disarm_irq(b);
+ spin_unlock(&b->irq_lock);
+}
+
static void add_signaling_context(struct intel_breadcrumbs *b,
struct intel_context *ce)
{
- intel_context_get(ce);
- list_add_tail(&ce->signal_link, &b->signalers);
+ lockdep_assert_held(&ce->signal_lock);
+
+ spin_lock(&b->signalers_lock);
+ list_add_rcu(&ce->signal_link, &b->signalers);
+ spin_unlock(&b->signalers_lock);
}
-static void remove_signaling_context(struct intel_breadcrumbs *b,
+static bool remove_signaling_context(struct intel_breadcrumbs *b,
struct intel_context *ce)
{
- list_del(&ce->signal_link);
- intel_context_put(ce);
+ lockdep_assert_held(&ce->signal_lock);
+
+ if (!list_empty(&ce->signals))
+ return false;
+
+ spin_lock(&b->signalers_lock);
+ list_del_rcu(&ce->signal_link);
+ spin_unlock(&b->signalers_lock);
+
+ return true;
}
static inline bool __request_completed(const struct i915_request *rq)
@@ -175,6 +194,8 @@ static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl)
static bool __signal_request(struct i915_request *rq)
{
+ GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
+
if (!__dma_fence_signal(&rq->fence)) {
i915_request_put(rq);
return false;
@@ -195,15 +216,12 @@ static void signal_irq_work(struct irq_work *work)
struct intel_breadcrumbs *b = container_of(work, typeof(*b), irq_work);
const ktime_t timestamp = ktime_get();
struct llist_node *signal, *sn;
- struct intel_context *ce, *cn;
- struct list_head *pos, *next;
+ struct intel_context *ce;
signal = NULL;
if (unlikely(!llist_empty(&b->signaled_requests)))
signal = llist_del_all(&b->signaled_requests);
- spin_lock(&b->irq_lock);
-
/*
* Keep the irq armed until the interrupt after all listeners are gone.
*
@@ -229,47 +247,44 @@ static void signal_irq_work(struct irq_work *work)
* interrupt draw less ire from other users of the system and tools
* like powertop.
*/
- if (!signal && b->irq_armed && list_empty(&b->signalers))
- __intel_breadcrumbs_disarm_irq(b);
+ if (!signal && READ_ONCE(b->irq_armed) && list_empty(&b->signalers))
+ intel_breadcrumbs_disarm_irq(b);
- list_for_each_entry_safe(ce, cn, &b->signalers, signal_link) {
- GEM_BUG_ON(list_empty(&ce->signals));
+ rcu_read_lock();
+ list_for_each_entry_rcu(ce, &b->signalers, signal_link) {
+ struct i915_request *rq;
- list_for_each_safe(pos, next, &ce->signals) {
- struct i915_request *rq =
- list_entry(pos, typeof(*rq), signal_link);
+ list_for_each_entry_rcu(rq, &ce->signals, signal_link) {
+ bool release;
- GEM_BUG_ON(!check_signal_order(ce, rq));
if (!__request_completed(rq))
break;
+ if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL,
+ &rq->fence.flags))
+ break;
+
/*
* Queue for execution after dropping the signaling
* spinlock as the callback chain may end up adding
* more signalers to the same context or engine.
*/
- clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+ spin_lock(&ce->signal_lock);
+ list_del_rcu(&rq->signal_link);
+ release = remove_signaling_context(b, ce);
+ spin_unlock(&ce->signal_lock);
+
if (__signal_request(rq))
/* We own signal_node now, xfer to local list */
signal = slist_add(&rq->signal_node, signal);
- }
- /*
- * We process the list deletion in bulk, only using a list_add
- * (not list_move) above but keeping the status of
- * rq->signal_link known with the I915_FENCE_FLAG_SIGNAL bit.
- */
- if (!list_is_first(pos, &ce->signals)) {
- /* Advance the list to the first incomplete request */
- __list_del_many(&ce->signals, pos);
- if (&ce->signals == pos) { /* now empty */
+ if (release) {
add_retire(b, ce->timeline);
- remove_signaling_context(b, ce);
+ intel_context_put(ce);
}
}
}
-
- spin_unlock(&b->irq_lock);
+ rcu_read_unlock();
llist_for_each_safe(signal, sn, signal) {
struct i915_request *rq =
@@ -298,14 +313,15 @@ intel_breadcrumbs_create(struct intel_engine_cs *irq_engine)
if (!b)
return NULL;
- spin_lock_init(&b->irq_lock);
+ b->irq_engine = irq_engine;
+
+ spin_lock_init(&b->signalers_lock);
INIT_LIST_HEAD(&b->signalers);
init_llist_head(&b->signaled_requests);
+ spin_lock_init(&b->irq_lock);
init_irq_work(&b->irq_work, signal_irq_work);
- b->irq_engine = irq_engine;
-
return b;
}
@@ -347,9 +363,9 @@ void intel_breadcrumbs_free(struct intel_breadcrumbs *b)
kfree(b);
}
-static void insert_breadcrumb(struct i915_request *rq,
- struct intel_breadcrumbs *b)
+static void insert_breadcrumb(struct i915_request *rq)
{
+ struct intel_breadcrumbs *b = READ_ONCE(rq->engine)->breadcrumbs;
struct intel_context *ce = rq->context;
struct list_head *pos;
@@ -371,6 +387,7 @@ static void insert_breadcrumb(struct i915_request *rq,
}
if (list_empty(&ce->signals)) {
+ intel_context_get(ce);
add_signaling_context(b, ce);
pos = &ce->signals;
} else {
@@ -396,8 +413,9 @@ static void insert_breadcrumb(struct i915_request *rq,
break;
}
}
- list_add(&rq->signal_link, pos);
+ list_add_rcu(&rq->signal_link, pos);
GEM_BUG_ON(!check_signal_order(ce, rq));
+ GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags));
set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
/*
@@ -410,7 +428,7 @@ static void insert_breadcrumb(struct i915_request *rq,
bool i915_request_enable_breadcrumb(struct i915_request *rq)
{
- struct intel_breadcrumbs *b;
+ struct intel_context *ce = rq->context;
/* Serialises with i915_request_retire() using rq->lock */
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
@@ -425,67 +443,30 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq)
if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags))
return true;
- /*
- * rq->engine is locked by rq->engine->active.lock. That however
- * is not known until after rq->engine has been dereferenced and
- * the lock acquired. Hence we acquire the lock and then validate
- * that rq->engine still matches the lock we hold for it.
- *
- * Here, we are using the breadcrumb lock as a proxy for the
- * rq->engine->active.lock, and we know that since the breadcrumb
- * will be serialised within i915_request_submit/i915_request_unsubmit,
- * the engine cannot change while active as long as we hold the
- * breadcrumb lock on that engine.
- *
- * From the dma_fence_enable_signaling() path, we are outside of the
- * request submit/unsubmit path, and so we must be more careful to
- * acquire the right lock.
- */
- b = READ_ONCE(rq->engine)->breadcrumbs;
- spin_lock(&b->irq_lock);
- while (unlikely(b != READ_ONCE(rq->engine)->breadcrumbs)) {
- spin_unlock(&b->irq_lock);
- b = READ_ONCE(rq->engine)->breadcrumbs;
- spin_lock(&b->irq_lock);
- }
-
- /*
- * Now that we are finally serialised with request submit/unsubmit,
- * [with b->irq_lock] and with i915_request_retire() [via checking
- * SIGNALED with rq->lock] confirm the request is indeed active. If
- * it is no longer active, the breadcrumb will be attached upon
- * i915_request_submit().
- */
+ spin_lock(&ce->signal_lock);
if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags))
- insert_breadcrumb(rq, b);
-
- spin_unlock(&b->irq_lock);
+ insert_breadcrumb(rq);
+ spin_unlock(&ce->signal_lock);
return true;
}
void i915_request_cancel_breadcrumb(struct i915_request *rq)
{
- struct intel_breadcrumbs *b = rq->engine->breadcrumbs;
+ struct intel_context *ce = rq->context;
+ bool release;
- /*
- * We must wait for b->irq_lock so that we know the interrupt handler
- * has released its reference to the intel_context and has completed
- * the DMA_FENCE_FLAG_SIGNALED_BIT/I915_FENCE_FLAG_SIGNAL dance (if
- * required).
- */
- spin_lock(&b->irq_lock);
- if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) {
- struct intel_context *ce = rq->context;
+ if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
+ return;
- list_del(&rq->signal_link);
- if (list_empty(&ce->signals))
- remove_signaling_context(b, ce);
+ spin_lock(&ce->signal_lock);
+ list_del_rcu(&rq->signal_link);
+ release = remove_signaling_context(rq->engine->breadcrumbs, ce);
+ spin_unlock(&ce->signal_lock);
+ if (release)
+ intel_context_put(ce);
- clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
- i915_request_put(rq);
- }
- spin_unlock(&b->irq_lock);
+ i915_request_put(rq);
}
static void print_signals(struct intel_breadcrumbs *b, struct drm_printer *p)
@@ -495,18 +476,17 @@ static void print_signals(struct intel_breadcrumbs *b, struct drm_printer *p)
drm_printf(p, "Signals:\n");
- spin_lock_irq(&b->irq_lock);
- list_for_each_entry(ce, &b->signalers, signal_link) {
- list_for_each_entry(rq, &ce->signals, signal_link) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(ce, &b->signalers, signal_link) {
+ list_for_each_entry_rcu(rq, &ce->signals, signal_link)
drm_printf(p, "\t[%llx:%llx%s] @ %dms\n",
rq->fence.context, rq->fence.seqno,
i915_request_completed(rq) ? "!" :
i915_request_started(rq) ? "*" :
"",
jiffies_to_msecs(jiffies - rq->emitted_jiffies));
- }
}
- spin_unlock_irq(&b->irq_lock);
+ rcu_read_unlock();
}
void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
index 3fa19820b37a..a74bb3062bd8 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
@@ -29,18 +29,16 @@
* the overhead of waking that client is much preferred.
*/
struct intel_breadcrumbs {
- spinlock_t irq_lock; /* protects the lists used in hardirq context */
-
/* Not all breadcrumbs are attached to physical HW */
struct intel_engine_cs *irq_engine;
+ spinlock_t signalers_lock; /* protects the list of signalers */
struct list_head signalers;
struct llist_head signaled_requests;
+ spinlock_t irq_lock; /* protects the interrupt from hardirq context */
struct irq_work irq_work; /* for use from inside irq_lock */
-
unsigned int irq_enabled;
-
bool irq_armed;
};
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 92a3f25c4006..349e7fa1488d 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -25,11 +25,18 @@ static struct intel_context *intel_context_alloc(void)
return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
}
-void intel_context_free(struct intel_context *ce)
+static void rcu_context_free(struct rcu_head *rcu)
{
+ struct intel_context *ce = container_of(rcu, typeof(*ce), rcu);
+
kmem_cache_free(global.slab_ce, ce);
}
+void intel_context_free(struct intel_context *ce)
+{
+ call_rcu(&ce->rcu, rcu_context_free);
+}
+
struct intel_context *
intel_context_create(struct intel_engine_cs *engine)
{
@@ -356,8 +363,7 @@ static int __intel_context_active(struct i915_active *active)
}
void
-intel_context_init(struct intel_context *ce,
- struct intel_engine_cs *engine)
+intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
{
GEM_BUG_ON(!engine->cops);
GEM_BUG_ON(!engine->gt->vm);
@@ -373,7 +379,8 @@ intel_context_init(struct intel_context *ce,
ce->vm = i915_vm_get(engine->gt->vm);
- INIT_LIST_HEAD(&ce->signal_link);
+ /* NB ce->signal_link/lock is used under RCU */
+ spin_lock_init(&ce->signal_lock);
INIT_LIST_HEAD(&ce->signals);
mutex_init(&ce->pin_mutex);
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 552cb57a2e8c..52fa9c132746 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -25,6 +25,7 @@ DECLARE_EWMA(runtime, 3, 8);
struct i915_gem_context;
struct i915_gem_ww_ctx;
struct i915_vma;
+struct intel_breadcrumbs;
struct intel_context;
struct intel_ring;
@@ -44,7 +45,16 @@ struct intel_context_ops {
};
struct intel_context {
- struct kref ref;
+ /*
+ * Note: Some fields may be accessed under RCU.
+ *
+ * Unless otherwise noted a field can safely be assumed to be protected
+ * by strong reference counting.
+ */
+ union {
+ struct kref ref; /* no kref_get_unless_zero()! */
+ struct rcu_head rcu;
+ };
struct intel_engine_cs *engine;
struct intel_engine_cs *inflight;
@@ -54,8 +64,15 @@ struct intel_context {
struct i915_address_space *vm;
struct i915_gem_context __rcu *gem_context;
- struct list_head signal_link;
- struct list_head signals;
+ /*
+ * @signal_lock protects the list of requests that need signaling,
+ * @signals. While there are any requests that need signaling,
+ * we add the context to the breadcrumbs worker, and remove it
+ * upon completion/cancellation of the last request.
+ */
+ struct list_head signal_link; /* Accessed under RCU */
+ struct list_head signals; /* Guarded by signal_lock */
+ spinlock_t signal_lock; /* protects signals, the list of requests */
struct i915_vma *state;
struct intel_ring *ring;
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 0952bf157234..724b2cb897d3 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -2788,6 +2788,9 @@ static void __execlists_hold(struct i915_request *rq)
static bool execlists_hold(struct intel_engine_cs *engine,
struct i915_request *rq)
{
+ if (i915_request_on_hold(rq))
+ return false;
+
spin_lock_irq(&engine->active.lock);
if (i915_request_completed(rq)) { /* too late! */
@@ -3169,8 +3172,10 @@ static void execlists_submission_tasklet(unsigned long data)
spin_unlock_irqrestore(&engine->active.lock, flags);
/* Recheck after serialising with direct-submission */
- if (unlikely(timeout && preempt_timeout(engine)))
+ if (unlikely(timeout && preempt_timeout(engine))) {
+ cancel_timer(&engine->execlists.preempt);
execlists_reset(engine, "preemption time out");
+ }
}
}
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 313e51e7d4f7..413dadfac2d1 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -59,8 +59,7 @@ struct drm_i915_mocs_table {
#define _L3_CACHEABILITY(value) ((value) << 4)
/* Helper defines */
-#define GEN9_NUM_MOCS_ENTRIES 62 /* 62 out of 64 - 63 & 64 are reserved. */
-#define GEN11_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. */
+#define GEN9_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. */
/* (e)LLC caching options */
/*
@@ -131,7 +130,19 @@ static const struct drm_i915_mocs_entry skl_mocs_table[] = {
GEN9_MOCS_ENTRIES,
MOCS_ENTRY(I915_MOCS_CACHED,
LE_3_WB | LE_TC_2_LLC_ELLC | LE_LRUM(3),
- L3_3_WB)
+ L3_3_WB),
+
+ /*
+ * mocs:63
+ * - used by the L3 for all of its evictions.
+ * Thus it is expected to allow LLC cacheability to enable coherent
+ * flows to be maintained.
+ * - used to force L3 uncacheable cycles.
+ * Thus it is expected to make the surface L3 uncacheable.
+ */
+ MOCS_ENTRY(63,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+ L3_1_UC)
};
/* NOTE: the LE_TGT_CACHE is not used on Broxton */
@@ -316,11 +327,11 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915,
if (INTEL_GEN(i915) >= 12) {
table->size = ARRAY_SIZE(tgl_mocs_table);
table->table = tgl_mocs_table;
- table->n_entries = GEN11_NUM_MOCS_ENTRIES;
+ table->n_entries = GEN9_NUM_MOCS_ENTRIES;
} else if (IS_GEN(i915, 11)) {
table->size = ARRAY_SIZE(icl_mocs_table);
table->table = icl_mocs_table;
- table->n_entries = GEN11_NUM_MOCS_ENTRIES;
+ table->n_entries = GEN9_NUM_MOCS_ENTRIES;
} else if (IS_GEN9_BC(i915) || IS_CANNONLAKE(i915)) {
table->size = ARRAY_SIZE(skl_mocs_table);
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index e6a00eea0631..c1c9cc0ad3b9 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -883,6 +883,10 @@ void intel_rps_park(struct intel_rps *rps)
adj = -2;
rps->last_adj = adj;
rps->cur_freq = max_t(int, rps->cur_freq + adj, rps->min_freq);
+ if (rps->cur_freq < rps->efficient_freq) {
+ rps->cur_freq = rps->efficient_freq;
+ rps->last_adj = 0;
+ }
GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq);
}
diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c
index f011ea42487e..5982b62f913d 100644
--- a/drivers/gpu/drm/i915/gt/shmem_utils.c
+++ b/drivers/gpu/drm/i915/gt/shmem_utils.c
@@ -73,7 +73,7 @@ void *shmem_pin_map(struct file *file)
mapping_set_unevictable(file->f_mapping);
return vaddr;
err_page:
- while (--i >= 0)
+ while (i--)
put_page(pages[i]);
kvfree(pages);
return NULL;
@@ -103,10 +103,13 @@ static int __shmem_rw(struct file *file, loff_t off,
return PTR_ERR(page);
vaddr = kmap(page);
- if (write)
+ if (write) {
memcpy(vaddr + offset_in_page(off), ptr, this);
- else
+ set_page_dirty(page);
+ } else {
memcpy(ptr, vaddr + offset_in_page(off), this);
+ }
+ mark_page_accessed(page);
kunmap(page);
put_page(page);
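The error-path tweak in shmem_pin_map() above swaps "while (--i >= 0)" for "while (i--)": with an unsigned index the former comparison is always true and the loop underflows, while the latter releases entries i-1 down to 0 and then stops. A minimal sketch of the idiom (hypothetical helper name, not the i915 function):

#include <linux/mm.h>

/* Illustrative only: unwinding a partially populated page array with an
 * unsigned index, where "n" is the count of pages acquired so far.
 */
static void release_acquired_pages(struct page **pages, unsigned long n)
{
	/*
	 * "while (--n >= 0)" never terminates for unsigned n (the test is
	 * always true); "while (n--)" visits n-1 .. 0 and then stops, and
	 * also handles n == 0 correctly.
	 */
	while (n--)
		put_page(pages[n]);
}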
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 874af6db6103..620b6fab2c5c 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -177,10 +177,8 @@ struct i915_request {
struct intel_ring *ring;
struct intel_timeline __rcu *timeline;
- union {
- struct list_head signal_link;
- struct llist_node signal_node;
- };
+ struct list_head signal_link;
+ struct llist_node signal_node;
/*
* The rcu epoch of when this request was allocated. Used to judiciously
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index 23a6132c5f4e..412e21604a05 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -211,8 +211,8 @@ static int igt_gem_ww_ctx(void *arg)
return PTR_ERR(obj);
obj2 = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
+ if (IS_ERR(obj2)) {
+ err = PTR_ERR(obj2);
goto put1;
}
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_kms.c b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
index b721b8b262ce..9e1224d54729 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_kms.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
@@ -22,6 +22,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_vblank.h>
@@ -484,17 +485,27 @@ static void mxsfb_plane_overlay_atomic_update(struct drm_plane *plane,
writel(ctrl, mxsfb->base + LCDC_AS_CTRL);
}
+static bool mxsfb_format_mod_supported(struct drm_plane *plane,
+ uint32_t format,
+ uint64_t modifier)
+{
+ return modifier == DRM_FORMAT_MOD_LINEAR;
+}
+
static const struct drm_plane_helper_funcs mxsfb_plane_primary_helper_funcs = {
+ .prepare_fb = drm_gem_fb_prepare_fb,
.atomic_check = mxsfb_plane_atomic_check,
.atomic_update = mxsfb_plane_primary_atomic_update,
};
static const struct drm_plane_helper_funcs mxsfb_plane_overlay_helper_funcs = {
+ .prepare_fb = drm_gem_fb_prepare_fb,
.atomic_check = mxsfb_plane_atomic_check,
.atomic_update = mxsfb_plane_overlay_atomic_update,
};
static const struct drm_plane_funcs mxsfb_plane_funcs = {
+ .format_mod_supported = mxsfb_format_mod_supported,
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 56b335a55966..7daa12eec01b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1214,8 +1214,8 @@ retry:
}
reg->bus.offset = handle;
- ret = 0;
}
+ ret = 0;
break;
default:
ret = -EINVAL;
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index 033fd30074b0..282e4c837cd9 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -195,8 +195,7 @@ static void sdi_bridge_mode_set(struct drm_bridge *bridge,
sdi->pixelclock = adjusted_mode->clock * 1000;
}
-static void sdi_bridge_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+static void sdi_bridge_enable(struct drm_bridge *bridge)
{
struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
struct dispc_clock_info dispc_cinfo;
@@ -259,8 +258,7 @@ err_get_dispc:
regulator_disable(sdi->vdds_sdi_reg);
}
-static void sdi_bridge_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+static void sdi_bridge_disable(struct drm_bridge *bridge)
{
struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
@@ -278,8 +276,8 @@ static const struct drm_bridge_funcs sdi_bridge_funcs = {
.mode_valid = sdi_bridge_mode_valid,
.mode_fixup = sdi_bridge_mode_fixup,
.mode_set = sdi_bridge_mode_set,
- .atomic_enable = sdi_bridge_enable,
- .atomic_disable = sdi_bridge_disable,
+ .enable = sdi_bridge_enable,
+ .disable = sdi_bridge_disable,
};
static void sdi_bridge_init(struct sdi_device *sdi)
diff --git a/drivers/gpu/drm/panel/panel-sony-acx565akm.c b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
index e95fdfb16b6c..ba0b3ead150f 100644
--- a/drivers/gpu/drm/panel/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
@@ -629,7 +629,7 @@ static int acx565akm_probe(struct spi_device *spi)
lcd->spi = spi;
mutex_init(&lcd->mutex);
- lcd->reset_gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_LOW);
+ lcd->reset_gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(lcd->reset_gpio)) {
dev_err(&spi->dev, "failed to get reset GPIO\n");
return PTR_ERR(lcd->reset_gpio);
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index f292c6a6e20f..41edd0a421b2 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -544,7 +544,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
struct device_node *port, *endpoint;
int ret = 0, child_count = 0;
const char *name;
- u32 endpoint_id;
+ u32 endpoint_id = 0;
lvds->drm_dev = drm_dev;
port = of_graph_get_port_by_id(dev->of_node, 1);
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index ba9d1c3e7cac..e4baf07992a4 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -90,7 +90,7 @@ static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
if (!fpriv)
return -ENOMEM;
- idr_init(&fpriv->contexts);
+ idr_init_base(&fpriv->contexts, 1);
mutex_init(&fpriv->lock);
filp->driver_priv = fpriv;
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index 5a4fd0dbf4cf..47d26b5d9945 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -129,7 +129,6 @@ int tegra_output_probe(struct tegra_output *output)
if (!output->ddc) {
err = -EPROBE_DEFER;
- of_node_put(ddc);
return err;
}
}
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index e88a17c2937f..cc2aa2308a51 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -397,7 +397,6 @@ struct tegra_sor;
struct tegra_sor_ops {
const char *name;
int (*probe)(struct tegra_sor *sor);
- int (*remove)(struct tegra_sor *sor);
void (*audio_enable)(struct tegra_sor *sor);
void (*audio_disable)(struct tegra_sor *sor);
};
@@ -2942,6 +2941,24 @@ static const struct drm_encoder_helper_funcs tegra_sor_dp_helpers = {
.atomic_check = tegra_sor_encoder_atomic_check,
};
+static void tegra_sor_disable_regulator(void *data)
+{
+ struct regulator *reg = data;
+
+ regulator_disable(reg);
+}
+
+static int tegra_sor_enable_regulator(struct tegra_sor *sor, struct regulator *reg)
+{
+ int err;
+
+ err = regulator_enable(reg);
+ if (err)
+ return err;
+
+ return devm_add_action_or_reset(sor->dev, tegra_sor_disable_regulator, reg);
+}
+
static int tegra_sor_hdmi_probe(struct tegra_sor *sor)
{
int err;
@@ -2953,7 +2970,7 @@ static int tegra_sor_hdmi_probe(struct tegra_sor *sor)
return PTR_ERR(sor->avdd_io_supply);
}
- err = regulator_enable(sor->avdd_io_supply);
+ err = tegra_sor_enable_regulator(sor, sor->avdd_io_supply);
if (err < 0) {
dev_err(sor->dev, "failed to enable AVDD I/O supply: %d\n",
err);
@@ -2967,7 +2984,7 @@ static int tegra_sor_hdmi_probe(struct tegra_sor *sor)
return PTR_ERR(sor->vdd_pll_supply);
}
- err = regulator_enable(sor->vdd_pll_supply);
+ err = tegra_sor_enable_regulator(sor, sor->vdd_pll_supply);
if (err < 0) {
dev_err(sor->dev, "failed to enable VDD PLL supply: %d\n",
err);
@@ -2981,7 +2998,7 @@ static int tegra_sor_hdmi_probe(struct tegra_sor *sor)
return PTR_ERR(sor->hdmi_supply);
}
- err = regulator_enable(sor->hdmi_supply);
+ err = tegra_sor_enable_regulator(sor, sor->hdmi_supply);
if (err < 0) {
dev_err(sor->dev, "failed to enable HDMI supply: %d\n", err);
return err;
@@ -2992,19 +3009,9 @@ static int tegra_sor_hdmi_probe(struct tegra_sor *sor)
return 0;
}
-static int tegra_sor_hdmi_remove(struct tegra_sor *sor)
-{
- regulator_disable(sor->hdmi_supply);
- regulator_disable(sor->vdd_pll_supply);
- regulator_disable(sor->avdd_io_supply);
-
- return 0;
-}
-
static const struct tegra_sor_ops tegra_sor_hdmi_ops = {
.name = "HDMI",
.probe = tegra_sor_hdmi_probe,
- .remove = tegra_sor_hdmi_remove,
.audio_enable = tegra_sor_hdmi_audio_enable,
.audio_disable = tegra_sor_hdmi_audio_disable,
};
@@ -3017,7 +3024,7 @@ static int tegra_sor_dp_probe(struct tegra_sor *sor)
if (IS_ERR(sor->avdd_io_supply))
return PTR_ERR(sor->avdd_io_supply);
- err = regulator_enable(sor->avdd_io_supply);
+ err = tegra_sor_enable_regulator(sor, sor->avdd_io_supply);
if (err < 0)
return err;
@@ -3025,25 +3032,16 @@ static int tegra_sor_dp_probe(struct tegra_sor *sor)
if (IS_ERR(sor->vdd_pll_supply))
return PTR_ERR(sor->vdd_pll_supply);
- err = regulator_enable(sor->vdd_pll_supply);
+ err = tegra_sor_enable_regulator(sor, sor->vdd_pll_supply);
if (err < 0)
return err;
return 0;
}
-static int tegra_sor_dp_remove(struct tegra_sor *sor)
-{
- regulator_disable(sor->vdd_pll_supply);
- regulator_disable(sor->avdd_io_supply);
-
- return 0;
-}
-
static const struct tegra_sor_ops tegra_sor_dp_ops = {
.name = "DP",
.probe = tegra_sor_dp_probe,
- .remove = tegra_sor_dp_remove,
};
static int tegra_sor_init(struct host1x_client *client)
@@ -3145,6 +3143,7 @@ static int tegra_sor_init(struct host1x_client *client)
if (err < 0) {
dev_err(sor->dev, "failed to deassert SOR reset: %d\n",
err);
+ clk_disable_unprepare(sor->clk);
return err;
}
@@ -3152,12 +3151,17 @@ static int tegra_sor_init(struct host1x_client *client)
}
err = clk_prepare_enable(sor->clk_safe);
- if (err < 0)
+ if (err < 0) {
+ clk_disable_unprepare(sor->clk);
return err;
+ }
err = clk_prepare_enable(sor->clk_dp);
- if (err < 0)
+ if (err < 0) {
+ clk_disable_unprepare(sor->clk_safe);
+ clk_disable_unprepare(sor->clk);
return err;
+ }
return 0;
}
@@ -3764,17 +3768,16 @@ static int tegra_sor_probe(struct platform_device *pdev)
return err;
err = tegra_output_probe(&sor->output);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to probe output: %d\n", err);
- return err;
- }
+ if (err < 0)
+ return dev_err_probe(&pdev->dev, err,
+ "failed to probe output\n");
if (sor->ops && sor->ops->probe) {
err = sor->ops->probe(sor);
if (err < 0) {
dev_err(&pdev->dev, "failed to probe %s: %d\n",
sor->ops->name, err);
- goto output;
+ goto remove;
}
}
@@ -3955,9 +3958,6 @@ unregister:
rpm_disable:
pm_runtime_disable(&pdev->dev);
remove:
- if (sor->ops && sor->ops->remove)
- sor->ops->remove(sor);
-output:
tegra_output_remove(&sor->output);
return err;
}
@@ -3976,12 +3976,6 @@ static int tegra_sor_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
- if (sor->ops && sor->ops->remove) {
- err = sor->ops->remove(sor);
- if (err < 0)
- dev_err(&pdev->dev, "failed to remove SOR: %d\n", err);
- }
-
tegra_output_remove(&sor->output);
return 0;
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index a97a9d058198..a49e0ed4a599 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -734,6 +734,7 @@ config I2C_LPC2K
config I2C_MLXBF
tristate "Mellanox BlueField I2C controller"
depends on MELLANOX_PLATFORM && ARM64
+ select I2C_SLAVE
help
Enabling this option will add I2C SMBus support for Mellanox BlueField
system.
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index c98529c76348..e6f8d6e45a15 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -412,6 +412,19 @@ static void i2c_imx_dma_free(struct imx_i2c_struct *i2c_imx)
dma->chan_using = NULL;
}
+static void i2c_imx_clear_irq(struct imx_i2c_struct *i2c_imx, unsigned int bits)
+{
+ unsigned int temp;
+
+ /*
+ * i2sr_clr_opcode is the value to clear all interrupts. Here we want to
+ * clear only <bits>, so we write ~i2sr_clr_opcode with just <bits>
+ * toggled. This is required because i.MX needs W0C and Vybrid uses W1C.
+ */
+ temp = ~i2c_imx->hwdata->i2sr_clr_opcode ^ bits;
+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR);
+}
+
static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy, bool atomic)
{
unsigned long orig_jiffies = jiffies;
@@ -424,8 +437,7 @@ static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy, bool a
/* check for arbitration lost */
if (temp & I2SR_IAL) {
- temp &= ~I2SR_IAL;
- imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR);
+ i2c_imx_clear_irq(i2c_imx, I2SR_IAL);
return -EAGAIN;
}
@@ -469,7 +481,7 @@ static int i2c_imx_trx_complete(struct imx_i2c_struct *i2c_imx, bool atomic)
*/
readb_poll_timeout_atomic(addr, regval, regval & I2SR_IIF, 5, 1000 + 100);
i2c_imx->i2csr = regval;
- imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2SR);
+ i2c_imx_clear_irq(i2c_imx, I2SR_IIF | I2SR_IAL);
} else {
wait_event_timeout(i2c_imx->queue, i2c_imx->i2csr & I2SR_IIF, HZ / 10);
}
@@ -478,6 +490,16 @@ static int i2c_imx_trx_complete(struct imx_i2c_struct *i2c_imx, bool atomic)
dev_dbg(&i2c_imx->adapter.dev, "<%s> Timeout\n", __func__);
return -ETIMEDOUT;
}
+
+ /* check for arbitration lost */
+ if (i2c_imx->i2csr & I2SR_IAL) {
+ dev_dbg(&i2c_imx->adapter.dev, "<%s> Arbitration lost\n", __func__);
+ i2c_imx_clear_irq(i2c_imx, I2SR_IAL);
+
+ i2c_imx->i2csr = 0;
+ return -EAGAIN;
+ }
+
dev_dbg(&i2c_imx->adapter.dev, "<%s> TRX complete\n", __func__);
i2c_imx->i2csr = 0;
return 0;
@@ -593,6 +615,8 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx, bool atomic)
/* Stop I2C transaction */
dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__);
temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
+ if (!(temp & I2CR_MSTA))
+ i2c_imx->stopped = 1;
temp &= ~(I2CR_MSTA | I2CR_MTX);
if (i2c_imx->dma)
temp &= ~I2CR_DMAEN;
@@ -623,9 +647,7 @@ static irqreturn_t i2c_imx_isr(int irq, void *dev_id)
if (temp & I2SR_IIF) {
/* save status register */
i2c_imx->i2csr = temp;
- temp &= ~I2SR_IIF;
- temp |= (i2c_imx->hwdata->i2sr_clr_opcode & I2SR_IIF);
- imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR);
+ i2c_imx_clear_irq(i2c_imx, I2SR_IIF);
wake_up(&i2c_imx->queue);
return IRQ_HANDLED;
}
@@ -758,9 +780,12 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
*/
dev_dbg(dev, "<%s> clear MSTA\n", __func__);
temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
+ if (!(temp & I2CR_MSTA))
+ i2c_imx->stopped = 1;
temp &= ~(I2CR_MSTA | I2CR_MTX);
imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
- i2c_imx_bus_busy(i2c_imx, 0, false);
+ if (!i2c_imx->stopped)
+ i2c_imx_bus_busy(i2c_imx, 0, false);
} else {
/*
* For i2c master receiver repeat restart operation like:
@@ -885,9 +910,12 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs,
dev_dbg(&i2c_imx->adapter.dev,
"<%s> clear MSTA\n", __func__);
temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
+ if (!(temp & I2CR_MSTA))
+ i2c_imx->stopped = 1;
temp &= ~(I2CR_MSTA | I2CR_MTX);
imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
- i2c_imx_bus_busy(i2c_imx, 0, atomic);
+ if (!i2c_imx->stopped)
+ i2c_imx_bus_busy(i2c_imx, 0, atomic);
} else {
/*
* For i2c master receiver repeat restart operation like:
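The new i2c_imx_clear_irq() helper leans on a small identity: if i2sr_clr_opcode is the value whose write clears every flag, then writing ~i2sr_clr_opcode ^ bits clears exactly the requested bits, whether the status register is write-0-to-clear (i.MX, opcode 0) or write-1-to-clear (Vybrid, opcode = all flag bits). A standalone illustration with made-up flag positions (IRQ_A/IRQ_B are assumptions, not the real I2SR layout):

#include <assert.h>
#include <stdint.h>

/* Illustrative only: deriving a "clear just these bits" write value from
 * a "clear everything" opcode, covering both W0C and W1C conventions.
 */
#define IRQ_A 0x02u
#define IRQ_B 0x10u

static uint8_t clear_value(uint8_t clr_opcode, uint8_t bits)
{
	return (uint8_t)(~clr_opcode ^ bits);
}

int main(void)
{
	/* W0C (i.MX): clearing everything means writing 0, so the opcode is
	 * 0x00; the result is 0 only at IRQ_A and 1 (harmless) elsewhere. */
	assert(clear_value(0x00, IRQ_A) == 0xfd);

	/* W1C (Vybrid): the opcode has every flag bit set; the result is 1
	 * at IRQ_A (clears it) and 0 at IRQ_B (leaves it untouched). */
	assert(clear_value(IRQ_A | IRQ_B, IRQ_A) == 0xef);
	return 0;
}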
diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c
index 33574d40ea9c..2fb0532d8a16 100644
--- a/drivers/i2c/busses/i2c-mlxbf.c
+++ b/drivers/i2c/busses/i2c-mlxbf.c
@@ -1258,9 +1258,9 @@ static int mlxbf_i2c_get_gpio(struct platform_device *pdev,
return -EFAULT;
gpio_res->io = devm_ioremap(dev, params->start, size);
- if (IS_ERR(gpio_res->io)) {
+ if (!gpio_res->io) {
devm_release_mem_region(dev, params->start, size);
- return PTR_ERR(gpio_res->io);
+ return -ENOMEM;
}
return 0;
@@ -1323,9 +1323,9 @@ static int mlxbf_i2c_get_corepll(struct platform_device *pdev,
return -EFAULT;
corepll_res->io = devm_ioremap(dev, params->start, size);
- if (IS_ERR(corepll_res->io)) {
+ if (!corepll_res->io) {
devm_release_mem_region(dev, params->start, size);
- return PTR_ERR(corepll_res->io);
+ return -ENOMEM;
}
return 0;
@@ -1717,9 +1717,9 @@ static int mlxbf_i2c_init_coalesce(struct platform_device *pdev,
return -EFAULT;
coalesce_res->io = ioremap(params->start, size);
- if (IS_ERR(coalesce_res->io)) {
+ if (!coalesce_res->io) {
release_mem_region(params->start, size);
- return PTR_ERR(coalesce_res->io);
+ return -ENOMEM;
}
priv->coalesce = coalesce_res;
diff --git a/drivers/i2c/busses/i2c-qcom-cci.c b/drivers/i2c/busses/i2c-qcom-cci.c
index f13735beca58..1c259b5188de 100644
--- a/drivers/i2c/busses/i2c-qcom-cci.c
+++ b/drivers/i2c/busses/i2c-qcom-cci.c
@@ -194,9 +194,9 @@ static irqreturn_t cci_isr(int irq, void *dev)
if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M1_ERROR)) {
if (val & CCI_IRQ_STATUS_0_I2C_M1_Q0_NACK_ERR ||
val & CCI_IRQ_STATUS_0_I2C_M1_Q1_NACK_ERR)
- cci->master[0].status = -ENXIO;
+ cci->master[1].status = -ENXIO;
else
- cci->master[0].status = -EIO;
+ cci->master[1].status = -EIO;
writel(CCI_HALT_REQ_I2C_M1_Q0Q1, cci->base + CCI_HALT_REQ);
ret = IRQ_HANDLED;
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index fbc04b60cfd1..5a47915869ae 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -801,7 +801,8 @@ static int qup_i2c_bam_schedule_desc(struct qup_i2c_dev *qup)
if (ret || qup->bus_err || qup->qup_err) {
reinit_completion(&qup->xfer);
- if (qup_i2c_change_state(qup, QUP_RUN_STATE)) {
+ ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
+ if (ret) {
dev_err(qup->dev, "change to run state timed out");
goto desc_err;
}
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 7ee7ffe22ae3..d79335506ecd 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -1140,6 +1140,20 @@ static bool __init intel_idle_max_cstate_reached(int cstate)
return false;
}
+static bool __init intel_idle_state_needs_timer_stop(struct cpuidle_state *state)
+{
+ unsigned long eax = flg2MWAIT(state->flags);
+
+ if (boot_cpu_has(X86_FEATURE_ARAT))
+ return false;
+
+ /*
+ * Switch over to one-shot tick broadcast if the target C-state
+ * is deeper than C1.
+ */
+ return !!((eax >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK);
+}
+
#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
#include <acpi/processor.h>
@@ -1210,20 +1224,6 @@ static bool __init intel_idle_acpi_cst_extract(void)
return false;
}
-static bool __init intel_idle_state_needs_timer_stop(struct cpuidle_state *state)
-{
- unsigned long eax = flg2MWAIT(state->flags);
-
- if (boot_cpu_has(X86_FEATURE_ARAT))
- return false;
-
- /*
- * Switch over to one-shot tick broadcast if the target C-state
- * is deeper than C1.
- */
- return !!((eax >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK);
-}
-
static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
{
int cstate, limit = min_t(int, CPUIDLE_STATE_MAX, acpi_state_table.count);
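intel_idle_state_needs_timer_stop(), moved above so it is usable without the ACPI path, keys off the MWAIT hint encoding: the low nibble is the sub-state and the next nibble is the C-state field, which is zero for C1/C1E and non-zero for deeper states in the intel_idle tables. A hedged userspace sketch of the decode (the ARAT check is left out; the constants mirror the kernel's MWAIT_SUBSTATE_SIZE and MWAIT_CSTATE_MASK):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative only: extract the C-state field from an MWAIT hint the
 * way the helper above does.
 */
#define MWAIT_SUBSTATE_SIZE	4
#define MWAIT_CSTATE_MASK	0xf

static bool hint_deeper_than_c1(unsigned long eax)
{
	return !!((eax >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK);
}

int main(void)
{
	/* Typical intel_idle hints: 0x00 = C1, 0x01 = C1E, 0x20 = C6. */
	printf("0x00 -> %d\n", hint_deeper_than_c1(0x00)); /* 0: keep the tick */
	printf("0x01 -> %d\n", hint_deeper_than_c1(0x01)); /* 0: C1E is still shallow */
	printf("0x20 -> %d\n", hint_deeper_than_c1(0x20)); /* 1: one-shot broadcast needed */
	return 0;
}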
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 8017c40dd110..7989b7e1d1c0 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -1269,9 +1269,6 @@ ssize_t rdma_query_gid_table(struct ib_device *device,
unsigned long flags;
rdma_for_each_port(device, port_num) {
- if (!rdma_ib_or_roce(device, port_num))
- continue;
-
table = rdma_gid_table(device, port_num);
read_lock_irqsave(&table->rwlock, flags);
for (i = 0; i < table->sz; i++) {
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 012156624b82..5afd142fe8c7 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -1522,6 +1522,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
id.local_id);
if (IS_ERR(cm_id_priv->timewait_info)) {
ret = PTR_ERR(cm_id_priv->timewait_info);
+ cm_id_priv->timewait_info = NULL;
goto out;
}
@@ -2114,6 +2115,7 @@ static int cm_req_handler(struct cm_work *work)
id.local_id);
if (IS_ERR(cm_id_priv->timewait_info)) {
ret = PTR_ERR(cm_id_priv->timewait_info);
+ cm_id_priv->timewait_info = NULL;
goto destroy;
}
cm_id_priv->timewait_info->work.remote_id = cm_id_priv->id.remote_id;
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index 191e0843f090..4e940fc50bba 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -940,8 +940,8 @@ int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
1);
EFA_SET(&params.modify_mask,
EFA_ADMIN_MODIFY_QP_CMD_CUR_QP_STATE, 1);
- params.cur_qp_state = qp_attr->cur_qp_state;
- params.qp_state = qp_attr->qp_state;
+ params.cur_qp_state = cur_state;
+ params.qp_state = new_state;
}
if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 019642ff24a7..511c95bb3d01 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1936,6 +1936,15 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
}
if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+ qp->urq.db_rec_db2_addr = ctx->dpi_addr + uresp.rq_db2_offset;
+
+ /* calculate the db_rec_db2 data here since it is constant, so there
+ * is no need to pass it in from user space
+ */
+ qp->urq.db_rec_db2_data.data.icid = cpu_to_le16(qp->icid);
+ qp->urq.db_rec_db2_data.data.value =
+ cpu_to_le16(DQ_TCM_IWARP_POST_RQ_CF_CMD);
+
rc = qedr_db_recovery_add(dev, qp->urq.db_rec_db2_addr,
&qp->urq.db_rec_db2_data,
DB_REC_WIDTH_32B,
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index c77cdb3b62b5..8c73377ac82c 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -241,6 +241,7 @@ static const struct xpad_device {
{ 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
{ 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
{ 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
+ { 0x1209, 0x2882, "Ardwiino Controller", 0, XTYPE_XBOX360 },
{ 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 },
{ 0x12ab, 0x0303, "Mortal Kombat Klassic FightStick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
@@ -418,6 +419,7 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */
XPAD_XBOX360_VENDOR(0x1038), /* SteelSeries Controllers */
XPAD_XBOX360_VENDOR(0x11c9), /* Nacon GC100XF */
+ XPAD_XBOX360_VENDOR(0x1209), /* Ardwiino Controllers */
XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */
XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
index cae1a3fae83a..d14a65683c5e 100644
--- a/drivers/input/misc/soc_button_array.c
+++ b/drivers/input/misc/soc_button_array.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/input.h>
#include <linux/init.h>
+#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index a4c9b9652560..7ecb65176c1a 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -219,6 +219,10 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
},
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"),
+ },
},
{ }
};
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 944cbb519c6d..abae23af0791 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -1471,7 +1471,8 @@ static int __init i8042_setup_aux(void)
if (error)
goto err_free_ports;
- if (aux_enable())
+ error = aux_enable();
+ if (error)
goto err_free_irq;
i8042_aux_irq_registered = true;
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 98f17fa3a892..b6f75367a284 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -2183,11 +2183,11 @@ static int mxt_initialize(struct mxt_data *data)
msleep(MXT_FW_RESET_TIME);
}
- error = mxt_acquire_irq(data);
+ error = mxt_check_retrigen(data);
if (error)
return error;
- error = mxt_check_retrigen(data);
+ error = mxt_acquire_irq(data);
if (error)
return error;
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 89647700bab2..494b42a31b7a 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -257,7 +257,7 @@
#define DTE_IRQ_REMAP_INTCTL_MASK (0x3ULL << 60)
#define DTE_IRQ_TABLE_LEN_MASK (0xfULL << 1)
#define DTE_IRQ_REMAP_INTCTL (2ULL << 60)
-#define DTE_IRQ_TABLE_LEN (8ULL << 1)
+#define DTE_IRQ_TABLE_LEN (9ULL << 1)
#define DTE_IRQ_REMAP_ENABLE 1ULL
#define PAGE_MODE_NONE 0x00
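The DTE_IRQ_TABLE_LEN bump from 8 to 9 reads as an encoding fix rather than an allocation change: assuming the IntTabLen field advertises the interrupt-remapping table size as a power of two, a 512-entry table needs the value 9, since 8 only covers 256 entries. A trivial illustration of that arithmetic:

#include <stdio.h>

/* Illustrative only: if DTE[IntTabLen] encodes the IRT size as a power
 * of two, the number of remap entries the IOMMU will walk is 1 << len.
 */
int main(void)
{
	printf("len=8 -> %u entries\n", 1u << 8);	/* 256: too small for a 512-entry table */
	printf("len=9 -> %u entries\n", 1u << 9);	/* 512: matches the allocated table */
	return 0;
}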
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 9644424591da..4bc453f5bbaa 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -712,10 +712,6 @@ static bool block_size_is_power_of_two(struct cache *cache)
return cache->sectors_per_block_shift >= 0;
}
-/* gcc on ARM generates spurious references to __udivdi3 and __umoddi3 */
-#if defined(CONFIG_ARM) && __GNUC__ == 4 && __GNUC_MINOR__ <= 6
-__always_inline
-#endif
static dm_block_t block_div(dm_block_t b, uint32_t n)
{
do_div(b, n);
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 3fc3757def55..5a7a1b90e671 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -3462,7 +3462,7 @@ static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
int r;
if (a->alg_string) {
- *hash = crypto_alloc_shash(a->alg_string, 0, 0);
+ *hash = crypto_alloc_shash(a->alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
if (IS_ERR(*hash)) {
*error = error_alg;
r = PTR_ERR(*hash);
@@ -3519,7 +3519,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
struct journal_completion comp;
comp.ic = ic;
- ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, 0);
+ ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
if (IS_ERR(ic->journal_crypt)) {
*error = "Invalid journal cipher";
r = PTR_ERR(ic->journal_crypt);
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index ce543b761be7..7eeb7c4169c9 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -18,7 +18,6 @@
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/atomic.h>
-#include <linux/lcm.h>
#include <linux/blk-mq.h>
#include <linux/mount.h>
#include <linux/dax.h>
@@ -1247,12 +1246,6 @@ void dm_table_event_callback(struct dm_table *t,
void dm_table_event(struct dm_table *t)
{
- /*
- * You can no longer call dm_table_event() from interrupt
- * context, use a bottom half instead.
- */
- BUG_ON(in_interrupt());
-
mutex_lock(&_event_lock);
if (t->event_fn)
t->event_fn(t->event_context);
@@ -1455,10 +1448,6 @@ int dm_calculate_queue_limits(struct dm_table *table,
zone_sectors = ti_limits.chunk_sectors;
}
- /* Stack chunk_sectors if target-specific splitting is required */
- if (ti->max_io_len)
- ti_limits.chunk_sectors = lcm_not_zero(ti->max_io_len,
- ti_limits.chunk_sectors);
/* Set I/O hints portion of queue limits */
if (ti->type->io_hints)
ti->type->io_hints(ti, &ti_limits);
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 9ae4ce7df95c..d5223a0e5cc5 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -319,7 +319,7 @@ err1:
#else
static int persistent_memory_claim(struct dm_writecache *wc)
{
- BUG();
+ return -EOPNOTSUPP;
}
#endif
@@ -2041,7 +2041,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
struct wc_memory_superblock s;
static struct dm_arg _args[] = {
- {0, 10, "Invalid number of feature args"},
+ {0, 16, "Invalid number of feature args"},
};
as.argc = argc;
@@ -2479,6 +2479,8 @@ static void writecache_status(struct dm_target *ti, status_type_t type,
extra_args += 2;
if (wc->autocommit_time_set)
extra_args += 2;
+ if (wc->max_age != MAX_AGE_UNSPECIFIED)
+ extra_args += 2;
if (wc->cleaner)
extra_args++;
if (wc->writeback_fua_set)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index c18fc2548518..4e0cbfe3f14d 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -476,8 +476,10 @@ static int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
return -EAGAIN;
map = dm_get_live_table(md, &srcu_idx);
- if (!map)
- return -EIO;
+ if (!map) {
+ ret = -EIO;
+ goto out;
+ }
do {
struct dm_target *tgt;
@@ -507,7 +509,6 @@ out:
static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
struct block_device **bdev)
- __acquires(md->io_barrier)
{
struct dm_target *tgt;
struct dm_table *map;
@@ -541,7 +542,6 @@ retry:
}
static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
- __releases(md->io_barrier)
{
dm_put_live_table(md, srcu_idx);
}
@@ -1037,15 +1037,18 @@ static sector_t max_io_len(struct dm_target *ti, sector_t sector)
sector_t max_len;
/*
- * Does the target need to split even further?
- * - q->limits.chunk_sectors reflects ti->max_io_len so
- * blk_max_size_offset() provides required splitting.
- * - blk_max_size_offset() also respects q->limits.max_sectors
+ * Does the target need to split IO even further?
+ * - varied (per target) IO splitting is a tenet of DM; this
+ * explains why stacked chunk_sectors based splitting via
+ * blk_max_size_offset() isn't possible here. So pass in
+ * ti->max_io_len to override stacked chunk_sectors.
*/
- max_len = blk_max_size_offset(ti->table->md->queue,
- target_offset);
- if (len > max_len)
- len = max_len;
+ if (ti->max_io_len) {
+ max_len = blk_max_size_offset(ti->table->md->queue,
+ target_offset, ti->max_io_len);
+ if (len > max_len)
+ len = max_len;
+ }
return len;
}
@@ -1196,11 +1199,9 @@ static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
* ->zero_page_range() is mandatory dax operation. If we are
* here, something is wrong.
*/
- dm_put_live_table(md, srcu_idx);
goto out;
}
ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);
-
out:
dm_put_live_table(md, srcu_idx);
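The reworked max_io_len() comment says per-target splitting now passes ti->max_io_len straight into blk_max_size_offset() instead of relying on stacked chunk_sectors; the effect is ordinary boundary arithmetic, clipping the I/O at the next multiple of the per-target limit. A hedged sketch of that calculation for the power-of-two case (clip_to_boundary() is a hypothetical helper, not the block layer's function):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

/* Illustrative only: clip "len" so the I/O never crosses the next
 * multiple of "boundary" sectors (boundary assumed to be a power of two).
 */
static sector_t clip_to_boundary(sector_t offset, sector_t len, sector_t boundary)
{
	sector_t max_len = boundary - (offset & (boundary - 1));

	return len < max_len ? len : max_len;
}

int main(void)
{
	/* ti->max_io_len of 128 sectors, I/O starting 100 sectors into the
	 * target: only 28 sectors fit before the 128-sector boundary. */
	printf("%llu\n", (unsigned long long)clip_to_boundary(100, 256, 128));
	return 0;
}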
diff --git a/drivers/media/cec/usb/pulse8/pulse8-cec.c b/drivers/media/cec/usb/pulse8/pulse8-cec.c
index e4d8446b87da..04b13cdc38d2 100644
--- a/drivers/media/cec/usb/pulse8/pulse8-cec.c
+++ b/drivers/media/cec/usb/pulse8/pulse8-cec.c
@@ -88,13 +88,15 @@ enum pulse8_msgcodes {
MSGCODE_SET_PHYSICAL_ADDRESS, /* 0x20 */
MSGCODE_GET_DEVICE_TYPE,
MSGCODE_SET_DEVICE_TYPE,
- MSGCODE_GET_HDMI_VERSION,
+ MSGCODE_GET_HDMI_VERSION, /* Removed in FW >= 10 */
MSGCODE_SET_HDMI_VERSION,
MSGCODE_GET_OSD_NAME,
MSGCODE_SET_OSD_NAME,
MSGCODE_WRITE_EEPROM,
MSGCODE_GET_ADAPTER_TYPE, /* 0x28 */
MSGCODE_SET_ACTIVE_SOURCE,
+ MSGCODE_GET_AUTO_POWER_ON, /* New for FW >= 10 */
+ MSGCODE_SET_AUTO_POWER_ON,
MSGCODE_FRAME_EOM = 0x80,
MSGCODE_FRAME_ACK = 0x40,
@@ -143,6 +145,8 @@ static const char * const pulse8_msgnames[] = {
"WRITE_EEPROM",
"GET_ADAPTER_TYPE",
"SET_ACTIVE_SOURCE",
+ "GET_AUTO_POWER_ON",
+ "SET_AUTO_POWER_ON",
};
static const char *pulse8_msgname(u8 cmd)
@@ -579,12 +583,14 @@ static int pulse8_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
if (err)
goto unlock;
- cmd[0] = MSGCODE_SET_HDMI_VERSION;
- cmd[1] = adap->log_addrs.cec_version;
- err = pulse8_send_and_wait(pulse8, cmd, 2,
- MSGCODE_COMMAND_ACCEPTED, 0);
- if (err)
- goto unlock;
+ if (pulse8->vers < 10) {
+ cmd[0] = MSGCODE_SET_HDMI_VERSION;
+ cmd[1] = adap->log_addrs.cec_version;
+ err = pulse8_send_and_wait(pulse8, cmd, 2,
+ MSGCODE_COMMAND_ACCEPTED, 0);
+ if (err)
+ goto unlock;
+ }
if (adap->log_addrs.osd_name[0]) {
size_t osd_len = strlen(adap->log_addrs.osd_name);
@@ -650,7 +656,6 @@ static void pulse8_disconnect(struct serio *serio)
struct pulse8 *pulse8 = serio_get_drvdata(serio);
cec_unregister_adapter(pulse8->adap);
- pulse8->serio = NULL;
serio_set_drvdata(serio, NULL);
serio_close(serio);
}
@@ -692,6 +697,14 @@ static int pulse8_setup(struct pulse8 *pulse8, struct serio *serio,
dev_dbg(pulse8->dev, "Autonomous mode: %s",
data[0] ? "on" : "off");
+ if (pulse8->vers >= 10) {
+ cmd[0] = MSGCODE_GET_AUTO_POWER_ON;
+ err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
+ if (!err)
+ dev_dbg(pulse8->dev, "Auto Power On: %s",
+ data[0] ? "on" : "off");
+ }
+
cmd[0] = MSGCODE_GET_DEVICE_TYPE;
err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
if (err)
@@ -753,12 +766,15 @@ static int pulse8_setup(struct pulse8 *pulse8, struct serio *serio,
dev_dbg(pulse8->dev, "Physical address: %x.%x.%x.%x\n",
cec_phys_addr_exp(*pa));
- cmd[0] = MSGCODE_GET_HDMI_VERSION;
- err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
- if (err)
- return err;
- log_addrs->cec_version = data[0];
- dev_dbg(pulse8->dev, "CEC version: %d\n", log_addrs->cec_version);
+ log_addrs->cec_version = CEC_OP_CEC_VERSION_1_4;
+ if (pulse8->vers < 10) {
+ cmd[0] = MSGCODE_GET_HDMI_VERSION;
+ err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
+ if (err)
+ return err;
+ log_addrs->cec_version = data[0];
+ dev_dbg(pulse8->dev, "CEC version: %d\n", log_addrs->cec_version);
+ }
cmd[0] = MSGCODE_GET_OSD_NAME;
err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 0);
@@ -830,8 +846,10 @@ static int pulse8_connect(struct serio *serio, struct serio_driver *drv)
pulse8->adap = cec_allocate_adapter(&pulse8_cec_adap_ops, pulse8,
dev_name(&serio->dev), caps, 1);
err = PTR_ERR_OR_ZERO(pulse8->adap);
- if (err < 0)
- goto free_device;
+ if (err < 0) {
+ kfree(pulse8);
+ return err;
+ }
pulse8->dev = &serio->dev;
serio_set_drvdata(serio, pulse8);
@@ -874,8 +892,6 @@ close_serio:
serio_close(serio);
delete_adap:
cec_delete_adapter(pulse8->adap);
-free_device:
- kfree(pulse8);
return err;
}
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
index 4eab6d81cce1..89e38392509c 100644
--- a/drivers/media/common/videobuf2/videobuf2-core.c
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -414,6 +414,17 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
vb->index = q->num_buffers + buffer;
vb->type = q->type;
vb->memory = memory;
+ /*
+ * We need to set these flags here so that the videobuf2 core
+ * will call ->prepare()/->finish() cache sync/flush on vb2
+ * buffers when appropriate. However, we can avoid explicit
+ * ->prepare() and ->finish() cache sync for DMABUF buffers,
+ * because the DMA-buf exporter takes care of it.
+ */
+ if (q->memory != VB2_MEMORY_DMABUF) {
+ vb->need_cache_sync_on_prepare = 1;
+ vb->need_cache_sync_on_finish = 1;
+ }
for (plane = 0; plane < num_planes; ++plane) {
vb->planes[plane].length = plane_sizes[plane];
vb->planes[plane].min_length = plane_sizes[plane];
diff --git a/drivers/media/rc/mtk-cir.c b/drivers/media/rc/mtk-cir.c
index 5051a5e5244b..65a136c0fac2 100644
--- a/drivers/media/rc/mtk-cir.c
+++ b/drivers/media/rc/mtk-cir.c
@@ -151,15 +151,12 @@ static inline u32 mtk_chk_period(struct mtk_ir *ir)
{
u32 val;
- /* Period of raw software sampling in ns */
- val = DIV_ROUND_CLOSEST(1000000000ul,
- clk_get_rate(ir->bus) / ir->data->div);
-
/*
* Period for software decoder used in the
* unit of raw software sampling
*/
- val = DIV_ROUND_CLOSEST(MTK_IR_SAMPLE, val);
+ val = DIV_ROUND_CLOSEST(clk_get_rate(ir->bus),
+ USEC_PER_SEC * ir->data->div / MTK_IR_SAMPLE);
dev_dbg(ir->dev, "@pwm clk = \t%lu\n",
clk_get_rate(ir->bus) / ir->data->div);
@@ -412,7 +409,7 @@ static int mtk_ir_probe(struct platform_device *pdev)
mtk_irq_enable(ir, MTK_IRINT_EN);
dev_info(dev, "Initialized MT7623 IR driver, sample period = %dus\n",
- DIV_ROUND_CLOSEST(MTK_IR_SAMPLE, 1000));
+ MTK_IR_SAMPLE);
return 0;
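The mtk_chk_period() rewrite folds two DIV_ROUND_CLOSEST() calls into one so the intermediate nanosecond period is never rounded on its own, which is where the old code could drift by a whole raw sample. A toy comparison with made-up clock and sample values (explicitly not the real MT7623 clock, divider or sample period):

#include <stdio.h>

/* Illustrative only: rounding an intermediate result and dividing again
 * can land on a different value than a single rounded division.
 */
#define DIV_ROUND_CLOSEST(a, b) (((a) + (b) / 2) / (b))

int main(void)
{
	unsigned long long clk_hz = 49999;	/* hypothetical raw sample clock */
	unsigned long long sample_ns = 50000;	/* desired decoder period, 50 us */

	/* Old style: ns per raw sample first (20000.4 ns rounds to 20000),
	 * then round again -> 3 raw samples. */
	unsigned long long ns_per_raw = DIV_ROUND_CLOSEST(1000000000ULL, clk_hz);
	unsigned long long two_step = DIV_ROUND_CLOSEST(sample_ns, ns_per_raw);

	/* New style: a single rounded division of the exact ratio
	 * (2.49995...) -> 2 raw samples. */
	unsigned long long one_step = DIV_ROUND_CLOSEST(sample_ns * clk_hz, 1000000000ULL);

	printf("two-step=%llu one-step=%llu\n", two_step, one_step);
	return 0;
}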
diff --git a/drivers/media/test-drivers/vidtv/vidtv_channel.c b/drivers/media/test-drivers/vidtv/vidtv_channel.c
index 8ad6c0744d36..7838e6272712 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_channel.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_channel.c
@@ -504,11 +504,11 @@ void vidtv_channel_si_destroy(struct vidtv_mux *m)
{
u32 i;
- vidtv_psi_pat_table_destroy(m->si.pat);
-
for (i = 0; i < m->si.pat->num_pmt; ++i)
vidtv_psi_pmt_table_destroy(m->si.pmt_secs[i]);
+ vidtv_psi_pat_table_destroy(m->si.pat);
+
kfree(m->si.pmt_secs);
vidtv_psi_sdt_table_destroy(m->si.sdt);
vidtv_psi_nit_table_destroy(m->si.nit);
diff --git a/drivers/media/test-drivers/vidtv/vidtv_psi.h b/drivers/media/test-drivers/vidtv/vidtv_psi.h
index 340c9fb8d583..fdc825e54138 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_psi.h
+++ b/drivers/media/test-drivers/vidtv/vidtv_psi.h
@@ -420,7 +420,7 @@ void vidtv_psi_desc_assign(struct vidtv_psi_desc **to,
struct vidtv_psi_desc *desc);
/**
- * vidtv_psi_pmt_desc_assign - Assigns a descriptor loop at some point in a PMT section.
+ * vidtv_pmt_desc_assign - Assigns a descriptor loop at some point in a PMT section.
* @pmt: The PMT section that will contain the descriptor loop
* @to: Where in the PMT to assign this descriptor loop to
* @desc: The descriptor loop that will be assigned.
@@ -434,7 +434,7 @@ void vidtv_pmt_desc_assign(struct vidtv_psi_table_pmt *pmt,
struct vidtv_psi_desc *desc);
/**
- * vidtv_psi_sdt_desc_assign - Assigns a descriptor loop at some point in a SDT.
+ * vidtv_sdt_desc_assign - Assigns a descriptor loop at some point in a SDT.
* @sdt: The SDT that will contain the descriptor loop
* @to: Where in the PMT to assign this descriptor loop to
* @desc: The descriptor loop that will be assigned.
@@ -474,7 +474,7 @@ void vidtv_psi_pmt_stream_assign(struct vidtv_psi_table_pmt *pmt,
struct vidtv_psi_desc *vidtv_psi_desc_clone(struct vidtv_psi_desc *desc);
/**
- * vidtv_psi_create_sec_for_each_pat_entry - Create a PMT section for each
+ * vidtv_psi_pmt_create_sec_for_each_pat_entry - Create a PMT section for each
* program found in the PAT
* @pat: The PAT to look for programs.
* @pcr_pid: packet ID for the PCR to be used for the program described in this
@@ -743,7 +743,7 @@ struct vidtv_psi_table_eit {
struct vidtv_psi_table_eit
*vidtv_psi_eit_table_init(u16 network_id,
u16 transport_stream_id,
- u16 service_id);
+ __be16 service_id);
/**
* struct vidtv_psi_eit_write_args - Arguments for writing an EIT section
diff --git a/drivers/media/test-drivers/vidtv/vidtv_s302m.c b/drivers/media/test-drivers/vidtv/vidtv_s302m.c
index ce7dd6cafc8b..d79b65854627 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_s302m.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_s302m.c
@@ -467,8 +467,10 @@ struct vidtv_encoder
e->is_video_encoder = false;
ctx = kzalloc(priv_sz, GFP_KERNEL);
- if (!ctx)
+ if (!ctx) {
+ kfree(e);
return NULL;
+ }
e->ctx = ctx;
ctx->last_duration = 0;
diff --git a/drivers/media/test-drivers/vidtv/vidtv_ts.h b/drivers/media/test-drivers/vidtv/vidtv_ts.h
index 10838a2b8389..f5e8e1f37f05 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_ts.h
+++ b/drivers/media/test-drivers/vidtv/vidtv_ts.h
@@ -44,7 +44,7 @@ struct vidtv_mpeg_ts {
u8 adaptation_field:1;
u8 scrambling:2;
} __packed;
- struct vidtv_mpeg_ts_adaption adaption[];
+ struct vidtv_mpeg_ts_adaption *adaption;
} __packed;
/**
diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c
index 20572224099a..783bbdcb1e61 100644
--- a/drivers/misc/habanalabs/common/device.c
+++ b/drivers/misc/habanalabs/common/device.c
@@ -231,16 +231,16 @@ delete_cdev_device:
static void device_cdev_sysfs_del(struct hl_device *hdev)
{
- /* device_release() won't be called so must free devices explicitly */
- if (!hdev->cdev_sysfs_created) {
- kfree(hdev->dev_ctrl);
- kfree(hdev->dev);
- return;
- }
+ if (!hdev->cdev_sysfs_created)
+ goto put_devices;
hl_sysfs_fini(hdev);
cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);
cdev_device_del(&hdev->cdev, hdev->dev);
+
+put_devices:
+ put_device(hdev->dev);
+ put_device(hdev->dev_ctrl);
}
/*
@@ -1371,9 +1371,9 @@ sw_fini:
early_fini:
device_early_fini(hdev);
free_dev_ctrl:
- kfree(hdev->dev_ctrl);
+ put_device(hdev->dev_ctrl);
free_dev:
- kfree(hdev->dev);
+ put_device(hdev->dev);
out_disabled:
hdev->disabled = true;
if (add_cdev_sysfs_on_err)
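The habanalabs change above swaps kfree() for put_device() on the teardown paths: once device_initialize() has run, the memory is owned by the embedded kobject and must be released through the device's release callback, otherwise remaining reference holders are left pointing at freed memory. A minimal sketch of that ownership pattern (hypothetical driver context, not the habanalabs structures):

#include <linux/device.h>
#include <linux/slab.h>

/* Illustrative only: after device_initialize(), free the object via its
 * release callback, never by a direct kfree().
 */
struct my_ctx {
	struct device dev;
};

static void my_ctx_release(struct device *dev)
{
	kfree(container_of(dev, struct my_ctx, dev));
}

static struct my_ctx *my_ctx_create(void)
{
	struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return NULL;

	device_initialize(&ctx->dev);
	ctx->dev.release = my_ctx_release;
	return ctx;
}

static void my_ctx_destroy(struct my_ctx *ctx)
{
	/* Drop our reference; my_ctx_release() runs once the last holder
	 * of a get_device() reference lets go. */
	put_device(&ctx->dev);
}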
diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c
index 84227819e4d1..bfe223abf142 100644
--- a/drivers/misc/habanalabs/common/memory.c
+++ b/drivers/misc/habanalabs/common/memory.c
@@ -1626,6 +1626,7 @@ static int vm_ctx_init_with_ranges(struct hl_ctx *ctx,
goto host_hpage_range_err;
}
} else {
+ kfree(ctx->host_huge_va_range);
ctx->host_huge_va_range = ctx->host_va_range;
}
diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig
index c06581ffa7bd..f5fd5b786607 100644
--- a/drivers/misc/mei/Kconfig
+++ b/drivers/misc/mei/Kconfig
@@ -46,14 +46,4 @@ config INTEL_MEI_TXE
Supported SoCs:
Intel Bay Trail
-config INTEL_MEI_VIRTIO
- tristate "Intel MEI interface emulation with virtio framework"
- select INTEL_MEI
- depends on X86 && PCI && VIRTIO_PCI
- help
- This module implements mei hw emulation over virtio transport.
- The module will be called mei_virtio.
- Enable this if your virtual machine supports virtual mei
- device over virtio.
-
source "drivers/misc/mei/hdcp/Kconfig"
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile
index 52aefaab5c1b..f1c76f7ee804 100644
--- a/drivers/misc/mei/Makefile
+++ b/drivers/misc/mei/Makefile
@@ -22,9 +22,6 @@ obj-$(CONFIG_INTEL_MEI_TXE) += mei-txe.o
mei-txe-objs := pci-txe.o
mei-txe-objs += hw-txe.o
-obj-$(CONFIG_INTEL_MEI_VIRTIO) += mei-virtio.o
-mei-virtio-objs := hw-virtio.o
-
mei-$(CONFIG_EVENT_TRACING) += mei-trace.o
CFLAGS_mei-trace.o = -I$(src)
diff --git a/drivers/misc/mei/hw-virtio.c b/drivers/misc/mei/hw-virtio.c
deleted file mode 100644
index 899dc1c5e7ca..000000000000
--- a/drivers/misc/mei/hw-virtio.c
+++ /dev/null
@@ -1,874 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2018-2020, Intel Corporation.
- */
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/pm_runtime.h>
-#include <linux/scatterlist.h>
-#include <linux/spinlock.h>
-#include <linux/slab.h>
-#include <linux/virtio.h>
-#include <linux/virtio_config.h>
-#include <linux/virtio_ids.h>
-#include <linux/atomic.h>
-
-#include "mei_dev.h"
-#include "hbm.h"
-#include "client.h"
-
-#define MEI_VIRTIO_RPM_TIMEOUT 500
-/* ACRN virtio device types */
-#ifndef VIRTIO_ID_MEI
-#define VIRTIO_ID_MEI 0xFFFE /* virtio mei */
-#endif
-
-/**
- * struct mei_virtio_cfg - settings passed from the virtio backend
- * @buf_depth: read buffer depth in slots (4bytes)
- * @hw_ready: hw is ready for operation
- * @host_reset: synchronize reset with virtio backend
- * @reserved: reserved for alignment
- * @fw_status: FW status
- */
-struct mei_virtio_cfg {
- u32 buf_depth;
- u8 hw_ready;
- u8 host_reset;
- u8 reserved[2];
- u32 fw_status[MEI_FW_STATUS_MAX];
-} __packed;
-
-struct mei_virtio_hw {
- struct mei_device mdev;
- char name[32];
-
- struct virtqueue *in;
- struct virtqueue *out;
-
- bool host_ready;
- struct work_struct intr_handler;
-
- u32 *recv_buf;
- u8 recv_rdy;
- size_t recv_sz;
- u32 recv_idx;
- u32 recv_len;
-
- /* send buffer */
- atomic_t hbuf_ready;
- const void *send_hdr;
- const void *send_buf;
-
- struct mei_virtio_cfg cfg;
-};
-
-#define to_virtio_hw(_dev) container_of(_dev, struct mei_virtio_hw, mdev)
-
-/**
- * mei_virtio_fw_status() - read status register of mei
- * @dev: mei device
- * @fw_status: fw status register values
- *
- * Return: always 0
- */
-static int mei_virtio_fw_status(struct mei_device *dev,
- struct mei_fw_status *fw_status)
-{
- struct virtio_device *vdev = dev_to_virtio(dev->dev);
-
- fw_status->count = MEI_FW_STATUS_MAX;
- virtio_cread_bytes(vdev, offsetof(struct mei_virtio_cfg, fw_status),
- fw_status->status, sizeof(fw_status->status));
- return 0;
-}
-
-/**
- * mei_virtio_pg_state() - translate internal pg state
- * to the mei power gating state
- * There is no power management in ACRN mode always return OFF
- * @dev: mei device
- *
- * Return:
- * * MEI_PG_OFF - if aliveness is on (always)
- * * MEI_PG_ON - (never)
- */
-static inline enum mei_pg_state mei_virtio_pg_state(struct mei_device *dev)
-{
- return MEI_PG_OFF;
-}
-
-/**
- * mei_virtio_hw_config() - configure hw dependent settings
- *
- * @dev: mei device
- *
- * Return: always 0
- */
-static int mei_virtio_hw_config(struct mei_device *dev)
-{
- return 0;
-}
-
-/**
- * mei_virtio_hbuf_empty_slots() - counts write empty slots.
- * @dev: the device structure
- *
- * Return: always return frontend buf size if buffer is ready, 0 otherwise
- */
-static int mei_virtio_hbuf_empty_slots(struct mei_device *dev)
-{
- struct mei_virtio_hw *hw = to_virtio_hw(dev);
-
- return (atomic_read(&hw->hbuf_ready) == 1) ? hw->cfg.buf_depth : 0;
-}
-
-/**
- * mei_virtio_hbuf_is_ready() - checks if write buffer is ready
- * @dev: the device structure
- *
- * Return: true if hbuf is ready
- */
-static bool mei_virtio_hbuf_is_ready(struct mei_device *dev)
-{
- struct mei_virtio_hw *hw = to_virtio_hw(dev);
-
- return atomic_read(&hw->hbuf_ready) == 1;
-}
-
-/**
- * mei_virtio_hbuf_max_depth() - returns depth of FE write buffer.
- * @dev: the device structure
- *
- * Return: size of frontend write buffer in bytes
- */
-static u32 mei_virtio_hbuf_depth(const struct mei_device *dev)
-{
- struct mei_virtio_hw *hw = to_virtio_hw(dev);
-
- return hw->cfg.buf_depth;
-}
-
-/**
- * mei_virtio_intr_clear() - clear and stop interrupts
- * @dev: the device structure
- */
-static void mei_virtio_intr_clear(struct mei_device *dev)
-{
- /*
- * In our virtio solution, there are two types of interrupts,
- * vq interrupt and config change interrupt.
- * 1) start/reset rely on virtio config changed interrupt;
- * 2) send/recv rely on virtio virtqueue interrupts.
- * They are all virtual interrupts. So, we don't have corresponding
- * operation to do here.
- */
-}
-
-/**
- * mei_virtio_intr_enable() - enables mei BE virtqueues callbacks
- * @dev: the device structure
- */
-static void mei_virtio_intr_enable(struct mei_device *dev)
-{
- struct mei_virtio_hw *hw = to_virtio_hw(dev);
- struct virtio_device *vdev = dev_to_virtio(dev->dev);
-
- virtio_config_enable(vdev);
-
- virtqueue_enable_cb(hw->in);
- virtqueue_enable_cb(hw->out);
-}
-
-/**
- * mei_virtio_intr_disable() - disables mei BE virtqueues callbacks
- *
- * @dev: the device structure
- */
-static void mei_virtio_intr_disable(struct mei_device *dev)
-{
- struct mei_virtio_hw *hw = to_virtio_hw(dev);
- struct virtio_device *vdev = dev_to_virtio(dev->dev);
-
- virtio_config_disable(vdev);
-
- virtqueue_disable_cb(hw->in);
- virtqueue_disable_cb(hw->out);
-}
-
-/**
- * mei_virtio_synchronize_irq() - wait for pending IRQ handlers for all
- * virtqueue
- * @dev: the device structure
- */
-static void mei_virtio_synchronize_irq(struct mei_device *dev)
-{
- struct mei_virtio_hw *hw = to_virtio_hw(dev);
-
- /*
- * Now, all IRQ handlers are converted to workqueue.
- * Change synchronize irq to flush this work.
- */
- flush_work(&hw->intr_handler);
-}
-
-static void mei_virtio_free_outbufs(struct mei_virtio_hw *hw)
-{
- kfree(hw->send_hdr);
- kfree(hw->send_buf);
- hw->send_hdr = NULL;
- hw->send_buf = NULL;
-}
-
-/**
- * mei_virtio_write_message() - writes a message to mei virtio back-end service.
- * @dev: the device structure
- * @hdr: mei header of message
- * @hdr_len: header length
- * @data: message payload will be written
- * @data_len: message payload length
- *
- * Return:
- * * 0: on success
- * * -EIO: if write has failed
- * * -ENOMEM: on memory allocation failure
- */
-static int mei_virtio_write_message(struct mei_device *dev,
- const void *hdr, size_t hdr_len,
- const void *data, size_t data_len)
-{
- struct mei_virtio_hw *hw = to_virtio_hw(dev);
- struct scatterlist sg[2];
- const void *hbuf, *dbuf;
- int ret;
-
- if (WARN_ON(!atomic_add_unless(&hw->hbuf_ready, -1, 0)))
- return -EIO;
-
- hbuf = kmemdup(hdr, hdr_len, GFP_KERNEL);
- hw->send_hdr = hbuf;
-
- dbuf = kmemdup(data, data_len, GFP_KERNEL);
- hw->send_buf = dbuf;
-
- if (!hbuf || !dbuf) {
- ret = -ENOMEM;
- goto fail;
- }
-
- sg_init_table(sg, 2);
- sg_set_buf(&sg[0], hbuf, hdr_len);
- sg_set_buf(&sg[1], dbuf, data_len);
-
- ret = virtqueue_add_outbuf(hw->out, sg, 2, hw, GFP_KERNEL);
- if (ret) {
- dev_err(dev->dev, "failed to add outbuf\n");
- goto fail;
- }
-
- virtqueue_kick(hw->out);
- return 0;
-fail:
-
- mei_virtio_free_outbufs(hw);
-
- return ret;
-}
-
-/**
- * mei_virtio_count_full_read_slots() - counts read full slots.
- * @dev: the device structure
- *
- * Return: -EOVERFLOW if overflow, otherwise filled slots count
- */
-static int mei_virtio_count_full_read_slots(struct mei_device *dev)
-{
- struct mei_virtio_hw *hw = to_virtio_hw(dev);
-
- if (hw->recv_idx > hw->recv_len)
- return -EOVERFLOW;
-
- return hw->recv_len - hw->recv_idx;
-}
-
-/**
- * mei_virtio_read_hdr() - Reads 32bit dword from mei virtio receive buffer
- *
- * @dev: the device structure
- *
- * Return: 32bit dword of receive buffer (u32)
- */
-static inline u32 mei_virtio_read_hdr(const struct mei_device *dev)
-{
- struct mei_virtio_hw *hw = to_virtio_hw(dev);
-
- WARN_ON(hw->cfg.buf_depth < hw->recv_idx + 1);
-
- return hw->recv_buf[hw->recv_idx++];
-}
-
-static int mei_virtio_read(struct mei_device *dev, unsigned char *buffer,
- unsigned long len)
-{
- struct mei_virtio_hw *hw = to_virtio_hw(dev);
- u32 slots = mei_data2slots(len);
-
- if (WARN_ON(hw->cfg.buf_depth < hw->recv_idx + slots))
- return -EOVERFLOW;
-
- /*
- * Assumption: There is only one MEI message in recv_buf each time.
- * Backend service need follow this rule too.
- */
- memcpy(buffer, hw->recv_buf + hw->recv_idx, len);
- hw->recv_idx += slots;
-
- return 0;
-}
-
-static bool mei_virtio_pg_is_enabled(struct mei_device *dev)
-{
- return false;
-}
-
-static bool mei_virtio_pg_in_transition(struct mei_device *dev)
-{
- return false;
-}
-
-static void mei_virtio_add_recv_buf(struct mei_virtio_hw *hw)
-{
- struct scatterlist sg;
-
- if (hw->recv_rdy) /* not needed */
- return;
-
- /* refill the recv_buf to IN virtqueue to get next message */
- sg_init_one(&sg, hw->recv_buf, mei_slots2data(hw->cfg.buf_depth));
- hw->recv_len = 0;
- hw->recv_idx = 0;
- hw->recv_rdy = 1;
- virtqueue_add_inbuf(hw->in, &sg, 1, hw->recv_buf, GFP_KERNEL);
- virtqueue_kick(hw->in);
-}
-
-/**
- * mei_virtio_hw_is_ready() - check whether the BE(hw) has turned ready
- * @dev: mei device
- * Return: bool
- */
-static bool mei_virtio_hw_is_ready(struct mei_device *dev)
-{
- struct mei_virtio_hw *hw = to_virtio_hw(dev);
- struct virtio_device *vdev = dev_to_virtio(dev->dev);
-
- virtio_cread(vdev, struct mei_virtio_cfg,
- hw_ready, &hw->cfg.hw_ready);
-
- dev_dbg(dev->dev, "hw ready %d\n", hw->cfg.hw_ready);
-
- return hw->cfg.hw_ready;
-}
-
-/**
- * mei_virtio_hw_reset - resets virtio hw.
- *
- * @dev: the device structure
- * @intr_enable: virtio use data/config callbacks
- *
- * Return: 0 on success an error code otherwise
- */
-static int mei_virtio_hw_reset(struct mei_device *dev, bool intr_enable)
-{
- struct mei_virtio_hw *hw = to_virtio_hw(dev);
- struct virtio_device *vdev = dev_to_virtio(dev->dev);
-
- dev_dbg(dev->dev, "hw reset\n");
-
- dev->recvd_hw_ready = false;
- hw->host_ready = false;
- atomic_set(&hw->hbuf_ready, 0);
- hw->recv_len = 0;
- hw->recv_idx = 0;
-
- hw->cfg.host_reset = 1;
- virtio_cwrite(vdev, struct mei_virtio_cfg,
- host_reset, &hw->cfg.host_reset);
-
- mei_virtio_hw_is_ready(dev);
-
- if (intr_enable)
- mei_virtio_intr_enable(dev);
-
- return 0;
-}
-
-/**
- * mei_virtio_hw_reset_release() - release device from the reset
- * @dev: the device structure
- */
-static void mei_virtio_hw_reset_release(struct mei_device *dev)
-{
- struct mei_virtio_hw *hw = to_virtio_hw(dev);
- struct virtio_device *vdev = dev_to_virtio(dev->dev);
-
- dev_dbg(dev->dev, "hw reset release\n");
- hw->cfg.host_reset = 0;
- virtio_cwrite(vdev, struct mei_virtio_cfg,
- host_reset, &hw->cfg.host_reset);
-}
-
-/**
- * mei_virtio_hw_ready_wait() - wait until the virtio(hw) has turned ready
- * or timeout is reached
- * @dev: mei device
- *
- * Return: 0 on success, error otherwise
- */
-static int mei_virtio_hw_ready_wait(struct mei_device *dev)
-{
- mutex_unlock(&dev->device_lock);
- wait_event_timeout(dev->wait_hw_ready,
- dev->recvd_hw_ready,
- mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT));
- mutex_lock(&dev->device_lock);
- if (!dev->recvd_hw_ready) {
- dev_err(dev->dev, "wait hw ready failed\n");
- return -ETIMEDOUT;
- }
-
- dev->recvd_hw_ready = false;
- return 0;
-}
-
-/**
- * mei_virtio_hw_start() - hw start routine
- * @dev: mei device
- *
- * Return: 0 on success, error otherwise
- */
-static int mei_virtio_hw_start(struct mei_device *dev)
-{
- struct mei_virtio_hw *hw = to_virtio_hw(dev);
- int ret;
-
- dev_dbg(dev->dev, "hw start\n");
- mei_virtio_hw_reset_release(dev);
-
- ret = mei_virtio_hw_ready_wait(dev);
- if (ret)
- return ret;
-
- mei_virtio_add_recv_buf(hw);
- atomic_set(&hw->hbuf_ready, 1);
- dev_dbg(dev->dev, "hw is ready\n");
- hw->host_ready = true;
-
- return 0;
-}
-
-/**
- * mei_virtio_host_is_ready() - check whether the FE has turned ready
- * @dev: mei device
- *
- * Return: bool
- */
-static bool mei_virtio_host_is_ready(struct mei_device *dev)
-{
- struct mei_virtio_hw *hw = to_virtio_hw(dev);
-
- dev_dbg(dev->dev, "host ready %d\n", hw->host_ready);
-
- return hw->host_ready;
-}
-
-/**
- * mei_virtio_data_in() - The callback of recv virtqueue of virtio mei
- * @vq: receiving virtqueue
- */
-static void mei_virtio_data_in(struct virtqueue *vq)
-{
- struct mei_virtio_hw *hw = vq->vdev->priv;
-
- /* disable interrupts (enabled again from in the interrupt worker) */
- virtqueue_disable_cb(hw->in);
-
- schedule_work(&hw->intr_handler);
-}
-
-/**
- * mei_virtio_data_out() - The callback of send virtqueue of virtio mei
- * @vq: transmitting virtqueue
- */
-static void mei_virtio_data_out(struct virtqueue *vq)
-{
- struct mei_virtio_hw *hw = vq->vdev->priv;
-
- schedule_work(&hw->intr_handler);
-}
-
-static void mei_virtio_intr_handler(struct work_struct *work)
-{
- struct mei_virtio_hw *hw =
- container_of(work, struct mei_virtio_hw, intr_handler);
- struct mei_device *dev = &hw->mdev;
- LIST_HEAD(complete_list);
- s32 slots;
- int rets = 0;
- void *data;
- unsigned int len;
-
- mutex_lock(&dev->device_lock);
-
- if (dev->dev_state == MEI_DEV_DISABLED) {
- dev_warn(dev->dev, "Interrupt in disabled state.\n");
- mei_virtio_intr_disable(dev);
- goto end;
- }
-
- /* check if ME wants a reset */
- if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
- dev_warn(dev->dev, "BE service not ready: resetting.\n");
- schedule_work(&dev->reset_work);
- goto end;
- }
-
- /* check if we need to start the dev */
- if (!mei_host_is_ready(dev)) {
- if (mei_hw_is_ready(dev)) {
- dev_dbg(dev->dev, "we need to start the dev.\n");
- dev->recvd_hw_ready = true;
- wake_up(&dev->wait_hw_ready);
- } else {
- dev_warn(dev->dev, "Spurious Interrupt\n");
- }
- goto end;
- }
-
- /* read */
- if (hw->recv_rdy) {
- data = virtqueue_get_buf(hw->in, &len);
- if (!data || !len) {
- dev_dbg(dev->dev, "No data %d", len);
- } else {
- dev_dbg(dev->dev, "data_in %d\n", len);
- WARN_ON(data != hw->recv_buf);
- hw->recv_len = mei_data2slots(len);
- hw->recv_rdy = 0;
- }
- }
-
- /* write */
- if (!atomic_read(&hw->hbuf_ready)) {
- if (!virtqueue_get_buf(hw->out, &len)) {
- dev_warn(dev->dev, "Failed to getbuf\n");
- } else {
- mei_virtio_free_outbufs(hw);
- atomic_inc(&hw->hbuf_ready);
- }
- }
-
- /* check slots available for reading */
- slots = mei_count_full_read_slots(dev);
- while (slots > 0) {
- dev_dbg(dev->dev, "slots to read = %08x\n", slots);
- rets = mei_irq_read_handler(dev, &complete_list, &slots);
-
- if (rets &&
- (dev->dev_state != MEI_DEV_RESETTING &&
- dev->dev_state != MEI_DEV_POWER_DOWN)) {
- dev_err(dev->dev, "mei_irq_read_handler ret = %d.\n",
- rets);
- schedule_work(&dev->reset_work);
- goto end;
- }
- }
-
- dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
-
- mei_irq_write_handler(dev, &complete_list);
-
- dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
-
- mei_irq_compl_handler(dev, &complete_list);
-
- mei_virtio_add_recv_buf(hw);
-
-end:
- if (dev->dev_state != MEI_DEV_DISABLED) {
- if (!virtqueue_enable_cb(hw->in))
- schedule_work(&hw->intr_handler);
- }
-
- mutex_unlock(&dev->device_lock);
-}
-
-static void mei_virtio_config_changed(struct virtio_device *vdev)
-{
- struct mei_virtio_hw *hw = vdev->priv;
- struct mei_device *dev = &hw->mdev;
-
- virtio_cread(vdev, struct mei_virtio_cfg,
- hw_ready, &hw->cfg.hw_ready);
-
- if (dev->dev_state == MEI_DEV_DISABLED) {
- dev_dbg(dev->dev, "disabled state don't start\n");
- return;
- }
-
- /* Run intr handler once to handle reset notify */
- schedule_work(&hw->intr_handler);
-}
-
-static void mei_virtio_remove_vqs(struct virtio_device *vdev)
-{
- struct mei_virtio_hw *hw = vdev->priv;
-
- virtqueue_detach_unused_buf(hw->in);
- hw->recv_len = 0;
- hw->recv_idx = 0;
- hw->recv_rdy = 0;
-
- virtqueue_detach_unused_buf(hw->out);
-
- mei_virtio_free_outbufs(hw);
-
- vdev->config->del_vqs(vdev);
-}
-
-/*
- * There are two virtqueues, one is for send and another is for recv.
- */
-static int mei_virtio_init_vqs(struct mei_virtio_hw *hw,
- struct virtio_device *vdev)
-{
- struct virtqueue *vqs[2];
-
- vq_callback_t *cbs[] = {
- mei_virtio_data_in,
- mei_virtio_data_out,
- };
- static const char * const names[] = {
- "in",
- "out",
- };
- int ret;
-
- ret = virtio_find_vqs(vdev, 2, vqs, cbs, names, NULL);
- if (ret)
- return ret;
-
- hw->in = vqs[0];
- hw->out = vqs[1];
-
- return 0;
-}
-
-static const struct mei_hw_ops mei_virtio_ops = {
- .fw_status = mei_virtio_fw_status,
- .pg_state = mei_virtio_pg_state,
-
- .host_is_ready = mei_virtio_host_is_ready,
-
- .hw_is_ready = mei_virtio_hw_is_ready,
- .hw_reset = mei_virtio_hw_reset,
- .hw_config = mei_virtio_hw_config,
- .hw_start = mei_virtio_hw_start,
-
- .pg_in_transition = mei_virtio_pg_in_transition,
- .pg_is_enabled = mei_virtio_pg_is_enabled,
-
- .intr_clear = mei_virtio_intr_clear,
- .intr_enable = mei_virtio_intr_enable,
- .intr_disable = mei_virtio_intr_disable,
- .synchronize_irq = mei_virtio_synchronize_irq,
-
- .hbuf_free_slots = mei_virtio_hbuf_empty_slots,
- .hbuf_is_ready = mei_virtio_hbuf_is_ready,
- .hbuf_depth = mei_virtio_hbuf_depth,
-
- .write = mei_virtio_write_message,
-
- .rdbuf_full_slots = mei_virtio_count_full_read_slots,
- .read_hdr = mei_virtio_read_hdr,
- .read = mei_virtio_read,
-};
-
-static int mei_virtio_probe(struct virtio_device *vdev)
-{
- struct mei_virtio_hw *hw;
- int ret;
-
- hw = devm_kzalloc(&vdev->dev, sizeof(*hw), GFP_KERNEL);
- if (!hw)
- return -ENOMEM;
-
- vdev->priv = hw;
-
- INIT_WORK(&hw->intr_handler, mei_virtio_intr_handler);
-
- ret = mei_virtio_init_vqs(hw, vdev);
- if (ret)
- goto vqs_failed;
-
- virtio_cread(vdev, struct mei_virtio_cfg,
- buf_depth, &hw->cfg.buf_depth);
-
- hw->recv_buf = kzalloc(mei_slots2data(hw->cfg.buf_depth), GFP_KERNEL);
- if (!hw->recv_buf) {
- ret = -ENOMEM;
- goto hbuf_failed;
- }
- atomic_set(&hw->hbuf_ready, 0);
-
- virtio_device_ready(vdev);
-
- mei_device_init(&hw->mdev, &vdev->dev, &mei_virtio_ops);
-
- pm_runtime_get_noresume(&vdev->dev);
- pm_runtime_set_active(&vdev->dev);
- pm_runtime_enable(&vdev->dev);
-
- ret = mei_start(&hw->mdev);
- if (ret)
- goto mei_start_failed;
-
- pm_runtime_set_autosuspend_delay(&vdev->dev, MEI_VIRTIO_RPM_TIMEOUT);
- pm_runtime_use_autosuspend(&vdev->dev);
-
- ret = mei_register(&hw->mdev, &vdev->dev);
- if (ret)
- goto mei_failed;
-
- pm_runtime_put(&vdev->dev);
-
- return 0;
-
-mei_failed:
- mei_stop(&hw->mdev);
-mei_start_failed:
- mei_cancel_work(&hw->mdev);
- mei_disable_interrupts(&hw->mdev);
- kfree(hw->recv_buf);
-hbuf_failed:
- vdev->config->del_vqs(vdev);
-vqs_failed:
- return ret;
-}
-
-static int __maybe_unused mei_virtio_pm_runtime_idle(struct device *device)
-{
- struct virtio_device *vdev = dev_to_virtio(device);
- struct mei_virtio_hw *hw = vdev->priv;
-
- dev_dbg(&vdev->dev, "rpm: mei_virtio : runtime_idle\n");
-
- if (!hw)
- return -ENODEV;
-
- if (mei_write_is_idle(&hw->mdev))
- pm_runtime_autosuspend(device);
-
- return -EBUSY;
-}
-
-static int __maybe_unused mei_virtio_pm_runtime_suspend(struct device *device)
-{
- return 0;
-}
-
-static int __maybe_unused mei_virtio_pm_runtime_resume(struct device *device)
-{
- return 0;
-}
-
-static int __maybe_unused mei_virtio_freeze(struct virtio_device *vdev)
-{
- struct mei_virtio_hw *hw = vdev->priv;
-
- dev_dbg(&vdev->dev, "freeze\n");
-
- if (!hw)
- return -ENODEV;
-
- mei_stop(&hw->mdev);
- mei_disable_interrupts(&hw->mdev);
- cancel_work_sync(&hw->intr_handler);
- vdev->config->reset(vdev);
- mei_virtio_remove_vqs(vdev);
-
- return 0;
-}
-
-static int __maybe_unused mei_virtio_restore(struct virtio_device *vdev)
-{
- struct mei_virtio_hw *hw = vdev->priv;
- int ret;
-
- dev_dbg(&vdev->dev, "restore\n");
-
- if (!hw)
- return -ENODEV;
-
- ret = mei_virtio_init_vqs(hw, vdev);
- if (ret)
- return ret;
-
- virtio_device_ready(vdev);
-
- ret = mei_restart(&hw->mdev);
- if (ret)
- return ret;
-
- /* Start timer if stopped in suspend */
- schedule_delayed_work(&hw->mdev.timer_work, HZ);
-
- return 0;
-}
-
-static const struct dev_pm_ops mei_virtio_pm_ops = {
- SET_RUNTIME_PM_OPS(mei_virtio_pm_runtime_suspend,
- mei_virtio_pm_runtime_resume,
- mei_virtio_pm_runtime_idle)
-};
-
-static void mei_virtio_remove(struct virtio_device *vdev)
-{
- struct mei_virtio_hw *hw = vdev->priv;
-
- mei_stop(&hw->mdev);
- mei_disable_interrupts(&hw->mdev);
- cancel_work_sync(&hw->intr_handler);
- mei_deregister(&hw->mdev);
- vdev->config->reset(vdev);
- mei_virtio_remove_vqs(vdev);
- kfree(hw->recv_buf);
- pm_runtime_disable(&vdev->dev);
-}
-
-static struct virtio_device_id id_table[] = {
- { VIRTIO_ID_MEI, VIRTIO_DEV_ANY_ID },
- { }
-};
-
-static struct virtio_driver mei_virtio_driver = {
- .id_table = id_table,
- .probe = mei_virtio_probe,
- .remove = mei_virtio_remove,
- .config_changed = mei_virtio_config_changed,
- .driver = {
- .name = KBUILD_MODNAME,
- .owner = THIS_MODULE,
- .pm = &mei_virtio_pm_ops,
- },
-#ifdef CONFIG_PM_SLEEP
- .freeze = mei_virtio_freeze,
- .restore = mei_virtio_restore,
-#endif
-};
-
-module_virtio_driver(mei_virtio_driver);
-MODULE_DEVICE_TABLE(virtio, id_table);
-MODULE_DESCRIPTION("Virtio MEI frontend driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 8d3df0be0355..42e27a298218 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -580,7 +580,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
- if (idata->rpmb || (cmd.flags & MMC_RSP_R1B)) {
+ if (idata->rpmb || (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
/*
* Ensure RPMB/R1B command has completed by polling CMD13
* "Send Status".
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index a704745e5882..004fbfc23672 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -446,7 +446,7 @@ struct msdc_host {
static const struct mtk_mmc_compatible mt8135_compat = {
.clk_div_bits = 8,
- .recheck_sdio_irq = false,
+ .recheck_sdio_irq = true,
.hs400_tune = false,
.pad_tune_reg = MSDC_PAD_TUNE,
.async_fifo = false,
@@ -485,7 +485,7 @@ static const struct mtk_mmc_compatible mt8183_compat = {
static const struct mtk_mmc_compatible mt2701_compat = {
.clk_div_bits = 12,
- .recheck_sdio_irq = false,
+ .recheck_sdio_irq = true,
.hs400_tune = false,
.pad_tune_reg = MSDC_PAD_TUNE0,
.async_fifo = true,
@@ -511,7 +511,7 @@ static const struct mtk_mmc_compatible mt2712_compat = {
static const struct mtk_mmc_compatible mt7622_compat = {
.clk_div_bits = 12,
- .recheck_sdio_irq = false,
+ .recheck_sdio_irq = true,
.hs400_tune = false,
.pad_tune_reg = MSDC_PAD_TUNE0,
.async_fifo = true,
@@ -524,7 +524,7 @@ static const struct mtk_mmc_compatible mt7622_compat = {
static const struct mtk_mmc_compatible mt8516_compat = {
.clk_div_bits = 12,
- .recheck_sdio_irq = false,
+ .recheck_sdio_irq = true,
.hs400_tune = false,
.pad_tune_reg = MSDC_PAD_TUNE0,
.async_fifo = true,
@@ -535,7 +535,7 @@ static const struct mtk_mmc_compatible mt8516_compat = {
static const struct mtk_mmc_compatible mt7620_compat = {
.clk_div_bits = 8,
- .recheck_sdio_irq = false,
+ .recheck_sdio_irq = true,
.hs400_tune = false,
.pad_tune_reg = MSDC_PAD_TUNE,
.async_fifo = false,
@@ -548,6 +548,7 @@ static const struct mtk_mmc_compatible mt7620_compat = {
static const struct mtk_mmc_compatible mt6779_compat = {
.clk_div_bits = 12,
+ .recheck_sdio_irq = false,
.hs400_tune = false,
.pad_tune_reg = MSDC_PAD_TUNE0,
.async_fifo = true,
@@ -2603,7 +2604,6 @@ static int msdc_drv_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
static void msdc_save_reg(struct msdc_host *host)
{
u32 tune_reg = host->dev_comp->pad_tune_reg;
@@ -2662,7 +2662,7 @@ static void msdc_restore_reg(struct msdc_host *host)
__msdc_enable_sdio_irq(host, 1);
}
-static int msdc_runtime_suspend(struct device *dev)
+static int __maybe_unused msdc_runtime_suspend(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
struct msdc_host *host = mmc_priv(mmc);
@@ -2672,7 +2672,7 @@ static int msdc_runtime_suspend(struct device *dev)
return 0;
}
-static int msdc_runtime_resume(struct device *dev)
+static int __maybe_unused msdc_runtime_resume(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
struct msdc_host *host = mmc_priv(mmc);
@@ -2681,11 +2681,28 @@ static int msdc_runtime_resume(struct device *dev)
msdc_restore_reg(host);
return 0;
}
-#endif
+
+static int __maybe_unused msdc_suspend(struct device *dev)
+{
+ struct mmc_host *mmc = dev_get_drvdata(dev);
+ int ret;
+
+ if (mmc->caps2 & MMC_CAP2_CQE) {
+ ret = cqhci_suspend(mmc);
+ if (ret)
+ return ret;
+ }
+
+ return pm_runtime_force_suspend(dev);
+}
+
+static int __maybe_unused msdc_resume(struct device *dev)
+{
+ return pm_runtime_force_resume(dev);
+}
static const struct dev_pm_ops msdc_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(msdc_suspend, msdc_resume)
SET_RUNTIME_PM_OPS(msdc_runtime_suspend, msdc_runtime_resume, NULL)
};
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index d25a4b50c2f3..3b8d456e857d 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -1186,16 +1186,19 @@ static struct sdhci_arasan_of_data sdhci_arasan_versal_data = {
static struct sdhci_arasan_of_data intel_keembay_emmc_data = {
.soc_ctl_map = &intel_keembay_soc_ctl_map,
.pdata = &sdhci_keembay_emmc_pdata,
+ .clk_ops = &arasan_clk_ops,
};
static struct sdhci_arasan_of_data intel_keembay_sd_data = {
.soc_ctl_map = &intel_keembay_soc_ctl_map,
.pdata = &sdhci_keembay_sd_pdata,
+ .clk_ops = &arasan_clk_ops,
};
static struct sdhci_arasan_of_data intel_keembay_sdio_data = {
.soc_ctl_map = &intel_keembay_soc_ctl_map,
.pdata = &sdhci_keembay_sdio_pdata,
+ .clk_ops = &arasan_clk_ops,
};
static const struct of_device_id sdhci_arasan_of_match[] = {
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index cb4149fd12e0..ac4e7874a3f1 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -927,9 +927,9 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
switch (ios->power_mode) {
case MMC_POWER_OFF:
tmio_mmc_power_off(host);
- /* Downgrade ensures a sane state for tuning HW (e.g. SCC) */
- if (host->mmc->ops->hs400_downgrade)
- host->mmc->ops->hs400_downgrade(host->mmc);
+ /* For R-Car Gen2+, we need to reset SDHI specific SCC */
+ if (host->pdata->flags & TMIO_MMC_MIN_RCAR2)
+ host->reset(host);
host->set_clock(host, 0);
break;
case MMC_POWER_UP:
diff --git a/drivers/mtd/nand/raw/ams-delta.c b/drivers/mtd/nand/raw/ams-delta.c
index 0c352b39ad4b..ff1697f899ba 100644
--- a/drivers/mtd/nand/raw/ams-delta.c
+++ b/drivers/mtd/nand/raw/ams-delta.c
@@ -218,7 +218,9 @@ static int gpio_nand_setup_interface(struct nand_chip *this, int csline,
static int gpio_nand_attach_chip(struct nand_chip *chip)
{
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
- chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+ if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
return 0;
}
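
The same two-line change is repeated for the other raw NAND drivers below (au1550nd, gpio, mpc5121_nfc, orion, pasemi, plat_nand, socrates and xway): Hamming becomes a fallback that is applied only while chip->ecc.algo is still NAND_ECC_ALGO_UNKNOWN, so a software ECC algorithm chosen elsewhere, for instance via device tree, is no longer overwritten. A tiny sketch of the set-a-default-only-if-unset pattern, with invented enum names standing in for the NAND identifiers:

    #include <stdio.h>

    /* Invented stand-ins for the NAND ECC algorithm identifiers. */
    enum ecc_algo { ECC_ALGO_UNKNOWN = 0, ECC_ALGO_HAMMING, ECC_ALGO_BCH };

    struct chip_cfg { enum ecc_algo algo; };

    /* Apply the driver default only when the caller left the field unset. */
    static void attach_chip(struct chip_cfg *chip)
    {
        if (chip->algo == ECC_ALGO_UNKNOWN)
            chip->algo = ECC_ALGO_HAMMING;
    }

    int main(void)
    {
        struct chip_cfg from_dt = { .algo = ECC_ALGO_BCH };      /* user choice kept */
        struct chip_cfg blank   = { .algo = ECC_ALGO_UNKNOWN };  /* falls back */

        attach_chip(&from_dt);
        attach_chip(&blank);

        printf("dt: %d, blank: %d\n", from_dt.algo, blank.algo);
        return 0;
    }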
diff --git a/drivers/mtd/nand/raw/au1550nd.c b/drivers/mtd/nand/raw/au1550nd.c
index 7892022bd6dd..7b6b354f2d39 100644
--- a/drivers/mtd/nand/raw/au1550nd.c
+++ b/drivers/mtd/nand/raw/au1550nd.c
@@ -239,7 +239,9 @@ static int au1550nd_exec_op(struct nand_chip *this,
static int au1550nd_attach_chip(struct nand_chip *chip)
{
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
- chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+ if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
return 0;
}
diff --git a/drivers/mtd/nand/raw/gpio.c b/drivers/mtd/nand/raw/gpio.c
index eb03b8cea1cb..fb7a086de35e 100644
--- a/drivers/mtd/nand/raw/gpio.c
+++ b/drivers/mtd/nand/raw/gpio.c
@@ -164,7 +164,9 @@ static int gpio_nand_exec_op(struct nand_chip *chip,
static int gpio_nand_attach_chip(struct nand_chip *chip)
{
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
- chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+ if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
return 0;
}
diff --git a/drivers/mtd/nand/raw/mpc5121_nfc.c b/drivers/mtd/nand/raw/mpc5121_nfc.c
index fb4c0b11689f..bcd4a556c959 100644
--- a/drivers/mtd/nand/raw/mpc5121_nfc.c
+++ b/drivers/mtd/nand/raw/mpc5121_nfc.c
@@ -606,7 +606,9 @@ static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd)
static int mpc5121_nfc_attach_chip(struct nand_chip *chip)
{
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
- chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+ if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
return 0;
}
diff --git a/drivers/mtd/nand/raw/orion_nand.c b/drivers/mtd/nand/raw/orion_nand.c
index e3bb65fd3ab2..66211c9311d2 100644
--- a/drivers/mtd/nand/raw/orion_nand.c
+++ b/drivers/mtd/nand/raw/orion_nand.c
@@ -86,7 +86,9 @@ static void orion_nand_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
static int orion_nand_attach_chip(struct nand_chip *chip)
{
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
- chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+ if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
return 0;
}
diff --git a/drivers/mtd/nand/raw/pasemi_nand.c b/drivers/mtd/nand/raw/pasemi_nand.c
index 4dfff34800f4..68c08772d7c2 100644
--- a/drivers/mtd/nand/raw/pasemi_nand.c
+++ b/drivers/mtd/nand/raw/pasemi_nand.c
@@ -77,7 +77,9 @@ static int pasemi_device_ready(struct nand_chip *chip)
static int pasemi_attach_chip(struct nand_chip *chip)
{
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
- chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+ if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
return 0;
}
diff --git a/drivers/mtd/nand/raw/plat_nand.c b/drivers/mtd/nand/raw/plat_nand.c
index 93d9f1694dc1..7711e1020c21 100644
--- a/drivers/mtd/nand/raw/plat_nand.c
+++ b/drivers/mtd/nand/raw/plat_nand.c
@@ -22,7 +22,9 @@ struct plat_nand_data {
static int plat_nand_attach_chip(struct nand_chip *chip)
{
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
- chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+ if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
return 0;
}
diff --git a/drivers/mtd/nand/raw/socrates_nand.c b/drivers/mtd/nand/raw/socrates_nand.c
index 107208311987..70f8305c9b6e 100644
--- a/drivers/mtd/nand/raw/socrates_nand.c
+++ b/drivers/mtd/nand/raw/socrates_nand.c
@@ -120,7 +120,9 @@ static int socrates_nand_device_ready(struct nand_chip *nand_chip)
static int socrates_attach_chip(struct nand_chip *chip)
{
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
- chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+ if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
return 0;
}
diff --git a/drivers/mtd/nand/raw/xway_nand.c b/drivers/mtd/nand/raw/xway_nand.c
index efc5bf5434e0..26751976e502 100644
--- a/drivers/mtd/nand/raw/xway_nand.c
+++ b/drivers/mtd/nand/raw/xway_nand.c
@@ -149,7 +149,9 @@ static void xway_write_buf(struct nand_chip *chip, const u_char *buf, int len)
static int xway_attach_chip(struct nand_chip *chip)
{
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
- chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+ if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
return 0;
}
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 9abfaae1c6f7..a4e4e15f574d 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -745,6 +745,19 @@ const struct bond_option *bond_opt_get(unsigned int option)
return &bond_opts[option];
}
+static void bond_set_xfrm_features(struct net_device *bond_dev, u64 mode)
+{
+ if (!IS_ENABLED(CONFIG_XFRM_OFFLOAD))
+ return;
+
+ if (mode == BOND_MODE_ACTIVEBACKUP)
+ bond_dev->wanted_features |= BOND_XFRM_FEATURES;
+ else
+ bond_dev->wanted_features &= ~BOND_XFRM_FEATURES;
+
+ netdev_update_features(bond_dev);
+}
+
static int bond_option_mode_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
@@ -767,13 +780,8 @@ static int bond_option_mode_set(struct bonding *bond,
if (newval->value == BOND_MODE_ALB)
bond->params.tlb_dynamic_lb = 1;
-#ifdef CONFIG_XFRM_OFFLOAD
- if (newval->value == BOND_MODE_ACTIVEBACKUP)
- bond->dev->wanted_features |= BOND_XFRM_FEATURES;
- else
- bond->dev->wanted_features &= ~BOND_XFRM_FEATURES;
- netdev_change_features(bond->dev);
-#endif /* CONFIG_XFRM_OFFLOAD */
+ if (bond->dev->reg_state == NETREG_REGISTERED)
+ bond_set_xfrm_features(bond->dev, newval->value);
/* don't cache arp_validate between modes */
bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
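
Two things change here: the feature toggling moves into a helper that is compiled unconditionally and gated at run time by IS_ENABLED(CONFIG_XFRM_OFFLOAD) rather than an #ifdef block, and the helper is only invoked once the bonding device is actually registered. A rough userspace sketch of the IS_ENABLED idiom, using a plain constant as a stand-in for the kernel's config machinery:

    #include <stdio.h>

    /* Simplified stand-in for the kernel's IS_ENABLED(CONFIG_XFRM_OFFLOAD):
     * a plain compile-time constant.  Flip it to 0 and the compiler still
     * parses and type-checks the function body, then optimises it away. */
    #define XFRM_OFFLOAD_ENABLED 1

    #define XFRM_FEATURES (1ull << 5)   /* illustrative feature bit */

    static void set_xfrm_features(unsigned long long *wanted, int active_backup)
    {
        if (!XFRM_OFFLOAD_ENABLED)
            return;

        if (active_backup)
            *wanted |= XFRM_FEATURES;
        else
            *wanted &= ~XFRM_FEATURES;
    }

    int main(void)
    {
        unsigned long long wanted = 0;

        set_xfrm_features(&wanted, 1);   /* active-backup: feature requested */
        printf("active-backup: %#llx\n", wanted);

        set_xfrm_features(&wanted, 0);   /* any other mode: feature dropped */
        printf("other mode   : %#llx\n", wanted);
        return 0;
    }

The chcr_ktls hunk further down in this series applies the same idiom to its CONFIG_IPV6 guards.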
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 03a68bb486fd..40070c930202 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -382,8 +382,13 @@ static int softing_netdev_open(struct net_device *ndev)
/* check or determine and set bittime */
ret = open_candev(ndev);
- if (!ret)
- ret = softing_startstop(ndev, 1);
+ if (ret)
+ return ret;
+
+ ret = softing_startstop(ndev, 1);
+ if (ret < 0)
+ close_candev(ndev);
+
return ret;
}
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index ada75fa15861..7dc230677b78 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -588,7 +588,6 @@ static int felix_setup(struct dsa_switch *ds)
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
int port, err;
- int tc;
err = felix_init_structs(felix, ds->num_ports);
if (err)
@@ -627,12 +626,6 @@ static int felix_setup(struct dsa_switch *ds)
ocelot_write_rix(ocelot,
ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports, 0)),
ANA_PGID_PGID, PGID_UC);
- /* Setup the per-traffic class flooding PGIDs */
- for (tc = 0; tc < FELIX_NUM_TC; tc++)
- ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) |
- ANA_FLOODING_FLD_BROADCAST(PGID_MC) |
- ANA_FLOODING_FLD_UNICAST(PGID_UC),
- ANA_FLOODING, tc);
ds->mtu_enforcement_ingress = true;
ds->configure_vlan_while_not_filtering = true;
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 3e925b8d5306..2e5bbdca5ea4 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -1429,6 +1429,7 @@ static int felix_pci_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, felix);
ocelot = &felix->ocelot;
ocelot->dev = &pdev->dev;
+ ocelot->num_flooding_pgids = FELIX_NUM_TC;
felix->info = &felix_info_vsc9959;
felix->switch_base = pci_resource_start(pdev,
felix->info->switch_pci_bar);
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index 1d420c4a2f0f..ebbaf6817ec8 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -1210,6 +1210,7 @@ static int seville_probe(struct platform_device *pdev)
ocelot = &felix->ocelot;
ocelot->dev = &pdev->dev;
+ ocelot->num_flooding_pgids = 1;
felix->info = &seville_info_vsc9953;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/net/ethernet/agere/Kconfig b/drivers/net/ethernet/agere/Kconfig
index d92516ae59cc..9cd750184947 100644
--- a/drivers/net/ethernet/agere/Kconfig
+++ b/drivers/net/ethernet/agere/Kconfig
@@ -21,6 +21,7 @@ config ET131X
tristate "Agere ET-1310 Gigabit Ethernet support"
depends on PCI
select PHYLIB
+ select CRC32
help
This driver supports Agere ET-1310 ethernet adapters.
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 85858163bac5..e432a68ac520 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -23,6 +23,7 @@ config MACB
tristate "Cadence MACB/GEM support"
depends on HAS_DMA && COMMON_CLK
select PHYLINK
+ select CRC32
help
The Cadence MACB ethernet interface is found on many Atmel AT32 and
AT91 parts. This driver also supports the Cadence GEM (Gigabit
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index 7f90b828d159..1b7e8c91b541 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -987,9 +987,7 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
struct fw_eth_tx_pkt_wr *wr;
struct cpl_tx_pkt_core *cpl;
u32 ctrl, iplen, maclen;
-#if IS_ENABLED(CONFIG_IPV6)
struct ipv6hdr *ip6;
-#endif
unsigned int ndesc;
struct tcphdr *tcp;
int len16, pktlen;
@@ -1043,17 +1041,15 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
cpl->len = htons(pktlen);
memcpy(buf, skb->data, pktlen);
- if (tx_info->ip_family == AF_INET) {
+ if (!IS_ENABLED(CONFIG_IPV6) || tx_info->ip_family == AF_INET) {
/* we need to correct ip header len */
ip = (struct iphdr *)(buf + maclen);
ip->tot_len = htons(pktlen - maclen);
cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP);
-#if IS_ENABLED(CONFIG_IPV6)
} else {
ip6 = (struct ipv6hdr *)(buf + maclen);
ip6->payload_len = htons(pktlen - maclen - iplen);
cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP6);
-#endif
}
cntrl1 |= T6_TXPKT_ETHHDR_LEN_V(maclen - ETH_HLEN) |
diff --git a/drivers/net/ethernet/faraday/Kconfig b/drivers/net/ethernet/faraday/Kconfig
index c2677ec0564d..3d1e9a302148 100644
--- a/drivers/net/ethernet/faraday/Kconfig
+++ b/drivers/net/ethernet/faraday/Kconfig
@@ -33,6 +33,7 @@ config FTGMAC100
depends on !64BIT || BROKEN
select PHYLIB
select MDIO_ASPEED if MACH_ASPEED_G6
+ select CRC32
help
This driver supports the FTGMAC100 Gigabit Ethernet controller
from Faraday. It is used on Faraday A369, Andes AG102 and some
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index a1d53ddf1593..3f9175bdce77 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -25,6 +25,7 @@ config FEC
depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
ARCH_MXC || SOC_IMX28 || COMPILE_TEST)
default ARCH_MXC || SOC_IMX28 if ARM
+ select CRC32
select PHYLIB
imply PTP_1588_CLOCK
help
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
index 90cd243070d7..828c177df03d 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -269,6 +269,7 @@ static int dpaa2_pcs_create(struct dpaa2_mac *mac,
if (!of_device_is_available(node)) {
netdev_err(mac->net_dev, "pcs-handle node not available\n");
+ of_node_put(node);
return -ENODEV;
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index 8ed1ebd5a183..89e558135432 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -143,8 +143,8 @@ static const struct {
{ ENETC_PM0_R255, "MAC rx 128-255 byte packets" },
{ ENETC_PM0_R511, "MAC rx 256-511 byte packets" },
{ ENETC_PM0_R1023, "MAC rx 512-1023 byte packets" },
- { ENETC_PM0_R1518, "MAC rx 1024-1518 byte packets" },
- { ENETC_PM0_R1519X, "MAC rx 1519 to max-octet packets" },
+ { ENETC_PM0_R1522, "MAC rx 1024-1522 byte packets" },
+ { ENETC_PM0_R1523X, "MAC rx 1523 to max-octet packets" },
{ ENETC_PM0_ROVR, "MAC rx oversized packets" },
{ ENETC_PM0_RJBR, "MAC rx jabber packets" },
{ ENETC_PM0_RFRG, "MAC rx fragment packets" },
@@ -163,9 +163,13 @@ static const struct {
{ ENETC_PM0_TBCA, "MAC tx broadcast frames" },
{ ENETC_PM0_TPKT, "MAC tx packets" },
{ ENETC_PM0_TUND, "MAC tx undersized packets" },
+ { ENETC_PM0_T64, "MAC tx 64 byte packets" },
{ ENETC_PM0_T127, "MAC tx 65-127 byte packets" },
+ { ENETC_PM0_T255, "MAC tx 128-255 byte packets" },
+ { ENETC_PM0_T511, "MAC tx 256-511 byte packets" },
{ ENETC_PM0_T1023, "MAC tx 512-1023 byte packets" },
- { ENETC_PM0_T1518, "MAC tx 1024-1518 byte packets" },
+ { ENETC_PM0_T1522, "MAC tx 1024-1522 byte packets" },
+ { ENETC_PM0_T1523X, "MAC tx 1523 to max-octet packets" },
{ ENETC_PM0_TCNP, "MAC tx control packets" },
{ ENETC_PM0_TDFR, "MAC tx deferred packets" },
{ ENETC_PM0_TMCOL, "MAC tx multiple collisions" },
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index d18f439f2b81..e1e950d48c92 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -267,8 +267,8 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PM0_R255 0x8180
#define ENETC_PM0_R511 0x8188
#define ENETC_PM0_R1023 0x8190
-#define ENETC_PM0_R1518 0x8198
-#define ENETC_PM0_R1519X 0x81A0
+#define ENETC_PM0_R1522 0x8198
+#define ENETC_PM0_R1523X 0x81A0
#define ENETC_PM0_ROVR 0x81A8
#define ENETC_PM0_RJBR 0x81B0
#define ENETC_PM0_RFRG 0x81B8
@@ -287,9 +287,13 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PM0_TBCA 0x8250
#define ENETC_PM0_TPKT 0x8260
#define ENETC_PM0_TUND 0x8268
+#define ENETC_PM0_T64 0x8270
#define ENETC_PM0_T127 0x8278
+#define ENETC_PM0_T255 0x8280
+#define ENETC_PM0_T511 0x8288
#define ENETC_PM0_T1023 0x8290
-#define ENETC_PM0_T1518 0x8298
+#define ENETC_PM0_T1522 0x8298
+#define ENETC_PM0_T1523X 0x82A0
#define ENETC_PM0_TCNP 0x82C0
#define ENETC_PM0_TDFR 0x82D0
#define ENETC_PM0_TMCOL 0x82D8
diff --git a/drivers/net/ethernet/freescale/fman/Kconfig b/drivers/net/ethernet/freescale/fman/Kconfig
index 34150182cc35..48bf8088795d 100644
--- a/drivers/net/ethernet/freescale/fman/Kconfig
+++ b/drivers/net/ethernet/freescale/fman/Kconfig
@@ -4,6 +4,7 @@ config FSL_FMAN
depends on FSL_SOC || ARCH_LAYERSCAPE || COMPILE_TEST
select GENERIC_ALLOCATOR
select PHYLIB
+ select CRC32
default n
help
Freescale Data-Path Acceleration Architecture Frame Manager
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
index a9066e6ff697..ca2ab6cf84d9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
@@ -35,8 +35,6 @@
#define HCLGE_DBG_DFX_SSU_2_OFFSET 12
-#pragma pack(1)
-
struct hclge_qos_pri_map_cmd {
u8 pri0_tc : 4,
pri1_tc : 4;
@@ -85,8 +83,6 @@ struct hclge_dbg_reg_type_info {
struct hclge_dbg_reg_common_msg reg_msg;
};
-#pragma pack()
-
static const struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = {
{false, "Reserved"},
{true, "BP_CPU_STATE"},
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index b30f00891c03..128ab6898070 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6475,13 +6475,13 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
/* Ungate PGCB clock */
mac_data = er32(FEXTNVM9);
- mac_data |= BIT(28);
+ mac_data &= ~BIT(28);
ew32(FEXTNVM9, mac_data);
/* Enable K1 off to enable mPHY Power Gating */
mac_data = er32(FEXTNVM6);
mac_data |= BIT(31);
- ew32(FEXTNVM12, mac_data);
+ ew32(FEXTNVM6, mac_data);
/* Enable mPHY power gating for any link and speed */
mac_data = er32(FEXTNVM8);
@@ -6525,11 +6525,11 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
/* Disable K1 off */
mac_data = er32(FEXTNVM6);
mac_data &= ~BIT(31);
- ew32(FEXTNVM12, mac_data);
+ ew32(FEXTNVM6, mac_data);
/* Disable Ungate PGCB clock */
mac_data = er32(FEXTNVM9);
- mac_data &= ~BIT(28);
+ mac_data |= BIT(28);
ew32(FEXTNVM9, mac_data);
/* Cancel not waking from dynamic
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 9f73cd7aee09..4aca637d4a23 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1861,6 +1861,7 @@ static inline bool i40e_page_is_reusable(struct page *page)
* the adapter for another receive
*
* @rx_buffer: buffer containing the page
+ * @rx_buffer_pgcnt: buffer page refcount pre xdp_do_redirect() call
*
* If page is reusable, rx_buffer->page_offset is adjusted to point to
* an unused region in the page.
@@ -1883,7 +1884,8 @@ static inline bool i40e_page_is_reusable(struct page *page)
*
* In either case, if the page is reusable its refcount is increased.
**/
-static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
+static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
+ int rx_buffer_pgcnt)
{
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
@@ -1894,7 +1896,7 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely((page_count(page) - pagecnt_bias) > 1))
+ if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
return false;
#else
#define I40E_LAST_OFFSET \
@@ -1953,16 +1955,24 @@ static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
* i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
* @rx_ring: rx descriptor ring to transact packets on
* @size: size of buffer to add to skb
+ * @rx_buffer_pgcnt: buffer page refcount
*
* This function will pull an Rx buffer from the ring and synchronize it
* for use by the CPU.
*/
static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
- const unsigned int size)
+ const unsigned int size,
+ int *rx_buffer_pgcnt)
{
struct i40e_rx_buffer *rx_buffer;
rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
+ *rx_buffer_pgcnt =
+#if (PAGE_SIZE < 8192)
+ page_count(rx_buffer->page);
+#else
+ 0;
+#endif
prefetch_page_address(rx_buffer->page);
/* we are reusing so sync this buffer for CPU use */
@@ -2113,14 +2123,16 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
* i40e_put_rx_buffer - Clean up used buffer and either recycle or free
* @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: rx buffer to pull data from
+ * @rx_buffer_pgcnt: rx buffer page refcount pre xdp_do_redirect() call
*
* This function will clean up the contents of the rx_buffer. It will
* either recycle the buffer or unmap it and free the associated resources.
*/
static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
- struct i40e_rx_buffer *rx_buffer)
+ struct i40e_rx_buffer *rx_buffer,
+ int rx_buffer_pgcnt)
{
- if (i40e_can_reuse_rx_page(rx_buffer)) {
+ if (i40e_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
/* hand second half of page back to the ring */
i40e_reuse_rx_page(rx_ring, rx_buffer);
} else {
@@ -2347,6 +2359,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
while (likely(total_rx_packets < (unsigned int)budget)) {
struct i40e_rx_buffer *rx_buffer;
union i40e_rx_desc *rx_desc;
+ int rx_buffer_pgcnt;
unsigned int size;
u64 qword;
@@ -2389,7 +2402,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
break;
i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
- rx_buffer = i40e_get_rx_buffer(rx_ring, size);
+ rx_buffer = i40e_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt);
/* retrieve a buffer from the ring */
if (!skb) {
@@ -2432,7 +2445,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
break;
}
- i40e_put_rx_buffer(rx_ring, rx_buffer);
+ i40e_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt);
cleaned_count++;
if (i40e_is_non_eop(rx_ring, rx_desc, skb))
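
The new rx_buffer_pgcnt parameter threads a snapshot of page_count(), taken before the frame is handed to the XDP/stack path (as the added kernel-doc notes, "pre xdp_do_redirect() call"), into the reuse heuristic; the matching ice and ixgbe hunks below do the same. The apparent intent is that a page reference dropped during that handoff cannot make the buffer look exclusively owned too early and get recycled while the data may still be in flight. A compact sketch of the bookkeeping, with invented field names and without the PAGE_SIZE special cases:

    #include <stdio.h>

    /* Invented, simplified stand-ins for the driver's Rx buffer bookkeeping. */
    struct rx_buf {
        int page_refcount;   /* what page_count() would report right now */
        int pagecnt_bias;    /* references the driver handed out to itself */
    };

    /* Reuse heuristic from the hunks above, minus the PAGE_SIZE special
     * cases: recycle only if nobody besides the driver held the page at
     * the time the snapshot was taken. */
    static int can_reuse_rx_page(const struct rx_buf *buf, int pgcnt_snapshot)
    {
        return (pgcnt_snapshot - buf->pagecnt_bias) <= 1;
    }

    int main(void)
    {
        struct rx_buf buf = { .page_refcount = 3, .pagecnt_bias = 1 };

        /* Snapshot taken before the frame is handed to the redirect path. */
        int snapshot = buf.page_refcount;

        /* A reference may be dropped during the handoff ... */
        buf.page_refcount = 2;

        /* ... but the decision still uses the earlier snapshot, so the page
         * is conservatively not recycled. */
        printf("reuse with snapshot  : %s\n",
               can_reuse_rx_page(&buf, snapshot) ? "yes" : "no");
        printf("reuse with live count: %s\n",
               can_reuse_rx_page(&buf, buf.page_refcount) ? "yes" : "no");
        return 0;
    }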
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 77d5eae6b4c2..a2d0aad8cfdd 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -762,13 +762,15 @@ ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
/**
* ice_can_reuse_rx_page - Determine if page can be reused for another Rx
* @rx_buf: buffer containing the page
+ * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call
*
* If page is reusable, we have a green light for calling ice_reuse_rx_page,
* which will assign the current buffer to the buffer that next_to_alloc is
* pointing to; otherwise, the DMA mapping needs to be destroyed and
* page freed
*/
-static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
+static bool
+ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
{
unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
struct page *page = rx_buf->page;
@@ -779,7 +781,7 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely((page_count(page) - pagecnt_bias) > 1))
+ if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
return false;
#else
#define ICE_LAST_OFFSET \
@@ -864,17 +866,24 @@ ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
* @rx_ring: Rx descriptor ring to transact packets on
* @skb: skb to be used
* @size: size of buffer to add to skb
+ * @rx_buf_pgcnt: rx_buf page refcount
*
* This function will pull an Rx buffer from the ring and synchronize it
* for use by the CPU.
*/
static struct ice_rx_buf *
ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
- const unsigned int size)
+ const unsigned int size, int *rx_buf_pgcnt)
{
struct ice_rx_buf *rx_buf;
rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
+ *rx_buf_pgcnt =
+#if (PAGE_SIZE < 8192)
+ page_count(rx_buf->page);
+#else
+ 0;
+#endif
prefetchw(rx_buf->page);
*skb = rx_buf->skb;
@@ -1006,12 +1015,15 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
* ice_put_rx_buf - Clean up used buffer and either recycle or free
* @rx_ring: Rx descriptor ring to transact packets on
* @rx_buf: Rx buffer to pull data from
+ * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect()
*
* This function will update next_to_clean and then clean up the contents
* of the rx_buf. It will either recycle the buffer or unmap it and free
* the associated resources.
*/
-static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
+static void
+ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ int rx_buf_pgcnt)
{
u16 ntc = rx_ring->next_to_clean + 1;
@@ -1022,7 +1034,7 @@ static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
if (!rx_buf)
return;
- if (ice_can_reuse_rx_page(rx_buf)) {
+ if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) {
/* hand second half of page back to the ring */
ice_reuse_rx_page(rx_ring, rx_buf);
} else {
@@ -1097,6 +1109,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
struct sk_buff *skb;
unsigned int size;
u16 stat_err_bits;
+ int rx_buf_pgcnt;
u16 vlan_tag = 0;
u8 rx_ptype;
@@ -1119,7 +1132,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
dma_rmb();
if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
- ice_put_rx_buf(rx_ring, NULL);
+ ice_put_rx_buf(rx_ring, NULL, 0);
cleaned_count++;
continue;
}
@@ -1128,7 +1141,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
ICE_RX_FLX_DESC_PKT_LEN_M;
/* retrieve a buffer from the ring */
- rx_buf = ice_get_rx_buf(rx_ring, &skb, size);
+ rx_buf = ice_get_rx_buf(rx_ring, &skb, size, &rx_buf_pgcnt);
if (!size) {
xdp.data = NULL;
@@ -1168,7 +1181,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
total_rx_pkts++;
cleaned_count++;
- ice_put_rx_buf(rx_ring, rx_buf);
+ ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
continue;
construct_skb:
if (skb) {
@@ -1187,7 +1200,7 @@ construct_skb:
break;
}
- ice_put_rx_buf(rx_ring, rx_buf);
+ ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
cleaned_count++;
/* skip if it is NOP desc */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 0286d2fceee4..aaa954aae574 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -138,6 +138,8 @@ struct vf_mac_filter {
/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
+#define IGB_ETH_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
+
/* Supported Rx Buffer Sizes */
#define IGB_RXBUFFER_256 256
#define IGB_RXBUFFER_1536 1536
@@ -247,6 +249,9 @@ enum igb_tx_flags {
#define IGB_SFF_ADDRESSING_MODE 0x4
#define IGB_SFF_8472_UNSUP 0x00
+/* TX resources are shared between XDP and netstack
+ * and we need to tag the buffer type to distinguish them
+ */
enum igb_tx_buf_type {
IGB_TYPE_SKB = 0,
IGB_TYPE_XDP,
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 6a4ef4934fcf..03f78fdb0dcd 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2824,20 +2824,25 @@ static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
}
}
-static int igb_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
+static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf)
{
- int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ int i, frame_size = dev->mtu + IGB_ETH_PKT_HDR_PAD;
struct igb_adapter *adapter = netdev_priv(dev);
+ struct bpf_prog *prog = bpf->prog, *old_prog;
bool running = netif_running(dev);
- struct bpf_prog *old_prog;
bool need_reset;
/* verify igb ring attributes are sufficient for XDP */
for (i = 0; i < adapter->num_rx_queues; i++) {
struct igb_ring *ring = adapter->rx_ring[i];
- if (frame_size > igb_rx_bufsz(ring))
+ if (frame_size > igb_rx_bufsz(ring)) {
+ NL_SET_ERR_MSG_MOD(bpf->extack,
+ "The RX buffer size is too small for the frame size");
+ netdev_warn(dev, "XDP RX buffer size %d is too small for the frame size %d\n",
+ igb_rx_bufsz(ring), frame_size);
return -EINVAL;
+ }
}
old_prog = xchg(&adapter->xdp_prog, prog);
@@ -2869,7 +2874,7 @@ static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
switch (xdp->command) {
case XDP_SETUP_PROG:
- return igb_xdp_setup(dev, xdp->prog);
+ return igb_xdp_setup(dev, xdp);
default:
return -EINVAL;
}
@@ -2910,10 +2915,12 @@ static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)
*/
tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
if (unlikely(!tx_ring))
- return -ENXIO;
+ return IGB_XDP_CONSUMED;
nq = txring_txq(tx_ring);
__netif_tx_lock(nq, cpu);
+ /* Avoid transmit queue timeout since we share it with the slow path */
+ nq->trans_start = jiffies;
ret = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
__netif_tx_unlock(nq);
@@ -2946,6 +2953,9 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
nq = txring_txq(tx_ring);
__netif_tx_lock(nq, cpu);
+ /* Avoid transmit queue timeout since we share it with the slow path */
+ nq->trans_start = jiffies;
+
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
int err;
@@ -3950,8 +3960,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
/* set default work limits */
adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
- adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
- VLAN_HLEN;
+ adapter->max_frame_size = netdev->mtu + IGB_ETH_PKT_HDR_PAD;
adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
spin_lock_init(&adapter->nfc_lock);
@@ -6491,7 +6500,7 @@ static void igb_get_stats64(struct net_device *netdev,
static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ int max_frame = new_mtu + IGB_ETH_PKT_HDR_PAD;
if (adapter->xdp_prog) {
int i;
@@ -6500,7 +6509,9 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
struct igb_ring *ring = adapter->rx_ring[i];
if (max_frame > igb_rx_bufsz(ring)) {
- netdev_warn(adapter->netdev, "Requested MTU size is not supported with XDP\n");
+ netdev_warn(adapter->netdev,
+ "Requested MTU size is not supported with XDP. Max frame size is %d\n",
+ max_frame);
return -EINVAL;
}
}
@@ -8351,6 +8362,7 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
SKB_DATA_ALIGN(xdp->data_end -
xdp->data_hard_start);
#endif
+ unsigned int metasize = xdp->data - xdp->data_meta;
struct sk_buff *skb;
/* prefetch first cache line of first page */
@@ -8365,6 +8377,9 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
skb_reserve(skb, xdp->data - xdp->data_hard_start);
__skb_put(skb, xdp->data_end - xdp->data);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
+
/* pull timestamp out of packet data */
if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
@@ -8771,7 +8786,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
rx_ring->skb = skb;
if (xdp_xmit & IGB_XDP_REDIR)
- xdp_do_flush_map();
+ xdp_do_flush();
if (xdp_xmit & IGB_XDP_TX) {
struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
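
IGB_ETH_PKT_HDR_PAD folds the MTU-to-frame-size arithmetic into one macro and, compared with the expression it replaces, reserves room for two VLAN tags instead of one. With the standard constants (14-byte Ethernet header, 4-byte FCS, 4-byte 802.1Q tag) a 1500-byte MTU therefore corresponds to a 1526-byte frame rather than 1522; a trivial check:

    #include <stdio.h>

    /* Standard Ethernet framing constants (same values as the kernel headers). */
    #define ETH_HLEN     14   /* destination + source MAC + ethertype */
    #define ETH_FCS_LEN   4   /* frame check sequence */
    #define VLAN_HLEN     4   /* one 802.1Q tag */

    /* Double VLAN accounting introduced by the hunk above. */
    #define ETH_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))

    int main(void)
    {
        int mtu = 1500;
        int old_pad = ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

        printf("old overhead: %d bytes -> frame %d\n", old_pad, mtu + old_pad);
        printf("new overhead: %d bytes -> frame %d\n",
               ETH_PKT_HDR_PAD, mtu + ETH_PKT_HDR_PAD);
        return 0;
    }

The nq->trans_start = jiffies updates in the same file address a separate issue: as the added comments note, the XDP transmit path shares its queue with the regular stack, and refreshing the queue timestamp keeps the netdev watchdog from reporting a spurious transmit timeout.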
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 50e6b8b6ba7b..393d1c2cd853 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1945,7 +1945,8 @@ static inline bool ixgbe_page_is_reserved(struct page *page)
return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
-static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
+static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer,
+ int rx_buffer_pgcnt)
{
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
@@ -1956,7 +1957,7 @@ static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
+ if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
return false;
#else
/* The last offset is a bit aggressive in that we assume the
@@ -2021,11 +2022,18 @@ static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff **skb,
- const unsigned int size)
+ const unsigned int size,
+ int *rx_buffer_pgcnt)
{
struct ixgbe_rx_buffer *rx_buffer;
rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+ *rx_buffer_pgcnt =
+#if (PAGE_SIZE < 8192)
+ page_count(rx_buffer->page);
+#else
+ 0;
+#endif
prefetchw(rx_buffer->page);
*skb = rx_buffer->skb;
@@ -2055,9 +2063,10 @@ skip_sync:
static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
struct ixgbe_rx_buffer *rx_buffer,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ int rx_buffer_pgcnt)
{
- if (ixgbe_can_reuse_rx_page(rx_buffer)) {
+ if (ixgbe_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
/* hand second half of page back to the ring */
ixgbe_reuse_rx_page(rx_ring, rx_buffer);
} else {
@@ -2303,6 +2312,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *rx_buffer;
struct sk_buff *skb;
+ int rx_buffer_pgcnt;
unsigned int size;
/* return some buffers to hardware, one at a time is too slow */
@@ -2322,7 +2332,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
*/
dma_rmb();
- rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size);
+ rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size, &rx_buffer_pgcnt);
/* retrieve a buffer from the ring */
if (!skb) {
@@ -2367,7 +2377,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
break;
}
- ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb);
+ ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt);
cleaned_count++;
/* place incomplete frames back on ring for completion */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 13bd58dafc6c..25dd903a3e92 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -313,8 +313,10 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
goto err_port_init;
}
- if (port->fp_id >= PRESTERA_MAC_ADDR_NUM_MAX)
+ if (port->fp_id >= PRESTERA_MAC_ADDR_NUM_MAX) {
+ err = -EINVAL;
goto err_port_init;
+ }
/* firmware requires that port's MAC address consist of the first
* 5 bytes of the base MAC address
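
The added err = -EINVAL closes a hole in the goto-cleanup flow: the branch used to jump to err_port_init without setting an error code, so the function could fall through to the cleanup path still carrying a success value. A small sketch of the convention that every bail-out branch must assign err before jumping to the shared cleanup label; the helper and values below are made up for illustration:

    #include <stdio.h>

    static int create_port(int id, int max_ids)
    {
        int err = 0;

        if (id >= max_ids) {
            err = -22;          /* -EINVAL; previously this branch left err untouched */
            goto err_port_init;
        }

        return 0;

    err_port_init:
        printf("cleanup for port %d, returning %d\n", id, err);
        return err;
    }

    int main(void)
    {
        printf("ok : %d\n", create_port(1, 4));
        printf("bad: %d\n", create_port(9, 4));
        return 0;
    }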
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 157f7eef92f1..32aad4d32b88 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1378,8 +1378,10 @@ static void mlx4_en_tx_timeout(struct net_device *dev, unsigned int txqueue)
tx_ring->cons, tx_ring->prod);
priv->port_stats.tx_timeout++;
- en_dbg(DRV, priv, "Scheduling watchdog\n");
- queue_work(mdev->workqueue, &priv->watchdog_task);
+ if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state)) {
+ en_dbg(DRV, priv, "Scheduling port restart\n");
+ queue_work(mdev->workqueue, &priv->restart_task);
+ }
}
@@ -1733,6 +1735,7 @@ int mlx4_en_start_port(struct net_device *dev)
mlx4_en_deactivate_cq(priv, cq);
goto tx_err;
}
+ clear_bit(MLX4_EN_TX_RING_STATE_RECOVERING, &tx_ring->state);
if (t != TX_XDP) {
tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
tx_ring->recycle_ring = NULL;
@@ -1829,6 +1832,7 @@ int mlx4_en_start_port(struct net_device *dev)
local_bh_enable();
}
+ clear_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state);
netif_tx_start_all_queues(dev);
netif_device_attach(dev);
@@ -1999,7 +2003,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
static void mlx4_en_restart(struct work_struct *work)
{
struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
- watchdog_task);
+ restart_task);
struct mlx4_en_dev *mdev = priv->mdev;
struct net_device *dev = priv->dev;
@@ -2376,7 +2380,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
if (netif_running(dev)) {
mutex_lock(&mdev->state_lock);
if (!mdev->device_up) {
- /* NIC is probably restarting - let watchdog task reset
+ /* NIC is probably restarting - let restart task reset
* the port */
en_dbg(DRV, priv, "Change MTU called with card down!?\n");
} else {
@@ -2385,7 +2389,9 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
if (err) {
en_err(priv, "Failed restarting port:%d\n",
priv->port);
- queue_work(mdev->workqueue, &priv->watchdog_task);
+ if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING,
+ &priv->state))
+ queue_work(mdev->workqueue, &priv->restart_task);
}
}
mutex_unlock(&mdev->state_lock);
@@ -2791,7 +2797,8 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
if (err) {
en_err(priv, "Failed starting port %d for XDP change\n",
priv->port);
- queue_work(mdev->workqueue, &priv->watchdog_task);
+ if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state))
+ queue_work(mdev->workqueue, &priv->restart_task);
}
}
@@ -3164,7 +3171,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
spin_lock_init(&priv->stats_lock);
INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
- INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
+ INIT_WORK(&priv->restart_task, mlx4_en_restart);
INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index b15ec32758a3..31b74bddb7cd 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -392,6 +392,35 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
return cnt;
}
+static void mlx4_en_handle_err_cqe(struct mlx4_en_priv *priv, struct mlx4_err_cqe *err_cqe,
+ u16 cqe_index, struct mlx4_en_tx_ring *ring)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_tx_info *tx_info;
+ struct mlx4_en_tx_desc *tx_desc;
+ u16 wqe_index;
+ int desc_size;
+
+ en_err(priv, "CQE error - cqn 0x%x, ci 0x%x, vendor syndrome: 0x%x syndrome: 0x%x\n",
+ ring->sp_cqn, cqe_index, err_cqe->vendor_err_syndrome, err_cqe->syndrome);
+ print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, err_cqe, sizeof(*err_cqe),
+ false);
+
+ wqe_index = be16_to_cpu(err_cqe->wqe_index) & ring->size_mask;
+ tx_info = &ring->tx_info[wqe_index];
+ desc_size = tx_info->nr_txbb << LOG_TXBB_SIZE;
+ en_err(priv, "Related WQE - qpn 0x%x, wqe index 0x%x, wqe size 0x%x\n", ring->qpn,
+ wqe_index, desc_size);
+ tx_desc = ring->buf + (wqe_index << LOG_TXBB_SIZE);
+ print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, tx_desc, desc_size, false);
+
+ if (test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state))
+ return;
+
+ en_err(priv, "Scheduling port restart\n");
+ queue_work(mdev->workqueue, &priv->restart_task);
+}
+
int mlx4_en_process_tx_cq(struct net_device *dev,
struct mlx4_en_cq *cq, int napi_budget)
{
@@ -438,13 +467,10 @@ int mlx4_en_process_tx_cq(struct net_device *dev,
dma_rmb();
if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
- MLX4_CQE_OPCODE_ERROR)) {
- struct mlx4_err_cqe *cqe_err = (struct mlx4_err_cqe *)cqe;
-
- en_err(priv, "CQE error - vendor syndrome: 0x%x syndrome: 0x%x\n",
- cqe_err->vendor_err_syndrome,
- cqe_err->syndrome);
- }
+ MLX4_CQE_OPCODE_ERROR))
+ if (!test_and_set_bit(MLX4_EN_TX_RING_STATE_RECOVERING, &ring->state))
+ mlx4_en_handle_err_cqe(priv, (struct mlx4_err_cqe *)cqe, index,
+ ring);
/* Skip over last polled CQE */
new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
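
Both failure paths in this driver (the transmit watchdog in en_netdev.c above and this error-CQE handler) now go through test_and_set_bit() on a RESTARTING state flag before queueing the restart work, so however many rings report errors at once, only the first caller actually schedules the task; mlx4_en_start_port() clears the bit once the port is back up. A minimal single-threaded sketch of the schedule-once idiom (the kernel primitive is atomic; plain C is used here only to show the control flow):

    #include <stdio.h>

    #define STATE_FLAG_RESTARTING 0

    /* Non-atomic stand-in for the kernel's test_and_set_bit(): returns the
     * old value of the bit and sets it.  The real helper is safe against
     * concurrent callers; this sketch only illustrates the control flow. */
    static int test_and_set_bit(int nr, unsigned long *addr)
    {
        unsigned long mask = 1UL << nr;
        int old = (*addr & mask) != 0;

        *addr |= mask;
        return old;
    }

    static void schedule_restart(unsigned long *state, const char *who)
    {
        if (test_and_set_bit(STATE_FLAG_RESTARTING, state)) {
            printf("%s: restart already pending, nothing to do\n", who);
            return;
        }
        printf("%s: queueing restart work\n", who);
    }

    int main(void)
    {
        unsigned long state = 0;

        schedule_restart(&state, "tx timeout");   /* queues the work */
        schedule_restart(&state, "error CQE");    /* sees the pending flag */
        return 0;
    }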
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 1c50d0f22199..17f2b1919378 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -251,6 +251,10 @@ struct mlx4_en_page_cache {
} buf[MLX4_EN_CACHE_SIZE];
};
+enum {
+ MLX4_EN_TX_RING_STATE_RECOVERING,
+};
+
struct mlx4_en_priv;
struct mlx4_en_tx_ring {
@@ -297,6 +301,7 @@ struct mlx4_en_tx_ring {
* Only queue_stopped might be used if BQL is not properly working.
*/
unsigned long queue_stopped;
+ unsigned long state;
struct mlx4_hwq_resources sp_wqres;
struct mlx4_qp sp_qp;
struct mlx4_qp_context sp_context;
@@ -510,6 +515,10 @@ struct mlx4_en_stats_bitmap {
struct mutex mutex; /* for mutual access to stats bitmap */
};
+enum {
+ MLX4_EN_STATE_FLAG_RESTARTING,
+};
+
struct mlx4_en_priv {
struct mlx4_en_dev *mdev;
struct mlx4_en_port_profile *prof;
@@ -575,7 +584,7 @@ struct mlx4_en_priv {
struct mlx4_en_cq *rx_cq[MAX_RX_RINGS];
struct mlx4_qp drop_qp;
struct work_struct rx_mode_task;
- struct work_struct watchdog_task;
+ struct work_struct restart_task;
struct work_struct linkstate_task;
struct delayed_work stats_task;
struct delayed_work service_task;
@@ -620,6 +629,7 @@ struct mlx4_en_priv {
u32 pflags;
u8 rss_key[MLX4_EN_RSS_KEY_SIZE];
u8 rss_hash_fn;
+ unsigned long state;
};
enum mlx4_en_wol {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 485478979b1a..6e4d7bb7fea2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -199,6 +199,7 @@ config MLX5_EN_TLS
config MLX5_SW_STEERING
bool "Mellanox Technologies software-managed steering"
depends on MLX5_CORE_EN && MLX5_ESWITCH
+ select CRC32
default y
help
Build support for software-managed steering in the NIC.
diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig
index 31f9a82dc113..d0f6dfe0dcf3 100644
--- a/drivers/net/ethernet/microchip/Kconfig
+++ b/drivers/net/ethernet/microchip/Kconfig
@@ -47,6 +47,7 @@ config LAN743X
depends on PCI
select PHYLIB
select CRC16
+ select CRC32
help
Support for the Microchip LAN743x PCI Express Gigabit Ethernet chip
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 2632fe2d2448..abea8dd2b0cb 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -1551,10 +1551,11 @@ int ocelot_init(struct ocelot *ocelot)
SYS_FRM_AGING_MAX_AGE(307692), SYS_FRM_AGING);
/* Setup flooding PGIDs */
- ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) |
- ANA_FLOODING_FLD_BROADCAST(PGID_MC) |
- ANA_FLOODING_FLD_UNICAST(PGID_UC),
- ANA_FLOODING, 0);
+ for (i = 0; i < ocelot->num_flooding_pgids; i++)
+ ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) |
+ ANA_FLOODING_FLD_BROADCAST(PGID_MC) |
+ ANA_FLOODING_FLD_UNICAST(PGID_UC),
+ ANA_FLOODING, i);
ocelot_write(ocelot, ANA_FLOODING_IPMC_FLD_MC6_DATA(PGID_MCIPV6) |
ANA_FLOODING_IPMC_FLD_MC6_CTRL(PGID_MC) |
ANA_FLOODING_IPMC_FLD_MC4_DATA(PGID_MCIPV4) |
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index dc00772950e5..1e7729421a82 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -1254,6 +1254,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
}
ocelot->num_phys_ports = of_get_child_count(ports);
+ ocelot->num_flooding_pgids = 1;
ocelot->vcap = vsc7514_vcap_props;
ocelot->inj_prefix = OCELOT_TAG_PREFIX_NONE;
diff --git a/drivers/net/ethernet/netronome/Kconfig b/drivers/net/ethernet/netronome/Kconfig
index d8b99d6a0356..b82758d5beed 100644
--- a/drivers/net/ethernet/netronome/Kconfig
+++ b/drivers/net/ethernet/netronome/Kconfig
@@ -22,6 +22,7 @@ config NFP
depends on VXLAN || VXLAN=n
depends on TLS && TLS_DEVICE || TLS_DEVICE=n
select NET_DEVLINK
+ select CRC32
help
This driver supports the Netronome(R) NFP4000/NFP6000 based
 cards working as an advanced Ethernet NIC. It works with both
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index b4acf2f41e84..f21fb573ea3e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -3562,9 +3562,6 @@ static int nfp_net_xdp_setup_drv(struct nfp_net *nn, struct netdev_bpf *bpf)
struct nfp_net_dp *dp;
int err;
- if (!xdp_attachment_flags_ok(&nn->xdp, bpf))
- return -EBUSY;
-
if (!prog == !nn->dp.xdp_prog) {
WRITE_ONCE(nn->dp.xdp_prog, prog);
xdp_attachment_setup(&nn->xdp, bpf);
@@ -3593,9 +3590,6 @@ static int nfp_net_xdp_setup_hw(struct nfp_net *nn, struct netdev_bpf *bpf)
{
int err;
- if (!xdp_attachment_flags_ok(&nn->xdp_hw, bpf))
- return -EBUSY;
-
err = nfp_app_xdp_offload(nn->app, nn, bpf->prog, bpf->extack);
if (err)
return err;
diff --git a/drivers/net/ethernet/nxp/Kconfig b/drivers/net/ethernet/nxp/Kconfig
index ee83a71c2509..c84997db828c 100644
--- a/drivers/net/ethernet/nxp/Kconfig
+++ b/drivers/net/ethernet/nxp/Kconfig
@@ -3,6 +3,7 @@ config LPC_ENET
tristate "NXP ethernet MAC on LPC devices"
depends on ARCH_LPC32XX || COMPILE_TEST
select PHYLIB
+ select CRC32
help
Say Y or M here if you want to use the NXP ethernet MAC included on
some NXP LPC devices. You can safely enable this option for LPC32xx
diff --git a/drivers/net/ethernet/rocker/Kconfig b/drivers/net/ethernet/rocker/Kconfig
index 99e1290e0307..2318811ff75a 100644
--- a/drivers/net/ethernet/rocker/Kconfig
+++ b/drivers/net/ethernet/rocker/Kconfig
@@ -19,6 +19,7 @@ if NET_VENDOR_ROCKER
config ROCKER
tristate "Rocker switch driver (EXPERIMENTAL)"
depends on PCI && NET_SWITCHDEV && BRIDGE
+ select CRC32
help
 This driver supports the Rocker switch device.
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index efef5476a577..223f69da7e95 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -246,13 +246,7 @@ static int imx_dwmac_probe(struct platform_device *pdev)
goto err_parse_dt;
}
- ret = dma_set_mask_and_coherent(&pdev->dev,
- DMA_BIT_MASK(dwmac->ops->addr_width));
- if (ret) {
- dev_err(&pdev->dev, "DMA mask set failed\n");
- goto err_dma_mask;
- }
-
+ plat_dat->addr64 = dwmac->ops->addr_width;
plat_dat->init = imx_dwmac_init;
plat_dat->exit = imx_dwmac_exit;
plat_dat->fix_mac_speed = imx_dwmac_fix_speed;
@@ -272,7 +266,6 @@ static int imx_dwmac_probe(struct platform_device *pdev)
err_dwmac_init:
err_drv_probe:
imx_dwmac_exit(pdev, plat_dat->bsp_priv);
-err_dma_mask:
err_parse_dt:
err_match_data:
stmmac_remove_config_dt(pdev, plat_dat);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index dc0b8b6d180d..459ae715b33d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -30,7 +30,6 @@
#define PRG_ETH0_EXT_RMII_MODE 4
/* mux to choose between fclk_div2 (bit unset) and mpll2 (bit set) */
-#define PRG_ETH0_CLK_M250_SEL_SHIFT 4
#define PRG_ETH0_CLK_M250_SEL_MASK GENMASK(4, 4)
/* TX clock delay in ns = "8ns / 4 * tx_dly_val" (where 8ns are exactly one
@@ -155,8 +154,9 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
return -ENOMEM;
clk_configs->m250_mux.reg = dwmac->regs + PRG_ETH0;
- clk_configs->m250_mux.shift = PRG_ETH0_CLK_M250_SEL_SHIFT;
- clk_configs->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK;
+ clk_configs->m250_mux.shift = __ffs(PRG_ETH0_CLK_M250_SEL_MASK);
+ clk_configs->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK >>
+ clk_configs->m250_mux.shift;
clk = meson8b_dwmac_register_clk(dwmac, "m250_sel", mux_parents,
ARRAY_SIZE(mux_parents), &clk_mux_ops,
&clk_configs->m250_mux.hw);
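The dwmac-meson8b hunk above derives the mux shift and field width from the single PRG_ETH0_CLK_M250_SEL_MASK definition rather than a separately maintained shift macro, so the two values cannot drift apart. A short sketch of the pattern, with a hypothetical two-bit field:

    #define CLK_SEL_MASK    GENMASK(5, 4)            /* hypothetical 2-bit mux field */

    mux->shift = __ffs(CLK_SEL_MASK);                /* lowest set bit -> 4 */
    mux->mask  = CLK_SEL_MASK >> mux->shift;         /* 0x3, the width clk_mux expects */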
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
index 6e30d7eb4983..0b4ee2dbb691 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -22,7 +22,7 @@ int dwmac4_dma_reset(void __iomem *ioaddr)
return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
!(value & DMA_BUS_MODE_SFT_RESET),
- 10000, 100000);
+ 10000, 1000000);
}
void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 8c1ac75901ce..5b1c12ff98c0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1558,6 +1558,19 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
}
/**
+ * stmmac_free_tx_skbufs - free TX skb buffers
+ * @priv: private structure
+ */
+static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
+{
+ u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
+ u32 queue;
+
+ for (queue = 0; queue < tx_queue_cnt; queue++)
+ dma_free_tx_skbufs(priv, queue);
+}
+
+/**
* free_dma_rx_desc_resources - free RX dma desc resources
* @priv: private structure
*/
@@ -2925,9 +2938,6 @@ static int stmmac_release(struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
u32 chan;
- if (priv->eee_enabled)
- del_timer_sync(&priv->eee_ctrl_timer);
-
if (device_may_wakeup(priv->device))
phylink_speed_down(priv->phylink, false);
/* Stop and disconnect the PHY */
@@ -2946,6 +2956,11 @@ static int stmmac_release(struct net_device *dev)
if (priv->lpi_irq > 0)
free_irq(priv->lpi_irq, dev);
+ if (priv->eee_enabled) {
+ priv->tx_path_in_lpi_mode = false;
+ del_timer_sync(&priv->eee_ctrl_timer);
+ }
+
/* Stop TX/RX DMA and clear the descriptors */
stmmac_stop_all_dma(priv);
@@ -4960,6 +4975,14 @@ int stmmac_dvr_probe(struct device *device,
dev_info(priv->device, "SPH feature enabled\n");
}
+ /* The current IP register MAC_HW_Feature1[ADDR64] only defines
+ * 32/40/64 bit widths, but some SoCs support other widths: the i.MX8MP,
+ * for example, supports 34 bits, which is reported as a 40-bit width in
+ * MAC_HW_Feature1[ADDR64]. So overwrite dma_cap.addr64 according to the
+ * real hardware design.
+ */
+ if (priv->plat->addr64)
+ priv->dma_cap.addr64 = priv->plat->addr64;
+
if (priv->dma_cap.addr64) {
ret = dma_set_mask_and_coherent(device,
DMA_BIT_MASK(priv->dma_cap.addr64));
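Taken together with the dwmac-imx hunk earlier, the override works in two halves: the platform glue advertises the SoC's true addressable width through plat_dat, and the core prefers that value over the coarse 32/40/64 encoding read back from MAC_HW_Feature1[ADDR64] before setting the DMA mask. A condensed sketch (the 34-bit figure is the i.MX8MP example from the comment):

    /* Platform glue (probe): report the real bus width. */
    plat_dat->addr64 = 34;                           /* e.g. i.MX8MP */

    /* Core (stmmac_dvr_probe): prefer the platform value over ADDR64. */
    if (priv->plat->addr64)
            priv->dma_cap.addr64 = priv->plat->addr64;
    ret = dma_set_mask_and_coherent(device,
                                    DMA_BIT_MASK(priv->dma_cap.addr64));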
@@ -5172,6 +5195,11 @@ int stmmac_suspend(struct device *dev)
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+ if (priv->eee_enabled) {
+ priv->tx_path_in_lpi_mode = false;
+ del_timer_sync(&priv->eee_ctrl_timer);
+ }
+
/* Stop TX/RX DMA */
stmmac_stop_all_dma(priv);
@@ -5277,11 +5305,20 @@ int stmmac_resume(struct device *dev)
return ret;
}
+ if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
+ rtnl_lock();
+ phylink_start(priv->phylink);
+ /* We may have called phylink_speed_down before */
+ phylink_speed_up(priv->phylink);
+ rtnl_unlock();
+ }
+
rtnl_lock();
mutex_lock(&priv->lock);
stmmac_reset_queues_param(priv);
+ stmmac_free_tx_skbufs(priv);
stmmac_clear_descriptors(priv);
stmmac_hw_setup(ndev, false);
@@ -5295,14 +5332,6 @@ int stmmac_resume(struct device *dev)
mutex_unlock(&priv->lock);
rtnl_unlock();
- if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
- rtnl_lock();
- phylink_start(priv->phylink);
- /* We may have called phylink_speed_down before */
- phylink_speed_up(priv->phylink);
- rtnl_unlock();
- }
-
phylink_mac_change(priv->phylink, true);
netif_device_attach(ndev);
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index 6dd73bd0f458..99f44563e10f 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -1265,9 +1265,6 @@ static int cpsw_xdp_prog_setup(struct cpsw_priv *priv, struct netdev_bpf *bpf)
if (!priv->xdpi.prog && !prog)
return 0;
- if (!xdp_attachment_flags_ok(&priv->xdpi, bpf))
- return -EBUSY;
-
WRITE_ONCE(priv->xdp_prog, prog);
xdp_attachment_setup(&priv->xdpi, bpf);
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 60c199fcb91e..030185301014 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1351,7 +1351,6 @@ static int temac_probe(struct platform_device *pdev)
struct device_node *temac_np = dev_of_node(&pdev->dev), *dma_np;
struct temac_local *lp;
struct net_device *ndev;
- struct resource *res;
const void *addr;
__be32 *p;
bool little_endian;
@@ -1500,13 +1499,11 @@ static int temac_probe(struct platform_device *pdev)
of_node_put(dma_np);
} else if (pdata) {
/* 2nd memory resource specifies DMA registers */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- lp->sdma_regs = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!lp->sdma_regs) {
+ lp->sdma_regs = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(lp->sdma_regs)) {
dev_err(&pdev->dev,
"could not map DMA registers\n");
- return -ENOMEM;
+ return PTR_ERR(lp->sdma_regs);
}
if (pdata->dma_little_endian) {
lp->dma_in = temac_dma_in32_le;
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 627c33333866..5523f069b9a5 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -258,21 +258,11 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
skb_dst_set(skb, &tun_dst->dst);
/* Ignore packet loops (and multicast echo) */
- if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr))
- goto rx_error;
-
- switch (skb_protocol(skb, true)) {
- case htons(ETH_P_IP):
- if (pskb_may_pull(skb, sizeof(struct iphdr)))
- goto rx_error;
- break;
- case htons(ETH_P_IPV6):
- if (pskb_may_pull(skb, sizeof(struct ipv6hdr)))
- goto rx_error;
- break;
- default:
- goto rx_error;
+ if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr)) {
+ geneve->dev->stats.rx_errors++;
+ goto drop;
}
+
oiph = skb_network_header(skb);
skb_reset_network_header(skb);
@@ -309,8 +299,6 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
dev_sw_netstats_rx_add(geneve->dev, len);
return;
-rx_error:
- geneve->dev->stats.rx_errors++;
drop:
/* Consume bad packet */
kfree_skb(skb);
diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c
index e8599bb948c0..6c3ed5b17b80 100644
--- a/drivers/net/ipa/gsi_trans.c
+++ b/drivers/net/ipa/gsi_trans.c
@@ -156,6 +156,9 @@ int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
/* The allocator will give us a power-of-2 number of pages. But we
* can't guarantee that, so request it. That way we won't waste any
* memory that would be available beyond the required space.
+ *
+ * Note that gsi_trans_pool_exit_dma() assumes the total allocated
+ * size is exactly (count * size).
*/
total_size = get_order(total_size) << PAGE_SHIFT;
@@ -175,7 +178,9 @@ int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
void gsi_trans_pool_exit_dma(struct device *dev, struct gsi_trans_pool *pool)
{
- dma_free_coherent(dev, pool->size, pool->base, pool->addr);
+ size_t total_size = pool->count * pool->size;
+
+ dma_free_coherent(dev, total_size, pool->base, pool->addr);
memset(pool, 0, sizeof(*pool));
}
diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c
index 2e90512f3bbe..90aafb56f140 100644
--- a/drivers/net/netdevsim/bpf.c
+++ b/drivers/net/netdevsim/bpf.c
@@ -63,15 +63,20 @@ static int
nsim_bpf_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn)
{
struct nsim_bpf_bound_prog *state;
+ int ret = 0;
state = env->prog->aux->offload->dev_priv;
if (state->nsim_dev->bpf_bind_verifier_delay && !insn_idx)
msleep(state->nsim_dev->bpf_bind_verifier_delay);
- if (insn_idx == env->prog->len - 1)
+ if (insn_idx == env->prog->len - 1) {
pr_vlog(env, "Hello from netdevsim!\n");
- return 0;
+ if (!state->nsim_dev->bpf_bind_verifier_accept)
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
}
static int nsim_bpf_finalize(struct bpf_verifier_env *env)
@@ -190,9 +195,6 @@ nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf,
{
int err;
- if (!xdp_attachment_flags_ok(xdp, bpf))
- return -EBUSY;
-
if (bpf->command == XDP_SETUP_PROG && !ns->bpf_xdpdrv_accept) {
NSIM_EA(bpf->extack, "driver XDP disabled in DebugFS");
return -EOPNOTSUPP;
@@ -598,6 +600,9 @@ int nsim_bpf_dev_init(struct nsim_dev *nsim_dev)
&nsim_dev->bpf_bind_accept);
debugfs_create_u32("bpf_bind_verifier_delay", 0600, nsim_dev->ddir,
&nsim_dev->bpf_bind_verifier_delay);
+ nsim_dev->bpf_bind_verifier_accept = true;
+ debugfs_create_bool("bpf_bind_verifier_accept", 0600, nsim_dev->ddir,
+ &nsim_dev->bpf_bind_verifier_accept);
return 0;
}
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 19b1e6ef5573..48163c5f2ec9 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -197,6 +197,7 @@ struct nsim_dev {
struct dentry *take_snapshot;
struct bpf_offload_dev *bpf_dev;
bool bpf_bind_accept;
+ bool bpf_bind_verifier_accept;
u32 bpf_bind_verifier_delay;
struct dentry *ddir_bpf_bound_progs;
u32 prog_id_gen;
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 571bd03ebd81..6d9130859c55 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -1371,11 +1371,17 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
int orig_iif = skb->skb_iif;
bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
bool is_ndisc = ipv6_ndisc_frame(skb);
+ bool is_ll_src;
/* loopback, multicast & non-ND link-local traffic; do not push through
- * packet taps again. Reset pkt_type for upper layers to process skb
+ * packet taps again. Reset pkt_type for upper layers to process skb.
+ * For packets with a link-local source address, however, skip this so
+ * that the dst can be determined at input using the original ifindex
+ * in the case that the daddr needs strict handling.
*/
- if (skb->pkt_type == PACKET_LOOPBACK || (need_strict && !is_ndisc)) {
+ is_ll_src = ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL;
+ if (skb->pkt_type == PACKET_LOOPBACK ||
+ (need_strict && !is_ndisc && !is_ll_src)) {
skb->dev = vrf_dev;
skb->skb_iif = vrf_dev->ifindex;
IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
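Condensed into a hypothetical predicate, the new condition reads as below; packets with a link-local source address are left on their original ifindex so the input path can still resolve their dst when the destination needs strict handling:

    /* Hypothetical helper: should the VRF device take over this IPv6 skb
     * before the packet taps run? */
    static bool vrf_ip6_take_over(const struct sk_buff *skb,
                                  bool need_strict, bool is_ndisc)
    {
            bool is_ll_src = ipv6_addr_type(&ipv6_hdr(skb)->saddr) &
                             IPV6_ADDR_LINKLOCAL;

            return skb->pkt_type == PACKET_LOOPBACK ||
                   (need_strict && !is_ndisc && !is_ll_src);
    }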
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
index d6b849552a1e..9c65d560d48f 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
@@ -286,14 +286,76 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
static bool aspeed_expr_is_gpio(const struct aspeed_sig_expr *expr)
{
/*
- * The signal type is GPIO if the signal name has "GPI" as a prefix.
- * strncmp (rather than strcmp) is used to implement the prefix
- * requirement.
+ * We need to differentiate between GPIO and non-GPIO signals to
+ * implement the gpio_request_enable() interface. For better or worse
+ * the ASPEED pinctrl driver uses the expression names to determine
+ * whether an expression will mux a pin for GPIO.
*
- * expr->signal might look like "GPIOB1" in the GPIO case.
- * expr->signal might look like "GPIT0" in the GPI case.
+ * Generally we have the following - A GPIO such as B1 has:
+ *
+ * - expr->signal set to "GPIOB1"
+ * - expr->function set to "GPIOB1"
+ *
+ * Using this fact we can determine whether the provided expression is
+ * a GPIO expression by testing the signal name for the string prefix
+ * "GPIO".
+ *
+ * However, some GPIOs are input-only, and the ASPEED datasheets name
+ * them differently. An input-only GPIO such as T0 has:
+ *
+ * - expr->signal set to "GPIT0"
+ * - expr->function set to "GPIT0"
+ *
+ * It's tempting to generalise the prefix test from "GPIO" to "GPI" to
+ * account for both GPIOs and GPIs, but in doing so we run aground on
+ * another feature:
+ *
+ * Some pins in the ASPEED BMC SoCs have a "pass-through" GPIO
+ * function where the input state of one pin is replicated as the
+ * output state of another (as if they were shorted together - a mux
+ * configuration that is typically enabled by hardware strapping).
+ * This feature allows the BMC to pass e.g. power button state through
+ * to the host while the BMC is yet to boot, but take control of the
+ * button state once the BMC has booted by muxing each pin as a
+ * separate, pin-specific GPIO.
+ *
+ * Conceptually this pass-through mode is a form of GPIO and is named
+ * as such in the datasheets, e.g. "GPID0". This naming similarity
+ * trips us up with the simple GPI-prefixed-signal-name scheme
+ * discussed above, as the pass-through configuration is not what we
+ * want when muxing a pin as GPIO for the GPIO subsystem.
+ *
+ * On e.g. the AST2400, a pass-through function "GPID0" is grouped on
+ * balls A18 and D16, where we have:
+ *
+ * For ball A18:
+ * - expr->signal set to "GPID0IN"
+ * - expr->function set to "GPID0"
+ *
+ * For ball D16:
+ * - expr->signal set to "GPID0OUT"
+ * - expr->function set to "GPID0"
+ *
+ * By contrast, the pin-specific GPIO expressions for the same pins are
+ * as follows:
+ *
+ * For ball A18:
+ * - expr->signal looks like "GPIOD0"
+ * - expr->function looks like "GPIOD0"
+ *
+ * For ball D16:
+ * - expr->signal looks like "GPIOD1"
+ * - expr->function looks like "GPIOD1"
+ *
+ * Testing both the signal _and_ function names gives us the means to
+ * differentiate the pass-through GPIO pinmux configuration from the
+ * pin-specific configuration that the GPIO subsystem is after: An
+ * expression is a pin-specific (non-pass-through) GPIO configuration
+ * if the signal prefix is "GPI" and the signal name matches the
+ * function name.
*/
- return strncmp(expr->signal, "GPI", 3) == 0;
+ return !strncmp(expr->signal, "GPI", 3) &&
+ !strcmp(expr->signal, expr->function);
}
static bool aspeed_gpio_in_exprs(const struct aspeed_sig_expr **exprs)
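Applied to the examples in the comment, the tightened check classifies the signal/function pairs as follows (a standalone restatement of the new return expression, using a hypothetical helper name):

    /* (signal, function)         -> pin-specific GPIO?
     * ("GPIOB1",   "GPIOB1")     -> true   regular GPIO
     * ("GPIT0",    "GPIT0")      -> true   input-only GPIO
     * ("GPID0IN",  "GPID0")      -> false  pass-through half, names differ
     * ("GPID0OUT", "GPID0")      -> false  pass-through half, names differ
     */
    static bool is_pin_specific_gpio(const char *signal, const char *function)
    {
            return !strncmp(signal, "GPI", 3) && !strcmp(signal, function);
    }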
diff --git a/drivers/pinctrl/aspeed/pinmux-aspeed.h b/drivers/pinctrl/aspeed/pinmux-aspeed.h
index f86739e800c3..dba5875ff276 100644
--- a/drivers/pinctrl/aspeed/pinmux-aspeed.h
+++ b/drivers/pinctrl/aspeed/pinmux-aspeed.h
@@ -452,10 +452,11 @@ struct aspeed_sig_desc {
* evaluation of the descriptors.
*
* @signal: The signal name for the priority level on the pin. If the signal
- * type is GPIO, then the signal name must begin with the string
- * "GPIO", e.g. GPIOA0, GPIOT4 etc.
+ * type is GPIO, then the signal name must begin with the
+ * prefix "GPI", e.g. GPIOA0, GPIT0 etc.
* @function: The name of the function the signal participates in for the
- * associated expression
+ * associated expression. For pin-specific GPIO, the function
+ * name must match the signal name.
* @ndescs: The number of signal descriptors in the expression
* @descs: Pointer to an array of signal descriptors that comprise the
* function expression
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index d49aab3cfbaa..394a421a19d5 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -1049,7 +1049,6 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
break;
case PIN_CONFIG_INPUT_DEBOUNCE:
debounce = readl(db_reg);
- debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
if (arg)
conf |= BYT_DEBOUNCE_EN;
@@ -1058,24 +1057,31 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
switch (arg) {
case 375:
+ debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
debounce |= BYT_DEBOUNCE_PULSE_375US;
break;
case 750:
+ debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
debounce |= BYT_DEBOUNCE_PULSE_750US;
break;
case 1500:
+ debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
debounce |= BYT_DEBOUNCE_PULSE_1500US;
break;
case 3000:
+ debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
debounce |= BYT_DEBOUNCE_PULSE_3MS;
break;
case 6000:
+ debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
debounce |= BYT_DEBOUNCE_PULSE_6MS;
break;
case 12000:
+ debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
debounce |= BYT_DEBOUNCE_PULSE_12MS;
break;
case 24000:
+ debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
debounce |= BYT_DEBOUNCE_PULSE_24MS;
break;
default:
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 1c10ab184783..b6ef1911c1dd 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -442,8 +442,8 @@ static void intel_gpio_set_gpio_mode(void __iomem *padcfg0)
value |= PADCFG0_PMODE_GPIO;
/* Disable input and output buffers */
- value &= ~PADCFG0_GPIORXDIS;
- value &= ~PADCFG0_GPIOTXDIS;
+ value |= PADCFG0_GPIORXDIS;
+ value |= PADCFG0_GPIOTXDIS;
/* Disable SCI/SMI/NMI generation */
value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
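The pinctrl-intel fix hinges on bit polarity: GPIORXDIS and GPIOTXDIS are active-high disable controls, so switching a pad to GPIO mode with both buffers off means setting them, not clearing them. A short sketch, with the assumed behaviour that a later direction request re-enables only the buffer it needs:

    /* intel_gpio_set_gpio_mode(): pad to GPIO, both buffers off. */
    value |= PADCFG0_PMODE_GPIO;
    value |= PADCFG0_GPIORXDIS | PADCFG0_GPIOTXDIS;

    /* Assumed: the direction helper later re-enables only what it needs,
     * e.g. clearing PADCFG0_GPIORXDIS for an input. */
    value &= ~PADCFG0_GPIORXDIS;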
diff --git a/drivers/pinctrl/intel/pinctrl-jasperlake.c b/drivers/pinctrl/intel/pinctrl-jasperlake.c
index 9bd0e8e6310c..ec435b7ab392 100644
--- a/drivers/pinctrl/intel/pinctrl-jasperlake.c
+++ b/drivers/pinctrl/intel/pinctrl-jasperlake.c
@@ -16,7 +16,7 @@
#define JSL_PAD_OWN 0x020
#define JSL_PADCFGLOCK 0x080
-#define JSL_HOSTSW_OWN 0x0b0
+#define JSL_HOSTSW_OWN 0x0c0
#define JSL_GPI_IS 0x100
#define JSL_GPI_IE 0x120
@@ -65,252 +65,263 @@ static const struct pinctrl_pin_desc jsl_pins[] = {
PINCTRL_PIN(17, "EMMC_CLK"),
PINCTRL_PIN(18, "EMMC_RESETB"),
PINCTRL_PIN(19, "A4WP_PRESENT"),
+ /* SPI */
+ PINCTRL_PIN(20, "SPI0_IO_2"),
+ PINCTRL_PIN(21, "SPI0_IO_3"),
+ PINCTRL_PIN(22, "SPI0_MOSI_IO_0"),
+ PINCTRL_PIN(23, "SPI0_MISO_IO_1"),
+ PINCTRL_PIN(24, "SPI0_TPM_CSB"),
+ PINCTRL_PIN(25, "SPI0_FLASH_0_CSB"),
+ PINCTRL_PIN(26, "SPI0_FLASH_1_CSB"),
+ PINCTRL_PIN(27, "SPI0_CLK"),
+ PINCTRL_PIN(28, "SPI0_CLK_LOOPBK"),
/* GPP_B */
- PINCTRL_PIN(20, "CORE_VID_0"),
- PINCTRL_PIN(21, "CORE_VID_1"),
- PINCTRL_PIN(22, "VRALERTB"),
- PINCTRL_PIN(23, "CPU_GP_2"),
- PINCTRL_PIN(24, "CPU_GP_3"),
- PINCTRL_PIN(25, "SRCCLKREQB_0"),
- PINCTRL_PIN(26, "SRCCLKREQB_1"),
- PINCTRL_PIN(27, "SRCCLKREQB_2"),
- PINCTRL_PIN(28, "SRCCLKREQB_3"),
- PINCTRL_PIN(29, "SRCCLKREQB_4"),
- PINCTRL_PIN(30, "SRCCLKREQB_5"),
- PINCTRL_PIN(31, "PMCALERTB"),
- PINCTRL_PIN(32, "SLP_S0B"),
- PINCTRL_PIN(33, "PLTRSTB"),
- PINCTRL_PIN(34, "SPKR"),
- PINCTRL_PIN(35, "GSPI0_CS0B"),
- PINCTRL_PIN(36, "GSPI0_CLK"),
- PINCTRL_PIN(37, "GSPI0_MISO"),
- PINCTRL_PIN(38, "GSPI0_MOSI"),
- PINCTRL_PIN(39, "GSPI1_CS0B"),
- PINCTRL_PIN(40, "GSPI1_CLK"),
- PINCTRL_PIN(41, "GSPI1_MISO"),
- PINCTRL_PIN(42, "GSPI1_MOSI"),
- PINCTRL_PIN(43, "DDSP_HPD_A"),
- PINCTRL_PIN(44, "GSPI0_CLK_LOOPBK"),
- PINCTRL_PIN(45, "GSPI1_CLK_LOOPBK"),
+ PINCTRL_PIN(29, "CORE_VID_0"),
+ PINCTRL_PIN(30, "CORE_VID_1"),
+ PINCTRL_PIN(31, "VRALERTB"),
+ PINCTRL_PIN(32, "CPU_GP_2"),
+ PINCTRL_PIN(33, "CPU_GP_3"),
+ PINCTRL_PIN(34, "SRCCLKREQB_0"),
+ PINCTRL_PIN(35, "SRCCLKREQB_1"),
+ PINCTRL_PIN(36, "SRCCLKREQB_2"),
+ PINCTRL_PIN(37, "SRCCLKREQB_3"),
+ PINCTRL_PIN(38, "SRCCLKREQB_4"),
+ PINCTRL_PIN(39, "SRCCLKREQB_5"),
+ PINCTRL_PIN(40, "PMCALERTB"),
+ PINCTRL_PIN(41, "SLP_S0B"),
+ PINCTRL_PIN(42, "PLTRSTB"),
+ PINCTRL_PIN(43, "SPKR"),
+ PINCTRL_PIN(44, "GSPI0_CS0B"),
+ PINCTRL_PIN(45, "GSPI0_CLK"),
+ PINCTRL_PIN(46, "GSPI0_MISO"),
+ PINCTRL_PIN(47, "GSPI0_MOSI"),
+ PINCTRL_PIN(48, "GSPI1_CS0B"),
+ PINCTRL_PIN(49, "GSPI1_CLK"),
+ PINCTRL_PIN(50, "GSPI1_MISO"),
+ PINCTRL_PIN(51, "GSPI1_MOSI"),
+ PINCTRL_PIN(52, "DDSP_HPD_A"),
+ PINCTRL_PIN(53, "GSPI0_CLK_LOOPBK"),
+ PINCTRL_PIN(54, "GSPI1_CLK_LOOPBK"),
/* GPP_A */
- PINCTRL_PIN(46, "ESPI_IO_0"),
- PINCTRL_PIN(47, "ESPI_IO_1"),
- PINCTRL_PIN(48, "ESPI_IO_2"),
- PINCTRL_PIN(49, "ESPI_IO_3"),
- PINCTRL_PIN(50, "ESPI_CSB"),
- PINCTRL_PIN(51, "ESPI_CLK"),
- PINCTRL_PIN(52, "ESPI_RESETB"),
- PINCTRL_PIN(53, "SMBCLK"),
- PINCTRL_PIN(54, "SMBDATA"),
- PINCTRL_PIN(55, "SMBALERTB"),
- PINCTRL_PIN(56, "CPU_GP_0"),
- PINCTRL_PIN(57, "CPU_GP_1"),
- PINCTRL_PIN(58, "USB2_OCB_1"),
- PINCTRL_PIN(59, "USB2_OCB_2"),
- PINCTRL_PIN(60, "USB2_OCB_3"),
- PINCTRL_PIN(61, "DDSP_HPD_A_TIME_SYNC_0"),
- PINCTRL_PIN(62, "DDSP_HPD_B"),
- PINCTRL_PIN(63, "DDSP_HPD_C"),
- PINCTRL_PIN(64, "USB2_OCB_0"),
- PINCTRL_PIN(65, "PCHHOTB"),
- PINCTRL_PIN(66, "ESPI_CLK_LOOPBK"),
+ PINCTRL_PIN(55, "ESPI_IO_0"),
+ PINCTRL_PIN(56, "ESPI_IO_1"),
+ PINCTRL_PIN(57, "ESPI_IO_2"),
+ PINCTRL_PIN(58, "ESPI_IO_3"),
+ PINCTRL_PIN(59, "ESPI_CSB"),
+ PINCTRL_PIN(60, "ESPI_CLK"),
+ PINCTRL_PIN(61, "ESPI_RESETB"),
+ PINCTRL_PIN(62, "SMBCLK"),
+ PINCTRL_PIN(63, "SMBDATA"),
+ PINCTRL_PIN(64, "SMBALERTB"),
+ PINCTRL_PIN(65, "CPU_GP_0"),
+ PINCTRL_PIN(66, "CPU_GP_1"),
+ PINCTRL_PIN(67, "USB2_OCB_1"),
+ PINCTRL_PIN(68, "USB2_OCB_2"),
+ PINCTRL_PIN(69, "USB2_OCB_3"),
+ PINCTRL_PIN(70, "DDSP_HPD_A_TIME_SYNC_0"),
+ PINCTRL_PIN(71, "DDSP_HPD_B"),
+ PINCTRL_PIN(72, "DDSP_HPD_C"),
+ PINCTRL_PIN(73, "USB2_OCB_0"),
+ PINCTRL_PIN(74, "PCHHOTB"),
+ PINCTRL_PIN(75, "ESPI_CLK_LOOPBK"),
/* GPP_S */
- PINCTRL_PIN(67, "SNDW1_CLK"),
- PINCTRL_PIN(68, "SNDW1_DATA"),
- PINCTRL_PIN(69, "SNDW2_CLK"),
- PINCTRL_PIN(70, "SNDW2_DATA"),
- PINCTRL_PIN(71, "SNDW1_CLK"),
- PINCTRL_PIN(72, "SNDW1_DATA"),
- PINCTRL_PIN(73, "SNDW4_CLK_DMIC_CLK_0"),
- PINCTRL_PIN(74, "SNDW4_DATA_DMIC_DATA_0"),
+ PINCTRL_PIN(76, "SNDW1_CLK"),
+ PINCTRL_PIN(77, "SNDW1_DATA"),
+ PINCTRL_PIN(78, "SNDW2_CLK"),
+ PINCTRL_PIN(79, "SNDW2_DATA"),
+ PINCTRL_PIN(80, "SNDW1_CLK"),
+ PINCTRL_PIN(81, "SNDW1_DATA"),
+ PINCTRL_PIN(82, "SNDW4_CLK_DMIC_CLK_0"),
+ PINCTRL_PIN(83, "SNDW4_DATA_DMIC_DATA_0"),
/* GPP_R */
- PINCTRL_PIN(75, "HDA_BCLK"),
- PINCTRL_PIN(76, "HDA_SYNC"),
- PINCTRL_PIN(77, "HDA_SDO"),
- PINCTRL_PIN(78, "HDA_SDI_0"),
- PINCTRL_PIN(79, "HDA_RSTB"),
- PINCTRL_PIN(80, "HDA_SDI_1"),
- PINCTRL_PIN(81, "I2S1_SFRM"),
- PINCTRL_PIN(82, "I2S1_TXD"),
+ PINCTRL_PIN(84, "HDA_BCLK"),
+ PINCTRL_PIN(85, "HDA_SYNC"),
+ PINCTRL_PIN(86, "HDA_SDO"),
+ PINCTRL_PIN(87, "HDA_SDI_0"),
+ PINCTRL_PIN(88, "HDA_RSTB"),
+ PINCTRL_PIN(89, "HDA_SDI_1"),
+ PINCTRL_PIN(90, "I2S1_SFRM"),
+ PINCTRL_PIN(91, "I2S1_TXD"),
/* GPP_H */
- PINCTRL_PIN(83, "GPPC_H_0"),
- PINCTRL_PIN(84, "SD_PWR_EN_B"),
- PINCTRL_PIN(85, "MODEM_CLKREQ"),
- PINCTRL_PIN(86, "SX_EXIT_HOLDOFFB"),
- PINCTRL_PIN(87, "I2C2_SDA"),
- PINCTRL_PIN(88, "I2C2_SCL"),
- PINCTRL_PIN(89, "I2C3_SDA"),
- PINCTRL_PIN(90, "I2C3_SCL"),
- PINCTRL_PIN(91, "I2C4_SDA"),
- PINCTRL_PIN(92, "I2C4_SCL"),
- PINCTRL_PIN(93, "CPU_VCCIO_PWR_GATEB"),
- PINCTRL_PIN(94, "I2S2_SCLK"),
- PINCTRL_PIN(95, "I2S2_SFRM"),
- PINCTRL_PIN(96, "I2S2_TXD"),
- PINCTRL_PIN(97, "I2S2_RXD"),
- PINCTRL_PIN(98, "I2S1_SCLK"),
- PINCTRL_PIN(99, "GPPC_H_16"),
- PINCTRL_PIN(100, "GPPC_H_17"),
- PINCTRL_PIN(101, "GPPC_H_18"),
- PINCTRL_PIN(102, "GPPC_H_19"),
- PINCTRL_PIN(103, "GPPC_H_20"),
- PINCTRL_PIN(104, "GPPC_H_21"),
- PINCTRL_PIN(105, "GPPC_H_22"),
- PINCTRL_PIN(106, "GPPC_H_23"),
+ PINCTRL_PIN(92, "GPPC_H_0"),
+ PINCTRL_PIN(93, "SD_PWR_EN_B"),
+ PINCTRL_PIN(94, "MODEM_CLKREQ"),
+ PINCTRL_PIN(95, "SX_EXIT_HOLDOFFB"),
+ PINCTRL_PIN(96, "I2C2_SDA"),
+ PINCTRL_PIN(97, "I2C2_SCL"),
+ PINCTRL_PIN(98, "I2C3_SDA"),
+ PINCTRL_PIN(99, "I2C3_SCL"),
+ PINCTRL_PIN(100, "I2C4_SDA"),
+ PINCTRL_PIN(101, "I2C4_SCL"),
+ PINCTRL_PIN(102, "CPU_VCCIO_PWR_GATEB"),
+ PINCTRL_PIN(103, "I2S2_SCLK"),
+ PINCTRL_PIN(104, "I2S2_SFRM"),
+ PINCTRL_PIN(105, "I2S2_TXD"),
+ PINCTRL_PIN(106, "I2S2_RXD"),
+ PINCTRL_PIN(107, "I2S1_SCLK"),
+ PINCTRL_PIN(108, "GPPC_H_16"),
+ PINCTRL_PIN(109, "GPPC_H_17"),
+ PINCTRL_PIN(110, "GPPC_H_18"),
+ PINCTRL_PIN(111, "GPPC_H_19"),
+ PINCTRL_PIN(112, "GPPC_H_20"),
+ PINCTRL_PIN(113, "GPPC_H_21"),
+ PINCTRL_PIN(114, "GPPC_H_22"),
+ PINCTRL_PIN(115, "GPPC_H_23"),
/* GPP_D */
- PINCTRL_PIN(107, "SPI1_CSB"),
- PINCTRL_PIN(108, "SPI1_CLK"),
- PINCTRL_PIN(109, "SPI1_MISO_IO_1"),
- PINCTRL_PIN(110, "SPI1_MOSI_IO_0"),
- PINCTRL_PIN(111, "ISH_I2C0_SDA"),
- PINCTRL_PIN(112, "ISH_I2C0_SCL"),
- PINCTRL_PIN(113, "ISH_I2C1_SDA"),
- PINCTRL_PIN(114, "ISH_I2C1_SCL"),
- PINCTRL_PIN(115, "ISH_SPI_CSB"),
- PINCTRL_PIN(116, "ISH_SPI_CLK"),
- PINCTRL_PIN(117, "ISH_SPI_MISO"),
- PINCTRL_PIN(118, "ISH_SPI_MOSI"),
- PINCTRL_PIN(119, "ISH_UART0_RXD"),
- PINCTRL_PIN(120, "ISH_UART0_TXD"),
- PINCTRL_PIN(121, "ISH_UART0_RTSB"),
- PINCTRL_PIN(122, "ISH_UART0_CTSB"),
- PINCTRL_PIN(123, "SPI1_IO_2"),
- PINCTRL_PIN(124, "SPI1_IO_3"),
- PINCTRL_PIN(125, "I2S_MCLK"),
- PINCTRL_PIN(126, "CNV_MFUART2_RXD"),
- PINCTRL_PIN(127, "CNV_MFUART2_TXD"),
- PINCTRL_PIN(128, "CNV_PA_BLANKING"),
- PINCTRL_PIN(129, "I2C5_SDA"),
- PINCTRL_PIN(130, "I2C5_SCL"),
- PINCTRL_PIN(131, "GSPI2_CLK_LOOPBK"),
- PINCTRL_PIN(132, "SPI1_CLK_LOOPBK"),
+ PINCTRL_PIN(116, "SPI1_CSB"),
+ PINCTRL_PIN(117, "SPI1_CLK"),
+ PINCTRL_PIN(118, "SPI1_MISO_IO_1"),
+ PINCTRL_PIN(119, "SPI1_MOSI_IO_0"),
+ PINCTRL_PIN(120, "ISH_I2C0_SDA"),
+ PINCTRL_PIN(121, "ISH_I2C0_SCL"),
+ PINCTRL_PIN(122, "ISH_I2C1_SDA"),
+ PINCTRL_PIN(123, "ISH_I2C1_SCL"),
+ PINCTRL_PIN(124, "ISH_SPI_CSB"),
+ PINCTRL_PIN(125, "ISH_SPI_CLK"),
+ PINCTRL_PIN(126, "ISH_SPI_MISO"),
+ PINCTRL_PIN(127, "ISH_SPI_MOSI"),
+ PINCTRL_PIN(128, "ISH_UART0_RXD"),
+ PINCTRL_PIN(129, "ISH_UART0_TXD"),
+ PINCTRL_PIN(130, "ISH_UART0_RTSB"),
+ PINCTRL_PIN(131, "ISH_UART0_CTSB"),
+ PINCTRL_PIN(132, "SPI1_IO_2"),
+ PINCTRL_PIN(133, "SPI1_IO_3"),
+ PINCTRL_PIN(134, "I2S_MCLK"),
+ PINCTRL_PIN(135, "CNV_MFUART2_RXD"),
+ PINCTRL_PIN(136, "CNV_MFUART2_TXD"),
+ PINCTRL_PIN(137, "CNV_PA_BLANKING"),
+ PINCTRL_PIN(138, "I2C5_SDA"),
+ PINCTRL_PIN(139, "I2C5_SCL"),
+ PINCTRL_PIN(140, "GSPI2_CLK_LOOPBK"),
+ PINCTRL_PIN(141, "SPI1_CLK_LOOPBK"),
/* vGPIO */
- PINCTRL_PIN(133, "CNV_BTEN"),
- PINCTRL_PIN(134, "CNV_WCEN"),
- PINCTRL_PIN(135, "CNV_BT_HOST_WAKEB"),
- PINCTRL_PIN(136, "CNV_BT_IF_SELECT"),
- PINCTRL_PIN(137, "vCNV_BT_UART_TXD"),
- PINCTRL_PIN(138, "vCNV_BT_UART_RXD"),
- PINCTRL_PIN(139, "vCNV_BT_UART_CTS_B"),
- PINCTRL_PIN(140, "vCNV_BT_UART_RTS_B"),
- PINCTRL_PIN(141, "vCNV_MFUART1_TXD"),
- PINCTRL_PIN(142, "vCNV_MFUART1_RXD"),
- PINCTRL_PIN(143, "vCNV_MFUART1_CTS_B"),
- PINCTRL_PIN(144, "vCNV_MFUART1_RTS_B"),
- PINCTRL_PIN(145, "vUART0_TXD"),
- PINCTRL_PIN(146, "vUART0_RXD"),
- PINCTRL_PIN(147, "vUART0_CTS_B"),
- PINCTRL_PIN(148, "vUART0_RTS_B"),
- PINCTRL_PIN(149, "vISH_UART0_TXD"),
- PINCTRL_PIN(150, "vISH_UART0_RXD"),
- PINCTRL_PIN(151, "vISH_UART0_CTS_B"),
- PINCTRL_PIN(152, "vISH_UART0_RTS_B"),
- PINCTRL_PIN(153, "vCNV_BT_I2S_BCLK"),
- PINCTRL_PIN(154, "vCNV_BT_I2S_WS_SYNC"),
- PINCTRL_PIN(155, "vCNV_BT_I2S_SDO"),
- PINCTRL_PIN(156, "vCNV_BT_I2S_SDI"),
- PINCTRL_PIN(157, "vI2S2_SCLK"),
- PINCTRL_PIN(158, "vI2S2_SFRM"),
- PINCTRL_PIN(159, "vI2S2_TXD"),
- PINCTRL_PIN(160, "vI2S2_RXD"),
- PINCTRL_PIN(161, "vSD3_CD_B"),
+ PINCTRL_PIN(142, "CNV_BTEN"),
+ PINCTRL_PIN(143, "CNV_WCEN"),
+ PINCTRL_PIN(144, "CNV_BT_HOST_WAKEB"),
+ PINCTRL_PIN(145, "CNV_BT_IF_SELECT"),
+ PINCTRL_PIN(146, "vCNV_BT_UART_TXD"),
+ PINCTRL_PIN(147, "vCNV_BT_UART_RXD"),
+ PINCTRL_PIN(148, "vCNV_BT_UART_CTS_B"),
+ PINCTRL_PIN(149, "vCNV_BT_UART_RTS_B"),
+ PINCTRL_PIN(150, "vCNV_MFUART1_TXD"),
+ PINCTRL_PIN(151, "vCNV_MFUART1_RXD"),
+ PINCTRL_PIN(152, "vCNV_MFUART1_CTS_B"),
+ PINCTRL_PIN(153, "vCNV_MFUART1_RTS_B"),
+ PINCTRL_PIN(154, "vUART0_TXD"),
+ PINCTRL_PIN(155, "vUART0_RXD"),
+ PINCTRL_PIN(156, "vUART0_CTS_B"),
+ PINCTRL_PIN(157, "vUART0_RTS_B"),
+ PINCTRL_PIN(158, "vISH_UART0_TXD"),
+ PINCTRL_PIN(159, "vISH_UART0_RXD"),
+ PINCTRL_PIN(160, "vISH_UART0_CTS_B"),
+ PINCTRL_PIN(161, "vISH_UART0_RTS_B"),
+ PINCTRL_PIN(162, "vCNV_BT_I2S_BCLK"),
+ PINCTRL_PIN(163, "vCNV_BT_I2S_WS_SYNC"),
+ PINCTRL_PIN(164, "vCNV_BT_I2S_SDO"),
+ PINCTRL_PIN(165, "vCNV_BT_I2S_SDI"),
+ PINCTRL_PIN(166, "vI2S2_SCLK"),
+ PINCTRL_PIN(167, "vI2S2_SFRM"),
+ PINCTRL_PIN(168, "vI2S2_TXD"),
+ PINCTRL_PIN(169, "vI2S2_RXD"),
+ PINCTRL_PIN(170, "vSD3_CD_B"),
/* GPP_C */
- PINCTRL_PIN(162, "GPPC_C_0"),
- PINCTRL_PIN(163, "GPPC_C_1"),
- PINCTRL_PIN(164, "GPPC_C_2"),
- PINCTRL_PIN(165, "GPPC_C_3"),
- PINCTRL_PIN(166, "GPPC_C_4"),
- PINCTRL_PIN(167, "GPPC_C_5"),
- PINCTRL_PIN(168, "SUSWARNB_SUSPWRDNACK"),
- PINCTRL_PIN(169, "SUSACKB"),
- PINCTRL_PIN(170, "UART0_RXD"),
- PINCTRL_PIN(171, "UART0_TXD"),
- PINCTRL_PIN(172, "UART0_RTSB"),
- PINCTRL_PIN(173, "UART0_CTSB"),
- PINCTRL_PIN(174, "UART1_RXD"),
- PINCTRL_PIN(175, "UART1_TXD"),
- PINCTRL_PIN(176, "UART1_RTSB"),
- PINCTRL_PIN(177, "UART1_CTSB"),
- PINCTRL_PIN(178, "I2C0_SDA"),
- PINCTRL_PIN(179, "I2C0_SCL"),
- PINCTRL_PIN(180, "I2C1_SDA"),
- PINCTRL_PIN(181, "I2C1_SCL"),
- PINCTRL_PIN(182, "UART2_RXD"),
- PINCTRL_PIN(183, "UART2_TXD"),
- PINCTRL_PIN(184, "UART2_RTSB"),
- PINCTRL_PIN(185, "UART2_CTSB"),
+ PINCTRL_PIN(171, "GPPC_C_0"),
+ PINCTRL_PIN(172, "GPPC_C_1"),
+ PINCTRL_PIN(173, "GPPC_C_2"),
+ PINCTRL_PIN(174, "GPPC_C_3"),
+ PINCTRL_PIN(175, "GPPC_C_4"),
+ PINCTRL_PIN(176, "GPPC_C_5"),
+ PINCTRL_PIN(177, "SUSWARNB_SUSPWRDNACK"),
+ PINCTRL_PIN(178, "SUSACKB"),
+ PINCTRL_PIN(179, "UART0_RXD"),
+ PINCTRL_PIN(180, "UART0_TXD"),
+ PINCTRL_PIN(181, "UART0_RTSB"),
+ PINCTRL_PIN(182, "UART0_CTSB"),
+ PINCTRL_PIN(183, "UART1_RXD"),
+ PINCTRL_PIN(184, "UART1_TXD"),
+ PINCTRL_PIN(185, "UART1_RTSB"),
+ PINCTRL_PIN(186, "UART1_CTSB"),
+ PINCTRL_PIN(187, "I2C0_SDA"),
+ PINCTRL_PIN(188, "I2C0_SCL"),
+ PINCTRL_PIN(189, "I2C1_SDA"),
+ PINCTRL_PIN(190, "I2C1_SCL"),
+ PINCTRL_PIN(191, "UART2_RXD"),
+ PINCTRL_PIN(192, "UART2_TXD"),
+ PINCTRL_PIN(193, "UART2_RTSB"),
+ PINCTRL_PIN(194, "UART2_CTSB"),
/* HVCMOS */
- PINCTRL_PIN(186, "L_BKLTEN"),
- PINCTRL_PIN(187, "L_BKLTCTL"),
- PINCTRL_PIN(188, "L_VDDEN"),
- PINCTRL_PIN(189, "SYS_PWROK"),
- PINCTRL_PIN(190, "SYS_RESETB"),
- PINCTRL_PIN(191, "MLK_RSTB"),
+ PINCTRL_PIN(195, "L_BKLTEN"),
+ PINCTRL_PIN(196, "L_BKLTCTL"),
+ PINCTRL_PIN(197, "L_VDDEN"),
+ PINCTRL_PIN(198, "SYS_PWROK"),
+ PINCTRL_PIN(199, "SYS_RESETB"),
+ PINCTRL_PIN(200, "MLK_RSTB"),
/* GPP_E */
- PINCTRL_PIN(192, "ISH_GP_0"),
- PINCTRL_PIN(193, "ISH_GP_1"),
- PINCTRL_PIN(194, "IMGCLKOUT_1"),
- PINCTRL_PIN(195, "ISH_GP_2"),
- PINCTRL_PIN(196, "IMGCLKOUT_2"),
- PINCTRL_PIN(197, "SATA_LEDB"),
- PINCTRL_PIN(198, "IMGCLKOUT_3"),
- PINCTRL_PIN(199, "ISH_GP_3"),
- PINCTRL_PIN(200, "ISH_GP_4"),
- PINCTRL_PIN(201, "ISH_GP_5"),
- PINCTRL_PIN(202, "ISH_GP_6"),
- PINCTRL_PIN(203, "ISH_GP_7"),
- PINCTRL_PIN(204, "IMGCLKOUT_4"),
- PINCTRL_PIN(205, "DDPA_CTRLCLK"),
- PINCTRL_PIN(206, "DDPA_CTRLDATA"),
- PINCTRL_PIN(207, "DDPB_CTRLCLK"),
- PINCTRL_PIN(208, "DDPB_CTRLDATA"),
- PINCTRL_PIN(209, "DDPC_CTRLCLK"),
- PINCTRL_PIN(210, "DDPC_CTRLDATA"),
- PINCTRL_PIN(211, "IMGCLKOUT_5"),
- PINCTRL_PIN(212, "CNV_BRI_DT"),
- PINCTRL_PIN(213, "CNV_BRI_RSP"),
- PINCTRL_PIN(214, "CNV_RGI_DT"),
- PINCTRL_PIN(215, "CNV_RGI_RSP"),
+ PINCTRL_PIN(201, "ISH_GP_0"),
+ PINCTRL_PIN(202, "ISH_GP_1"),
+ PINCTRL_PIN(203, "IMGCLKOUT_1"),
+ PINCTRL_PIN(204, "ISH_GP_2"),
+ PINCTRL_PIN(205, "IMGCLKOUT_2"),
+ PINCTRL_PIN(206, "SATA_LEDB"),
+ PINCTRL_PIN(207, "IMGCLKOUT_3"),
+ PINCTRL_PIN(208, "ISH_GP_3"),
+ PINCTRL_PIN(209, "ISH_GP_4"),
+ PINCTRL_PIN(210, "ISH_GP_5"),
+ PINCTRL_PIN(211, "ISH_GP_6"),
+ PINCTRL_PIN(212, "ISH_GP_7"),
+ PINCTRL_PIN(213, "IMGCLKOUT_4"),
+ PINCTRL_PIN(214, "DDPA_CTRLCLK"),
+ PINCTRL_PIN(215, "DDPA_CTRLDATA"),
+ PINCTRL_PIN(216, "DDPB_CTRLCLK"),
+ PINCTRL_PIN(217, "DDPB_CTRLDATA"),
+ PINCTRL_PIN(218, "DDPC_CTRLCLK"),
+ PINCTRL_PIN(219, "DDPC_CTRLDATA"),
+ PINCTRL_PIN(220, "IMGCLKOUT_5"),
+ PINCTRL_PIN(221, "CNV_BRI_DT"),
+ PINCTRL_PIN(222, "CNV_BRI_RSP"),
+ PINCTRL_PIN(223, "CNV_RGI_DT"),
+ PINCTRL_PIN(224, "CNV_RGI_RSP"),
/* GPP_G */
- PINCTRL_PIN(216, "SD3_CMD"),
- PINCTRL_PIN(217, "SD3_D0"),
- PINCTRL_PIN(218, "SD3_D1"),
- PINCTRL_PIN(219, "SD3_D2"),
- PINCTRL_PIN(220, "SD3_D3"),
- PINCTRL_PIN(221, "SD3_CDB"),
- PINCTRL_PIN(222, "SD3_CLK"),
- PINCTRL_PIN(223, "SD3_WP"),
+ PINCTRL_PIN(225, "SD3_CMD"),
+ PINCTRL_PIN(226, "SD3_D0"),
+ PINCTRL_PIN(227, "SD3_D1"),
+ PINCTRL_PIN(228, "SD3_D2"),
+ PINCTRL_PIN(229, "SD3_D3"),
+ PINCTRL_PIN(230, "SD3_CDB"),
+ PINCTRL_PIN(231, "SD3_CLK"),
+ PINCTRL_PIN(232, "SD3_WP"),
};
static const struct intel_padgroup jsl_community0_gpps[] = {
JSL_GPP(0, 0, 19, 320), /* GPP_F */
- JSL_GPP(1, 20, 45, 32), /* GPP_B */
- JSL_GPP(2, 46, 66, 64), /* GPP_A */
- JSL_GPP(3, 67, 74, 96), /* GPP_S */
- JSL_GPP(4, 75, 82, 128), /* GPP_R */
+ JSL_GPP(1, 20, 28, INTEL_GPIO_BASE_NOMAP), /* SPI */
+ JSL_GPP(2, 29, 54, 32), /* GPP_B */
+ JSL_GPP(3, 55, 75, 64), /* GPP_A */
+ JSL_GPP(4, 76, 83, 96), /* GPP_S */
+ JSL_GPP(5, 84, 91, 128), /* GPP_R */
};
static const struct intel_padgroup jsl_community1_gpps[] = {
- JSL_GPP(0, 83, 106, 160), /* GPP_H */
- JSL_GPP(1, 107, 132, 192), /* GPP_D */
- JSL_GPP(2, 133, 161, 224), /* vGPIO */
- JSL_GPP(3, 162, 185, 256), /* GPP_C */
+ JSL_GPP(0, 92, 115, 160), /* GPP_H */
+ JSL_GPP(1, 116, 141, 192), /* GPP_D */
+ JSL_GPP(2, 142, 170, 224), /* vGPIO */
+ JSL_GPP(3, 171, 194, 256), /* GPP_C */
};
static const struct intel_padgroup jsl_community4_gpps[] = {
- JSL_GPP(0, 186, 191, INTEL_GPIO_BASE_NOMAP), /* HVCMOS */
- JSL_GPP(1, 192, 215, 288), /* GPP_E */
+ JSL_GPP(0, 195, 200, INTEL_GPIO_BASE_NOMAP), /* HVCMOS */
+ JSL_GPP(1, 201, 224, 288), /* GPP_E */
};
static const struct intel_padgroup jsl_community5_gpps[] = {
- JSL_GPP(0, 216, 223, INTEL_GPIO_BASE_ZERO), /* GPP_G */
+ JSL_GPP(0, 225, 232, INTEL_GPIO_BASE_ZERO), /* GPP_G */
};
static const struct intel_community jsl_communities[] = {
- JSL_COMMUNITY(0, 0, 82, jsl_community0_gpps),
- JSL_COMMUNITY(1, 83, 185, jsl_community1_gpps),
- JSL_COMMUNITY(2, 186, 215, jsl_community4_gpps),
- JSL_COMMUNITY(3, 216, 223, jsl_community5_gpps),
+ JSL_COMMUNITY(0, 0, 91, jsl_community0_gpps),
+ JSL_COMMUNITY(1, 92, 194, jsl_community1_gpps),
+ JSL_COMMUNITY(2, 195, 224, jsl_community4_gpps),
+ JSL_COMMUNITY(3, 225, 232, jsl_community5_gpps),
};
static const struct intel_pinctrl_soc_data jsl_soc_data = {
@@ -336,7 +347,6 @@ static struct platform_driver jsl_pinctrl_driver = {
.pm = &jsl_pinctrl_pm_ops,
},
};
-
module_platform_driver(jsl_pinctrl_driver);
MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c
index e4ff8da1b894..3ae141e0b421 100644
--- a/drivers/pinctrl/intel/pinctrl-merrifield.c
+++ b/drivers/pinctrl/intel/pinctrl-merrifield.c
@@ -745,6 +745,10 @@ static int mrfld_config_set_pin(struct mrfld_pinctrl *mp, unsigned int pin,
mask |= BUFCFG_Px_EN_MASK | BUFCFG_PUPD_VAL_MASK;
bits |= BUFCFG_PU_EN;
+ /* Set default strength value in case none is given */
+ if (arg == 1)
+ arg = 20000;
+
switch (arg) {
case 50000:
bits |= BUFCFG_PUPD_VAL_50K << BUFCFG_PUPD_VAL_SHIFT;
@@ -765,6 +769,10 @@ static int mrfld_config_set_pin(struct mrfld_pinctrl *mp, unsigned int pin,
mask |= BUFCFG_Px_EN_MASK | BUFCFG_PUPD_VAL_MASK;
bits |= BUFCFG_PD_EN;
+ /* Set default strength value in case none is given */
+ if (arg == 1)
+ arg = 20000;
+
switch (arg) {
case 50000:
bits |= BUFCFG_PUPD_VAL_50K << BUFCFG_PUPD_VAL_SHIFT;
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 4aea3e05e8c6..899c16c17b6d 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -429,7 +429,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
pin_reg &= ~BIT(LEVEL_TRIG_OFF);
pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
pin_reg |= ACTIVE_HIGH << ACTIVE_LEVEL_OFF;
- pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
irq_set_handler_locked(d, handle_edge_irq);
break;
@@ -437,7 +436,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
pin_reg &= ~BIT(LEVEL_TRIG_OFF);
pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
pin_reg |= ACTIVE_LOW << ACTIVE_LEVEL_OFF;
- pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
irq_set_handler_locked(d, handle_edge_irq);
break;
@@ -445,7 +443,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
pin_reg &= ~BIT(LEVEL_TRIG_OFF);
pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
pin_reg |= BOTH_EADGE << ACTIVE_LEVEL_OFF;
- pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
irq_set_handler_locked(d, handle_edge_irq);
break;
@@ -453,8 +450,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
pin_reg |= LEVEL_TRIGGER << LEVEL_TRIG_OFF;
pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
pin_reg |= ACTIVE_HIGH << ACTIVE_LEVEL_OFF;
- pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
- pin_reg |= DB_TYPE_PRESERVE_LOW_GLITCH << DB_CNTRL_OFF;
irq_set_handler_locked(d, handle_level_irq);
break;
@@ -462,8 +457,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
pin_reg |= LEVEL_TRIGGER << LEVEL_TRIG_OFF;
pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
pin_reg |= ACTIVE_LOW << ACTIVE_LEVEL_OFF;
- pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
- pin_reg |= DB_TYPE_PRESERVE_HIGH_GLITCH << DB_CNTRL_OFF;
irq_set_handler_locked(d, handle_level_irq);
break;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 41cd66fc7d81..e158d3d62056 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -37,7 +37,6 @@
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/irq_poll.h>
-#include <linux/blk-mq-pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -114,10 +113,6 @@ unsigned int enable_sdev_max_qd;
module_param(enable_sdev_max_qd, int, 0444);
MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0");
-int host_tagset_enable = 1;
-module_param(host_tagset_enable, int, 0444);
-MODULE_PARM_DESC(host_tagset_enable, "Shared host tagset enable/disable Default: enable(1)");
-
MODULE_LICENSE("GPL");
MODULE_VERSION(MEGASAS_VERSION);
MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
@@ -3124,19 +3119,6 @@ megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
return 0;
}
-static int megasas_map_queues(struct Scsi_Host *shost)
-{
- struct megasas_instance *instance;
-
- instance = (struct megasas_instance *)shost->hostdata;
-
- if (shost->nr_hw_queues == 1)
- return 0;
-
- return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
- instance->pdev, instance->low_latency_index_start);
-}
-
static void megasas_aen_polling(struct work_struct *work);
/**
@@ -3445,7 +3427,6 @@ static struct scsi_host_template megasas_template = {
.eh_timed_out = megasas_reset_timer,
.shost_attrs = megaraid_host_attrs,
.bios_param = megasas_bios_param,
- .map_queues = megasas_map_queues,
.change_queue_depth = scsi_change_queue_depth,
.max_segment_size = 0xffffffff,
};
@@ -6827,26 +6808,6 @@ static int megasas_io_attach(struct megasas_instance *instance)
host->max_lun = MEGASAS_MAX_LUN;
host->max_cmd_len = 16;
- /* Use shared host tagset only for fusion adaptors
- * if there are managed interrupts (smp affinity enabled case).
- * Single msix_vectors in kdump, so shared host tag is also disabled.
- */
-
- host->host_tagset = 0;
- host->nr_hw_queues = 1;
-
- if ((instance->adapter_type != MFI_SERIES) &&
- (instance->msix_vectors > instance->low_latency_index_start) &&
- host_tagset_enable &&
- instance->smp_affinity_enable) {
- host->host_tagset = 1;
- host->nr_hw_queues = instance->msix_vectors -
- instance->low_latency_index_start;
- }
-
- dev_info(&instance->pdev->dev,
- "Max firmware commands: %d shared with nr_hw_queues = %d\n",
- instance->max_fw_cmds, host->nr_hw_queues);
/*
* Notify the mid-layer about the new controller
*/
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index fd607287608e..b0c01cf0428f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -359,29 +359,24 @@ megasas_get_msix_index(struct megasas_instance *instance,
{
int sdev_busy;
- /* TBD - if sml remove device_busy in future, driver
- * should track counter in internal structure.
- */
- sdev_busy = atomic_read(&scmd->device->device_busy);
+ /* nr_hw_queue = 1 for MegaRAID */
+ struct blk_mq_hw_ctx *hctx =
+ scmd->device->request_queue->queue_hw_ctx[0];
+
+ sdev_busy = atomic_read(&hctx->nr_active);
if (instance->perf_mode == MR_BALANCED_PERF_MODE &&
- sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH)) {
+ sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH))
cmd->request_desc->SCSIIO.MSIxIndex =
mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) /
MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start);
- } else if (instance->msix_load_balance) {
+ else if (instance->msix_load_balance)
cmd->request_desc->SCSIIO.MSIxIndex =
(mega_mod64(atomic64_add_return(1, &instance->total_io_count),
instance->msix_vectors));
- } else if (instance->host->nr_hw_queues > 1) {
- u32 tag = blk_mq_unique_tag(scmd->request);
-
- cmd->request_desc->SCSIIO.MSIxIndex = blk_mq_unique_tag_to_hwq(tag) +
- instance->low_latency_index_start;
- } else {
+ else
cmd->request_desc->SCSIIO.MSIxIndex =
instance->reply_map[raw_smp_processor_id()];
- }
}
/**
@@ -961,6 +956,9 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
if (megasas_alloc_cmdlist_fusion(instance))
goto fail_exit;
+ dev_info(&instance->pdev->dev, "Configured max firmware commands: %d\n",
+ instance->max_fw_cmds);
+
/* The first 256 bytes (SMID 0) is not used. Don't add to the cmd list */
io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
io_req_base_phys = fusion->io_request_frames_phys + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
@@ -1104,9 +1102,8 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
MR_HIGH_IOPS_QUEUE_COUNT) && cur_intr_coalescing)
instance->perf_mode = MR_BALANCED_PERF_MODE;
- dev_info(&instance->pdev->dev, "Performance mode :%s (latency index = %d)\n",
- MEGASAS_PERF_MODE_2STR(instance->perf_mode),
- instance->low_latency_index_start);
+ dev_info(&instance->pdev->dev, "Performance mode :%s\n",
+ MEGASAS_PERF_MODE_2STR(instance->perf_mode));
instance->fw_sync_cache_support = (scratch_pad_1 &
MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index e4cc92bc4d94..bb940cbcbb5d 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -6459,7 +6459,7 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
r = _base_handshake_req_reply_wait(ioc,
sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
- sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10);
+ sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 30);
if (r != 0) {
ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 0f2b681449e6..edd26a2570fa 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -664,7 +664,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request = NULL;
struct _pcie_device *pcie_device = NULL;
u16 smid;
- u8 timeout;
+ unsigned long timeout;
u8 issue_reset;
u32 sz, sz_arg;
void *psge;
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 0c65fbd41035..99c8ff81de74 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1246,6 +1246,11 @@ static void storvsc_on_channel_callback(void *context)
request = (struct storvsc_cmd_request *)
((unsigned long)desc->trans_id);
+ if (hv_pkt_datalen(desc) < sizeof(struct vstor_packet) - vmscsi_size_delta) {
+ dev_err(&device->device, "Invalid packet len\n");
+ continue;
+ }
+
if (request == &stor_device->init_request ||
request == &stor_device->reset_request) {
memcpy(&request->vstor_packet, packet,
@@ -1994,8 +1999,10 @@ static int storvsc_probe(struct hv_device *device,
alloc_ordered_workqueue("storvsc_error_wq_%d",
WQ_MEM_RECLAIM,
host->host_no);
- if (!host_dev->handle_error_wq)
+ if (!host_dev->handle_error_wq) {
+ ret = -ENOMEM;
goto err_out2;
+ }
INIT_WORK(&host_dev->host_scan_work, storvsc_host_scan);
/* Register the HBA and start the scsi bus scan */
ret = scsi_add_host(host, &device->device);
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 977ba91f4d0e..82c46b200c34 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -1976,7 +1976,9 @@ static int complete_rpm(struct device *dev, void *data)
static void remove_unplugged_switch(struct tb_switch *sw)
{
- pm_runtime_get_sync(sw->dev.parent);
+ struct device *parent = get_device(sw->dev.parent);
+
+ pm_runtime_get_sync(parent);
/*
* Signal this and switches below for rpm_complete because
@@ -1987,8 +1989,10 @@ static void remove_unplugged_switch(struct tb_switch *sw)
bus_for_each_dev(&tb_bus_type, &sw->dev, NULL, complete_rpm);
tb_switch_remove(sw);
- pm_runtime_mark_last_busy(sw->dev.parent);
- pm_runtime_put_autosuspend(sw->dev.parent);
+ pm_runtime_mark_last_busy(parent);
+ pm_runtime_put_autosuspend(parent);
+
+ put_device(parent);
}
static void icm_free_unplugged_children(struct tb_switch *sw)
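The get_device()/put_device() pair is about lifetime: once tb_switch_remove() runs, the child's struct device (and any access to the parent through sw->dev.parent) may already be gone, so the parent pointer has to be pinned before removal and the runtime-PM calls made against the pinned reference. Condensed:

    struct device *parent = get_device(sw->dev.parent); /* pin before removal */

    pm_runtime_get_sync(parent);
    tb_switch_remove(sw);                /* sw may be freed from here on */
    pm_runtime_mark_last_busy(parent);
    pm_runtime_put_autosuspend(parent);
    put_device(parent);                  /* drop our reference */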
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 9f8b9a567b35..56ade99ef99f 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -2897,10 +2897,14 @@ void __do_SAK(struct tty_struct *tty)
struct task_struct *g, *p;
struct pid *session;
int i;
+ unsigned long flags;
if (!tty)
return;
- session = tty->session;
+
+ spin_lock_irqsave(&tty->ctrl_lock, flags);
+ session = get_pid(tty->session);
+ spin_unlock_irqrestore(&tty->ctrl_lock, flags);
tty_ldisc_flush(tty);
@@ -2932,6 +2936,7 @@ void __do_SAK(struct tty_struct *tty)
task_unlock(p);
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
+ put_pid(session);
#endif
}
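The __do_SAK() change follows the locking rule the tty_jobctrl.c hunks below establish: tty->session is only stable under tty->ctrl_lock, so the task-list walk takes a counted pid reference under the lock and drops it when done. Condensed:

    spin_lock_irqsave(&tty->ctrl_lock, flags);
    session = get_pid(tty->session);     /* the pid cannot be freed under us now */
    spin_unlock_irqrestore(&tty->ctrl_lock, flags);

    /* ... walk the task list comparing task_session(p) against session ... */

    put_pid(session);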
diff --git a/drivers/tty/tty_jobctrl.c b/drivers/tty/tty_jobctrl.c
index 28a23a0fef21..aa6d0537b379 100644
--- a/drivers/tty/tty_jobctrl.c
+++ b/drivers/tty/tty_jobctrl.c
@@ -103,8 +103,8 @@ static void __proc_set_tty(struct tty_struct *tty)
put_pid(tty->session);
put_pid(tty->pgrp);
tty->pgrp = get_pid(task_pgrp(current));
- spin_unlock_irqrestore(&tty->ctrl_lock, flags);
tty->session = get_pid(task_session(current));
+ spin_unlock_irqrestore(&tty->ctrl_lock, flags);
if (current->signal->tty) {
tty_debug(tty, "current tty %s not NULL!!\n",
current->signal->tty->name);
@@ -293,20 +293,23 @@ void disassociate_ctty(int on_exit)
spin_lock_irq(&current->sighand->siglock);
put_pid(current->signal->tty_old_pgrp);
current->signal->tty_old_pgrp = NULL;
-
tty = tty_kref_get(current->signal->tty);
+ spin_unlock_irq(&current->sighand->siglock);
+
if (tty) {
unsigned long flags;
+
+ tty_lock(tty);
spin_lock_irqsave(&tty->ctrl_lock, flags);
put_pid(tty->session);
put_pid(tty->pgrp);
tty->session = NULL;
tty->pgrp = NULL;
spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+ tty_unlock(tty);
tty_kref_put(tty);
}
- spin_unlock_irq(&current->sighand->siglock);
/* Now clear signal->tty under the lock */
read_lock(&tasklist_lock);
session_clear_tty(task_session(current));
@@ -477,14 +480,19 @@ static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t
return -ENOTTY;
if (retval)
return retval;
- if (!current->signal->tty ||
- (current->signal->tty != real_tty) ||
- (real_tty->session != task_session(current)))
- return -ENOTTY;
+
if (get_user(pgrp_nr, p))
return -EFAULT;
if (pgrp_nr < 0)
return -EINVAL;
+
+ spin_lock_irq(&real_tty->ctrl_lock);
+ if (!current->signal->tty ||
+ (current->signal->tty != real_tty) ||
+ (real_tty->session != task_session(current))) {
+ retval = -ENOTTY;
+ goto out_unlock_ctrl;
+ }
rcu_read_lock();
pgrp = find_vpid(pgrp_nr);
retval = -ESRCH;
@@ -494,12 +502,12 @@ static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t
if (session_of_pgrp(pgrp) != task_session(current))
goto out_unlock;
retval = 0;
- spin_lock_irq(&tty->ctrl_lock);
put_pid(real_tty->pgrp);
real_tty->pgrp = get_pid(pgrp);
- spin_unlock_irq(&tty->ctrl_lock);
out_unlock:
rcu_read_unlock();
+out_unlock_ctrl:
+ spin_unlock_irq(&real_tty->ctrl_lock);
return retval;
}
@@ -511,20 +519,30 @@ out_unlock:
*
* Obtain the session id of the tty. If there is no session
* return an error.
- *
- * Locking: none. Reference to current->signal->tty is safe.
*/
static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
{
+ unsigned long flags;
+ pid_t sid;
+
/*
* (tty == real_tty) is a cheap way of
* testing if the tty is NOT a master pty.
*/
if (tty == real_tty && current->signal->tty != real_tty)
return -ENOTTY;
+
+ spin_lock_irqsave(&real_tty->ctrl_lock, flags);
if (!real_tty->session)
- return -ENOTTY;
- return put_user(pid_vnr(real_tty->session), p);
+ goto err;
+ sid = pid_vnr(real_tty->session);
+ spin_unlock_irqrestore(&real_tty->ctrl_lock, flags);
+
+ return put_user(sid, p);
+
+err:
+ spin_unlock_irqrestore(&real_tty->ctrl_lock, flags);
+ return -ENOTTY;
}
/*
diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
index a0f73d4711ae..039ab5d2435e 100644
--- a/drivers/usb/cdns3/core.c
+++ b/drivers/usb/cdns3/core.c
@@ -427,7 +427,6 @@ static irqreturn_t cdns3_wakeup_irq(int irq, void *data)
*/
static int cdns3_probe(struct platform_device *pdev)
{
- struct usb_role_switch_desc sw_desc = { };
struct device *dev = &pdev->dev;
struct resource *res;
struct cdns3 *cdns;
@@ -529,18 +528,21 @@ static int cdns3_probe(struct platform_device *pdev)
if (ret)
goto err2;
- sw_desc.set = cdns3_role_set;
- sw_desc.get = cdns3_role_get;
- sw_desc.allow_userspace_control = true;
- sw_desc.driver_data = cdns;
- if (device_property_read_bool(dev, "usb-role-switch"))
+ if (device_property_read_bool(dev, "usb-role-switch")) {
+ struct usb_role_switch_desc sw_desc = { };
+
+ sw_desc.set = cdns3_role_set;
+ sw_desc.get = cdns3_role_get;
+ sw_desc.allow_userspace_control = true;
+ sw_desc.driver_data = cdns;
sw_desc.fwnode = dev->fwnode;
- cdns->role_sw = usb_role_switch_register(dev, &sw_desc);
- if (IS_ERR(cdns->role_sw)) {
- ret = PTR_ERR(cdns->role_sw);
- dev_warn(dev, "Unable to register Role Switch\n");
- goto err3;
+ cdns->role_sw = usb_role_switch_register(dev, &sw_desc);
+ if (IS_ERR(cdns->role_sw)) {
+ ret = PTR_ERR(cdns->role_sw);
+ dev_warn(dev, "Unable to register Role Switch\n");
+ goto err3;
+ }
}
if (cdns->wakeup_irq) {
@@ -551,7 +553,7 @@ static int cdns3_probe(struct platform_device *pdev)
if (ret) {
dev_err(cdns->dev, "couldn't register wakeup irq handler\n");
- goto err3;
+ goto err4;
}
}
@@ -582,7 +584,8 @@ static int cdns3_probe(struct platform_device *pdev)
return 0;
err4:
cdns3_drd_exit(cdns);
- usb_role_switch_unregister(cdns->role_sw);
+ if (cdns->role_sw)
+ usb_role_switch_unregister(cdns->role_sw);
err3:
set_phy_power_off(cdns);
err2:
diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c
index 365f30fb1159..0aa85cc07ff1 100644
--- a/drivers/usb/cdns3/gadget.c
+++ b/drivers/usb/cdns3/gadget.c
@@ -1260,6 +1260,7 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
priv_req->end_trb = priv_ep->enqueue;
cdns3_ep_inc_enq(priv_ep);
trb = priv_ep->trb_pool + priv_ep->enqueue;
+ trb->length = 0;
} while (sg_iter < num_trb);
trb = priv_req->trb;
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 046f770a76da..c727cb5de871 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1324,7 +1324,7 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code,
case FUNCTIONFS_ENDPOINT_DESC:
{
int desc_idx;
- struct usb_endpoint_descriptor *desc;
+ struct usb_endpoint_descriptor desc1, *desc;
switch (epfile->ffs->gadget->speed) {
case USB_SPEED_SUPER:
@@ -1336,10 +1336,12 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code,
default:
desc_idx = 0;
}
+
desc = epfile->ep->descs[desc_idx];
+ memcpy(&desc1, desc, desc->bLength);
spin_unlock_irq(&epfile->ffs->eps_lock);
- ret = copy_to_user((void __user *)value, desc, desc->bLength);
+ ret = copy_to_user((void __user *)value, &desc1, desc1.bLength);
if (ret)
ret = -EFAULT;
return ret;
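The stack copy exists because copy_to_user() can fault and sleep, so it must not run under eps_lock (a spinlock): the descriptor is snapshotted while the lock is held and copied out only after unlocking. The general shape of the pattern:

    spin_lock_irq(&epfile->ffs->eps_lock);
    memcpy(&desc1, desc, desc->bLength);           /* snapshot under the lock */
    spin_unlock_irq(&epfile->ffs->eps_lock);       /* sleeping is allowed again */

    if (copy_to_user((void __user *)value, &desc1, desc1.bLength))
            return -EFAULT;                        /* any fault handled outside the lock */
    return 0;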
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index 9ccdf2c216b5..6374501ba139 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -91,14 +91,14 @@ static int omap_ohci_transceiver_power(struct ohci_omap_priv *priv, int on)
| ((1 << 5/*usb1*/) | (1 << 3/*usb2*/)),
INNOVATOR_FPGA_CAM_USB_CONTROL);
else if (priv->power)
- gpiod_set_value(priv->power, 0);
+ gpiod_set_value_cansleep(priv->power, 0);
} else {
if (machine_is_omap_innovator() && cpu_is_omap1510())
__raw_writeb(__raw_readb(INNOVATOR_FPGA_CAM_USB_CONTROL)
& ~((1 << 5/*usb1*/) | (1 << 3/*usb2*/)),
INNOVATOR_FPGA_CAM_USB_CONTROL);
else if (priv->power)
- gpiod_set_value(priv->power, 1);
+ gpiod_set_value_cansleep(priv->power, 1);
}
return 0;
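
Note: the ohci-omap hunk switches to the _cansleep GPIO accessor. gpiod_set_value() is only valid for lines that can be driven from atomic context; gpiod_set_value_cansleep() is required when the line may sit behind a sleeping bus (an I2C or SPI expander, for example) and the caller runs in task context, as this transceiver-power helper does. A small sketch of the distinction, with a hypothetical helper name:

	#include <linux/gpio/consumer.h>

	/* Hypothetical helper: called from process context, so sleeping is allowed. */
	static void example_set_vbus(struct gpio_desc *power, bool on)
	{
		/*
		 * gpiod_set_value() would warn if @power lives on a sleeping bus;
		 * the _cansleep variant is the safe choice outside atomic context.
		 */
		gpiod_set_value_cansleep(power, on ? 1 : 0);
	}
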
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index a2e2f56c88cd..28deaaec581f 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -81,10 +81,11 @@
#define CH341_QUIRK_SIMULATE_BREAK BIT(1)
static const struct usb_device_id id_table[] = {
- { USB_DEVICE(0x4348, 0x5523) },
+ { USB_DEVICE(0x1a86, 0x5512) },
+ { USB_DEVICE(0x1a86, 0x5523) },
{ USB_DEVICE(0x1a86, 0x7522) },
{ USB_DEVICE(0x1a86, 0x7523) },
- { USB_DEVICE(0x1a86, 0x5523) },
+ { USB_DEVICE(0x4348, 0x5523) },
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index 5ee48b0650c4..5f6b82ebccc5 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -276,12 +276,12 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
priv->cfg.unknown2 = cfg->unknown2;
spin_unlock_irqrestore(&priv->lock, flags);
+ kfree(cfg);
+
/* READ_ON and urb submission */
rc = usb_serial_generic_open(tty, port);
- if (rc) {
- retval = rc;
- goto err_free_cfg;
- }
+ if (rc)
+ return rc;
rc = usb_control_msg(port->serial->dev,
usb_sndctrlpipe(port->serial->dev, 0),
@@ -324,8 +324,6 @@ err_disable_read:
KLSI_TIMEOUT);
err_generic_close:
usb_serial_generic_close(port);
-err_free_cfg:
- kfree(cfg);
return retval;
}
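
Note: the kl5kusb105 hunk frees the temporary cfg buffer as soon as its contents have been copied into priv->cfg, so the later failure paths can return directly instead of unwinding through an err_free_cfg label. A rough sketch of that "consume and free early, simplify the unwind" idea, entirely with hypothetical names and types:

	/* Hypothetical open() path: free the scratch buffer once it is consumed. */
	static int example_open(struct example_port *port)
	{
		struct example_cfg *cfg;

		cfg = kmalloc(sizeof(*cfg), GFP_KERNEL);
		if (!cfg)
			return -ENOMEM;

		example_read_config(port, cfg);

		spin_lock_irq(&port->lock);
		port->cfg = *cfg;		/* copy; the scratch buffer is now dead */
		spin_unlock_irq(&port->lock);

		kfree(cfg);			/* no later error label needs to free it */

		return example_enable(port);	/* failures can simply return */
	}
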
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 54ca85cc920d..56d6f6d83bd7 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -419,6 +419,7 @@ static void option_instat_callback(struct urb *urb);
#define CINTERION_PRODUCT_PH8 0x0053
#define CINTERION_PRODUCT_AHXX 0x0055
#define CINTERION_PRODUCT_PLXX 0x0060
+#define CINTERION_PRODUCT_EXS82 0x006c
#define CINTERION_PRODUCT_PH8_2RMNET 0x0082
#define CINTERION_PRODUCT_PH8_AUDIO 0x0083
#define CINTERION_PRODUCT_AHXX_2RMNET 0x0084
@@ -1105,9 +1106,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0xff, 0xff),
.driver_info = NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) },
- { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96, 0xff, 0xff, 0xff),
- .driver_info = NUMEP2 },
- { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96, 0xff, 0, 0) },
+ { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
@@ -1902,6 +1902,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_CLS8, 0xff),
.driver_info = RSVD(0) | RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EXS82, 0xff) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
@@ -2046,12 +2047,13 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
{ USB_DEVICE(0x0489, 0xe0b5), /* Foxconn T77W968 ESIM */
.driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
- { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 */
+ { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 (IOT version) */
.driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
{ USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */
.driver_info = RSVD(4) | RSVD(5) },
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */
.driver_info = RSVD(6) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 560efd1479ba..e5a971b83e3f 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -92,7 +92,7 @@ static int slave_alloc (struct scsi_device *sdev)
static int slave_configure(struct scsi_device *sdev)
{
struct us_data *us = host_to_us(sdev->host);
- struct device *dev = sdev->host->dma_dev;
+ struct device *dev = us->pusb_dev->bus->sysdev;
/*
* Many devices have trouble transferring more than 32KB at a time,
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index c8a577309e8f..652d6d6f1f36 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -837,24 +837,17 @@ static int uas_slave_alloc(struct scsi_device *sdev)
*/
blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
+ if (devinfo->flags & US_FL_MAX_SECTORS_64)
+ blk_queue_max_hw_sectors(sdev->request_queue, 64);
+ else if (devinfo->flags & US_FL_MAX_SECTORS_240)
+ blk_queue_max_hw_sectors(sdev->request_queue, 240);
+
return 0;
}
static int uas_slave_configure(struct scsi_device *sdev)
{
struct uas_dev_info *devinfo = sdev->hostdata;
- struct device *dev = sdev->host->dma_dev;
-
- if (devinfo->flags & US_FL_MAX_SECTORS_64)
- blk_queue_max_hw_sectors(sdev->request_queue, 64);
- else if (devinfo->flags & US_FL_MAX_SECTORS_240)
- blk_queue_max_hw_sectors(sdev->request_queue, 240);
- else if (devinfo->udev->speed >= USB_SPEED_SUPER)
- blk_queue_max_hw_sectors(sdev->request_queue, 2048);
-
- blk_queue_max_hw_sectors(sdev->request_queue,
- min_t(size_t, queue_max_hw_sectors(sdev->request_queue),
- dma_max_mapping_size(dev) >> SECTOR_SHIFT));
if (devinfo->flags & US_FL_NO_REPORT_OPCODES)
sdev->no_report_opcodes = 1;
@@ -1040,7 +1033,7 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
shost->can_queue = devinfo->qdepth - 2;
usb_set_intfdata(intf, shost);
- result = scsi_add_host_with_dma(shost, &intf->dev, udev->bus->sysdev);
+ result = scsi_add_host(shost, &intf->dev);
if (result)
goto free_streams;
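
Note: the storage hunks go back to plain scsi_add_host() and look up the DMA-capable device (the host controller's sysdev) explicitly where the transfer-size limit is computed. A sketch of that limit, mirroring the pattern visible in the removed uas lines above, assuming a request queue and a DMA device are already at hand:

	#include <linux/blkdev.h>
	#include <linux/dma-mapping.h>

	/* Hypothetical: clamp a queue to what @dma_dev can map in one go. */
	static void example_cap_hw_sectors(struct request_queue *q,
					   struct device *dma_dev)
	{
		unsigned int cap = dma_max_mapping_size(dma_dev) >> SECTOR_SHIFT;

		blk_queue_max_hw_sectors(q, min_t(unsigned int,
						  queue_max_hw_sectors(q), cap));
	}
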
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index c2ef367cf257..94a64729dc27 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -1049,9 +1049,8 @@ int usb_stor_probe2(struct us_data *us)
goto BadDevice;
usb_autopm_get_interface_no_resume(us->pusb_intf);
snprintf(us->scsi_name, sizeof(us->scsi_name), "usb-storage %s",
- dev_name(dev));
- result = scsi_add_host_with_dma(us_to_host(us), dev,
- us->pusb_dev->bus->sysdev);
+ dev_name(&us->pusb_intf->dev));
+ result = scsi_add_host(us_to_host(us), dev);
if (result) {
dev_warn(dev,
"Unable to add the scsi host\n");