Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/bonding/bond_main.c                     |   6
-rw-r--r--  drivers/net/ethernet/marvell/mvmdio.c               |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c       |  21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c   |  76
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c      |  54
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_thermal.c  | 115
-rw-r--r--  drivers/net/ethernet/microsoft/mana/gdma_main.c     |   6
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c           |  28
-rw-r--r--  drivers/net/ethernet/renesas/ravb.h                 |   1
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c            |  18
-rw-r--r--  drivers/net/ethernet/seeq/ether3.c                  |   2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c   |   2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c     |   1
-rw-r--r--  drivers/net/ethernet/wangxun/Kconfig                |   3
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c   |  37
-rw-r--r--  drivers/net/hamradio/6pack.c                        |  60
-rw-r--r--  drivers/net/mctp/mctp-serial.c                      |  23
-rw-r--r--  drivers/net/phy/aquantia/aquantia_firmware.c        |  42
-rw-r--r--  drivers/net/phy/aquantia/aquantia_leds.c            |   3
-rw-r--r--  drivers/net/phy/aquantia/aquantia_main.c            |  24
-rw-r--r--  drivers/net/tun.c                                   |   6
-rw-r--r--  drivers/net/usb/usbnet.c                            |  37
-rw-r--r--  drivers/net/virtio_net.c                            |  10
23 files changed, 315 insertions, 262 deletions
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b560644ee1b1..b1bffd8e9a95 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -5610,9 +5610,9 @@ bond_xdp_get_xmit_slave(struct net_device *bond_dev, struct xdp_buff *xdp)
break;
default:
- /* Should never happen. Mode guarded by bond_xdp_check() */
- netdev_err(bond_dev, "Unknown bonding mode %d for xdp xmit\n", BOND_MODE(bond));
- WARN_ON_ONCE(1);
+ if (net_ratelimit())
+ netdev_err(bond_dev, "Unknown bonding mode %d for xdp xmit\n",
+ BOND_MODE(bond));
return NULL;
}
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 9190eff6c0bb..e1d003fdbc2e 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -104,7 +104,7 @@ static int orion_mdio_wait_ready(const struct orion_mdio_ops *ops,
return 0;
} else {
/* wait_event_timeout does not guarantee a delay of at
- * least one whole jiffie, so timeout must be no less
+ * least one whole jiffy, so timeout must be no less
* than two.
*/
timeout = max(usecs_to_jiffies(MVMDIO_SMI_TIMEOUT), 2);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 9af8ddb4a78f..a64d96effb9e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1887,10 +1887,12 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
throttle_op = mlx5_cmd_is_throttle_opcode(opcode);
if (throttle_op) {
- /* atomic context may not sleep */
- if (callback)
- return -EINVAL;
- down(&dev->cmd.vars.throttle_sem);
+ if (callback) {
+ if (down_trylock(&dev->cmd.vars.throttle_sem))
+ return -EBUSY;
+ } else {
+ down(&dev->cmd.vars.throttle_sem);
+ }
}
pages_queue = is_manage_pages(in);
@@ -2096,10 +2098,19 @@ static void mlx5_cmd_exec_cb_handler(int status, void *_work)
{
struct mlx5_async_work *work = _work;
struct mlx5_async_ctx *ctx;
+ struct mlx5_core_dev *dev;
+ u16 opcode;
ctx = work->ctx;
- status = cmd_status_err(ctx->dev, status, work->opcode, work->op_mod, work->out);
+ dev = ctx->dev;
+ opcode = work->opcode;
+ status = cmd_status_err(dev, status, work->opcode, work->op_mod, work->out);
work->user_callback(status, work);
+ /* Can't access "work" from this point on. It could have been freed in
+ * the callback.
+ */
+ if (mlx5_cmd_is_throttle_opcode(opcode))
+ up(&dev->cmd.vars.throttle_sem);
if (atomic_dec_and_test(&ctx->num_inflight))
complete(&ctx->inflight_done);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index cf8045b92689..8577db3308cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -445,6 +445,34 @@ static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 *ports)
return mlx5_cmd_modify_lag(dev0, ldev->ports, ports);
}
+static struct net_device *mlx5_lag_active_backup_get_netdev(struct mlx5_core_dev *dev)
+{
+ struct net_device *ndev = NULL;
+ struct mlx5_lag *ldev;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&lag_lock, flags);
+ ldev = mlx5_lag_dev(dev);
+
+ if (!ldev)
+ goto unlock;
+
+ for (i = 0; i < ldev->ports; i++)
+ if (ldev->tracker.netdev_state[i].tx_enabled)
+ ndev = ldev->pf[i].netdev;
+ if (!ndev)
+ ndev = ldev->pf[ldev->ports - 1].netdev;
+
+ if (ndev)
+ dev_hold(ndev);
+
+unlock:
+ spin_unlock_irqrestore(&lag_lock, flags);
+
+ return ndev;
+}
+
void mlx5_modify_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker)
{
@@ -477,9 +505,18 @@ void mlx5_modify_lag(struct mlx5_lag *ldev,
}
}
- if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
- !(ldev->mode == MLX5_LAG_MODE_ROCE))
- mlx5_lag_drop_rule_setup(ldev, tracker);
+ if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
+ struct net_device *ndev = mlx5_lag_active_backup_get_netdev(dev0);
+
+ if (!(ldev->mode == MLX5_LAG_MODE_ROCE))
+ mlx5_lag_drop_rule_setup(ldev, tracker);
+ /** Only sriov and roce lag should have tracker->tx_type set so
+ * no need to check the mode
+ */
+ blocking_notifier_call_chain(&dev0->priv.lag_nh,
+ MLX5_DRIVER_EVENT_ACTIVE_BACKUP_LAG_CHANGE_LOWERSTATE,
+ ndev);
+ }
}
static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
@@ -613,6 +650,7 @@ static int mlx5_create_lag(struct mlx5_lag *ldev,
mlx5_core_err(dev0,
"Failed to deactivate RoCE LAG; driver restart required\n");
}
+ BLOCKING_INIT_NOTIFIER_HEAD(&dev0->priv.lag_nh);
return err;
}
@@ -1492,38 +1530,6 @@ void mlx5_lag_enable_change(struct mlx5_core_dev *dev)
mlx5_queue_bond_work(ldev, 0);
}
-struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
-{
- struct net_device *ndev = NULL;
- struct mlx5_lag *ldev;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&lag_lock, flags);
- ldev = mlx5_lag_dev(dev);
-
- if (!(ldev && __mlx5_lag_is_roce(ldev)))
- goto unlock;
-
- if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
- for (i = 0; i < ldev->ports; i++)
- if (ldev->tracker.netdev_state[i].tx_enabled)
- ndev = ldev->pf[i].netdev;
- if (!ndev)
- ndev = ldev->pf[ldev->ports - 1].netdev;
- } else {
- ndev = ldev->pf[MLX5_LAG_P1].netdev;
- }
- if (ndev)
- dev_hold(ndev);
-
-unlock:
- spin_unlock_irqrestore(&lag_lock, flags);
-
- return ndev;
-}
-EXPORT_SYMBOL(mlx5_lag_get_roce_netdev);
-
u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
struct net_device *slave)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 8b0abd61eca6..220a9ac75c8b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -454,8 +454,8 @@ static int handle_hca_cap_atomic(struct mlx5_core_dev *dev, void *set_ctx)
static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
{
+ bool do_set = false, mem_page_fault = false;
void *set_hca_cap;
- bool do_set = false;
int err;
if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) ||
@@ -470,6 +470,17 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_ODP]->cur,
MLX5_ST_SZ_BYTES(odp_cap));
+ /* For best performance, enable memory scheme ODP only when
+ * it has page prefetch enabled.
+ */
+ if (MLX5_CAP_ODP_MAX(dev, mem_page_fault) &&
+ MLX5_CAP_ODP_MAX(dev, memory_page_fault_scheme_cap.page_prefetch)) {
+ mem_page_fault = true;
+ do_set = true;
+ MLX5_SET(odp_cap, set_hca_cap, mem_page_fault, mem_page_fault);
+ goto set;
+ }
+
#define ODP_CAP_SET_MAX(dev, field) \
do { \
u32 _res = MLX5_CAP_ODP_MAX(dev, field); \
@@ -479,25 +490,28 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
} \
} while (0)
- ODP_CAP_SET_MAX(dev, ud_odp_caps.srq_receive);
- ODP_CAP_SET_MAX(dev, rc_odp_caps.srq_receive);
- ODP_CAP_SET_MAX(dev, xrc_odp_caps.srq_receive);
- ODP_CAP_SET_MAX(dev, xrc_odp_caps.send);
- ODP_CAP_SET_MAX(dev, xrc_odp_caps.receive);
- ODP_CAP_SET_MAX(dev, xrc_odp_caps.write);
- ODP_CAP_SET_MAX(dev, xrc_odp_caps.read);
- ODP_CAP_SET_MAX(dev, xrc_odp_caps.atomic);
- ODP_CAP_SET_MAX(dev, dc_odp_caps.srq_receive);
- ODP_CAP_SET_MAX(dev, dc_odp_caps.send);
- ODP_CAP_SET_MAX(dev, dc_odp_caps.receive);
- ODP_CAP_SET_MAX(dev, dc_odp_caps.write);
- ODP_CAP_SET_MAX(dev, dc_odp_caps.read);
- ODP_CAP_SET_MAX(dev, dc_odp_caps.atomic);
-
- if (!do_set)
- return 0;
-
- return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ODP);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.ud_odp_caps.srq_receive);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.rc_odp_caps.srq_receive);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.srq_receive);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.send);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.receive);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.write);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.read);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.atomic);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.srq_receive);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.send);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.receive);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.write);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.read);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.atomic);
+
+set:
+ if (do_set)
+ err = set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ODP);
+
+ mlx5_core_dbg(dev, "Using ODP %s scheme\n",
+ mem_page_fault ? "memory" : "transport");
+ return err;
}
static int max_uc_list_get_devlink_param(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index 303d2ce4dc1e..e746cd9c68ed 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -165,52 +165,22 @@ static int mlxsw_get_cooling_device_idx(struct mlxsw_thermal *thermal,
return -ENODEV;
}
-static int mlxsw_thermal_bind(struct thermal_zone_device *tzdev,
- struct thermal_cooling_device *cdev)
+static bool mlxsw_thermal_should_bind(struct thermal_zone_device *tzdev,
+ const struct thermal_trip *trip,
+ struct thermal_cooling_device *cdev,
+ struct cooling_spec *c)
{
struct mlxsw_thermal *thermal = thermal_zone_device_priv(tzdev);
- struct device *dev = thermal->bus_info->dev;
- int i, err;
+ const struct mlxsw_cooling_states *state = trip->priv;
/* If the cooling device is one of ours bind it */
if (mlxsw_get_cooling_device_idx(thermal, cdev) < 0)
- return 0;
+ return false;
- for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) {
- const struct mlxsw_cooling_states *state = &thermal->cooling_states[i];
+ c->upper = state->max_state;
+ c->lower = state->min_state;
- err = thermal_zone_bind_cooling_device(tzdev, i, cdev,
- state->max_state,
- state->min_state,
- THERMAL_WEIGHT_DEFAULT);
- if (err < 0) {
- dev_err(dev, "Failed to bind cooling device to trip %d\n", i);
- return err;
- }
- }
- return 0;
-}
-
-static int mlxsw_thermal_unbind(struct thermal_zone_device *tzdev,
- struct thermal_cooling_device *cdev)
-{
- struct mlxsw_thermal *thermal = thermal_zone_device_priv(tzdev);
- struct device *dev = thermal->bus_info->dev;
- int i;
- int err;
-
- /* If the cooling device is our one unbind it */
- if (mlxsw_get_cooling_device_idx(thermal, cdev) < 0)
- return 0;
-
- for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) {
- err = thermal_zone_unbind_cooling_device(tzdev, i, cdev);
- if (err < 0) {
- dev_err(dev, "Failed to unbind cooling device\n");
- return err;
- }
- }
- return 0;
+ return true;
}
static int mlxsw_thermal_get_temp(struct thermal_zone_device *tzdev,
@@ -240,57 +210,27 @@ static struct thermal_zone_params mlxsw_thermal_params = {
};
static struct thermal_zone_device_ops mlxsw_thermal_ops = {
- .bind = mlxsw_thermal_bind,
- .unbind = mlxsw_thermal_unbind,
+ .should_bind = mlxsw_thermal_should_bind,
.get_temp = mlxsw_thermal_get_temp,
};
-static int mlxsw_thermal_module_bind(struct thermal_zone_device *tzdev,
- struct thermal_cooling_device *cdev)
+static bool mlxsw_thermal_module_should_bind(struct thermal_zone_device *tzdev,
+ const struct thermal_trip *trip,
+ struct thermal_cooling_device *cdev,
+ struct cooling_spec *c)
{
struct mlxsw_thermal_module *tz = thermal_zone_device_priv(tzdev);
+ const struct mlxsw_cooling_states *state = trip->priv;
struct mlxsw_thermal *thermal = tz->parent;
- int i, j, err;
/* If the cooling device is one of ours bind it */
if (mlxsw_get_cooling_device_idx(thermal, cdev) < 0)
- return 0;
+ return false;
- for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) {
- const struct mlxsw_cooling_states *state = &tz->cooling_states[i];
+ c->upper = state->max_state;
+ c->lower = state->min_state;
- err = thermal_zone_bind_cooling_device(tzdev, i, cdev,
- state->max_state,
- state->min_state,
- THERMAL_WEIGHT_DEFAULT);
- if (err < 0)
- goto err_thermal_zone_bind_cooling_device;
- }
- return 0;
-
-err_thermal_zone_bind_cooling_device:
- for (j = i - 1; j >= 0; j--)
- thermal_zone_unbind_cooling_device(tzdev, j, cdev);
- return err;
-}
-
-static int mlxsw_thermal_module_unbind(struct thermal_zone_device *tzdev,
- struct thermal_cooling_device *cdev)
-{
- struct mlxsw_thermal_module *tz = thermal_zone_device_priv(tzdev);
- struct mlxsw_thermal *thermal = tz->parent;
- int i;
- int err;
-
- /* If the cooling device is one of ours unbind it */
- if (mlxsw_get_cooling_device_idx(thermal, cdev) < 0)
- return 0;
-
- for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) {
- err = thermal_zone_unbind_cooling_device(tzdev, i, cdev);
- WARN_ON(err);
- }
- return err;
+ return true;
}
static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev,
@@ -313,8 +253,7 @@ static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev,
}
static struct thermal_zone_device_ops mlxsw_thermal_module_ops = {
- .bind = mlxsw_thermal_module_bind,
- .unbind = mlxsw_thermal_module_unbind,
+ .should_bind = mlxsw_thermal_module_should_bind,
.get_temp = mlxsw_thermal_module_temp_get,
};
@@ -342,8 +281,7 @@ static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev,
}
static struct thermal_zone_device_ops mlxsw_thermal_gearbox_ops = {
- .bind = mlxsw_thermal_module_bind,
- .unbind = mlxsw_thermal_module_unbind,
+ .should_bind = mlxsw_thermal_module_should_bind,
.get_temp = mlxsw_thermal_gearbox_temp_get,
};
@@ -450,6 +388,7 @@ mlxsw_thermal_module_init(struct mlxsw_thermal *thermal,
struct mlxsw_thermal_area *area, u8 module)
{
struct mlxsw_thermal_module *module_tz;
+ int i;
module_tz = &area->tz_module_arr[module];
module_tz->module = module;
@@ -461,6 +400,8 @@ mlxsw_thermal_module_init(struct mlxsw_thermal *thermal,
sizeof(thermal->trips));
memcpy(module_tz->cooling_states, default_cooling_states,
sizeof(thermal->cooling_states));
+ for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++)
+ module_tz->trips[i].priv = &module_tz->cooling_states[i];
return mlxsw_thermal_module_tz_init(module_tz);
}
@@ -566,7 +507,7 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
struct mlxsw_thermal_module *gearbox_tz;
char mgpir_pl[MLXSW_REG_MGPIR_LEN];
u8 gbox_num;
- int i;
+ int i, j;
int err;
mlxsw_reg_mgpir_pack(mgpir_pl, area->slot_index);
@@ -593,6 +534,9 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
sizeof(thermal->trips));
memcpy(gearbox_tz->cooling_states, default_cooling_states,
sizeof(thermal->cooling_states));
+ for (j = 0; j < MLXSW_THERMAL_NUM_TRIPS; j++)
+ gearbox_tz->trips[j].priv = &gearbox_tz->cooling_states[j];
+
gearbox_tz->module = i;
gearbox_tz->parent = thermal;
gearbox_tz->slot_index = area->slot_index;
@@ -709,6 +653,9 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
thermal->bus_info = bus_info;
memcpy(thermal->trips, default_thermal_trips, sizeof(thermal->trips));
memcpy(thermal->cooling_states, default_cooling_states, sizeof(thermal->cooling_states));
+ for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++)
+ thermal->trips[i].priv = &thermal->cooling_states[i];
+
thermal->line_cards[0].slot_index = 0;
err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfcr), mfcr_pl);
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index ddb8f68d80a2..ca4ed58f1206 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -1496,11 +1496,7 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto release_region;
- err = dma_set_max_seg_size(&pdev->dev, UINT_MAX);
- if (err) {
- dev_err(&pdev->dev, "Failed to set dma device segment size\n");
- goto release_region;
- }
+ dma_set_max_seg_size(&pdev->dev, UINT_MAX);
err = -ENOMEM;
gc = vzalloc(sizeof(*gc));
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 45ac8befba29..305ec19ccef1 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -579,6 +579,33 @@ struct rtl8169_counters {
__le32 rx_multicast;
__le16 tx_aborted;
__le16 tx_underrun;
+ /* new since RTL8125 */
+ __le64 tx_octets;
+ __le64 rx_octets;
+ __le64 rx_multicast64;
+ __le64 tx_unicast64;
+ __le64 tx_broadcast64;
+ __le64 tx_multicast64;
+ __le32 tx_pause_on;
+ __le32 tx_pause_off;
+ __le32 tx_pause_all;
+ __le32 tx_deferred;
+ __le32 tx_late_collision;
+ __le32 tx_all_collision;
+ __le32 tx_aborted32;
+ __le32 align_errors32;
+ __le32 rx_frame_too_long;
+ __le32 rx_runt;
+ __le32 rx_pause_on;
+ __le32 rx_pause_off;
+ __le32 rx_pause_all;
+ __le32 rx_unknown_opcode;
+ __le32 rx_mac_error;
+ __le32 tx_underrun32;
+ __le32 rx_mac_missed;
+ __le32 rx_tcam_dropped;
+ __le32 tdu;
+ __le32 rdu;
};
struct rtl8169_tc_offsets {
@@ -681,6 +708,7 @@ MODULE_FIRMWARE(FIRMWARE_8107E_2);
MODULE_FIRMWARE(FIRMWARE_8125A_3);
MODULE_FIRMWARE(FIRMWARE_8125B_2);
MODULE_FIRMWARE(FIRMWARE_8126A_2);
+MODULE_FIRMWARE(FIRMWARE_8126A_3);
static inline struct device *tp_to_dev(struct rtl8169_private *tp)
{
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 9893c91af105..a7de5cf6b317 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -1052,6 +1052,7 @@ struct ravb_hw_info {
netdev_features_t net_features;
int stats_len;
u32 tccr_mask;
+ u32 tx_max_frame_size;
u32 rx_max_frame_size;
u32 rx_buffer_size;
u32 rx_desc_size;
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index c7ec23688d56..d2a6518532f3 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -555,8 +555,16 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)
static void ravb_emac_init_rcar(struct net_device *ndev)
{
- /* Receive frame limit set register */
- ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ /* Set receive frame length
+ *
+ * The length set here describes the frame from the destination address
+ * up to and including the CRC data. However only the frame data,
+ * excluding the CRC, are transferred to memory. To allow for the
+ * largest frames add the CRC length to the maximum Rx descriptor size.
+ */
+ ravb_write(ndev, priv->info->rx_max_frame_size + ETH_FCS_LEN, RFLR);
/* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */
ravb_write(ndev, ECMR_ZPF | ECMR_DM |
@@ -2674,6 +2682,7 @@ static const struct ravb_hw_info ravb_gen2_hw_info = {
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
+ .tx_max_frame_size = SZ_2K,
.rx_max_frame_size = SZ_2K,
.rx_buffer_size = SZ_2K +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
@@ -2696,6 +2705,7 @@ static const struct ravb_hw_info ravb_gen3_hw_info = {
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
+ .tx_max_frame_size = SZ_2K,
.rx_max_frame_size = SZ_2K,
.rx_buffer_size = SZ_2K +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
@@ -2721,6 +2731,7 @@ static const struct ravb_hw_info ravb_gen4_hw_info = {
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
+ .tx_max_frame_size = SZ_2K,
.rx_max_frame_size = SZ_2K,
.rx_buffer_size = SZ_2K +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
@@ -2770,6 +2781,7 @@ static const struct ravb_hw_info gbeth_hw_info = {
.net_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth),
.tccr_mask = TCCR_TSRQ0,
+ .tx_max_frame_size = 1522,
.rx_max_frame_size = SZ_8K,
.rx_buffer_size = SZ_2K,
.rx_desc_size = sizeof(struct ravb_rx_desc),
@@ -2981,7 +2993,7 @@ static int ravb_probe(struct platform_device *pdev)
priv->avb_link_active_low =
of_property_read_bool(np, "renesas,ether-link-active-low");
- ndev->max_mtu = info->rx_max_frame_size -
+ ndev->max_mtu = info->tx_max_frame_size -
(ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
ndev->min_mtu = ETH_MIN_MTU;
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c
index c672f92d65e9..9319a2675e7b 100644
--- a/drivers/net/ethernet/seeq/ether3.c
+++ b/drivers/net/ethernet/seeq/ether3.c
@@ -847,9 +847,11 @@ static void ether3_remove(struct expansion_card *ec)
{
struct net_device *dev = ecard_get_drvdata(ec);
+ ether3_outw(priv(dev)->regs.config2 |= CFG2_CTRLO, REG_CONFIG2);
ecard_set_drvdata(ec, NULL);
unregister_netdev(dev);
+ del_timer_sync(&priv(dev)->timer);
free_netdev(dev);
ecard_release_resources(ec);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d3895d7eecfc..e2140482270a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2035,7 +2035,7 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
rx_q->queue_index = queue;
rx_q->priv_data = priv;
- pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp_params.flags = PP_FLAG_DMA_MAP | (xdp_prog ? PP_FLAG_DMA_SYNC_DEV : 0);
pp_params.pool_size = dma_conf->dma_rx_size;
num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
pp_params.order = ilog2(num_pages);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 832998bc020b..75ad2da1a37f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -386,6 +386,7 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
return ret;
priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
+ return 0;
}
/* Final adjustments for HW */
diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig
index 85cdbdd44fec..e46ccebcfd22 100644
--- a/drivers/net/ethernet/wangxun/Kconfig
+++ b/drivers/net/ethernet/wangxun/Kconfig
@@ -41,10 +41,9 @@ config TXGBE
tristate "Wangxun(R) 10GbE PCI Express adapters support"
depends on PCI
depends on COMMON_CLK
+ depends on I2C_DESIGNWARE_PLATFORM
select MARVELL_10G_PHY
select REGMAP
- select I2C
- select I2C_DESIGNWARE_PLATFORM
select PHYLINK
select HWMON if TXGBE=y
select SFP
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index ea7d7c03f48e..fc35fcb22d94 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -736,15 +736,15 @@ static int axienet_device_reset(struct net_device *ndev)
*
* Would either be called after a successful transmit operation, or after
* there was an error when setting up the chain.
- * Returns the number of descriptors handled.
+ * Returns the number of packets handled.
*/
static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
int nr_bds, bool force, u32 *sizep, int budget)
{
struct axidma_bd *cur_p;
unsigned int status;
+ int i, packets = 0;
dma_addr_t phys;
- int i;
for (i = 0; i < nr_bds; i++) {
cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
@@ -763,8 +763,10 @@ static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
(cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
DMA_TO_DEVICE);
- if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
+ if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
napi_consume_skb(cur_p->skb, budget);
+ packets++;
+ }
cur_p->app0 = 0;
cur_p->app1 = 0;
@@ -780,7 +782,13 @@ static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
}
- return i;
+ if (!force) {
+ lp->tx_bd_ci += i;
+ if (lp->tx_bd_ci >= lp->tx_bd_num)
+ lp->tx_bd_ci %= lp->tx_bd_num;
+ }
+
+ return packets;
}
/**
@@ -953,13 +961,10 @@ static int axienet_tx_poll(struct napi_struct *napi, int budget)
u32 size = 0;
int packets;
- packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, budget, false, &size, budget);
+ packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, lp->tx_bd_num, false,
+ &size, budget);
if (packets) {
- lp->tx_bd_ci += packets;
- if (lp->tx_bd_ci >= lp->tx_bd_num)
- lp->tx_bd_ci %= lp->tx_bd_num;
-
u64_stats_update_begin(&lp->tx_stat_sync);
u64_stats_add(&lp->tx_packets, packets);
u64_stats_add(&lp->tx_bytes, size);
@@ -1282,9 +1287,10 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
u32 cr = lp->tx_dma_cr;
cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
-
- napi_schedule(&lp->napi_tx);
+ if (napi_schedule_prep(&lp->napi_tx)) {
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+ __napi_schedule(&lp->napi_tx);
+ }
}
return IRQ_HANDLED;
@@ -1326,9 +1332,10 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
u32 cr = lp->rx_dma_cr;
cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
-
- napi_schedule(&lp->napi_rx);
+ if (napi_schedule_prep(&lp->napi_rx)) {
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ __napi_schedule(&lp->napi_rx);
+ }
}
return IRQ_HANDLED;
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 6ed38a3cdd73..3bf6785f9057 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -37,8 +37,6 @@
#include <linux/semaphore.h>
#include <linux/refcount.h>
-#define SIXPACK_VERSION "Revision: 0.3.0"
-
/* sixpack priority commands */
#define SIXP_SEOF 0x40 /* start and end of a 6pack frame */
#define SIXP_TX_URUN 0x48 /* transmit overrun */
@@ -88,22 +86,18 @@ struct sixpack {
struct net_device *dev; /* easy for intr handling */
/* These are pointers to the malloc()ed frame buffers. */
- unsigned char *rbuff; /* receiver buffer */
int rcount; /* received chars counter */
unsigned char *xbuff; /* transmitter buffer */
unsigned char *xhead; /* next byte to XMIT */
int xleft; /* bytes left in XMIT queue */
- unsigned char raw_buf[4];
- unsigned char cooked_buf[400];
+ u8 raw_buf[4];
+ u8 cooked_buf[400];
unsigned int rx_count;
unsigned int rx_count_cooked;
spinlock_t rxlock;
- int mtu; /* Our mtu (to spot changes!) */
- int buffsize; /* Max buffers sizes */
-
unsigned long flags; /* Flag values/ mode etc */
unsigned char mode; /* 6pack mode */
@@ -113,8 +107,8 @@ struct sixpack {
unsigned char slottime;
unsigned char duplex;
unsigned char led_state;
- unsigned char status;
- unsigned char status1;
+ u8 status;
+ u8 status1;
unsigned char status2;
unsigned char tx_enable;
unsigned char tnc_state;
@@ -128,7 +122,7 @@ struct sixpack {
#define AX25_6PACK_HEADER_LEN 0
-static void sixpack_decode(struct sixpack *, const unsigned char[], int);
+static void sixpack_decode(struct sixpack *, const u8 *, size_t);
static int encode_sixpack(unsigned char *, unsigned char *, int, unsigned char);
/*
@@ -167,7 +161,7 @@ static void sp_encaps(struct sixpack *sp, unsigned char *icp, int len)
unsigned char *msg, *p = icp;
int actual, count;
- if (len > sp->mtu) { /* sp->mtu = AX25_MTU = max. PACLEN = 256 */
+ if (len > AX25_MTU + 73) {
msg = "oversized transmit packet!";
goto out_drop;
}
@@ -333,7 +327,7 @@ static void sp_bump(struct sixpack *sp, char cmd)
{
struct sk_buff *skb;
int count;
- unsigned char *ptr;
+ u8 *ptr;
count = sp->rcount + 1;
@@ -431,7 +425,7 @@ static void sixpack_receive_buf(struct tty_struct *tty, const u8 *cp,
const u8 *fp, size_t count)
{
struct sixpack *sp;
- int count1;
+ size_t count1;
if (!count)
return;
@@ -544,7 +538,7 @@ static inline int tnc_init(struct sixpack *sp)
*/
static int sixpack_open(struct tty_struct *tty)
{
- char *rbuff = NULL, *xbuff = NULL;
+ char *xbuff = NULL;
struct net_device *dev;
struct sixpack *sp;
unsigned long len;
@@ -574,10 +568,8 @@ static int sixpack_open(struct tty_struct *tty)
len = dev->mtu * 2;
- rbuff = kmalloc(len + 4, GFP_KERNEL);
xbuff = kmalloc(len + 4, GFP_KERNEL);
-
- if (rbuff == NULL || xbuff == NULL) {
+ if (xbuff == NULL) {
err = -ENOBUFS;
goto out_free;
}
@@ -586,11 +578,8 @@ static int sixpack_open(struct tty_struct *tty)
sp->tty = tty;
- sp->rbuff = rbuff;
sp->xbuff = xbuff;
- sp->mtu = AX25_MTU + 73;
- sp->buffsize = len;
sp->rcount = 0;
sp->rx_count = 0;
sp->rx_count_cooked = 0;
@@ -631,7 +620,6 @@ static int sixpack_open(struct tty_struct *tty)
out_free:
kfree(xbuff);
- kfree(rbuff);
free_netdev(dev);
@@ -676,7 +664,6 @@ static void sixpack_close(struct tty_struct *tty)
del_timer_sync(&sp->resync_t);
/* Free all 6pack frame buffers after unreg. */
- kfree(sp->rbuff);
kfree(sp->xbuff);
free_netdev(sp->dev);
@@ -756,21 +743,14 @@ static struct tty_ldisc_ops sp_ldisc = {
/* Initialize 6pack control device -- register 6pack line discipline */
-static const char msg_banner[] __initconst = KERN_INFO \
- "AX.25: 6pack driver, " SIXPACK_VERSION "\n";
-static const char msg_regfail[] __initconst = KERN_ERR \
- "6pack: can't register line discipline (err = %d)\n";
-
static int __init sixpack_init_driver(void)
{
int status;
- printk(msg_banner);
-
/* Register the provided line protocol discipline */
status = tty_register_ldisc(&sp_ldisc);
if (status)
- printk(msg_regfail, status);
+ pr_err("6pack: can't register line discipline (err = %d)\n", status);
return status;
}
@@ -820,9 +800,9 @@ static int encode_sixpack(unsigned char *tx_buf, unsigned char *tx_buf_raw,
/* decode 4 sixpack-encoded bytes into 3 data bytes */
-static void decode_data(struct sixpack *sp, unsigned char inbyte)
+static void decode_data(struct sixpack *sp, u8 inbyte)
{
- unsigned char *buf;
+ u8 *buf;
if (sp->rx_count != 3) {
sp->raw_buf[sp->rx_count++] = inbyte;
@@ -848,9 +828,9 @@ static void decode_data(struct sixpack *sp, unsigned char inbyte)
/* identify and execute a 6pack priority command byte */
-static void decode_prio_command(struct sixpack *sp, unsigned char cmd)
+static void decode_prio_command(struct sixpack *sp, u8 cmd)
{
- int actual;
+ ssize_t actual;
if ((cmd & SIXP_PRIO_DATA_MASK) != 0) { /* idle ? */
@@ -898,9 +878,9 @@ static void decode_prio_command(struct sixpack *sp, unsigned char cmd)
/* identify and execute a standard 6pack command byte */
-static void decode_std_command(struct sixpack *sp, unsigned char cmd)
+static void decode_std_command(struct sixpack *sp, u8 cmd)
{
- unsigned char checksum = 0, rest = 0;
+ u8 checksum = 0, rest = 0;
short i;
switch (cmd & SIXP_CMD_MASK) { /* normal command */
@@ -948,10 +928,10 @@ static void decode_std_command(struct sixpack *sp, unsigned char cmd)
/* decode a 6pack packet */
static void
-sixpack_decode(struct sixpack *sp, const unsigned char *pre_rbuff, int count)
+sixpack_decode(struct sixpack *sp, const u8 *pre_rbuff, size_t count)
{
- unsigned char inbyte;
- int count1;
+ size_t count1;
+ u8 inbyte;
for (count1 = 0; count1 < count; count1++) {
inbyte = pre_rbuff[count1];
diff --git a/drivers/net/mctp/mctp-serial.c b/drivers/net/mctp/mctp-serial.c
index f39bbe255497..e63720ec3238 100644
--- a/drivers/net/mctp/mctp-serial.c
+++ b/drivers/net/mctp/mctp-serial.c
@@ -64,18 +64,18 @@ struct mctp_serial {
u16 txfcs, rxfcs, rxfcs_rcvd;
unsigned int txlen, rxlen;
unsigned int txpos, rxpos;
- unsigned char txbuf[BUFSIZE],
+ u8 txbuf[BUFSIZE],
rxbuf[BUFSIZE];
};
-static bool needs_escape(unsigned char c)
+static bool needs_escape(u8 c)
{
return c == BYTE_ESC || c == BYTE_FRAME;
}
-static int next_chunk_len(struct mctp_serial *dev)
+static unsigned int next_chunk_len(struct mctp_serial *dev)
{
- int i;
+ unsigned int i;
/* either we have no bytes to send ... */
if (dev->txpos == dev->txlen)
@@ -99,7 +99,7 @@ static int next_chunk_len(struct mctp_serial *dev)
return i;
}
-static int write_chunk(struct mctp_serial *dev, unsigned char *buf, int len)
+static ssize_t write_chunk(struct mctp_serial *dev, u8 *buf, size_t len)
{
return dev->tty->ops->write(dev->tty, buf, len);
}
@@ -108,9 +108,10 @@ static void mctp_serial_tx_work(struct work_struct *work)
{
struct mctp_serial *dev = container_of(work, struct mctp_serial,
tx_work);
- unsigned char c, buf[3];
unsigned long flags;
- int len, txlen;
+ ssize_t txlen;
+ unsigned int len;
+ u8 c, buf[3];
spin_lock_irqsave(&dev->lock, flags);
@@ -293,7 +294,7 @@ static void mctp_serial_rx(struct mctp_serial *dev)
dev->netdev->stats.rx_bytes += dev->rxlen;
}
-static void mctp_serial_push_header(struct mctp_serial *dev, unsigned char c)
+static void mctp_serial_push_header(struct mctp_serial *dev, u8 c)
{
switch (dev->rxpos) {
case 0:
@@ -323,7 +324,7 @@ static void mctp_serial_push_header(struct mctp_serial *dev, unsigned char c)
}
}
-static void mctp_serial_push_trailer(struct mctp_serial *dev, unsigned char c)
+static void mctp_serial_push_trailer(struct mctp_serial *dev, u8 c)
{
switch (dev->rxpos) {
case 0:
@@ -347,7 +348,7 @@ static void mctp_serial_push_trailer(struct mctp_serial *dev, unsigned char c)
}
}
-static void mctp_serial_push(struct mctp_serial *dev, unsigned char c)
+static void mctp_serial_push(struct mctp_serial *dev, u8 c)
{
switch (dev->rxstate) {
case STATE_IDLE:
@@ -394,7 +395,7 @@ static void mctp_serial_tty_receive_buf(struct tty_struct *tty, const u8 *c,
const u8 *f, size_t len)
{
struct mctp_serial *dev = tty->disc_data;
- int i;
+ size_t i;
if (!netif_running(dev->netdev))
return;
diff --git a/drivers/net/phy/aquantia/aquantia_firmware.c b/drivers/net/phy/aquantia/aquantia_firmware.c
index 524627a36c6f..dac6464b5fe2 100644
--- a/drivers/net/phy/aquantia/aquantia_firmware.c
+++ b/drivers/net/phy/aquantia/aquantia_firmware.c
@@ -353,26 +353,32 @@ int aqr_firmware_load(struct phy_device *phydev)
{
int ret;
- ret = aqr_wait_reset_complete(phydev);
- if (ret)
- return ret;
-
- /* Check if the firmware is not already loaded by pooling
- * the current version returned by the PHY. If 0 is returned,
- * no firmware is loaded.
+ /* Check if the firmware is not already loaded by polling
+ * the current version returned by the PHY.
*/
- ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_FW_ID);
- if (ret > 0)
- goto exit;
-
- ret = aqr_firmware_load_nvmem(phydev);
- if (!ret)
- goto exit;
-
- ret = aqr_firmware_load_fs(phydev);
- if (ret)
+ ret = aqr_wait_reset_complete(phydev);
+ switch (ret) {
+ case 0:
+ /* Some firmware is loaded => do nothing */
+ return 0;
+ case -ETIMEDOUT:
+ /* VEND1_GLOBAL_FW_ID still reads 0 after 2 seconds of polling.
+ * We don't have full confidence that no firmware is loaded (in
+ * theory it might just not have loaded yet), but we will
+ * assume that, and load a new image.
+ */
+ ret = aqr_firmware_load_nvmem(phydev);
+ if (!ret)
+ return ret;
+
+ ret = aqr_firmware_load_fs(phydev);
+ if (ret)
+ return ret;
+ break;
+ default:
+ /* PHY read error, propagate it to the caller */
return ret;
+ }
-exit:
return 0;
}
diff --git a/drivers/net/phy/aquantia/aquantia_leds.c b/drivers/net/phy/aquantia/aquantia_leds.c
index 0516ac02c3f8..201c8df93fad 100644
--- a/drivers/net/phy/aquantia/aquantia_leds.c
+++ b/drivers/net/phy/aquantia/aquantia_leds.c
@@ -120,7 +120,8 @@ int aqr_phy_led_hw_control_set(struct phy_device *phydev, u8 index,
int aqr_phy_led_active_low_set(struct phy_device *phydev, int index, bool enable)
{
return phy_modify_mmd(phydev, MDIO_MMD_VEND1, AQR_LED_DRIVE(index),
- VEND1_GLOBAL_LED_DRIVE_VDD, enable);
+ VEND1_GLOBAL_LED_DRIVE_VDD,
+ enable ? VEND1_GLOBAL_LED_DRIVE_VDD : 0);
}
int aqr_phy_led_polarity_set(struct phy_device *phydev, int index, unsigned long modes)
diff --git a/drivers/net/phy/aquantia/aquantia_main.c b/drivers/net/phy/aquantia/aquantia_main.c
index e982e9ce44a5..4d156d406bab 100644
--- a/drivers/net/phy/aquantia/aquantia_main.c
+++ b/drivers/net/phy/aquantia/aquantia_main.c
@@ -435,6 +435,9 @@ static int aqr107_set_tunable(struct phy_device *phydev,
}
}
+#define AQR_FW_WAIT_SLEEP_US 20000
+#define AQR_FW_WAIT_TIMEOUT_US 2000000
+
/* If we configure settings whilst firmware is still initializing the chip,
* then these settings may be overwritten. Therefore make sure chip
* initialization has completed. Use presence of the firmware ID as
@@ -444,11 +447,19 @@ static int aqr107_set_tunable(struct phy_device *phydev,
*/
int aqr_wait_reset_complete(struct phy_device *phydev)
{
- int val;
+ int ret, val;
+
+ ret = read_poll_timeout(phy_read_mmd, val, val != 0,
+ AQR_FW_WAIT_SLEEP_US, AQR_FW_WAIT_TIMEOUT_US,
+ false, phydev, MDIO_MMD_VEND1,
+ VEND1_GLOBAL_FW_ID);
+ if (val < 0) {
+ phydev_err(phydev, "Failed to read VEND1_GLOBAL_FW_ID: %pe\n",
+ ERR_PTR(val));
+ return val;
+ }
- return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
- VEND1_GLOBAL_FW_ID, val, val != 0,
- 20000, 2000000, false);
+ return ret;
}
static void aqr107_chip_info(struct phy_device *phydev)
@@ -478,7 +489,7 @@ static int aqr107_config_init(struct phy_device *phydev)
{
struct aqr107_priv *priv = phydev->priv;
u32 led_active_low;
- int ret, index = 0;
+ int ret;
/* Check that the PHY interface type is compatible */
if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
@@ -505,10 +516,9 @@ static int aqr107_config_init(struct phy_device *phydev)
/* Restore LED polarity state after reset */
for_each_set_bit(led_active_low, &priv->leds_active_low, AQR_MAX_LEDS) {
- ret = aqr_phy_led_active_low_set(phydev, index, led_active_low);
+ ret = aqr_phy_led_active_low_set(phydev, led_active_low, true);
if (ret)
return ret;
- index++;
}
return 0;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a645c36f2cee..5f77faef0ff1 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -3452,6 +3452,12 @@ static int tun_chr_fasync(int fd, struct file *file, int on)
struct tun_file *tfile = file->private_data;
int ret;
+ if (on) {
+ ret = file_f_owner_allocate(file);
+ if (ret)
+ goto out;
+ }
+
if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
goto out;
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 18eb5ba436df..2506aa8c603e 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -464,10 +464,15 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
void usbnet_defer_kevent (struct usbnet *dev, int work)
{
set_bit (work, &dev->flags);
- if (!schedule_work (&dev->kevent))
- netdev_dbg(dev->net, "kevent %s may have been dropped\n", usbnet_event_names[work]);
- else
- netdev_dbg(dev->net, "kevent %s scheduled\n", usbnet_event_names[work]);
+ if (!usbnet_going_away(dev)) {
+ if (!schedule_work(&dev->kevent))
+ netdev_dbg(dev->net,
+ "kevent %s may have been dropped\n",
+ usbnet_event_names[work]);
+ else
+ netdev_dbg(dev->net,
+ "kevent %s scheduled\n", usbnet_event_names[work]);
+ }
}
EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
@@ -535,7 +540,8 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
tasklet_schedule (&dev->bh);
break;
case 0:
- __usbnet_queue_skb(&dev->rxq, skb, rx_start);
+ if (!usbnet_going_away(dev))
+ __usbnet_queue_skb(&dev->rxq, skb, rx_start);
}
} else {
netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
@@ -843,9 +849,18 @@ int usbnet_stop (struct net_device *net)
/* deferred work (timer, softirq, task) must also stop */
dev->flags = 0;
- del_timer_sync (&dev->delay);
- tasklet_kill (&dev->bh);
+ del_timer_sync(&dev->delay);
+ tasklet_kill(&dev->bh);
cancel_work_sync(&dev->kevent);
+
+ /* We have cyclic dependencies. Those calls are needed
+ * to break a cycle. We cannot fall into the gaps because
+ * we have a flag
+ */
+ tasklet_kill(&dev->bh);
+ del_timer_sync(&dev->delay);
+ cancel_work_sync(&dev->kevent);
+
if (!pm)
usb_autopm_put_interface(dev->intf);
@@ -1171,7 +1186,8 @@ fail_halt:
status);
} else {
clear_bit (EVENT_RX_HALT, &dev->flags);
- tasklet_schedule (&dev->bh);
+ if (!usbnet_going_away(dev))
+ tasklet_schedule(&dev->bh);
}
}
@@ -1196,7 +1212,8 @@ fail_halt:
usb_autopm_put_interface(dev->intf);
fail_lowmem:
if (resched)
- tasklet_schedule (&dev->bh);
+ if (!usbnet_going_away(dev))
+ tasklet_schedule(&dev->bh);
}
}
@@ -1559,6 +1576,7 @@ static void usbnet_bh (struct timer_list *t)
} else if (netif_running (dev->net) &&
netif_device_present (dev->net) &&
netif_carrier_ok(dev->net) &&
+ !usbnet_going_away(dev) &&
!timer_pending(&dev->delay) &&
!test_bit(EVENT_RX_PAUSED, &dev->flags) &&
!test_bit(EVENT_RX_HALT, &dev->flags)) {
@@ -1606,6 +1624,7 @@ void usbnet_disconnect (struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
if (!dev)
return;
+ usbnet_mark_going_away(dev);
xdev = interface_to_usbdev (intf);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 6f4781ec2b36..f8131f92a392 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1807,6 +1807,11 @@ static struct sk_buff *receive_small(struct net_device *dev,
struct page *page = virt_to_head_page(buf);
struct sk_buff *skb;
+ /* We passed the address of virtnet header to virtio-core,
+ * so truncate the padding.
+ */
+ buf -= VIRTNET_RX_PAD + xdp_headroom;
+
len -= vi->hdr_len;
u64_stats_add(&stats->bytes, len);
@@ -2422,8 +2427,9 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
if (unlikely(!buf))
return -ENOMEM;
- virtnet_rq_init_one_sg(rq, buf + VIRTNET_RX_PAD + xdp_headroom,
- vi->hdr_len + GOOD_PACKET_LEN);
+ buf += VIRTNET_RX_PAD + xdp_headroom;
+
+ virtnet_rq_init_one_sg(rq, buf, vi->hdr_len + GOOD_PACKET_LEN);
err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
if (err < 0) {