Diffstat (limited to 'drivers/vdpa/mlx5/net/mlx5_vnet.c')
-rw-r--r--  drivers/vdpa/mlx5/net/mlx5_vnet.c  465
1 file changed, 301 insertions(+), 164 deletions(-)
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index fa78e8288ebb..dee019977716 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -941,11 +941,11 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev,
MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr);
MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr);
- vq_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]];
+ vq_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP]];
if (vq_mr)
MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, vq_mr->mkey);
- vq_desc_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
+ vq_desc_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
if (vq_desc_mr &&
MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported))
MLX5_SET(virtio_q, vq_ctx, desc_group_mkey, vq_desc_mr->mkey);
@@ -953,11 +953,11 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev,
/* If there is no MR update, make sure the existing MRs are still
 * programmed when the VQ is modified to ready.
 */
- vq_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]];
+ vq_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP]];
if (vq_mr)
mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY;
- vq_desc_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
+ vq_desc_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
if (vq_desc_mr)
mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY;
}
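
The mvdev->mr and mvdev->group2asid accessors above move into a dedicated mres container. A rough sketch of its presumed layout, pieced together from the fields this patch touches (the actual definition lives in mlx5_vdpa.h):

	struct mlx5_vdpa_mr_resources {
		struct mlx5_vdpa_mr *mr[MLX5_VDPA_NUM_AS];        /* one MR slot per address space */
		unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS];  /* VQ group -> ASID mapping */
		struct list_head mr_list_head;                    /* list of registered MRs */
		struct mutex lock;                                /* replaces the old mr_mtx */
	};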
@@ -1184,40 +1184,92 @@ struct mlx5_virtq_attr {
u16 used_index;
};
-static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq,
- struct mlx5_virtq_attr *attr)
-{
- int outlen = MLX5_ST_SZ_BYTES(query_virtio_net_q_out);
- u32 in[MLX5_ST_SZ_DW(query_virtio_net_q_in)] = {};
- void *out;
- void *obj_context;
- void *cmd_hdr;
- int err;
+struct mlx5_virtqueue_query_mem {
+ u8 in[MLX5_ST_SZ_BYTES(query_virtio_net_q_in)];
+ u8 out[MLX5_ST_SZ_BYTES(query_virtio_net_q_out)];
+};
- out = kzalloc(outlen, GFP_KERNEL);
- if (!out)
- return -ENOMEM;
+struct mlx5_virtqueue_modify_mem {
+ u8 in[MLX5_ST_SZ_BYTES(modify_virtio_net_q_in)];
+ u8 out[MLX5_ST_SZ_BYTES(modify_virtio_net_q_out)];
+};
- cmd_hdr = MLX5_ADDR_OF(query_virtio_net_q_in, in, general_obj_in_cmd_hdr);
+static void fill_query_virtqueue_cmd(struct mlx5_vdpa_net *ndev,
+ struct mlx5_vdpa_virtqueue *mvq,
+ struct mlx5_virtqueue_query_mem *cmd)
+{
+ void *cmd_hdr = MLX5_ADDR_OF(query_virtio_net_q_in, cmd->in, general_obj_in_cmd_hdr);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_NET_Q);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
- err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, outlen);
- if (err)
- goto err_cmd;
+}
+
+static void query_virtqueue_end(struct mlx5_vdpa_net *ndev,
+ struct mlx5_virtqueue_query_mem *cmd,
+ struct mlx5_virtq_attr *attr)
+{
+ void *obj_context = MLX5_ADDR_OF(query_virtio_net_q_out, cmd->out, obj_context);
- obj_context = MLX5_ADDR_OF(query_virtio_net_q_out, out, obj_context);
memset(attr, 0, sizeof(*attr));
attr->state = MLX5_GET(virtio_net_q_object, obj_context, state);
attr->available_index = MLX5_GET(virtio_net_q_object, obj_context, hw_available_index);
attr->used_index = MLX5_GET(virtio_net_q_object, obj_context, hw_used_index);
- kfree(out);
- return 0;
+}
-err_cmd:
- kfree(out);
+static int query_virtqueues(struct mlx5_vdpa_net *ndev,
+ int start_vq,
+ int num_vqs,
+ struct mlx5_virtq_attr *attrs)
+{
+ struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
+ struct mlx5_virtqueue_query_mem *cmd_mem;
+ struct mlx5_vdpa_async_cmd *cmds;
+ int err = 0;
+
+ WARN(start_vq + num_vqs > mvdev->max_vqs, "query vq range invalid [%d, %d), max_vqs: %u\n",
+ start_vq, start_vq + num_vqs, mvdev->max_vqs);
+
+ cmds = kvcalloc(num_vqs, sizeof(*cmds), GFP_KERNEL);
+ cmd_mem = kvcalloc(num_vqs, sizeof(*cmd_mem), GFP_KERNEL);
+ if (!cmds || !cmd_mem) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ for (int i = 0; i < num_vqs; i++) {
+ cmds[i].in = &cmd_mem[i].in;
+ cmds[i].inlen = sizeof(cmd_mem[i].in);
+ cmds[i].out = &cmd_mem[i].out;
+ cmds[i].outlen = sizeof(cmd_mem[i].out);
+ fill_query_virtqueue_cmd(ndev, &ndev->vqs[start_vq + i], &cmd_mem[i]);
+ }
+
+ err = mlx5_vdpa_exec_async_cmds(&ndev->mvdev, cmds, num_vqs);
+ if (err) {
+ mlx5_vdpa_err(mvdev, "error issuing query cmd for vq range [%d, %d): %d\n",
+ start_vq, start_vq + num_vqs, err);
+ goto done;
+ }
+
+ for (int i = 0; i < num_vqs; i++) {
+ struct mlx5_vdpa_async_cmd *cmd = &cmds[i];
+ int vq_idx = start_vq + i;
+
+ if (cmd->err) {
+ mlx5_vdpa_err(mvdev, "query vq %d failed, err: %d\n", vq_idx, err);
+ if (!err)
+ err = cmd->err;
+ continue;
+ }
+
+ query_virtqueue_end(ndev, &cmd_mem[i], &attrs[i]);
+ }
+
+done:
+ kvfree(cmd_mem);
+ kvfree(cmds);
return err;
}
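
Single-queue callers now go through the same batched entry point with a range of one; for example, the mlx5_vdpa_get_vq_state() hunk further down becomes (sketch):

	struct mlx5_virtq_attr attr;
	int err;

	err = query_virtqueues(ndev, mvq->index, 1, &attr);
	if (err)
		return err;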
@@ -1251,51 +1303,30 @@ static bool modifiable_virtqueue_fields(struct mlx5_vdpa_virtqueue *mvq)
return true;
}
-static int modify_virtqueue(struct mlx5_vdpa_net *ndev,
- struct mlx5_vdpa_virtqueue *mvq,
- int state)
+static void fill_modify_virtqueue_cmd(struct mlx5_vdpa_net *ndev,
+ struct mlx5_vdpa_virtqueue *mvq,
+ int state,
+ struct mlx5_virtqueue_modify_mem *cmd)
{
- int inlen = MLX5_ST_SZ_BYTES(modify_virtio_net_q_in);
- u32 out[MLX5_ST_SZ_DW(modify_virtio_net_q_out)] = {};
struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
struct mlx5_vdpa_mr *desc_mr = NULL;
struct mlx5_vdpa_mr *vq_mr = NULL;
- bool state_change = false;
void *obj_context;
void *cmd_hdr;
void *vq_ctx;
- void *in;
- int err;
- if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_NONE)
- return 0;
-
- if (!modifiable_virtqueue_fields(mvq))
- return -EINVAL;
-
- in = kzalloc(inlen, GFP_KERNEL);
- if (!in)
- return -ENOMEM;
-
- cmd_hdr = MLX5_ADDR_OF(modify_virtio_net_q_in, in, general_obj_in_cmd_hdr);
+ cmd_hdr = MLX5_ADDR_OF(modify_virtio_net_q_in, cmd->in, general_obj_in_cmd_hdr);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_NET_Q);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
- obj_context = MLX5_ADDR_OF(modify_virtio_net_q_in, in, obj_context);
+ obj_context = MLX5_ADDR_OF(modify_virtio_net_q_in, cmd->in, obj_context);
vq_ctx = MLX5_ADDR_OF(virtio_net_q_object, obj_context, virtio_q_context);
- if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_STATE) {
- if (!is_valid_state_change(mvq->fw_state, state, is_resumable(ndev))) {
- err = -EINVAL;
- goto done;
- }
-
+ if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_STATE)
MLX5_SET(virtio_net_q_object, obj_context, state, state);
- state_change = true;
- }
if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_ADDRS) {
MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr);
@@ -1323,7 +1354,7 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev,
}
if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY) {
- vq_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]];
+ vq_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP]];
if (vq_mr)
MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, vq_mr->mkey);
@@ -1332,7 +1363,7 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev,
}
if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY) {
- desc_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
+ desc_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
if (desc_mr && MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported))
MLX5_SET(virtio_q, vq_ctx, desc_group_mkey, desc_mr->mkey);
@@ -1341,38 +1372,36 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev,
}
MLX5_SET64(virtio_net_q_object, obj_context, modify_field_select, mvq->modified_fields);
- err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
- if (err)
- goto done;
+}
- if (state_change)
- mvq->fw_state = state;
+static void modify_virtqueue_end(struct mlx5_vdpa_net *ndev,
+ struct mlx5_vdpa_virtqueue *mvq,
+ int state)
+{
+ struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY) {
+ unsigned int asid = mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP];
+ struct mlx5_vdpa_mr *vq_mr = mvdev->mres.mr[asid];
+
mlx5_vdpa_put_mr(mvdev, mvq->vq_mr);
mlx5_vdpa_get_mr(mvdev, vq_mr);
mvq->vq_mr = vq_mr;
}
if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY) {
+ unsigned int asid = mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP];
+ struct mlx5_vdpa_mr *desc_mr = mvdev->mres.mr[asid];
+
mlx5_vdpa_put_mr(mvdev, mvq->desc_mr);
mlx5_vdpa_get_mr(mvdev, desc_mr);
mvq->desc_mr = desc_mr;
}
- mvq->modified_fields = 0;
-
-done:
- kfree(in);
- return err;
-}
+ if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_STATE)
+ mvq->fw_state = state;
-static int modify_virtqueue_state(struct mlx5_vdpa_net *ndev,
- struct mlx5_vdpa_virtqueue *mvq,
- unsigned int state)
-{
- mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_STATE;
- return modify_virtqueue(ndev, mvq, state);
+ mvq->modified_fields = 0;
}
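
The old synchronous modify_virtqueue() is thus split into three phases: build the command, execute the whole batch, then commit driver-side state for the queues whose command succeeded. A minimal sketch of the resulting pattern (modify_virtqueues() below is the real implementation):

	/* per VQ: build the command into preallocated memory */
	fill_modify_virtqueue_cmd(ndev, mvq, state, &cmd_mem[i]);
	/* once: issue the whole batch and wait for all completions */
	err = mlx5_vdpa_exec_async_cmds(&ndev->mvdev, cmds, num_vqs);
	/* per VQ that succeeded: commit driver-side state */
	if (!cmds[i].err)
		modify_virtqueue_end(ndev, mvq, state);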
static int counter_set_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
@@ -1525,53 +1554,136 @@ err_fwqp:
return err;
}
-static int suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+static int modify_virtqueues(struct mlx5_vdpa_net *ndev, int start_vq, int num_vqs, int state)
{
- struct mlx5_virtq_attr attr;
- int err;
+ struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
+ struct mlx5_virtqueue_modify_mem *cmd_mem;
+ struct mlx5_vdpa_async_cmd *cmds;
+ int err = 0;
- if (!mvq->initialized)
- return 0;
+ WARN(start_vq + num_vqs > mvdev->max_vqs, "modify vq range invalid [%d, %d), max_vqs: %u\n",
+ start_vq, start_vq + num_vqs, mvdev->max_vqs);
- if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY)
- return 0;
+ cmds = kvcalloc(num_vqs, sizeof(*cmds), GFP_KERNEL);
+ cmd_mem = kvcalloc(num_vqs, sizeof(*cmd_mem), GFP_KERNEL);
+ if (!cmds || !cmd_mem) {
+ err = -ENOMEM;
+ goto done;
+ }
- err = modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND);
- if (err) {
- mlx5_vdpa_warn(&ndev->mvdev, "modify to suspend failed, err: %d\n", err);
- return err;
+ for (int i = 0; i < num_vqs; i++) {
+ struct mlx5_vdpa_async_cmd *cmd = &cmds[i];
+ struct mlx5_vdpa_virtqueue *mvq;
+ int vq_idx = start_vq + i;
+
+ mvq = &ndev->vqs[vq_idx];
+
+ if (!modifiable_virtqueue_fields(mvq)) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ if (mvq->fw_state != state) {
+ if (!is_valid_state_change(mvq->fw_state, state, is_resumable(ndev))) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_STATE;
+ }
+
+ cmd->in = &cmd_mem[i].in;
+ cmd->inlen = sizeof(cmd_mem[i].in);
+ cmd->out = &cmd_mem[i].out;
+ cmd->outlen = sizeof(cmd_mem[i].out);
+ fill_modify_virtqueue_cmd(ndev, mvq, state, &cmd_mem[i]);
}
- err = query_virtqueue(ndev, mvq, &attr);
+ err = mlx5_vdpa_exec_async_cmds(&ndev->mvdev, cmds, num_vqs);
if (err) {
- mlx5_vdpa_warn(&ndev->mvdev, "failed to query virtqueue, err: %d\n", err);
- return err;
+ mlx5_vdpa_err(mvdev, "error issuing modify cmd for vq range [%d, %d)\n",
+ start_vq, start_vq + num_vqs);
+ goto done;
}
- mvq->avail_idx = attr.available_index;
- mvq->used_idx = attr.used_index;
+ for (int i = 0; i < num_vqs; i++) {
+ struct mlx5_vdpa_async_cmd *cmd = &cmds[i];
+ struct mlx5_vdpa_virtqueue *mvq;
+ int vq_idx = start_vq + i;
- return 0;
+ mvq = &ndev->vqs[vq_idx];
+
+ if (cmd->err) {
+ mlx5_vdpa_err(mvdev, "modify vq %d failed, state: %d -> %d, err: %d\n",
+ vq_idx, mvq->fw_state, state, err);
+ if (!err)
+ err = cmd->err;
+ continue;
+ }
+
+ modify_virtqueue_end(ndev, mvq, state);
+ }
+
+done:
+ kvfree(cmd_mem);
+ kvfree(cmds);
+ return err;
}
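
Both suspend and resume below reduce to this batched helper; e.g. suspending a contiguous range is a single call:

	err = modify_virtqueues(ndev, start_vq, num_vqs,
				MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND);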
-static int suspend_vqs(struct mlx5_vdpa_net *ndev)
+static int suspend_vqs(struct mlx5_vdpa_net *ndev, int start_vq, int num_vqs)
{
- int err = 0;
- int i;
+ struct mlx5_vdpa_virtqueue *mvq;
+ struct mlx5_virtq_attr *attrs;
+ int vq_idx, i;
+ int err;
+
+ if (start_vq >= ndev->cur_num_vqs)
+ return -EINVAL;
+
+ mvq = &ndev->vqs[start_vq];
+ if (!mvq->initialized)
+ return 0;
+
+ if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY)
+ return 0;
+
+ err = modify_virtqueues(ndev, start_vq, num_vqs, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND);
+ if (err)
+ return err;
+
+ attrs = kcalloc(num_vqs, sizeof(struct mlx5_virtq_attr), GFP_KERNEL);
+ if (!attrs)
+ return -ENOMEM;
- for (i = 0; i < ndev->cur_num_vqs; i++) {
- int local_err = suspend_vq(ndev, &ndev->vqs[i]);
+ err = query_virtqueues(ndev, start_vq, num_vqs, attrs);
+ if (err)
+ goto done;
- err = local_err ? local_err : err;
+ for (i = 0, vq_idx = start_vq; i < num_vqs; i++, vq_idx++) {
+ mvq = &ndev->vqs[vq_idx];
+ mvq->avail_idx = attrs[i].available_index;
+ mvq->used_idx = attrs[i].used_index;
}
+done:
+ kfree(attrs);
return err;
}
-static int resume_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+static int suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+ return suspend_vqs(ndev, mvq->index, 1);
+}
+
+static int resume_vqs(struct mlx5_vdpa_net *ndev, int start_vq, int num_vqs)
{
+ struct mlx5_vdpa_virtqueue *mvq;
int err;
+ if (start_vq >= ndev->mvdev.max_vqs)
+ return -EINVAL;
+
+ mvq = &ndev->vqs[start_vq];
if (!mvq->initialized)
return 0;
@@ -1583,13 +1695,9 @@ static int resume_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq
/* Due to a FW quirk we need to modify the VQ fields first then change state.
* This should be fixed soon. After that, a single command can be used.
*/
- err = modify_virtqueue(ndev, mvq, 0);
- if (err) {
- mlx5_vdpa_warn(&ndev->mvdev,
- "modify vq properties failed for vq %u, err: %d\n",
- mvq->index, err);
+ err = modify_virtqueues(ndev, start_vq, num_vqs, mvq->fw_state);
+ if (err)
return err;
- }
break;
case MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND:
if (!is_resumable(ndev)) {
@@ -1600,30 +1708,17 @@ static int resume_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq
case MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY:
return 0;
default:
- mlx5_vdpa_warn(&ndev->mvdev, "resume vq %u called from bad state %d\n",
+ mlx5_vdpa_err(&ndev->mvdev, "resume vq %u called from bad state %d\n",
mvq->index, mvq->fw_state);
return -EINVAL;
}
- err = modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
- if (err)
- mlx5_vdpa_warn(&ndev->mvdev, "modify to resume failed for vq %u, err: %d\n",
- mvq->index, err);
-
- return err;
+ return modify_virtqueues(ndev, start_vq, num_vqs, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
}
-static int resume_vqs(struct mlx5_vdpa_net *ndev)
+static int resume_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
- int err = 0;
-
- for (int i = 0; i < ndev->cur_num_vqs; i++) {
- int local_err = resume_vq(ndev, &ndev->vqs[i]);
-
- err = local_err ? local_err : err;
- }
-
- return err;
+ return resume_vqs(ndev, mvq->index, 1);
}
static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
@@ -2002,13 +2097,13 @@ static int setup_steering(struct mlx5_vdpa_net *ndev)
ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS);
if (!ns) {
- mlx5_vdpa_warn(&ndev->mvdev, "failed to get flow namespace\n");
+ mlx5_vdpa_err(&ndev->mvdev, "failed to get flow namespace\n");
return -EOPNOTSUPP;
}
ndev->rxft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (IS_ERR(ndev->rxft)) {
- mlx5_vdpa_warn(&ndev->mvdev, "failed to create flow table\n");
+ mlx5_vdpa_err(&ndev->mvdev, "failed to create flow table\n");
return PTR_ERR(ndev->rxft);
}
mlx5_vdpa_add_rx_flow_table(ndev);
@@ -2124,45 +2219,48 @@ static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)
static int change_num_qps(struct mlx5_vdpa_dev *mvdev, int newqps)
{
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
- int cur_qps = ndev->cur_num_vqs / 2;
+ int cur_vqs = ndev->cur_num_vqs;
+ int new_vqs = newqps * 2;
int err;
int i;
- if (cur_qps > newqps) {
- err = modify_rqt(ndev, 2 * newqps);
+ if (cur_vqs > new_vqs) {
+ err = modify_rqt(ndev, new_vqs);
if (err)
return err;
- for (i = ndev->cur_num_vqs - 1; i >= 2 * newqps; i--) {
- struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[i];
-
- if (is_resumable(ndev))
- suspend_vq(ndev, mvq);
- else
- teardown_vq(ndev, mvq);
+ if (is_resumable(ndev)) {
+ suspend_vqs(ndev, new_vqs, cur_vqs - new_vqs);
+ } else {
+ for (i = new_vqs; i < cur_vqs; i++)
+ teardown_vq(ndev, &ndev->vqs[i]);
}
- ndev->cur_num_vqs = 2 * newqps;
+ ndev->cur_num_vqs = new_vqs;
} else {
- ndev->cur_num_vqs = 2 * newqps;
- for (i = cur_qps * 2; i < 2 * newqps; i++) {
- struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[i];
+ ndev->cur_num_vqs = new_vqs;
- err = mvq->initialized ? resume_vq(ndev, mvq) : setup_vq(ndev, mvq, true);
+ for (i = cur_vqs; i < new_vqs; i++) {
+ err = setup_vq(ndev, &ndev->vqs[i], false);
if (err)
goto clean_added;
}
- err = modify_rqt(ndev, 2 * newqps);
+
+ err = resume_vqs(ndev, cur_vqs, new_vqs - cur_vqs);
+ if (err)
+ goto clean_added;
+
+ err = modify_rqt(ndev, new_vqs);
if (err)
goto clean_added;
}
return 0;
clean_added:
- for (--i; i >= 2 * cur_qps; --i)
+ for (--i; i >= cur_vqs; --i)
teardown_vq(ndev, &ndev->vqs[i]);
- ndev->cur_num_vqs = 2 * cur_qps;
+ ndev->cur_num_vqs = cur_vqs;
return err;
}
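
Each virtio-net queue pair is an RX/TX virtqueue pair, hence the factor of two. A hypothetical worked example of the downscale path:

	/* cur_num_vqs = 8 (4 queue pairs), newqps = 2  =>  new_vqs = 4.
	 * cur_vqs > new_vqs, so the RQT is shrunk to 4 entries first,
	 * then VQs 4..7 are suspended in one batch (resumable HW) or
	 * torn down one by one (non-resumable HW).
	 */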
@@ -2528,9 +2626,9 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa
return 0;
}
- err = query_virtqueue(ndev, mvq, &attr);
+ err = query_virtqueues(ndev, mvq->index, 1, &attr);
if (err) {
- mlx5_vdpa_warn(mvdev, "failed to query virtqueue\n");
+ mlx5_vdpa_err(mvdev, "failed to query virtqueue\n");
return err;
}
state->split.avail_index = attr.used_index;
@@ -2755,6 +2853,9 @@ static int event_handler(struct notifier_block *nb, unsigned long event, void *p
struct mlx5_eqe *eqe = param;
int ret = NOTIFY_DONE;
+ if (ndev->mvdev.suspended)
+ return NOTIFY_DONE;
+
if (event == MLX5_EVENT_TYPE_PORT_CHANGE) {
switch (eqe->sub_type) {
case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
@@ -2879,7 +2980,7 @@ static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqu
int err;
if (mvq->initialized) {
- err = query_virtqueue(ndev, mvq, &attr);
+ err = query_virtqueues(ndev, mvq->index, 1, &attr);
if (err)
return err;
}
@@ -2948,7 +3049,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
bool teardown = !is_resumable(ndev);
int err;
- suspend_vqs(ndev);
+ suspend_vqs(ndev, 0, ndev->cur_num_vqs);
if (teardown) {
err = save_channels_info(ndev);
if (err)
@@ -2973,7 +3074,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
return err;
}
- resume_vqs(ndev);
+ resume_vqs(ndev, 0, ndev->cur_num_vqs);
return 0;
}
@@ -3097,7 +3198,7 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
teardown_vq_resources(ndev);
if (ndev->setup) {
- err = resume_vqs(ndev);
+ err = resume_vqs(ndev, 0, ndev->cur_num_vqs);
if (err) {
mlx5_vdpa_warn(mvdev, "failed to resume VQs\n");
goto err_driver;
@@ -3122,7 +3223,7 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
err_driver:
unregister_link_notifier(ndev);
err_setup:
- mlx5_vdpa_destroy_mr_resources(&ndev->mvdev);
+ mlx5_vdpa_clean_mrs(&ndev->mvdev);
ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED;
err_clear:
up_write(&ndev->reslock);
@@ -3134,7 +3235,7 @@ static void init_group_to_asid_map(struct mlx5_vdpa_dev *mvdev)
/* By default, all groups are mapped to asid 0 */
for (i = 0; i < MLX5_VDPA_NUMVQ_GROUPS; i++)
- mvdev->group2asid[i] = 0;
+ mvdev->mres.group2asid[i] = 0;
}
static bool needs_vqs_reset(const struct mlx5_vdpa_dev *mvdev)
@@ -3174,7 +3275,7 @@ static int mlx5_vdpa_compat_reset(struct vdpa_device *vdev, u32 flags)
}
if (flags & VDPA_RESET_F_CLEAN_MAP)
- mlx5_vdpa_destroy_mr_resources(&ndev->mvdev);
+ mlx5_vdpa_clean_mrs(&ndev->mvdev);
ndev->mvdev.status = 0;
ndev->mvdev.suspended = false;
ndev->cur_num_vqs = MLX5V_DEFAULT_VQ_COUNT;
@@ -3189,7 +3290,7 @@ static int mlx5_vdpa_compat_reset(struct vdpa_device *vdev, u32 flags)
if ((flags & VDPA_RESET_F_CLEAN_MAP) &&
MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
if (mlx5_vdpa_create_dma_mr(mvdev))
- mlx5_vdpa_warn(mvdev, "create MR failed\n");
+ mlx5_vdpa_err(mvdev, "create MR failed\n");
}
if (vq_reset)
setup_vq_resources(ndev, false);
@@ -3244,7 +3345,7 @@ static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
new_mr = mlx5_vdpa_create_mr(mvdev, iotlb);
if (IS_ERR(new_mr)) {
err = PTR_ERR(new_mr);
- mlx5_vdpa_warn(mvdev, "create map failed(%d)\n", err);
+ mlx5_vdpa_err(mvdev, "create map failed(%d)\n", err);
return err;
}
} else {
@@ -3252,12 +3353,12 @@ static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
new_mr = NULL;
}
- if (!mvdev->mr[asid]) {
+ if (!mvdev->mres.mr[asid]) {
mlx5_vdpa_update_mr(mvdev, new_mr, asid);
} else {
err = mlx5_vdpa_change_map(mvdev, new_mr, asid);
if (err) {
- mlx5_vdpa_warn(mvdev, "change map failed(%d)\n", err);
+ mlx5_vdpa_err(mvdev, "change map failed(%d)\n", err);
goto out_err;
}
}
@@ -3332,7 +3433,10 @@ static void mlx5_vdpa_free(struct vdpa_device *vdev)
ndev = to_mlx5_vdpa_ndev(mvdev);
free_fixed_resources(ndev);
- mlx5_vdpa_destroy_mr_resources(mvdev);
+ mlx5_vdpa_clean_mrs(mvdev);
+ mlx5_vdpa_destroy_mr_resources(&ndev->mvdev);
+ mlx5_cmd_cleanup_async_ctx(&mvdev->async_ctx);
+
if (!is_zero_ether_addr(ndev->config.mac)) {
pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev));
mlx5_mpfs_del_mac(pfmdev, ndev->config.mac);
@@ -3500,8 +3604,7 @@ static int mlx5_vdpa_suspend(struct vdpa_device *vdev)
mlx5_vdpa_info(mvdev, "suspending device\n");
down_write(&ndev->reslock);
- unregister_link_notifier(ndev);
- err = suspend_vqs(ndev);
+ err = suspend_vqs(ndev, 0, ndev->cur_num_vqs);
mlx5_vdpa_cvq_suspend(mvdev);
mvdev->suspended = true;
up_write(&ndev->reslock);
@@ -3521,8 +3624,8 @@ static int mlx5_vdpa_resume(struct vdpa_device *vdev)
down_write(&ndev->reslock);
mvdev->suspended = false;
- err = resume_vqs(ndev);
- register_link_notifier(ndev);
+ err = resume_vqs(ndev, 0, ndev->cur_num_vqs);
+ queue_link_work(ndev);
up_write(&ndev->reslock);
return err;
@@ -3537,12 +3640,12 @@ static int mlx5_set_group_asid(struct vdpa_device *vdev, u32 group,
if (group >= MLX5_VDPA_NUMVQ_GROUPS)
return -EINVAL;
- mvdev->group2asid[group] = asid;
+ mvdev->mres.group2asid[group] = asid;
- mutex_lock(&mvdev->mr_mtx);
- if (group == MLX5_VDPA_CVQ_GROUP && mvdev->mr[asid])
- err = mlx5_vdpa_update_cvq_iotlb(mvdev, mvdev->mr[asid]->iotlb, asid);
- mutex_unlock(&mvdev->mr_mtx);
+ mutex_lock(&mvdev->mres.lock);
+ if (group == MLX5_VDPA_CVQ_GROUP && mvdev->mres.mr[asid])
+ err = mlx5_vdpa_update_cvq_iotlb(mvdev, mvdev->mres.mr[asid]->iotlb, asid);
+ mutex_unlock(&mvdev->mres.lock);
return err;
}
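
For context, the VQ groups whose ASID mapping is manipulated here are the ones used throughout this patch; presumably an enum along these lines (names taken from this file, ordering assumed):

	enum {
		MLX5_VDPA_DATAVQ_GROUP,       /* data VQ buffers */
		MLX5_VDPA_CVQ_GROUP,          /* control VQ */
		MLX5_VDPA_DATAVQ_DESC_GROUP,  /* data VQ descriptor tables */
		MLX5_VDPA_NUMVQ_GROUPS
	};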
@@ -3854,18 +3957,22 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
ndev->rqt_size = 1;
}
+ mlx5_cmd_init_async_ctx(mdev, &mvdev->async_ctx);
+
ndev->mvdev.mlx_features = device_features;
mvdev->vdev.dma_dev = &mdev->pdev->dev;
err = mlx5_vdpa_alloc_resources(&ndev->mvdev);
if (err)
goto err_mpfs;
- INIT_LIST_HEAD(&mvdev->mr_list_head);
+ err = mlx5_vdpa_init_mr_resources(mvdev);
+ if (err)
+ goto err_res;
if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
err = mlx5_vdpa_create_dma_mr(mvdev);
if (err)
- goto err_res;
+ goto err_mr_res;
}
err = alloc_fixed_resources(ndev);
@@ -3906,6 +4013,8 @@ err_reg:
err_res2:
free_fixed_resources(ndev);
err_mr:
+ mlx5_vdpa_clean_mrs(mvdev);
+err_mr_res:
mlx5_vdpa_destroy_mr_resources(mvdev);
err_res:
mlx5_vdpa_free_resources(&ndev->mvdev);
@@ -3937,9 +4046,37 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *
mgtdev->ndev = NULL;
}
+static int mlx5_vdpa_set_attr(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *dev,
+ const struct vdpa_dev_set_config *add_config)
+{
+ struct virtio_net_config *config;
+ struct mlx5_core_dev *pfmdev;
+ struct mlx5_vdpa_dev *mvdev;
+ struct mlx5_vdpa_net *ndev;
+ struct mlx5_core_dev *mdev;
+ int err = -EOPNOTSUPP;
+
+ mvdev = to_mvdev(dev);
+ ndev = to_mlx5_vdpa_ndev(mvdev);
+ mdev = mvdev->mdev;
+ config = &ndev->config;
+
+ down_write(&ndev->reslock);
+ if (add_config->mask & (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR)) {
+ pfmdev = pci_get_drvdata(pci_physfn(mdev->pdev));
+ err = mlx5_mpfs_add_mac(pfmdev, config->mac);
+ if (!err)
+ ether_addr_copy(config->mac, add_config->net.mac);
+ }
+
+ up_write(&ndev->reslock);
+ return err;
+}
+
static const struct vdpa_mgmtdev_ops mdev_ops = {
.dev_add = mlx5_vdpa_dev_add,
.dev_del = mlx5_vdpa_dev_del,
+ .dev_set_attr = mlx5_vdpa_set_attr,
};
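
The new dev_set_attr hook is presumably invoked by the vdpa core when userspace changes attributes of an already-created device (e.g. the MAC address); conceptually something like:

	/* hypothetical caller sketch, based only on the ops signature above */
	if (mdev->ops->dev_set_attr)
		err = mdev->ops->dev_set_attr(mdev, dev, &set_config);
	else
		err = -EOPNOTSUPP;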
static struct virtio_device_id id_table[] = {