vdpa/mlx5: Ensure valid indices are provided
Following patches add control virtqueue and multiqueue support. We want to verify that the index value in callbacks referencing a virtqueue is valid. The logic defining valid indices is as follows: CVQ clear: 0 and 1. CVQ set, MQ clear: 0, 1 and 2. CVQ set, MQ set: 0..nvq where nvq is whatever was provided to _vdpa_register_device(). Signed-off-by: Eli Cohen <elic@nvidia.com> Link: https://lore.kernel.org/r/20210823052123.14909-5-elic@nvidia.com Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
This commit is contained in:
Parent
db296d252d
Commit
e4fc66508c
|
@ -56,6 +56,7 @@ struct mlx5_vdpa_dev {
|
||||||
u64 actual_features;
|
u64 actual_features;
|
||||||
u8 status;
|
u8 status;
|
||||||
u32 max_vqs;
|
u32 max_vqs;
|
||||||
|
u16 max_idx;
|
||||||
u32 generation;
|
u32 generation;
|
||||||
|
|
||||||
struct mlx5_vdpa_mr mr;
|
struct mlx5_vdpa_mr mr;
|
||||||
|
|
|
@ -45,6 +45,8 @@ MODULE_LICENSE("Dual BSD/GPL");
|
||||||
(VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK | \
|
(VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK | \
|
||||||
VIRTIO_CONFIG_S_FEATURES_OK | VIRTIO_CONFIG_S_NEEDS_RESET | VIRTIO_CONFIG_S_FAILED)
|
VIRTIO_CONFIG_S_FEATURES_OK | VIRTIO_CONFIG_S_NEEDS_RESET | VIRTIO_CONFIG_S_FAILED)
|
||||||
|
|
||||||
|
#define MLX5_FEATURE(_mvdev, _feature) (!!((_mvdev)->actual_features & BIT_ULL(_feature)))
|
||||||
|
|
||||||
struct mlx5_vdpa_net_resources {
|
struct mlx5_vdpa_net_resources {
|
||||||
u32 tisn;
|
u32 tisn;
|
||||||
u32 tdn;
|
u32 tdn;
|
||||||
|
@ -133,6 +135,14 @@ struct mlx5_vdpa_virtqueue {
|
||||||
*/
|
*/
|
||||||
#define MLX5_MAX_SUPPORTED_VQS 16
|
#define MLX5_MAX_SUPPORTED_VQS 16
|
||||||
|
|
||||||
|
/* Return true when @idx names a virtqueue this device exposes.
 * mvdev->max_idx holds the highest valid index, derived from the
 * negotiated CVQ/MQ features, so the check is a simple upper bound.
 */
static bool is_index_valid(struct mlx5_vdpa_dev *mvdev, u16 idx)
{
	return !unlikely(idx > mvdev->max_idx);
}
|
||||||
|
|
||||||
struct mlx5_vdpa_net {
|
struct mlx5_vdpa_net {
|
||||||
struct mlx5_vdpa_dev mvdev;
|
struct mlx5_vdpa_dev mvdev;
|
||||||
struct mlx5_vdpa_net_resources res;
|
struct mlx5_vdpa_net_resources res;
|
||||||
|
@ -1354,6 +1364,9 @@ static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
|
||||||
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
|
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
|
||||||
struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
|
struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
|
||||||
|
|
||||||
|
if (!is_index_valid(mvdev, idx))
|
||||||
|
return;
|
||||||
|
|
||||||
if (unlikely(!mvq->ready))
|
if (unlikely(!mvq->ready))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
|
@ -1367,6 +1380,9 @@ static int mlx5_vdpa_set_vq_address(struct vdpa_device *vdev, u16 idx, u64 desc_
|
||||||
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
|
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
|
||||||
struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
|
struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
|
||||||
|
|
||||||
|
if (!is_index_valid(mvdev, idx))
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
mvq->desc_addr = desc_area;
|
mvq->desc_addr = desc_area;
|
||||||
mvq->device_addr = device_area;
|
mvq->device_addr = device_area;
|
||||||
mvq->driver_addr = driver_area;
|
mvq->driver_addr = driver_area;
|
||||||
|
@ -1379,6 +1395,9 @@ static void mlx5_vdpa_set_vq_num(struct vdpa_device *vdev, u16 idx, u32 num)
|
||||||
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
|
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
|
||||||
struct mlx5_vdpa_virtqueue *mvq;
|
struct mlx5_vdpa_virtqueue *mvq;
|
||||||
|
|
||||||
|
if (!is_index_valid(mvdev, idx))
|
||||||
|
return;
|
||||||
|
|
||||||
mvq = &ndev->vqs[idx];
|
mvq = &ndev->vqs[idx];
|
||||||
mvq->num_ent = num;
|
mvq->num_ent = num;
|
||||||
}
|
}
|
||||||
|
@ -1397,6 +1416,9 @@ static void mlx5_vdpa_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready
|
||||||
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
|
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
|
||||||
struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
|
struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
|
||||||
|
|
||||||
|
if (!is_index_valid(mvdev, idx))
|
||||||
|
return;
|
||||||
|
|
||||||
if (!ready)
|
if (!ready)
|
||||||
suspend_vq(ndev, mvq);
|
suspend_vq(ndev, mvq);
|
||||||
|
|
||||||
|
@ -1409,6 +1431,9 @@ static bool mlx5_vdpa_get_vq_ready(struct vdpa_device *vdev, u16 idx)
|
||||||
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
|
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
|
||||||
struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
|
struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
|
||||||
|
|
||||||
|
if (!is_index_valid(mvdev, idx))
|
||||||
|
return false;
|
||||||
|
|
||||||
return mvq->ready;
|
return mvq->ready;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1419,6 +1444,9 @@ static int mlx5_vdpa_set_vq_state(struct vdpa_device *vdev, u16 idx,
|
||||||
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
|
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
|
||||||
struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
|
struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
|
||||||
|
|
||||||
|
if (!is_index_valid(mvdev, idx))
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) {
|
if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) {
|
||||||
mlx5_vdpa_warn(mvdev, "can't modify available index\n");
|
mlx5_vdpa_warn(mvdev, "can't modify available index\n");
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
@ -1437,6 +1465,9 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa
|
||||||
struct mlx5_virtq_attr attr;
|
struct mlx5_virtq_attr attr;
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
|
if (!is_index_valid(mvdev, idx))
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
/* If the virtq object was destroyed, use the value saved at
|
/* If the virtq object was destroyed, use the value saved at
|
||||||
* the last minute of suspend_vq. This caters for userspace
|
* the last minute of suspend_vq. This caters for userspace
|
||||||
* that cares about emulating the index after vq is stopped.
|
* that cares about emulating the index after vq is stopped.
|
||||||
|
@ -1556,6 +1587,24 @@ static __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val)
|
||||||
return __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev), val);
|
return __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev), val);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void update_cvq_info(struct mlx5_vdpa_dev *mvdev)
|
||||||
|
{
|
||||||
|
if (MLX5_FEATURE(mvdev, VIRTIO_NET_F_CTRL_VQ)) {
|
||||||
|
if (MLX5_FEATURE(mvdev, VIRTIO_NET_F_MQ)) {
|
||||||
|
/* MQ supported. CVQ index is right above the last data virtqueue's */
|
||||||
|
mvdev->max_idx = mvdev->max_vqs;
|
||||||
|
} else {
|
||||||
|
/* Only CVQ supportted. data virtqueues occupy indices 0 and 1.
|
||||||
|
* CVQ gets index 2
|
||||||
|
*/
|
||||||
|
mvdev->max_idx = 2;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
/* Two data virtqueues only: one for rx and one for tx */
|
||||||
|
mvdev->max_idx = 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features)
|
static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features)
|
||||||
{
|
{
|
||||||
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
|
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
|
||||||
|
@ -1571,6 +1620,7 @@ static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features)
|
||||||
ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features;
|
ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features;
|
||||||
ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, ndev->mtu);
|
ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, ndev->mtu);
|
||||||
ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP);
|
ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP);
|
||||||
|
update_cvq_info(mvdev);
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1792,6 +1842,7 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
|
||||||
ndev->mvdev.status = 0;
|
ndev->mvdev.status = 0;
|
||||||
ndev->mvdev.mlx_features = 0;
|
ndev->mvdev.mlx_features = 0;
|
||||||
memset(ndev->event_cbs, 0, sizeof(ndev->event_cbs));
|
memset(ndev->event_cbs, 0, sizeof(ndev->event_cbs));
|
||||||
|
ndev->mvdev.actual_features = 0;
|
||||||
++mvdev->generation;
|
++mvdev->generation;
|
||||||
if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
|
if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
|
||||||
if (mlx5_vdpa_create_mr(mvdev, NULL))
|
if (mlx5_vdpa_create_mr(mvdev, NULL))
|
||||||
|
@ -1892,6 +1943,9 @@ static struct vdpa_notification_area mlx5_get_vq_notification(struct vdpa_device
|
||||||
struct mlx5_vdpa_net *ndev;
|
struct mlx5_vdpa_net *ndev;
|
||||||
phys_addr_t addr;
|
phys_addr_t addr;
|
||||||
|
|
||||||
|
if (!is_index_valid(mvdev, idx))
|
||||||
|
return ret;
|
||||||
|
|
||||||
/* If SF BAR size is smaller than PAGE_SIZE, do not use direct
|
/* If SF BAR size is smaller than PAGE_SIZE, do not use direct
|
||||||
* notification to avoid the risk of mapping pages that contain BAR of more
|
* notification to avoid the risk of mapping pages that contain BAR of more
|
||||||
* than one SF
|
* than one SF
|
||||||
|
|
Loading…
Reference in new issue