virtio: Protect vqs list access
[ Upstream commit 0e566c8f0f2e8325e35f6f97e13cde5356b41814 ]
VQs may be accessed to mark the device broken while they are
created/destroyed. Hence protect the access to the vqs list.
Fixes: e2dcdfe95c ("virtio: virtio_break_device() to mark all virtqueues broken.")
Signed-off-by: Parav Pandit <parav@nvidia.com>
Link: https://lore.kernel.org/r/20210721142648.1525924-4-parav@nvidia.com
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
This commit is contained in:

committed by
Sasha Levin

parent
9108120fbe
commit
293180f593
@@ -357,6 +357,7 @@ int register_virtio_device(struct virtio_device *dev)
|
|||||||
virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
|
virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
|
||||||
|
|
||||||
INIT_LIST_HEAD(&dev->vqs);
|
INIT_LIST_HEAD(&dev->vqs);
|
||||||
|
spin_lock_init(&dev->vqs_list_lock);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* device_add() causes the bus infrastructure to look for a matching
|
* device_add() causes the bus infrastructure to look for a matching
|
||||||
|
@@ -1668,7 +1668,9 @@ static struct virtqueue *vring_create_virtqueue_packed(
|
|||||||
cpu_to_le16(vq->packed.event_flags_shadow);
|
cpu_to_le16(vq->packed.event_flags_shadow);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
spin_lock(&vdev->vqs_list_lock);
|
||||||
list_add_tail(&vq->vq.list, &vdev->vqs);
|
list_add_tail(&vq->vq.list, &vdev->vqs);
|
||||||
|
spin_unlock(&vdev->vqs_list_lock);
|
||||||
return &vq->vq;
|
return &vq->vq;
|
||||||
|
|
||||||
err_desc_extra:
|
err_desc_extra:
|
||||||
@@ -2126,7 +2128,9 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
|
|||||||
memset(vq->split.desc_state, 0, vring.num *
|
memset(vq->split.desc_state, 0, vring.num *
|
||||||
sizeof(struct vring_desc_state_split));
|
sizeof(struct vring_desc_state_split));
|
||||||
|
|
||||||
|
spin_lock(&vdev->vqs_list_lock);
|
||||||
list_add_tail(&vq->vq.list, &vdev->vqs);
|
list_add_tail(&vq->vq.list, &vdev->vqs);
|
||||||
|
spin_unlock(&vdev->vqs_list_lock);
|
||||||
return &vq->vq;
|
return &vq->vq;
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
|
EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
|
||||||
@@ -2210,7 +2214,9 @@ void vring_del_virtqueue(struct virtqueue *_vq)
|
|||||||
}
|
}
|
||||||
if (!vq->packed_ring)
|
if (!vq->packed_ring)
|
||||||
kfree(vq->split.desc_state);
|
kfree(vq->split.desc_state);
|
||||||
|
spin_lock(&vq->vq.vdev->vqs_list_lock);
|
||||||
list_del(&_vq->list);
|
list_del(&_vq->list);
|
||||||
|
spin_unlock(&vq->vq.vdev->vqs_list_lock);
|
||||||
kfree(vq);
|
kfree(vq);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(vring_del_virtqueue);
|
EXPORT_SYMBOL_GPL(vring_del_virtqueue);
|
||||||
@@ -2274,10 +2280,12 @@ void virtio_break_device(struct virtio_device *dev)
|
|||||||
{
|
{
|
||||||
struct virtqueue *_vq;
|
struct virtqueue *_vq;
|
||||||
|
|
||||||
|
spin_lock(&dev->vqs_list_lock);
|
||||||
list_for_each_entry(_vq, &dev->vqs, list) {
|
list_for_each_entry(_vq, &dev->vqs, list) {
|
||||||
struct vring_virtqueue *vq = to_vvq(_vq);
|
struct vring_virtqueue *vq = to_vvq(_vq);
|
||||||
vq->broken = true;
|
vq->broken = true;
|
||||||
}
|
}
|
||||||
|
spin_unlock(&dev->vqs_list_lock);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(virtio_break_device);
|
EXPORT_SYMBOL_GPL(virtio_break_device);
|
||||||
|
|
||||||
|
@@ -110,6 +110,7 @@ struct virtio_device {
|
|||||||
bool config_enabled;
|
bool config_enabled;
|
||||||
bool config_change_pending;
|
bool config_change_pending;
|
||||||
spinlock_t config_lock;
|
spinlock_t config_lock;
|
||||||
|
spinlock_t vqs_list_lock; /* Protects VQs list access */
|
||||||
struct device dev;
|
struct device dev;
|
||||||
struct virtio_device_id id;
|
struct virtio_device_id id;
|
||||||
const struct virtio_config_ops *config;
|
const struct virtio_config_ops *config;
|
||||||
|
Reference in New Issue
Block a user