Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
Pull virtio updates from Michael Tsirkin:
 "Fixes, cleanups, performance

  A bunch of changes to virtio, most affecting virtio net. Also ptr_ring
  batched zeroing - first of batching enhancements that seems ready."

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  s390/virtio: change maintainership
  tools/virtio: fix spelling mistake: "wakeus" -> "wakeups"
  virtio_net: tidy a couple debug statements
  ptr_ring: support testing different batching sizes
  ringtest: support test specific parameters
  ptr_ring: batch ring zeroing
  virtio: virtio_driver doc
  virtio_net: don't reset twice on XDP on/off
  virtio_net: fix support for small rings
  virtio_net: reduce alignment for buffers
  virtio_net: rework mergeable buffer handling
  virtio_net: allow specifying context for rx
  virtio: allow extra context per descriptor
  tools/virtio: fix build breakage
  virtio: add context flag to find vqs
  virtio: wrap find_vqs
  ringtest: fix an assert statement
include/linux/ptr_ring.h
@@ -34,11 +34,13 @@
 struct ptr_ring {
         int producer ____cacheline_aligned_in_smp;
         spinlock_t producer_lock;
-        int consumer ____cacheline_aligned_in_smp;
+        int consumer_head ____cacheline_aligned_in_smp; /* next valid entry */
+        int consumer_tail; /* next entry to invalidate */
         spinlock_t consumer_lock;
         /* Shared consumer/producer data */
         /* Read-only by both the producer and the consumer */
         int size ____cacheline_aligned_in_smp; /* max entries in queue */
+        int batch; /* number of entries to consume in a batch */
         void **queue;
 };

@@ -170,7 +172,7 @@ static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr)
 static inline void *__ptr_ring_peek(struct ptr_ring *r)
 {
         if (likely(r->size))
-                return r->queue[r->consumer];
+                return r->queue[r->consumer_head];
         return NULL;
 }

@@ -231,9 +233,38 @@ static inline bool ptr_ring_empty_bh(struct ptr_ring *r)
 /* Must only be called after __ptr_ring_peek returned !NULL */
 static inline void __ptr_ring_discard_one(struct ptr_ring *r)
 {
-        r->queue[r->consumer++] = NULL;
-        if (unlikely(r->consumer >= r->size))
-                r->consumer = 0;
+        /* Fundamentally, what we want to do is update consumer
+         * index and zero out the entry so producer can reuse it.
+         * Doing it naively at each consume would be as simple as:
+         *       r->queue[r->consumer++] = NULL;
+         *       if (unlikely(r->consumer >= r->size))
+         *               r->consumer = 0;
+         * but that is suboptimal when the ring is full as producer is writing
+         * out new entries in the same cache line.  Defer these updates until a
+         * batch of entries has been consumed.
+         */
+        int head = r->consumer_head++;
+
+        /* Once we have processed enough entries invalidate them in
+         * the ring all at once so producer can reuse their space in the ring.
+         * We also do this when we reach end of the ring - not mandatory
+         * but helps keep the implementation simple.
+         */
+        if (unlikely(r->consumer_head - r->consumer_tail >= r->batch ||
+                     r->consumer_head >= r->size)) {
+                /* Zero out entries in the reverse order: this way we touch the
+                 * cache line that producer might currently be reading the last;
+                 * producer won't make progress and touch other cache lines
+                 * besides the first one until we write out all entries.
+                 */
+                while (likely(head >= r->consumer_tail))
+                        r->queue[head--] = NULL;
+                r->consumer_tail = r->consumer_head;
+        }
+        if (unlikely(r->consumer_head >= r->size)) {
+                r->consumer_head = 0;
+                r->consumer_tail = 0;
+        }
 }

 static inline void *__ptr_ring_consume(struct ptr_ring *r)
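To make the deferred invalidation concrete, here is a minimal userspace sketch of the same batching scheme. The 8-entry ring and batch of 2 are chosen only for illustration; locking, the likely/unlikely hints, and the kernel allocator are omitted, and the kernel code above remains the authoritative version.

#include <stdio.h>

#define SIZE  8  /* ring entries (illustrative) */
#define BATCH 2  /* entries consumed before zeroing (illustrative) */

static void *queue[SIZE];
static int consumer_head; /* next valid entry */
static int consumer_tail; /* next entry to invalidate */

static void discard_one(void)
{
        int head = consumer_head++;

        /* Defer NULLing until a batch was consumed or the ring wraps,
         * then zero in reverse order, mirroring the kernel code above. */
        if (consumer_head - consumer_tail >= BATCH ||
            consumer_head >= SIZE) {
                while (head >= consumer_tail)
                        queue[head--] = NULL;
                consumer_tail = consumer_head;
        }
        if (consumer_head >= SIZE)
                consumer_head = consumer_tail = 0;
}

int main(void)
{
        int values[SIZE];

        for (int i = 0; i < SIZE; i++)
                queue[i] = &values[i];

        for (int i = 0; i < SIZE; i++) {
                discard_one();
                printf("after consume %d: head=%d tail=%d\n",
                       i, consumer_head, consumer_tail);
        }
        return 0;
}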
@@ -345,14 +376,27 @@ static inline void **__ptr_ring_init_queue_alloc(int size, gfp_t gfp)
         return kzalloc(ALIGN(size * sizeof(void *), SMP_CACHE_BYTES), gfp);
 }

+static inline void __ptr_ring_set_size(struct ptr_ring *r, int size)
+{
+        r->size = size;
+        r->batch = SMP_CACHE_BYTES * 2 / sizeof(*(r->queue));
+        /* We need to set batch at least to 1 to make logic
+         * in __ptr_ring_discard_one work correctly.
+         * Batching too much (because ring is small) would cause a lot of
+         * burstiness. Needs tuning, for now disable batching.
+         */
+        if (r->batch > r->size / 2 || !r->batch)
+                r->batch = 1;
+}
+
 static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp)
 {
         r->queue = __ptr_ring_init_queue_alloc(size, gfp);
         if (!r->queue)
                 return -ENOMEM;

-        r->size = size;
-        r->producer = r->consumer = 0;
+        __ptr_ring_set_size(r, size);
+        r->producer = r->consumer_head = r->consumer_tail = 0;
         spin_lock_init(&r->producer_lock);
         spin_lock_init(&r->consumer_lock);
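As a worked example of the heuristic: assuming 64-byte cache lines and 8-byte pointers (common, but architecture-dependent), SMP_CACHE_BYTES * 2 / sizeof(void *) gives 16, so any ring smaller than 32 entries falls back to batch = 1. A standalone sketch of just the heuristic, under those assumptions:

#include <stdio.h>

#define SMP_CACHE_BYTES 64 /* assumed cache line size, for illustration */

static int batch_for(int size)
{
        int batch = SMP_CACHE_BYTES * 2 / sizeof(void *); /* 16 on LP64 */

        /* Mirror __ptr_ring_set_size: batch must be at least 1, and
         * batching on a small ring is too bursty, so disable it there. */
        if (batch > size / 2 || !batch)
                batch = 1;
        return batch;
}

int main(void)
{
        int sizes[] = { 16, 31, 32, 256, 1024 };

        for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                printf("size %4d -> batch %d\n", sizes[i], batch_for(sizes[i]));
        return 0;
}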
@@ -373,9 +417,10 @@ static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue,
         else if (destroy)
                 destroy(ptr);

-        r->size = size;
+        __ptr_ring_set_size(r, size);
         r->producer = producer;
-        r->consumer = 0;
+        r->consumer_head = 0;
+        r->consumer_tail = 0;
         old = r->queue;
         r->queue = queue;

include/linux/virtio.h
@@ -44,6 +44,12 @@ int virtqueue_add_inbuf(struct virtqueue *vq,
                         void *data,
                         gfp_t gfp);

+int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
+                            struct scatterlist sg[], unsigned int num,
+                            void *data,
+                            void *ctx,
+                            gfp_t gfp);
+
 int virtqueue_add_sgs(struct virtqueue *vq,
                       struct scatterlist *sgs[],
                       unsigned int out_sgs,
@@ -59,6 +65,9 @@ bool virtqueue_notify(struct virtqueue *vq);

 void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len);

+void *virtqueue_get_buf_ctx(struct virtqueue *vq, unsigned int *len,
+                            void **ctx);
+
 void virtqueue_disable_cb(struct virtqueue *vq);

 bool virtqueue_enable_cb(struct virtqueue *vq);
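The two _ctx declarations above are meant to be used as a pair: the ctx pointer handed to virtqueue_add_inbuf_ctx() comes back from virtqueue_get_buf_ctx() when that buffer completes. A hedged sketch of a hypothetical driver using them follows; the hypo_* names and the elided error handling are illustrative, not part of this commit:

#include <linux/scatterlist.h>
#include <linux/virtio.h>

static int hypo_post_buffer(struct virtqueue *vq, void *buf,
                            unsigned int len, void *ctx)
{
        struct scatterlist sg;

        sg_init_one(&sg, buf, len);
        /* data token = buf, extra per-descriptor context = ctx */
        return virtqueue_add_inbuf_ctx(vq, &sg, 1, buf, ctx, GFP_ATOMIC);
}

static void hypo_complete(struct virtqueue *vq)
{
        unsigned int len;
        void *buf, *ctx;

        while ((buf = virtqueue_get_buf_ctx(vq, &len, &ctx)) != NULL) {
                /* buf is the data token added above; ctx is the extra
                 * context stored alongside it at add time. */
        }
}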
@@ -156,9 +165,13 @@ int virtio_device_restore(struct virtio_device *dev);
  * @feature_table_legacy: same as feature_table but when working in legacy mode.
  * @feature_table_size_legacy: number of entries in feature table legacy array.
  * @probe: the function to call when a device is found. Returns 0 or -errno.
+ * @scan: optional function to call after successful probe; intended
+ *    for virtio-scsi to invoke a scan.
  * @remove: the function to call when a device is removed.
  * @config_changed: optional function to call when the device configuration
  *    changes; may be called in interrupt context.
+ * @freeze: optional function to call during suspend/hibernation.
+ * @restore: optional function to call on resume.
  */
 struct virtio_driver {
         struct device_driver driver;
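For orientation, a minimal skeleton of a struct virtio_driver wiring up the callbacks documented above; every hypo_* name is hypothetical and the bodies are stubs, shown only to map the kernel-doc entries onto fields:

#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>

static int hypo_probe(struct virtio_device *vdev)
{
        return 0; /* 0 or -errno, per @probe above */
}

static void hypo_remove(struct virtio_device *vdev)
{
}

static void hypo_config_changed(struct virtio_device *vdev)
{
        /* may be called in interrupt context, per @config_changed */
}

static const struct virtio_device_id hypo_id_table[] = {
        { VIRTIO_DEV_ANY_ID, VIRTIO_DEV_ANY_ID },
        { 0 },
};

static struct virtio_driver hypo_driver = {
        .driver = {
                .name  = "hypo-virtio",
                .owner = THIS_MODULE,
        },
        .id_table       = hypo_id_table,
        .probe          = hypo_probe,
        .remove         = hypo_remove,
        .config_changed = hypo_config_changed,
};
module_virtio_driver(hypo_driver);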
include/linux/virtio_config.h
@@ -72,7 +72,8 @@ struct virtio_config_ops {
         void (*reset)(struct virtio_device *vdev);
         int (*find_vqs)(struct virtio_device *, unsigned nvqs,
                         struct virtqueue *vqs[], vq_callback_t *callbacks[],
-                        const char * const names[], struct irq_affinity *desc);
+                        const char * const names[], const bool *ctx,
+                        struct irq_affinity *desc);
         void (*del_vqs)(struct virtio_device *);
         u64 (*get_features)(struct virtio_device *vdev);
         int (*finalize_features)(struct virtio_device *vdev);
@@ -173,12 +174,32 @@ struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
         vq_callback_t *callbacks[] = { c };
         const char *names[] = { n };
         struct virtqueue *vq;
-        int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names, NULL);
+        int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names, NULL,
+                                         NULL);
         if (err < 0)
                 return ERR_PTR(err);
         return vq;
 }

+static inline
+int virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+                        struct virtqueue *vqs[], vq_callback_t *callbacks[],
+                        const char * const names[],
+                        struct irq_affinity *desc)
+{
+        return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, NULL, desc);
+}
+
+static inline
+int virtio_find_vqs_ctx(struct virtio_device *vdev, unsigned nvqs,
+                        struct virtqueue *vqs[], vq_callback_t *callbacks[],
+                        const char * const names[], const bool *ctx,
+                        struct irq_affinity *desc)
+{
+        return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, ctx,
+                                      desc);
+}
+
 /**
  * virtio_device_ready - enable vq use in probe function
  * @vdev: the device
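A sketch of how a driver might call the new wrapper, requesting per-descriptor context only for its receive queue. The two-queue layout and the hypo_* names are illustrative; virtio_net's actual use lives in the net driver and is not shown here:

static int hypo_setup_vqs(struct virtio_device *vdev,
                          struct virtqueue *vqs[2],
                          vq_callback_t *rx_done, vq_callback_t *tx_done)
{
        vq_callback_t *callbacks[] = { rx_done, tx_done };
        static const char * const names[] = { "rx", "tx" };
        /* ctx[i] selects per-descriptor context for vqs[i] */
        static const bool ctx[] = { true, false };

        return virtio_find_vqs_ctx(vdev, 2, vqs, callbacks, names, ctx,
                                   NULL /* no irq_affinity hint */);
}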
include/linux/virtio_ring.h
@@ -71,6 +71,7 @@ struct virtqueue *vring_create_virtqueue(unsigned int index,
                                          struct virtio_device *vdev,
                                          bool weak_barriers,
                                          bool may_reduce_num,
+                                         bool ctx,
                                          bool (*notify)(struct virtqueue *vq),
                                          void (*callback)(struct virtqueue *vq),
                                          const char *name);
@@ -80,6 +81,7 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
                                         struct vring vring,
                                         struct virtio_device *vdev,
                                         bool weak_barriers,
+                                        bool ctx,
                                         bool (*notify)(struct virtqueue *),
                                         void (*callback)(struct virtqueue *),
                                         const char *name);
@@ -93,6 +95,7 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
                                       unsigned int vring_align,
                                       struct virtio_device *vdev,
                                       bool weak_barriers,
+                                      bool ctx,
                                       void *pages,
                                       bool (*notify)(struct virtqueue *vq),
                                       void (*callback)(struct virtqueue *vq),
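Transports forward the per-queue flag from find_vqs into these vring constructors. A sketch of one such call with the new argument; the hypo_* name and the numeric values are illustrative only:

static struct virtqueue *hypo_setup_vq(struct virtio_device *vdev,
                                       unsigned int index, bool ctx,
                                       bool (*notify)(struct virtqueue *),
                                       void (*callback)(struct virtqueue *),
                                       const char *name)
{
        return vring_create_virtqueue(index,
                                      256,              /* num (illustrative) */
                                      SMP_CACHE_BYTES,  /* vring_align */
                                      vdev,
                                      true,             /* weak_barriers */
                                      true,             /* may_reduce_num */
                                      ctx,              /* new in this series */
                                      notify, callback, name);
}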