Drivers: hv: Optimize the signaling on the write path
The host has already implemented the "read" side optimizations. Leverage that to optimize "write" side signaling.

Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Reviewed-by: Haiyang Zhang <haiyangz@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit 98fa8cf4bc
parent f878f3d59e
committed by Greg Kroah-Hartman
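Summary of the change: rather than signaling the host after every successful write whenever the host's interrupt mask is clear, the guest now signals only when a write transitions the ring buffer from empty to non-empty. Three areas change: the vmbus_sendpacket*() send paths (drivers/hv/channel.c), the hv_ringbuffer_write() prototype (drivers/hv/hyperv_vmbus.h), and the ring-buffer write path (drivers/hv/ring_buffer.c), which gains a hv_need_to_signal() helper and a stronger memory barrier.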
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -564,6 +564,7 @@ int vmbus_sendpacket(struct vmbus_channel *channel, const void *buffer,
 	struct scatterlist bufferlist[3];
 	u64 aligned_data = 0;
 	int ret;
+	bool signal = false;
 
 
 	/* Setup the descriptor */
@@ -580,9 +581,9 @@ int vmbus_sendpacket(struct vmbus_channel *channel, const void *buffer,
 	sg_set_buf(&bufferlist[2], &aligned_data,
 		   packetlen_aligned - packetlen);
 
-	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
+	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
 
-	if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
+	if (ret == 0 && signal)
 		vmbus_setevent(channel);
 
 	return ret;
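Taken together, the caller-side change is small: hv_ringbuffer_write() now reports, through an out-parameter, whether this write made the ring non-empty, and the caller signals only in that case. In outline (a simplified sketch of the pattern above, with packet setup and error handling omitted):

	bool signal = false;
	int ret;

	/* The ring-buffer layer decides whether the host must be poked. */
	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);

	/*
	 * Signal only on success *and* an empty -> non-empty transition;
	 * the old code signaled whenever the host's interrupt mask was clear.
	 */
	if (ret == 0 && signal)
		vmbus_setevent(channel);

The same two substitutions are applied verbatim to vmbus_sendpacket_pagebuffer() and vmbus_sendpacket_multipagebuffer() in the hunks below.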
@@ -606,6 +607,7 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
 	u32 packetlen_aligned;
 	struct scatterlist bufferlist[3];
 	u64 aligned_data = 0;
+	bool signal = false;
 
 	if (pagecount > MAX_PAGE_BUFFER_COUNT)
 		return -EINVAL;
@@ -641,9 +643,9 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
 	sg_set_buf(&bufferlist[2], &aligned_data,
 		   packetlen_aligned - packetlen);
 
-	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
+	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
 
-	if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
+	if (ret == 0 && signal)
 		vmbus_setevent(channel);
 
 	return ret;
@@ -665,6 +667,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
 	u32 packetlen_aligned;
 	struct scatterlist bufferlist[3];
 	u64 aligned_data = 0;
+	bool signal = false;
 	u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
 					 multi_pagebuffer->len);
 
@@ -703,9 +706,9 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
 	sg_set_buf(&bufferlist[2], &aligned_data,
 		   packetlen_aligned - packetlen);
 
-	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
+	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
 
-	if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
+	if (ret == 0 && signal)
 		vmbus_setevent(channel);
 
 	return ret;
@@ -555,7 +555,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
|
|||||||
|
|
||||||
int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info,
|
int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info,
|
||||||
struct scatterlist *sglist,
|
struct scatterlist *sglist,
|
||||||
u32 sgcount);
|
u32 sgcount, bool *signal);
|
||||||
|
|
||||||
int hv_ringbuffer_peek(struct hv_ring_buffer_info *ring_info, void *buffer,
|
int hv_ringbuffer_peek(struct hv_ring_buffer_info *ring_info, void *buffer,
|
||||||
u32 buflen);
|
u32 buflen);
|
||||||
|
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -53,6 +53,37 @@ u32 hv_end_read(struct hv_ring_buffer_info *rbi)
 	return read;
 }
 
+/*
+ * When we write to the ring buffer, check if the host needs to
+ * be signaled. Here is the details of this protocol:
+ *
+ *	1. The host guarantees that while it is draining the
+ *	   ring buffer, it will set the interrupt_mask to
+ *	   indicate it does not need to be interrupted when
+ *	   new data is placed.
+ *
+ *	2. The host guarantees that it will completely drain
+ *	   the ring buffer before exiting the read loop. Further,
+ *	   once the ring buffer is empty, it will clear the
+ *	   interrupt_mask and re-check to see if new data has
+ *	   arrived.
+ */
+
+static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
+{
+	if (rbi->ring_buffer->interrupt_mask)
+		return false;
+
+	/*
+	 * This is the only case we need to signal when the
+	 * ring transitions from being empty to non-empty.
+	 */
+	if (old_write == rbi->ring_buffer->read_index)
+		return true;
+
+	return false;
+}
+
 /*
  * hv_get_next_write_location()
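The helper reduces the decision to a single comparison: the ring was empty just before this write exactly when the pre-write write index equals the host's read index. A self-contained toy model of that rule, with made-up index values (the struct here is hypothetical and does not mirror the real ring-buffer layout):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Toy stand-in for the guest-visible ring-buffer header. */
	struct toy_ring {
		uint32_t read_index;      /* host's consume cursor */
		uint32_t write_index;     /* guest's produce cursor */
		uint32_t interrupt_mask;  /* host sets this while draining */
	};

	static bool toy_need_to_signal(uint32_t old_write, const struct toy_ring *r)
	{
		if (r->interrupt_mask)              /* host asked not to be poked */
			return false;
		return old_write == r->read_index;  /* empty -> non-empty transition */
	}

	int main(void)
	{
		struct toy_ring r = { .read_index = 100, .write_index = 100 };

		uint32_t old_write = r.write_index; /* snapshot before copying data */
		r.write_index = 164;                /* pretend we appended 64 bytes */
		printf("first write, ring was empty:   signal=%d\n",
		       toy_need_to_signal(old_write, &r));  /* prints 1 */

		old_write = r.write_index;          /* host has not drained yet */
		r.write_index = 228;
		printf("second write, ring non-empty: signal=%d\n",
		       toy_need_to_signal(old_write, &r));  /* prints 0 */
		return 0;
	}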
@@ -322,7 +353,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
  *
  */
 int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
-		    struct scatterlist *sglist, u32 sgcount)
+		    struct scatterlist *sglist, u32 sgcount, bool *signal)
 {
 	int i = 0;
 	u32 bytes_avail_towrite;
@@ -331,6 +362,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
 
 	struct scatterlist *sg;
 	u32 next_write_location;
+	u32 old_write;
 	u64 prev_indices = 0;
 	unsigned long flags;
 
@@ -359,6 +391,8 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
 	/* Write to the ring buffer */
 	next_write_location = hv_get_next_write_location(outring_info);
 
+	old_write = next_write_location;
+
 	for_each_sg(sglist, sg, sgcount, i)
 	{
 		next_write_location = hv_copyto_ringbuffer(outring_info,
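Note that old_write is captured with the ring lock held, before any payload is copied: at that point next_write_location still holds the pre-write write index, which is exactly the value hv_need_to_signal() must later compare against the host's read index.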
@@ -375,14 +409,16 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
 					     &prev_indices,
 					     sizeof(u64));
 
-	/* Make sure we flush all writes before updating the writeIndex */
-	smp_wmb();
+	/* Issue a full memory barrier before updating the write index */
+	smp_mb();
 
 	/* Now, update the write location */
 	hv_set_next_write_location(outring_info, next_write_location);
 
 
 	spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+
+	*signal = hv_need_to_signal(old_write, outring_info);
 	return 0;
 }
 
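The barrier upgrade is the subtle part. A write barrier (smp_wmb) only orders the payload stores ahead of the write-index store; it says nothing about the later load of read_index in hv_need_to_signal(). If that load could be satisfied early, the guest might see a stale read_index, conclude the ring had not been empty, and skip the signal at the very moment the host drains the ring, clears interrupt_mask and goes idle -- a lost wakeup. The host protocol quoted above (drain completely, clear the mask, then re-check) is the other half of this handshake. This is one plausible reading of the change; the sketch below, in C11 atomics with hypothetical names, shows the store-then-load ordering the signaling decision ultimately relies on, not the patch's exact barrier placement:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>

	/* Sketch of the writer-side publish-then-decide step. */
	static bool publish_and_check(_Atomic uint32_t *write_index,
				      _Atomic uint32_t *read_index,
				      uint32_t next_write, uint32_t old_write)
	{
		/*
		 * A seq_cst store followed by a seq_cst load gives StoreLoad
		 * ordering, the guarantee a full barrier like smp_mb() provides;
		 * a release store alone (the analogue of smp_wmb) would allow
		 * the read_index load to be reordered before the index store
		 * becomes visible.
		 */
		atomic_store_explicit(write_index, next_write, memory_order_seq_cst);
		return old_write ==
		       atomic_load_explicit(read_index, memory_order_seq_cst);
	}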