nvme-tcp: fix regression that causes sporadic requests to time out
[ Upstream commit 3770a42bb8ceb856877699257a43c0585a5d2996 ]
When we queue requests, we strive to batch as much as possible and also
signal the network stack that more data is about to be sent over a socket
with MSG_SENDPAGE_NOTLAST. Whether this flag gets set depends on the
pending requests queued as well as on queue->more_requests, which is
derived from the block layer last-in-batch indication.
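
For context, the flag is chosen in the data send path; a condensed sketch
of that decision (paraphrasing nvme_tcp_try_send_data(), digest handling
omitted, not the verbatim driver code):

	int flags = MSG_DONTWAIT;

	if (last && !nvme_tcp_queue_more(queue))
		flags |= MSG_EOR;	/* nothing else pending, push to the wire now */
	else
		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST; /* hint that more data follows */

nvme_tcp_queue_more() is the check that, before this patch, also consulted
queue->more_requests.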
We set more_requests=true when we flush the request directly from the
.queue_rq submission context (in nvme_tcp_send_all), however this wrongly
assumes that no other requests may be queued during the execution of
nvme_tcp_send_all.
Due to this, a race condition may happen where:
1. request X is queued as !last-in-batch
2. request X submission context calls nvme_tcp_send_all directly
3. nvme_tcp_send_all is preempted and schedules to a different cpu
4. request Y is queued as last-in-batch
5. the nvme_tcp_send_all context sends requests X+Y, however it signals
MSG_SENDPAGE_NOTLAST for both because queue->more_requests=true.
==> neither request is pushed down to the wire, as the network
stack is waiting for more data, and both requests time out.
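
To make the window concrete, here is the pre-fix submission fast path with
the race steps annotated (simplified excerpt of nvme_tcp_queue_request();
comments added for illustration):

	if (queue->io_cpu == raw_smp_processor_id() &&
	    sync && empty && mutex_trylock(&queue->send_mutex)) {
		queue->more_requests = !last;	/* step 2: X was !last, so this is true */
		nvme_tcp_send_all(queue);	/* step 3: preempted; step 4: Y queued;
						 * step 5: X and Y sent with NOTLAST set */
		queue->more_requests = false;	/* too late: both are stalled in the stack */
		mutex_unlock(&queue->send_mutex);
	}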
To fix this, we eliminate queue->more_requests and rely only on
the queue's req_list and send_list being non-empty.
Fixes: 122e5b9f3d ("nvme-tcp: optimize network stack with setting msg flags according to batch size")
Reported-by: Jonathan Nicklin <jnicklin@blockbridge.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Tested-by: Jonathan Nicklin <jnicklin@blockbridge.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Committed by: Greg Kroah-Hartman
Parent: 5914fa32ef
Commit: 6a2a344844
@@ -118,7 +118,6 @@ struct nvme_tcp_queue {
 	struct mutex send_mutex;
 	struct llist_head req_list;
 	struct list_head send_list;
-	bool more_requests;
 
 	/* recv state */
 	void *pdu;
@@ -314,7 +313,7 @@ static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
 static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
 {
 	return !list_empty(&queue->send_list) ||
-		!llist_empty(&queue->req_list) || queue->more_requests;
+		!llist_empty(&queue->req_list);
 }
 
 static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
@@ -333,9 +332,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
 	 */
 	if (queue->io_cpu == raw_smp_processor_id() &&
 	    sync && empty && mutex_trylock(&queue->send_mutex)) {
-		queue->more_requests = !last;
 		nvme_tcp_send_all(queue);
-		queue->more_requests = false;
 		mutex_unlock(&queue->send_mutex);
 	}
 