IB/qib: Improve SDMA performance
1. The code accepts chunks of messages and splits each chunk into packets when converting packets into SDMA queue entries. Adjacent packets reuse user buffer pages smartly to avoid pinning the same page multiple times.

2. Instead of discarding all the work when the SDMA queue is full, the work is saved on a pending queue. Whenever enough SDMA queue entries become free, the pending queue is moved directly onto the SDMA queue.

3. An interrupt handler is used to make progress on this pending queue.

Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: CQ Tang <cq.tang@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
[ Fixed up sparse warnings. - Roland ]
Signed-off-by: Roland Dreier <roland@purestorage.com>
This commit is contained in:
@@ -423,8 +423,11 @@ void qib_sdma_intr(struct qib_pportdata *ppd)
|
||||
|
||||
void __qib_sdma_intr(struct qib_pportdata *ppd)
|
||||
{
|
||||
if (__qib_sdma_running(ppd))
|
||||
if (__qib_sdma_running(ppd)) {
|
||||
qib_sdma_make_progress(ppd);
|
||||
if (!list_empty(&ppd->sdma_userpending))
|
||||
qib_user_sdma_send_desc(ppd, &ppd->sdma_userpending);
|
||||
}
|
||||
}
|
||||
|
||||
int qib_setup_sdma(struct qib_pportdata *ppd)
|
||||
@@ -452,6 +455,9 @@ int qib_setup_sdma(struct qib_pportdata *ppd)
|
||||
ppd->sdma_descq_removed = 0;
|
||||
ppd->sdma_descq_added = 0;
|
||||
|
||||
ppd->sdma_intrequest = 0;
|
||||
INIT_LIST_HEAD(&ppd->sdma_userpending);
|
||||
|
||||
INIT_LIST_HEAD(&ppd->sdma_activelist);
|
||||
|
||||
tasklet_init(&ppd->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
|
||||
|
Reference in New Issue
Block a user