IB/mlx4: Implement IB_QP_CREATE_USE_GFP_NOIO
Modify the various routines used to allocate memory resources which serve QPs in mlx4 to take an input GFP directive. Have the Ethernet driver use GFP_KERNEL in its QP allocations, as was done prior to this commit, and the IB driver use GFP_NOIO when the IB verbs IB_QP_CREATE_USE_GFP_NOIO QP creation flag is provided.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
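For context, a minimal sketch (not part of the diff below, which only reworks the mlx4 allocators) of how an IB-side caller is expected to use the new parameter: the GFP mask is derived from the verbs QP creation flags and passed down to the reworked allocation routines. The surrounding function and variable names here are illustrative, not taken from this patch.

	/* Sketch only: pick the GFP mask from the verbs QP creation flags
	 * (an illustrative caller somewhere in the mlx4 IB QP creation path;
	 * init_attr is the struct ib_qp_init_attr passed to create_qp).
	 */
	gfp_t gfp = (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO) ?
		    GFP_NOIO : GFP_KERNEL;

	/* Pass the mask down to the allocators reworked by this patch. */
	err = mlx4_db_alloc(dev, &qp->db, 0, gfp);
	if (err)
		goto err_out;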
Committed by: Roland Dreier
Parent: 09b93088d7
Commit: 40f2287bd5
@@ -171,7 +171,7 @@ void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap)
  */
 
 int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
-		   struct mlx4_buf *buf)
+		   struct mlx4_buf *buf, gfp_t gfp)
 {
 	dma_addr_t t;
 
@@ -180,7 +180,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
 		buf->npages = 1;
 		buf->page_shift = get_order(size) + PAGE_SHIFT;
 		buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev,
-						     size, &t, GFP_KERNEL);
+						     size, &t, gfp);
 		if (!buf->direct.buf)
 			return -ENOMEM;
 
@@ -200,14 +200,14 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
 		buf->npages = buf->nbufs;
 		buf->page_shift = PAGE_SHIFT;
 		buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
-					 GFP_KERNEL);
+					 gfp);
 		if (!buf->page_list)
 			return -ENOMEM;
 
 		for (i = 0; i < buf->nbufs; ++i) {
 			buf->page_list[i].buf =
 				dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
-						   &t, GFP_KERNEL);
+						   &t, gfp);
 			if (!buf->page_list[i].buf)
 				goto err_free;
 
@@ -218,7 +218,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
 
 		if (BITS_PER_LONG == 64) {
 			struct page **pages;
-			pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
+			pages = kmalloc(sizeof *pages * buf->nbufs, gfp);
 			if (!pages)
 				goto err_free;
 			for (i = 0; i < buf->nbufs; ++i)
@@ -260,11 +260,12 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
 }
 EXPORT_SYMBOL_GPL(mlx4_buf_free);
 
-static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
+static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device,
+						 gfp_t gfp)
 {
 	struct mlx4_db_pgdir *pgdir;
 
-	pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL);
+	pgdir = kzalloc(sizeof *pgdir, gfp);
 	if (!pgdir)
 		return NULL;
 
@@ -272,7 +273,7 @@ static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
 	pgdir->bits[0] = pgdir->order0;
 	pgdir->bits[1] = pgdir->order1;
 	pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
-					    &pgdir->db_dma, GFP_KERNEL);
+					    &pgdir->db_dma, gfp);
 	if (!pgdir->db_page) {
 		kfree(pgdir);
 		return NULL;
@@ -312,7 +313,7 @@ found:
 	return 0;
 }
 
-int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
+int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order, gfp_t gfp)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_db_pgdir *pgdir;
@@ -324,7 +325,7 @@ int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
 		if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
 			goto out;
 
-	pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev));
+	pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev), gfp);
 	if (!pgdir) {
 		ret = -ENOMEM;
 		goto out;
@@ -376,13 +377,13 @@ int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
 {
 	int err;
 
-	err = mlx4_db_alloc(dev, &wqres->db, 1);
+	err = mlx4_db_alloc(dev, &wqres->db, 1, GFP_KERNEL);
 	if (err)
 		return err;
 
 	*wqres->db.db = 0;
 
-	err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf);
+	err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf, GFP_KERNEL);
 	if (err)
 		goto err_db;
 
@@ -391,7 +392,7 @@ int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
 	if (err)
 		goto err_buf;
 
-	err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf);
+	err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf, GFP_KERNEL);
 	if (err)
 		goto err_mtt;