RDMA: Convert put_page() to put_user_page*()
For infiniband code that retains pages via get_user_pages*(), release those pages via the new put_user_page(), or put_user_pages*(), instead of put_page(). This is a tiny part of the second step of fixing the problem described in [1]. The steps are: 1) Provide put_user_page*() routines, intended to be used for releasing pages that were pinned via get_user_pages*(). 2) Convert all of the call sites for get_user_pages*(), to invoke put_user_page*(), instead of put_page(). This involves dozens of call sites, and will take some time. 3) After (2) is complete, use get_user_pages*() and put_user_page*() to implement tracking of these pages. This tracking will be separate from the existing struct page refcounting. 4) Use the tracking and identification of these pages, to implement special handling (especially in writeback paths) when the pages are backed by a filesystem. Again, [1] provides details as to why that is desirable. [1] https://lwn.net/Articles/753027/ : "The Trouble with get_user_pages()" Reviewed-by: Jan Kara <jack@suse.cz> Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com> Reviewed-by: Ira Weiny <ira.weiny@intel.com> Reviewed-by: Jérôme Glisse <jglisse@redhat.com> Acked-by: Jason Gunthorpe <jgg@mellanox.com> Tested-by: Ira Weiny <ira.weiny@intel.com> Signed-off-by: John Hubbard <jhubbard@nvidia.com> Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
This commit is contained in:

gecommit door
Jason Gunthorpe

bovenliggende
cfcc048ca7
commit
ea99697458
@@ -40,13 +40,10 @@
|
||||
/*
 * Release pages that were pinned via get_user_pages*().
 *
 * @p:         array of pinned pages to release
 * @num_pages: number of entries in @p
 * @dirty:     nonzero if the pages may have been written to and must be
 *             marked dirty before release (so writeback sees the data)
 *
 * Uses put_user_pages*() instead of put_page(), as required for pages
 * obtained from get_user_pages*(); put_user_pages_dirty_lock() performs
 * the set_page_dirty_lock() + release for each page in one call.
 *
 * NOTE(review): the scraped diff showed both the legacy put_page() loop
 * and the new put_user_pages*() calls concatenated; keeping both would
 * release every page twice. Only the converted form is correct.
 */
static void __qib_release_user_pages(struct page **p, size_t num_pages,
				     int dirty)
{
	if (dirty)
		put_user_pages_dirty_lock(p, num_pages);
	else
		put_user_pages(p, num_pages);
}
|
||||
|
||||
/**
|
||||
|
@@ -317,7 +317,7 @@ static int qib_user_sdma_page_to_frags(const struct qib_devdata *dd,
|
||||
* the caller can ignore this page.
|
||||
*/
|
||||
if (put) {
|
||||
put_page(page);
|
||||
put_user_page(page);
|
||||
} else {
|
||||
/* coalesce case */
|
||||
kunmap(page);
|
||||
@@ -631,7 +631,7 @@ static void qib_user_sdma_free_pkt_frag(struct device *dev,
|
||||
kunmap(pkt->addr[i].page);
|
||||
|
||||
if (pkt->addr[i].put_page)
|
||||
put_page(pkt->addr[i].page);
|
||||
put_user_page(pkt->addr[i].page);
|
||||
else
|
||||
__free_page(pkt->addr[i].page);
|
||||
} else if (pkt->addr[i].kvaddr) {
|
||||
@@ -706,7 +706,7 @@ static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
|
||||
/* if error, return all pages not managed by pkt */
|
||||
free_pages:
|
||||
while (i < j)
|
||||
put_page(pages[i++]);
|
||||
put_user_page(pages[i++]);
|
||||
|
||||
done:
|
||||
return ret;
|
||||
|
Verwijs in nieuw issue
Block a user