mm: optimize dev_pagemap reference counting around get_dev_pagemap
Change the calling convention so that get_dev_pagemap always consumes the
previous reference instead of doing this using an explicit earlier call to
put_dev_pagemap in the callers. The callers will still need to put the final
reference after finishing the loop over the pages.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
committed by Dan Williams

parent 0822acb86c
commit 832d7aa051
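What the new convention means for callers: the pgmap returned by one get_dev_pagemap() call can be fed straight into the next call, which either hands it back (fast path) or drops it and looks up a new one, so only the last reference has to be put explicitly. Below is a minimal sketch of such a caller loop, not code from this patch; walk_device_pfns() and its pfn range are hypothetical, only get_dev_pagemap() and put_dev_pagemap() are the real interfaces.

#include <linux/memremap.h>

/* Hypothetical caller, shown only to illustrate the calling convention. */
static void walk_device_pfns(unsigned long start_pfn, unsigned long nr_pages)
{
	struct dev_pagemap *pgmap = NULL;
	unsigned long pfn;

	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
		/*
		 * The reference held from the previous iteration is passed
		 * in and consumed by get_dev_pagemap(); no explicit
		 * put_dev_pagemap() is needed between iterations.
		 */
		pgmap = get_dev_pagemap(pfn, pgmap);
		if (!pgmap)
			continue;	/* pfn is not device memory */

		/* ... work on the device page behind this pfn ... */
	}

	/* The caller still puts the final reference after the loop. */
	put_dev_pagemap(pgmap);
}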
@@ -507,22 +507,23 @@ struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start)
  * @pfn: page frame number to lookup page_map
  * @pgmap: optional known pgmap that already has a reference
  *
- * @pgmap allows the overhead of a lookup to be bypassed when @pfn lands in the
- * same mapping.
+ * If @pgmap is non-NULL and covers @pfn it will be returned as-is. If @pgmap
+ * is non-NULL but does not cover @pfn the reference to it will be released.
  */
 struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
 		struct dev_pagemap *pgmap)
 {
-	const struct resource *res = pgmap ? pgmap->res : NULL;
 	resource_size_t phys = PFN_PHYS(pfn);
 
 	/*
-	 * In the cached case we're already holding a live reference so
-	 * we can simply do a blind increment
+	 * In the cached case we're already holding a live reference.
 	 */
-	if (res && phys >= res->start && phys <= res->end) {
-		percpu_ref_get(pgmap->ref);
-		return pgmap;
+	if (pgmap) {
+		const struct resource *res = pgmap ? pgmap->res : NULL;
+
+		if (res && phys >= res->start && phys <= res->end)
+			return pgmap;
+		put_dev_pagemap(pgmap);
 	}
 
 	/* fall back to slow path lookup */
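Two details of the hunk above are worth spelling out. On the cached fast path the refcount is no longer touched at all: returning the covered @pgmap simply hands the caller's existing reference back, whereas the old code took an extra reference with percpu_ref_get() that the caller then had to drop. On a miss, the cached reference is released via put_dev_pagemap() before the slow-path lookup runs. For orientation, put_dev_pagemap() in this era of the kernel is roughly the inline helper below (an approximation, not part of this patch); its NULL check is also what keeps the "put the final reference after the loop" pattern safe when the last lookup returned NULL.

/* Approximate shape of the helper in include/linux/memremap.h at the time. */
static inline void put_dev_pagemap(struct dev_pagemap *pgmap)
{
	if (pgmap)
		percpu_ref_put(pgmap->ref);
}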