mm: introduce compound_nr()
Replace 1 << compound_order(page) with compound_nr(page).  Minor
improvements in readability.

Link: http://lkml.kernel.org/r/20190721104612.19120-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit d8c6546b1a
parent 94ad933810
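The hunks below are from mm/shmem.c; the helper itself is added to include/linux/mm.h by this commit and is essentially a named wrapper around the idiom it replaces. A sketch of its definition, shown here only for context:

/* Returns the number of pages in this potentially compound page. */
static inline unsigned long compound_nr(struct page *page)
{
	return 1UL << compound_order(page);
}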
@@ -609,7 +609,7 @@ static int shmem_add_to_page_cache(struct page *page,
 {
 	XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
 	unsigned long i = 0;
-	unsigned long nr = 1UL << compound_order(page);
+	unsigned long nr = compound_nr(page);
 
 	VM_BUG_ON_PAGE(PageTail(page), page);
 	VM_BUG_ON_PAGE(index != round_down(index, nr), page);
@@ -1884,7 +1884,7 @@ alloc_nohuge:
 	lru_cache_add_anon(page);
 
 	spin_lock_irq(&info->lock);
-	info->alloced += 1 << compound_order(page);
+	info->alloced += compound_nr(page);
 	inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
 	shmem_recalc_inode(inode);
 	spin_unlock_irq(&info->lock);
@@ -1925,7 +1925,7 @@ clear:
 		struct page *head = compound_head(page);
 		int i;
 
-		for (i = 0; i < (1 << compound_order(head)); i++) {
+		for (i = 0; i < compound_nr(head); i++) {
 			clear_highpage(head + i);
 			flush_dcache_page(head + i);
 		}
@@ -1952,7 +1952,7 @@ clear:
 	 * Error recovery.
 	 */
 unacct:
-	shmem_inode_unacct_blocks(inode, 1 << compound_order(page));
+	shmem_inode_unacct_blocks(inode, compound_nr(page));
 
 	if (PageTransHuge(page)) {
 		unlock_page(page);