mm: dump page when hitting a VM_BUG_ON using VM_BUG_ON_PAGE
Most of the VM_BUG_ON assertions are performed on a page. Usually, when
one of these assertions fails we'll get a BUG_ON with a call stack and
the registers.

I've recently noticed, based on requests to add a small piece of code
that dumps the page to various VM_BUG_ON sites, that the page dump is
quite useful to people debugging issues in mm.

This patch adds VM_BUG_ON_PAGE(cond, page), which, beyond doing what
VM_BUG_ON() does, also dumps the page before executing the actual
BUG_ON.

[akpm@linux-foundation.org: fix up includes]
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Committed by: Linus Torvalds
Parent: e3bba3c3c9
Commit: 309381feae
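The new macro itself lands in <linux/mmdebug.h>, which the first hunk below starts including from mm.h; its definition is not part of this excerpt. A minimal sketch of its likely shape, assuming the dump_page() declaration shown in the last hunk (with a NULL-tolerant reason argument) and the usual CONFIG_DEBUG_VM gating of VM_BUG_ON():

/*
 * Sketch only -- the real definition lives in <linux/mmdebug.h> and is
 * not shown in this excerpt.  Assumes dump_page(page, reason) accepts a
 * NULL reason, and that VM_BUG_ON_PAGE() compiles away like VM_BUG_ON()
 * when CONFIG_DEBUG_VM is off.
 */
struct page;
extern void dump_page(struct page *page, char *reason);

#ifdef CONFIG_DEBUG_VM
#define VM_BUG_ON(cond) BUG_ON(cond)
#define VM_BUG_ON_PAGE(cond, page)                                      \
        do {                                                            \
                if (unlikely(cond)) {                                   \
                        dump_page(page, NULL);  /* page state first */  \
                        BUG();                  /* then the oops */     \
                }                                                       \
        } while (0)
#else
#define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond)
#define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond)
#endif

With that in place, a failing page assertion prints the page's flags, mapcount and refcount via dump_page() immediately before the BUG(), so the oops report carries the state of the offending page.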
@@ -5,6 +5,7 @@
 
 #ifdef __KERNEL__
 
+#include <linux/mmdebug.h>
 #include <linux/gfp.h>
 #include <linux/bug.h>
 #include <linux/list.h>
@@ -303,7 +304,7 @@ static inline int get_freepage_migratetype(struct page *page)
  */
 static inline int put_page_testzero(struct page *page)
 {
-	VM_BUG_ON(atomic_read(&page->_count) == 0);
+	VM_BUG_ON_PAGE(atomic_read(&page->_count) == 0, page);
 	return atomic_dec_and_test(&page->_count);
 }
 
@@ -364,7 +365,7 @@ static inline int is_vmalloc_or_module_addr(const void *x)
 static inline void compound_lock(struct page *page)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	VM_BUG_ON(PageSlab(page));
+	VM_BUG_ON_PAGE(PageSlab(page), page);
 	bit_spin_lock(PG_compound_lock, &page->flags);
 #endif
 }
@@ -372,7 +373,7 @@ static inline void compound_lock(struct page *page)
 static inline void compound_unlock(struct page *page)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	VM_BUG_ON(PageSlab(page));
+	VM_BUG_ON_PAGE(PageSlab(page), page);
 	bit_spin_unlock(PG_compound_lock, &page->flags);
 #endif
 }
@@ -447,7 +448,7 @@ static inline bool __compound_tail_refcounted(struct page *page)
  */
 static inline bool compound_tail_refcounted(struct page *page)
 {
-	VM_BUG_ON(!PageHead(page));
+	VM_BUG_ON_PAGE(!PageHead(page), page);
 	return __compound_tail_refcounted(page);
 }
 
@@ -456,9 +457,9 @@ static inline void get_huge_page_tail(struct page *page)
 	/*
 	 * __split_huge_page_refcount() cannot run from under us.
 	 */
-	VM_BUG_ON(!PageTail(page));
-	VM_BUG_ON(page_mapcount(page) < 0);
-	VM_BUG_ON(atomic_read(&page->_count) != 0);
+	VM_BUG_ON_PAGE(!PageTail(page), page);
+	VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
+	VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page);
 	if (compound_tail_refcounted(page->first_page))
 		atomic_inc(&page->_mapcount);
 }
@@ -474,7 +475,7 @@ static inline void get_page(struct page *page)
 	 * Getting a normal page or the head of a compound page
 	 * requires to already have an elevated page->_count.
 	 */
-	VM_BUG_ON(atomic_read(&page->_count) <= 0);
+	VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
 	atomic_inc(&page->_count);
 }
 
@@ -511,13 +512,13 @@ static inline int PageBuddy(struct page *page)
 
 static inline void __SetPageBuddy(struct page *page)
 {
-	VM_BUG_ON(atomic_read(&page->_mapcount) != -1);
+	VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
 	atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
 }
 
 static inline void __ClearPageBuddy(struct page *page)
 {
-	VM_BUG_ON(!PageBuddy(page));
+	VM_BUG_ON_PAGE(!PageBuddy(page), page);
 	atomic_set(&page->_mapcount, -1);
 }
 
@@ -1401,7 +1402,7 @@ static inline bool ptlock_init(struct page *page)
 	 * slab code uses page->slab_cache and page->first_page (for tail
 	 * pages), which share storage with page->ptl.
 	 */
-	VM_BUG_ON(*(unsigned long *)&page->ptl);
+	VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
 	if (!ptlock_alloc(page))
 		return false;
 	spin_lock_init(ptlock_ptr(page));
@@ -1492,7 +1493,7 @@ static inline bool pgtable_pmd_page_ctor(struct page *page)
 static inline void pgtable_pmd_page_dtor(struct page *page)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	VM_BUG_ON(page->pmd_huge_pte);
+	VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
 #endif
 	ptlock_free(page);
 }
@@ -2029,10 +2030,6 @@ extern void shake_page(struct page *p, int access);
 extern atomic_long_t num_poisoned_pages;
 extern int soft_offline_page(struct page *page, int flags);
 
-extern void dump_page(struct page *page, char *reason);
-extern void dump_page_badflags(struct page *page, char *reason,
-			       unsigned long badflags);
-
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
 extern void clear_huge_page(struct page *page,
 			    unsigned long addr,
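Note that the last hunk removes the dump_page()/dump_page_badflags() declarations from mm.h rather than dropping the functions: with mm.h now including <linux/mmdebug.h> (first hunk), the declarations presumably move there next to the new macro, matching the "[akpm@linux-foundation.org: fix up includes]" note above. A sketch of the relocated declarations, under that assumption:

/* Assumed new home in <linux/mmdebug.h>; a sketch, not the verbatim patch. */
struct page;

extern void dump_page(struct page *page, char *reason);
extern void dump_page_badflags(struct page *page, char *reason,
                               unsigned long badflags);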