mm: PageBuddy and mapcount robustness
Change the _mapcount value indicating PageBuddy from -2 to -128 for more robustness against page_mapcount() underflows.

Use reset_page_mapcount() instead of __ClearPageBuddy() in bad_page() to ignore the previous retval of PageBuddy().

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Reported-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
committed by Linus Torvalds
parent 7b7adc4a01
commit ef2b4b95a6
@@ -402,16 +402,23 @@ static inline void init_page_count(struct page *page)
+/*
+ * PageBuddy() indicate that the page is free and in the buddy system
+ * (see mm/page_alloc.c).
+ *
+ * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to
+ * -2 so that an underflow of the page_mapcount() won't be mistaken
+ * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very
+ * efficiently by most CPU architectures.
+ */
+#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)
+
 static inline int PageBuddy(struct page *page)
 {
-	return atomic_read(&page->_mapcount) == -2;
+	return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
 }
 
 static inline void __SetPageBuddy(struct page *page)
 {
 	VM_BUG_ON(atomic_read(&page->_mapcount) != -1);
-	atomic_set(&page->_mapcount, -2);
+	atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
 }
 
 static inline void __ClearPageBuddy(struct page *page)
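To make the reasoning behind the new sentinel concrete, here is a minimal standalone userspace sketch (not kernel code) of the idea: a PageBuddy marker stored in _mapcount that sits far below -1, so that a small accidental underflow of the mapcount is not mistaken for a free buddy page. The helper names mirror the kernel's, but C11 <stdatomic.h> stands in for the kernel's atomic_t, assert() stands in for VM_BUG_ON(), and reset_page_mapcount() is modeled only on the behavior described in this commit message.

/* Userspace illustration of the PageBuddy sentinel; not kernel code. */
#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)

struct page {
	atomic_int _mapcount;	/* -1 means "not mapped", as in the kernel */
};

static int PageBuddy(struct page *page)
{
	return atomic_load(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
}

static void __SetPageBuddy(struct page *page)
{
	/* VM_BUG_ON analogue: only an unmapped page may become a buddy. */
	assert(atomic_load(&page->_mapcount) == -1);
	atomic_store(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
}

static void reset_page_mapcount(struct page *page)
{
	/* Unconditionally restore the "unmapped" value, regardless of the
	 * page's previous state -- the bad_page() change in this commit
	 * relies on exactly this property. */
	atomic_store(&page->_mapcount, -1);
}

int main(void)
{
	struct page page = { ._mapcount = -1 };

	/* A buggy extra unmap underflows the count from -1 to -2.  With the
	 * old sentinel (-2) this page would now look free; with -128 the
	 * underflow is not mistaken for PageBuddy. */
	atomic_fetch_sub(&page._mapcount, 1);
	printf("after underflow:        PageBuddy() = %d\n", PageBuddy(&page));

	reset_page_mapcount(&page);
	__SetPageBuddy(&page);
	printf("after __SetPageBuddy(): PageBuddy() = %d\n", PageBuddy(&page));
	return 0;
}

A mapcount would have to underflow 127 times past -1 before it collided with the new sentinel, whereas the old value of -2 was reached by a single stray unmap.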