FROMLIST: mm: fix use-after free of page_ext after race with memory-offline
The following is one path where a race between page_ext access and offlining of the respective memory block causes a use-after-free on the access of the page_ext structure:

process1                              process2
---------                             ---------
a) doing /proc/page_owner             doing memory offline
                                      through offline_pages.

b) PageBuddy check fails, so
   proceed to get the page_owner
   information through page_ext
   access:
   page_ext = lookup_page_ext(page);

                                      migrate_pages();
                                      .................
                                      Since all pages are successfully
                                      migrated as part of the offline
                                      operation, send the MEM_OFFLINE
                                      notification, where for page_ext
                                      it calls:
                                      offline_page_ext() -->
                                        __free_page_ext() -->
                                          free_page_ext() -->
                                            vfree(ms->page_ext)
                                      mem_section->page_ext = NULL

c) The check of the PAGE_EXT flags
   in the page_ext->flags access
   results in the use-after-free
   (leading to translation faults).

As mentioned above, there is really no synchronization between page_ext access and its freeing during memory offline. The memory offline steps on a memory block are (roughly) as below:

1) Isolate all the pages.
2) while(1): try to free the pages to buddy (->free_list[MIGRATE_ISOLATE]).
3) Delete the pages from this buddy list.
4) Then free page_ext. (Note: the struct page is still alive, as it is freed only during hot remove of the memory, which frees the memmap, a step the user might not perform.)

This design leads to a state where struct page is alive but struct page_ext is freed, although the latter is ideally part of the former and merely represents the page flags (see [3] for why this design was chosen).

The above-mentioned race is just one example, but the problem persists in the other paths involving page_ext->flags access too (e.g. page_is_idle()).

Fix all the paths where offline races with page_ext access by synchronizing with an RCU lock, achieved in 3 steps:

1) Invalidate all the page_ext's of the sections of a memory block by storing a flag in the LSB of mem_section->page_ext.

2) Wait until all the existing readers have finished working with the ->page_ext's, using synchronize_rcu(). Any parallel process that starts after this call will not get a page_ext through lookup_page_ext() for the block on which the offline operation is being performed.

3) Now safely free all the sections' ->page_ext's of the block on which the offline operation is being performed.

Note: if synchronize_rcu() takes time, this path can be optimized through call_rcu() [2].

Thanks to David Hildenbrand for his views/suggestions on the initial discussion [1] and to Pavan Kondeti for various inputs on this patch.

[1] https://lore.kernel.org/linux-mm/59edde13-4167-8550-86f0-11fc67882107@quicinc.com/
[2] https://lore.kernel.org/all/a26ce299-aed1-b8ad-711e-a49e82bdd180@quicinc.com/T/#u
[3] https://lore.kernel.org/all/6fa6b7aa-731e-891c-3efb-a03d6a700efa@redhat.com/

Bug: 236222283
Link: https://lore.kernel.org/all/1661496993-11473-1-git-send-email-quic_charante@quicinc.com/
Change-Id: Ib439ae19c61a557a5c70ea90e3c4b35a5583ba0d
Suggested-by: David Hildenbrand <david@redhat.com>
Suggested-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Charan Teja Kalla <quic_charante@quicinc.com>
(fixed merge conflicts and still exported lookup_page_ext)
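For reference, the three steps above live on the page_ext side of the series (mm/page_ext.c), which is not part of the diff shown below; the diff only converts the mm/page_owner.c readers to the new page_ext_get()/page_ext_put() pair. The following is a rough sketch of that side, reconstructed from the description above and the lore posting; the PAGE_EXT_INVALID flag and the __invalidate_page_ext()/__free_page_ext() helper names are taken from the posted series and may differ from what finally lands:

/* Sketch: reader side, pins a section's page_ext with RCU. */
struct page_ext *page_ext_get(struct page *page)
{
	struct page_ext *page_ext;

	rcu_read_lock();
	/* returns NULL once the PAGE_EXT_INVALID bit is set in ms->page_ext */
	page_ext = lookup_page_ext(page);
	if (!page_ext) {
		rcu_read_unlock();
		return NULL;
	}

	return page_ext;	/* caller must pair this with page_ext_put() */
}

void page_ext_put(struct page_ext *page_ext)
{
	if (unlikely(!page_ext))
		return;

	rcu_read_unlock();
}

/* Sketch: offline side, invalidate -> wait for readers -> free. */
static int __meminit offline_page_ext(int nid, unsigned long start_pfn,
				      unsigned long nr_pages)
{
	unsigned long start = SECTION_ALIGN_DOWN(start_pfn);
	unsigned long end = SECTION_ALIGN_UP(start_pfn + nr_pages);
	unsigned long pfn;

	/* 1) tag the LSB of every affected section's ->page_ext */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__invalidate_page_ext(pfn);

	/* 2) wait for readers that called page_ext_get() before step 1 */
	synchronize_rcu();

	/* 3) no reader can reach these page_ext's anymore; free them */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_ext(pfn);

	return 0;
}

Because page_ext_get() enters an RCU read-side critical section, callers must not sleep between get and put. That is why the diff below moves handle = save_stack(gfp_mask) in __set_page_owner() ahead of page_ext_get(), and why read_page_owner() copies the page_owner into a local page_owner_tmp and drops the reference before printing, which may allocate and copy to user space.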
commit 2b3f9b8187 (parent dec2f52d08)
committed by Suren Baghdasaryan
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -173,7 +173,7 @@ void __reset_page_owner(struct page *page, unsigned int order)
 
 	handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
 
-	page_ext = lookup_page_ext(page);
+	page_ext = page_ext_get(page);
 	if (unlikely(!page_ext))
 		return;
 	for (i = 0; i < (1 << order); i++) {
@@ -183,6 +183,7 @@ void __reset_page_owner(struct page *page, unsigned int order)
 		page_owner->free_ts_nsec = free_ts_nsec;
 		page_ext = page_ext_next(page_ext);
 	}
+	page_ext_put(page_ext);
 }
 
 static inline void __set_page_owner_handle(struct page *page,
@@ -210,19 +211,21 @@ static inline void __set_page_owner_handle(struct page *page,
 noinline void __set_page_owner(struct page *page, unsigned int order,
 					gfp_t gfp_mask)
 {
-	struct page_ext *page_ext = lookup_page_ext(page);
+	struct page_ext *page_ext;
 	depot_stack_handle_t handle;
 
+	handle = save_stack(gfp_mask);
+	page_ext = page_ext_get(page);
 	if (unlikely(!page_ext))
 		return;
 
-	handle = save_stack(gfp_mask);
 	__set_page_owner_handle(page, page_ext, handle, order, gfp_mask);
+	page_ext_put(page_ext);
 }
 
 void __set_page_owner_migrate_reason(struct page *page, int reason)
 {
-	struct page_ext *page_ext = lookup_page_ext(page);
+	struct page_ext *page_ext = page_ext_get(page);
 	struct page_owner *page_owner;
 
 	if (unlikely(!page_ext))
@@ -230,12 +233,13 @@ void __set_page_owner_migrate_reason(struct page *page, int reason)
 
 	page_owner = get_page_owner(page_ext);
 	page_owner->last_migrate_reason = reason;
+	page_ext_put(page_ext);
 }
 
 void __split_page_owner(struct page *page, unsigned int nr)
 {
 	int i;
-	struct page_ext *page_ext = lookup_page_ext(page);
+	struct page_ext *page_ext = page_ext_get(page);
 	struct page_owner *page_owner;
 
 	if (unlikely(!page_ext))
@@ -246,17 +250,25 @@ void __split_page_owner(struct page *page, unsigned int nr)
 		page_owner->order = 0;
 		page_ext = page_ext_next(page_ext);
 	}
+	page_ext_put(page_ext);
 }
 
 void __copy_page_owner(struct page *oldpage, struct page *newpage)
 {
-	struct page_ext *old_ext = lookup_page_ext(oldpage);
-	struct page_ext *new_ext = lookup_page_ext(newpage);
+	struct page_ext *old_ext;
+	struct page_ext *new_ext;
 	struct page_owner *old_page_owner, *new_page_owner;
 
-	if (unlikely(!old_ext || !new_ext))
+	old_ext = page_ext_get(oldpage);
+	if (unlikely(!old_ext))
 		return;
 
+	new_ext = page_ext_get(newpage);
+	if (unlikely(!new_ext)) {
+		page_ext_put(old_ext);
+		return;
+	}
+
 	old_page_owner = get_page_owner(old_ext);
 	new_page_owner = get_page_owner(new_ext);
 	new_page_owner->order = old_page_owner->order;
@@ -279,6 +291,8 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage)
 	 */
 	__set_bit(PAGE_EXT_OWNER, &new_ext->flags);
 	__set_bit(PAGE_EXT_OWNER_ALLOCATED, &new_ext->flags);
+	page_ext_put(new_ext);
+	page_ext_put(old_ext);
 }
 
 void pagetypeinfo_showmixedcount_print(struct seq_file *m,
@@ -335,12 +349,12 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
 			if (PageReserved(page))
 				continue;
 
-			page_ext = lookup_page_ext(page);
+			page_ext = page_ext_get(page);
 			if (unlikely(!page_ext))
 				continue;
 
 			if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
-				continue;
+				goto ext_put_continue;
 
 			page_owner = get_page_owner(page_ext);
 			page_mt = gfp_migratetype(page_owner->gfp_mask);
@@ -351,9 +365,12 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
 					count[pageblock_mt]++;
 
 				pfn = block_end_pfn;
+				page_ext_put(page_ext);
 				break;
 			}
 			pfn += (1UL << page_owner->order) - 1;
+ext_put_continue:
+			page_ext_put(page_ext);
 		}
 	}
 
@@ -432,7 +449,7 @@ err:
 
 void __dump_page_owner(struct page *page)
 {
-	struct page_ext *page_ext = lookup_page_ext(page);
+	struct page_ext *page_ext = page_ext_get((void *)page);
 	struct page_owner *page_owner;
 	depot_stack_handle_t handle;
 	unsigned long *entries;
@@ -451,6 +468,7 @@ void __dump_page_owner(struct page *page)
 
 	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
 		pr_alert("page_owner info is not present (never set?)\n");
+		page_ext_put(page_ext);
 		return;
 	}
 
@@ -483,6 +501,7 @@ void __dump_page_owner(struct page *page)
 	if (page_owner->last_migrate_reason != -1)
 		pr_alert("page has been migrated, last migrate reason: %s\n",
 			migrate_reason_names[page_owner->last_migrate_reason]);
+	page_ext_put(page_ext);
 }
 
 static ssize_t
@@ -508,6 +527,14 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 
 	/* Find an allocated page */
 	for (; pfn < max_pfn; pfn++) {
+		/*
+		 * This temporary page_owner is required so
+		 * that we can avoid the context switches while holding
+		 * the rcu lock and copying the page owner information to
+		 * user through copy_to_user() or GFP_KERNEL allocations.
+		 */
+		struct page_owner page_owner_tmp;
+
 		/*
 		 * If the new page is in a new MAX_ORDER_NR_PAGES area,
 		 * validate the area as existing, skip it if not
@@ -530,7 +557,7 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 			continue;
 		}
 
-		page_ext = lookup_page_ext(page);
+		page_ext = page_ext_get(page);
 		if (unlikely(!page_ext))
 			continue;
 
@@ -539,14 +566,14 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 		 * because we don't hold the zone lock.
 		 */
 		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
-			continue;
+			goto ext_put_continue;
 
 		/*
 		 * Although we do have the info about past allocation of free
 		 * pages, it's not relevant for current memory usage.
 		 */
 		if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
-			continue;
+			goto ext_put_continue;
 
 		page_owner = get_page_owner(page_ext);
 
@@ -555,7 +582,7 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 		 * would inflate the stats.
 		 */
 		if (!IS_ALIGNED(pfn, 1 << page_owner->order))
-			continue;
+			goto ext_put_continue;
 
 		/*
 		 * Access to page_ext->handle isn't synchronous so we should
@@ -563,13 +590,17 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 		 */
 		handle = READ_ONCE(page_owner->handle);
 		if (!handle)
-			continue;
+			goto ext_put_continue;
 
 		/* Record the next PFN to read in the file offset */
 		*ppos = (pfn - min_low_pfn) + 1;
 
+		page_owner_tmp = *page_owner;
+		page_ext_put(page_ext);
 		return print_page_owner(buf, count, pfn, page,
-				page_owner, handle);
+				&page_owner_tmp, handle);
+ext_put_continue:
+		page_ext_put(page_ext);
 	}
 
 	return 0;
@@ -627,18 +658,20 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
 			if (PageReserved(page))
 				continue;
 
-			page_ext = lookup_page_ext(page);
+			page_ext = page_ext_get(page);
 			if (unlikely(!page_ext))
 				continue;
 
 			/* Maybe overlapping zone */
 			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
-				continue;
+				goto ext_put_continue;
 
 			/* Found early allocated page */
 			__set_page_owner_handle(page, page_ext, early_handle,
 						0, 0);
 			count++;
+ext_put_continue:
+			page_ext_put(page_ext);
 		}
 		cond_resched();
 	}