Merge branch 'hwpoison' of git://git.kernel.org/pub/scm/linux/kernel/git/ak/linux-mce-2.6
* 'hwpoison' of git://git.kernel.org/pub/scm/linux/kernel/git/ak/linux-mce-2.6:
  hugetlb: add missing unlock in avoidcopy path in hugetlb_cow()
  hwpoison: rename CONFIG
  HWPOISON, hugetlb: support hwpoison injection for hugepage
  HWPOISON, hugetlb: detect hwpoison in hugetlb code
  HWPOISON, hugetlb: isolate corrupted hugepage
  HWPOISON, hugetlb: maintain mce_bad_pages in handling hugepage error
  HWPOISON, hugetlb: set/clear PG_hwpoison bits on hugepage
  HWPOISON, hugetlb: enable error handling path for hugepage
  hugetlb, rmap: add reverse mapping for hugepage
  hugetlb: move definition of is_vm_hugetlb_page() to hugepage_inline.h

Fix up trivial conflicts in mm/memory-failure.c
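Several of the patches above extend the hwpoison machinery to huge pages, including injection support. As a rough user-space illustration (not part of this merge), the sketch below maps an anonymous hugetlb region and poisons one page in it with madvise(MADV_HWPOISON); it assumes an x86 kernel built with CONFIG_MEMORY_FAILURE, hugepages reserved via /proc/sys/vm/nr_hugepages, a 2 MB huge page size, and root privileges.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MAP_HUGETLB
#define MAP_HUGETLB 0x40000             /* x86 value; present since 2.6.32 */
#endif
#ifndef MADV_HWPOISON
#define MADV_HWPOISON 100               /* only honoured with CONFIG_MEMORY_FAILURE */
#endif

int main(void)
{
        size_t len = 2UL * 1024 * 1024; /* assumed huge page size */
        char *p;

        p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
        if (p == MAP_FAILED) {
                perror("mmap(MAP_HUGETLB)");
                return 1;
        }
        memset(p, 0, len);              /* fault the huge page in */

        /* Ask the kernel to treat the backing page as hardware-poisoned. */
        if (madvise(p, 4096, MADV_HWPOISON) != 0)
                perror("madvise(MADV_HWPOISON)");

        munmap(p, len);
        return 0;
}

Handling such an injection on a hugepage-backed mapping is exactly what the "isolate corrupted hugepage" and "enable error handling path for hugepage" patches are meant to make possible.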
include/linux/hugetlb.h
@@ -2,6 +2,7 @@
 #define _LINUX_HUGETLB_H
 
 #include <linux/fs.h>
+#include <linux/hugetlb_inline.h>
 
 struct ctl_table;
 struct user_struct;
@@ -14,11 +15,6 @@ struct user_struct;
 
 int PageHuge(struct page *page);
 
-static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
-{
-        return vma->vm_flags & VM_HUGETLB;
-}
-
 void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
 int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
 int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
@@ -47,6 +43,7 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to,
                                                 struct vm_area_struct *vma,
                                                 int acctflags);
 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
+void __isolate_hwpoisoned_huge_page(struct page *page);
 
 extern unsigned long hugepages_treat_as_movable;
 extern const unsigned long hugetlb_zero, hugetlb_infinity;
@@ -77,11 +74,6 @@ static inline int PageHuge(struct page *page)
         return 0;
 }
 
-static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
-{
-        return 0;
-}
-
 static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
 {
 }
@@ -108,6 +100,8 @@ static inline void hugetlb_report_meminfo(struct seq_file *m)
 #define is_hugepage_only_range(mm, addr, len) 0
 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
 #define hugetlb_fault(mm, vma, addr, flags) ({ BUG(); 0; })
 #define huge_pte_offset(mm, address) 0
+#define __isolate_hwpoisoned_huge_page(page) 0
+
 #define hugetlb_change_protection(vma, address, end, newprot)
 
include/linux/hugetlb_inline.h (new file, 22 lines added)
@@ -0,0 +1,22 @@
+#ifndef _LINUX_HUGETLB_INLINE_H
+#define _LINUX_HUGETLB_INLINE_H
+
+#ifdef CONFIG_HUGETLB_PAGE
+
+#include <linux/mm.h>
+
+static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
+{
+        return vma->vm_flags & VM_HUGETLB;
+}
+
+#else
+
+static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
+{
+        return 0;
+}
+
+#endif
+
+#endif
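The new header exists so that low-level headers such as pagemap.h can test for hugetlb VMAs without pulling in all of hugetlb.h. A minimal user-space model of the same pattern is sketched below; struct vm_area_struct and the VM_HUGETLB value are simplified stand-ins, not the kernel definitions.

#include <stdio.h>

#define VM_HUGETLB 0x00400000UL         /* stand-in flag value */

struct vm_area_struct { unsigned long vm_flags; };

#ifdef CONFIG_HUGETLB_PAGE
static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
{
        return !!(vma->vm_flags & VM_HUGETLB);
}
#else
/* With hugetlb compiled out the helper folds to a constant 0, so
 * hugepage branches in callers are removed at compile time. */
static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
{
        return 0;
}
#endif

int main(void)
{
        struct vm_area_struct vma = { .vm_flags = VM_HUGETLB };

        printf("hugetlb vma: %d\n", is_vm_hugetlb_page(&vma));
        return 0;
}

Built with -DCONFIG_HUGETLB_PAGE this prints 1; built without, the call is a compile-time constant 0.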
include/linux/pagemap.h
@@ -13,6 +13,7 @@
 #include <linux/gfp.h>
 #include <linux/bitops.h>
 #include <linux/hardirq.h> /* for in_interrupt() */
+#include <linux/hugetlb_inline.h>
 
 /*
  * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page
@@ -281,10 +282,16 @@ static inline loff_t page_offset(struct page *page)
         return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
 }
 
+extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
+                                     unsigned long address);
+
 static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
                                         unsigned long address)
 {
-        pgoff_t pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
+        pgoff_t pgoff;
+        if (unlikely(is_vm_hugetlb_page(vma)))
+                return linear_hugepage_index(vma, address);
+        pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
         pgoff += vma->vm_pgoff;
         return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
 }
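For hugetlb VMAs the page-cache index is kept in huge-page-size units rather than PAGE_SIZE units, which is why linear_page_index() now bails out to linear_hugepage_index(). The real helper is implemented in mm/hugetlb.c and takes the page size from the VMA's hstate; the sketch below only models the arithmetic, assuming 4 KB base pages and 2 MB huge pages, with vma_model and hugepage_index as illustrative stand-ins.

#include <stdio.h>

#define PAGE_SHIFT 12                   /* assumed 4 KB base pages */
#define HPAGE_SHIFT 21                  /* assumed 2 MB huge pages */

struct vma_model {
        unsigned long vm_start;         /* start address of the mapping */
        unsigned long vm_pgoff;         /* file offset in PAGE_SIZE units */
};

static unsigned long hugepage_index(const struct vma_model *vma,
                                    unsigned long address)
{
        /* Offset inside the mapping, counted in whole huge pages... */
        unsigned long idx = (address - vma->vm_start) >> HPAGE_SHIFT;

        /* ...plus the mapping's own offset, converted to huge-page units. */
        return idx + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
}

int main(void)
{
        struct vma_model vma = { .vm_start = 0x40000000UL, .vm_pgoff = 0 };
        unsigned long addr = 0x40000000UL + (2UL << HPAGE_SHIFT) + 512;

        /* An address inside the third huge page of the mapping -> index 2. */
        printf("%lu\n", hugepage_index(&vma, addr));
        return 0;
}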
include/linux/poison.h
@@ -48,15 +48,6 @@
 #define POISON_FREE 0x6b /* for use-after-free poisoning */
 #define POISON_END 0xa5 /* end-byte of poisoning */
 
-/********** mm/hugetlb.c **********/
-/*
- * Private mappings of hugetlb pages use this poisoned value for
- * page->mapping. The core VM should not be doing anything with this mapping
- * but futex requires the existence of some page->mapping value even though it
- * is unused if PAGE_MAPPING_ANON is set.
- */
-#define HUGETLB_POISON ((void *)(0x00300300 + POISON_POINTER_DELTA + PAGE_MAPPING_ANON))
-
 /********** arch/$ARCH/mm/init.c **********/
 #define POISON_FREE_INITMEM 0xcc
 
include/linux/rmap.h
@@ -168,6 +168,11 @@ void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
 void page_add_file_rmap(struct page *);
 void page_remove_rmap(struct page *);
 
+void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
+                unsigned long);
+void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
+                unsigned long);
+
 static inline void page_dup_rmap(struct page *page)
 {
         atomic_inc(&page->_mapcount);