Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/scottwood/linux into next
Freescale updates from Scott: "Highlights include 8xx hugepage support, qbman fixes/cleanup, device tree updates, and some misc cleanup."
@@ -2,14 +2,42 @@
 #define _ASM_POWERPC_BOOK3S_32_PGALLOC_H
 
 #include <linux/threads.h>
+#include <linux/slab.h>
 
-/* For 32-bit, all levels of page tables are just drawn from get_free_page() */
-#define MAX_PGTABLE_INDEX_SIZE	0
+/*
+ * Functions that deal with pagetables that could be at any level of
+ * the table need to be passed an "index_size" so they know how to
+ * handle allocation.  For PTE pages (which are linked to a struct
+ * page for now, and drawn from the main get_free_pages() pool), the
+ * allocation size will be (2^index_size * sizeof(pointer)) and
+ * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
+ *
+ * The maximum index size needs to be big enough to allow any
+ * pagetable sizes we need, but small enough to fit in the low bits of
+ * any page table pointer.  In other words all pagetables, even tiny
+ * ones, must be aligned to allow at least enough low 0 bits to
+ * contain this value.  This value is also used as a mask, so it must
+ * be one less than a power of two.
+ */
+#define MAX_PGTABLE_INDEX_SIZE	0xf
 
 extern void __bad_pte(pmd_t *pmd);
 
-extern pgd_t *pgd_alloc(struct mm_struct *mm);
-extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
+extern struct kmem_cache *pgtable_cache[];
+#define PGT_CACHE(shift) ({				\
+			BUG_ON(!(shift));		\
+			pgtable_cache[(shift) - 1];	\
+		})
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
+}
 
 /*
  * We don't have any real pmd's, and this code never triggers because
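For reference, the comment above relies on every page table being aligned well enough that the low bits of its pointer can carry the index size. A minimal sketch of that packing, assuming the caller guarantees index_size <= MAX_PGTABLE_INDEX_SIZE (helper names here are illustrative, not part of this merge):

	/* Pack a table pointer and its index size into one word; because
	 * MAX_PGTABLE_INDEX_SIZE is one less than a power of two it doubles
	 * as the mask for the low bits. */
	static inline unsigned long pgtable_pack(void *table, unsigned int index_size)
	{
		BUG_ON((unsigned long)table & MAX_PGTABLE_INDEX_SIZE);
		return (unsigned long)table | index_size;
	}

	/* Recover the index size; the table address is the remaining bits. */
	static inline unsigned int pgtable_unpack_index(unsigned long packed)
	{
		return packed & MAX_PGTABLE_INDEX_SIZE;
	}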
@@ -68,8 +96,12 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
 
 static inline void pgtable_free(void *table, unsigned index_size)
 {
-	BUG_ON(index_size); /* 32-bit doesn't use this */
-	free_page((unsigned long)table);
+	if (!index_size) {
+		free_page((unsigned long)table);
+	} else {
+		BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
+		kmem_cache_free(PGT_CACHE(index_size), table);
+	}
 }
 
 #define check_pgt_cache()	do { } while (0)
@@ -8,6 +8,23 @@
 /* And here we include common definitions */
 #include <asm/pte-common.h>
 
+#define PTE_INDEX_SIZE	PTE_SHIFT
+#define PMD_INDEX_SIZE	0
+#define PUD_INDEX_SIZE	0
+#define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)
+
+#define PMD_CACHE_INDEX	PMD_INDEX_SIZE
+
+#ifndef __ASSEMBLY__
+#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
+#define PMD_TABLE_SIZE	0
+#define PUD_TABLE_SIZE	0
+#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
+#endif	/* __ASSEMBLY__ */
+
+#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
+#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)
+
 /*
  * The normal case is that PTEs are 32-bits and we have a 1-page
  * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
@@ -19,14 +36,10 @@
  * -Matt
  */
 /* PGDIR_SHIFT determines what a top-level page table entry can map */
-#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_SHIFT)
+#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
 #define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
 
-#define PTRS_PER_PTE	(1 << PTE_SHIFT)
-#define PTRS_PER_PMD	1
-#define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))
-
 #define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
 /*
  * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
@@ -82,12 +95,8 @@
 
 extern unsigned long ioremap_bot;
 
-/*
- * entries per page directory level: our page-table tree is two-level, so
- * we don't really have any PMD directory.
- */
-#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_SHIFT)
-#define PGD_TABLE_SIZE	(sizeof(pgd_t) << (32 - PGDIR_SHIFT))
+/* Bits to mask out from a PGD to get to the PUD page */
+#define PGD_MASKED_BITS		0
 
 #define pte_ERROR(e) \
 	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
@@ -284,15 +293,6 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm,
 #define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
 #define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })
 
-#ifndef CONFIG_PPC_4K_PAGES
-void pgtable_cache_init(void);
-#else
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init()	do { } while (0)
-#endif
-
 extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
 		      pmd_t **pmdp);
 
@@ -800,9 +800,6 @@ extern struct page *pgd_page(pgd_t pgd);
 #define pgd_ERROR(e) \
 	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
 
-void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
-void pgtable_cache_init(void);
-
 static inline int map_kernel_page(unsigned long ea, unsigned long pa,
 				  unsigned long flags)
 {
@@ -51,12 +51,20 @@ static inline void __local_flush_hugetlb_page(struct vm_area_struct *vma,
 static inline pte_t *hugepd_page(hugepd_t hpd)
 {
 	BUG_ON(!hugepd_ok(hpd));
+#ifdef CONFIG_PPC_8xx
+	return (pte_t *)__va(hpd.pd & ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK));
+#else
 	return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE);
+#endif
 }
 
 static inline unsigned int hugepd_shift(hugepd_t hpd)
 {
+#ifdef CONFIG_PPC_8xx
+	return ((hpd.pd & _PMD_PAGE_MASK) >> 1) + 17;
+#else
 	return hpd.pd & HUGEPD_SHIFT_MASK;
+#endif
 }
 
 #endif /* CONFIG_PPC_BOOK3S_64 */
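As a quick check of the 8xx shift encoding above: with _PMD_PAGE_MASK = 0x000c, an 8M hugepd (_PMD_PAGE_8M = 0x000c) gives (0xc >> 1) + 17 = 23, i.e. 2^23 = 8M, and a 512K hugepd (_PMD_PAGE_512K = 0x0004) gives (0x4 >> 1) + 17 = 19, i.e. 2^19 = 512K.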
@@ -99,7 +107,15 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
 
 void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
 			    pte_t pte);
+#ifdef CONFIG_PPC_8xx
+static inline void flush_hugetlb_page(struct vm_area_struct *vma,
+				      unsigned long vmaddr)
+{
+	flush_tlb_page(vma, vmaddr);
+}
+#else
 void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+#endif
 
 void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
 			    unsigned long end, unsigned long floor,
@@ -205,7 +221,8 @@ static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
  * are reserved early in the boot process by memblock instead of via
  * the .dts as on IBM platforms.
  */
-#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_FSL_BOOK3E)
+#if defined(CONFIG_HUGETLB_PAGE) && (defined(CONFIG_PPC_FSL_BOOK3E) || \
+    defined(CONFIG_PPC_8xx))
 extern void __init reserve_hugetlb_gpages(void);
 #else
 static inline void reserve_hugetlb_gpages(void)
@@ -172,6 +172,41 @@ typedef struct {
 
 #define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000)
 #define VIRT_IMMR_BASE (__fix_to_virt(FIX_IMMR_BASE))
+
+/* Page size definitions, common between 32 and 64-bit
+ *
+ *    shift : is the "PAGE_SHIFT" value for that page size
+ *    penc  : is the pte encoding mask
+ *
+ */
+struct mmu_psize_def {
+	unsigned int	shift;	/* number of bits */
+	unsigned int	enc;	/* PTE encoding */
+	unsigned int	ind;	/* Corresponding indirect page size shift */
+	unsigned int	flags;
+#define MMU_PAGE_SIZE_DIRECT	0x1	/* Supported as a direct size */
+#define MMU_PAGE_SIZE_INDIRECT	0x2	/* Supported as an indirect size */
+};
+
+extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
+
+static inline int shift_to_mmu_psize(unsigned int shift)
+{
+	int psize;
+
+	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
+		if (mmu_psize_defs[psize].shift == shift)
+			return psize;
+	return -1;
+}
+
+static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
+{
+	if (mmu_psize_defs[mmu_psize].shift)
+		return mmu_psize_defs[mmu_psize].shift;
+	BUG();
+}
+
 #endif /* !__ASSEMBLY__ */
 
 #if defined(CONFIG_PPC_4K_PAGES)
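The two helpers above are inverses over the populated entries of mmu_psize_defs[]. A minimal usage sketch, assuming the base page size has been filled in at boot (the call site is hypothetical, not taken from this merge):

	int psize = shift_to_mmu_psize(PAGE_SHIFT);

	BUG_ON(psize < 0);	/* -1 means the shift is not a supported page size */
	BUG_ON(mmu_psize_to_shift(psize) != PAGE_SHIFT);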
@@ -269,19 +269,20 @@ static inline bool early_radix_enabled(void)
 #define MMU_PAGE_64K	2
 #define MMU_PAGE_64K_AP	3	/* "Admixed pages" (hash64 only) */
 #define MMU_PAGE_256K	4
-#define MMU_PAGE_1M	5
-#define MMU_PAGE_2M	6
-#define MMU_PAGE_4M	7
-#define MMU_PAGE_8M	8
-#define MMU_PAGE_16M	9
-#define MMU_PAGE_64M	10
-#define MMU_PAGE_256M	11
-#define MMU_PAGE_1G	12
-#define MMU_PAGE_16G	13
-#define MMU_PAGE_64G	14
+#define MMU_PAGE_512K	5
+#define MMU_PAGE_1M	6
+#define MMU_PAGE_2M	7
+#define MMU_PAGE_4M	8
+#define MMU_PAGE_8M	9
+#define MMU_PAGE_16M	10
+#define MMU_PAGE_64M	11
+#define MMU_PAGE_256M	12
+#define MMU_PAGE_1G	13
+#define MMU_PAGE_16G	14
+#define MMU_PAGE_64G	15
 
 /* N.B. we need to change the type of hpte_page_sizes if this gets to be > 16 */
-#define MMU_PAGE_COUNT	15
+#define MMU_PAGE_COUNT	16
 
 #ifdef CONFIG_PPC_BOOK3S_64
 #include <asm/book3s/64/mmu.h>
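The "> 16" note refers to hpte_page_sizes[] on hash64, where each byte packs a base and an actual page-size index as two 4-bit nibbles, so every MMU_PAGE_* value must fit in a nibble. A compile-time guard along these lines would document that constraint (illustrative only, not part of this merge; kernel code would more typically use BUILD_BUG_ON() inside a function):

	_Static_assert(MMU_PAGE_COUNT <= 16, "MMU_PAGE_* indices must fit in 4 bits");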
@@ -2,14 +2,42 @@
 #define _ASM_POWERPC_PGALLOC_32_H
 
 #include <linux/threads.h>
+#include <linux/slab.h>
 
-/* For 32-bit, all levels of page tables are just drawn from get_free_page() */
-#define MAX_PGTABLE_INDEX_SIZE	0
+/*
+ * Functions that deal with pagetables that could be at any level of
+ * the table need to be passed an "index_size" so they know how to
+ * handle allocation.  For PTE pages (which are linked to a struct
+ * page for now, and drawn from the main get_free_pages() pool), the
+ * allocation size will be (2^index_size * sizeof(pointer)) and
+ * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
+ *
+ * The maximum index size needs to be big enough to allow any
+ * pagetable sizes we need, but small enough to fit in the low bits of
+ * any page table pointer.  In other words all pagetables, even tiny
+ * ones, must be aligned to allow at least enough low 0 bits to
+ * contain this value.  This value is also used as a mask, so it must
+ * be one less than a power of two.
+ */
+#define MAX_PGTABLE_INDEX_SIZE	0xf
 
 extern void __bad_pte(pmd_t *pmd);
 
-extern pgd_t *pgd_alloc(struct mm_struct *mm);
-extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
+extern struct kmem_cache *pgtable_cache[];
+#define PGT_CACHE(shift) ({				\
+			BUG_ON(!(shift));		\
+			pgtable_cache[(shift) - 1];	\
+		})
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
+}
 
 /*
  * We don't have any real pmd's, and this code never triggers because
@@ -68,8 +96,12 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
 
 static inline void pgtable_free(void *table, unsigned index_size)
 {
-	BUG_ON(index_size); /* 32-bit doesn't use this */
-	free_page((unsigned long)table);
+	if (!index_size) {
+		free_page((unsigned long)table);
+	} else {
+		BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
+		kmem_cache_free(PGT_CACHE(index_size), table);
+	}
 }
 
 #define check_pgt_cache()	do { } while (0)
@@ -16,6 +16,23 @@ extern int icache_44x_need_flush;
 
 #endif /* __ASSEMBLY__ */
 
+#define PTE_INDEX_SIZE	PTE_SHIFT
+#define PMD_INDEX_SIZE	0
+#define PUD_INDEX_SIZE	0
+#define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)
+
+#define PMD_CACHE_INDEX	PMD_INDEX_SIZE
+
+#ifndef __ASSEMBLY__
+#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
+#define PMD_TABLE_SIZE	0
+#define PUD_TABLE_SIZE	0
+#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
+#endif	/* __ASSEMBLY__ */
+
+#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
+#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)
+
 /*
  * The normal case is that PTEs are 32-bits and we have a 1-page
  * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
@@ -27,22 +44,12 @@ extern int icache_44x_need_flush;
  * -Matt
  */
 /* PGDIR_SHIFT determines what a top-level page table entry can map */
-#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_SHIFT)
+#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
 #define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
 
-/*
- * entries per page directory level: our page-table tree is two-level, so
- * we don't really have any PMD directory.
- */
-#ifndef __ASSEMBLY__
-#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_SHIFT)
-#define PGD_TABLE_SIZE	(sizeof(pgd_t) << (32 - PGDIR_SHIFT))
-#endif	/* __ASSEMBLY__ */
-
-#define PTRS_PER_PTE	(1 << PTE_SHIFT)
-#define PTRS_PER_PMD	1
-#define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))
+/* Bits to mask out from a PGD to get to the PUD page */
+#define PGD_MASKED_BITS		0
 
 #define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
 #define FIRST_USER_ADDRESS	0UL
@@ -329,15 +336,6 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm,
 #define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
 #define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })
 
-#ifndef CONFIG_PPC_4K_PAGES
-void pgtable_cache_init(void);
-#else
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init()	do { } while (0)
-#endif
-
 extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
 		      pmd_t **pmdp);
 
@@ -49,6 +49,7 @@
 #define _PMD_BAD	0x0ff0
 #define _PMD_PAGE_MASK	0x000c
 #define _PMD_PAGE_8M	0x000c
+#define _PMD_PAGE_512K	0x0004
 
 /* Until my rework is finished, 8xx still needs atomic PTE updates */
 #define PTE_ATOMIC_UPDATES	1
@@ -347,8 +347,6 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm,
 #define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val((pte)) })
 #define __swp_entry_to_pte(x)		__pte((x).val)
 
-void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
-void pgtable_cache_init(void);
 extern int map_kernel_page(unsigned long ea, unsigned long pa,
 			   unsigned long flags);
 extern int __meminit vmemmap_create_mapping(unsigned long start,
@@ -226,7 +226,11 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 #ifdef CONFIG_HUGETLB_PAGE
 static inline int hugepd_ok(hugepd_t hpd)
 {
+#ifdef CONFIG_PPC_8xx
+	return ((hpd.pd & 0x4) != 0);
+#else
 	return (hpd.pd > 0);
+#endif
 }
 
 static inline int pmd_huge(pmd_t pmd)
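The 0x4 test in the 8xx branch works because, per the pte-8xx.h values in this merge, both _PMD_PAGE_8M (0x000c) and the new _PMD_PAGE_512K (0x0004) have that bit set, while a pmd entry pointing at an ordinary PTE page is expected to leave the _PMD_PAGE_MASK bits clear.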
@@ -78,6 +78,8 @@ static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
 
 unsigned long vmalloc_to_phys(void *vmalloc_addr);
 
+void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
+void pgtable_cache_init(void);
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_PGTABLE_H */
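With 32-bit platforms now drawing higher-level tables from kmem caches as well, these declarations move to the common header. A sketch of the expected initialisation pattern, assuming PMD_CACHE_INDEX is zero on two-level configurations (constructor names are placeholders, not taken from this merge):

	void pgtable_cache_init(void)
	{
		/* always create a cache for the PGD level */
		pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);

		/* only add a PMD cache where a real PMD level exists */
		if (PMD_CACHE_INDEX && !PGT_CACHE(PMD_CACHE_INDEX))
			pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor);
	}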
@@ -4,7 +4,7 @@
 #ifndef _ASM_POWERPC_REG_8xx_H
 #define _ASM_POWERPC_REG_8xx_H
 
-#include <asm/mmu-8xx.h>
+#include <asm/mmu.h>
 
 /* Cache control on the MPC8xx is provided through some additional
  * special purpose registers.