powerpc/mm: Merge various PTE bits and accessors definitions

Now that they are almost identical, we can merge some of the definitions
related to the PTE format into common files.

This creates a new pte-common.h, which is included by both 32-bit and
64-bit builds right after the CPU-specific pte-*.h file. It defines any
bits that haven't been defined already to "default" values, and then
provides generic definitions of most of the bit combinations based on
these, which are exposed to the rest of the kernel.
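
As an illustration, here is a minimal sketch of that pattern (hypothetical
header name and bit values, not the real ones): the CPU-specific pte-*.h
defines only the bits that exist on that family, and pte-common.h fills in
zero defaults afterwards so the generic combinations always compile:

    /* pte-someplatform.h: CPU-specific bits (hypothetical values) */
    #define _PAGE_PRESENT	0x001
    #define _PAGE_RW		0x002
    /* no _PAGE_SPECIAL on this family */

    /* pte-common.h: included afterwards, provides defaults */
    #ifndef _PAGE_SPECIAL
    #define _PAGE_SPECIAL	0	/* harmless when OR'ed into masks */
    #endif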

I also moved most of the "small" accessors to the PTE bits and the
modification helpers (pte_mk*) into the common pgtable.h. The actual
low-level access functions remain in their separate files.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Author: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Date:   2009-03-19 19:34:09 +00:00
parent 8d1cf34e7a
commit 71087002cf
4 changed files with 233 additions and 337 deletions


@@ -97,174 +97,11 @@ extern int icache_44x_need_flush;
#include <asm/pte-hash32.h>
#endif
/* If _PAGE_SPECIAL is defined, then we advertise our support for it */
#ifdef _PAGE_SPECIAL
#define __HAVE_ARCH_PTE_SPECIAL
#endif
/*
* Some bits are only used on some cpu families... Make sure that any
* that are undefined get defined as 0
*/
#ifndef _PAGE_HASHPTE
#define _PAGE_HASHPTE 0
#endif
#ifndef _PTE_NONE_MASK
#define _PTE_NONE_MASK 0
#endif
#ifndef _PAGE_SHARED
#define _PAGE_SHARED 0
#endif
#ifndef _PAGE_HWWRITE
#define _PAGE_HWWRITE 0
#endif
#ifndef _PAGE_HWEXEC
#define _PAGE_HWEXEC 0
#endif
#ifndef _PAGE_EXEC
#define _PAGE_EXEC 0
#endif
#ifndef _PAGE_ENDIAN
#define _PAGE_ENDIAN 0
#endif
#ifndef _PAGE_COHERENT
#define _PAGE_COHERENT 0
#endif
#ifndef _PAGE_WRITETHRU
#define _PAGE_WRITETHRU 0
#endif
#ifndef _PAGE_SPECIAL
#define _PAGE_SPECIAL 0
#endif
#ifndef _PMD_PRESENT_MASK
#define _PMD_PRESENT_MASK _PMD_PRESENT
#endif
#ifndef _PMD_SIZE
#define _PMD_SIZE 0
#define PMD_PAGE_SIZE(pmd) bad_call_to_PMD_PAGE_SIZE()
#endif
#ifndef _PAGE_KERNEL_RO
#define _PAGE_KERNEL_RO 0
#endif
#ifndef _PAGE_KERNEL_RW
#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE)
#endif
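/* Note (added for illustration): on families with a separate hardware
 * write bit (_PAGE_HWWRITE non-zero, e.g. the 8xx/40x style MMUs), the
 * kernel RW default sets it together with _PAGE_DIRTY up front, so
 * kernel mappings never fault just to be marked writable/dirty.
 */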
#define _PAGE_HPTEFLAGS _PAGE_HASHPTE
/* Location of the PFN in the PTE. Most platforms use the same as _PAGE_SHIFT
* here (ie, naturally aligned). Platforms that differ just pre-define the
* value, so we don't override it here
*/
#ifndef PTE_RPN_SHIFT
#define PTE_RPN_SHIFT (PAGE_SHIFT)
#endif
#ifdef CONFIG_PTE_64BIT
#define PTE_RPN_MAX (1ULL << (64 - PTE_RPN_SHIFT))
#define PTE_RPN_MASK (~((1ULL<<PTE_RPN_SHIFT)-1))
#else
#define PTE_RPN_MAX (1UL << (32 - PTE_RPN_SHIFT))
#define PTE_RPN_MASK (~((1UL<<PTE_RPN_SHIFT)-1))
#endif
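/* Worked example (added for illustration): with 4K pages (PAGE_SHIFT ==
 * 12) and the default PTE_RPN_SHIFT, a 32-bit PTE gives PTE_RPN_MASK ==
 * 0xfffff000 and PTE_RPN_MAX == 1UL << 20, i.e. room for PFNs 0 through
 * 0xfffff.
 */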
/* _PAGE_CHG_MASK masks the bits that are to be preserved across
* pgprot changes
*/
#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
_PAGE_ACCESSED | _PAGE_SPECIAL)
/* Mask of bits returned by pte_pgprot() */
#define PAGE_PROT_BITS (_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
_PAGE_WRITETHRU | _PAGE_ENDIAN | \
_PAGE_USER | _PAGE_ACCESSED | \
_PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \
_PAGE_EXEC | _PAGE_HWEXEC)
/*
* We define 2 sets of base prot bits, one for basic pages (ie,
* cacheable kernel and user pages) and one for non cacheable
* pages. We always set _PAGE_COHERENT when SMP is enabled or
* the processor might need it for DMA coherency.
*/
#if defined(CONFIG_SMP) || defined(CONFIG_PPC_STD_MMU)
#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)
#else
#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED)
#endif
#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED)
/* Permission masks used for kernel mappings */
#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
_PAGE_NO_CACHE)
#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
_PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW | _PAGE_EXEC)
#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO | _PAGE_EXEC)
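/* For instance, device (I/O) mappings generally want the PAGE_KERNEL_NCG
 * flavour: _PAGE_NO_CACHE so stores reach the device promptly, and
 * _PAGE_GUARDED so the CPU won't speculatively load from the area.
 */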
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
defined(CONFIG_KPROBES)
/* We want the debuggers to be able to set breakpoints anywhere, so
* don't write protect the kernel text */
#define PAGE_KERNEL_TEXT PAGE_KERNEL_X
#else
#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX
#endif
#define PAGE_NONE __pgprot(_PAGE_BASE)
#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
/*
* The PowerPC can only do execute protection on a segment (256MB) basis,
* not on a page basis. So we consider execute permission the same as read.
* Also, write permissions imply read permissions.
* This is the closest we can get.
*/
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY_X
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY_X
#define __P100 PAGE_READONLY
#define __P101 PAGE_READONLY_X
#define __P110 PAGE_COPY
#define __P111 PAGE_COPY_X
#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY_X
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED_X
#define __S100 PAGE_READONLY
#define __S101 PAGE_READONLY_X
#define __S110 PAGE_SHARED
#define __S111 PAGE_SHARED_X
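/* Example (added for illustration): a PROT_READ|PROT_WRITE private
 * mapping indexes __P011 and so gets PAGE_COPY_X: no _PAGE_RW, so the
 * first write faults for copy-on-write, and _PAGE_EXEC comes along
 * because read access implies execute here.
 */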
/* And here we include common definitions */
#include <asm/pte-common.h>
#ifndef __ASSEMBLY__
/* Make sure we get a link error if PMD_PAGE_SIZE is ever called on a
* kernel without large page PMD support */
extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
/*
* Conversions between PTE values and page frame numbers.
*/
#define pte_pfn(x) (pte_val(x) >> PTE_RPN_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pfn_pte(pfn, prot) __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |\
pgprot_val(prot))
#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
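/* Example (added for illustration): with PTE_RPN_SHIFT == 12,
 * pfn_pte(0x12345, prot) yields a PTE value of
 * 0x12345000 | pgprot_val(prot), and pte_pfn() on the result returns
 * 0x12345 again.
 */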
#endif /* __ASSEMBLY__ */
#define pte_none(pte) ((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
#define pte_clear(mm, addr, ptep) \
do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)
@@ -273,43 +110,6 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
#define pmd_present(pmd) (pmd_val(pmd) & _PMD_PRESENT_MASK)
#define pmd_clear(pmdp) do { pmd_val(*(pmdp)) = 0; } while (0)
#ifndef __ASSEMBLY__
/*
* The following only work if pte_present() is true.
* Undefined behaviour if not.
*/
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
static inline pte_t pte_wrprotect(pte_t pte) {
pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte) {
pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) {
pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) {
pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) {
pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkspecial(pte_t pte) {
pte_val(pte) |= _PAGE_SPECIAL; return pte; }
static inline pgprot_t pte_pgprot(pte_t pte)
{
return __pgprot(pte_val(pte) & PAGE_PROT_BITS);
}
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
return pte;
}
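/* Example for pte_modify() above (added for illustration): an mprotect()
 * from PROT_READ|PROT_WRITE to PROT_READ ends up here; _PAGE_CHG_MASK
 * preserves the PFN plus the DIRTY, ACCESSED, HASHPTE and SPECIAL bits,
 * while the RW/USER/EXEC protection bits are replaced by the new pgprot.
 */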
/*
* When flushing the tlb entry for a page, we also need to flush the hash
* table entry. flush_hash_pages is assembler (for speed) in hashtable.S.