sparc: mmu_gather rework
Rework the sparc mmu_gather usage to conform to the new world order :-)

Sparc mmu_gather does two things:
 - tracks vaddrs to unhash
 - tracks pages to free

Split these two things like powerpc has done and keep the vaddrs
in per-cpu data structures and flush them on context switch.

The remaining bits can then use the generic mmu_gather.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: David Miller <davem@davemloft.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Tony Luck <tony.luck@intel.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 90f08e399d
parent d6bf29b44d
committed by Linus Torvalds
@@ -655,9 +655,11 @@ static inline int pte_special(pte_t pte)
 #define pte_unmap(pte)			do { } while (0)
 
 /* Actual page table PTE updates.  */
-extern void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig);
+extern void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
+			  pte_t *ptep, pte_t orig, int fullmm);
 
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
+static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
+				pte_t *ptep, pte_t pte, int fullmm)
 {
 	pte_t orig = *ptep;
 
@@ -670,12 +672,19 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *p
 	 * and SUN4V pte layout, so this inline test is fine.
 	 */
 	if (likely(mm != &init_mm) && (pte_val(orig) & _PAGE_VALID))
-		tlb_batch_add(mm, addr, ptep, orig);
+		tlb_batch_add(mm, addr, ptep, orig, fullmm);
 }
 
+#define set_pte_at(mm,addr,ptep,pte)	\
+	__set_pte_at((mm), (addr), (ptep), (pte), 0)
+
 #define pte_clear(mm,addr,ptep)		\
 	set_pte_at((mm), (addr), (ptep), __pte(0UL))
 
+#define __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
+#define pte_clear_not_present_full(mm,addr,ptep,fullmm)	\
+	__set_pte_at((mm), (addr), (ptep), __pte(0UL), (fullmm))
+
 #ifdef DCACHE_ALIASING_POSSIBLE
 #define __HAVE_ARCH_MOVE_PTE
 #define move_pte(pte, prot, old_addr, new_addr)				\
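
The hunks above only cover the pgtable_64.h side of the change. As a rough illustration of the per-cpu vaddr batching described in the commit message, here is a minimal sketch; the names tlb_batch, TLB_BATCH_NR, tlb_batch_add_one and flush_tlb_pending are assumptions for illustration, not taken from the hunks shown here.

#include <linux/percpu.h>
#include <linux/mm_types.h>

/* Hypothetical sketch of a per-cpu batch of user vaddrs queued for unhashing;
 * identifiers are illustrative, not copied from this diff. */
#define TLB_BATCH_NR	192

struct tlb_batch {
	struct mm_struct *mm;		/* mm the queued vaddrs belong to */
	unsigned long tlb_nr;		/* number of vaddrs currently queued */
	unsigned long vaddrs[TLB_BATCH_NR];
};

static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);

/* Assumed to flush all queued vaddrs; called when the batch fills up,
 * and from the context-switch path so a batch never spans a switch. */
extern void flush_tlb_pending(void);

/* Called for valid user PTEs on the fullmm == 0 path of __set_pte_at():
 * queue the vaddr instead of flushing it immediately. */
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	unsigned long nr = tb->tlb_nr;

	if (unlikely(nr != 0 && mm != tb->mm)) {
		flush_tlb_pending();	/* different mm: drain the old batch */
		nr = 0;
	}
	if (nr == 0)
		tb->mm = mm;

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();

	put_cpu_var(tlb_batch);
}

With the vaddr tracking handled per CPU along these lines, the page-freeing side no longer needs anything sparc-specific, which is what lets the architecture fall back to the generic mmu_gather for the rest.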