/*
 * include/asm-xtensa/page.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Copyright (C) 2001 - 2007 Tensilica Inc.
 */

#ifndef _XTENSA_PAGE_H
#define _XTENSA_PAGE_H

#include <asm/processor.h>
#include <asm/types.h>
#include <asm/cache.h>
#include <asm/kmem_layout.h>

/*
 * PAGE_SHIFT determines the page size
 */

#define PAGE_SHIFT	12
#define PAGE_SIZE	(__XTENSA_UL_CONST(1) << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE-1))
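
/*
 * Worked example (editor's illustration, not part of the original header):
 * with PAGE_SHIFT == 12, PAGE_SIZE is 0x1000 and PAGE_MASK is 0xfffff000
 * on a 32-bit core, so:
 *
 *	0x12345678 & PAGE_MASK	== 0x12345000	(page base address)
 *	0x12345678 & ~PAGE_MASK	== 0x678	(offset within the page)
 */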

#ifdef CONFIG_MMU
#define PAGE_OFFSET	XCHAL_KSEG_CACHED_VADDR
#define PHYS_OFFSET	XCHAL_KSEG_PADDR
#define MAX_LOW_PFN	(PHYS_PFN(XCHAL_KSEG_PADDR) + \
			 PHYS_PFN(XCHAL_KSEG_SIZE))
#else
#define PAGE_OFFSET	_AC(CONFIG_DEFAULT_MEM_START, UL)
#define PHYS_OFFSET	_AC(CONFIG_DEFAULT_MEM_START, UL)
#define MAX_LOW_PFN	PHYS_PFN(0xfffffffful)
#endif
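
/*
 * Illustration (editor's note, assuming a typical configuration where
 * PHYS_PFN(x) is (x) >> PAGE_SHIFT, XCHAL_KSEG_PADDR == 0x00000000 and
 * XCHAL_KSEG_SIZE == 0x08000000, i.e. 128 MiB of KSEG):
 *
 *	MAX_LOW_PFN == 0x0 + 0x8000 == 0x8000
 */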

/*
 * Cache aliasing:
 *
 * If the cache size for one way is greater than the page size, we have to
 * deal with cache aliasing. The cache index is wider than the page size:
 *
 * |      |cache| cache index
 * | pfn  | off | virtual address
 * |xxxx:X| zzz |
 * |    : |     |
 * | \  / |     |
 * |trans.|     |
 * | /  \ |     |
 * |yyyy:Y| zzz | physical address
 *
 * When the page number is translated to the physical page address, the lowest
 * bit(s) (X) that are part of the cache index are also translated (Y).
 * If this translation changes bit(s) (X), the cache index is also affected,
 * thus resulting in a different cache line than before.
 * The kernel does not provide a mechanism to ensure that the page color
 * (represented by these bits) remains the same when pages are allocated or
 * remapped. When user pages are mapped into kernel space, the color of the
 * page might also change.
 *
 * We use the address space VMALLOC_END ... VMALLOC_END + DCACHE_WAY_SIZE * 2
 * to temporarily map a page so we can match the color.
 */

#if DCACHE_WAY_SIZE > PAGE_SIZE
# define DCACHE_ALIAS_ORDER	(DCACHE_WAY_SHIFT - PAGE_SHIFT)
# define DCACHE_ALIAS_MASK	(PAGE_MASK & (DCACHE_WAY_SIZE - 1))
# define DCACHE_ALIAS(a)	(((a) & DCACHE_ALIAS_MASK) >> PAGE_SHIFT)
# define DCACHE_ALIAS_EQ(a, b)	((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0)
#else
# define DCACHE_ALIAS_ORDER	0
# define DCACHE_ALIAS(a)	((void)(a), 0)
#endif
#define DCACHE_N_COLORS		(1 << DCACHE_ALIAS_ORDER)
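
/*
 * Worked example (editor's illustration, assuming a hypothetical core with
 * an 8 KiB cache way, i.e. DCACHE_WAY_SHIFT == 13 and PAGE_SHIFT == 12):
 *
 *	DCACHE_ALIAS_ORDER == 1			(DCACHE_N_COLORS == 2)
 *	DCACHE_ALIAS_MASK  == PAGE_MASK & 0x1fff == 0x1000
 *	DCACHE_ALIAS(0x3000) == (0x3000 & 0x1000) >> 12 == 1
 *	DCACHE_ALIAS_EQ(0x1000, 0x3000) is true: both addresses have color 1
 *	and therefore index the same cache lines.
 */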

#if ICACHE_WAY_SIZE > PAGE_SIZE
# define ICACHE_ALIAS_ORDER	(ICACHE_WAY_SHIFT - PAGE_SHIFT)
# define ICACHE_ALIAS_MASK	(PAGE_MASK & (ICACHE_WAY_SIZE - 1))
# define ICACHE_ALIAS(a)	(((a) & ICACHE_ALIAS_MASK) >> PAGE_SHIFT)
# define ICACHE_ALIAS_EQ(a, b)	((((a) ^ (b)) & ICACHE_ALIAS_MASK) == 0)
#else
# define ICACHE_ALIAS_ORDER	0
#endif

#ifdef __ASSEMBLY__

#define __pgprot(x)	(x)

#else

/*
 * These are used to make use of C type-checking..
 */

typedef struct { unsigned long pte; } pte_t;		/* page table entry */
typedef struct { unsigned long pgd; } pgd_t;		/* PGD table entry */
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct page *pgtable_t;

#define pte_val(x)	((x).pte)
#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pte(x)	((pte_t) { (x) } )
#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )

/*
 * Pure 2^n version of get_order
 * Use the 'nsau' instruction if the processor supports it, or the
 * generic version otherwise.
 */
#if XCHAL_HAVE_NSA
static inline __attribute_const__ int get_order(unsigned long size)
{
	int lz;

	asm ("nsau %0, %1" : "=r" (lz) : "r" ((size - 1) >> PAGE_SHIFT));
	return 32 - lz;
}
#else
# include <asm-generic/getorder.h>
#endif
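
/*
 * Worked example (editor's illustration): 'nsau' returns the number of
 * leading zero bits of its operand (32 for an operand of 0), so with
 * PAGE_SHIFT == 12:
 *
 *	get_order(PAGE_SIZE)	 -> (0x1000-1) >> 12 == 0, lz == 32, order 0
 *	get_order(2*PAGE_SIZE)	 -> 0x1fff >> 12 == 1,	   lz == 31, order 1
 *	get_order(2*PAGE_SIZE+1) -> 0x2000 >> 12 == 2,	   lz == 30, order 2
 */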

struct page;
struct vm_area_struct;
extern void clear_page(void *page);
extern void copy_page(void *to, void *from);

/*
 * If we have cache aliasing and writeback caches, we might have to do
 * some extra work.
 */

#if defined(CONFIG_MMU) && DCACHE_WAY_SIZE > PAGE_SIZE
extern void clear_page_alias(void *vaddr, unsigned long paddr);
extern void copy_page_alias(void *to, void *from,
			    unsigned long to_paddr, unsigned long from_paddr);

#define clear_user_highpage clear_user_highpage
void clear_user_highpage(struct page *page, unsigned long vaddr);
#define __HAVE_ARCH_COPY_USER_HIGHPAGE
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma);
#else
# define clear_user_page(page, vaddr, pg)	clear_page(page)
# define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
#endif
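
/*
 * Sketch of the intended use (editor's illustration; not the kernel's
 * actual implementation): the *_alias() helpers also take the physical
 * address so the implementation can pick a temporary kernel mapping in
 * the VMALLOC_END ... VMALLOC_END + DCACHE_WAY_SIZE * 2 window whose
 * color matches the user mapping, conceptually:
 *
 *	if (!DCACHE_ALIAS_EQ(kvaddr, uvaddr))
 *		kvaddr = color-matching alias of paddr;	(hypothetical step)
 *	clear_page_alias((void *)kvaddr, paddr);
 */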

/*
 * This handles the memory map. We handle pages at
 * XCHAL_KSEG_CACHED_VADDR for kernels with a 32-bit address space.
 * These macros are for conversion of kernel addresses, not user
 * addresses.
 */

#define ARCH_PFN_OFFSET		(PHYS_OFFSET >> PAGE_SHIFT)

#ifdef CONFIG_MMU
static inline unsigned long ___pa(unsigned long va)
{
	unsigned long off = va - PAGE_OFFSET;

	/* Fold the bypass (uncached) KSEG mapping onto the cached one */
	if (off >= XCHAL_KSEG_SIZE)
		off -= XCHAL_KSEG_SIZE;

#ifndef CONFIG_XIP_KERNEL
	return off + PHYS_OFFSET;
#else
	if (off < XCHAL_KSEG_SIZE)
		return off + PHYS_OFFSET;

	/* Addresses above KSEG are assumed to be XIP addresses in KIO */
	off -= XCHAL_KSEG_SIZE;
	if (off >= XCHAL_KIO_SIZE)
		off -= XCHAL_KIO_SIZE;
	return off + XCHAL_KIO_PADDR;
#endif
}
#define __pa(x)	___pa((unsigned long)(x))
#else
#define __pa(x)	\
	((unsigned long) (x) - PAGE_OFFSET + PHYS_OFFSET)
#endif

#define __va(x)	\
	((void *)((unsigned long) (x) - PHYS_OFFSET + PAGE_OFFSET))
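
/*
 * Worked example (editor's illustration, assuming the common configuration
 * PAGE_OFFSET == 0xd0000000, PHYS_OFFSET == 0x0, XCHAL_KSEG_SIZE ==
 * 0x08000000, non-XIP):
 *
 *	__pa(0xd0001234) == 0x00001234		(cached KSEG mapping)
 *	__pa(0xd8001234) == 0x00001234		(bypass mapping folded down)
 *	__va(0x00001234) == (void *)0xd0001234	(always the cached mapping)
 */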

#define pfn_valid(pfn) \
	((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)

#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_virt(page)	__va(page_to_pfn(page) << PAGE_SHIFT)
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
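
/*
 * Illustration (editor's note): these macros compose the pfn <-> page <->
 * vaddr conversions; continuing the example above,
 *
 *	virt_to_page(0xd0001234)	== pfn_to_page(0x1)
 *	page_to_phys(pfn_to_page(0x1))	== 0x1000
 *	page_to_virt(pfn_to_page(0x1))	== __va(0x1000) == (void *)0xd0001000
 */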

#endif /* __ASSEMBLY__ */

#include <asm-generic/memory_model.h>

#endif /* _XTENSA_PAGE_H */