// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/m68k/mm/motorola.c
 *
 * Routines specific to the Motorola MMU, originally from:
 * linux/arch/m68k/init.c
 * which are Copyright (C) 1995 Hamish Macdonald
 *
 * Moved 8/20/1999 Sam Creasey
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/gfp.h>

#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/machdep.h>
#include <asm/io.h>
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif
#include <asm/sections.h>

#undef DEBUG

#ifndef mm_cachebits
/*
 * Bits to add to page descriptors for "normal" caching mode.
 * For 68020/030 this is 0.
 * For 68040, this is _PAGE_CACHE040 (cachable, copyback)
 */
unsigned long mm_cachebits;
EXPORT_SYMBOL(mm_cachebits);
#endif

/* Prior to calling these routines, the page should have been flushed
 * from both the cache and ATC, or the CPU might not notice that the
 * cache setting for the page has been changed. -jskov
 */
static inline void nocache_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (CPU_IS_040_OR_060) {
		pte_t *ptep = virt_to_kpte(addr);

		*ptep = pte_mknocache(*ptep);
	}
}

static inline void cache_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (CPU_IS_040_OR_060) {
		pte_t *ptep = virt_to_kpte(addr);

		*ptep = pte_mkcache(*ptep);
	}
}

/*
 * The Motorola 680x0 user's manual recommends using uncached memory for
 * address translation tables.
 *
 * Seeing how the MMU can be external on (some of) these chips, that seems like
 * a very important recommendation to follow. Provide some helpers to combat
 * 'variation' amongst the users of this.
 */

void mmu_page_ctor(void *page)
{
	__flush_page_to_ram(page);
	flush_tlb_kernel_page(page);
	nocache_page(page);
}

void mmu_page_dtor(void *page)
{
	cache_page(page);
}

/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
   struct page instead of separately kmalloced struct.  Stolen from
   arch/sparc/mm/srmmu.c ... */

typedef struct list_head ptable_desc;

static struct list_head ptable_list[2] = {
	LIST_HEAD_INIT(ptable_list[0]),
	LIST_HEAD_INIT(ptable_list[1]),
};

#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
#define PD_MARKBITS(dp) (*(unsigned int *)&PD_PAGE(dp)->index)

static const int ptable_shift[2] = {
	7+2, /* PGD, PMD */
	6+2, /* PTE */
};

#define ptable_size(type) (1U << ptable_shift[type])
#define ptable_mask(type) ((1U << (PAGE_SIZE / ptable_size(type))) - 1)

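/*
 * Bookkeeping, as set up by the macros above: every page that backs pointer
 * or PTE tables sits on ptable_list[type], using the struct page ->lru member
 * as its list node, and PD_MARKBITS() reuses ->index as a bitmap of free
 * slots within that page.  PGD/PMD tables are 512 bytes (shift 7+2) and PTE
 * tables 256 bytes (shift 6+2), so with the usual 4 KiB pages one page holds
 * 8 or 16 tables respectively.
 */
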
void __init init_pointer_table(void *table, int type)
{
	ptable_desc *dp;
	unsigned long ptable = (unsigned long)table;
	unsigned long page = ptable & PAGE_MASK;
	unsigned int mask = 1U << ((ptable - page)/ptable_size(type));

	dp = PD_PTABLE(page);
	if (!(PD_MARKBITS(dp) & mask)) {
		PD_MARKBITS(dp) = ptable_mask(type);
		list_add(dp, &ptable_list[type]);
	}

	PD_MARKBITS(dp) &= ~mask;
	pr_debug("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));

	/* unreserve the page so it can be freed later */
	__ClearPageReserved(PD_PAGE(dp));
	init_page_count(PD_PAGE(dp));
}

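/*
 * Hand out one table of the given type.  A partially used backing page is
 * preferred; a fresh page is allocated (and made noncacheable) only when the
 * page at the head of the list has no free slots left.
 */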
void *get_pointer_table(int type)
{
	ptable_desc *dp = ptable_list[type].next;
	unsigned int mask = list_empty(&ptable_list[type]) ? 0 : PD_MARKBITS(dp);
	unsigned int tmp, off;

	/*
	 * For a pointer table for a user process address space, a
	 * table is taken from a page allocated for the purpose.  Each
	 * page can hold 8 pointer tables.  The page is remapped in
	 * virtual address space to be noncacheable.
	 */
	if (mask == 0) {
		void *page;
		ptable_desc *new;

		if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
			return NULL;

		if (type == TABLE_PTE) {
			/*
			 * m68k doesn't have SPLIT_PTE_PTLOCKS because it
			 * does not support SMP.
			 */
			pgtable_pte_page_ctor(virt_to_page(page));
		}

		mmu_page_ctor(page);

		new = PD_PTABLE(page);
		PD_MARKBITS(new) = ptable_mask(type) - 1;
		list_add_tail(new, dp);

		return (pmd_t *)page;
	}

	for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += ptable_size(type))
		;
	PD_MARKBITS(dp) = mask & ~tmp;
	if (!PD_MARKBITS(dp)) {
		/* move to end of list */
		list_move_tail(dp, &ptable_list[type]);
	}
	return page_address(PD_PAGE(dp)) + off;
}

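/*
 * Return a table to its backing page.  Once every slot in that page is free
 * again, the page itself is released; the return value tells the caller
 * whether that happened (1) or not (0).
 */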
int free_pointer_table(void *table, int type)
{
	ptable_desc *dp;
	unsigned long ptable = (unsigned long)table;
	unsigned long page = ptable & PAGE_MASK;
	unsigned int mask = 1U << ((ptable - page)/ptable_size(type));

	dp = PD_PTABLE(page);
	if (PD_MARKBITS(dp) & mask)
		panic("table already free!");

	PD_MARKBITS(dp) |= mask;

	if (PD_MARKBITS(dp) == ptable_mask(type)) {
		/* all tables in page are free, free page */
		list_del(dp);
		mmu_page_dtor((void *)page);
		if (type == TABLE_PTE)
			pgtable_pte_page_dtor(virt_to_page(page));
		free_page(page);
		return 1;
	} else if (ptable_list[type].next != dp) {
		/*
		 * move this descriptor to the front of the list, since
		 * it has one or more free tables.
		 */
		list_move(dp, &ptable_list[type]);
	}
	return 0;
}

/* size of memory already mapped in head.S */
extern __initdata unsigned long m68k_init_mapped_size;

extern unsigned long availmem;

static pte_t *last_pte_table __initdata = NULL;

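/*
 * Boot-time allocator for kernel PTE tables: tables are carved one after the
 * other out of a memblock page, and a new page is grabbed (and made
 * noncacheable) once the previous one has been handed out completely, i.e.
 * once last_pte_table has advanced to a page boundary.
 */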
static pte_t * __init kernel_page_table(void)
{
	pte_t *pte_table = last_pte_table;

	if (PAGE_ALIGNED(last_pte_table)) {
		pte_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
		if (!pte_table) {
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);
		}

		clear_page(pte_table);
		mmu_page_ctor(pte_table);

		last_pte_table = pte_table;
	}

	last_pte_table += PTRS_PER_PTE;

	return pte_table;
}

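/*
 * The same carving scheme is used for kernel pointer (pmd) tables, except
 * that the first allocations reuse the space left over in the page that
 * head.S used for its last pointer table.
 */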
static pmd_t *last_pmd_table __initdata = NULL;

static pmd_t * __init kernel_ptr_table(void)
{
	if (!last_pmd_table) {
		unsigned long pmd, last;
		int i;

		/* Find the last ptr table that was used in head.S and
		 * reuse the remaining space in that page for further
		 * ptr tables.
		 */
		last = (unsigned long)kernel_pg_dir;
		for (i = 0; i < PTRS_PER_PGD; i++) {
			pud_t *pud = (pud_t *)(&kernel_pg_dir[i]);

			if (!pud_present(*pud))
				continue;
			pmd = pgd_page_vaddr(kernel_pg_dir[i]);
			if (pmd > last)
				last = pmd;
		}

		last_pmd_table = (pmd_t *)last;
#ifdef DEBUG
		printk("kernel_ptr_init: %p\n", last_pmd_table);
#endif
	}

	last_pmd_table += PTRS_PER_PMD;
	if (PAGE_ALIGNED(last_pmd_table)) {
		last_pmd_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
		if (!last_pmd_table)
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);

		clear_page(last_pmd_table);
		mmu_page_ctor(last_pmd_table);
	}

	return last_pmd_table;
}

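/*
 * Map one memory chunk into the kernel's virtual address space.  On 020/030
 * the code uses "early termination" descriptors where possible, so a whole
 * PGDIR_SIZE or PMD_SIZE region is covered by a single root- or pointer-table
 * entry; on 040/060 ordinary page tables are built instead.  In both cases
 * virtual page 0 is left invalid (the "zero map"), so NULL pointer
 * dereferences fault.
 */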
static void __init map_node(int node)
{
	unsigned long physaddr, virtaddr, size;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	size = m68k_memory[node].size;
	physaddr = m68k_memory[node].addr;
	virtaddr = (unsigned long)phys_to_virt(physaddr);
	physaddr |= m68k_supervisor_cachemode |
		    _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY;
	if (CPU_IS_040_OR_060)
		physaddr |= _PAGE_GLOBAL040;

	while (size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PMD_SIZE-1)))
			printk("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
				virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		if (virtaddr && CPU_IS_020_OR_030) {
			if (!(virtaddr & (PGDIR_SIZE-1)) &&
			    size >= PGDIR_SIZE) {
#ifdef DEBUG
				printk("[very early term]");
#endif
				pgd_val(*pgd_dir) = physaddr;
				size -= PGDIR_SIZE;
				virtaddr += PGDIR_SIZE;
				physaddr += PGDIR_SIZE;
				continue;
			}
		}
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		if (!pud_present(*pud_dir)) {
			pmd_dir = kernel_ptr_table();
#ifdef DEBUG
			printk("[new pointer %p]", pmd_dir);
#endif
			pud_set(pud_dir, pmd_dir);
		} else
			pmd_dir = pmd_offset(pud_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			if (virtaddr) {
#ifdef DEBUG
				printk("[early term]");
#endif
				pmd_val(*pmd_dir) = physaddr;
				physaddr += PMD_SIZE;
			} else {
				int i;
#ifdef DEBUG
				printk("[zero map]");
#endif
				pte_dir = kernel_page_table();
				pmd_set(pmd_dir, pte_dir);

				pte_val(*pte_dir++) = 0;
				physaddr += PAGE_SIZE;
				for (i = 1; i < PTRS_PER_PTE; physaddr += PAGE_SIZE, i++)
					pte_val(*pte_dir++) = physaddr;
			}
			size -= PMD_SIZE;
			virtaddr += PMD_SIZE;
		} else {
			if (!pmd_present(*pmd_dir)) {
#ifdef DEBUG
				printk("[new table]");
#endif
				pte_dir = kernel_page_table();
				pmd_set(pmd_dir, pte_dir);
			}
			pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

			if (virtaddr) {
				if (!pte_present(*pte_dir))
					pte_val(*pte_dir) = physaddr;
			} else
				pte_val(*pte_dir) = 0;
			size -= PAGE_SIZE;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
		}
	}
#ifdef DEBUG
	printk("\n");
#endif
}

/*
 * Alternate definitions that are compile time constants, for
 * initializing protection_map.  The cachebits are fixed later.
 */
#define PAGE_NONE_C	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED_C	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
#define PAGE_COPY_C	__pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)
#define PAGE_READONLY_C	__pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)

static pgprot_t protection_map[16] __ro_after_init = {
	[VM_NONE] = PAGE_NONE_C,
	[VM_READ] = PAGE_READONLY_C,
	[VM_WRITE] = PAGE_COPY_C,
	[VM_WRITE | VM_READ] = PAGE_COPY_C,
	[VM_EXEC] = PAGE_READONLY_C,
	[VM_EXEC | VM_READ] = PAGE_READONLY_C,
	[VM_EXEC | VM_WRITE] = PAGE_COPY_C,
	[VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_C,
	[VM_SHARED] = PAGE_NONE_C,
	[VM_SHARED | VM_READ] = PAGE_READONLY_C,
	[VM_SHARED | VM_WRITE] = PAGE_SHARED_C,
	[VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED_C,
	[VM_SHARED | VM_EXEC] = PAGE_READONLY_C,
	[VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_C,
	[VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_C,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_C
};
DECLARE_VM_GET_PAGE_PROT

/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
	unsigned long min_addr, max_addr;
	unsigned long addr;
	int i;

#ifdef DEBUG
	printk("start of paging_init (%p, %lx)\n", kernel_pg_dir, availmem);
#endif

	/* Fix the cache mode in the page descriptors for the 680[46]0. */
	if (CPU_IS_040_OR_060) {
		int i;
#ifndef mm_cachebits
		mm_cachebits = _PAGE_CACHE040;
#endif
		for (i = 0; i < 16; i++)
			pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
	}

	min_addr = m68k_memory[0].addr;
	max_addr = min_addr + m68k_memory[0].size - 1;
	memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0,
			  MEMBLOCK_NONE);
	for (i = 1; i < m68k_num_memory;) {
		if (m68k_memory[i].addr < min_addr) {
			printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
				m68k_memory[i].addr, m68k_memory[i].size);
			printk("Fix your bootloader or use a memfile to make use of this area!\n");
			m68k_num_memory--;
			memmove(m68k_memory + i, m68k_memory + i + 1,
				(m68k_num_memory - i) * sizeof(struct m68k_mem_info));
			continue;
		}
		memblock_add_node(m68k_memory[i].addr, m68k_memory[i].size, i,
				  MEMBLOCK_NONE);
		addr = m68k_memory[i].addr + m68k_memory[i].size - 1;
		if (addr > max_addr)
			max_addr = addr;
		i++;
	}
	m68k_memoffset = min_addr - PAGE_OFFSET;
	m68k_virt_to_node_shift = fls(max_addr - min_addr) - 6;

	module_fixup(NULL, __start_fixup, __stop_fixup);
	flush_icache();

	high_memory = phys_to_virt(max_addr) + 1;

	min_low_pfn = availmem >> PAGE_SHIFT;
	max_pfn = max_low_pfn = (max_addr >> PAGE_SHIFT) + 1;

	/* Reserve kernel text/data/bss and the memory allocated in head.S */
	memblock_reserve(m68k_memory[0].addr, availmem - m68k_memory[0].addr);

	/*
	 * Map the physical memory available into the kernel virtual
	 * address space.  Make sure memblock will not try to allocate
	 * pages beyond the memory we already mapped in head.S.
	 */
	memblock_set_bottom_up(true);

	for (i = 0; i < m68k_num_memory; i++) {
		m68k_setup_node(i);
		map_node(i);
	}

	flush_tlb_all();

	early_memtest(min_addr, max_addr);

	/* Allocate the all-zeroes page used as empty_zero_page. */
	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!empty_zero_page)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	/*
	 * Set up SFC/DFC registers
	 */
	set_fc(USER_DATA);

#ifdef DEBUG
	printk("before free_area_init\n");
#endif
	for (i = 0; i < m68k_num_memory; i++)
		if (node_present_pages(i))
			node_set_state(i, N_NORMAL_MEMORY);

	max_zone_pfn[ZONE_DMA] = memblock_end_of_DRAM();
	free_area_init(max_zone_pfn);
}