  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * linux/arch/arm/mm/copypage-v6.c
  4. *
  5. * Copyright (C) 2002 Deep Blue Solutions Ltd, All Rights Reserved.
  6. */
  7. #include <linux/init.h>
  8. #include <linux/spinlock.h>
  9. #include <linux/mm.h>
  10. #include <linux/highmem.h>
  11. #include <linux/pagemap.h>
  12. #include <asm/shmparam.h>
  13. #include <asm/tlbflush.h>
  14. #include <asm/cacheflush.h>
  15. #include <asm/cachetype.h>
  16. #include "mm.h"
/*
 * The aliasing copy/clear paths below use two fixed kernel windows
 * (COPYPAGE_V6_FROM/TO) indexed by cache colour; CACHE_COLOUR() only
 * covers colours for an SHMLBA of up to 16K, so larger values need
 * new code.
 */
#if SHMLBA > 16384
#error FIX ME
#endif

/* Serialises use of the shared COPYPAGE_V6_FROM/TO mapping windows. */
static DEFINE_RAW_SPINLOCK(v6_lock);
  21. /*
  22. * Copy the user page. No aliasing to deal with so we can just
  23. * attack the kernel's existing mapping of these pages.
  24. */
  25. static void v6_copy_user_highpage_nonaliasing(struct page *to,
  26. struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
  27. {
  28. void *kto, *kfrom;
  29. kfrom = kmap_atomic(from);
  30. kto = kmap_atomic(to);
  31. copy_page(kto, kfrom);
  32. kunmap_atomic(kto);
  33. kunmap_atomic(kfrom);
  34. }
  35. /*
  36. * Clear the user page. No aliasing to deal with so we can just
  37. * attack the kernel's existing mapping of this page.
  38. */
  39. static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr)
  40. {
  41. void *kaddr = kmap_atomic(page);
  42. clear_page(kaddr);
  43. kunmap_atomic(kaddr);
  44. }
/*
 * Discard data in the kernel mapping for the new page.
 * FIXME: needs this MCRR to be supported.
 *
 * Issues an MCRR to p15, c6 covering [kto, kto + PAGE_SIZE - 1] —
 * a D-cache range operation that drops any stale cache lines for
 * the destination page's kernel mapping before it is written
 * through an aliased colour window.  The raw 0xec401f06 encoding is
 * noted in the asm comment because not all assemblers accept the
 * MCRR mnemonic.
 */
static void discard_old_kernel_data(void *kto)
{
	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
		:
		: "r" (kto),
		  "r" ((unsigned long)kto + PAGE_SIZE - 1)
		: "cc");
}
/*
 * Copy the page, taking account of the cache colour.
 *
 * On an aliasing VIPT cache, the kernel's own mapping of the pages may
 * occupy a different cache colour than the user's mapping at @vaddr.
 * To avoid stale aliases, the copy is performed through temporary
 * kernel windows (COPYPAGE_V6_FROM/TO) placed at the same cache colour
 * as the user address.
 */
static void v6_copy_user_highpage_aliasing(struct page *to,
	struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
	unsigned int offset = CACHE_COLOUR(vaddr);
	unsigned long kfrom, kto;

	/*
	 * If the source page hasn't been marked D-cache clean yet, write
	 * back its kernel-mapping cache lines so the copy below (done via
	 * a differently-coloured alias) sees up-to-date data.
	 */
	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
		__flush_dcache_page(page_mapping_file(from), from);

	/*
	 * Invalidate stale cache lines for the destination's kernel
	 * mapping before writing it through the aliased window.
	 * FIXME: not highmem safe — page_address() may be NULL for a
	 * highmem page.
	 */
	discard_old_kernel_data(page_address(to));

	/*
	 * Now copy the page using the same cache colour as the
	 * pages ultimate destination.  The colour windows are a single
	 * shared resource, hence the lock.
	 */
	raw_spin_lock(&v6_lock);

	kfrom = COPYPAGE_V6_FROM + (offset << PAGE_SHIFT);
	kto = COPYPAGE_V6_TO + (offset << PAGE_SHIFT);

	/* Point the colour-matched windows at the two pages. */
	set_top_pte(kfrom, mk_pte(from, PAGE_KERNEL));
	set_top_pte(kto, mk_pte(to, PAGE_KERNEL));

	copy_page((void *)kto, (void *)kfrom);

	raw_spin_unlock(&v6_lock);
}
/*
 * Clear the user page. We need to deal with the aliasing issues,
 * so remap the kernel page into the same cache colour as the user
 * page.
 */
static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
{
	/* Colour-matched kernel window for the page's user address. */
	unsigned long to = COPYPAGE_V6_TO + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);

	/*
	 * Drop stale cache lines for the page's kernel mapping before
	 * writing it through the aliased window.
	 * FIXME: not highmem safe — page_address() may be NULL for a
	 * highmem page.
	 */
	discard_old_kernel_data(page_address(page));

	/*
	 * Now clear the page using the same cache colour as
	 * the pages ultimate destination.  The window is shared, hence
	 * the lock.
	 */
	raw_spin_lock(&v6_lock);

	set_top_pte(to, mk_pte(page, PAGE_KERNEL));
	clear_page((void *)to);

	raw_spin_unlock(&v6_lock);
}
/*
 * Default (non-aliasing) implementations; swapped for the aliasing
 * variants at init time if the cache is VIPT-aliasing.
 */
struct cpu_user_fns v6_user_fns __initdata = {
	.cpu_clear_user_highpage = v6_clear_user_highpage_nonaliasing,
	.cpu_copy_user_highpage = v6_copy_user_highpage_nonaliasing,
};
  104. static int __init v6_userpage_init(void)
  105. {
  106. if (cache_is_vipt_aliasing()) {
  107. cpu_user.cpu_clear_user_highpage = v6_clear_user_highpage_aliasing;
  108. cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing;
  109. }
  110. return 0;
  111. }
  112. core_initcall(v6_userpage_init);