// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/copypage-v4wb.c
 *
 *  Copyright (C) 1995-1999 Russell King
 */
#include <linux/init.h>
#include <linux/highmem.h>

/*
 * ARMv4 optimised copy_user_highpage
 *
 * We flush the destination cache lines just before we write the data into the
 * corresponding address.  Since the Dcache is read-allocate, this removes the
 * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
 * and merged as appropriate.
 *
 * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
 * instruction.  If your processor does not supply this, you have to write your
 * own copy_user_highpage that does the right thing.
 */
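/*
 * The copy loop below moves 64 bytes per iteration (four ldm/stm bursts of
 * the four registers r3, r4, ip and lr), issuing an "invalidate D line"
 * MCR ahead of each 32-byte run of stores so the destination is never
 * allocated into the D-cache.  The "@ n" annotations appear to be
 * per-instruction cycle counts.
 */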
static void v4wb_copy_user_page(void *kto, const void *kfrom)
{
	int tmp;

	asm volatile ("\
	.syntax unified\n\
	ldmia	%1!, {r3, r4, ip, lr}		@ 4\n\
1:	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r3, r4, ip, lr}		@ 4\n\
	ldmia	%1!, {r3, r4, ip, lr}		@ 4+1\n\
	stmia	%0!, {r3, r4, ip, lr}		@ 4\n\
	ldmia	%1!, {r3, r4, ip, lr}		@ 4\n\
	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r3, r4, ip, lr}		@ 4\n\
	ldmia	%1!, {r3, r4, ip, lr}		@ 4\n\
	subs	%2, %2, #1			@ 1\n\
	stmia	%0!, {r3, r4, ip, lr}		@ 4\n\
	ldmiane	%1!, {r3, r4, ip, lr}		@ 4\n\
	bne	1b				@ 1\n\
	mcr	p15, 0, %1, c7, c10, 4		@ 1   drain WB"
	: "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
	: "2" (PAGE_SIZE / 64)
	: "r3", "r4", "ip", "lr");
}

void v4wb_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kto = kmap_atomic(to);
	kfrom = kmap_atomic(from);
	/*
	 * Write back the user mapping of the source page so that reads
	 * through the kernel alias (kfrom) see up-to-date data.
	 */
	flush_cache_page(vma, vaddr, page_to_pfn(from));
	v4wb_copy_user_page(kto, kfrom);
	kunmap_atomic(kfrom);
	kunmap_atomic(kto);
}

/*
 * ARMv4 optimised clear_user_page
 *
 * Same story as above.
 */
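/*
 * The clear loop zeroes four registers once up front, then stores 64 bytes
 * of zeroes per iteration, again invalidating each destination D line
 * before the corresponding 32 bytes of stores.
 */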
void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *ptr, *kaddr = kmap_atomic(page);

	asm volatile("\
	mov	r1, %2				@ 1\n\
	mov	r2, #0				@ 1\n\
	mov	r3, #0				@ 1\n\
	mov	ip, #0				@ 1\n\
	mov	lr, #0				@ 1\n\
1:	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	subs	r1, r1, #1			@ 1\n\
	bne	1b				@ 1\n\
	mcr	p15, 0, r1, c7, c10, 4		@ 1   drain WB"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 64)
	: "r1", "r2", "r3", "ip", "lr");
	kunmap_atomic(kaddr);
}

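/*
 * Page-operation vector for write-back ARMv4 cores.  The boot-time processor
 * setup copies these function pointers out, so the structure itself can live
 * in __initdata and be discarded once initialisation is complete.
 */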
struct cpu_user_fns v4wb_user_fns __initdata = {
	.cpu_clear_user_highpage	= v4wb_clear_user_highpage,
	.cpu_copy_user_highpage		= v4wb_copy_user_highpage,
};