copypage-xsc3.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/copypage-xsc3.c
 *
 *  Copyright (C) 2004 Intel Corp.
 *
 * Adapted for 3rd gen XScale core, no more mini-dcache
 * Author: Matt Gilbert ([email protected])
 */
#include <linux/init.h>
#include <linux/highmem.h>

/*
 * General note:
 *  We don't really want write-allocate cache behaviour for these functions
 *  since that will just eat through 8K of the cache.
 */
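
/*
 * Implementation note: both loops below pursue that goal the same way --
 * each 32-byte destination cache line is invalidated (mcr p15, 0, rX,
 * c7, c6, 1, i.e. invalidate D-cache line by MVA) immediately before it
 * is completely overwritten, so the stale line is discarded rather than
 * fetched from memory or written back.
 */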

/*
 * XSC3 optimised copy_user_highpage
 *
 * The source page may have some clean entries in the cache already, but we
 * can safely ignore them - break_cow() will flush them out of the cache
 * if we eventually end up using our copied page.
 */
static void xsc3_mc_copy_user_page(void *kto, const void *kfrom)
{
	int tmp;

	/*
	 * %2 starts at PAGE_SIZE / 64 - 1.  Each pass through 2: copies one
	 * 64-byte chunk (eight ldrd/strd pairs); bgt loops back through the
	 * prefetches at 1:, and beq takes one final, prefetch-free pass for
	 * the last chunk.
	 */
	asm volatile ("\
	.arch xscale					\n\
	pld	[%1, #0]				\n\
	pld	[%1, #32]				\n\
1:	pld	[%1, #64]				\n\
	pld	[%1, #96]				\n\
							\n\
2:	ldrd	r2, r3, [%1], #8			\n\
	ldrd	r4, r5, [%1], #8			\n\
	mcr	p15, 0, %0, c7, c6, 1		@ invalidate\n\
	strd	r2, r3, [%0], #8			\n\
	ldrd	r2, r3, [%1], #8			\n\
	strd	r4, r5, [%0], #8			\n\
	ldrd	r4, r5, [%1], #8			\n\
	strd	r2, r3, [%0], #8			\n\
	strd	r4, r5, [%0], #8			\n\
	ldrd	r2, r3, [%1], #8			\n\
	ldrd	r4, r5, [%1], #8			\n\
	mcr	p15, 0, %0, c7, c6, 1		@ invalidate\n\
	strd	r2, r3, [%0], #8			\n\
	ldrd	r2, r3, [%1], #8			\n\
	subs	%2, %2, #1				\n\
	strd	r4, r5, [%0], #8			\n\
	ldrd	r4, r5, [%1], #8			\n\
	strd	r2, r3, [%0], #8			\n\
	strd	r4, r5, [%0], #8			\n\
	bgt	1b					\n\
	beq	2b				"
	: "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
	: "2" (PAGE_SIZE / 64 - 1)
	: "r2", "r3", "r4", "r5");
}
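
#if 0
/*
 * Reference only -- a plain-C sketch, not part of the original file and
 * never compiled.  It shows what the assembly above does, minus the
 * prefetch and cache-line-invalidate tuning: copy the page one 64-byte
 * chunk at a time, eight 8-byte moves per chunk.
 */
static void xsc3_mc_copy_user_page_c(void *kto, const void *kfrom)
{
	u64 *to = kto;
	const u64 *from = kfrom;
	int i, j;

	for (i = 0; i < PAGE_SIZE / 64; i++)	/* one chunk per pass */
		for (j = 0; j < 8; j++)		/* eight ldrd/strd pairs */
			*to++ = *from++;
}
#endif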

void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kto = kmap_atomic(to);
	kfrom = kmap_atomic(from);
	/* flush the source page's user alias so we copy up-to-date data */
	flush_cache_page(vma, vaddr, page_to_pfn(from));
	xsc3_mc_copy_user_page(kto, kfrom);
	kunmap_atomic(kfrom);
	kunmap_atomic(kto);
}

/*
 * XScale optimised clear_user_page
 */
void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *ptr, *kaddr = kmap_atomic(page);

	/*
	 * r1 counts 32-byte cache lines (PAGE_SIZE / 32, passed as an
	 * assembler immediate via the "I" constraint); each pass
	 * invalidates one line and zeroes it with four 8-byte strd stores.
	 */
	asm volatile ("\
	.arch xscale					\n\
	mov	r1, %2					\n\
	mov	r2, #0					\n\
	mov	r3, #0					\n\
1:	mcr	p15, 0, %0, c7, c6, 1		@ invalidate line\n\
	strd	r2, r3, [%0], #8			\n\
	strd	r2, r3, [%0], #8			\n\
	strd	r2, r3, [%0], #8			\n\
	strd	r2, r3, [%0], #8			\n\
	subs	r1, r1, #1				\n\
	bne	1b"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 32)
	: "r1", "r2", "r3");
	kunmap_atomic(kaddr);
}
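
#if 0
/*
 * Reference only -- a plain-C sketch, not part of the original file and
 * never compiled: zero one 32-byte cache line per pass, PAGE_SIZE / 32
 * passes in total.
 */
static void xsc3_mc_clear_user_page_c(void *kaddr)
{
	u64 *p = kaddr;
	int i;

	for (i = 0; i < PAGE_SIZE / 32; i++) {
		*p++ = 0;	/* four 8-byte stores per line */
		*p++ = 0;
		*p++ = 0;
		*p++ = 0;
	}
}
#endif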

struct cpu_user_fns xsc3_mc_user_fns __initdata = {
	.cpu_clear_user_highpage	= xsc3_mc_clear_user_highpage,
	.cpu_copy_user_highpage		= xsc3_mc_copy_user_highpage,
};
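
/*
 * For context (paraphrased; the exact form varies by kernel version and
 * the MULTI_USER configuration): the generic ARM code points cpu_user_fns
 * at this table during boot, and arch/arm/include/asm/page.h dispatches
 * through it, roughly:
 *
 *	#define clear_user_highpage(page, vaddr)	\
 *		__cpu_clear_user_highpage(page, vaddr)
 *	#define copy_user_highpage(to, from, vaddr, vma) \
 *		__cpu_copy_user_highpage(to, from, vaddr, vma)
 */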