#include <linux/linkage.h>
#include <asm-generic/export.h>
#include <asm/asm.h>
#include <asm/asm-extable.h>
#include <asm/csr.h>
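
/*
 * fixup wraps a single user-memory access: local label 100 marks the
 * access instruction, and _asm_extable records a (100b -> \lbl) entry in
 * the exception table so a fault at that instruction branches to \lbl.
 */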
	.macro fixup op reg addr lbl
100:
	\op \reg, \addr
	_asm_extable	100b, \lbl
	.endm

ENTRY(__asm_copy_to_user)
ENTRY(__asm_copy_from_user)

	/* Enable access to user memory */
	li	t6, SR_SUM
	csrs	CSR_STATUS, t6

	/*
	 * Save the terminal dst address; the fixup path uses it to compute
	 * the number of bytes left uncopied when an exception occurs.
	 */
	add	t5, a0, a2

	/*
	 * Register allocation for code below:
	 * a0 - start of uncopied dst
	 * a1 - start of uncopied src
	 * a2 - size
	 * t0 - end of uncopied dst
	 */
	add	t0, a0, a2

	/*
	 * Use byte copy only if too small.
	 * SZREG holds 4 for RV32 and 8 for RV64
	 */
	li	a3, 9*SZREG	/* ensures word_copy gets at least one full pass after dst alignment */
	bltu	a2, a3, .Lbyte_copy_tail

	/*
	 * Copy first bytes until dst is aligned to word boundary.
	 * a0 - start of dst
	 * t1 - start of aligned dst
	 */
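	/* t1 = (a0 + SZREG-1) & ~(SZREG-1): dst rounded up to the next SZREG boundary */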
	addi	t1, a0, SZREG-1
	andi	t1, t1, ~(SZREG-1)
	/* dst is already aligned, skip */
	beq	a0, t1, .Lskip_align_dst
1:
	/* a5 - one byte for copying data */
	fixup lb	a5, 0(a1), 10f
	addi	a1, a1, 1	/* src */
	fixup sb	a5, 0(a0), 10f
	addi	a0, a0, 1	/* dst */
	bltu	a0, t1, 1b	/* t1 - start of aligned dst */

.Lskip_align_dst:
	/*
	 * Now dst is aligned.
	 * Use shift-copy if src is misaligned.
	 * Use word-copy if both src and dst are aligned, because then
	 * shift-copy would only perform redundant zero-bit shifts.
	 */
	/* a1 - start of src */
	andi	a3, a1, SZREG-1
	bnez	a3, .Lshift_copy

.Lword_copy:
	/*
	 * Both src and dst are aligned, unrolled word copy
	 *
	 * a0 - start of aligned dst
	 * a1 - start of aligned src
	 * t0 - end of aligned dst
	 */
	addi	t0, t0, -(8*SZREG)	/* stop 8*SZREG early so the unrolled loop cannot overrun */
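	/*
	 * 8-way unrolled copy: each pass moves 8*SZREG bytes through eight
	 * scratch registers, issuing all loads before the stores.
	 */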
2:
	fixup REG_L	a4, 0(a1), 10f
	fixup REG_L	a5, SZREG(a1), 10f
	fixup REG_L	a6, 2*SZREG(a1), 10f
	fixup REG_L	a7, 3*SZREG(a1), 10f
	fixup REG_L	t1, 4*SZREG(a1), 10f
	fixup REG_L	t2, 5*SZREG(a1), 10f
	fixup REG_L	t3, 6*SZREG(a1), 10f
	fixup REG_L	t4, 7*SZREG(a1), 10f
	fixup REG_S	a4, 0(a0), 10f
	fixup REG_S	a5, SZREG(a0), 10f
	fixup REG_S	a6, 2*SZREG(a0), 10f
	fixup REG_S	a7, 3*SZREG(a0), 10f
	fixup REG_S	t1, 4*SZREG(a0), 10f
	fixup REG_S	t2, 5*SZREG(a0), 10f
	fixup REG_S	t3, 6*SZREG(a0), 10f
	fixup REG_S	t4, 7*SZREG(a0), 10f
	addi	a0, a0, 8*SZREG
	addi	a1, a1, 8*SZREG
	bltu	a0, t0, 2b
	addi	t0, t0, 8*SZREG	/* revert to original value */
	j	.Lbyte_copy_tail

.Lshift_copy:
	/*
	 * Word copy with shifting.
	 * For a misaligned src we still perform aligned word loads, but each
	 * dst word is assembled from the value fetched in the previous
	 * iteration combined with the current one via shifts.
	 * This is safe because the aligned loads never cross a page boundary
	 * beyond the words that actually contain source bytes.
	 *
	 * a0 - start of aligned dst
	 * a1 - start of src
	 * a3 - a1 & (SZREG-1), the misalignment of src in bytes
	 * t0 - end of uncopied dst
	 * t1 - end of aligned dst
	 */
	/* calculating aligned word boundary for dst */
	andi	t1, t0, ~(SZREG-1)
	/* Converting unaligned src to aligned src */
	andi	a1, a1, ~(SZREG-1)

	/*
	 * Calculate shifts
	 * t3 - prev shift
	 * t4 - current shift
	 */
	slli	t3, a3, 3	/* converting bytes in a3 to bits */
	li	a5, SZREG*8
	sub	t4, a5, t3

	/* Load the first word to combine with second word */
	fixup REG_L	a5, 0(a1), 10f
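
	/*
	 * Each pass builds one aligned dst word from two adjacent aligned
	 * src words: (prev >> t3) | (next << t4), with t3 the misalignment
	 * in bits and t4 = SZREG*8 - t3.
	 */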
3:
	/* Main shifting copy
	 *
	 * a0 - start of aligned dst
	 * a1 - start of aligned src
	 * t1 - end of aligned dst
	 */

	/* At least one iteration will be executed */
	srl	a4, a5, t3
	fixup REG_L	a5, SZREG(a1), 10f
	addi	a1, a1, SZREG
	sll	a2, a5, t4
	or	a2, a2, a4
	fixup REG_S	a2, 0(a0), 10f
	addi	a0, a0, SZREG
	bltu	a0, t1, 3b

	/* Revert src to original unaligned value */
	add	a1, a1, a3

.Lbyte_copy_tail:
	/*
	 * Byte copy anything left.
	 *
	 * a0 - start of remaining dst
	 * a1 - start of remaining src
	 * t0 - end of remaining dst
	 */
	bgeu	a0, t0, .Lout_copy_user	/* check if end of copy */
4:
	fixup lb	a5, 0(a1), 10f
	addi	a1, a1, 1	/* src */
	fixup sb	a5, 0(a0), 10f
	addi	a0, a0, 1	/* dst */
	bltu	a0, t0, 4b	/* t0 - end of dst */

.Lout_copy_user:
	/* Disable access to user memory */
	csrc	CSR_STATUS, t6
	li	a0, 0
	ret

	/* Exception fixup code */
10:
	/* Disable access to user memory */
	csrc	CSR_STATUS, t6
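	/*
	 * t5 holds the terminal dst address, so t5 - a0 is the number of
	 * bytes that were not copied, which is what the caller expects back.
	 */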
	sub	a0, t5, a0
	ret
ENDPROC(__asm_copy_to_user)
ENDPROC(__asm_copy_from_user)
EXPORT_SYMBOL(__asm_copy_to_user)
EXPORT_SYMBOL(__asm_copy_from_user)

ENTRY(__clear_user)

	/* Enable access to user memory */
	li	t6, SR_SUM
	csrs	CSR_STATUS, t6

	add	a3, a0, a1
	addi	t0, a0, SZREG-1
	andi	t1, a3, ~(SZREG-1)
	andi	t0, t0, ~(SZREG-1)
	/*
	 * a3: terminal address of target region
	 * t0: lowest SZREG-aligned address in target region
	 * t1: highest SZREG-aligned address in target region
	 */
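	/*
	 * Clear in three phases: byte stores up to the first aligned address
	 * (4:), SZREG-wide stores across the aligned body (1:), then byte
	 * stores for the unaligned tail (5:).
	 */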
	bgeu	t0, t1, 2f
	bltu	a0, t0, 4f
1:
	fixup REG_S, zero, (a0), 11f
	addi	a0, a0, SZREG
	bltu	a0, t1, 1b
2:
	bltu	a0, a3, 5f
3:
	/* Disable access to user memory */
	csrc	CSR_STATUS, t6
	li	a0, 0
	ret
4: /* Edge case: unalignment */
	fixup sb, zero, (a0), 11f
	addi	a0, a0, 1
	bltu	a0, t0, 4b
	j	1b

5: /* Edge case: remainder */
	fixup sb, zero, (a0), 11f
	addi	a0, a0, 1
	bltu	a0, a3, 5b
	j	3b

	/* Exception fixup code */
11:
	/* Disable access to user memory */
	csrc	CSR_STATUS, t6
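	/*
	 * a3 holds the terminal address, so a3 - a0 is the number of bytes
	 * that were not cleared.
	 */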
	sub	a0, a3, a0
	ret
ENDPROC(__clear_user)
EXPORT_SYMBOL(__clear_user)