percpu.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef __ASM_PERCPU_H
#define __ASM_PERCPU_H

#include <asm/cmpxchg.h>
#include <asm/loongarch.h>

/*
 * The "address" (in fact, offset from $r21) of a per-CPU variable is close
 * to the loading address of the main kernel image, but far from where the
 * modules are loaded. Tell the compiler this fact when using explicit relocs.
 */
#if defined(MODULE) && defined(CONFIG_AS_HAS_EXPLICIT_RELOCS)
#define PER_CPU_ATTRIBUTES __attribute__((model("extreme")))
#endif

/* Use r21 for fast access */
register unsigned long __my_cpu_offset __asm__("$r21");

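/*
 * Record this CPU's per-CPU offset both in $r21, for direct use by the
 * accessors below, and in the PERCPU_BASE_KS CSR, from which kernel entry
 * code can reload $r21 after it has been clobbered (e.g. on an exception
 * taken from user mode).
 */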
static inline void set_my_cpu_offset(unsigned long off)
{
        __my_cpu_offset = off;
        csr_write64(off, PERCPU_BASE_KS);
}

#define __my_cpu_offset __my_cpu_offset

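/*
 * PERCPU_OP() generates __percpu_add(), __percpu_and() and __percpu_or().
 * The AM* instruction atomically applies the operation to memory and puts
 * the *old* value in %[ret]; combining that with 'val' again (ret c_op val)
 * yields the new value, which is what e.g. this_cpu_add_return() expects.
 */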
#define PERCPU_OP(op, asm_op, c_op) \
static __always_inline unsigned long __percpu_##op(void *ptr, \
                                                   unsigned long val, int size) \
{ \
        unsigned long ret; \
 \
        switch (size) { \
        case 4: \
                __asm__ __volatile__( \
                "am"#asm_op".w" " %[ret], %[val], %[ptr] \n" \
                : [ret] "=&r" (ret), [ptr] "+ZB"(*(u32 *)ptr) \
                : [val] "r" (val)); \
                break; \
        case 8: \
                __asm__ __volatile__( \
                "am"#asm_op".d" " %[ret], %[val], %[ptr] \n" \
                : [ret] "=&r" (ret), [ptr] "+ZB"(*(u64 *)ptr) \
                : [val] "r" (val)); \
                break; \
        default: \
                ret = 0; \
                BUILD_BUG(); \
        } \
 \
        return ret c_op val; \
}

PERCPU_OP(add, add, +)
PERCPU_OP(and, and, &)
PERCPU_OP(or, or, |)
#undef PERCPU_OP

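/*
 * For __percpu_read() and __percpu_write(), 'ptr' is the $r21-relative
 * offset of the per-CPU variable rather than a full address; the indexed
 * ldx.b/h/w/d and stx.b/h/w/d instructions add the current CPU's base held
 * in $r21, so each access is a single instruction relative to the executing
 * CPU.
 */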
static __always_inline unsigned long __percpu_read(void *ptr, int size)
{
        unsigned long ret;

        switch (size) {
        case 1:
                __asm__ __volatile__ ("ldx.b %[ret], $r21, %[ptr] \n"
                                      : [ret] "=&r"(ret)
                                      : [ptr] "r"(ptr)
                                      : "memory");
                break;
        case 2:
                __asm__ __volatile__ ("ldx.h %[ret], $r21, %[ptr] \n"
                                      : [ret] "=&r"(ret)
                                      : [ptr] "r"(ptr)
                                      : "memory");
                break;
        case 4:
                __asm__ __volatile__ ("ldx.w %[ret], $r21, %[ptr] \n"
                                      : [ret] "=&r"(ret)
                                      : [ptr] "r"(ptr)
                                      : "memory");
                break;
        case 8:
                __asm__ __volatile__ ("ldx.d %[ret], $r21, %[ptr] \n"
                                      : [ret] "=&r"(ret)
                                      : [ptr] "r"(ptr)
                                      : "memory");
                break;
        default:
                ret = 0;
                BUILD_BUG();
        }

        return ret;
}

static __always_inline void __percpu_write(void *ptr, unsigned long val, int size)
{
        switch (size) {
        case 1:
                __asm__ __volatile__("stx.b %[val], $r21, %[ptr] \n"
                                     :
                                     : [val] "r" (val), [ptr] "r" (ptr)
                                     : "memory");
                break;
        case 2:
                __asm__ __volatile__("stx.h %[val], $r21, %[ptr] \n"
                                     :
                                     : [val] "r" (val), [ptr] "r" (ptr)
                                     : "memory");
                break;
        case 4:
                __asm__ __volatile__("stx.w %[val], $r21, %[ptr] \n"
                                     :
                                     : [val] "r" (val), [ptr] "r" (ptr)
                                     : "memory");
                break;
        case 8:
                __asm__ __volatile__("stx.d %[val], $r21, %[ptr] \n"
                                     :
                                     : [val] "r" (val), [ptr] "r" (ptr)
                                     : "memory");
                break;
        default:
                BUILD_BUG();
        }
}

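/*
 * Unlike __percpu_read()/__percpu_write(), __percpu_xchg() is handed a real
 * pointer (raw_cpu_ptr() has already added the per-CPU base), so it can
 * reuse the generic LoongArch xchg helpers: amswap.w/amswap.d for 32/64-bit
 * values and __xchg_small() for 8/16-bit ones.
 */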
static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
                                                   int size)
{
        switch (size) {
        case 1:
        case 2:
                return __xchg_small((volatile void *)ptr, val, size);
        case 4:
                return __xchg_asm("amswap.w", (volatile u32 *)ptr, (u32)val);
        case 8:
                return __xchg_asm("amswap.d", (volatile u64 *)ptr, (u64)val);
        default:
                BUILD_BUG();
        }

        return 0;
}

/* this_cpu_cmpxchg */
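/*
 * cmpxchg_local() is only atomic with respect to the local CPU, so look up
 * and update the current CPU's copy with preemption disabled; the _notrace
 * variants are used so that this stays safe for code called from tracing.
 */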
#define _protect_cmpxchg_local(pcp, o, n) \
({ \
        typeof(*raw_cpu_ptr(&(pcp))) __ret; \
        preempt_disable_notrace(); \
        __ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n); \
        preempt_enable_notrace(); \
        __ret; \
})

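/*
 * _percpu_read() and _percpu_write() pass the $r21-relative offset of the
 * variable (which is what &(pcp) evaluates to here, see the comment at the
 * top of this file) straight to the single-instruction helpers above, so no
 * preemption protection is needed: the access always goes to the per-CPU
 * area of whichever CPU executes the instruction.
 */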
#define _percpu_read(pcp) \
({ \
        typeof(pcp) __retval; \
        __retval = (typeof(pcp))__percpu_read(&(pcp), sizeof(pcp)); \
        __retval; \
})

#define _percpu_write(pcp, val) \
do { \
        __percpu_write(&(pcp), (unsigned long)(val), sizeof(pcp)); \
} while (0)

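/*
 * The AMO and xchg helpers take a real pointer, so the raw_cpu_ptr() lookup
 * and the operation itself must both happen on the same CPU; wrap them in
 * preempt_disable_notrace()/preempt_enable_notrace().
 */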
#define _pcp_protect(operation, pcp, val) \
({ \
        typeof(pcp) __retval; \
        preempt_disable_notrace(); \
        __retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)), \
                                          (val), sizeof(pcp)); \
        preempt_enable_notrace(); \
        __retval; \
})

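/*
 * __percpu_add() already returns the post-operation value (old value
 * combined with 'val'), so _percpu_add_return() is simply _percpu_add().
 */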
#define _percpu_add(pcp, val) \
        _pcp_protect(__percpu_add, pcp, val)

#define _percpu_add_return(pcp, val) _percpu_add(pcp, val)

#define _percpu_and(pcp, val) \
        _pcp_protect(__percpu_and, pcp, val)

#define _percpu_or(pcp, val) \
        _pcp_protect(__percpu_or, pcp, val)

#define _percpu_xchg(pcp, val) ((typeof(pcp)) \
        _pcp_protect(__percpu_xchg, pcp, (unsigned long)(val)))

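/*
 * Only the 4- and 8-byte add/and/or variants are overridden here, matching
 * the .w/.d forms of the AM* instructions; every other size and operation
 * falls back to the generic definitions pulled in from <asm-generic/percpu.h>
 * at the bottom of this file.
 */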
#define this_cpu_add_4(pcp, val) _percpu_add(pcp, val)
#define this_cpu_add_8(pcp, val) _percpu_add(pcp, val)

#define this_cpu_add_return_4(pcp, val) _percpu_add_return(pcp, val)
#define this_cpu_add_return_8(pcp, val) _percpu_add_return(pcp, val)

#define this_cpu_and_4(pcp, val) _percpu_and(pcp, val)
#define this_cpu_and_8(pcp, val) _percpu_and(pcp, val)

#define this_cpu_or_4(pcp, val) _percpu_or(pcp, val)
#define this_cpu_or_8(pcp, val) _percpu_or(pcp, val)

#define this_cpu_read_1(pcp) _percpu_read(pcp)
#define this_cpu_read_2(pcp) _percpu_read(pcp)
#define this_cpu_read_4(pcp) _percpu_read(pcp)
#define this_cpu_read_8(pcp) _percpu_read(pcp)

#define this_cpu_write_1(pcp, val) _percpu_write(pcp, val)
#define this_cpu_write_2(pcp, val) _percpu_write(pcp, val)
#define this_cpu_write_4(pcp, val) _percpu_write(pcp, val)
#define this_cpu_write_8(pcp, val) _percpu_write(pcp, val)

#define this_cpu_xchg_1(pcp, val) _percpu_xchg(pcp, val)
#define this_cpu_xchg_2(pcp, val) _percpu_xchg(pcp, val)
#define this_cpu_xchg_4(pcp, val) _percpu_xchg(pcp, val)
#define this_cpu_xchg_8(pcp, val) _percpu_xchg(pcp, val)

#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)

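/*
 * Hypothetical usage sketch (example_counter is illustrative only, not part
 * of this header):
 *
 *   DEFINE_PER_CPU(unsigned long, example_counter);
 *
 *   // resolves to this_cpu_add_8(..., 1): preemption is briefly disabled
 *   // around an amadd.d on this CPU's copy
 *   this_cpu_inc(example_counter);
 *
 *   // a single ldx.d relative to $r21; no preempt_disable() needed
 *   unsigned long v = this_cpu_read(example_counter);
 */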
#include <asm-generic/percpu.h>

#endif /* __ASM_PERCPU_H */