  1. /* SPDX-License-Identifier: GPL-2.0-only */
  2. /*
  3. * Copyright (C) 2012 ARM Ltd.
  4. */
  5. #ifndef __ASM_FP_H
  6. #define __ASM_FP_H
  7. #include <asm/errno.h>
  8. #include <asm/ptrace.h>
  9. #include <asm/processor.h>
  10. #include <asm/sigcontext.h>
  11. #include <asm/sysreg.h>
  12. #ifndef __ASSEMBLY__
  13. #include <linux/bitmap.h>
  14. #include <linux/build_bug.h>
  15. #include <linux/bug.h>
  16. #include <linux/cache.h>
  17. #include <linux/init.h>
  18. #include <linux/stddef.h>
  19. #include <linux/types.h>
  20. #ifdef CONFIG_COMPAT
  21. /* Masks for extracting the FPSR and FPCR from the FPSCR */
  22. #define VFP_FPSCR_STAT_MASK 0xf800009f
  23. #define VFP_FPSCR_CTRL_MASK 0x07f79f00
  24. /*
  25. * The VFP state has 32x64-bit registers and a single 32-bit
  26. * control/status register.
  27. */
  28. #define VFP_STATE_SIZE ((32 * 8) + 4)
  29. #endif
  30. /*
  31. * When we defined the maximum SVE vector length we defined the ABI so
  32. * that the maximum vector length included all the reserved for future
  33. * expansion bits in ZCR rather than those just currently defined by
  34. * the architecture. While SME follows a similar pattern the fact that
  35. * it includes a square matrix means that any allocations that attempt
  36. * to cover the maximum potential vector length (such as happen with
  37. * the regset used for ptrace) end up being extremely large. Define
  38. * the much lower actual limit for use in such situations.
  39. */
  40. #define SME_VQ_MAX 16
  41. struct task_struct;
  42. extern void fpsimd_save_state(struct user_fpsimd_state *state);
  43. extern void fpsimd_load_state(struct user_fpsimd_state *state);
  44. extern void fpsimd_thread_switch(struct task_struct *next);
  45. extern void fpsimd_flush_thread(void);
  46. extern void fpsimd_signal_preserve_current_state(void);
  47. extern void fpsimd_preserve_current_state(void);
  48. extern void fpsimd_restore_current_state(void);
  49. extern void fpsimd_update_current_state(struct user_fpsimd_state const *state);
  50. extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state,
  51. void *sve_state, unsigned int sve_vl,
  52. void *za_state, unsigned int sme_vl,
  53. u64 *svcr);
  54. extern void fpsimd_flush_task_state(struct task_struct *target);
  55. extern void fpsimd_save_and_flush_cpu_state(void);
  56. static inline bool thread_sm_enabled(struct thread_struct *thread)
  57. {
  58. return system_supports_sme() && (thread->svcr & SVCR_SM_MASK);
  59. }
  60. static inline bool thread_za_enabled(struct thread_struct *thread)
  61. {
  62. return system_supports_sme() && (thread->svcr & SVCR_ZA_MASK);
  63. }
  64. /* Maximum VL that SVE/SME VL-agnostic software can transparently support */
  65. #define VL_ARCH_MAX 0x100
  66. /* Offset of FFR in the SVE register dump */
  67. static inline size_t sve_ffr_offset(int vl)
  68. {
  69. return SVE_SIG_FFR_OFFSET(sve_vq_from_vl(vl)) - SVE_SIG_REGS_OFFSET;
  70. }
  71. static inline void *sve_pffr(struct thread_struct *thread)
  72. {
  73. unsigned int vl;
  74. if (system_supports_sme() && thread_sm_enabled(thread))
  75. vl = thread_get_sme_vl(thread);
  76. else
  77. vl = thread_get_sve_vl(thread);
  78. return (char *)thread->sve_state + sve_ffr_offset(vl);
  79. }
  80. extern void sve_save_state(void *state, u32 *pfpsr, int save_ffr);
  81. extern void sve_load_state(void const *state, u32 const *pfpsr,
  82. int restore_ffr);
  83. extern void sve_flush_live(bool flush_ffr, unsigned long vq_minus_1);
  84. extern unsigned int sve_get_vl(void);
  85. extern void sve_set_vq(unsigned long vq_minus_1);
  86. extern void sme_set_vq(unsigned long vq_minus_1);
  87. extern void za_save_state(void *state);
  88. extern void za_load_state(void const *state);
  89. struct arm64_cpu_capabilities;
  90. extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused);
  91. extern void sme_kernel_enable(const struct arm64_cpu_capabilities *__unused);
  92. extern void fa64_kernel_enable(const struct arm64_cpu_capabilities *__unused);
  93. extern u64 read_zcr_features(void);
  94. extern u64 read_smcr_features(void);
  95. /*
  96. * Helpers to translate bit indices in sve_vq_map to VQ values (and
  97. * vice versa). This allows find_next_bit() to be used to find the
  98. * _maximum_ VQ not exceeding a certain value.
  99. */
/*
 * Map a vector quadword count (VQ) to its bit index in a vq_map bitmap.
 * Bit 0 corresponds to SVE_VQ_MAX and larger VQs get smaller indices,
 * so find_next_bit() locates the largest available VQ first.
 */
static inline unsigned int __vq_to_bit(unsigned int vq)
{
	return SVE_VQ_MAX - vq;
}
/* Inverse of __vq_to_bit(): recover the VQ encoded by bitmap index @bit */
static inline unsigned int __bit_to_vq(unsigned int bit)
{
	return SVE_VQ_MAX - bit;
}
/*
 * System-wide description of the supported vector lengths for one
 * vector extension (SVE or SME), indexed by enum vec_type.
 */
struct vl_info {
	enum vec_type type;
	const char *name;		/* For display purposes */

	/* Minimum supported vector length across all CPUs */
	int min_vl;

	/* Maximum supported vector length across all CPUs */
	int max_vl;
	/* Maximum vector length usable under virtualisation */
	int max_virtualisable_vl;

	/*
	 * Set of available vector lengths,
	 * where length vq encoded as bit __vq_to_bit(vq):
	 */
	DECLARE_BITMAP(vq_map, SVE_VQ_MAX);

	/* Set of vector lengths present on at least one cpu: */
	DECLARE_BITMAP(vq_partial_map, SVE_VQ_MAX);
};
  124. #ifdef CONFIG_ARM64_SVE
  125. extern void sve_alloc(struct task_struct *task, bool flush);
  126. extern void fpsimd_release_task(struct task_struct *task);
  127. extern void fpsimd_sync_to_sve(struct task_struct *task);
  128. extern void fpsimd_force_sync_to_sve(struct task_struct *task);
  129. extern void sve_sync_to_fpsimd(struct task_struct *task);
  130. extern void sve_sync_from_fpsimd_zeropad(struct task_struct *task);
  131. extern int vec_set_vector_length(struct task_struct *task, enum vec_type type,
  132. unsigned long vl, unsigned long flags);
  133. extern int sve_set_current_vl(unsigned long arg);
  134. extern int sve_get_current_vl(void);
/* Disable EL0 access to SVE by clearing CPACR_EL1.ZEN_EL0EN */
static inline void sve_user_disable(void)
{
	sysreg_clear_set(cpacr_el1, CPACR_EL1_ZEN_EL0EN, 0);
}
/* Enable EL0 access to SVE by setting CPACR_EL1.ZEN_EL0EN */
static inline void sve_user_enable(void)
{
	sysreg_clear_set(cpacr_el1, 0, CPACR_EL1_ZEN_EL0EN);
}
/*
 * Update the LEN field of ZCR register @reg to @val, leaving all other
 * bits untouched.  The sysreg write is skipped when the register
 * already holds the desired value, avoiding an unnecessary (and
 * potentially expensive) system register write.
 */
#define sve_cond_update_zcr_vq(val, reg)		\
	do {						\
		u64 __zcr = read_sysreg_s((reg));	\
		u64 __new = __zcr & ~ZCR_ELx_LEN_MASK;	\
		__new |= (val) & ZCR_ELx_LEN_MASK;	\
		if (__zcr != __new)			\
			write_sysreg_s(__new, (reg));	\
	} while (0)
  151. /*
  152. * Probing and setup functions.
  153. * Calls to these functions must be serialised with one another.
  154. */
  155. enum vec_type;
  156. extern void __init vec_init_vq_map(enum vec_type type);
  157. extern void vec_update_vq_map(enum vec_type type);
  158. extern int vec_verify_vq_map(enum vec_type type);
  159. extern void __init sve_setup(void);
  160. extern __ro_after_init struct vl_info vl_info[ARM64_VEC_MAX];
/*
 * Write @val into the LEN field of the vector length control register
 * for @type (ZCR_EL1 for SVE, SMCR_EL1 for SME), preserving all other
 * bits via read-modify-write.
 *
 * @val is OR-ed in after the LEN field is cleared, so it is assumed to
 * contain only LEN-field bits — callers must pre-mask it.
 */
static inline void write_vl(enum vec_type type, u64 val)
{
	u64 tmp;

	switch (type) {
#ifdef CONFIG_ARM64_SVE
	case ARM64_VEC_SVE:
		tmp = read_sysreg_s(SYS_ZCR_EL1) & ~ZCR_ELx_LEN_MASK;
		write_sysreg_s(tmp | val, SYS_ZCR_EL1);
		break;
#endif
#ifdef CONFIG_ARM64_SME
	case ARM64_VEC_SME:
		tmp = read_sysreg_s(SYS_SMCR_EL1) & ~SMCR_ELx_LEN_MASK;
		write_sysreg_s(tmp | val, SYS_SMCR_EL1);
		break;
#endif
	default:
		/* Unknown vector type: warn once rather than touch a sysreg */
		WARN_ON_ONCE(1);
		break;
	}
}
  182. static inline int vec_max_vl(enum vec_type type)
  183. {
  184. return vl_info[type].max_vl;
  185. }
  186. static inline int vec_max_virtualisable_vl(enum vec_type type)
  187. {
  188. return vl_info[type].max_virtualisable_vl;
  189. }
  190. static inline int sve_max_vl(void)
  191. {
  192. return vec_max_vl(ARM64_VEC_SVE);
  193. }
  194. static inline int sve_max_virtualisable_vl(void)
  195. {
  196. return vec_max_virtualisable_vl(ARM64_VEC_SVE);
  197. }
  198. /* Ensure vq >= SVE_VQ_MIN && vq <= SVE_VQ_MAX before calling this function */
  199. static inline bool vq_available(enum vec_type type, unsigned int vq)
  200. {
  201. return test_bit(__vq_to_bit(vq), vl_info[type].vq_map);
  202. }
/* SVE convenience wrapper for vq_available(); same VQ range rules apply */
static inline bool sve_vq_available(unsigned int vq)
{
	return vq_available(ARM64_VEC_SVE, vq);
}
  207. size_t sve_state_size(struct task_struct const *task);
  208. #else /* ! CONFIG_ARM64_SVE */
/*
 * Stubs for the !CONFIG_ARM64_SVE case: SVE state management becomes a
 * no-op, queries report nothing available, and the VL set/get accessors
 * fail with -EINVAL.
 */
static inline void sve_alloc(struct task_struct *task, bool flush) { }
static inline void fpsimd_release_task(struct task_struct *task) { }
static inline void sve_sync_to_fpsimd(struct task_struct *task) { }
static inline void sve_sync_from_fpsimd_zeropad(struct task_struct *task) { }

static inline int sve_max_virtualisable_vl(void)
{
	return 0;
}

static inline int sve_set_current_vl(unsigned long arg)
{
	return -EINVAL;
}

static inline int sve_get_current_vl(void)
{
	return -EINVAL;
}

static inline int sve_max_vl(void)
{
	return -EINVAL;
}

static inline bool sve_vq_available(unsigned int vq) { return false; }

/* SVE is compiled out: reaching these is a build-time bug */
static inline void sve_user_disable(void) { BUILD_BUG(); }
static inline void sve_user_enable(void) { BUILD_BUG(); }

#define sve_cond_update_zcr_vq(val, reg) do { } while (0)

static inline void vec_init_vq_map(enum vec_type t) { }
static inline void vec_update_vq_map(enum vec_type t) { }
static inline int vec_verify_vq_map(enum vec_type t) { return 0; }
static inline void sve_setup(void) { }

/* No SVE: tasks never carry SVE state */
static inline size_t sve_state_size(struct task_struct const *task)
{
	return 0;
}
  241. #endif /* ! CONFIG_ARM64_SVE */
  242. #ifdef CONFIG_ARM64_SME
/* Disable EL0 access to SME by clearing CPACR_EL1.SMEN_EL0EN */
static inline void sme_user_disable(void)
{
	sysreg_clear_set(cpacr_el1, CPACR_EL1_SMEN_EL0EN, 0);
}
/* Enable EL0 access to SME by setting CPACR_EL1.SMEN_EL0EN */
static inline void sme_user_enable(void)
{
	sysreg_clear_set(cpacr_el1, 0, CPACR_EL1_SMEN_EL0EN);
}
/* Enter streaming mode: MSR to the SVCR SMSTART_SM alias (operand ignored) */
static inline void sme_smstart_sm(void)
{
	asm volatile(__msr_s(SYS_SVCR_SMSTART_SM_EL0, "xzr"));
}
/* Exit streaming mode: MSR to the SVCR SMSTOP_SM alias (operand ignored) */
static inline void sme_smstop_sm(void)
{
	asm volatile(__msr_s(SYS_SVCR_SMSTOP_SM_EL0, "xzr"));
}
/* Exit streaming mode and disable ZA: MSR to the SVCR SMSTOP_SMZA alias */
static inline void sme_smstop(void)
{
	asm volatile(__msr_s(SYS_SVCR_SMSTOP_SMZA_EL0, "xzr"));
}
  263. extern void __init sme_setup(void);
/* Largest SME vector length supported by every CPU */
static inline int sme_max_vl(void)
{
	return vec_max_vl(ARM64_VEC_SME);
}
/* Largest virtualisable SME vector length */
static inline int sme_max_virtualisable_vl(void)
{
	return vec_max_virtualisable_vl(ARM64_VEC_SME);
}
  272. extern void sme_alloc(struct task_struct *task, bool flush);
  273. extern unsigned int sme_get_vl(void);
  274. extern int sme_set_current_vl(unsigned long arg);
  275. extern int sme_get_current_vl(void);
  276. /*
  277. * Return how many bytes of memory are required to store the full SME
  278. * specific state (currently just ZA) for task, given task's currently
  279. * configured vector length.
  280. */
  281. static inline size_t za_state_size(struct task_struct const *task)
  282. {
  283. unsigned int vl = task_get_sme_vl(task);
  284. return ZA_SIG_REGS_SIZE(sve_vq_from_vl(vl));
  285. }
  286. #else
/*
 * Stubs for the !CONFIG_ARM64_SME case: SME state management becomes a
 * no-op, queries report nothing available, and the VL set/get accessors
 * fail with -EINVAL.
 */
static inline void sme_user_disable(void) { BUILD_BUG(); }
static inline void sme_user_enable(void) { BUILD_BUG(); }

static inline void sme_smstart_sm(void) { }
static inline void sme_smstop_sm(void) { }
static inline void sme_smstop(void) { }

static inline void sme_alloc(struct task_struct *task, bool flush) { }
static inline void sme_setup(void) { }
static inline unsigned int sme_get_vl(void) { return 0; }
static inline int sme_max_vl(void) { return 0; }
static inline int sme_max_virtualisable_vl(void) { return 0; }
static inline int sme_set_current_vl(unsigned long arg) { return -EINVAL; }
static inline int sme_get_current_vl(void) { return -EINVAL; }

/* No SME: tasks never have ZA state */
static inline size_t za_state_size(struct task_struct const *task)
{
	return 0;
}
  303. #endif /* ! CONFIG_ARM64_SME */
  304. /* For use by EFI runtime services calls only */
  305. extern void __efi_fpsimd_begin(void);
  306. extern void __efi_fpsimd_end(void);
  307. #endif
  308. #endif