processor.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef _ASM_PROCESSOR_H
#define _ASM_PROCESSOR_H

#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/sizes.h>

#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <asm/loongarch.h>
#include <asm/vdso/processor.h>
#include <uapi/asm/ptrace.h>
#include <uapi/asm/sigcontext.h>
#ifdef CONFIG_32BIT

#define TASK_SIZE	0x80000000UL
#define TASK_SIZE_MIN	TASK_SIZE
#define STACK_TOP_MAX	TASK_SIZE

#define TASK_IS_32BIT_ADDR 1

#endif

#ifdef CONFIG_64BIT

#define TASK_SIZE32	0x100000000UL
#define TASK_SIZE64	(0x1UL << ((cpu_vabits > VA_BITS) ? VA_BITS : cpu_vabits))

#define TASK_SIZE	(test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
#define TASK_SIZE_MIN	TASK_SIZE32
#define STACK_TOP_MAX	TASK_SIZE64

#define TASK_SIZE_OF(tsk)						\
	(test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)

#define TASK_IS_32BIT_ADDR test_thread_flag(TIF_32BIT_ADDR)

#endif
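
/*
 * Example (illustrative, not from the original header): on a 64-bit
 * kernel, a task running with TIF_32BIT_ADDR set sees
 * TASK_SIZE = TASK_SIZE32 = 4 GiB, while a native 64-bit task sees
 * TASK_SIZE64 = 1UL << min(cpu_vabits, VA_BITS), e.g. 128 TiB if both
 * values happen to be 47.
 */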
#define VDSO_RANDOMIZE_SIZE	(TASK_IS_32BIT_ADDR ? SZ_1M : SZ_64M)

unsigned long stack_top(void);
#define STACK_TOP stack_top()

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
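
/*
 * Worked example (illustrative): for a 32-bit-address task,
 * TASK_SIZE / 3 = 0x100000000 / 3 = 0x55555555, which PAGE_ALIGN()
 * rounds up to 0x55556000 with 4 KiB pages, so mmap() searches upward
 * from roughly the 1.33 GiB mark.
 */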
#define FPU_REG_WIDTH	256
#define FPU_ALIGN	__attribute__((aligned(32)))

union fpureg {
	__u32	val32[FPU_REG_WIDTH / 32];
	__u64	val64[FPU_REG_WIDTH / 64];
};

#define FPR_IDX(width, idx)	(idx)

#define BUILD_FPR_ACCESS(width)						\
static inline u##width get_fpr##width(union fpureg *fpr, unsigned int idx) \
{									\
	return fpr->val##width[FPR_IDX(width, idx)];			\
}									\
									\
static inline void set_fpr##width(union fpureg *fpr, unsigned int idx,	\
				  u##width val)				\
{									\
	fpr->val##width[FPR_IDX(width, idx)] = val;			\
}

BUILD_FPR_ACCESS(32)
BUILD_FPR_ACCESS(64)
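
/*
 * Usage sketch (illustrative): the generated accessors read or write
 * one <width>-bit lane of a 256-bit FP/vector register, e.g.:
 *
 *	union fpureg reg = { };
 *	set_fpr64(&reg, 0, 0x1234);	// write the low 64-bit lane
 *	u64 lo = get_fpr64(&reg, 0);	// read it back
 */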
struct loongarch_fpu {
	unsigned int	fcsr;
	uint64_t	fcc;	/* 8x8 */
	union fpureg	fpr[NUM_FPU_REGS];
};
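
/*
 * Illustrative sketch, assuming the "8x8" layout above packs the eight
 * condition-flag registers fcc0-fcc7 one per byte of the 64-bit word:
 *
 *	bool cc_n_set = (fpu->fcc >> (n * 8)) & 1;
 */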
#define INIT_CPUMASK { \
	{0,} \
}

#define ARCH_MIN_TASKALIGN	32

struct loongarch_vdso_info;

/*
 * If you change thread_struct remember to change the #defines below too!
 */
struct thread_struct {
	/* Main processor registers. */
	unsigned long reg01, reg03, reg22;		 /* ra sp fp */
	unsigned long reg23, reg24, reg25, reg26;	 /* s0-s3 */
	unsigned long reg27, reg28, reg29, reg30, reg31; /* s4-s8 */

	/* __schedule() return address / call frame address */
	unsigned long sched_ra;
	unsigned long sched_cfa;

	/* CSR registers */
	unsigned long csr_prmd;
	unsigned long csr_crmd;
	unsigned long csr_euen;
	unsigned long csr_ecfg;
	unsigned long csr_badvaddr;	/* Last user fault */

	/* Scratch registers */
	unsigned long scr0;
	unsigned long scr1;
	unsigned long scr2;
	unsigned long scr3;

	/* Eflags register */
	unsigned long eflags;

	/* Other stuff associated with the thread. */
	unsigned long trap_nr;
	unsigned long error_code;
	struct loongarch_vdso_info *vdso;

	/*
	 * FPU & vector registers, must come last because
	 * they are conditionally copied at fork().
	 */
	struct loongarch_fpu fpu FPU_ALIGN;
};
#define thread_saved_ra(tsk)	(tsk->thread.sched_ra)
#define thread_saved_fp(tsk)	(tsk->thread.sched_cfa)

#define INIT_THREAD {						\
	/*							\
	 * Main processor registers				\
	 */							\
	.reg01		= 0,					\
	.reg03		= 0,					\
	.reg22		= 0,					\
	.reg23		= 0,					\
	.reg24		= 0,					\
	.reg25		= 0,					\
	.reg26		= 0,					\
	.reg27		= 0,					\
	.reg28		= 0,					\
	.reg29		= 0,					\
	.reg30		= 0,					\
	.reg31		= 0,					\
	.sched_ra	= 0,					\
	.sched_cfa	= 0,					\
	.csr_crmd	= 0,					\
	.csr_prmd	= 0,					\
	.csr_euen	= 0,					\
	.csr_ecfg	= 0,					\
	.csr_badvaddr	= 0,					\
	/*							\
	 * Other stuff associated with the process		\
	 */							\
	.trap_nr	= 0,					\
	.error_code	= 0,					\
	/*							\
	 * FPU & vector registers				\
	 */							\
	.fpu		= {					\
		.fcsr	= 0,					\
		.fcc	= 0,					\
		.fpr	= {{{0,},},},				\
	},							\
}
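
/*
 * Note: INIT_THREAD uses designated initializers, so thread_struct
 * members not listed above (scr0-scr3, eflags, vdso, ...) are
 * zero-initialized by C initialization rules.
 */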
struct task_struct;

enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_HALT, IDLE_NOMWAIT, IDLE_POLL};

extern unsigned long boot_option_idle_override;

/*
 * Do necessary setup to start up a newly executed thread.
 */
extern void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp);

static inline void flush_thread(void)
{
}
unsigned long __get_wchan(struct task_struct *p);

#define __KSTK_TOS(tsk) ((unsigned long)task_stack_page(tsk) + \
			 THREAD_SIZE - sizeof(struct pt_regs))
#define task_pt_regs(tsk) ((struct pt_regs *)__KSTK_TOS(tsk))
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->csr_era)
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[3])
#define KSTK_EUEN(tsk) (task_pt_regs(tsk)->csr_euen)
#define KSTK_ECFG(tsk) (task_pt_regs(tsk)->csr_ecfg)
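
/*
 * Usage sketch (illustrative): task_pt_regs() locates the user-mode
 * register frame saved at the top of the kernel stack, so a task's
 * user PC and SP can be read as:
 *
 *	unsigned long pc = KSTK_EIP(task);	// csr_era at last trap
 *	unsigned long sp = KSTK_ESP(task);	// $r3 (sp)
 */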
#define return_address() ({__asm__ __volatile__("":::"$1"); __builtin_return_address(0);})
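/*
 * The empty asm above clobbers $1 ($ra), forcing the compiler to keep
 * a usable return address so that __builtin_return_address(0) stays
 * reliable even in leaf functions.
 */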

#ifdef CONFIG_CPU_HAS_PREFETCH

#define ARCH_HAS_PREFETCH
#define prefetch(x) __builtin_prefetch((x), 0, 1)

#define ARCH_HAS_PREFETCHW
#define prefetchw(x) __builtin_prefetch((x), 1, 1)

#endif
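
/*
 * Note on the builtin: __builtin_prefetch(addr, rw, locality) takes
 * rw = 0 for a read and 1 for a write, and locality 1 requests low
 * temporal locality, so both hints above prefetch with minimal cache
 * pollution.
 */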

#endif /* _ASM_PROCESSOR_H */