  1. /* SPDX-License-Identifier: GPL-2.0-only */
  2. /*
  3. * arch/arm/include/asm/processor.h
  4. *
  5. * Copyright (C) 1995-1999 Russell King
  6. */
  7. #ifndef __ASM_ARM_PROCESSOR_H
  8. #define __ASM_ARM_PROCESSOR_H
  9. #ifdef __KERNEL__
  10. #include <asm/hw_breakpoint.h>
  11. #include <asm/ptrace.h>
  12. #include <asm/types.h>
  13. #include <asm/unified.h>
  14. #include <asm/vdso/processor.h>
#ifdef __KERNEL__
/*
 * Top of the user stack: 26-bit-address-limited personalities get the
 * smaller TASK_SIZE_26 ceiling, everything else the full TASK_SIZE.
 */
#define STACK_TOP	((current->personality & ADDR_LIMIT_32BIT) ? \
			 TASK_SIZE : TASK_SIZE_26)
#define STACK_TOP_MAX	TASK_SIZE
#endif
/*
 * Per-thread debug state; empty unless hardware breakpoints are
 * configured in.
 */
struct debug_info {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	/* One perf_event per hardware breakpoint/watchpoint slot. */
	struct perf_event	*hbp[ARM_MAX_HBP_SLOTS];
#endif
};
/* Architecture-specific per-thread state embedded in task_struct. */
struct thread_struct {
	/* fault info — recorded when the thread takes a fault */
	unsigned long		address;	/* faulting address */
	unsigned long		trap_no;	/* trap number taken */
	unsigned long		error_code;	/* fault status/error code */
	/* debugging */
	struct debug_info	debug;
};
  33. /*
  34. * Everything usercopied to/from thread_struct is statically-sized, so
  35. * no hardened usercopy whitelist is needed.
  36. */
  37. static inline void arch_thread_struct_whitelist(unsigned long *offset,
  38. unsigned long *size)
  39. {
  40. *offset = *size = 0;
  41. }
  42. #define INIT_THREAD { }
/*
 * Initialise the user register state of a freshly exec'd thread:
 * clear all registers, then set up cpsr (mode, Thumb, endianness),
 * pc and sp.  For FDPIC binaries r7-r9 are saved across the memset
 * and restored, and r10 is seeded with the data segment start.
 */
#define start_thread(regs,pc,sp)					\
({									\
	unsigned long r7, r8, r9;					\
									\
	/* Save r7-r9 BEFORE the memset below wipes them. */		\
	if (IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC)) {			\
		r7 = regs->ARM_r7;					\
		r8 = regs->ARM_r8;					\
		r9 = regs->ARM_r9;					\
	}								\
	memset(regs->uregs, 0, sizeof(regs->uregs));			\
	if (IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC) &&			\
	    current->personality & FDPIC_FUNCPTRS) {			\
		regs->ARM_r7 = r7;					\
		regs->ARM_r8 = r8;					\
		regs->ARM_r9 = r9;					\
		regs->ARM_r10 = current->mm->start_data;		\
	} else if (!IS_ENABLED(CONFIG_MMU))				\
		regs->ARM_r10 = current->mm->start_data;		\
	/* 26-bit-limited personalities run in the legacy USR26 mode. */\
	if (current->personality & ADDR_LIMIT_32BIT)			\
		regs->ARM_cpsr = USR_MODE;				\
	else								\
		regs->ARM_cpsr = USR26_MODE;				\
	/* An odd entry point on a Thumb-capable CPU means Thumb state. */\
	if (elf_hwcap & HWCAP_THUMB && pc & 1)				\
		regs->ARM_cpsr |= PSR_T_BIT;				\
	regs->ARM_cpsr |= PSR_ENDSTATE;					\
	regs->ARM_pc = pc & ~1;		/* pc (Thumb bit stripped) */	\
	regs->ARM_sp = sp;		/* sp */			\
})
/* Forward declaration, a strange C thing */
struct task_struct;

/* Wait-channel lookup for a sleeping task (defined in arch code). */
unsigned long __get_wchan(struct task_struct *p);

/*
 * User-mode pt_regs sit at the top of the task's kernel stack; point
 * just below THREAD_START_SP and back up by one pt_regs.
 */
#define task_pt_regs(p) \
	((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)

/* Saved user pc/sp of a (stopped) task, read from its pt_regs. */
#define KSTK_EIP(tsk)	task_pt_regs(tsk)->ARM_pc
#define KSTK_ESP(tsk)	task_pt_regs(tsk)->ARM_sp
#ifdef CONFIG_SMP
/*
 * Emit the SMP instruction inline and record its address together
 * with the UP replacement in the .alt.smp.init section, so the
 * UP variant can be patched in (presumably at boot on a single-CPU
 * system — see the alternatives fixup code).
 */
#define __ALT_SMP_ASM(smp, up)					\
	"9998:	" smp "\n"					\
	"	.pushsection \".alt.smp.init\", \"a\"\n"	\
	"	.align	2\n"					\
	"	.long	9998b - .\n"				\
	"	" up "\n"					\
	"	.popsection\n"
#else
/* UP kernel: no patching needed, just use the UP instruction. */
#define __ALT_SMP_ASM(smp, up)	up
#endif
/*
 * Prefetching support - only ARMv5 and later (which have PLD).
 */
#if __LINUX_ARM_ARCH__ >= 5
#define ARCH_HAS_PREFETCH
/* Hint the CPU to pull the cache line at @ptr in, ahead of a read. */
static inline void prefetch(const void *ptr)
{
	__asm__ __volatile__(
		"pld\t%a0"
		:: "p" (ptr));
}

#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define ARCH_HAS_PREFETCHW
/*
 * Prefetch-for-write.  PLDW is part of the multiprocessing
 * extensions (hence .arch_extension mp); via __ALT_SMP_ASM an SMP
 * kernel falls back to a plain PLD when running on a UP system.
 */
static inline void prefetchw(const void *ptr)
{
	__asm__ __volatile__(
		".arch_extension mp\n"
		__ALT_SMP_ASM(
			"pldw\t%a0",
			"pld\t%a0"
		)
		:: "p" (ptr));
}
#endif
#endif
  114. #endif
  115. #endif /* __ASM_ARM_PROCESSOR_H */