arch-arm.h

/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
/*
 * ARM specific definitions for NOLIBC
 * Copyright (C) 2017-2022 Willy Tarreau <w@1wt.eu>
 */

#ifndef _NOLIBC_ARCH_ARM_H
#define _NOLIBC_ARCH_ARM_H

/* O_* macros for fcntl/open are architecture-specific */
#define O_RDONLY            0
#define O_WRONLY            1
#define O_RDWR              2
#define O_CREAT          0x40
#define O_EXCL           0x80
#define O_NOCTTY        0x100
#define O_TRUNC         0x200
#define O_APPEND        0x400
#define O_NONBLOCK      0x800
#define O_DIRECTORY    0x4000
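
/* Illustrative only (not part of the original header): a hedged sketch of how
 * these architecture-specific flags are typically combined. The open() wrapper
 * itself is provided elsewhere by nolibc's common code, not by this file.
 */
#if 0
static int example_open_for_write(void)
{
        /* the O_* values above are what the ARM kernel ABI expects */
        return open("/tmp/out.log", O_WRONLY | O_CREAT | O_TRUNC, 0644);
}
#endif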

/* The struct returned by the stat() syscall, 32-bit only, the syscall returns
 * exactly 56 bytes (stops before the unused array). In big endian, the format
 * differs as devices are returned as short only.
 */
struct sys_stat_struct {
#if defined(__ARMEB__)
        unsigned short st_dev;
        unsigned short __pad1;
#else
        unsigned long  st_dev;
#endif
        unsigned long  st_ino;
        unsigned short st_mode;
        unsigned short st_nlink;
        unsigned short st_uid;
        unsigned short st_gid;

#if defined(__ARMEB__)
        unsigned short st_rdev;
        unsigned short __pad2;
#else
        unsigned long  st_rdev;
#endif
        unsigned long  st_size;
        unsigned long  st_blksize;
        unsigned long  st_blocks;
        unsigned long  st_atime;
        unsigned long  st_atime_nsec;
        unsigned long  st_mtime;
        unsigned long  st_mtime_nsec;
        unsigned long  st_ctime;
        unsigned long  st_ctime_nsec;
        unsigned long  __unused[2];
};
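
/* Illustrative sanity check (not part of the original header): the comment
 * above says the kernel stops filling the structure right before the unused
 * array, i.e. at offset 56 on 32-bit ARM. A hedged sketch of how this could
 * be verified at compile time, assuming C11 and <stddef.h>:
 */
#if 0
#include <stddef.h>
_Static_assert(offsetof(struct sys_stat_struct, __unused) == 56,
               "the stat() syscall fills exactly 56 bytes before __unused");
#endif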

/* Syscalls for ARM in ARM or Thumb modes:
 *   - registers are 32-bit
 *   - stack is 8-byte aligned
 *     (http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka4127.html)
 *   - syscall number is passed in r7
 *   - arguments are in r0, r1, r2, r3, r4, r5
 *   - the system call is performed by calling svc #0
 *   - syscall return comes in r0
 *   - only lr is clobbered
 *   - the arguments are cast to long and assigned into the target registers,
 *     which are then simply passed as registers to the asm code, so that we
 *     don't run into issues with register constraints
 *   - the syscall number is always specified last so that some registers can
 *     be forced before it (gcc refuses a %-register in the last position)
 *
 * Also, ARM supports the old_select syscall if newselect is not available.
 */
#define __ARCH_WANT_SYS_OLD_SELECT

#define my_syscall0(num) \
({ \
        register long _num  __asm__ ("r7") = (num); \
        register long _arg1 __asm__ ("r0"); \
        \
        __asm__ volatile ( \
                "svc #0\n" \
                : "=r"(_arg1) \
                : "r"(_num) \
                : "memory", "cc", "lr" \
        ); \
        _arg1; \
})

#define my_syscall1(num, arg1) \
({ \
        register long _num  __asm__ ("r7") = (num); \
        register long _arg1 __asm__ ("r0") = (long)(arg1); \
        \
        __asm__ volatile ( \
                "svc #0\n" \
                : "=r"(_arg1) \
                : "r"(_arg1), \
                  "r"(_num) \
                : "memory", "cc", "lr" \
        ); \
        _arg1; \
})

#define my_syscall2(num, arg1, arg2) \
({ \
        register long _num  __asm__ ("r7") = (num); \
        register long _arg1 __asm__ ("r0") = (long)(arg1); \
        register long _arg2 __asm__ ("r1") = (long)(arg2); \
        \
        __asm__ volatile ( \
                "svc #0\n" \
                : "=r"(_arg1) \
                : "r"(_arg1), "r"(_arg2), \
                  "r"(_num) \
                : "memory", "cc", "lr" \
        ); \
        _arg1; \
})

#define my_syscall3(num, arg1, arg2, arg3) \
({ \
        register long _num  __asm__ ("r7") = (num); \
        register long _arg1 __asm__ ("r0") = (long)(arg1); \
        register long _arg2 __asm__ ("r1") = (long)(arg2); \
        register long _arg3 __asm__ ("r2") = (long)(arg3); \
        \
        __asm__ volatile ( \
                "svc #0\n" \
                : "=r"(_arg1) \
                : "r"(_arg1), "r"(_arg2), "r"(_arg3), \
                  "r"(_num) \
                : "memory", "cc", "lr" \
        ); \
        _arg1; \
})

#define my_syscall4(num, arg1, arg2, arg3, arg4) \
({ \
        register long _num  __asm__ ("r7") = (num); \
        register long _arg1 __asm__ ("r0") = (long)(arg1); \
        register long _arg2 __asm__ ("r1") = (long)(arg2); \
        register long _arg3 __asm__ ("r2") = (long)(arg3); \
        register long _arg4 __asm__ ("r3") = (long)(arg4); \
        \
        __asm__ volatile ( \
                "svc #0\n" \
                : "=r"(_arg1) \
                : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), \
                  "r"(_num) \
                : "memory", "cc", "lr" \
        ); \
        _arg1; \
})

#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
({ \
        register long _num  __asm__ ("r7") = (num); \
        register long _arg1 __asm__ ("r0") = (long)(arg1); \
        register long _arg2 __asm__ ("r1") = (long)(arg2); \
        register long _arg3 __asm__ ("r2") = (long)(arg3); \
        register long _arg4 __asm__ ("r3") = (long)(arg4); \
        register long _arg5 __asm__ ("r4") = (long)(arg5); \
        \
        __asm__ volatile ( \
                "svc #0\n" \
                : "=r"(_arg1) \
                : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
                  "r"(_num) \
                : "memory", "cc", "lr" \
        ); \
        _arg1; \
})
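
/* Illustrative only (not part of the original header): a minimal sketch of
 * how a raw write(2) wrapper could be built on my_syscall3(), assuming
 * __NR_write comes from <asm/unistd.h>. The real wrappers used with this
 * header live in nolibc's common code. On failure the kernel returns a
 * negative errno value directly in r0, which the macro passes through.
 */
#if 0
#include <asm/unistd.h>

static long example_sys_write(int fd, const void *buf, unsigned long count)
{
        /* returns the number of bytes written, or -errno on failure */
        return my_syscall3(__NR_write, fd, buf, count);
}
#endif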

/* startup code */
__asm__ (".section .text\n"
    ".weak _start\n"
    "_start:\n"
#if defined(__THUMBEB__) || defined(__THUMBEL__)
    /* We enter here in 32-bit mode but if some previous functions were in
     * 16-bit mode, the assembler cannot know, so we need to tell it we're in
     * 32-bit now, then switch to 16-bit (is there a better way to do it than
     * adding 1 by hand?) and tell the asm we're now in 16-bit mode so that
     * it generates correct instructions. Note that we do not support thumb1.
     */
    ".code 32\n"
    "add     r0, pc, #1\n"
    "bx      r0\n"
    ".code 16\n"
#endif
    "pop {%r0}\n"                 // argc was in the stack
    "mov %r1, %sp\n"              // argv = sp
    "add %r2, %r1, %r0, lsl #2\n" // envp = argv + 4*argc ...
    "add %r2, %r2, $4\n"          //        ... + 4
    "and %r3, %r1, $-8\n"         // AAPCS: sp must be 8-byte aligned in the
    "mov %sp, %r3\n"              //        callee, and bl doesn't push (lr=pc)
    "bl main\n"                   // main() returns the status code, we'll exit with it
    "movs r7, $1\n"               // NR_exit == 1
    "svc $0x00\n"
    "");

#endif // _NOLIBC_ARCH_ARM_H