/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_ASMMACRO_H
#define _ASM_IA64_ASMMACRO_H

/*
 * Copyright (C) 2000-2001, 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <[email protected]>
 */

#define ENTRY(name)				\
	.align 32;				\
	.proc name;				\
name:

#define ENTRY_MIN_ALIGN(name)			\
	.align 16;				\
	.proc name;				\
name:

#define GLOBAL_ENTRY(name)			\
	.global name;				\
	ENTRY(name)

#define END(name)				\
	.endp name
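
/*
 * Sketch of how these macros are typically paired when declaring an
 * assembly routine (the name "my_leaf_func" is only a placeholder):
 *
 *	GLOBAL_ENTRY(my_leaf_func)
 *		// ...function body...
 *		br.ret.sptk.many rp
 *	END(my_leaf_func)
 */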

/*
 * Helper macros to make unwind directives more readable:
 */

/* prologue_gr: */
#define ASM_UNW_PRLG_RP			0x8
#define ASM_UNW_PRLG_PFS		0x4
#define ASM_UNW_PRLG_PSP		0x2
#define ASM_UNW_PRLG_PR			0x1
#define ASM_UNW_PRLG_GRSAVE(ninputs)	(32+(ninputs))
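
/*
 * Illustrative combination with a ".prologue" unwind directive: the mask
 * says rp and ar.pfs are saved, and ASM_UNW_PRLG_GRSAVE(2) names the first
 * stacked register after two inputs as the start of the save area
 * (a sketch, not a complete prologue):
 *
 *	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
 */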

/*
 * Helper macros for accessing user memory.
 *
 * When adding any new .section/.previous entries here, make sure to
 * also add it to the DISCARD section in arch/ia64/kernel/gate.lds.S or
 * unpleasant things will happen.
 */

	.section "__ex_table", "a"		// declare section & section attributes
	.previous

# define EX(y,x...)				\
	.xdata4 "__ex_table", 99f-., y-.;	\
  [99:]	x
# define EXCLR(y,x...)				\
	.xdata4 "__ex_table", 99f-., y-.+4;	\
  [99:]	x
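
/*
 * Sketch of the intended use (label and registers are placeholders): the
 * tagged load gets an exception-table entry, so a fault on the user pointer
 * is redirected to the fixup label instead of taking the normal fault path.
 * EXCLR differs only in the extra flag (the "+4") it records for the fixup
 * handler.
 *
 *	EX(.Lcopy_fail, ld8 r8=[in0])
 */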

/*
 * Tag MCA recoverable instruction ranges.
 */

	.section "__mca_table", "a"		// declare section & section attributes
	.previous

# define MCA_RECOVER_RANGE(y)			\
	.xdata4 "__mca_table", y-., 99f-.;	\
  [99:]
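
/*
 * Rough usage sketch: drop a label at the start of the range and invoke
 * the macro at its end; the table entry then covers everything in between
 * (the label name is illustrative):
 *
 *	.Lmca_begin:
 *		// instructions whose MCAs are recoverable
 *	MCA_RECOVER_RANGE(.Lmca_begin)
 */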

/*
 * Mark instructions that need a load of a virtual address patched to be
 * a load of a physical address.  We use this either in critical performance
 * path (ivt.S - TLB miss processing) or in places where it might not be
 * safe to use a "tpa" instruction (mca_asm.S - error recovery).
 */

	.section ".data..patch.vtop", "a"	// declare section & section attributes
	.previous

#define	LOAD_PHYSICAL(pr, reg, obj)		\
[1:](pr)movl reg = obj;				\
	.xdata4 ".data..patch.vtop", 1b-.
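
/*
 * Illustrative use (predicate, register, and symbol are placeholders):
 * the "movl" loads the virtual address as written, and the entry emitted
 * into .data..patch.vtop lets boot-time patching convert it into a
 * physical-address load where that is required.
 *
 *	LOAD_PHYSICAL(p6, r17, swapper_pg_dir)
 */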

/*
 * For now, we always put in the McKinley E9 workaround.  On CPUs that don't need it,
 * we'll patch out the work-around bundles with NOPs, so their impact is minimal.
 */
#define DO_MCKINLEY_E9_WORKAROUND

#ifdef DO_MCKINLEY_E9_WORKAROUND
	.section ".data..patch.mckinley_e9", "a"
	.previous
/* workaround for Itanium 2 Errata 9: */
# define FSYS_RETURN					\
	.xdata4 ".data..patch.mckinley_e9", 1f-.;	\
1:{ .mib;						\
	nop.m 0;					\
	mov r16=ar.pfs;					\
	br.call.sptk.many b7=2f;;			\
  };							\
2:{ .mib;						\
	nop.m 0;					\
	mov ar.pfs=r16;					\
	br.ret.sptk.many b6;;				\
  }
#else
# define FSYS_RETURN	br.ret.sptk.many b6
#endif
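
/*
 * Callers (e.g. the fsyscall stubs) return with FSYS_RETURN rather than a
 * plain "br.ret.sptk.many b6", so affected CPUs get the errata sequence
 * while others have it patched into NOPs.  Sketch (the stub name below is
 * a placeholder):
 *
 *	ENTRY(fsys_dummy)
 *		// ...
 *		FSYS_RETURN
 *	END(fsys_dummy)
 */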

/*
 * If physical stack register size is different from DEF_NUM_STACK_REG,
 * dynamically patch the kernel for correct size.
 */

	.section ".data..patch.phys_stack_reg", "a"
	.previous

#define	LOAD_PHYS_STACK_REG_SIZE(reg)			\
[1:]	adds reg=IA64_NUM_PHYS_STACK_REG*8+8,r0;	\
	.xdata4 ".data..patch.phys_stack_reg", 1b-.
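
/*
 * Illustrative use (the register choice is arbitrary): the "adds"
 * immediate is what gets rewritten at boot when the CPU's physical
 * stacked-register count differs from the build-time default.
 *
 *	LOAD_PHYS_STACK_REG_SIZE(r17)
 */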

/*
 * Up until early 2004, use of .align within a function caused bad unwind info.
 * TEXT_ALIGN(n) expands into ".align n" if a fixed GAS is available or into nothing
 * otherwise.
 */
#ifdef HAVE_WORKING_TEXT_ALIGN
# define TEXT_ALIGN(n)	.align n
#else
# define TEXT_ALIGN(n)
#endif
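
/*
 * Typical use is aligning a branch target inside a function body without
 * tripping the old unwind-info breakage, e.g. (label is illustrative):
 *
 *	TEXT_ALIGN(32)
 * .Lloop:
 *	// ...
 */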

#ifdef HAVE_SERIALIZE_DIRECTIVE
# define dv_serialize_data		.serialize.data
# define dv_serialize_instruction	.serialize.instruction
#else
# define dv_serialize_data
# define dv_serialize_instruction
#endif
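
/*
 * These expand to the assembler's ".serialize.data"/".serialize.instruction"
 * directives when the assembler supports them, and are used to tell its
 * dependency-violation checker that earlier instructions (e.g. an "itc")
 * cannot affect what follows, roughly (registers are illustrative):
 *
 *	itc.d r18
 *	;;
 *	dv_serialize_data
 *	ld8 r19=[r17]
 */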

#endif /* _ASM_IA64_ASMMACRO_H */