thread_info.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <[email protected]>
 */
#ifndef _ASM_IA64_THREAD_INFO_H
#define _ASM_IA64_THREAD_INFO_H

#ifndef ASM_OFFSETS_C
#include <asm/asm-offsets.h>
#endif
#include <asm/processor.h>
#include <asm/ptrace.h>

#define THREAD_SIZE			KERNEL_STACK_SIZE

#ifndef __ASSEMBLY__

/*
 * On IA-64, we want to keep the task structure and kernel stack together, so they can be
 * mapped by a single TLB entry and so they can be addressed by the "current" pointer
 * without having to do pointer masking.
 */
struct thread_info {
	struct task_struct *task;	/* XXX not really needed, except for dup_task_struct() */
	__u32 flags;			/* thread_info flags (see TIF_*) */
	__u32 cpu;			/* current CPU */
	__u32 last_cpu;			/* Last CPU thread ran on */
	__u32 status;			/* Thread synchronous flags */
	int preempt_count;		/* 0=preemptible, <0=BUG; will also serve as bh-counter */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	__u64 utime;
	__u64 stime;
	__u64 gtime;
	__u64 hardirq_time;
	__u64 softirq_time;
	__u64 idle_time;
	__u64 ac_stamp;
	__u64 ac_leave;
	__u64 ac_stime;
	__u64 ac_utime;
#endif
};
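
/*
 * Rough sketch of how the single per-task allocation appears to be laid out,
 * going by the macros in this file (the exact offsets come from the generated
 * asm-offsets.h):
 *
 *	+-----------------------------+ <- (char *)tsk + KERNEL_STACK_SIZE
 *	| memory stack (grows down) / |
 *	| register backing store      |
 *	| (grows up)                  |
 *	+-----------------------------+ <- end_of_stack(tsk): tsk + IA64_RBS_OFFSET
 *	| struct thread_info          | <- task_thread_info(tsk): tsk + IA64_TASK_SIZE
 *	+-----------------------------+
 *	| struct task_struct          | <- task_stack_page(tsk): tsk
 *	+-----------------------------+
 *
 * This is why current_thread_info() below is simply
 * "current + IA64_TASK_SIZE", with no pointer masking.
 */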

#define INIT_THREAD_INFO(tsk)			\
{						\
	.task		= &tsk,			\
	.flags		= 0,			\
	.cpu		= 0,			\
	.preempt_count	= INIT_PREEMPT_COUNT,	\
}
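
/* Static initializer for a thread_info, used for the boot (init) task. */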

#ifndef ASM_OFFSETS_C
/* how to get the thread information struct from C */
#define current_thread_info()	((struct thread_info *) ((char *) current + IA64_TASK_SIZE))
#define arch_alloc_thread_stack_node(tsk, node)	\
		((unsigned long *) ((char *) (tsk) + IA64_TASK_SIZE))
#define task_thread_info(tsk)	((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
#else
#define current_thread_info()	((struct thread_info *) 0)
#define arch_alloc_thread_stack_node(tsk, node)	((unsigned long *) 0)
#define task_thread_info(tsk)	((struct thread_info *) 0)
#endif
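
/*
 * asm-offsets.c defines ASM_OFFSETS_C while it is being compiled to generate
 * asm/asm-offsets.h, so IA64_TASK_SIZE does not exist yet at that point; the
 * null stubs in the #else branch above only keep that one translation unit
 * compiling and are not meant to be used at run time.
 */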

#define arch_free_thread_stack(tsk)	/* nothing */
#define task_stack_page(tsk)	((void *)(tsk))

#define __HAVE_THREAD_FUNCTIONS
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#define setup_thread_stack(p, org)			\
	*task_thread_info(p) = *task_thread_info(org);	\
	task_thread_info(p)->ac_stime = 0;		\
	task_thread_info(p)->ac_utime = 0;		\
	task_thread_info(p)->task = (p);
#else
#define setup_thread_stack(p, org)			\
	*task_thread_info(p) = *task_thread_info(org);	\
	task_thread_info(p)->task = (p);
#endif
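
/*
 * With __HAVE_THREAD_FUNCTIONS defined above, this setup_thread_stack()
 * replaces the generic version: it copies the parent's thread_info into the
 * child's combined task/stack region, points ->task back at the child, and
 * (under CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) zeroes the ac_stime/ac_utime
 * accumulators so the child's CPU accounting starts fresh.
 */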

#define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET)

#define alloc_task_struct_node(node)						\
({										\
	struct page *page = alloc_pages_node(node, GFP_KERNEL | __GFP_COMP,	\
					     KERNEL_STACK_SIZE_ORDER);		\
	struct task_struct *ret = page ? page_address(page) : NULL;		\
										\
	ret;									\
})
#define free_task_struct(tsk)	free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER)
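
/*
 * alloc_task_struct_node()/free_task_struct() hand out one compound
 * 2^KERNEL_STACK_SIZE_ORDER-page block per task; the task_struct, the
 * thread_info and the kernel stack all live inside it, which is what makes
 * the fixed-offset pointer arithmetic above possible.
 */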

#endif /* !__ASSEMBLY__ */

/*
 * thread information flags
 * - these are process state flags that various assembly files may need to access
 * - pending work-to-be-done flags are in least-significant 16 bits, other flags
 *   in top 16 bits
 */
#define TIF_SIGPENDING		0	/* signal pending */
#define TIF_NEED_RESCHED	1	/* rescheduling necessary */
#define TIF_SYSCALL_TRACE	2	/* syscall trace active */
#define TIF_SYSCALL_AUDIT	3	/* syscall auditing active */
#define TIF_SINGLESTEP		4	/* restore singlestep on return to user mode */
#define TIF_NOTIFY_SIGNAL	5	/* signal notifications exist */
#define TIF_NOTIFY_RESUME	6	/* resumption notification requested */
#define TIF_MEMDIE		17	/* is terminating due to OOM killer */
#define TIF_MCA_INIT		18	/* this task is processing MCA or INIT */
#define TIF_DB_DISABLED		19	/* debug trap disabled for fsyscall */
#define TIF_RESTORE_RSE		21	/* user RBS is newer than kernel RBS */
#define TIF_POLLING_NRFLAG	22	/* idle is polling for TIF_NEED_RESCHED */

#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
#define _TIF_SYSCALL_TRACEAUDIT	(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP)
#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
#define _TIF_NOTIFY_SIGNAL	(1 << TIF_NOTIFY_SIGNAL)
#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
#define _TIF_MCA_INIT		(1 << TIF_MCA_INIT)
#define _TIF_DB_DISABLED	(1 << TIF_DB_DISABLED)
#define _TIF_RESTORE_RSE	(1 << TIF_RESTORE_RSE)
#define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)

/* "work to do on user-return" bits */
#define TIF_ALLWORK_MASK	(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SYSCALL_AUDIT|\
				 _TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE|_TIF_NOTIFY_SIGNAL)
/* like TIF_ALLWORK_MASK but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */
#define TIF_WORK_MASK		(TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT))
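
/*
 * Usage sketch (not part of this header; hypothetical call site): the
 * user-return paths conceptually test these masks against the flags word,
 * roughly
 *
 *	if (current_thread_info()->flags & TIF_ALLWORK_MASK)
 *		... handle signals, rescheduling, tracing before returning ...
 *
 * with TIF_WORK_MASK used where syscall trace/audit is handled separately.
 */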

#endif /* _ASM_IA64_THREAD_INFO_H */