stop_task.c

// SPDX-License-Identifier: GPL-2.0
/*
 * stop-task scheduling class.
 *
 * The stop task is the highest-priority task in the system; it preempts
 * everything and is preempted by nothing.
 *
 * See kernel/stop_machine.c
 */
#ifdef CONFIG_SMP
static int
select_task_rq_stop(struct task_struct *p, int cpu, int flags)
{
        return task_cpu(p); /* stop tasks never migrate */
}
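
/*
 * The stop class has work on @rq only when the stop task itself is
 * runnable.
 */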
static int
balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
        return sched_stop_runnable(rq);
}
#endif /* CONFIG_SMP */

static void
check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
{
        /* we're never preempted */
}
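
/*
 * Stamp exec_start so put_prev_task_stop() can account the runtime the
 * stop task is about to consume.
 */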
static void set_next_task_stop(struct rq *rq, struct task_struct *stop, bool first)
{
        stop->se.exec_start = rq_clock_task(rq);
}
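
/* The per-CPU stop task (rq->stop) is the only task in this class. */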
static struct task_struct *pick_task_stop(struct rq *rq)
{
        if (!sched_stop_runnable(rq))
                return NULL;

        return rq->stop;
}

static struct task_struct *pick_next_task_stop(struct rq *rq)
{
        struct task_struct *p = pick_task_stop(rq);

        if (p)
                set_next_task_stop(rq, p, true);

        return p;
}
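
/*
 * The stop task is not kept on any run-queue list; en/dequeue only
 * maintain rq->nr_running.
 */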
static void
enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
        add_nr_running(rq, 1);
}

static void
dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
        sub_nr_running(rq, 1);
}

static void yield_task_stop(struct rq *rq)
{
        BUG(); /* the stop task should never yield, it's pointless. */
}
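
/*
 * Account the CPU time the outgoing stop task consumed; this is the only
 * runtime accounting the stop class does.
 */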
static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
{
        struct task_struct *curr = rq->curr;
        u64 now, delta_exec;

        now = rq_clock_task(rq);
        delta_exec = now - curr->se.exec_start;
        if (unlikely((s64)delta_exec < 0))
                delta_exec = 0;

        schedstat_set(curr->stats.exec_max,
                      max(curr->stats.exec_max, delta_exec));

        update_current_exec_runtime(curr, now, delta_exec);
}

/*
 * scheduler tick hitting a task of our scheduling class.
 *
 * NOTE: This function can be called remotely by the tick offload that
 * goes along with full dynticks. Therefore no local assumption can be made
 * and everything must be accessed through the @rq and @curr passed in
 * parameters.
 */
static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
{
}

static void switched_to_stop(struct rq *rq, struct task_struct *p)
{
        BUG(); /* it's impossible to change to this class */
}

static void
prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio)
{
        BUG(); /* how!? what priority? */
}
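
/* Runtime is accounted in put_prev_task_stop(); nothing to do here. */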
static void update_curr_stop(struct rq *rq)
{
}

/*
 * Simple, special scheduling class for the per-CPU stop tasks:
 */
DEFINE_SCHED_CLASS(stop) = {

        .enqueue_task           = enqueue_task_stop,
        .dequeue_task           = dequeue_task_stop,
        .yield_task             = yield_task_stop,

        .check_preempt_curr     = check_preempt_curr_stop,

        .pick_next_task         = pick_next_task_stop,
        .put_prev_task          = put_prev_task_stop,
        .set_next_task          = set_next_task_stop,

#ifdef CONFIG_SMP
        .balance                = balance_stop,
        .pick_task              = pick_task_stop,
        .select_task_rq         = select_task_rq_stop,
        .set_cpus_allowed       = set_cpus_allowed_common,
#endif

        .task_tick              = task_tick_stop,

        .prio_changed           = prio_changed_stop,
        .switched_to            = switched_to_stop,
        .update_curr            = update_curr_stop,
};