Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (29 commits)
  sched: Export account_system_vtime()
  sched: Call tick_check_idle before __irq_enter
  sched: Remove irq time from available CPU power
  sched: Do not account irq time to current task
  x86: Add IRQ_TIME_ACCOUNTING
  sched: Add IRQ_TIME_ACCOUNTING, finer accounting of irq time
  sched: Add a PF flag for ksoftirqd identification
  sched: Consolidate account_system_vtime extern declaration
  sched: Fix softirq time accounting
  sched: Drop group_capacity to 1 only if local group has extra capacity
  sched: Force balancing on newidle balance if local group has capacity
  sched: Set group_imb only a task can be pulled from the busiest cpu
  sched: Do not consider SCHED_IDLE tasks to be cache hot
  sched: Drop all load weight manipulation for RT tasks
  sched: Create special class for stop/migrate work
  sched: Unindent labels
  sched: Comment updates: fix default latency and granularity numbers
  tracing/sched: Add sched_pi_setprio tracepoint
  sched: Give CPU bound RT tasks preference
  sched: Try not to migrate higher priority RT tasks
  ...
@@ -875,6 +875,7 @@ enum sched_domain_level {
 	SD_LV_NONE = 0,
 	SD_LV_SIBLING,
 	SD_LV_MC,
+	SD_LV_BOOK,
 	SD_LV_CPU,
 	SD_LV_NODE,
 	SD_LV_ALLNODES,
@@ -1690,8 +1691,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 /*
  * Per process flags
  */
-#define PF_ALIGNWARN	0x00000001	/* Print alignment warning msgs */
-				/* Not implemented yet, only for 486*/
+#define PF_KSOFTIRQD	0x00000001	/* I am ksoftirqd */
 #define PF_STARTING	0x00000002	/* being created */
 #define PF_EXITING	0x00000004	/* getting shut down */
 #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
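The reclaimed PF_ALIGNWARN bit becomes PF_KSOFTIRQD so accounting code can tell whether softirq work is running in the per-CPU ksoftirqd thread or on the back of an interrupted task. A minimal sketch of such a check, assuming the in_serving_softirq() helper introduced elsewhere in this series; the function name below is illustrative, not part of this diff:

#include <linux/sched.h>
#include <linux/hardirq.h>

/* Illustrative helper, not from this diff: true when the softirq work
 * currently executing belongs to ksoftirqd rather than to a regular
 * task that happened to be interrupted. */
static bool softirq_in_ksoftirqd(struct task_struct *curr)
{
	return in_serving_softirq() && (curr->flags & PF_KSOFTIRQD);
}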
@@ -1837,6 +1837,19 @@ extern void sched_clock_idle_sleep_event(void);
 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
 #endif
 
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+/*
+ * An i/f to runtime opt-in for irq time accounting based off of sched_clock.
+ * The reason for this explicit opt-in is not to have perf penalty with
+ * slow sched_clocks.
+ */
+extern void enable_sched_clock_irqtime(void);
+extern void disable_sched_clock_irqtime(void);
+#else
+static inline void enable_sched_clock_irqtime(void) {}
+static inline void disable_sched_clock_irqtime(void) {}
+#endif
+
 extern unsigned long long
 task_sched_runtime(struct task_struct *task);
 extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
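The comment explains the opt-in: irq time accounting reads sched_clock() on every irq entry/exit, so it is only enabled at runtime by architectures whose clock is cheap. A minimal sketch of how an architecture might opt in at boot; the "have_fast_sched_clock" flag and function name are assumptions for illustration, though the x86 patch in this series does the equivalent from its TSC setup:

#include <linux/sched.h>

/* Sketch, not from this diff: called from an arch's clock setup once it
 * knows whether sched_clock() is cheap enough for per-irq reads. */
static void arch_irqtime_opt_in(bool have_fast_sched_clock)
{
	if (have_fast_sched_clock)
		enable_sched_clock_irqtime();
	else
		disable_sched_clock_irqtime();
}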
@@ -2378,9 +2391,9 @@ extern int __cond_resched_lock(spinlock_t *lock);
 
 extern int __cond_resched_softirq(void);
 
 #define cond_resched_softirq() ({					\
-	__might_sleep(__FILE__, __LINE__, SOFTIRQ_OFFSET);	\
+	__might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
 	__cond_resched_softirq();					\
 })
 
 /*
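The only substantive change in this hunk is the offset passed to __might_sleep(): the series separates "softirqs disabled" from "currently serving a softirq" in the preempt count, so the debug check must use the bottom-half-disable offset. For orientation, the definitions added by "sched: Fix softirq time accounting" look like the following sketch (verify against linux/hardirq.h in the target tree):

/* Paraphrased from the softirq time accounting rework in this series:
 * local_bh_disable() adds SOFTIRQ_DISABLE_OFFSET, while executing a
 * softirq handler adds SOFTIRQ_OFFSET, so the two states can be told
 * apart. */
#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)
#define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)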