Merge branch 'master' into for-next
@@ -209,6 +209,31 @@ void exit_creds(struct task_struct *tsk)
 	}
 }
 
+/**
+ * get_task_cred - Get another task's objective credentials
+ * @task: The task to query
+ *
+ * Get the objective credentials of a task, pinning them so that they can't go
+ * away. Accessing a task's credentials directly is not permitted.
+ *
+ * The caller must also make sure task doesn't get deleted, either by holding a
+ * ref on task or by holding tasklist_lock to prevent it from being unlinked.
+ */
+const struct cred *get_task_cred(struct task_struct *task)
+{
+	const struct cred *cred;
+
+	rcu_read_lock();
+
+	do {
+		cred = __task_cred((task));
+		BUG_ON(!cred);
+	} while (!atomic_inc_not_zero(&((struct cred *)cred)->usage));
+
+	rcu_read_unlock();
+	return cred;
+}
+
 /*
  * Allocate blank credentials, such that the credentials can be filled in at a
  * later date without risk of ENOMEM.
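
Note on the hunk above: the loop in get_task_cred() is the usual "take a reference only while the count is still non-zero" pattern for objects whose final reference may be dropped concurrently; if the increment fails, the task's cred pointer is re-read under RCU and the loop tries again. A minimal userspace C11 analogue of atomic_inc_not_zero(), with invented names and no claim to match the kernel implementation:

#include <stdatomic.h>
#include <stdbool.h>

struct obj {
	atomic_int usage;	/* 0 means the object is already being torn down */
};

/* Take a reference only if the object is still live; never resurrect it. */
static bool obj_get_not_zero(struct obj *o)
{
	int old = atomic_load(&o->usage);

	do {
		if (old == 0)
			return false;	/* lost the race with the final put */
	} while (!atomic_compare_exchange_weak(&o->usage, &old, old + 1));

	return true;
}

In the kernel function the surrounding rcu_read_lock() is what keeps the cred structure readable while the loop decides whether it may take a reference.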
@@ -605,13 +605,13 @@ cpu_master_loop:
 		if (dbg_kdb_mode) {
 			kgdb_connected = 1;
 			error = kdb_stub(ks);
+			kgdb_connected = 0;
 		} else {
 			error = gdb_serial_stub(ks);
 		}
 
 		if (error == DBG_PASS_EVENT) {
 			dbg_kdb_mode = !dbg_kdb_mode;
-			kgdb_connected = 0;
 		} else if (error == DBG_SWITCH_CPU_EVENT) {
 			dbg_cpu_switch(cpu, dbg_switch_cpu);
 			goto cpu_loop;
@@ -621,10 +621,8 @@ static void gdb_cmd_query(struct kgdb_state *ks)
 	switch (remcom_in_buffer[1]) {
 	case 's':
 	case 'f':
-		if (memcmp(remcom_in_buffer + 2, "ThreadInfo", 10)) {
-			error_packet(remcom_out_buffer, -EINVAL);
+		if (memcmp(remcom_in_buffer + 2, "ThreadInfo", 10))
 			break;
-		}
 
 		i = 0;
 		remcom_out_buffer[0] = 'm';
@@ -665,10 +663,9 @@ static void gdb_cmd_query(struct kgdb_state *ks)
 		pack_threadid(remcom_out_buffer + 2, thref);
 		break;
 	case 'T':
-		if (memcmp(remcom_in_buffer + 1, "ThreadExtraInfo,", 16)) {
-			error_packet(remcom_out_buffer, -EINVAL);
+		if (memcmp(remcom_in_buffer + 1, "ThreadExtraInfo,", 16))
 			break;
-		}
+
 		ks->threadid = 0;
 		ptr = remcom_in_buffer + 17;
 		kgdb_hex2long(&ptr, &ks->threadid);
@@ -1820,9 +1820,8 @@ static int kdb_sr(int argc, const char **argv)
 {
 	if (argc != 1)
 		return KDB_ARGCOUNT;
-	sysrq_toggle_support(1);
 	kdb_trap_printk++;
-	handle_sysrq(*argv[1], NULL);
+	__handle_sysrq(*argv[1], NULL, 0);
 	kdb_trap_printk--;
 
 	return 0;
@@ -1883,6 +1882,7 @@ static int kdb_lsmod(int argc, const char **argv)
 			kdb_printf(" (Loading)");
 		else
 			kdb_printf(" (Live)");
+		kdb_printf(" 0x%p", mod->module_core);
 
 #ifdef CONFIG_MODULE_UNLOAD
 		{
@@ -2291,6 +2291,9 @@ static int kdb_ll(int argc, const char **argv)
 	while (va) {
 		char buf[80];
 
+		if (KDB_FLAG(CMD_INTERRUPT))
+			return 0;
+
 		sprintf(buf, "%s " kdb_machreg_fmt "\n", command, va);
 		diag = kdb_parse(buf);
 		if (diag)
@@ -7,6 +7,8 @@
 #include <linux/bootmem.h>
 #include <linux/mm.h>
 #include <linux/early_res.h>
+#include <linux/slab.h>
+#include <linux/kmemleak.h>
 
 /*
  * Early reserved memory areas.
@@ -319,6 +321,8 @@ void __init free_early(u64 start, u64 end)
 	struct early_res *r;
 	int i;
 
+	kmemleak_free_part(__va(start), end - start);
+
 	i = find_overlapped_early(start, end);
 	r = &early_res[i];
 	if (i >= max_early_res || r->end != end || r->start != start)
@@ -333,6 +337,8 @@ void __init free_early_partial(u64 start, u64 end)
 	struct early_res *r;
 	int i;
 
+	kmemleak_free_part(__va(start), end - start);
+
 	if (start == end)
 		return;
 
@@ -429,20 +429,11 @@ static void free_pi_state(struct futex_pi_state *pi_state)
 static struct task_struct * futex_find_get_task(pid_t pid)
 {
 	struct task_struct *p;
-	const struct cred *cred = current_cred(), *pcred;
 
 	rcu_read_lock();
 	p = find_task_by_vpid(pid);
-	if (!p) {
-		p = ERR_PTR(-ESRCH);
-	} else {
-		pcred = __task_cred(p);
-		if (cred->euid != pcred->euid &&
-		    cred->euid != pcred->uid)
-			p = ERR_PTR(-ESRCH);
-		else
-			get_task_struct(p);
-	}
+	if (p)
+		get_task_struct(p);
 
 	rcu_read_unlock();
 
@@ -564,8 +555,8 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
 	if (!pid)
 		return -ESRCH;
 	p = futex_find_get_task(pid);
-	if (IS_ERR(p))
-		return PTR_ERR(p);
+	if (!p)
+		return -ESRCH;
 
 	/*
 	 * We need to look at the task state flags to figure out,
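
Both futex hunks change one contract together: futex_find_get_task() used to report a missing task as ERR_PTR(-ESRCH), so lookup_pi_state() tested IS_ERR(); it now returns NULL and the caller checks for that instead. The ERR_PTR()/IS_ERR() convention encodes a small negative errno in the pointer value itself; a standalone userspace re-creation of the idea (illustrative only, not the kernel headers):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long PTR_ERR(const void *ptr)  { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *p = ERR_PTR(-ESRCH);

	/* A caller written for the NULL convention walks straight past an
	 * ERR_PTR value, which is why the return style and the check in
	 * lookup_pi_state() have to change in the same patch. */
	printf("p == NULL: %d, IS_ERR(p): %d\n", p == NULL, IS_ERR(p));
	return 0;
}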
@@ -456,6 +456,9 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 		/* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
 		desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
 		desc->status |= flags;
+
+		if (chip != desc->chip)
+			irq_chip_set_defaults(desc->chip);
 	}
 
 	return ret;
@@ -1089,9 +1089,10 @@ void crash_kexec(struct pt_regs *regs)
 
 size_t crash_get_memory_size(void)
 {
-	size_t size;
+	size_t size = 0;
 	mutex_lock(&kexec_mutex);
-	size = crashk_res.end - crashk_res.start + 1;
+	if (crashk_res.end != crashk_res.start)
+		size = crashk_res.end - crashk_res.start + 1;
 	mutex_unlock(&kexec_mutex);
 	return size;
 }
@@ -1134,7 +1135,7 @@ int crash_shrink_memory(unsigned long new_size)
 
 	free_reserved_phys_range(end, crashk_res.end);
 
-	if (start == end)
+	if ((start == end) && (crashk_res.parent != NULL))
 		release_resource(&crashk_res);
 	crashk_res.end = end - 1;
 
@@ -787,7 +787,6 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
 
 	/* Store the name of the last unloaded module for diagnostic purposes */
 	strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));
-	ddebug_remove_module(mod->name);
 
 	free_module(mod);
 	return 0;
@@ -1550,6 +1549,9 @@ static void free_module(struct module *mod)
 	remove_sect_attrs(mod);
 	mod_kobject_remove(mod);
 
+	/* Remove dynamic debug info */
+	ddebug_remove_module(mod->name);
+
 	/* Arch-specific cleanup. */
 	module_arch_cleanup(mod);
 
@@ -2062,6 +2064,12 @@ static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
 #endif
 }
 
+static void dynamic_debug_remove(struct _ddebug *debug)
+{
+	if (debug)
+		ddebug_remove_module(debug->modname);
+}
+
 static void *module_alloc_update_bounds(unsigned long size)
 {
 	void *ret = module_alloc(size);
@@ -2124,6 +2132,8 @@ static noinline struct module *load_module(void __user *umod,
 	void *ptr = NULL; /* Stops spurious gcc warning */
 	unsigned long symoffs, stroffs, *strmap;
 	void __percpu *percpu;
+	struct _ddebug *debug = NULL;
+	unsigned int num_debug = 0;
 
 	mm_segment_t old_fs;
 
@@ -2476,15 +2486,9 @@ static noinline struct module *load_module(void __user *umod,
 	kfree(strmap);
 	strmap = NULL;
 
-	if (!mod->taints) {
-		struct _ddebug *debug;
-		unsigned int num_debug;
-
+	if (!mod->taints)
 		debug = section_objs(hdr, sechdrs, secstrings, "__verbose",
 				     sizeof(*debug), &num_debug);
-		if (debug)
-			dynamic_debug_setup(debug, num_debug);
-	}
 
 	err = module_finalize(hdr, sechdrs, mod);
 	if (err < 0)
@@ -2526,10 +2530,13 @@ static noinline struct module *load_module(void __user *umod,
 		goto unlock;
 	}
 
+	if (debug)
+		dynamic_debug_setup(debug, num_debug);
+
 	/* Find duplicate symbols */
 	err = verify_export_symbols(mod);
 	if (err < 0)
-		goto unlock;
+		goto ddebug;
 
 	list_add_rcu(&mod->list, &modules);
 	mutex_unlock(&module_mutex);
@@ -2557,6 +2564,8 @@ static noinline struct module *load_module(void __user *umod,
 	mutex_lock(&module_mutex);
 	/* Unlink carefully: kallsyms could be walking list. */
 	list_del_rcu(&mod->list);
+ ddebug:
+	dynamic_debug_remove(debug);
  unlock:
 	mutex_unlock(&module_mutex);
 	synchronize_sched();
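
The load_module() hunks above register dynamic debug entries after the module list is locked and add a matching "ddebug:" label to the error path, so a failure later in the sequence tears those entries down again before the unlock. The same goto-based unwind idiom in a small self-contained C sketch; setup_a(), setup_b() and undo_a() are invented for the example:

#include <stdio.h>
#include <stdlib.h>

static int setup_a(void)  { puts("setup a"); return 0; }
static void undo_a(void)  { puts("undo a"); }
static int setup_b(void)  { puts("setup b"); return -1; }	/* pretend this step fails */

static int do_init(void)
{
	int err;

	err = setup_a();
	if (err)
		goto out;

	err = setup_b();
	if (err)
		goto err_undo_a;	/* unwind only what is already set up */

	return 0;

	/* Cleanup labels sit in reverse order of setup, the same shape the
	 * new ddebug:/unlock: pair gives the module loader's error path. */
 err_undo_a:
	undo_a();
 out:
	return err;
}

int main(void)
{
	return do_init() ? EXIT_FAILURE : EXIT_SUCCESS;
}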
@@ -99,9 +99,13 @@ config PM_SLEEP_ADVANCED_DEBUG
 	depends on PM_ADVANCED_DEBUG
 	default n
 
+config SUSPEND_NVS
+	bool
+
 config SUSPEND
 	bool "Suspend to RAM and standby"
 	depends on PM && ARCH_SUSPEND_POSSIBLE
+	select SUSPEND_NVS if HAS_IOMEM
 	default y
 	---help---
 	  Allow the system to enter sleep states in which main memory is
@@ -130,13 +134,10 @@ config SUSPEND_FREEZER
 
 	  Turning OFF this setting is NOT recommended! If in doubt, say Y.
 
-config HIBERNATION_NVS
-	bool
-
 config HIBERNATION
 	bool "Hibernation (aka 'suspend to disk')"
 	depends on PM && SWAP && ARCH_HIBERNATION_POSSIBLE
-	select HIBERNATION_NVS if HAS_IOMEM
+	select SUSPEND_NVS if HAS_IOMEM
 	---help---
 	  Enable the suspend to disk (STD) functionality, which is usually
 	  called "hibernation" in user interfaces. STD checkpoints the
@@ -10,6 +10,6 @@ obj-$(CONFIG_SUSPEND) += suspend.o
 obj-$(CONFIG_PM_TEST_SUSPEND)	+= suspend_test.o
 obj-$(CONFIG_HIBERNATION)	+= hibernate.o snapshot.o swap.o user.o \
 				   block_io.o
-obj-$(CONFIG_HIBERNATION_NVS)	+= hibernate_nvs.o
+obj-$(CONFIG_SUSPEND_NVS)	+= nvs.o
 
 obj-$(CONFIG_MAGIC_SYSRQ)	+= poweroff.o
@@ -15,7 +15,7 @@
 
 /*
  * Platforms, like ACPI, may want us to save some memory used by them during
- * hibernation and to restore the contents of this memory during the subsequent
+ * suspend and to restore the contents of this memory during the subsequent
  * resume. The code below implements a mechanism allowing us to do that.
  */
 
@@ -30,7 +30,7 @@ struct nvs_page {
 static LIST_HEAD(nvs_list);
 
 /**
- * hibernate_nvs_register - register platform NVS memory region to save
+ * suspend_nvs_register - register platform NVS memory region to save
  * @start - physical address of the region
  * @size - size of the region
  *
@@ -38,7 +38,7 @@ static LIST_HEAD(nvs_list);
 
  * things so that the data from page-aligned addresses in this region will
  * be copied into separate RAM pages.
  */
-int hibernate_nvs_register(unsigned long start, unsigned long size)
+int suspend_nvs_register(unsigned long start, unsigned long size)
 {
 	struct nvs_page *entry, *next;
@@ -68,9 +68,9 @@ int hibernate_nvs_register(unsigned long start, unsigned long size)
 }
 
 /**
- * hibernate_nvs_free - free data pages allocated for saving NVS regions
+ * suspend_nvs_free - free data pages allocated for saving NVS regions
  */
-void hibernate_nvs_free(void)
+void suspend_nvs_free(void)
 {
 	struct nvs_page *entry;
 
@@ -86,16 +86,16 @@ void hibernate_nvs_free(void)
 }
 
 /**
- * hibernate_nvs_alloc - allocate memory necessary for saving NVS regions
+ * suspend_nvs_alloc - allocate memory necessary for saving NVS regions
  */
-int hibernate_nvs_alloc(void)
+int suspend_nvs_alloc(void)
 {
 	struct nvs_page *entry;
 
 	list_for_each_entry(entry, &nvs_list, node) {
 		entry->data = (void *)__get_free_page(GFP_KERNEL);
 		if (!entry->data) {
-			hibernate_nvs_free();
+			suspend_nvs_free();
 			return -ENOMEM;
 		}
 	}
@@ -103,9 +103,9 @@ int hibernate_nvs_alloc(void)
 }
 
 /**
- * hibernate_nvs_save - save NVS memory regions
+ * suspend_nvs_save - save NVS memory regions
  */
-void hibernate_nvs_save(void)
+void suspend_nvs_save(void)
 {
 	struct nvs_page *entry;
 
@@ -119,12 +119,12 @@ void hibernate_nvs_save(void)
 }
 
 /**
- * hibernate_nvs_restore - restore NVS memory regions
+ * suspend_nvs_restore - restore NVS memory regions
  *
  * This function is going to be called with interrupts disabled, so it
  * cannot iounmap the virtual addresses used to access the NVS region.
  */
-void hibernate_nvs_restore(void)
+void suspend_nvs_restore(void)
 {
 	struct nvs_page *entry;
 
@@ -16,6 +16,12 @@
 #include <linux/cpu.h>
 #include <linux/syscalls.h>
 #include <linux/gfp.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
 
 #include "power.h"
 
kernel/sched.c (137 lines changed)
@@ -306,52 +306,6 @@ static int init_task_group_load = INIT_TASK_GROUP_LOAD;
  */
 struct task_group init_task_group;
 
-/* return group to which a task belongs */
-static inline struct task_group *task_group(struct task_struct *p)
-{
-	struct task_group *tg;
-
-#ifdef CONFIG_CGROUP_SCHED
-	tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id),
-				struct task_group, css);
-#else
-	tg = &init_task_group;
-#endif
-	return tg;
-}
-
-/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
-static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
-{
-	/*
-	 * Strictly speaking this rcu_read_lock() is not needed since the
-	 * task_group is tied to the cgroup, which in turn can never go away
-	 * as long as there are tasks attached to it.
-	 *
-	 * However since task_group() uses task_subsys_state() which is an
-	 * rcu_dereference() user, this quiets CONFIG_PROVE_RCU.
-	 */
-	rcu_read_lock();
-#ifdef CONFIG_FAIR_GROUP_SCHED
-	p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
-	p->se.parent = task_group(p)->se[cpu];
-#endif
-
-#ifdef CONFIG_RT_GROUP_SCHED
-	p->rt.rt_rq = task_group(p)->rt_rq[cpu];
-	p->rt.parent = task_group(p)->rt_se[cpu];
-#endif
-	rcu_read_unlock();
-}
-
-#else
-
-static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
-static inline struct task_group *task_group(struct task_struct *p)
-{
-	return NULL;
-}
-
-#endif /* CONFIG_CGROUP_SCHED */
-
 /* CFS-related fields in a runqueue */
@@ -644,6 +598,49 @@ static inline int cpu_of(struct rq *rq)
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 #define raw_rq()		(&__raw_get_cpu_var(runqueues))
 
+#ifdef CONFIG_CGROUP_SCHED
+
+/*
+ * Return the group to which this tasks belongs.
+ *
+ * We use task_subsys_state_check() and extend the RCU verification
+ * with lockdep_is_held(&task_rq(p)->lock) because cpu_cgroup_attach()
+ * holds that lock for each task it moves into the cgroup. Therefore
+ * by holding that lock, we pin the task to the current cgroup.
+ */
+static inline struct task_group *task_group(struct task_struct *p)
+{
+	struct cgroup_subsys_state *css;
+
+	css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
+			lockdep_is_held(&task_rq(p)->lock));
+	return container_of(css, struct task_group, css);
+}
+
+/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
+static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
+{
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
+	p->se.parent = task_group(p)->se[cpu];
+#endif
+
+#ifdef CONFIG_RT_GROUP_SCHED
+	p->rt.rt_rq = task_group(p)->rt_rq[cpu];
+	p->rt.parent = task_group(p)->rt_se[cpu];
+#endif
+}
+
+#else /* CONFIG_CGROUP_SCHED */
+
+static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
+static inline struct task_group *task_group(struct task_struct *p)
+{
+	return NULL;
+}
+
+#endif /* CONFIG_CGROUP_SCHED */
+
 inline void update_rq_clock(struct rq *rq)
 {
 	if (!rq->skip_clock_update)
@@ -1257,6 +1254,12 @@ static void sched_avg_update(struct rq *rq)
 	s64 period = sched_avg_period();
 
 	while ((s64)(rq->clock - rq->age_stamp) > period) {
+		/*
+		 * Inline assembly required to prevent the compiler
+		 * optimising this loop into a divmod call.
+		 * See __iter_div_u64_rem() for another example of this.
+		 */
+		asm("" : "+rm" (rq->age_stamp));
 		rq->age_stamp += period;
 		rq->rt_avg /= 2;
 	}
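
The comment added above is the whole story: without the empty asm statement the compiler can see that the loop only ever adds period to rq->age_stamp and may rewrite it as a division/remainder, which is exactly what sched_avg_update() wants to avoid. The same barrier works in ordinary GNU C; a hedged standalone sketch, not the kernel code:

#include <stdint.h>

/* Count whole periods by repeated subtraction. The empty asm makes *start
 * opaque to the optimizer on every pass, so the loop cannot be collapsed
 * into (now - *start) / period. */
static unsigned int whole_periods(uint64_t now, uint64_t *start, uint64_t period)
{
	unsigned int n = 0;

	while ((int64_t)(now - *start) > (int64_t)period) {
		asm("" : "+rm" (*start));
		*start += period;
		n++;
	}
	return n;
}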
@@ -1660,9 +1663,6 @@ static void update_shares(struct sched_domain *sd)
 
 static void update_h_load(long cpu)
 {
-	if (root_task_group_empty())
-		return;
-
 	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
 }
 
@@ -2494,7 +2494,16 @@ void sched_fork(struct task_struct *p, int clone_flags)
 	if (p->sched_class->task_fork)
 		p->sched_class->task_fork(p);
 
+	/*
+	 * The child is not yet in the pid-hash so no cgroup attach races,
+	 * and the cgroup is pinned to this child due to cgroup_fork()
+	 * is ran before sched_fork().
+	 *
+	 * Silence PROVE_RCU.
+	 */
+	rcu_read_lock();
 	set_task_cpu(p, cpu);
+	rcu_read_unlock();
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	if (likely(sched_info_on()))
@@ -2864,9 +2873,9 @@ unsigned long nr_iowait(void)
 	return sum;
 }
 
-unsigned long nr_iowait_cpu(void)
+unsigned long nr_iowait_cpu(int cpu)
 {
-	struct rq *this = this_rq();
+	struct rq *this = cpu_rq(cpu);
 	return atomic_read(&this->nr_iowait);
 }
 
@@ -4465,16 +4474,6 @@ recheck:
 	}
 
 	if (user) {
-#ifdef CONFIG_RT_GROUP_SCHED
-		/*
-		 * Do not allow realtime tasks into groups that have no runtime
-		 * assigned.
-		 */
-		if (rt_bandwidth_enabled() && rt_policy(policy) &&
-				task_group(p)->rt_bandwidth.rt_runtime == 0)
-			return -EPERM;
-#endif
-
 		retval = security_task_setscheduler(p, policy, param);
 		if (retval)
 			return retval;
@@ -4490,6 +4489,22 @@ recheck:
 	 * runqueue lock must be held.
 	 */
 	rq = __task_rq_lock(p);
+
+#ifdef CONFIG_RT_GROUP_SCHED
+	if (user) {
+		/*
+		 * Do not allow realtime tasks into groups that have no runtime
+		 * assigned.
+		 */
+		if (rt_bandwidth_enabled() && rt_policy(policy) &&
+				task_group(p)->rt_bandwidth.rt_runtime == 0) {
+			__task_rq_unlock(rq);
+			raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+			return -EPERM;
+		}
+	}
+#endif
+
 	/* recheck policy now with rq lock held */
 	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
 		policy = oldpolicy = -1;
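
The pair of sched_setscheduler() hunks moves the RT-group runtime check from before the runqueue lock to after __task_rq_lock(), so the group the task belongs to cannot change between the check and the policy switch, at the cost of an error path that now has to drop the locks it holds. The general shape, checking and acting under one critical section, in a self-contained pthread sketch with an invented account structure:

#include <pthread.h>
#include <stdbool.h>

struct account {
	pthread_mutex_t lock;
	long balance;
};

/* Racy: the balance may change between the check and the update. */
static bool withdraw_racy(struct account *a, long amount)
{
	bool ok;

	pthread_mutex_lock(&a->lock);
	ok = (a->balance >= amount);
	pthread_mutex_unlock(&a->lock);

	if (!ok)
		return false;

	pthread_mutex_lock(&a->lock);
	a->balance -= amount;	/* may now drive the balance negative */
	pthread_mutex_unlock(&a->lock);
	return true;
}

/* Fixed: validate and modify inside one critical section, as the hunk
 * above does by re-checking the group runtime with the runqueue lock held. */
static bool withdraw(struct account *a, long amount)
{
	bool ok = false;

	pthread_mutex_lock(&a->lock);
	if (a->balance >= amount) {
		a->balance -= amount;
		ok = true;
	}
	pthread_mutex_unlock(&a->lock);
	return ok;
}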
@@ -1240,6 +1240,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 	 * effect of the currently running task from the load
 	 * of the current CPU:
 	 */
+	rcu_read_lock();
 	if (sync) {
 		tg = task_group(current);
 		weight = current->se.load.weight;
@@ -1275,6 +1276,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 		balanced = this_eff_load <= prev_eff_load;
 	} else
 		balanced = true;
+	rcu_read_unlock();
 
 	/*
 	 * If the currently running task will sleep within
@@ -154,14 +154,14 @@ static void tick_nohz_update_jiffies(ktime_t now)
  * Updates the per cpu time idle statistics counters
  */
 static void
-update_ts_time_stats(struct tick_sched *ts, ktime_t now, u64 *last_update_time)
+update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
 {
 	ktime_t delta;
 
 	if (ts->idle_active) {
 		delta = ktime_sub(now, ts->idle_entrytime);
 		ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
-		if (nr_iowait_cpu() > 0)
+		if (nr_iowait_cpu(cpu) > 0)
 			ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
 		ts->idle_entrytime = now;
 	}
@@ -175,19 +175,19 @@ static void tick_nohz_stop_idle(int cpu, ktime_t now)
 {
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 
-	update_ts_time_stats(ts, now, NULL);
+	update_ts_time_stats(cpu, ts, now, NULL);
 	ts->idle_active = 0;
 
 	sched_clock_idle_wakeup_event(0);
 }
 
-static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
+static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts)
 {
 	ktime_t now;
 
 	now = ktime_get();
 
-	update_ts_time_stats(ts, now, NULL);
+	update_ts_time_stats(cpu, ts, now, NULL);
 
 	ts->idle_entrytime = now;
 	ts->idle_active = 1;
@@ -216,7 +216,7 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
 	if (!tick_nohz_enabled)
 		return -1;
 
-	update_ts_time_stats(ts, ktime_get(), last_update_time);
+	update_ts_time_stats(cpu, ts, ktime_get(), last_update_time);
 
 	return ktime_to_us(ts->idle_sleeptime);
 }
@@ -242,7 +242,7 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
 	if (!tick_nohz_enabled)
 		return -1;
 
-	update_ts_time_stats(ts, ktime_get(), last_update_time);
+	update_ts_time_stats(cpu, ts, ktime_get(), last_update_time);
 
 	return ktime_to_us(ts->iowait_sleeptime);
 }
@@ -284,7 +284,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 	 */
 	ts->inidle = 1;
 
-	now = tick_nohz_start_idle(ts);
+	now = tick_nohz_start_idle(cpu, ts);
 
 	/*
 	 * If this cpu is offline and it is the one which updates
@@ -315,9 +315,6 @@ void tick_nohz_stop_sched_tick(int inidle)
 		goto end;
 	}
 
-	if (nohz_ratelimit(cpu))
-		goto end;
-
 	ts->idle_calls++;
 	/* Read jiffies and the time when jiffies were updated last */
 	do {
@@ -328,7 +325,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 	} while (read_seqretry(&xtime_lock, seq));
 
 	if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) ||
-	    arch_needs_cpu(cpu)) {
+	    arch_needs_cpu(cpu) || nohz_ratelimit(cpu)) {
 		next_jiffies = last_jiffies + 1;
 		delta_jiffies = 1;
 	} else {
@@ -96,7 +96,9 @@ int perf_trace_init(struct perf_event *p_event)
 	mutex_lock(&event_mutex);
 	list_for_each_entry(tp_event, &ftrace_events, list) {
 		if (tp_event->event.type == event_id &&
-		    tp_event->class && tp_event->class->perf_probe &&
+		    tp_event->class &&
+		    (tp_event->class->perf_probe ||
+		     tp_event->class->reg) &&
 		    try_module_get(tp_event->mod)) {
 			ret = perf_trace_event_init(tp_event, p_event);
 			break;