Merge branch 'linus' into sched/clock

Ingo Molnar
2008-08-11 08:59:21 +02:00
4238 changed files with 83626 additions and 37084 deletions

View File

@@ -707,12 +707,14 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 		if (status_get->mask & AUDIT_STATUS_ENABLED) {
 			err = audit_set_enabled(status_get->enabled,
 						loginuid, sessionid, sid);
-			if (err < 0) return err;
+			if (err < 0)
+				return err;
 		}
 		if (status_get->mask & AUDIT_STATUS_FAILURE) {
 			err = audit_set_failure(status_get->failure,
 						loginuid, sessionid, sid);
-			if (err < 0) return err;
+			if (err < 0)
+				return err;
 		}
 		if (status_get->mask & AUDIT_STATUS_PID) {
 			int new_pid = status_get->pid;
@@ -725,9 +727,12 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 			audit_pid = new_pid;
 			audit_nlk_pid = NETLINK_CB(skb).pid;
 		}
-		if (status_get->mask & AUDIT_STATUS_RATE_LIMIT)
+		if (status_get->mask & AUDIT_STATUS_RATE_LIMIT) {
 			err = audit_set_rate_limit(status_get->rate_limit,
 						   loginuid, sessionid, sid);
+			if (err < 0)
+				return err;
+		}
 		if (status_get->mask & AUDIT_STATUS_BACKLOG_LIMIT)
 			err = audit_set_backlog_limit(status_get->backlog_limit,
 						      loginuid, sessionid, sid);
@@ -1366,7 +1371,7 @@ int audit_string_contains_control(const char *string, size_t len)
 {
 	const unsigned char *p;
 	for (p = string; p < (const unsigned char *)string + len && *p; p++) {
-		if (*p == '"' || *p < 0x21 || *p > 0x7f)
+		if (*p == '"' || *p < 0x21 || *p > 0x7e)
 			return 1;
 	}
 	return 0;
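
The one-character change in audit_string_contains_control() tightens the printable-ASCII test: 0x7e ('~') is the last printable character, so 0x7f (DEL) is now treated as a control byte and forces hex-encoding of the string. A standalone sketch of the same check, compilable outside the kernel:

#include <stddef.h>

/* A byte is "clean" only if it is printable ASCII (0x21 '!' .. 0x7e '~')
 * and not a double quote; space (0x20) and DEL (0x7f) both fail. */
static int string_contains_control(const char *string, size_t len)
{
	const unsigned char *p;

	for (p = (const unsigned char *)string;
	     p < (const unsigned char *)string + len && *p; p++) {
		if (*p == '"' || *p < 0x21 || *p > 0x7e)
			return 1;
	}
	return 0;
}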

View File

@@ -1022,8 +1022,11 @@ static void audit_update_watch(struct audit_parent *parent,
 				struct audit_buffer *ab;
 				ab = audit_log_start(NULL, GFP_KERNEL,
 						     AUDIT_CONFIG_CHANGE);
+				audit_log_format(ab, "auid=%u ses=%u",
+					audit_get_loginuid(current),
+					audit_get_sessionid(current));
 				audit_log_format(ab,
-					"op=updated rules specifying path=");
+					" op=updated rules specifying path=");
 				audit_log_untrustedstring(ab, owatch->path);
 				audit_log_format(ab, " with dev=%u ino=%lu\n",
 						 dev, ino);
@@ -1058,7 +1061,10 @@ static void audit_remove_parent_watches(struct audit_parent *parent)
 			struct audit_buffer *ab;
 			ab = audit_log_start(NULL, GFP_KERNEL,
 					     AUDIT_CONFIG_CHANGE);
-			audit_log_format(ab, "op=remove rule path=");
+			audit_log_format(ab, "auid=%u ses=%u",
+				audit_get_loginuid(current),
+				audit_get_sessionid(current));
+			audit_log_format(ab, " op=remove rule path=");
 			audit_log_untrustedstring(ab, w->path);
 			if (r->filterkey) {
 				audit_log_format(ab, " key=");

View File

@@ -243,6 +243,9 @@ static inline int open_arg(int flags, int mask)
 static int audit_match_perm(struct audit_context *ctx, int mask)
 {
-	unsigned n = ctx->major;
+	unsigned n;
+	if (unlikely(!ctx))
+		return 0;
+	n = ctx->major;
 	switch (audit_classify_syscall(ctx->arch, n)) {
 	case 0: /* native */
@@ -284,6 +287,10 @@ static int audit_match_filetype(struct audit_context *ctx, int which)
 {
 	unsigned index = which & ~S_IFMT;
 	mode_t mode = which & S_IFMT;
+
+	if (unlikely(!ctx))
+		return 0;
+
 	if (index >= ctx->name_count)
 		return 0;
 	if (ctx->names[index].ino == -1)
@@ -610,7 +617,7 @@ static int audit_filter_rules(struct task_struct *tsk,
 		if (!result)
 			return 0;
 	}
-	if (rule->filterkey)
+	if (rule->filterkey && ctx)
 		ctx->filterkey = kstrdup(rule->filterkey, GFP_ATOMIC);
 	switch (rule->action) {
 	case AUDIT_NEVER: *state = AUDIT_DISABLED;	break;
@@ -2375,7 +2382,7 @@ int __audit_signal_info(int sig, struct task_struct *t)
 	struct audit_context *ctx = tsk->audit_context;
 	if (audit_pid && t->tgid == audit_pid) {
-		if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1) {
+		if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) {
 			audit_sig_pid = tsk->pid;
 			if (tsk->loginuid != -1)
 				audit_sig_uid = tsk->loginuid;
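
Both audit_match_perm() and audit_match_filetype() gain the same guard: a task that is not being audited has no audit_context, so the filter helpers must treat a NULL ctx as "no match" instead of dereferencing it. A minimal compilable sketch of the pattern (hypothetical types, not the kernel's):

#include <stdio.h>

struct ctx { unsigned major; };	/* stand-in for struct audit_context */

static int match_major(const struct ctx *ctx, unsigned wanted)
{
	if (!ctx)
		return 0;	/* no syscall context: nothing can match */
	return ctx->major == wanted;
}

int main(void)
{
	struct ctx c = { .major = 59 };

	printf("%d %d\n", match_major(&c, 59), match_major(NULL, 59)); /* 1 0 */
	return 0;
}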

View File

@@ -355,32 +355,6 @@ static struct css_set *find_existing_css_set(
 	return NULL;
 }
 
-/*
- * allocate_cg_links() allocates "count" cg_cgroup_link structures
- * and chains them on tmp through their cgrp_link_list fields. Returns 0 on
- * success or a negative error
- */
-static int allocate_cg_links(int count, struct list_head *tmp)
-{
-	struct cg_cgroup_link *link;
-	struct cg_cgroup_link *saved_link;
-	int i;
-	INIT_LIST_HEAD(tmp);
-	for (i = 0; i < count; i++) {
-		link = kmalloc(sizeof(*link), GFP_KERNEL);
-		if (!link) {
-			list_for_each_entry_safe(link, saved_link, tmp,
-						 cgrp_link_list) {
-				list_del(&link->cgrp_link_list);
-				kfree(link);
-			}
-			return -ENOMEM;
-		}
-		list_add(&link->cgrp_link_list, tmp);
-	}
-	return 0;
-}
-
 static void free_cg_links(struct list_head *tmp)
 {
 	struct cg_cgroup_link *link;
@@ -392,6 +366,27 @@ static void free_cg_links(struct list_head *tmp)
 	}
 }
 
+/*
+ * allocate_cg_links() allocates "count" cg_cgroup_link structures
+ * and chains them on tmp through their cgrp_link_list fields. Returns 0 on
+ * success or a negative error
+ */
+static int allocate_cg_links(int count, struct list_head *tmp)
+{
+	struct cg_cgroup_link *link;
+	int i;
+	INIT_LIST_HEAD(tmp);
+	for (i = 0; i < count; i++) {
+		link = kmalloc(sizeof(*link), GFP_KERNEL);
+		if (!link) {
+			free_cg_links(tmp);
+			return -ENOMEM;
+		}
+		list_add(&link->cgrp_link_list, tmp);
+	}
+	return 0;
+}
+
 /*
  * find_css_set() takes an existing cgroup group and a
  * cgroup object, and returns a css_set object that's
@@ -956,7 +951,6 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
 	struct super_block *sb;
 	struct cgroupfs_root *root;
 	struct list_head tmp_cg_links;
-	INIT_LIST_HEAD(&tmp_cg_links);
 
 	/* First find the desired set of subsystems */
 	ret = parse_cgroupfs_options(data, &opts);
@@ -1424,14 +1418,17 @@ static ssize_t cgroup_write_string(struct cgroup *cgrp, struct cftype *cft,
 		if (buffer == NULL)
 			return -ENOMEM;
 	}
-	if (nbytes && copy_from_user(buffer, userbuf, nbytes))
-		return -EFAULT;
+	if (nbytes && copy_from_user(buffer, userbuf, nbytes)) {
+		retval = -EFAULT;
+		goto out;
+	}
 
 	buffer[nbytes] = 0;	/* nul-terminate */
 	strstrip(buffer);
 	retval = cft->write_string(cgrp, cft, buffer);
 	if (!retval)
 		retval = nbytes;
+out:
 	if (buffer != local_buffer)
 		kfree(buffer);
 	return retval;
@@ -2371,7 +2368,7 @@ static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 	return cgroup_create(c_parent, dentry, mode | S_IFDIR);
 }
 
-static inline int cgroup_has_css_refs(struct cgroup *cgrp)
+static int cgroup_has_css_refs(struct cgroup *cgrp)
 {
 	/* Check the reference count on each subsystem.  Since we
 	 * already established that there are no tasks in the
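
The cgroup_write_string() change fixes a leak: when copy_from_user() failed, the early return skipped the kfree() of a buffer that may have been kmalloc'ed. Routing every exit through a single out: label is the standard kernel idiom; a self-contained sketch with userspace stand-ins for the kernel calls:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

static int process_write(const char *userbuf, size_t nbytes)
{
	char local_buffer[64];
	char *buffer = local_buffer;
	int retval = 0;

	if (nbytes >= sizeof(local_buffer)) {
		buffer = malloc(nbytes + 1);
		if (!buffer)
			return -ENOMEM;	/* nothing to clean up yet */
	}

	if (!userbuf) {			/* models copy_from_user() failing */
		retval = -EFAULT;
		goto out;		/* NOT "return": buffer may be on the heap */
	}
	memcpy(buffer, userbuf, nbytes);
	buffer[nbytes] = '\0';
	retval = (int)nbytes;
out:
	if (buffer != local_buffer)
		free(buffer);
	return retval;
}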

View File

@@ -54,7 +54,6 @@
 #include <asm/uaccess.h>
 #include <asm/atomic.h>
 #include <linux/mutex.h>
-#include <linux/kfifo.h>
 #include <linux/workqueue.h>
 #include <linux/cgroup.h>
@@ -486,13 +485,38 @@ static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
 static void
 update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
 {
 	if (!dattr)
 		return;
 	if (dattr->relax_domain_level < c->relax_domain_level)
 		dattr->relax_domain_level = c->relax_domain_level;
 	return;
 }
 
+static void
+update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
+{
+	LIST_HEAD(q);
+
+	list_add(&c->stack_list, &q);
+	while (!list_empty(&q)) {
+		struct cpuset *cp;
+		struct cgroup *cont;
+		struct cpuset *child;
+
+		cp = list_first_entry(&q, struct cpuset, stack_list);
+		list_del(q.next);
+
+		if (cpus_empty(cp->cpus_allowed))
+			continue;
+
+		if (is_sched_load_balance(cp))
+			update_domain_attr(dattr, cp);
+
+		list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
+			child = cgroup_cs(cont);
+			list_add_tail(&child->stack_list, &q);
+		}
+	}
+}
+
 /*
  * rebuild_sched_domains()
  *
@@ -532,7 +556,7 @@ update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
  * So the reverse nesting would risk an ABBA deadlock.
  *
  * The three key local variables below are:
- *    q  - a kfifo queue of cpuset pointers, used to implement a
+ *    q  - a linked-list queue of cpuset pointers, used to implement a
  *	   top-down scan of all cpusets.  This scan loads a pointer
  *	   to each cpuset marked is_sched_load_balance into the
  *	   array 'csa'.  For our purposes, rebuilding the schedulers
@@ -567,7 +591,7 @@ update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
 void rebuild_sched_domains(void)
 {
-	struct kfifo *q;	/* queue of cpusets to be scanned */
+	LIST_HEAD(q);		/* queue of cpusets to be scanned*/
 	struct cpuset *cp;	/* scans q */
 	struct cpuset **csa;	/* array of all cpuset ptrs */
 	int csn;		/* how many cpuset ptrs in csa so far */
@@ -577,7 +601,6 @@ void rebuild_sched_domains(void)
 	int ndoms;		/* number of sched domains in result */
 	int nslot;		/* next empty doms[] cpumask_t slot */
 
-	q = NULL;
 	csa = NULL;
 	doms = NULL;
 	dattr = NULL;
@@ -591,35 +614,42 @@ void rebuild_sched_domains(void)
 		dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
 		if (dattr) {
 			*dattr = SD_ATTR_INIT;
-			update_domain_attr(dattr, &top_cpuset);
+			update_domain_attr_tree(dattr, &top_cpuset);
 		}
 		*doms = top_cpuset.cpus_allowed;
 		goto rebuild;
 	}
 
-	q = kfifo_alloc(number_of_cpusets * sizeof(cp), GFP_KERNEL, NULL);
-	if (IS_ERR(q))
-		goto done;
 	csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL);
 	if (!csa)
 		goto done;
 	csn = 0;
 
-	cp = &top_cpuset;
-	__kfifo_put(q, (void *)&cp, sizeof(cp));
-	while (__kfifo_get(q, (void *)&cp, sizeof(cp))) {
+	list_add(&top_cpuset.stack_list, &q);
+	while (!list_empty(&q)) {
 		struct cgroup *cont;
 		struct cpuset *child;	/* scans child cpusets of cp */
 
+		cp = list_first_entry(&q, struct cpuset, stack_list);
+		list_del(q.next);
+
 		if (cpus_empty(cp->cpus_allowed))
 			continue;
 
-		if (is_sched_load_balance(cp))
+		/*
+		 * All child cpusets contain a subset of the parent's cpus, so
+		 * just skip them, and then we call update_domain_attr_tree()
+		 * to calc relax_domain_level of the corresponding sched
+		 * domain.
+		 */
+		if (is_sched_load_balance(cp)) {
 			csa[csn++] = cp;
+			continue;
+		}
 
 		list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
 			child = cgroup_cs(cont);
-			__kfifo_put(q, (void *)&child, sizeof(cp));
+			list_add_tail(&child->stack_list, &q);
 		}
 	}
@@ -686,7 +716,7 @@ restart:
 				cpus_or(*dp, *dp, b->cpus_allowed);
 				b->pn = -1;
 				if (dattr)
-					update_domain_attr(dattr
+					update_domain_attr_tree(dattr
 							   + nslot, b);
 			}
 		}
@@ -702,8 +732,6 @@ rebuild:
 	put_online_cpus();
 
 done:
-	if (q && !IS_ERR(q))
-		kfifo_free(q);
 	kfree(csa);
 	/* Don't kfree(doms) -- partition_sched_domains() does that. */
 	/* Don't kfree(dattr) -- partition_sched_domains() does that. */
@@ -1833,24 +1861,21 @@ static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
  */
 static void scan_for_empty_cpusets(const struct cpuset *root)
 {
+	LIST_HEAD(queue);
 	struct cpuset *cp;	/* scans cpusets being updated */
 	struct cpuset *child;	/* scans child cpusets of cp */
-	struct list_head queue;
 	struct cgroup *cont;
 	nodemask_t oldmems;
 
-	INIT_LIST_HEAD(&queue);
-
 	list_add_tail((struct list_head *)&root->stack_list, &queue);
 
 	while (!list_empty(&queue)) {
-		cp = container_of(queue.next, struct cpuset, stack_list);
+		cp = list_first_entry(&queue, struct cpuset, stack_list);
 		list_del(queue.next);
 		list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
 			child = cgroup_cs(cont);
			list_add_tail(&child->stack_list, &queue);
 		}
 		cont = cp->css.cgroup;
 
 		/* Continue past cpusets with all cpus, mems online */
 		if (cpus_subset(cp->cpus_allowed, cpu_online_map) &&
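
All three cpuset traversals now share one technique: instead of a kfifo that has to be kfifo_alloc()'ed, and whose allocation can fail, the breadth-first queue is threaded through a list_head embedded in each cpuset (stack_list), so queueing a node costs no memory at all. The trade-off is that a node can sit on only one such queue at a time. A compilable miniature of the idiom, with a minimal intrusive list in the spirit of <linux/list.h>:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };
#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct node {
	const char *name;
	struct node *child[2];		/* toy fixed fan-out */
	struct list_head stack_list;	/* embedded queue linkage */
};

/* Allocation-free BFS: the queue lives inside the nodes themselves. */
static void walk(struct node *root)
{
	struct list_head q = LIST_HEAD_INIT(q);
	int i;

	list_add_tail(&root->stack_list, &q);
	while (!list_empty(&q)) {
		struct node *n = container_of(q.next, struct node, stack_list);

		list_del(q.next);
		printf("%s\n", n->name);
		for (i = 0; i < 2; i++)
			if (n->child[i])
				list_add_tail(&n->child[i]->stack_list, &q);
	}
}

int main(void)
{
	struct node a = { "a" }, b = { "b" }, root = { "root", { &a, &b } };

	walk(&root);	/* prints: root a b */
	return 0;
}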

View File

@@ -77,15 +77,14 @@ void *dma_mark_declared_memory_occupied(struct device *dev,
 {
 	struct dma_coherent_mem *mem = dev->dma_mem;
 	int pos, err;
-	int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1);
-	pages >>= PAGE_SHIFT;
+
+	size += device_addr & ~PAGE_MASK;
 
 	if (!mem)
 		return ERR_PTR(-EINVAL);
 
 	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
-	err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
+	err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
 	if (err != 0)
 		return ERR_PTR(err);
 	return mem->virt_base + (pos << PAGE_SHIFT);
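
The fix hinges on get_order()'s contract: it takes a size in bytes and returns the smallest order such that 2^order pages cover it. The old code handed it a page count, so any region up to 4096 pages collapsed to order 0. A userspace re-implementation that demonstrates the difference (this get_order() mirrors the kernel's behaviour, assuming 4 KiB pages):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static int get_order(unsigned long size)	/* size in BYTES, must be > 0 */
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	unsigned long size  = 5 * PAGE_SIZE;		/* want 5 pages */
	unsigned long pages = size >> PAGE_SHIFT;

	printf("get_order(size)  = %d\n", get_order(size));	/* 3 -> 8 pages, enough */
	printf("get_order(pages) = %d\n", get_order(pages));	/* 0 -> 1 page, too few */
	return 0;
}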

View File

@@ -911,10 +911,10 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
 		tsk->exit_signal = SIGCHLD;
 
 	signal = tracehook_notify_death(tsk, &cookie, group_dead);
-	if (signal > 0)
+	if (signal >= 0)
 		signal = do_notify_parent(tsk, signal);
-	tsk->exit_state = signal < 0 ? EXIT_DEAD : EXIT_ZOMBIE;
+	tsk->exit_state = signal == DEATH_REAP ? EXIT_DEAD : EXIT_ZOMBIE;
 
 	/* mt-exec, de_thread() is waiting for us */
 	if (thread_group_leader(tsk) &&
@@ -927,7 +927,7 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
 	tracehook_report_death(tsk, signal, cookie, group_dead);
 
 	/* If the process is dead, release it - nobody will wait for it */
-	if (signal < 0)
+	if (signal == DEATH_REAP)
 		release_task(tsk);
 }

View File

@@ -323,7 +323,8 @@ static int __irq_set_trigger(struct irq_chip *chip, unsigned int irq,
 	ret = chip->set_type(irq, flags & IRQF_TRIGGER_MASK);
 
 	if (ret)
-		pr_err("setting flow type for irq %u failed (%pF)\n",
+		pr_err("setting trigger mode %d for irq %u failed (%pF)\n",
+			(int)(flags & IRQF_TRIGGER_MASK),
 			irq, chip->set_type);
 
 	return ret;

View File

@@ -56,12 +56,14 @@
 static int kgdb_break_asap;
 
+#define KGDB_MAX_THREAD_QUERY 17
 struct kgdb_state {
 	int			ex_vector;
 	int			signo;
 	int			err_code;
 	int			cpu;
 	int			pass_exception;
+	unsigned long		thr_query;
 	unsigned long		threadid;
 	long			kgdb_usethreadid;
 	struct pt_regs		*linux_regs;
@@ -166,13 +168,6 @@ early_param("nokgdbroundup", opt_nokgdbroundup);
  * Weak aliases for breakpoint management,
  * can be overriden by architectures when needed:
  */
-int __weak kgdb_validate_break_address(unsigned long addr)
-{
-	char tmp_variable[BREAK_INSTR_SIZE];
-
-	return probe_kernel_read(tmp_variable, (char *)addr, BREAK_INSTR_SIZE);
-}
-
 int __weak kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr)
 {
 	int err;
@@ -191,6 +186,25 @@ int __weak kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle)
 				  (char *)bundle, BREAK_INSTR_SIZE);
 }
 
+int __weak kgdb_validate_break_address(unsigned long addr)
+{
+	char tmp_variable[BREAK_INSTR_SIZE];
+	int err;
+	/* Validate setting the breakpoint and then removing it.  In the
+	 * remove fails, the kernel needs to emit a bad message because we
+	 * are deep trouble not being able to put things back the way we
+	 * found them.
+	 */
+	err = kgdb_arch_set_breakpoint(addr, tmp_variable);
+	if (err)
+		return err;
+	err = kgdb_arch_remove_breakpoint(addr, tmp_variable);
+	if (err)
+		printk(KERN_ERR "KGDB: Critical breakpoint error, kernel "
+		   "memory destroyed at: %lx", addr);
+	return err;
+}
+
 unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs)
 {
 	return instruction_pointer(regs);
@@ -433,9 +447,14 @@ int kgdb_hex2long(char **ptr, unsigned long *long_val)
 {
 	int hex_val;
 	int num = 0;
+	int negate = 0;
 
 	*long_val = 0;
 
+	if (**ptr == '-') {
+		negate = 1;
+		(*ptr)++;
+	}
 	while (**ptr) {
 		hex_val = hex(**ptr);
 		if (hex_val < 0)
@@ -446,6 +465,9 @@ int kgdb_hex2long(char **ptr, unsigned long *long_val)
 		(*ptr)++;
 	}
 
+	if (negate)
+		*long_val = -*long_val;
+
 	return num;
 }
@@ -515,10 +537,16 @@ static void int_to_threadref(unsigned char *id, int value)
 static struct task_struct *getthread(struct pt_regs *regs, int tid)
 {
 	/*
-	 * Non-positive TIDs are remapped idle tasks:
+	 * Non-positive TIDs are remapped to the cpu shadow information
 	 */
-	if (tid <= 0)
-		return idle_task(-tid);
+	if (tid == 0 || tid == -1)
+		tid = -atomic_read(&kgdb_active) - 2;
+	if (tid < 0) {
+		if (kgdb_info[-tid - 2].task)
+			return kgdb_info[-tid - 2].task;
+		else
+			return idle_task(-tid - 2);
+	}
 
 	/*
 	 * find_task_by_pid_ns() does not take the tasklist lock anymore
@@ -725,14 +753,15 @@ setundefined:
 }
 
 /*
- * Remap normal tasks to their real PID, idle tasks to -1 ... -NR_CPUs:
+ * Remap normal tasks to their real PID,
+ * CPU shadow threads are mapped to -CPU - 2
 */
 static inline int shadow_pid(int realpid)
 {
 	if (realpid)
 		return realpid;
 
-	return -1-raw_smp_processor_id();
+	return -raw_smp_processor_id() - 2;
 }
 
 static char gdbmsgbuf[BUFMAX + 1];
@@ -826,7 +855,7 @@ static void gdb_cmd_getregs(struct kgdb_state *ks)
 		local_debuggerinfo = kgdb_info[ks->cpu].debuggerinfo;
 	} else {
 		local_debuggerinfo = NULL;
-		for (i = 0; i < NR_CPUS; i++) {
+		for_each_online_cpu(i) {
 			/*
 			 * Try to find the task on some other
 			 * or possibly this node if we do not
@@ -960,10 +989,13 @@ static int gdb_cmd_reboot(struct kgdb_state *ks)
 /* Handle the 'q' query packets */
 static void gdb_cmd_query(struct kgdb_state *ks)
 {
-	struct task_struct *thread;
+	struct task_struct *g;
+	struct task_struct *p;
 	unsigned char thref[8];
 	char *ptr;
 	int i;
+	int cpu;
+	int finished = 0;
 
 	switch (remcom_in_buffer[1]) {
 	case 's':
@@ -973,22 +1005,34 @@ static void gdb_cmd_query(struct kgdb_state *ks)
 			break;
 		}
 
-		if (remcom_in_buffer[1] == 'f')
-			ks->threadid = 1;
-
+		i = 0;
 		remcom_out_buffer[0] = 'm';
 		ptr = remcom_out_buffer + 1;
-
-		for (i = 0; i < 17; ks->threadid++) {
-			thread = getthread(ks->linux_regs, ks->threadid);
-			if (thread) {
-				int_to_threadref(thref, ks->threadid);
-				pack_threadid(ptr, thref);
-				ptr += BUF_THREAD_ID_SIZE;
-				*(ptr++) = ',';
-				i++;
-			}
-		}
+		if (remcom_in_buffer[1] == 'f') {
+			/* Each cpu is a shadow thread */
+			for_each_online_cpu(cpu) {
+				ks->thr_query = 0;
+				int_to_threadref(thref, -cpu - 2);
+				pack_threadid(ptr, thref);
+				ptr += BUF_THREAD_ID_SIZE;
+				*(ptr++) = ',';
+				i++;
+			}
+		}
+
+		do_each_thread(g, p) {
+			if (i >= ks->thr_query && !finished) {
+				int_to_threadref(thref, p->pid);
+				pack_threadid(ptr, thref);
+				ptr += BUF_THREAD_ID_SIZE;
+				*(ptr++) = ',';
+				ks->thr_query++;
+				if (ks->thr_query % KGDB_MAX_THREAD_QUERY == 0)
+					finished = 1;
+			}
+			i++;
+		} while_each_thread(g, p);
+
 		*(--ptr) = '\0';
 		break;
@@ -1011,15 +1055,15 @@ static void gdb_cmd_query(struct kgdb_state *ks)
 			error_packet(remcom_out_buffer, -EINVAL);
 			break;
 		}
-		if (ks->threadid > 0) {
+		if ((int)ks->threadid > 0) {
 			kgdb_mem2hex(getthread(ks->linux_regs,
 					ks->threadid)->comm,
 					remcom_out_buffer, 16);
 		} else {
 			static char tmpstr[23 + BUF_THREAD_ID_SIZE];
 
-			sprintf(tmpstr, "Shadow task %d for pid 0",
-				(int)(-ks->threadid-1));
+			sprintf(tmpstr, "shadowCPU%d",
+				(int)(-ks->threadid - 2));
 			kgdb_mem2hex(tmpstr, remcom_out_buffer, strlen(tmpstr));
 		}
 		break;
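
The kgdb rework replaces "idle task per negative TID" with per-CPU shadow threads. The encoding has to leave 0 and -1 untouched because the gdb remote protocol reserves them for "any thread" and "all threads", hence the -cpu - 2 mapping. A compilable sketch of the mapping and its inverse:

#include <stdio.h>

/* Real tasks keep their PID; each CPU gets a shadow ID of -cpu - 2. */
static int shadow_pid(int realpid, int cpu)
{
	if (realpid)
		return realpid;
	return -cpu - 2;
}

static int shadow_cpu(int tid)	/* inverse mapping, for tid < -1 */
{
	return -tid - 2;
}

int main(void)
{
	printf("pid 1234 -> %d\n", shadow_pid(1234, 0));	/* 1234 */
	printf("cpu 0    -> %d\n", shadow_pid(0, 0));	/* -2 */
	printf("cpu 3    -> %d\n", shadow_pid(0, 3));	/* -5 */
	printf("tid -5   -> cpu %d\n", shadow_cpu(-5));	/* 3 */
	return 0;
}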

View File

@@ -125,6 +125,11 @@ void marker_probe_cb(const struct marker *mdata, void *call_private, ...)
 	} else {
 		struct marker_probe_closure *multi;
 		int i;
+		/*
+		 * Read mdata->ptype before mdata->multi.
+		 */
+		smp_rmb();
+		multi = mdata->multi;
 		/*
 		 * multi points to an array, therefore accessing the array
 		 * depends on reading multi. However, even in this case,
@@ -133,7 +138,6 @@ void marker_probe_cb(const struct marker *mdata, void *call_private, ...)
 		 * in the fast path, so put the explicit barrier here.
 		 */
 		smp_read_barrier_depends();
-		multi = mdata->multi;
 		for (i = 0; multi[i].func; i++) {
 			va_start(args, call_private);
 			multi[i].func(multi[i].probe_private, call_private,
@@ -174,6 +178,11 @@ void marker_probe_cb_noarg(const struct marker *mdata, void *call_private, ...)
 	} else {
 		struct marker_probe_closure *multi;
 		int i;
+		/*
+		 * Read mdata->ptype before mdata->multi.
+		 */
+		smp_rmb();
+		multi = mdata->multi;
 		/*
 		 * multi points to an array, therefore accessing the array
 		 * depends on reading multi. However, even in this case,
@@ -182,7 +191,6 @@ void marker_probe_cb_noarg(const struct marker *mdata, void *call_private, ...)
 		 * in the fast path, so put the explicit barrier here.
 		 */
 		smp_read_barrier_depends();
-		multi = mdata->multi;
 		for (i = 0; multi[i].func; i++)
 			multi[i].func(multi[i].probe_private, call_private,
 					mdata->format, &args);
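
The marker fix is about pairing: a reader that has seen the new ptype must not read a stale multi pointer, so the load of mdata->multi now happens after an smp_rmb() that pairs with an smp_wmb() on the update side (the update side is assumed here; it is not part of the hunk). In C11 atomics the same ordering is acquire/release; a compilable rendering of the idea:

#include <stdatomic.h>
#include <stddef.h>

struct closure { void (*func)(void); };

static struct closure *multi_arr;	/* guarded pointer */
static atomic_int ptype;		/* flag the readers test */

static void publish(struct closure *new_arr)
{
	multi_arr = new_arr;
	/* release pairs with the readers' acquire, like smp_wmb()/smp_rmb() */
	atomic_store_explicit(&ptype, 1, memory_order_release);
}

static struct closure *consume(void)
{
	if (atomic_load_explicit(&ptype, memory_order_acquire))
		return multi_arr;	/* guaranteed to see the new array */
	return NULL;
}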

View File

@@ -34,6 +34,7 @@
 /***
  * mutex_init - initialize the mutex
  * @lock: the mutex to be initialized
+ * @key: the lock_class_key for the class; used by mutex lock debugging
  *
  * Initialize the mutex to unlocked state.
 *
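
For context, the @key parameter being documented is normally invisible to callers: in the lock-debugging configurations, mutex_init() is a macro that declares one static struct lock_class_key per call site and passes it to __mutex_init(), which is how lockdep tells lock classes apart. Typical usage, as a sketch:

static DEFINE_MUTEX(cache_lock);	/* compile-time initialization */

struct cache {
	struct mutex lock;
};

static void cache_init(struct cache *c)
{
	mutex_init(&c->lock);	/* one lock class per call site */
	mutex_lock(&c->lock);
	/* ... critical section ... */
	mutex_unlock(&c->lock);
}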

View File

@@ -24,7 +24,7 @@
  * requirement that the application has is cleaned up when closes the file
  * pointer or exits the pm_qos_object will get an opportunity to clean up.
  *
- * mark gross mgross@linux.intel.com
+ * Mark Gross <mgross@linux.intel.com>
  */
 
 #include <linux/pm_qos_params.h>
@@ -211,8 +211,8 @@ EXPORT_SYMBOL_GPL(pm_qos_requirement);
  * @value: defines the qos request
 *
 * This function inserts a new entry in the pm_qos_class list of requested qos
- * performance charactoistics.  It recomputes the agregate QoS expectations for
- * the pm_qos_class of parrameters.
+ * performance characteristics.  It recomputes the aggregate QoS expectations
+ * for the pm_qos_class of parameters.
 */
 int pm_qos_add_requirement(int pm_qos_class, char *name, s32 value)
 {
@@ -250,10 +250,10 @@ EXPORT_SYMBOL_GPL(pm_qos_add_requirement);
 * @name: identifies the request
 * @value: defines the qos request
 *
- * Updates an existing qos requierement for the pm_qos_class of parameters along
+ * Updates an existing qos requirement for the pm_qos_class of parameters along
 * with updating the target pm_qos_class value.
 *
- * If the named request isn't in the lest then no change is made.
+ * If the named request isn't in the list then no change is made.
 */
 int pm_qos_update_requirement(int pm_qos_class, char *name, s32 new_value)
 {
@@ -287,7 +287,7 @@ EXPORT_SYMBOL_GPL(pm_qos_update_requirement);
 * @pm_qos_class: identifies which list of qos request to us
 * @name: identifies the request
 *
- * Will remove named qos request from pm_qos_class list of parrameters and
+ * Will remove named qos request from pm_qos_class list of parameters and
 * recompute the current target value for the pm_qos_class.
 */
 void pm_qos_remove_requirement(int pm_qos_class, char *name)
@@ -319,7 +319,7 @@ EXPORT_SYMBOL_GPL(pm_qos_remove_requirement);
 * @notifier: notifier block managed by caller.
 *
 * will register the notifier into a notification chain that gets called
- * uppon changes to the pm_qos_class target value.
+ * upon changes to the pm_qos_class target value.
 */
 int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
 {
@@ -338,7 +338,7 @@ EXPORT_SYMBOL_GPL(pm_qos_add_notifier);
 * @notifier: notifier block to be removed.
 *
 * will remove the notifier from the notification chain that gets called
- * uppon changes to the pm_qos_class target value.
+ * upon changes to the pm_qos_class target value.
 */
 int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
 {
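
A hedged usage sketch of the 2008-era API these comments document ("mydriver" is a made-up requester name): a driver caps acceptable CPU DMA latency while streaming and drops the requirement when done.

#include <linux/pm_qos_params.h>

static int mydriver_start_streaming(void)
{
	/* tolerate at most 50 usec of CPU DMA latency while streaming */
	return pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY,
				      "mydriver", 50);
}

static void mydriver_stop_streaming(void)
{
	pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, "mydriver");
}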

View File

@@ -1309,14 +1309,14 @@ void tty_write_message(struct tty_struct *tty, char *msg)
 #if defined CONFIG_PRINTK
+DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10);
+
 /*
  * printk rate limiting, lifted from the networking subsystem.
  *
- * This enforces a rate limit: not more than one kernel message
- * every printk_ratelimit_jiffies to make a denial-of-service
- * attack impossible.
+ * This enforces a rate limit: not more than 10 kernel messages
+ * every 5s to make a denial-of-service attack impossible.
 */
-DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10);
 int printk_ratelimit(void)
 {
 	return __ratelimit(&printk_ratelimit_state);
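
With the state-based design, printk_ratelimit() is just a wrapper over a shared ratelimit_state, and subsystems can declare independent budgets instead of sharing printk's. A sketch (the state name and message are made up; DEFINE_RATELIMIT_STATE() and __ratelimit() come from the new <linux/ratelimit.h>):

#include <linux/kernel.h>
#include <linux/ratelimit.h>

/* private budget: at most 5 messages per 10 seconds */
static DEFINE_RATELIMIT_STATE(mydrv_rs, 10 * HZ, 5);

static void report_slow_path(int id)
{
	if (__ratelimit(&mydrv_rs))
		printk(KERN_INFO "mydrv: slow path hit on %d\n", id);
}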

View File

@@ -944,6 +944,10 @@ static void relay_file_read_consume(struct rchan_buf *buf,
 	size_t n_subbufs = buf->chan->n_subbufs;
 	size_t read_subbuf;
 
+	if (buf->subbufs_produced == buf->subbufs_consumed &&
+	    buf->offset == buf->bytes_consumed)
+		return;
+
 	if (buf->bytes_consumed + bytes_consumed > subbuf_size) {
 		relay_subbufs_consumed(buf->chan, buf->cpu, 1);
 		buf->bytes_consumed = 0;
@@ -975,6 +979,8 @@ static int relay_file_read_avail(struct rchan_buf *buf, size_t read_pos)
 	relay_file_read_consume(buf, read_pos, 0);
 
+	consumed = buf->subbufs_consumed;
+
 	if (unlikely(buf->offset > subbuf_size)) {
 		if (produced == consumed)
 			return 0;
@@ -993,8 +999,12 @@ static int relay_file_read_avail(struct rchan_buf *buf, size_t read_pos)
 	if (consumed > produced)
 		produced += n_subbufs * subbuf_size;
 
-	if (consumed == produced)
+	if (consumed == produced) {
+		if (buf->offset == subbuf_size &&
+		    buf->subbufs_produced > buf->subbufs_consumed)
+			return 1;
 		return 0;
+	}
 
 	return 1;
 }

View File

@@ -490,7 +490,7 @@ resource_size_t resource_alignment(struct resource *res)
 {
 	switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
 	case IORESOURCE_SIZEALIGN:
-		return res->end - res->start + 1;
+		return resource_size(res);
 	case IORESOURCE_STARTALIGN:
 		return res->start;
 	default:
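
resource_size() is the existing helper from <linux/ioport.h>; using it instead of open-coded end - start + 1 arithmetic avoids the classic off-by-one on the inclusive [start, end] convention:

static inline resource_size_t resource_size(const struct resource *res)
{
	return res->end - res->start + 1;	/* [start, end] is inclusive */
}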

View File

@@ -5004,19 +5004,21 @@ recheck:
 			return -EPERM;
 	}
 
+	if (user) {
 #ifdef CONFIG_RT_GROUP_SCHED
-	/*
-	 * Do not allow realtime tasks into groups that have no runtime
-	 * assigned.
-	 */
-	if (user
-	    && rt_policy(policy) && task_group(p)->rt_bandwidth.rt_runtime == 0)
-		return -EPERM;
+		/*
+		 * Do not allow realtime tasks into groups that have no runtime
+		 * assigned.
+		 */
+		if (rt_policy(policy) && task_group(p)->rt_bandwidth.rt_runtime == 0)
+			return -EPERM;
 #endif
 
-	retval = security_task_setscheduler(p, policy, param);
-	if (retval)
-		return retval;
+		retval = security_task_setscheduler(p, policy, param);
+		if (retval)
+			return retval;
+	}
+
 	/*
 	 * make sure no PI-waiters arrive (or leave) while we are
 	 * changing the priority of the task:
@@ -7671,34 +7673,34 @@ static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
 }
 
 #ifdef CONFIG_SCHED_MC
-static ssize_t sched_mc_power_savings_show(struct sys_device *dev,
-				struct sysdev_attribute *attr, char *page)
+static ssize_t sched_mc_power_savings_show(struct sysdev_class *class,
+					   char *page)
 {
 	return sprintf(page, "%u\n", sched_mc_power_savings);
 }
-static ssize_t sched_mc_power_savings_store(struct sys_device *dev,
-					    struct sysdev_attribute *attr,
+static ssize_t sched_mc_power_savings_store(struct sysdev_class *class,
 					    const char *buf, size_t count)
 {
 	return sched_power_savings_store(buf, count, 0);
 }
-static SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show,
-		   sched_mc_power_savings_store);
+static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644,
+			 sched_mc_power_savings_show,
+			 sched_mc_power_savings_store);
 #endif
 
 #ifdef CONFIG_SCHED_SMT
-static ssize_t sched_smt_power_savings_show(struct sys_device *dev,
-				struct sysdev_attribute *attr, char *page)
+static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev,
+					    char *page)
 {
 	return sprintf(page, "%u\n", sched_smt_power_savings);
 }
-static ssize_t sched_smt_power_savings_store(struct sys_device *dev,
-					     struct sysdev_attribute *attr,
+static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev,
 					     const char *buf, size_t count)
 {
 	return sched_power_savings_store(buf, count, 1);
 }
-static SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show,
+static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644,
+			 sched_smt_power_savings_show,
 		   sched_smt_power_savings_store);
 #endif

View File

@@ -212,9 +212,7 @@ static inline int __sched __down_common(struct semaphore *sem, long state,
 	waiter.up = 0;
 
 	for (;;) {
-		if (state == TASK_INTERRUPTIBLE && signal_pending(task))
-			goto interrupted;
-		if (state == TASK_KILLABLE && fatal_signal_pending(task))
+		if (signal_pending_state(state, task))
 			goto interrupted;
 		if (timeout <= 0)
 			goto timed_out;
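
signal_pending_state() folds the two sleeps into one test. Roughly as defined in <linux/sched.h>, it returns nonzero only when the sleeping state actually cares about the pending signal: any signal for TASK_INTERRUPTIBLE, only fatal ones for TASK_KILLABLE (which contains the TASK_WAKEKILL bit):

static inline int signal_pending_state(long state, struct task_struct *p)
{
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return 0;
	if (!signal_pending(p))
		return 0;

	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}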

View File

@@ -830,10 +830,21 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 		start_workqueue_thread(cwq, -1);
 	} else {
 		cpu_maps_update_begin();
+		/*
+		 * We must place this wq on list even if the code below fails.
+		 * cpu_down(cpu) can remove cpu from cpu_populated_map before
+		 * destroy_workqueue() takes the lock, in that case we leak
+		 * cwq[cpu]->thread.
+		 */
 		spin_lock(&workqueue_lock);
 		list_add(&wq->list, &workqueues);
 		spin_unlock(&workqueue_lock);
+		/*
+		 * We must initialize cwqs for each possible cpu even if we
+		 * are going to call destroy_workqueue() finally. Otherwise
+		 * cpu_up() can hit the uninitialized cwq once we drop the
+		 * lock.
+		 */
 		for_each_possible_cpu(cpu) {
 			cwq = init_cpu_workqueue(wq, cpu);
 			if (err || !cpu_online(cpu))