Merge commit 'v2.6.26-rc9' into sched/devel
@@ -738,7 +738,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	if (!audit_enabled && msg_type != AUDIT_USER_AVC)
 		return 0;
 
-	err = audit_filter_user(&NETLINK_CB(skb), msg_type);
+	err = audit_filter_user(&NETLINK_CB(skb));
 	if (err == 1) {
 		err = 0;
 		if (msg_type == AUDIT_USER_TTY) {
@@ -779,7 +779,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 		}
 		/* fallthrough */
 	case AUDIT_LIST:
-		err = audit_receive_filter(nlh->nlmsg_type, NETLINK_CB(skb).pid,
+		err = audit_receive_filter(msg_type, NETLINK_CB(skb).pid,
 					   uid, seq, data, nlmsg_len(nlh),
 					   loginuid, sessionid, sid);
 		break;
@@ -798,7 +798,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 		}
 		/* fallthrough */
 	case AUDIT_LIST_RULES:
-		err = audit_receive_filter(nlh->nlmsg_type, NETLINK_CB(skb).pid,
+		err = audit_receive_filter(msg_type, NETLINK_CB(skb).pid,
 					   uid, seq, data, nlmsg_len(nlh),
 					   loginuid, sessionid, sid);
 		break;
 
@@ -1544,6 +1544,7 @@ static void audit_log_rule_change(uid_t loginuid, u32 sessionid, u32 sid,
  * @data: payload data
  * @datasz: size of payload data
  * @loginuid: loginuid of sender
+ * @sessionid: sessionid for netlink audit message
  * @sid: SE Linux Security ID of sender
  */
 int audit_receive_filter(int type, int pid, int uid, int seq, void *data,
@@ -1720,7 +1721,7 @@ static int audit_filter_user_rules(struct netlink_skb_parms *cb,
 	return 1;
 }
 
-int audit_filter_user(struct netlink_skb_parms *cb, int type)
+int audit_filter_user(struct netlink_skb_parms *cb)
 {
 	enum audit_state state = AUDIT_DISABLED;
 	struct audit_entry *e;
@@ -121,6 +121,27 @@ static int cap_validate_magic(cap_user_header_t header, unsigned *tocopy)
  * uninteresting and/or not to be changed.
  */
 
+/*
+ * Atomically modify the effective capabilities returning the original
+ * value. No permission check is performed here - it is assumed that the
+ * caller is permitted to set the desired effective capabilities.
+ */
+kernel_cap_t cap_set_effective(const kernel_cap_t pE_new)
+{
+	kernel_cap_t pE_old;
+
+	spin_lock(&task_capability_lock);
+
+	pE_old = current->cap_effective;
+	current->cap_effective = pE_new;
+
+	spin_unlock(&task_capability_lock);
+
+	return pE_old;
+}
+
+EXPORT_SYMBOL(cap_set_effective);
+
 /**
  * sys_capget - get the capabilities of a given process.
  * @header: pointer to struct that contains capability version and
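
Note on the new export: cap_set_effective() returns the previous effective set precisely so a caller can restore it afterwards. A minimal sketch of a hypothetical in-kernel caller (not part of this commit; CAP_FULL_SET is the all-ones capability set from <linux/capability.h>):

	#include <linux/capability.h>
	#include <linux/sched.h>

	static void do_privileged_work(void)
	{
		kernel_cap_t saved;

		saved = cap_set_effective(CAP_FULL_SET);	/* swap in the full set, keep the old one */
		/* ... work that needs the raised effective set ... */
		cap_set_effective(saved);			/* put the original set back */
	}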
@@ -1003,10 +1003,18 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
 	 */
 	raise = timer->state == HRTIMER_STATE_PENDING;
 
+	/*
+	 * We use preempt_disable to prevent this task from migrating after
+	 * setting up the softirq and raising it. Otherwise, if we migrate
+	 * we will raise the softirq on the wrong CPU.
+	 */
+	preempt_disable();
+
 	unlock_hrtimer_base(timer, &flags);
 
 	if (raise)
 		hrtimer_raise_softirq();
+	preempt_enable();
 
 	return ret;
 }
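
The comment added above explains the fix: between deciding to raise the softirq and actually raising it, the task could migrate, and the softirq would fire on a CPU whose timer queue does not hold the timer. A minimal sketch of the idiom, assuming hrtimer_raise_softirq() is a thin wrapper around raise_softirq(HRTIMER_SOFTIRQ):

	#include <linux/interrupt.h>
	#include <linux/preempt.h>

	static void raise_on_this_cpu(int raise)
	{
		preempt_disable();			/* pin the task to this CPU */
		if (raise)
			raise_softirq(HRTIMER_SOFTIRQ);	/* raised on the pinned CPU */
		preempt_enable();			/* migration is safe again */
	}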
@@ -89,8 +89,22 @@ static void force_quiescent_state(struct rcu_data *rdp,
 		/*
 		 * Don't send IPI to itself. With irqs disabled,
 		 * rdp->cpu is the current cpu.
+		 *
+		 * cpu_online_map is updated by _cpu_down()
+		 * using stop_machine_run(). Since we're in an
+		 * irqs-disabled section, stop_machine_run() is not
+		 * executing, hence cpu_online_map is stable.
+		 *
+		 * However, a cpu might have been offlined _just_ before
+		 * we disabled irqs while entering here.
+		 * The RCU subsystem might not yet have handled the CPU_DEAD
+		 * notification, leaving the offlined cpu's bit
+		 * set in rcp->cpumask.
+		 *
+		 * Hence cpumask = (rcp->cpumask & cpu_online_map) to prevent
+		 * sending smp_send_reschedule() to an offlined CPU.
 		 */
-		cpumask = rcp->cpumask;
+		cpus_and(cpumask, rcp->cpumask, cpu_online_map);
 		cpu_clear(rdp->cpu, cpumask);
 		for_each_cpu_mask(cpu, cpumask)
 			smp_send_reschedule(cpu);
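
To make the window concrete, this is the interleaving the cpus_and() guards against (an illustrative timeline, not part of the commit):

	/*
	 * CPU A (in force_quiescent_state)     CPU B (being offlined)
	 * ----------------------------------   ------------------------------
	 *                                      _cpu_down() completes; B's bit
	 *                                      cleared from cpu_online_map
	 * disables irqs, enters here
	 * reads rcp->cpumask; B's bit is
	 * still set (CPU_DEAD not yet handled)
	 * without cpus_and(): IPI sent to
	 * offline CPU B  <-- the bug
	 */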
@@ -6075,6 +6075,7 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
 		next = pick_next_task(rq, rq->curr);
 		if (!next)
 			break;
+		next->sched_class->put_prev_task(rq, next);
 		migrate_dead(dead_cpu, next);
 
 	}
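
The one-line fix restores the scheduler-class pairing invariant: a task handed out by pick_next_task() is accounted by its class as the CPU's running task until put_prev_task() is called on it, so it must be "put" before migrate_dead() moves it off the dead CPU as an ordinary task. Annotated restatement of the hunk (comments added here, not new code):

	next = pick_next_task(rq, rq->curr);		/* class now accounts 'next' as running */
	next->sched_class->put_prev_task(rq, next);	/* balance the pick before migrating */
	migrate_dead(dead_cpu, next);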
@@ -8755,6 +8756,9 @@ int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
 	rt_period = (u64)rt_period_us * NSEC_PER_USEC;
 	rt_runtime = tg->rt_bandwidth.rt_runtime;
 
+	if (rt_period == 0)
+		return -EINVAL;
+
 	return tg_set_bandwidth(tg, rt_period, rt_runtime);
 }
 
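
The new check rejects a zero period before tg_set_bandwidth() turns runtime and period into a ratio, where a zero divisor would be undefined. A sketch with a hypothetical helper (check_rt_bandwidth and the 16-bit fixed-point ratio are illustrative, not the commit's code; div64_u64 is from <linux/math64.h>):

	#include <linux/math64.h>
	#include <linux/errno.h>

	static int check_rt_bandwidth(u64 rt_period, u64 rt_runtime)
	{
		u64 ratio;

		if (rt_period == 0)
			return -EINVAL;		/* the division below would be undefined */

		ratio = div64_u64(rt_runtime << 16, rt_period);	/* fixed-point runtime/period */
		return ratio <= (1ULL << 16) ? 0 : -EINVAL;	/* require runtime <= period */
	}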
@@ -120,6 +120,7 @@ void softlockup_tick(void)
 	printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus! [%s:%d]\n",
 			this_cpu, now - touch_timestamp,
 			current->comm, task_pid_nr(current));
+	print_modules();
 	if (regs)
 		show_regs(regs);
 	else
@@ -13,7 +13,7 @@
  * Kai Petzke <wpp@marie.physik.tu-berlin.de>
  * Theodore Ts'o <tytso@mit.edu>
  *
- * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
+ * Made to use alloc_percpu by Christoph Lameter.
  */
 
 #include <linux/module.h>