Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "This fixes the cputime scaling overflow problems for good without
  having bad 32-bit overhead, and gets rid of the div64_u64_rem() helper
  as well."

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  Revert "math64: New div64_u64_rem helper"
  sched: Avoid prev->stime underflow
  sched: Do not account bogus utime
  sched: Avoid cputime scaling overflow
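
For context, the overflow being fixed comes from evaluating stime * rtime / total in 64-bit arithmetic: once both factors exceed 32 bits the product wraps. A minimal userspace sketch of the wrap (illustrative, made-up tick counts, not kernel code; unsigned __int128 is a GCC/Clang extension used only to show the exact value):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical large tick counts, e.g. a long-running many-threaded task */
	uint64_t stime = 40ULL * 1000 * 3600 * 24 * 365;	/* ~1.3e12 */
	uint64_t utime = 20ULL * 1000 * 3600 * 24 * 365;	/* ~0.6e12 */
	uint64_t rtime = 50ULL * 1000 * 3600 * 24 * 365;	/* ~1.6e12 */
	uint64_t total = stime + utime;

	/* stime * rtime is ~2e24, far beyond 2^64, so the 64-bit product wraps */
	uint64_t wrapped = stime * rtime / total;
	uint64_t exact   = (uint64_t)((unsigned __int128)stime * rtime / total);

	printf("wrapped: %llu\n", (unsigned long long)wrapped);
	printf("exact  : %llu\n", (unsigned long long)exact);
	return 0;
}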
Linus Torvalds
2013-05-02 14:56:31 -07:00
Commit 0279b3c0ad
3 changed files with 58 additions and 60 deletions


@@ -506,34 +506,47 @@ void account_idle_ticks(unsigned long ticks)
 }
 
 /*
- * Perform (stime * rtime) / total with reduced chances
- * of multiplication overflows by using smaller factors
- * like quotient and remainders of divisions between
- * rtime and total.
+ * Perform (stime * rtime) / total, but avoid multiplication overflow by
+ * loosing precision when the numbers are big.
  */
 static cputime_t scale_stime(u64 stime, u64 rtime, u64 total)
 {
-	u64 rem, res, scaled;
+	u64 scaled;
 
-	if (rtime >= total) {
-		/*
-		 * Scale up to rtime / total then add
-		 * the remainder scaled to stime / total.
-		 */
-		res = div64_u64_rem(rtime, total, &rem);
-		scaled = stime * res;
-		scaled += div64_u64(stime * rem, total);
-	} else {
-		/*
-		 * Same in reverse: scale down to total / rtime
-		 * then substract that result scaled to
-		 * to the remaining part.
-		 */
-		res = div64_u64_rem(total, rtime, &rem);
-		scaled = div64_u64(stime, res);
-		scaled -= div64_u64(scaled * rem, total);
+	for (;;) {
+		/* Make sure "rtime" is the bigger of stime/rtime */
+		if (stime > rtime) {
+			u64 tmp = rtime; rtime = stime; stime = tmp;
+		}
+
+		/* Make sure 'total' fits in 32 bits */
+		if (total >> 32)
+			goto drop_precision;
+
+		/* Does rtime (and thus stime) fit in 32 bits? */
+		if (!(rtime >> 32))
+			break;
+
+		/* Can we just balance rtime/stime rather than dropping bits? */
+		if (stime >> 31)
+			goto drop_precision;
+
+		/* We can grow stime and shrink rtime and try to make them both fit */
+		stime <<= 1;
+		rtime >>= 1;
+		continue;
+
+drop_precision:
+		/* We drop from rtime, it has more bits than stime */
+		rtime >>= 1;
+		total >>= 1;
 	}
 
+	/*
+	 * Make sure gcc understands that this is a 32x32->64 multiply,
+	 * followed by a 64/32->64 divide.
+	 */
+	scaled = div_u64((u64) (u32) stime * (u64) (u32) rtime, (u32)total);
 	return (__force cputime_t) scaled;
 }
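
The loop above can be read in isolation: it keeps halving either rtime alone (re-balancing it against stime) or rtime and total together until everything fits in 32 bits, so the final operation is a 32x32->64 multiply followed by a 64/32 divide. A standalone userspace rendition for experimentation (illustration only; scale_stime_demo is a made-up name, div_u64() replaced by plain division, kernel types by stdint ones):

#include <stdio.h>
#include <stdint.h>

static uint64_t scale_stime_demo(uint64_t stime, uint64_t rtime, uint64_t total)
{
	for (;;) {
		/* Keep rtime as the larger of the two factors */
		if (stime > rtime) {
			uint64_t tmp = rtime; rtime = stime; stime = tmp;
		}
		/* The divisor must fit in 32 bits */
		if (total >> 32)
			goto drop_precision;
		/* Both factors fit in 32 bits: the multiply below cannot overflow */
		if (!(rtime >> 32))
			break;
		/* If stime still has headroom, re-balance the factors instead of dropping bits */
		if (stime >> 31)
			goto drop_precision;
		stime <<= 1;
		rtime >>= 1;
		continue;
drop_precision:
		/* Drop one bit from numerator and denominator: the ratio is preserved */
		rtime >>= 1;
		total >>= 1;
	}
	/* 32x32->64 multiply, then a 64/32 divide */
	return (uint64_t)(uint32_t)stime * (uint32_t)rtime / (uint32_t)total;
}

int main(void)
{
	/* Values large enough that a plain 64-bit stime * rtime would wrap */
	uint64_t stime = 40ULL * 1000 * 3600 * 24 * 365;
	uint64_t rtime = 50ULL * 1000 * 3600 * 24 * 365;
	uint64_t total = stime + stime / 2;

	printf("scaled stime: %llu\n",
	       (unsigned long long)scale_stime_demo(stime, rtime, total));
	return 0;
}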
@@ -545,7 +558,7 @@ static void cputime_adjust(struct task_cputime *curr,
 			   struct cputime *prev,
 			   cputime_t *ut, cputime_t *st)
 {
-	cputime_t rtime, stime, total;
+	cputime_t rtime, stime, utime, total;
 
 	if (vtime_accounting_enabled()) {
 		*ut = curr->utime;
@@ -568,13 +581,21 @@ static void cputime_adjust(struct task_cputime *curr,
 	 */
 	rtime = nsecs_to_cputime(curr->sum_exec_runtime);
 
-	if (!rtime) {
-		stime = 0;
-	} else if (!total) {
-		stime = rtime;
-	} else {
+	/*
+	 * Update userspace visible utime/stime values only if actual execution
+	 * time is bigger than already exported. Note that can happen, that we
+	 * provided bigger values due to scaling inaccuracy on big numbers.
+	 */
+	if (prev->stime + prev->utime >= rtime)
+		goto out;
+
+	if (total) {
 		stime = scale_stime((__force u64)stime,
 				    (__force u64)rtime, (__force u64)total);
+		utime = rtime - stime;
+	} else {
+		stime = rtime;
+		utime = 0;
 	}
 
 	/*
@@ -583,8 +604,9 @@ static void cputime_adjust(struct task_cputime *curr,
 	 * Let's enforce monotonicity.
 	 */
 	prev->stime = max(prev->stime, stime);
-	prev->utime = max(prev->utime, rtime - prev->stime);
+	prev->utime = max(prev->utime, utime);
 
+out:
 	*ut = prev->utime;
 	*st = prev->stime;
 }
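
Taken together, the cputime_adjust() changes mean that utime is derived from the freshly scaled stime before the monotonic clamps, so it is no longer computed as rtime - prev->stime (which could wrap once prev->stime grew past rtime), and the whole update is skipped when the previously exported sum already covers rtime. A rough userspace model of that flow (made-up names and types; scale_stime() stood in by a 128-bit computation, GCC/Clang only):

#include <stdio.h>
#include <stdint.h>

struct prev_cputime { uint64_t utime, stime; };

static void adjust_demo(struct prev_cputime *prev, uint64_t utime_raw,
			uint64_t stime_raw, uint64_t rtime,
			uint64_t *ut, uint64_t *st)
{
	uint64_t total = stime_raw + utime_raw;
	uint64_t stime, utime;

	/* Nothing new to export: earlier (over-)estimates already cover rtime */
	if (prev->stime + prev->utime >= rtime)
		goto out;

	if (total) {
		/* stand-in for scale_stime(): stime_raw * rtime / total */
		stime = (uint64_t)((unsigned __int128)stime_raw * rtime / total);
		utime = rtime - stime;
	} else {
		stime = rtime;
		utime = 0;
	}

	/* Enforce monotonicity against previously exported values */
	if (stime > prev->stime)
		prev->stime = stime;
	if (utime > prev->utime)
		prev->utime = utime;
out:
	*ut = prev->utime;
	*st = prev->stime;
}

int main(void)
{
	struct prev_cputime prev = { 0, 0 };
	uint64_t ut, st;

	/* First sample: 300 of 1000 runtime units accounted as system time */
	adjust_demo(&prev, 700, 300, 1000, &ut, &st);
	printf("ut=%llu st=%llu\n", (unsigned long long)ut, (unsigned long long)st);

	/* Second sample with no runtime progress: previous values are re-exported */
	adjust_demo(&prev, 710, 300, 1000, &ut, &st);
	printf("ut=%llu st=%llu\n", (unsigned long long)ut, (unsigned long long)st);
	return 0;
}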