Merge branch 'for-4.20-fixes' into for-4.21
@@ -553,7 +553,6 @@ bool is_bpf_text_address(unsigned long addr)
int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
                    char *sym)
{
    unsigned long symbol_start, symbol_end;
    struct bpf_prog_aux *aux;
    unsigned int it = 0;
    int ret = -ERANGE;
@@ -566,10 +565,9 @@ int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
        if (it++ != symnum)
            continue;

        bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
        bpf_get_prog_name(aux->prog, sym);

        *value = symbol_start;
        *value = (unsigned long)aux->prog->bpf_func;
        *type = BPF_SYM_ELF_TYPE;

        ret = 0;

@@ -2078,6 +2078,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
        info.jited_prog_len = 0;
        info.xlated_prog_len = 0;
        info.nr_jited_ksyms = 0;
        info.nr_jited_func_lens = 0;
        goto done;
    }

@@ -2158,11 +2159,11 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
    }

    ulen = info.nr_jited_ksyms;
    info.nr_jited_ksyms = prog->aux->func_cnt;
    info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
    if (info.nr_jited_ksyms && ulen) {
        if (bpf_dump_raw_ok()) {
            unsigned long ksym_addr;
            u64 __user *user_ksyms;
            ulong ksym_addr;
            u32 i;

            /* copy the address of the kernel symbol
@@ -2170,10 +2171,17 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
             */
            ulen = min_t(u32, info.nr_jited_ksyms, ulen);
            user_ksyms = u64_to_user_ptr(info.jited_ksyms);
            for (i = 0; i < ulen; i++) {
                ksym_addr = (ulong) prog->aux->func[i]->bpf_func;
                ksym_addr &= PAGE_MASK;
                if (put_user((u64) ksym_addr, &user_ksyms[i]))
            if (prog->aux->func_cnt) {
                for (i = 0; i < ulen; i++) {
                    ksym_addr = (unsigned long)
                        prog->aux->func[i]->bpf_func;
                    if (put_user((u64) ksym_addr,
                                 &user_ksyms[i]))
                        return -EFAULT;
                }
            } else {
                ksym_addr = (unsigned long) prog->bpf_func;
                if (put_user((u64) ksym_addr, &user_ksyms[0]))
                    return -EFAULT;
            }
        } else {
@@ -2182,7 +2190,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
    }

    ulen = info.nr_jited_func_lens;
    info.nr_jited_func_lens = prog->aux->func_cnt;
    info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
    if (info.nr_jited_func_lens && ulen) {
        if (bpf_dump_raw_ok()) {
            u32 __user *user_lens;
@@ -2191,9 +2199,16 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
            /* copy the JITed image lengths for each function */
            ulen = min_t(u32, info.nr_jited_func_lens, ulen);
            user_lens = u64_to_user_ptr(info.jited_func_lens);
            for (i = 0; i < ulen; i++) {
                func_len = prog->aux->func[i]->jited_len;
                if (put_user(func_len, &user_lens[i]))
            if (prog->aux->func_cnt) {
                for (i = 0; i < ulen; i++) {
                    func_len =
                        prog->aux->func[i]->jited_len;
                    if (put_user(func_len, &user_lens[i]))
                        return -EFAULT;
                }
            } else {
                func_len = prog->jited_len;
                if (put_user(func_len, &user_lens[0]))
                    return -EFAULT;
            }
        } else {

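Note on the `? :` form introduced above: `prog->aux->func_cnt ? : 1` uses the GNU C conditional-with-omitted-middle-operand extension, which evaluates to the tested expression itself when it is nonzero and to the last operand otherwise, so a program with no subprograms still reports one ksym/length entry. A minimal standalone illustration (assumed example, not part of this patch):

    /* GNU C extension: x ? : y is equivalent to x ? x : y,
     * with x evaluated only once. */
    static unsigned int nr_entries(unsigned int func_cnt)
    {
        return func_cnt ? : 1;  /* 0 -> 1, otherwise func_cnt */
    }
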
@@ -4207,20 +4207,25 @@ static void css_task_iter_advance(struct css_task_iter *it)

    lockdep_assert_held(&css_set_lock);
repeat:
    /*
     * Advance iterator to find next entry. cset->tasks is consumed
     * first and then ->mg_tasks. After ->mg_tasks, we move onto the
     * next cset.
     */
    next = it->task_pos->next;
    if (it->task_pos) {
        /*
         * Advance iterator to find next entry. cset->tasks is
         * consumed first and then ->mg_tasks. After ->mg_tasks,
         * we move onto the next cset.
         */
        next = it->task_pos->next;

        if (next == it->tasks_head)
            next = it->mg_tasks_head->next;
    if (next == it->tasks_head)
        next = it->mg_tasks_head->next;

    if (next == it->mg_tasks_head)
        if (next == it->mg_tasks_head)
            css_task_iter_advance_css_set(it);
        else
            it->task_pos = next;
    } else {
        /* called from start, proceed to the first cset */
        css_task_iter_advance_css_set(it);
    else
        it->task_pos = next;
    }

    /* if PROCS, skip over tasks which aren't group leaders */
    if ((it->flags & CSS_TASK_ITER_PROCS) && it->task_pos &&
@@ -4260,7 +4265,7 @@ void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,

    it->cset_head = it->cset_pos;

    css_task_iter_advance_css_set(it);
    css_task_iter_advance(it);

    spin_unlock_irq(&css_set_lock);
}

@@ -179,14 +179,14 @@ kdb_bt(int argc, const char **argv)
            kdb_printf("no process for cpu %ld\n", cpu);
            return 0;
        }
        sprintf(buf, "btt 0x%p\n", KDB_TSK(cpu));
        sprintf(buf, "btt 0x%px\n", KDB_TSK(cpu));
        kdb_parse(buf);
        return 0;
    }
    kdb_printf("btc: cpu status: ");
    kdb_parse("cpu\n");
    for_each_online_cpu(cpu) {
        sprintf(buf, "btt 0x%p\n", KDB_TSK(cpu));
        sprintf(buf, "btt 0x%px\n", KDB_TSK(cpu));
        kdb_parse(buf);
        touch_nmi_watchdog();
    }

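The `%p` to `%px` conversions in this hunk and in the kdb hunks that follow account for the kernel's pointer-hashing hardening: a plain `%p` in printk-style format strings prints an obfuscated (hashed) value, while `%px` prints the raw address, which is what an interactive debugger like kdb actually needs. A minimal sketch of the distinction (assumed example, not part of this patch):

    /* %p  - pointer is hashed before printing (safe default)
     * %px - raw pointer value, only for code that truly needs it */
    static void kdb_show_task_addr(const struct task_struct *p)
    {
        kdb_printf("task struct at %px (hashed: %p)\n", p, p);
    }
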
@@ -216,7 +216,7 @@ static char *kdb_read(char *buffer, size_t bufsize)
    int count;
    int i;
    int diag, dtab_count;
    int key;
    int key, buf_size, ret;

    diag = kdbgetintenv("DTABCOUNT", &dtab_count);
@@ -336,9 +336,8 @@ poll_again:
        else
            p_tmp = tmpbuffer;
        len = strlen(p_tmp);
        count = kallsyms_symbol_complete(p_tmp,
                                         sizeof(tmpbuffer) -
                                         (p_tmp - tmpbuffer));
        buf_size = sizeof(tmpbuffer) - (p_tmp - tmpbuffer);
        count = kallsyms_symbol_complete(p_tmp, buf_size);
        if (tab == 2 && count > 0) {
            kdb_printf("\n%d symbols are found.", count);
            if (count > dtab_count) {
@@ -350,9 +349,13 @@ poll_again:
            }
            kdb_printf("\n");
            for (i = 0; i < count; i++) {
                if (WARN_ON(!kallsyms_symbol_next(p_tmp, i)))
                ret = kallsyms_symbol_next(p_tmp, i, buf_size);
                if (WARN_ON(!ret))
                    break;
                kdb_printf("%s ", p_tmp);
                if (ret != -E2BIG)
                    kdb_printf("%s ", p_tmp);
                else
                    kdb_printf("%s... ", p_tmp);
                *(p_tmp + len) = '\0';
            }
            if (i >= dtab_count)

@@ -173,11 +173,11 @@ int kdb_get_kbd_char(void)
    case KT_LATIN:
        if (isprint(keychar))
            break;      /* printable characters */
        /* drop through */
        /* fall through */
    case KT_SPEC:
        if (keychar == K_ENTER)
            break;
        /* drop through */
        /* fall through */
    default:
        return -1;      /* ignore unprintables */
    }

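The comment change from "drop through" to "fall through" above (and in the kdb_support.c hunks further down) is not cosmetic: GCC's -Wimplicit-fallthrough only treats specific comment spellings such as "fall through" (or the fallthrough attribute) as a deliberate fall-through marker, so the old wording would still trigger the warning. A minimal standalone sketch (assumed example, not part of this patch):

    static int classify(int c)
    {
        switch (c) {
        case '\n':
            return 0;
        case '\t':
            c = ' ';
            /* fall through */  /* recognized by -Wimplicit-fallthrough */
        default:
            return c;
        }
    }
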
@@ -1192,7 +1192,7 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
    if (reason == KDB_REASON_DEBUG) {
        /* special case below */
    } else {
        kdb_printf("\nEntering kdb (current=0x%p, pid %d) ",
        kdb_printf("\nEntering kdb (current=0x%px, pid %d) ",
                   kdb_current, kdb_current ? kdb_current->pid : 0);
#if defined(CONFIG_SMP)
        kdb_printf("on processor %d ", raw_smp_processor_id());
@@ -1208,7 +1208,7 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
     */
    switch (db_result) {
    case KDB_DB_BPT:
        kdb_printf("\nEntering kdb (0x%p, pid %d) ",
        kdb_printf("\nEntering kdb (0x%px, pid %d) ",
                   kdb_current, kdb_current->pid);
#if defined(CONFIG_SMP)
        kdb_printf("on processor %d ", raw_smp_processor_id());
@@ -1493,6 +1493,7 @@ static void kdb_md_line(const char *fmtstr, unsigned long addr,
    char cbuf[32];
    char *c = cbuf;
    int i;
    int j;
    unsigned long word;

    memset(cbuf, '\0', sizeof(cbuf));
@@ -1538,25 +1539,9 @@ static void kdb_md_line(const char *fmtstr, unsigned long addr,
            wc.word = word;
#define printable_char(c) \
    ({unsigned char __c = c; isascii(__c) && isprint(__c) ? __c : '.'; })
            switch (bytesperword) {
            case 8:
            for (j = 0; j < bytesperword; j++)
                *c++ = printable_char(*cp++);
                *c++ = printable_char(*cp++);
                *c++ = printable_char(*cp++);
                *c++ = printable_char(*cp++);
                addr += 4;
            case 4:
                *c++ = printable_char(*cp++);
                *c++ = printable_char(*cp++);
                addr += 2;
            case 2:
                *c++ = printable_char(*cp++);
                addr++;
            case 1:
                *c++ = printable_char(*cp++);
                addr++;
                break;
            }
            addr += bytesperword;
#undef printable_char
        }
    }
@@ -2048,7 +2033,7 @@ static int kdb_lsmod(int argc, const char **argv)
        if (mod->state == MODULE_STATE_UNFORMED)
            continue;

        kdb_printf("%-20s%8u 0x%p ", mod->name,
        kdb_printf("%-20s%8u 0x%px ", mod->name,
                   mod->core_layout.size, (void *)mod);
#ifdef CONFIG_MODULE_UNLOAD
        kdb_printf("%4d ", module_refcount(mod));
@@ -2059,7 +2044,7 @@ static int kdb_lsmod(int argc, const char **argv)
            kdb_printf(" (Loading)");
        else
            kdb_printf(" (Live)");
        kdb_printf(" 0x%p", mod->core_layout.base);
        kdb_printf(" 0x%px", mod->core_layout.base);

#ifdef CONFIG_MODULE_UNLOAD
        {
@@ -2341,7 +2326,7 @@ void kdb_ps1(const struct task_struct *p)
        return;

    cpu = kdb_process_cpu(p);
    kdb_printf("0x%p %8d %8d %d %4d %c 0x%p %c%s\n",
    kdb_printf("0x%px %8d %8d %d %4d %c 0x%px %c%s\n",
               (void *)p, p->pid, p->parent->pid,
               kdb_task_has_cpu(p), kdb_process_cpu(p),
               kdb_task_state_char(p),
@@ -2354,7 +2339,7 @@ void kdb_ps1(const struct task_struct *p)
        } else {
            if (KDB_TSK(cpu) != p)
                kdb_printf(" Error: does not match running "
                           "process table (0x%p)\n", KDB_TSK(cpu));
                           "process table (0x%px)\n", KDB_TSK(cpu));
        }
    }
}
@@ -2687,7 +2672,7 @@ int kdb_register_flags(char *cmd,
    for_each_kdbcmd(kp, i) {
        if (kp->cmd_name && (strcmp(kp->cmd_name, cmd) == 0)) {
            kdb_printf("Duplicate kdb command registered: "
                       "%s, func %p help %s\n", cmd, func, help);
                       "%s, func %px help %s\n", cmd, func, help);
            return 1;
        }
    }

@@ -83,7 +83,7 @@ typedef struct __ksymtab {
    unsigned long sym_start;
    unsigned long sym_end;
} kdb_symtab_t;
extern int kallsyms_symbol_next(char *prefix_name, int flag);
extern int kallsyms_symbol_next(char *prefix_name, int flag, int buf_size);
extern int kallsyms_symbol_complete(char *prefix_name, int max_len);

/* Exported Symbols for kernel loadable modules to use. */

@@ -40,7 +40,7 @@
int kdbgetsymval(const char *symname, kdb_symtab_t *symtab)
{
    if (KDB_DEBUG(AR))
        kdb_printf("kdbgetsymval: symname=%s, symtab=%p\n", symname,
        kdb_printf("kdbgetsymval: symname=%s, symtab=%px\n", symname,
                   symtab);
    memset(symtab, 0, sizeof(*symtab));
    symtab->sym_start = kallsyms_lookup_name(symname);
@@ -88,7 +88,7 @@ int kdbnearsym(unsigned long addr, kdb_symtab_t *symtab)
    char *knt1 = NULL;

    if (KDB_DEBUG(AR))
        kdb_printf("kdbnearsym: addr=0x%lx, symtab=%p\n", addr, symtab);
        kdb_printf("kdbnearsym: addr=0x%lx, symtab=%px\n", addr, symtab);
    memset(symtab, 0, sizeof(*symtab));

    if (addr < 4096)
@@ -149,7 +149,7 @@ int kdbnearsym(unsigned long addr, kdb_symtab_t *symtab)
        symtab->mod_name = "kernel";
    if (KDB_DEBUG(AR))
        kdb_printf("kdbnearsym: returns %d symtab->sym_start=0x%lx, "
                   "symtab->mod_name=%p, symtab->sym_name=%p (%s)\n", ret,
                   "symtab->mod_name=%px, symtab->sym_name=%px (%s)\n", ret,
                   symtab->sym_start, symtab->mod_name, symtab->sym_name,
                   symtab->sym_name);

@@ -221,11 +221,13 @@ int kallsyms_symbol_complete(char *prefix_name, int max_len)
 * Parameters:
 *	prefix_name	prefix of a symbol name to lookup
 *	flag	0 means search from the head, 1 means continue search.
 *	buf_size	maximum length that can be written to prefix_name
 *			buffer
 * Returns:
 *	1 if a symbol matches the given prefix.
 *	0 if no string found
 */
int kallsyms_symbol_next(char *prefix_name, int flag)
int kallsyms_symbol_next(char *prefix_name, int flag, int buf_size)
{
    int prefix_len = strlen(prefix_name);
    static loff_t pos;
@@ -235,10 +237,8 @@ int kallsyms_symbol_next(char *prefix_name, int flag)
        pos = 0;

    while ((name = kdb_walk_kallsyms(&pos))) {
        if (strncmp(name, prefix_name, prefix_len) == 0) {
            strncpy(prefix_name, name, strlen(name)+1);
            return 1;
        }
        if (!strncmp(name, prefix_name, prefix_len))
            return strscpy(prefix_name, name, buf_size);
    }
    return 0;
}

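The strncpy() to strscpy() switch above also changes the helper's contract: strscpy() never writes more than buf_size bytes, always NUL-terminates, and returns either the number of characters copied or -E2BIG if the source had to be truncated, which is what the updated kdb_read() completion loop checks for. A minimal sketch of handling that return value (assumed example, not part of this patch):

    /* strscpy(dst, src, size) returns the copied length or -E2BIG. */
    ssize_t n = strscpy(prefix_name, name, buf_size);
    if (n == -E2BIG)
        kdb_printf("%s... ", prefix_name);  /* truncated match */
    else
        kdb_printf("%s ", prefix_name);
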
@@ -432,7 +432,7 @@ int kdb_getphysword(unsigned long *word, unsigned long addr, size_t size)
            *word = w8;
            break;
        }
        /* drop through */
        /* fall through */
    default:
        diag = KDB_BADWIDTH;
        kdb_printf("kdb_getphysword: bad width %ld\n", (long) size);
@@ -481,7 +481,7 @@ int kdb_getword(unsigned long *word, unsigned long addr, size_t size)
            *word = w8;
            break;
        }
        /* drop through */
        /* fall through */
    default:
        diag = KDB_BADWIDTH;
        kdb_printf("kdb_getword: bad width %ld\n", (long) size);
@@ -525,7 +525,7 @@ int kdb_putword(unsigned long addr, unsigned long word, size_t size)
            diag = kdb_putarea(addr, w8);
            break;
        }
        /* drop through */
        /* fall through */
    default:
        diag = KDB_BADWIDTH;
        kdb_printf("kdb_putword: bad width %ld\n", (long) size);
@@ -887,13 +887,13 @@ void debug_kusage(void)
               __func__, dah_first);
    if (dah_first) {
        h_used = (struct debug_alloc_header *)debug_alloc_pool;
        kdb_printf("%s: h_used %p size %d\n", __func__, h_used,
        kdb_printf("%s: h_used %px size %d\n", __func__, h_used,
                   h_used->size);
    }
    do {
        h_used = (struct debug_alloc_header *)
            ((char *)h_free + dah_overhead + h_free->size);
        kdb_printf("%s: h_used %p size %d caller %p\n",
        kdb_printf("%s: h_used %px size %d caller %px\n",
                   __func__, h_used, h_used->size, h_used->caller);
        h_free = (struct debug_alloc_header *)
            (debug_alloc_pool + h_free->next);
@@ -902,7 +902,7 @@ void debug_kusage(void)
        ((char *)h_free + dah_overhead + h_free->size);
    if ((char *)h_used - debug_alloc_pool !=
        sizeof(debug_alloc_pool_aligned))
        kdb_printf("%s: h_used %p size %d caller %p\n",
        kdb_printf("%s: h_used %px size %d caller %px\n",
                   __func__, h_used, h_used->size, h_used->caller);
out:
    spin_unlock(&dap_lock);

@@ -319,16 +319,23 @@ int release_resource(struct resource *old)
EXPORT_SYMBOL(release_resource);

/**
 * Finds the lowest iomem resource that covers part of [start..end]. The
 * caller must specify start, end, flags, and desc (which may be
 * Finds the lowest iomem resource that covers part of [@start..@end]. The
 * caller must specify @start, @end, @flags, and @desc (which may be
 * IORES_DESC_NONE).
 *
 * If a resource is found, returns 0 and *res is overwritten with the part
 * of the resource that's within [start..end]; if none is found, returns
 * -1.
 * If a resource is found, returns 0 and @*res is overwritten with the part
 * of the resource that's within [@start..@end]; if none is found, returns
 * -1 or -EINVAL for other invalid parameters.
 *
 * This function walks the whole tree and not just first level children
 * unless @first_lvl is true.
 *
 * @start: start address of the resource searched for
 * @end: end address of same resource
 * @flags: flags which the resource must have
 * @desc: descriptor the resource must have
 * @first_lvl: walk only the first level children, if set
 * @res: return ptr, if resource found
 */
static int find_next_iomem_res(resource_size_t start, resource_size_t end,
                               unsigned long flags, unsigned long desc,
@@ -399,6 +406,8 @@ static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
 * @flags: I/O resource flags
 * @start: start addr
 * @end: end addr
 * @arg: function argument for the callback @func
 * @func: callback function that is called for each qualifying resource area
 *
 * NOTE: For a new descriptor search, define a new IORES_DESC in
 * <linux/ioport.h> and set it in 'desc' of a target resource entry.

@@ -5851,11 +5851,14 @@ void __init sched_init_smp(void)
    /*
     * There's no userspace yet to cause hotplug operations; hence all the
     * CPU masks are stable and all blatant races in the below code cannot
     * happen.
     * happen. The hotplug lock is nevertheless taken to satisfy lockdep,
     * but there won't be any contention on it.
     */
    cpus_read_lock();
    mutex_lock(&sched_domains_mutex);
    sched_init_domains(cpu_active_mask);
    mutex_unlock(&sched_domains_mutex);
    cpus_read_unlock();

    /* Move init over to a non-isolated CPU */
    if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0)

@@ -2400,8 +2400,8 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
        local = 1;

    /*
     * Retry task to preferred node migration periodically, in case it
     * case it previously failed, or the scheduler moved us.
     * Retry to migrate task to preferred node periodically, in case it
     * previously failed, or the scheduler moved us.
     */
    if (time_after(jiffies, p->numa_migrate_retry)) {
        task_numa_placement(p);
@@ -5674,11 +5674,11 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
    return target;
}

static unsigned long cpu_util_wake(int cpu, struct task_struct *p);
static unsigned long cpu_util_without(int cpu, struct task_struct *p);

static unsigned long capacity_spare_wake(int cpu, struct task_struct *p)
static unsigned long capacity_spare_without(int cpu, struct task_struct *p)
{
    return max_t(long, capacity_of(cpu) - cpu_util_wake(cpu, p), 0);
    return max_t(long, capacity_of(cpu) - cpu_util_without(cpu, p), 0);
}

/*
@@ -5738,7 +5738,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,

        avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs);

        spare_cap = capacity_spare_wake(i, p);
        spare_cap = capacity_spare_without(i, p);

        if (spare_cap > max_spare_cap)
            max_spare_cap = spare_cap;
@@ -5889,8 +5889,8 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p
        return prev_cpu;

    /*
     * We need task's util for capacity_spare_wake, sync it up to prev_cpu's
     * last_update_time.
     * We need task's util for capacity_spare_without, sync it up to
     * prev_cpu's last_update_time.
     */
    if (!(sd_flag & SD_BALANCE_FORK))
        sync_entity_load_avg(&p->se);
@@ -6216,10 +6216,19 @@ static inline unsigned long cpu_util(int cpu)
}

/*
 * cpu_util_wake: Compute CPU utilization with any contributions from
 * the waking task p removed.
 * cpu_util_without: compute cpu utilization without any contributions from *p
 * @cpu: the CPU which utilization is requested
 * @p: the task which utilization should be discounted
 *
 * The utilization of a CPU is defined by the utilization of tasks currently
 * enqueued on that CPU as well as tasks which are currently sleeping after an
 * execution on that CPU.
 *
 * This method returns the utilization of the specified CPU by discounting the
 * utilization of the specified task, whenever the task is currently
 * contributing to the CPU utilization.
 */
static unsigned long cpu_util_wake(int cpu, struct task_struct *p)
static unsigned long cpu_util_without(int cpu, struct task_struct *p)
{
    struct cfs_rq *cfs_rq;
    unsigned int util;
@@ -6231,7 +6240,7 @@ static unsigned long cpu_util_wake(int cpu, struct task_struct *p)
    cfs_rq = &cpu_rq(cpu)->cfs;
    util = READ_ONCE(cfs_rq->avg.util_avg);

    /* Discount task's blocked util from CPU's util */
    /* Discount task's util from CPU's util */
    util -= min_t(unsigned int, util, task_util(p));

    /*
@@ -6240,14 +6249,14 @@ static unsigned long cpu_util_wake(int cpu, struct task_struct *p)
     * a) if *p is the only task sleeping on this CPU, then:
     *      cpu_util (== task_util) > util_est (== 0)
     *    and thus we return:
     *      cpu_util_wake = (cpu_util - task_util) = 0
     *      cpu_util_without = (cpu_util - task_util) = 0
     *
     * b) if other tasks are SLEEPING on this CPU, which is now exiting
     *    IDLE, then:
     *      cpu_util >= task_util
     *      cpu_util > util_est (== 0)
     *    and thus we discount *p's blocked utilization to return:
     *      cpu_util_wake = (cpu_util - task_util) >= 0
     *      cpu_util_without = (cpu_util - task_util) >= 0
     *
     * c) if other tasks are RUNNABLE on that CPU and
     *      util_est > cpu_util
@@ -6260,8 +6269,33 @@ static unsigned long cpu_util_wake(int cpu, struct task_struct *p)
     * covered by the following code when estimated utilization is
     * enabled.
     */
    if (sched_feat(UTIL_EST))
        util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued));
    if (sched_feat(UTIL_EST)) {
        unsigned int estimated =
            READ_ONCE(cfs_rq->avg.util_est.enqueued);

        /*
         * Despite the following checks we still have a small window
         * for a possible race, when an execl's select_task_rq_fair()
         * races with LB's detach_task():
         *
         *   detach_task()
         *     p->on_rq = TASK_ON_RQ_MIGRATING;
         *     ---------------------------------- A
         *     deactivate_task() \
         *       dequeue_task()   + RaceTime
         *         util_est_dequeue() /
         *     ---------------------------------- B
         *
         * The additional check on "current == p" it's required to
         * properly fix the execl regression and it helps in further
         * reducing the chances for the above race.
         */
        if (unlikely(task_on_rq_queued(p) || current == p)) {
            estimated -= min_t(unsigned int, estimated,
                (_task_util_est(p) | UTIL_AVG_UNCHANGED));
        }
        util = max(util, estimated);
    }

    /*
     * Utilization (estimated) can exceed the CPU capacity, thus let's

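The reworked cpu_util_without() above discounts the task's contribution twice: once from the PELT running average, and, when UTIL_EST is enabled and the task is still accounted as queued (or is current), once from the utilization estimate, before taking the max of the two signals. A simplified standalone model of that arithmetic (assumed sketch, not the kernel implementation):

    /* Clamp each subtraction at zero, then combine the two signals. */
    static unsigned int util_without(unsigned int util_avg,
                                     unsigned int util_est,
                                     unsigned int task_util,
                                     int task_still_accounted)
    {
        util_avg -= (util_avg < task_util) ? util_avg : task_util;
        if (task_still_accounted)
            util_est -= (util_est < task_util) ? util_est : task_util;
        return (util_avg > util_est) ? util_avg : util_est;
    }
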
@@ -633,38 +633,39 @@ void psi_cgroup_free(struct cgroup *cgroup)
 */
void cgroup_move_task(struct task_struct *task, struct css_set *to)
{
    bool move_psi = !psi_disabled;
    unsigned int task_flags = 0;
    struct rq_flags rf;
    struct rq *rq;

    if (move_psi) {
        rq = task_rq_lock(task, &rf);

        if (task_on_rq_queued(task))
            task_flags = TSK_RUNNING;
        else if (task->in_iowait)
            task_flags = TSK_IOWAIT;

        if (task->flags & PF_MEMSTALL)
            task_flags |= TSK_MEMSTALL;

        if (task_flags)
            psi_task_change(task, task_flags, 0);
    if (psi_disabled) {
        /*
         * Lame to do this here, but the scheduler cannot be locked
         * from the outside, so we move cgroups from inside sched/.
         */
        rcu_assign_pointer(task->cgroups, to);
        return;
    }

    /*
     * Lame to do this here, but the scheduler cannot be locked
     * from the outside, so we move cgroups from inside sched/.
     */
    rq = task_rq_lock(task, &rf);

    if (task_on_rq_queued(task))
        task_flags = TSK_RUNNING;
    else if (task->in_iowait)
        task_flags = TSK_IOWAIT;

    if (task->flags & PF_MEMSTALL)
        task_flags |= TSK_MEMSTALL;

    if (task_flags)
        psi_task_change(task, task_flags, 0);

    /* See comment above */
    rcu_assign_pointer(task->cgroups, to);

    if (move_psi) {
        if (task_flags)
            psi_task_change(task, 0, task_flags);
    if (task_flags)
        psi_task_change(task, 0, task_flags);

        task_rq_unlock(rq, task, &rf);
    }
    task_rq_unlock(rq, task, &rf);
}
#endif /* CONFIG_CGROUPS */

@@ -917,9 +917,6 @@ static void check_process_timers(struct task_struct *tsk,
    struct task_cputime cputime;
    unsigned long soft;

    if (dl_task(tsk))
        check_dl_overrun(tsk);

    /*
     * If cputimer is not running, then there are no active
     * process wide timers (POSIX 1.b, itimers, RLIMIT_CPU).

@@ -535,7 +535,7 @@ int traceprobe_update_arg(struct probe_arg *arg)
        if (code[1].op != FETCH_OP_IMM)
            return -EINVAL;

        tmp = strpbrk("+-", code->data);
        tmp = strpbrk(code->data, "+-");
        if (tmp)
            c = *tmp;
        ret = traceprobe_split_symbol_offset(code->data,

@@ -974,10 +974,6 @@ static ssize_t map_write(struct file *file, const char __user *buf,
    if (!new_idmap_permitted(file, ns, cap_setid, &new_map))
        goto out;

    ret = sort_idmaps(&new_map);
    if (ret < 0)
        goto out;

    ret = -EPERM;
    /* Map the lower ids from the parent user namespace to the
     * kernel global id space.
@@ -1004,6 +1000,14 @@ static ssize_t map_write(struct file *file, const char __user *buf,
        e->lower_first = lower_first;
    }

    /*
     * If we want to use binary search for lookup, this clones the extent
     * array and sorts both copies.
     */
    ret = sort_idmaps(&new_map);
    if (ret < 0)
        goto out;

    /* Install the map */
    if (new_map.nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS) {
        memcpy(map->extent, new_map.extent,