Merge tag 'v5.2' into perf/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -1964,6 +1964,9 @@ static ssize_t write_cpuhp_fail(struct device *dev,
 	if (ret)
 		return ret;
 
+	if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)
+		return -EINVAL;
+
 	/*
 	 * Cannot fail STARTING/DYING callbacks.
 	 */
@@ -2339,6 +2342,9 @@ static int __init mitigations_parse_cmdline(char *arg)
 		cpu_mitigations = CPU_MITIGATIONS_AUTO;
 	else if (!strcmp(arg, "auto,nosmt"))
 		cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
+	else
+		pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
+			arg);
 
 	return 0;
 }
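The mitigations= change only adds a warning for unrecognized values; the setting itself keeps its previous (default) value. A minimal userspace sketch of the same strcmp-chain pattern — the enum and parse() helper are hypothetical stand-ins, not the kernel's:

#include <stdio.h>
#include <string.h>

enum mitigations { MITIGATIONS_OFF, MITIGATIONS_AUTO, MITIGATIONS_AUTO_NOSMT };

static enum mitigations parse(const char *arg)
{
	if (!strcmp(arg, "off"))
		return MITIGATIONS_OFF;
	else if (!strcmp(arg, "auto"))
		return MITIGATIONS_AUTO;
	else if (!strcmp(arg, "auto,nosmt"))
		return MITIGATIONS_AUTO_NOSMT;

	/* the new branch: complain loudly, keep the default */
	fprintf(stderr, "Unsupported mitigations=%s, system may still be vulnerable\n", arg);
	return MITIGATIONS_AUTO;
}

int main(void)
{
	printf("auto,nosmt -> %d\n", parse("auto,nosmt"));
	printf("bogus      -> %d\n", parse("bogus"));	/* triggers the warning */
	return 0;
}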
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5007,6 +5007,9 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
 	if (perf_event_check_period(event, value))
 		return -EINVAL;
 
+	if (!event->attr.freq && (value & (1ULL << 63)))
+		return -EINVAL;
+
 	event_function_call(event, __perf_event_period, &value);
 
 	return 0;
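The new check in perf_event_period() rejects a fixed sample period with bit 63 set, since the period flows into signed 64-bit arithmetic deeper in the perf core. A standalone userspace sketch of the predicate (the period_ok() helper is a hypothetical name for illustration):

#include <stdio.h>
#include <stdint.h>

/* mirrors: if (!event->attr.freq && (value & (1ULL << 63))) return -EINVAL; */
static int period_ok(uint64_t value, int freq_mode)
{
	return freq_mode || !(value & (1ULL << 63));
}

int main(void)
{
	printf("%d\n", period_ok(1000, 0));		/* 1: normal period */
	printf("%d\n", period_ok(1ULL << 63, 0));	/* 0: sign bit set, rejected */
	printf("%d\n", period_ok(1ULL << 63, 1));	/* 1: frequency mode skips the check */
	return 0;
}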
@@ -5925,7 +5928,7 @@ static void perf_sample_regs_user(struct perf_regs *regs_user,
 	if (user_mode(regs)) {
 		regs_user->abi = perf_reg_abi(current);
 		regs_user->regs = regs;
-	} else if (current->mm) {
+	} else if (!(current->flags & PF_KTHREAD)) {
 		perf_get_regs_user(regs_user, regs, regs_user_copy);
 	} else {
 		regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
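The perf_sample_regs_user() change swaps a current->mm test for a PF_KTHREAD test: a kernel thread that temporarily adopts a user mm (via use_mm()) has a non-NULL mm but still no user register state worth sampling. A toy comparison of the two predicates — the struct here is a deliberately simplified stand-in, not the kernel's task_struct:

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

#define PF_KTHREAD 0x00200000	/* flag value as in include/linux/sched.h */

struct task { unsigned int flags; void *mm; };

static bool old_check(const struct task *t) { return t->mm != NULL; }
static bool new_check(const struct task *t) { return !(t->flags & PF_KTHREAD); }

int main(void)
{
	int dummy;
	struct task user_task  = { 0, &dummy };          /* ordinary process */
	struct task kthread    = { PF_KTHREAD, NULL };   /* plain kernel thread */
	struct task kthread_mm = { PF_KTHREAD, &dummy }; /* kthread after use_mm() */

	printf("user task:  old=%d new=%d\n", old_check(&user_task), new_check(&user_task));
	printf("kthread:    old=%d new=%d\n", old_check(&kthread), new_check(&kthread));
	/* the interesting case: the two tests disagree here */
	printf("kthread+mm: old=%d new=%d\n", old_check(&kthread_mm), new_check(&kthread_mm));
	return 0;
}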
@@ -10041,6 +10044,12 @@ void perf_pmu_unregister(struct pmu *pmu)
 }
 EXPORT_SYMBOL_GPL(perf_pmu_unregister);
 
+static inline bool has_extended_regs(struct perf_event *event)
+{
+	return (event->attr.sample_regs_user & PERF_REG_EXTENDED_MASK) ||
+	       (event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK);
+}
+
 static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
 {
 	struct perf_event_context *ctx = NULL;
@@ -10072,12 +10081,16 @@ static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
 	perf_event_ctx_unlock(event->group_leader, ctx);
 
 	if (!ret) {
+		if (!(pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS) &&
+		    has_extended_regs(event))
+			ret = -EOPNOTSUPP;
+
 		if (pmu->capabilities & PERF_PMU_CAP_NO_EXCLUDE &&
-		    event_has_any_exclude_flag(event)) {
-			if (event->destroy)
-				event->destroy(event);
+		    event_has_any_exclude_flag(event))
 			ret = -EINVAL;
-		}
+
+		if (ret && event->destroy)
+			event->destroy(event);
 	}
 
 	if (ret)
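The perf_try_init_event() rework moves the event->destroy() call to a single spot that fires after any failed capability check, instead of duplicating cleanup inside each branch (note that, as in the hunk, a later check may overwrite an earlier error code). A generic sketch of that shape, with hypothetical obj/try_init names rather than the perf API:

#include <stdio.h>

struct obj {
	void (*destroy)(struct obj *);
	int lacks_cap_a, lacks_cap_b;
};

static void obj_destroy(struct obj *o)
{
	(void)o;
	printf("destroy runs exactly once\n");
}

static int try_init(struct obj *o)
{
	int ret = 0;

	if (o->lacks_cap_a)
		ret = -95;		/* -EOPNOTSUPP */
	if (o->lacks_cap_b)
		ret = -22;		/* -EINVAL, may overwrite the above */

	if (ret && o->destroy)		/* single cleanup point */
		o->destroy(o);
	return ret;
}

int main(void)
{
	struct obj o = { obj_destroy, 1, 1 };
	printf("ret=%d\n", try_init(&o));
	return 0;
}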
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -248,7 +248,11 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
 	struct page *page = alloc_pages_node(node, THREADINFO_GFP,
 					     THREAD_SIZE_ORDER);
 
-	return page ? page_address(page) : NULL;
+	if (likely(page)) {
+		tsk->stack = page_address(page);
+		return tsk->stack;
+	}
+	return NULL;
 #endif
 }
 
@@ -1712,31 +1716,6 @@ const struct file_operations pidfd_fops = {
 #endif
 };
 
-/**
- * pidfd_create() - Create a new pid file descriptor.
- *
- * @pid:  struct pid that the pidfd will reference
- *
- * This creates a new pid file descriptor with the O_CLOEXEC flag set.
- *
- * Note, that this function can only be called after the fd table has
- * been unshared to avoid leaking the pidfd to the new process.
- *
- * Return: On success, a cloexec pidfd is returned.
- *         On error, a negative errno number will be returned.
- */
-static int pidfd_create(struct pid *pid)
-{
-	int fd;
-
-	fd = anon_inode_getfd("[pidfd]", &pidfd_fops, get_pid(pid),
-			      O_RDWR | O_CLOEXEC);
-	if (fd < 0)
-		put_pid(pid);
-
-	return fd;
-}
-
 static void __delayed_free_task(struct rcu_head *rhp)
 {
 	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
@@ -1774,6 +1753,7 @@ static __latent_entropy struct task_struct *copy_process(
 	int pidfd = -1, retval;
 	struct task_struct *p;
 	struct multiprocess_signals delayed;
+	struct file *pidfile = NULL;
 
 	/*
 	 * Don't allow sharing the root directory with processes in a different
@@ -1822,8 +1802,6 @@ static __latent_entropy struct task_struct *copy_process(
 	}
 
 	if (clone_flags & CLONE_PIDFD) {
-		int reserved;
-
 		/*
 		 * - CLONE_PARENT_SETTID is useless for pidfds and also
 		 *   parent_tidptr is used to return pidfds.
@@ -1834,16 +1812,6 @@ static __latent_entropy struct task_struct *copy_process(
 		if (clone_flags &
 		    (CLONE_DETACHED | CLONE_PARENT_SETTID | CLONE_THREAD))
 			return ERR_PTR(-EINVAL);
-
-		/*
-		 * Verify that parent_tidptr is sane so we can potentially
-		 * reuse it later.
-		 */
-		if (get_user(reserved, parent_tidptr))
-			return ERR_PTR(-EFAULT);
-
-		if (reserved != 0)
-			return ERR_PTR(-EINVAL);
 	}
 
 	/*
@@ -2058,11 +2026,21 @@ static __latent_entropy struct task_struct *copy_process(
 	 * if the fd table isn't shared).
 	 */
 	if (clone_flags & CLONE_PIDFD) {
-		retval = pidfd_create(pid);
+		retval = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
 		if (retval < 0)
 			goto bad_fork_free_pid;
 
 		pidfd = retval;
+
+		pidfile = anon_inode_getfile("[pidfd]", &pidfd_fops, pid,
+					      O_RDWR | O_CLOEXEC);
+		if (IS_ERR(pidfile)) {
+			put_unused_fd(pidfd);
+			retval = PTR_ERR(pidfile);
+			goto bad_fork_free_pid;
+		}
+		get_pid(pid);	/* held by pidfile now */
+
 		retval = put_user(pidfd, parent_tidptr);
 		if (retval)
 			goto bad_fork_put_pidfd;
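After this rework the pidfd is only reserved here (get_unused_fd_flags()) and not installed until past the last point of failure (see the fd_install() hunk below), so error paths never expose a half-initialized fd to userspace. The userland interface is unchanged: CLONE_PIDFD returns the fd through the parent_tid argument. A minimal demo, assuming Linux >= 5.2 and the x86_64 raw clone() argument order; the fallback #defines cover older libc headers:

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>

#ifndef CLONE_PIDFD
#define CLONE_PIDFD 0x00001000		/* from include/uapi/linux/sched.h */
#endif
#ifndef SYS_pidfd_send_signal
#define SYS_pidfd_send_signal 424	/* x86_64 syscall number */
#endif

int main(void)
{
	int pidfd = -1;

	/* raw clone(); argument order shown is the x86_64 one:
	 * clone(flags, stack, parent_tid, child_tid, tls) */
	pid_t pid = syscall(SYS_clone, CLONE_PIDFD | SIGCHLD, NULL, &pidfd, NULL, NULL);
	if (pid < 0) {
		perror("clone");
		exit(1);
	}
	if (pid == 0) {		/* child: block until signalled */
		pause();
		_exit(0);
	}

	printf("spawned child %d, pidfd %d\n", (int)pid, pidfd);

	/* signal the child through its pidfd (Linux >= 5.1), then reap it */
	syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
	waitpid(pid, NULL, 0);
	close(pidfd);
	return 0;
}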
@@ -2180,6 +2158,9 @@ static __latent_entropy struct task_struct *copy_process(
 		goto bad_fork_cancel_cgroup;
 	}
 
+	/* past the last point of failure */
+	if (pidfile)
+		fd_install(pidfd, pidfile);
+
 	init_task_pid_links(p);
 	if (likely(p->pid)) {
@@ -2246,8 +2227,10 @@ bad_fork_cancel_cgroup:
 bad_fork_cgroup_threadgroup_change_end:
 	cgroup_threadgroup_change_end(current);
 bad_fork_put_pidfd:
-	if (clone_flags & CLONE_PIDFD)
-		ksys_close(pidfd);
+	if (clone_flags & CLONE_PIDFD) {
+		fput(pidfile);
+		put_unused_fd(pidfd);
+	}
 bad_fork_free_pid:
 	if (pid != &init_struct_pid)
 		free_pid(pid);
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -493,6 +493,9 @@ int suspend_devices_and_enter(suspend_state_t state)
 
 	pm_suspend_target_state = state;
 
+	if (state == PM_SUSPEND_TO_IDLE)
+		pm_set_suspend_no_platform();
+
 	error = platform_suspend_begin(state);
 	if (error)
 		goto Close;
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -79,9 +79,7 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent,
  */
 static void ptrace_link(struct task_struct *child, struct task_struct *new_parent)
 {
-	rcu_read_lock();
-	__ptrace_link(child, new_parent, __task_cred(new_parent));
-	rcu_read_unlock();
+	__ptrace_link(child, new_parent, current_cred());
 }
 
 /**
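ptrace_link() now records the credentials of whoever requests the tracing relationship (current_cred()) rather than sampling the new parent's credentials, which matters for the PTRACE_TRACEME path where the caller is the tracee and the parent's creds could be transiently elevated. A minimal demo of that path (userspace C, error handling elided for brevity):

#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();

	if (pid == 0) {		/* child becomes the tracee */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);	/* ptrace_link() runs here */
		raise(SIGSTOP);		/* hand control to the tracer */
		_exit(0);
	}

	int status;
	waitpid(pid, &status, 0);		/* observe the SIGSTOP */
	ptrace(PTRACE_CONT, pid, NULL, NULL);	/* suppress it and resume */
	waitpid(pid, &status, 0);		/* child exits */
	printf("traced child %d finished\n", (int)pid);
	return 0;
}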
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2912,7 +2912,8 @@ EXPORT_SYMBOL(set_compat_user_sigmask);
  * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
  * epoll_pwait where a new sigmask is passed in from userland for the syscalls.
  */
-void restore_user_sigmask(const void __user *usigmask, sigset_t *sigsaved)
+void restore_user_sigmask(const void __user *usigmask, sigset_t *sigsaved,
+			  bool interrupted)
 {
 
 	if (!usigmask)
@@ -2922,7 +2923,7 @@ void restore_user_sigmask(const void __user *usigmask, sigset_t *sigsaved)
 	 * Restoring sigmask here can lead to delivering signals that the above
 	 * syscalls are intended to block because of the sigmask passed in.
 	 */
-	if (signal_pending(current)) {
+	if (interrupted) {
 		current->saved_sigmask = *sigsaved;
 		set_restore_sigmask();
 		return;
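The caller now reports explicitly whether the wait was interrupted, instead of restore_user_sigmask() guessing via signal_pending(), which could misclassify a signal that arrived only after the wait had already succeeded. The userland contract being protected: ppoll() atomically swaps in a temporary sigmask for the duration of the call and the kernel must restore the original mask with the right signal-delivery semantics. A minimal runnable example of that contract:

#define _GNU_SOURCE
#include <poll.h>
#include <signal.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	sigset_t during_wait;

	sigfillset(&during_wait);
	sigdelset(&during_wait, SIGINT);	/* only SIGINT may interrupt the wait */

	struct pollfd pfd = { .fd = STDIN_FILENO, .events = POLLIN };
	struct timespec timeout = { .tv_sec = 2, .tv_nsec = 0 };

	/* the kernel swaps in during_wait, sleeps, and restores the old mask;
	 * the fixed code defers that restore (so the signal is delivered
	 * under the temporary-mask semantics) only when the call was
	 * actually interrupted */
	int ret = ppoll(&pfd, 1, &timeout, &during_wait);
	if (ret < 0)
		perror("ppoll");	/* EINTR: a SIGINT arrived during the wait */
	else
		printf("ppoll returned %d\n", ret);
	return 0;
}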
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -34,7 +34,6 @@
 #include <linux/hash.h>
 #include <linux/rcupdate.h>
 #include <linux/kprobes.h>
-#include <linux/memory.h>
 
 #include <trace/events/sched.h>
 
@@ -2611,12 +2610,10 @@ static void ftrace_run_update_code(int command)
 {
 	int ret;
 
-	mutex_lock(&text_mutex);
-
 	ret = ftrace_arch_code_modify_prepare();
 	FTRACE_WARN_ON(ret);
 	if (ret)
-		goto out_unlock;
+		return;
 
 	/*
 	 * By default we use stop_machine() to modify the code.
@@ -2628,9 +2625,6 @@ static void ftrace_run_update_code(int command)
 
 	ret = ftrace_arch_code_modify_post_process();
 	FTRACE_WARN_ON(ret);
-
-out_unlock:
-	mutex_unlock(&text_mutex);
 }
 
 static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
@@ -5784,7 +5778,6 @@ void ftrace_module_enable(struct module *mod)
 	struct ftrace_page *pg;
 
 	mutex_lock(&ftrace_lock);
-	mutex_lock(&text_mutex);
 
 	if (ftrace_disabled)
 		goto out_unlock;
@@ -5846,7 +5839,6 @@ void ftrace_module_enable(struct module *mod)
 	ftrace_arch_code_modify_post_process();
 
 out_unlock:
-	mutex_unlock(&text_mutex);
 	mutex_unlock(&ftrace_lock);
 
 	process_cached_mods(mod->name);
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -6719,11 +6719,13 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
 			break;
 		}
 #endif
-		if (!tr->allocated_snapshot) {
+		if (tr->allocated_snapshot)
+			ret = resize_buffer_duplicate_size(&tr->max_buffer,
+					&tr->trace_buffer, iter->cpu_file);
+		else
 			ret = tracing_alloc_snapshot_instance(tr);
-			if (ret < 0)
-				break;
-		}
+		if (ret < 0)
+			break;
 		local_irq_disable();
 		/* Now, we're going to swap */
 		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
@@ -7126,12 +7128,24 @@ static ssize_t tracing_err_log_write(struct file *file,
 	return count;
 }
 
+static int tracing_err_log_release(struct inode *inode, struct file *file)
+{
+	struct trace_array *tr = inode->i_private;
+
+	trace_array_put(tr);
+
+	if (file->f_mode & FMODE_READ)
+		seq_release(inode, file);
+
+	return 0;
+}
+
 static const struct file_operations tracing_err_log_fops = {
 	.open           = tracing_err_log_open,
 	.write		= tracing_err_log_write,
 	.read           = seq_read,
 	.llseek		= seq_lseek,
-	.release        = tracing_release_generic_tr,
+	.release        = tracing_err_log_release,
 };
 
 static int tracing_buffers_open(struct inode *inode, struct file *filp)