Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:

 - the rest of MM
 - procfs updates
 - various misc things
 - more y2038 fixes
 - get_maintainer updates
 - lib/ updates
 - checkpatch updates
 - various epoll updates
 - autofs updates
 - hfsplus
 - some reiserfs work
 - fatfs updates
 - signal.c cleanups
 - ipc/ updates

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (166 commits)
  ipc/util.c: update return value of ipc_getref from int to bool
  ipc/util.c: further variable name cleanups
  ipc: simplify ipc initialization
  ipc: get rid of ids->tables_initialized hack
  lib/rhashtable: guarantee initial hashtable allocation
  lib/rhashtable: simplify bucket_table_alloc()
  ipc: drop ipc_lock()
  ipc/util.c: correct comment in ipc_obtain_object_check
  ipc: rename ipcctl_pre_down_nolock()
  ipc/util.c: use ipc_rcu_putref() for failues in ipc_addid()
  ipc: reorganize initialization of kern_ipc_perm.seq
  ipc: compute kern_ipc_perm.id under the ipc lock
  init/Kconfig: remove EXPERT from CHECKPOINT_RESTORE
  fs/sysv/inode.c: use ktime_get_real_seconds() for superblock stamp
  adfs: use timespec64 for time conversion
  kernel/sysctl.c: fix typos in comments
  drivers/rapidio/devices/rio_mport_cdev.c: remove redundant pointer md
  fork: don't copy inconsistent signal handler state to child
  signal: make get_signal() return bool
  signal: make sigkill_pending() return bool
  ...
 kernel/signal.c | 170
@@ -65,14 +65,14 @@ static void __user *sig_handler(struct task_struct *t, int sig)
 	return t->sighand->action[sig - 1].sa.sa_handler;
 }
 
-static int sig_handler_ignored(void __user *handler, int sig)
+static inline bool sig_handler_ignored(void __user *handler, int sig)
 {
 	/* Is it explicitly or implicitly ignored? */
 	return handler == SIG_IGN ||
-		(handler == SIG_DFL && sig_kernel_ignore(sig));
+	       (handler == SIG_DFL && sig_kernel_ignore(sig));
 }
 
-static int sig_task_ignored(struct task_struct *t, int sig, bool force)
+static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
 {
 	void __user *handler;
 
@@ -80,12 +80,12 @@ static int sig_task_ignored(struct task_struct *t, int sig, bool force)
 
 	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
 	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
-		return 1;
+		return true;
 
 	return sig_handler_ignored(handler, sig);
 }
 
-static int sig_ignored(struct task_struct *t, int sig, bool force)
+static bool sig_ignored(struct task_struct *t, int sig, bool force)
 {
 	/*
 	 * Blocked signals are never ignored, since the
@@ -93,7 +93,7 @@ static int sig_ignored(struct task_struct *t, int sig, bool force)
 	 * unblocked.
 	 */
 	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
-		return 0;
+		return false;
 
 	/*
 	 * Tracers may want to know about even ignored signal unless it
@@ -101,7 +101,7 @@ static int sig_ignored(struct task_struct *t, int sig, bool force)
 	 * by SIGNAL_UNKILLABLE task.
 	 */
 	if (t->ptrace && sig != SIGKILL)
-		return 0;
+		return false;
 
 	return sig_task_ignored(t, sig, force);
 }
@@ -110,7 +110,7 @@ static int sig_ignored(struct task_struct *t, int sig, bool force)
  * Re-calculate pending state from the set of locally pending
  * signals, globally pending signals, and blocked signals.
  */
-static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
+static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
 {
 	unsigned long ready;
 	long i;
@@ -138,20 +138,21 @@ static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
 
 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
 
-static int recalc_sigpending_tsk(struct task_struct *t)
+static bool recalc_sigpending_tsk(struct task_struct *t)
 {
 	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
 	    PENDING(&t->pending, &t->blocked) ||
 	    PENDING(&t->signal->shared_pending, &t->blocked)) {
 		set_tsk_thread_flag(t, TIF_SIGPENDING);
-		return 1;
+		return true;
 	}
+
 	/*
 	 * We must never clear the flag in another thread, or in current
 	 * when it's possible the current syscall is returning -ERESTART*.
 	 * So we don't clear it here, and only callers who know they should do.
 	 */
-	return 0;
+	return false;
 }
 
 /*
@@ -529,13 +530,15 @@ flush_signal_handlers(struct task_struct *t, int force_default)
 	}
 }
 
-int unhandled_signal(struct task_struct *tsk, int sig)
+bool unhandled_signal(struct task_struct *tsk, int sig)
 {
 	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
 	if (is_global_init(tsk))
-		return 1;
+		return true;
+
 	if (handler != SIG_IGN && handler != SIG_DFL)
-		return 0;
+		return false;
+
 	/* if ptraced, let the tracer determine */
 	return !tsk->ptrace;
 }
@@ -709,14 +712,14 @@ void signal_wake_up_state(struct task_struct *t, unsigned int state)
  *
  * All callers must be holding the siglock.
  */
-static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
+static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
 {
 	struct sigqueue *q, *n;
 	sigset_t m;
 
 	sigandsets(&m, mask, &s->signal);
 	if (sigisemptyset(&m))
-		return 0;
+		return;
 
 	sigandnsets(&s->signal, &s->signal, mask);
 	list_for_each_entry_safe(q, n, &s->list, list) {
@@ -725,7 +728,6 @@ static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
 			__sigqueue_free(q);
 		}
 	}
-	return 1;
 }
 
 static inline int is_si_special(const struct siginfo *info)
@@ -742,21 +744,16 @@ static inline bool si_fromuser(const struct siginfo *info)
 /*
  * called with RCU read lock from check_kill_permission()
  */
-static int kill_ok_by_cred(struct task_struct *t)
+static bool kill_ok_by_cred(struct task_struct *t)
 {
 	const struct cred *cred = current_cred();
 	const struct cred *tcred = __task_cred(t);
 
-	if (uid_eq(cred->euid, tcred->suid) ||
-	    uid_eq(cred->euid, tcred->uid) ||
-	    uid_eq(cred->uid, tcred->suid) ||
-	    uid_eq(cred->uid, tcred->uid))
-		return 1;
-
-	if (ns_capable(tcred->user_ns, CAP_KILL))
-		return 1;
-
-	return 0;
+	return uid_eq(cred->euid, tcred->suid) ||
+	       uid_eq(cred->euid, tcred->uid) ||
+	       uid_eq(cred->uid, tcred->suid) ||
+	       uid_eq(cred->uid, tcred->uid) ||
+	       ns_capable(tcred->user_ns, CAP_KILL);
 }
 
 /*
@@ -907,16 +904,20 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force)
  * as soon as they're available, so putting the signal on the shared queue
  * will be equivalent to sending it to one such thread.
  */
-static inline int wants_signal(int sig, struct task_struct *p)
+static inline bool wants_signal(int sig, struct task_struct *p)
 {
 	if (sigismember(&p->blocked, sig))
-		return 0;
+		return false;
+
 	if (p->flags & PF_EXITING)
-		return 0;
+		return false;
+
 	if (sig == SIGKILL)
-		return 1;
+		return true;
+
 	if (task_is_stopped_or_traced(p))
-		return 0;
+		return false;
+
 	return task_curr(p) || !signal_pending(p);
 }
 
@@ -996,7 +997,7 @@ static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
 	return;
 }
 
-static inline int legacy_queue(struct sigpending *signals, int sig)
+static inline bool legacy_queue(struct sigpending *signals, int sig)
 {
 	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
 }
@@ -1380,14 +1381,15 @@ static int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
 	return error;
 }
 
-static int kill_as_cred_perm(const struct cred *cred,
-			     struct task_struct *target)
+static inline bool kill_as_cred_perm(const struct cred *cred,
+				     struct task_struct *target)
 {
 	const struct cred *pcred = __task_cred(target);
-	if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
-	    !uid_eq(cred->uid, pcred->suid) && !uid_eq(cred->uid, pcred->uid))
-		return 0;
-	return 1;
+
+	return uid_eq(cred->euid, pcred->suid) ||
+	       uid_eq(cred->euid, pcred->uid) ||
+	       uid_eq(cred->uid, pcred->suid) ||
+	       uid_eq(cred->uid, pcred->uid);
 }
 
 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
@@ -1500,8 +1502,7 @@ send_sig(int sig, struct task_struct *p, int priv)
 	return send_sig_info(sig, __si_special(priv), p);
 }
 
-void
-force_sig(int sig, struct task_struct *p)
+void force_sig(int sig, struct task_struct *p)
 {
 	force_sig_info(sig, SEND_SIG_PRIV, p);
 }
@@ -1512,8 +1513,7 @@ force_sig(int sig, struct task_struct *p)
  * the problem was already a SIGSEGV, we'll want to
  * make sure we don't even try to deliver the signal..
  */
-int
-force_sigsegv(int sig, struct task_struct *p)
+void force_sigsegv(int sig, struct task_struct *p)
 {
 	if (sig == SIGSEGV) {
 		unsigned long flags;
@@ -1522,7 +1522,6 @@ force_sigsegv(int sig, struct task_struct *p)
 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
 	}
 	force_sig(SIGSEGV, p);
-	return 0;
 }
 
 int force_sig_fault(int sig, int code, void __user *addr
@@ -1923,10 +1922,10 @@ static void do_notify_parent_cldstop(struct task_struct *tsk,
 	spin_unlock_irqrestore(&sighand->siglock, flags);
 }
 
-static inline int may_ptrace_stop(void)
+static inline bool may_ptrace_stop(void)
 {
 	if (!likely(current->ptrace))
-		return 0;
+		return false;
 	/*
 	 * Are we in the middle of do_coredump?
 	 * If so and our tracer is also part of the coredump stopping
@@ -1942,19 +1941,19 @@ static inline int may_ptrace_stop(void)
 	 */
 	if (unlikely(current->mm->core_state) &&
 	    unlikely(current->mm == current->parent->mm))
-		return 0;
+		return false;
 
-	return 1;
+	return true;
 }
 
 /*
  * Return non-zero if there is a SIGKILL that should be waking us up.
  * Called with the siglock held.
  */
-static int sigkill_pending(struct task_struct *tsk)
+static bool sigkill_pending(struct task_struct *tsk)
 {
-	return	sigismember(&tsk->pending.signal, SIGKILL) ||
-		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
+	return sigismember(&tsk->pending.signal, SIGKILL) ||
+	       sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
 }
 
 /*
@@ -2334,7 +2333,7 @@ static int ptrace_signal(int signr, siginfo_t *info)
 	return signr;
 }
 
-int get_signal(struct ksignal *ksig)
+bool get_signal(struct ksignal *ksig)
 {
 	struct sighand_struct *sighand = current->sighand;
 	struct signal_struct *signal = current->signal;
@@ -2344,7 +2343,7 @@ int get_signal(struct ksignal *ksig)
 		task_work_run();
 
 	if (unlikely(uprobe_deny_signal()))
-		return 0;
+		return false;
 
 	/*
 	 * Do this once, we can't return to user-mode if freezing() == T.
@@ -2801,7 +2800,7 @@ COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
 }
 #endif
 
-static int do_sigpending(sigset_t *set)
+static void do_sigpending(sigset_t *set)
 {
 	spin_lock_irq(&current->sighand->siglock);
 	sigorsets(set, &current->pending.signal,
@@ -2810,7 +2809,6 @@ static int do_sigpending(sigset_t *set)
 
 	/* Outside the lock because only this thread touches it. */
 	sigandsets(set, &current->blocked, set);
-	return 0;
 }
 
 /**
@@ -2822,15 +2820,16 @@ static int do_sigpending(sigset_t *set)
 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
 {
 	sigset_t set;
-	int err;
 
 	if (sigsetsize > sizeof(*uset))
 		return -EINVAL;
 
-	err = do_sigpending(&set);
-	if (!err && copy_to_user(uset, &set, sigsetsize))
-		err = -EFAULT;
-	return err;
+	do_sigpending(&set);
+
+	if (copy_to_user(uset, &set, sigsetsize))
+		return -EFAULT;
+
+	return 0;
 }
 
 #ifdef CONFIG_COMPAT
@@ -2838,15 +2837,13 @@ COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
 		compat_size_t, sigsetsize)
 {
 	sigset_t set;
-	int err;
 
 	if (sigsetsize > sizeof(*uset))
 		return -EINVAL;
 
-	err = do_sigpending(&set);
-	if (!err)
-		err = put_compat_sigset(uset, &set, sigsetsize);
-	return err;
+	do_sigpending(&set);
+
+	return put_compat_sigset(uset, &set, sigsetsize);
 }
 #endif
 
@@ -3608,25 +3605,26 @@ int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
 {
 	sigset_t set;
-	int err;
 
 	if (sizeof(old_sigset_t) > sizeof(*uset))
 		return -EINVAL;
 
-	err = do_sigpending(&set);
-	if (!err && copy_to_user(uset, &set, sizeof(old_sigset_t)))
-		err = -EFAULT;
-	return err;
+	do_sigpending(&set);
+
+	if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
+		return -EFAULT;
+
+	return 0;
 }
 
 #ifdef CONFIG_COMPAT
 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
 {
 	sigset_t set;
-	int err = do_sigpending(&set);
-	if (!err)
-		err = put_user(set.sig[0], set32);
-	return err;
+
+	do_sigpending(&set);
+
+	return put_user(set.sig[0], set32);
 }
 #endif
 
@@ -3697,25 +3695,23 @@ SYSCALL_DEFINE4(rt_sigaction, int, sig,
 		size_t, sigsetsize)
 {
 	struct k_sigaction new_sa, old_sa;
-	int ret = -EINVAL;
+	int ret;
 
 	/* XXX: Don't preclude handling different sized sigset_t's. */
 	if (sigsetsize != sizeof(sigset_t))
-		goto out;
+		return -EINVAL;
 
-	if (act) {
-		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
-			return -EFAULT;
-	}
+	if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
+		return -EFAULT;
 
 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
+	if (ret)
+		return ret;
 
-	if (!ret && oact) {
-		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
-			return -EFAULT;
-	}
-out:
-	return ret;
+	if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
+		return -EFAULT;
+
+	return 0;
 }
 #ifdef CONFIG_COMPAT
 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,