Merge branch 'akpm' (patches from Andrew)
Merge second patch-bomb from Andrew Morton:

 - most of the rest of MM
 - procfs
 - lib/ updates
 - printk updates
 - bitops infrastructure tweaks
 - checkpatch updates
 - nilfs2 update
 - signals
 - various other misc bits: coredump, seqfile, kexec, pidns, zlib, ipc,
   dma-debug, dma-mapping, ...

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (102 commits)
  ipc,msg: drop dst nil validation in copy_msg
  include/linux/zutil.h: fix usage example of zlib_adler32()
  panic: release stale console lock to always get the logbuf printed out
  dma-debug: check nents in dma_sync_sg*
  dma-mapping: tidy up dma_parms default handling
  pidns: fix set/getpriority and ioprio_set/get in PRIO_USER mode
  kexec: use file name as the output message prefix
  fs, seqfile: always allow oom killer
  seq_file: reuse string_escape_str()
  fs/seq_file: use seq_* helpers in seq_hex_dump()
  coredump: change zap_threads() and zap_process() to use for_each_thread()
  coredump: ensure all coredumping tasks have SIGNAL_GROUP_COREDUMP
  signal: remove jffs2_garbage_collect_thread()->allow_signal(SIGCONT)
  signal: introduce kernel_signal_stop() to fix jffs2_garbage_collect_thread()
  signal: turn dequeue_signal_lock() into kernel_dequeue_signal()
  signals: kill block_all_signals() and unblock_all_signals()
  nilfs2: fix gcc uninitialized-variable warnings in powerpc build
  nilfs2: fix gcc unused-but-set-variable warnings
  MAINTAINERS: nilfs2: add header file for tracing
  nilfs2: add tracepoints for analyzing reading and writing metadata files
  ...
@@ -1371,16 +1371,16 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
 	if (unlikely(audit_filter_type(type)))
 		return NULL;
 
-	if (gfp_mask & __GFP_WAIT) {
+	if (gfp_mask & __GFP_DIRECT_RECLAIM) {
 		if (audit_pid && audit_pid == current->pid)
-			gfp_mask &= ~__GFP_WAIT;
+			gfp_mask &= ~__GFP_DIRECT_RECLAIM;
 		else
 			reserve = 0;
 	}
 
 	while (audit_backlog_limit
 	       && skb_queue_len(&audit_skb_queue) > audit_backlog_limit + reserve) {
-		if (gfp_mask & __GFP_WAIT && audit_backlog_wait_time) {
+		if (gfp_mask & __GFP_DIRECT_RECLAIM && audit_backlog_wait_time) {
 			long sleep_time;
 
 			sleep_time = timeout_start + audit_backlog_wait_time - jiffies;
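Note: the __GFP_WAIT tests above are converted to __GFP_DIRECT_RECLAIM by this series' gfp-flag rework. As a minimal user-space sketch of the relationship (the bit values are placeholders, not the authoritative include/linux/gfp.h), the rework splits "may block" into direct reclaim by the caller and background reclaim by kswapd, and adds a helper for open-coded blocking checks:

#include <stdbool.h>

/* Sketch only; placeholder bit values, not the real kernel header. */
typedef unsigned int gfp_t;

#define __GFP_DIRECT_RECLAIM	((gfp_t)0x400000u)   /* caller may sleep in direct reclaim */
#define __GFP_KSWAPD_RECLAIM	((gfp_t)0x2000000u)  /* kswapd may reclaim in the background */
#define __GFP_RECLAIM		(__GFP_DIRECT_RECLAIM | __GFP_KSWAPD_RECLAIM)

/* Replacement for open-coded "gfp_mask & __GFP_WAIT" checks; the audit
 * hunk above keeps the raw bit test, while the on_each_cpu_cond() hunk
 * further down switches to this helper. */
static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
	return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}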
@@ -299,7 +299,7 @@ static int cgroup_idr_alloc(struct idr *idr, void *ptr, int start, int end,
 
 	idr_preload(gfp_mask);
 	spin_lock_bh(&cgroup_idr_lock);
-	ret = idr_alloc(idr, ptr, start, end, gfp_mask & ~__GFP_WAIT);
+	ret = idr_alloc(idr, ptr, start, end, gfp_mask & ~__GFP_DIRECT_RECLAIM);
 	spin_unlock_bh(&cgroup_idr_lock);
 	idr_preload_end();
 	return ret;
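The masking in the hunk above follows the usual idr_preload() contract: the preload step may sleep according to gfp_mask and fills a per-CPU cache while disabling preemption, so the idr_alloc() issued under the spinlock must itself be non-blocking. A commented restatement of the pattern (annotation only, not new code in this commit):

	idr_preload(gfp_mask);			/* may sleep; fills per-CPU cache */
	spin_lock_bh(&cgroup_idr_lock);		/* atomic context from here on */
	ret = idr_alloc(idr, ptr, start, end,	/* must not sleep, hence the */
			gfp_mask & ~__GFP_DIRECT_RECLAIM);	/* masked reclaim bit */
	spin_unlock_bh(&cgroup_idr_lock);
	idr_preload_end();			/* re-enables preemption */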
@@ -6,6 +6,8 @@
  * Version 2.  See the file COPYING for more details.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/capability.h>
 #include <linux/mm.h>
 #include <linux/file.h>
@@ -6,7 +6,7 @@
  * Version 2.  See the file COPYING for more details.
  */
 
-#define pr_fmt(fmt) "kexec: " fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/capability.h>
 #include <linux/mm.h>
@@ -1027,7 +1027,7 @@ static int __init crash_notes_memory_init(void)
 
 	crash_notes = __alloc_percpu(size, align);
 	if (!crash_notes) {
-		pr_warn("Kexec: Memory allocation for saving cpu register states failed\n");
+		pr_warn("Memory allocation for saving cpu register states failed\n");
 		return -ENOMEM;
 	}
 	return 0;
@@ -9,6 +9,8 @@
  * Version 2.  See the file COPYING for more details.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/capability.h>
 #include <linux/mm.h>
 #include <linux/file.h>
@@ -2738,7 +2738,7 @@ static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags)
 		return;
 
 	/* no reclaim without waiting on it */
-	if (!(gfp_mask & __GFP_WAIT))
+	if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
 		return;
 
 	/* this guy won't enter reclaim */
@@ -23,6 +23,7 @@
 #include <linux/sysrq.h>
 #include <linux/init.h>
 #include <linux/nmi.h>
+#include <linux/console.h>
 
 #define PANIC_TIMER_STEP 100
 #define PANIC_BLINK_SPD 18
@@ -147,6 +148,15 @@ void panic(const char *fmt, ...)
 
 	bust_spinlocks(0);
 
+	/*
+	 * We may have ended up stopping the CPU holding the lock (in
+	 * smp_send_stop()) while still having some valuable data in the console
+	 * buffer. Try to acquire the lock then release it regardless of the
+	 * result. The release will also print the buffers out.
+	 */
+	console_trylock();
+	console_unlock();
+
 	if (!panic_blink)
 		panic_blink = no_blink;
@@ -325,10 +325,11 @@ int param_get_charp(char *buffer, const struct kernel_param *kp)
 }
 EXPORT_SYMBOL(param_get_charp);
 
-static void param_free_charp(void *arg)
+void param_free_charp(void *arg)
 {
 	maybe_kfree_parameter(*((char **)arg));
 }
+EXPORT_SYMBOL(param_free_charp);
 
 const struct kernel_param_ops param_ops_charp = {
 	.set = param_set_charp,
@@ -1779,7 +1779,7 @@ alloc_highmem_pages(struct memory_bitmap *bm, unsigned int nr_highmem)
 	while (to_alloc-- > 0) {
 		struct page *page;
 
-		page = alloc_image_page(__GFP_HIGHMEM);
+		page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
 		memory_bm_set_bit(bm, page_to_pfn(page));
 	}
 	return nr_highmem;
@@ -257,7 +257,7 @@ static int hib_submit_io(int rw, pgoff_t page_off, void *addr,
 	struct bio *bio;
 	int error = 0;
 
-	bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
+	bio = bio_alloc(__GFP_RECLAIM | __GFP_HIGH, 1);
 	bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
 	bio->bi_bdev = hib_resume_bdev;
@@ -356,7 +356,7 @@ static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
 		return -ENOSPC;
 
 	if (hb) {
-		src = (void *)__get_free_page(__GFP_WAIT | __GFP_NOWARN |
+		src = (void *)__get_free_page(__GFP_RECLAIM | __GFP_NOWARN |
 		                              __GFP_NORETRY);
 		if (src) {
 			copy_page(src, buf);
@@ -364,7 +364,7 @@ static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
 			ret = hib_wait_io(hb); /* Free pages */
 			if (ret)
 				return ret;
-			src = (void *)__get_free_page(__GFP_WAIT |
+			src = (void *)__get_free_page(__GFP_RECLAIM |
 			                              __GFP_NOWARN |
 			                              __GFP_NORETRY);
 			if (src) {
@@ -672,7 +672,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
 	nr_threads = num_online_cpus() - 1;
 	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
 
-	page = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
+	page = (void *)__get_free_page(__GFP_RECLAIM | __GFP_HIGH);
 	if (!page) {
 		printk(KERN_ERR "PM: Failed to allocate LZO page\n");
 		ret = -ENOMEM;
@@ -975,7 +975,7 @@ static int get_swap_reader(struct swap_map_handle *handle,
 		last = tmp;
 
 		tmp->map = (struct swap_map_page *)
-			__get_free_page(__GFP_WAIT | __GFP_HIGH);
+			__get_free_page(__GFP_RECLAIM | __GFP_HIGH);
 		if (!tmp->map) {
 			release_swap_reader(handle);
 			return -ENOMEM;
@@ -1242,9 +1242,9 @@ static int load_image_lzo(struct swap_map_handle *handle,
 
 	for (i = 0; i < read_pages; i++) {
 		page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
-		                                  __GFP_WAIT | __GFP_HIGH :
-		                                  __GFP_WAIT | __GFP_NOWARN |
-		                                  __GFP_NORETRY);
+		                                  __GFP_RECLAIM | __GFP_HIGH :
+		                                  __GFP_RECLAIM | __GFP_NOWARN |
+		                                  __GFP_NORETRY);
 
 		if (!page[i]) {
 			if (i < LZO_CMP_PAGES) {
@@ -269,6 +269,9 @@ static u32 clear_idx;
 #define PREFIX_MAX		32
 #define LOG_LINE_MAX		(1024 - PREFIX_MAX)
 
+#define LOG_LEVEL(v)		((v) & 0x07)
+#define LOG_FACILITY(v)		((v) >> 3 & 0xff)
+
 /* record buffer */
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
 #define LOG_ALIGN 4
@@ -612,7 +615,6 @@ struct devkmsg_user {
 static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from)
 {
 	char *buf, *line;
-	int i;
 	int level = default_message_loglevel;
 	int facility = 1;	/* LOG_USER */
 	size_t len = iov_iter_count(from);
@@ -642,12 +644,13 @@ static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from)
 	line = buf;
 	if (line[0] == '<') {
 		char *endp = NULL;
+		unsigned int u;
 
-		i = simple_strtoul(line+1, &endp, 10);
+		u = simple_strtoul(line + 1, &endp, 10);
 		if (endp && endp[0] == '>') {
-			level = i & 7;
-			if (i >> 3)
-				facility = i >> 3;
+			level = LOG_LEVEL(u);
+			if (LOG_FACILITY(u) != 0)
+				facility = LOG_FACILITY(u);
 			endp++;
 			len -= endp - line;
 			line = endp;
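The <N> prefix parsed above is the standard syslog priority value, priority = facility * 8 + level, which is exactly what the new LOG_LEVEL()/LOG_FACILITY() macros decode. A small self-contained check (plain user-space C mirroring the macros added in the earlier printk hunk, not kernel code):

#include <stdio.h>

/* Same decoding as the kernel's new LOG_LEVEL/LOG_FACILITY macros. */
#define LOG_LEVEL(v)	((v) & 0x07)
#define LOG_FACILITY(v)	((v) >> 3 & 0xff)

int main(void)
{
	unsigned int u = 14;	/* a writer sent "<14>..." */

	/* 14 = 1 * 8 + 6: facility 1 (LOG_USER), level 6 (info). */
	printf("level=%u facility=%u\n", LOG_LEVEL(u), LOG_FACILITY(u));
	return 0;
}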
@@ -503,41 +503,6 @@ int unhandled_signal(struct task_struct *tsk, int sig)
 	return !tsk->ptrace;
 }
 
-/*
- * Notify the system that a driver wants to block all signals for this
- * process, and wants to be notified if any signals at all were to be
- * sent/acted upon.  If the notifier routine returns non-zero, then the
- * signal will be acted upon after all.  If the notifier routine returns 0,
- * then then signal will be blocked.  Only one block per process is
- * allowed.  priv is a pointer to private data that the notifier routine
- * can use to determine if the signal should be blocked or not.
- */
-void
-block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&current->sighand->siglock, flags);
-	current->notifier_mask = mask;
-	current->notifier_data = priv;
-	current->notifier = notifier;
-	spin_unlock_irqrestore(&current->sighand->siglock, flags);
-}
-
-/* Notify the system that blocking has ended. */
-
-void
-unblock_all_signals(void)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&current->sighand->siglock, flags);
-	current->notifier = NULL;
-	current->notifier_data = NULL;
-	recalc_sigpending();
-	spin_unlock_irqrestore(&current->sighand->siglock, flags);
-}
-
 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
 {
 	struct sigqueue *q, *first = NULL;
@@ -580,19 +545,8 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
 {
 	int sig = next_signal(pending, mask);
 
-	if (sig) {
-		if (current->notifier) {
-			if (sigismember(current->notifier_mask, sig)) {
-				if (!(current->notifier)(current->notifier_data)) {
-					clear_thread_flag(TIF_SIGPENDING);
-					return 0;
-				}
-			}
-		}
-
+	if (sig)
 		collect_signal(sig, pending, info);
-	}
 
 	return sig;
 }
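With the notifier machinery gone, the kernel threads that used dequeue_signal_lock() move to the kernel_dequeue_signal() helper named in the commit list above. A hedged reconstruction of that helper (its exact signature and body are assumed from the patch titles; the definition itself is not shown in this diff):

/* Assumed shape of the new helper: kernel context, siglock taken
 * around the ordinary dequeue path, no driver notifier hook. */
static inline int kernel_dequeue_signal(siginfo_t *info)
{
	struct task_struct *tsk = current;
	siginfo_t __info;
	int ret;

	spin_lock_irq(&tsk->sighand->siglock);
	ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info);
	spin_unlock_irq(&tsk->sighand->siglock);

	return ret;
}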
@@ -834,7 +788,7 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force)
 	sigset_t flush;
 
 	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
-		if (signal->flags & SIGNAL_GROUP_COREDUMP)
+		if (!(signal->flags & SIGNAL_GROUP_EXIT))
 			return sig == SIGKILL;
 		/*
 		 * The process is in the middle of dying, nothing to do.
@@ -2483,9 +2437,6 @@ EXPORT_SYMBOL(force_sig);
 EXPORT_SYMBOL(send_sig);
 EXPORT_SYMBOL(send_sig_info);
 EXPORT_SYMBOL(sigprocmask);
-EXPORT_SYMBOL(block_all_signals);
-EXPORT_SYMBOL(unblock_all_signals);
-
 
 /*
  * System call entry points.
@@ -669,7 +669,7 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
 	cpumask_var_t cpus;
 	int cpu, ret;
 
-	might_sleep_if(gfp_flags & __GFP_WAIT);
+	might_sleep_if(gfpflags_allow_blocking(gfp_flags));
 
 	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
 		preempt_disable();
@@ -222,7 +222,7 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
 			goto out_unlock;	/* No processes for this user */
 		}
 		do_each_thread(g, p) {
-			if (uid_eq(task_uid(p), uid))
+			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p))
 				error = set_one_prio(p, niceval, error);
 		} while_each_thread(g, p);
 		if (!uid_eq(uid, cred->uid))
@@ -290,7 +290,7 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
 			goto out_unlock;	/* No processes for this user */
 		}
 		do_each_thread(g, p) {
-			if (uid_eq(task_uid(p), uid)) {
+			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p)) {
 				niceval = nice_to_rlimit(task_nice(p));
 				if (niceval > retval)
 					retval = niceval;
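The added task_pid_vnr(p) checks in both hunks above implement the pidns fix from the commit list: task_pid_vnr() resolves a task's pid in the caller's pid namespace and returns 0 for tasks not visible there, so PRIO_USER iteration now skips tasks belonging to other namespaces. A sketch of the predicate (kernel-context pseudocode annotating the condition already in the hunks, not a new API):

/* true only for tasks owned by uid *and* visible in current's pidns */
static bool prio_user_match(struct task_struct *p, kuid_t uid)
{
	return uid_eq(task_uid(p), uid) && task_pid_vnr(p) != 0;
}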