Merge tag 'v5.1-rc6' into for-5.2/block

Pull in v5.1-rc6 to resolve two conflicts. One is in BFQ, in just a
comment, and is trivial. The other one is a conflict due to a later fix
in the bio multi-page work, and needs a bit more care.

* tag 'v5.1-rc6': (770 commits)
  Linux 5.1-rc6
  block: make sure that bvec length can't be overflow
  block: kill all_q_node in request_queue
  x86/cpu/intel: Lower the "ENERGY_PERF_BIAS: Set to normal" message's log priority
  coredump: fix race condition between mmget_not_zero()/get_task_mm() and core dumping
  mm/kmemleak.c: fix unused-function warning
  init: initialize jump labels before command line option parsing
  kernel/watchdog_hld.c: hard lockup message should end with a newline
  kcov: improve CONFIG_ARCH_HAS_KCOV help text
  mm: fix inactive list balancing between NUMA nodes and cgroups
  mm/hotplug: treat CMA pages as unmovable
  proc: fixup proc-pid-vm test
  proc: fix map_files test on F29
  mm/vmstat.c: fix /proc/vmstat format for CONFIG_DEBUG_TLBFLUSH=y CONFIG_SMP=n
  mm/memory_hotplug: do not unlock after failing to take the device_hotplug_lock
  mm: swapoff: shmem_unuse() stop eviction without igrab()
  mm: swapoff: take notice of completion sooner
  mm: swapoff: remove too limiting SWAP_UNUSE_MAX_TRIES
  mm: swapoff: shmem_find_swap_entries() filter out other types
  slab: store tagged freelist for off-slab slabmgmt
  ...

Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Jens Axboe
2019-04-22 09:47:36 -06:00
Commit 5c61ee2cd5
777 changed files with 8124 additions and 4471 deletions


@@ -753,9 +753,9 @@ endmenu # "Memory Debugging"
 config ARCH_HAS_KCOV
 	bool
 	help
-	  KCOV does not have any arch-specific code, but currently it is enabled
-	  only for x86_64. KCOV requires testing on other archs, and most likely
-	  disabling of instrumentation for some early boot code.
+	  An architecture should select this when it can successfully
+	  build and run with CONFIG_KCOV. This typically requires
+	  disabling instrumentation for some early boot code.
 
 config CC_HAS_SANCOV_TRACE_PC
 	def_bool $(cc-option,-fsanitize-coverage=trace-pc)


@@ -1528,6 +1528,7 @@ EXPORT_SYMBOL(csum_and_copy_to_iter);
 size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
 		struct iov_iter *i)
 {
+#ifdef CONFIG_CRYPTO
 	struct ahash_request *hash = hashp;
 	struct scatterlist sg;
 	size_t copied;
@@ -1537,6 +1538,9 @@ size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
 	ahash_request_set_crypt(hash, &sg, NULL, copied);
 	crypto_ahash_update(hash);
 	return copied;
+#else
+	return 0;
+#endif
 }
 EXPORT_SYMBOL(hash_and_copy_to_iter);
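For reference, the same compile-time guard in a self-contained sketch. HAVE_TOY_HASH, toy_hash_update() and hash_and_copy_sketch() are illustrative stand-ins, not kernel interfaces: the code that needs the optional subsystem is compiled only when the option is set, and a trivial fallback keeps the symbol (and its callers) building either way.

#include <stddef.h>
#include <stdint.h>

#ifdef HAVE_TOY_HASH
static void toy_hash_update(uint32_t *h, const void *p, size_t n)
{
	const unsigned char *b = p;

	while (n--)
		*h = (*h * 31) + *b++;	/* toy accumulator, not a real hash */
}
#endif

static size_t hash_and_copy_sketch(const void *src, size_t n, uint32_t *h)
{
#ifdef HAVE_TOY_HASH
	toy_hash_update(h, src, n);	/* subsystem present: do the real work */
	return n;
#else
	(void)src; (void)n; (void)h;	/* subsystem compiled out */
	return 0;			/* keep the symbol, report nothing hashed */
#endif
}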


@@ -291,13 +291,14 @@ int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
 {
 	const unsigned char *ip = in;
 	unsigned char *op = out;
+	unsigned char *data_start;
 	size_t l = in_len;
 	size_t t = 0;
 	signed char state_offset = -2;
 	unsigned int m4_max_offset;
 
-	// LZO v0 will never write 17 as first byte,
-	// so this is used to version the bitstream
+	// LZO v0 will never write 17 as first byte (except for zero-length
+	// input), so this is used to version the bitstream
 	if (bitstream_version > 0) {
 		*op++ = 17;
 		*op++ = bitstream_version;
@@ -306,6 +307,8 @@ int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
 		m4_max_offset = M4_MAX_OFFSET_V0;
 	}
 
+	data_start = op;
+
 	while (l > 20) {
 		size_t ll = l <= (m4_max_offset + 1) ? l : (m4_max_offset + 1);
 		uintptr_t ll_end = (uintptr_t) ip + ll;
@@ -324,7 +327,7 @@ int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
 	if (t > 0) {
 		const unsigned char *ii = in + in_len - t;
 
-		if (op == out && t <= 238) {
+		if (op == data_start && t <= 238) {
 			*op++ = (17 + t);
 		} else if (t <= 3) {
 			op[state_offset] |= t;
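The essence of the compressor fix, as a stand-alone sketch with made-up names (emit_header(), compress_sketch()): once an optional header has been written, "start of the output buffer" and "start of the compressed data" are no longer the same address, so the "nothing emitted yet" special case must compare against the recorded data_start rather than against out.

#include <stddef.h>

static unsigned char *emit_header(unsigned char *op, int version)
{
	if (version > 0) {		/* versioned stream: 2-byte header */
		*op++ = 17;
		*op++ = (unsigned char)version;
	}
	return op;			/* first byte of compressed data */
}

static size_t compress_sketch(unsigned char *out, int version)
{
	unsigned char *op = out;
	unsigned char *data_start;

	op = emit_header(op, version);
	data_start = op;		/* remember where the data really starts */

	/* ... the main compression loop would advance op here ... */

	/*
	 * "op == out" is never true once a header was written, even when no
	 * compressed data has been emitted yet; comparing against data_start
	 * keeps the short-input special case working for versioned streams.
	 */
	if (op == data_start) {
		/* emit the whole input as one literal run */
	}
	return (size_t)(op - out);
}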


@@ -54,11 +54,9 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
 	if (unlikely(in_len < 3))
 		goto input_overrun;
 
-	if (likely(*ip == 17)) {
+	if (likely(in_len >= 5) && likely(*ip == 17)) {
 		bitstream_version = ip[1];
 		ip += 2;
-		if (unlikely(in_len < 5))
-			goto input_overrun;
 	} else {
 		bitstream_version = 0;
 	}
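The decompressor change is the usual bounds-check-before-read shape: make sure the header bytes are actually present before dereferencing past the first byte. A generic sketch with illustrative names (parse_version() is not the kernel function):

#include <stddef.h>

static int parse_version(const unsigned char *in, size_t in_len,
			 unsigned char *version, size_t *consumed)
{
	if (in_len < 3)
		return -1;			/* too short to be a valid stream */

	if (in_len >= 5 && in[0] == 17) {	/* length checked before in[1] is read */
		*version = in[1];
		*consumed = 2;
	} else {
		*version = 0;			/* legacy stream without a header */
		*consumed = 0;
	}
	return 0;
}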


@@ -866,6 +866,26 @@ __visible int memcmp(const void *cs, const void *ct, size_t count)
 EXPORT_SYMBOL(memcmp);
 #endif
 
+#ifndef __HAVE_ARCH_BCMP
+/**
+ * bcmp - returns 0 if and only if the buffers have identical contents.
+ * @a: pointer to first buffer.
+ * @b: pointer to second buffer.
+ * @len: size of buffers.
+ *
+ * The sign or magnitude of a non-zero return value has no particular
+ * meaning, and architectures may implement their own more efficient bcmp(). So
+ * while this particular implementation is a simple (tail) call to memcmp, do
+ * not rely on anything but whether the return value is zero or non-zero.
+ */
+#undef bcmp
+int bcmp(const void *a, const void *b, size_t len)
+{
+	return memcmp(a, b, len);
+}
+EXPORT_SYMBOL(bcmp);
+#endif
+
 #ifndef __HAVE_ARCH_MEMSCAN
 /**
  * memscan - Find a character in an area of memory.
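A usage note implied by the comment above, as a small illustrative caller (blocks_identical() is not a kernel helper): only the zero/non-zero distinction of bcmp()'s return value may be relied on.

#include <stddef.h>

int bcmp(const void *a, const void *b, size_t len);	/* as added above */

static int blocks_identical(const void *a, const void *b, size_t len)
{
	return bcmp(a, b, len) == 0;	/* OK: pure equality test */

	/*
	 * NOT OK: bcmp(a, b, len) < 0 -- the sign and magnitude of a non-zero
	 * return carry no ordering information; use memcmp() when a
	 * "less than / greater than" answer is needed.
	 */
}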


@@ -5,16 +5,14 @@
 #include <linux/export.h>
 #include <asm/syscall.h>
 
-static int collect_syscall(struct task_struct *target, long *callno,
-			   unsigned long args[6], unsigned int maxargs,
-			   unsigned long *sp, unsigned long *pc)
+static int collect_syscall(struct task_struct *target, struct syscall_info *info)
 {
 	struct pt_regs *regs;
 
 	if (!try_get_task_stack(target)) {
 		/* Task has no stack, so the task isn't in a syscall. */
-		*sp = *pc = 0;
-		*callno = -1;
+		memset(info, 0, sizeof(*info));
+		info->data.nr = -1;
 		return 0;
 	}
@@ -24,12 +22,13 @@ static int collect_syscall(struct task_struct *target, long *callno,
 		return -EAGAIN;
 	}
 
-	*sp = user_stack_pointer(regs);
-	*pc = instruction_pointer(regs);
+	info->sp = user_stack_pointer(regs);
+	info->data.instruction_pointer = instruction_pointer(regs);
 
-	*callno = syscall_get_nr(target, regs);
-	if (*callno != -1L && maxargs > 0)
-		syscall_get_arguments(target, regs, 0, maxargs, args);
+	info->data.nr = syscall_get_nr(target, regs);
+	if (info->data.nr != -1L)
+		syscall_get_arguments(target, regs,
+				      (unsigned long *)&info->data.args[0]);
 
 	put_task_stack(target);
 	return 0;
@@ -38,41 +37,35 @@ static int collect_syscall(struct task_struct *target, long *callno,
 /**
  * task_current_syscall - Discover what a blocked task is doing.
  * @target:		thread to examine
- * @callno:		filled with system call number or -1
- * @args:		filled with @maxargs system call arguments
- * @maxargs:		number of elements in @args to fill
- * @sp:			filled with user stack pointer
- * @pc:			filled with user PC
+ * @info:		structure with the following fields:
+ *			 .sp        - filled with user stack pointer
+ *			 .data.nr   - filled with system call number or -1
+ *			 .data.args - filled with @maxargs system call arguments
+ *			 .data.instruction_pointer - filled with user PC
  *
- * If @target is blocked in a system call, returns zero with *@callno
- * set to the the call's number and @args filled in with its arguments.
- * Registers not used for system call arguments may not be available and
- * it is not kosher to use &struct user_regset calls while the system
+ * If @target is blocked in a system call, returns zero with @info.data.nr
+ * set to the the call's number and @info.data.args filled in with its
+ * arguments. Registers not used for system call arguments may not be available
+ * and it is not kosher to use &struct user_regset calls while the system
  * call is still in progress. Note we may get this result if @target
  * has finished its system call but not yet returned to user mode, such
  * as when it's stopped for signal handling or syscall exit tracing.
 *
  * If @target is blocked in the kernel during a fault or exception,
- * returns zero with *@callno set to -1 and does not fill in @args.
- * If so, it's now safe to examine @target using &struct user_regset
- * get() calls as long as we're sure @target won't return to user mode.
+ * returns zero with *@info.data.nr set to -1 and does not fill in
+ * @info.data.args. If so, it's now safe to examine @target using
+ * &struct user_regset get() calls as long as we're sure @target won't return
+ * to user mode.
 *
  * Returns -%EAGAIN if @target does not remain blocked.
- *
- * Returns -%EINVAL if @maxargs is too large (maximum is six).
  */
-int task_current_syscall(struct task_struct *target, long *callno,
-			 unsigned long args[6], unsigned int maxargs,
-			 unsigned long *sp, unsigned long *pc)
+int task_current_syscall(struct task_struct *target, struct syscall_info *info)
 {
 	long state;
 	unsigned long ncsw;
 
-	if (unlikely(maxargs > 6))
-		return -EINVAL;
-
 	if (target == current)
-		return collect_syscall(target, callno, args, maxargs, sp, pc);
+		return collect_syscall(target, info);
 
 	state = target->state;
 	if (unlikely(!state))
@@ -80,7 +73,7 @@ int task_current_syscall(struct task_struct *target, long *callno,
 
 	ncsw = wait_task_inactive(target, state);
 	if (unlikely(!ncsw) ||
-	    unlikely(collect_syscall(target, callno, args, maxargs, sp, pc)) ||
+	    unlikely(collect_syscall(target, info)) ||
 	    unlikely(wait_task_inactive(target, state) != ncsw))
 		return -EAGAIN;
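The new struct syscall_info itself is introduced elsewhere in this series (in the ptrace headers). Judging from the fields used above (info->sp, info->data.nr, info->data.args[], info->data.instruction_pointer), it wraps a user stack pointer around a struct seccomp_data, roughly:

#include <linux/types.h>

/* Sketch of the shapes implied by the fields used above; the authoritative
 * definitions live in the uapi seccomp header and in linux/ptrace.h. */
struct seccomp_data {
	int	nr;			/* system call number, or -1 */
	__u32	arch;			/* AUDIT_ARCH_* value */
	__u64	instruction_pointer;	/* user PC at the time of the call */
	__u64	args[6];		/* system call arguments */
};

struct syscall_info {
	__u64			sp;	/* user stack pointer */
	struct seccomp_data	data;
};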