Merge branch 'akpm' (patches from Andrew)

Merge more updates from Andrew Morton:

 - some of the rest of MM

 - various misc things

 - dynamic-debug updates

 - checkpatch

 - some epoll speedups

 - autofs

 - rapidio

 - lib/, lib/lzo/ updates

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (83 commits)
  samples/mic/mpssd/mpssd.h: remove duplicate header
  kernel/fork.c: remove duplicated include
  include/linux/relay.h: fix percpu annotation in struct rchan
  arch/nios2/mm/fault.c: remove duplicate include
  unicore32: stop printing the virtual memory layout
  MAINTAINERS: fix GTA02 entry and mark as orphan
  mm: create the new vm_fault_t type
  arm, s390, unicore32: remove oneliner wrappers for memblock_alloc()
  arch: simplify several early memory allocations
  openrisc: simplify pte_alloc_one_kernel()
  sh: prefer memblock APIs returning virtual address
  microblaze: prefer memblock API returning virtual address
  powerpc: prefer memblock APIs returning virtual address
  lib/lzo: separate lzo-rle from lzo
  lib/lzo: implement run-length encoding
  lib/lzo: fast 8-byte copy on arm64
  lib/lzo: 64-bit CTZ on arm64
  lib/lzo: tidy-up ifdefs
  ipc/sem.c: replace kvmalloc/memset with kvzalloc and use struct_size
  ipc: annotate implicit fall through
  ...
Author: Linus Torvalds
Date:   2019-03-07 19:25:37 -08:00

148 changed files with 1102 additions and 660 deletions

--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c

@@ -28,7 +28,7 @@
 static void *__init alloc_paca_data(unsigned long size, unsigned long align,
 				unsigned long limit, int cpu)
 {
-	unsigned long pa;
+	void *ptr;
 	int nid;
 
 	/*
@@ -43,17 +43,15 @@ static void *__init alloc_paca_data(unsigned long size, unsigned long align,
 		nid = early_cpu_to_node(cpu);
 	}
 
-	pa = memblock_alloc_base_nid(size, align, limit, nid, MEMBLOCK_NONE);
-	if (!pa) {
-		pa = memblock_alloc_base(size, align, limit);
-		if (!pa)
-			panic("cannot allocate paca data");
-	}
+	ptr = memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
+				     limit, nid);
+	if (!ptr)
+		panic("cannot allocate paca data");
 
 	if (cpu == boot_cpuid)
 		memblock_set_bottom_up(false);
 
-	return __va(pa);
+	return ptr;
 }
 
 #ifdef CONFIG_PPC_PSERIES
@@ -119,7 +117,6 @@ static struct slb_shadow * __init new_slb_shadow(int cpu, unsigned long limit)
 	}
 
 	s = alloc_paca_data(sizeof(*s), L1_CACHE_BYTES, limit, cpu);
-	memset(s, 0, sizeof(*s));
 
 	s->persistent = cpu_to_be32(SLB_NUM_BOLTED);
 	s->buffer_length = cpu_to_be32(sizeof(*s));
@@ -223,7 +220,6 @@ void __init allocate_paca(int cpu)
 	paca = alloc_paca_data(sizeof(struct paca_struct), L1_CACHE_BYTES,
 				limit, cpu);
 	paca_ptrs[cpu] = paca;
-	memset(paca, 0, sizeof(struct paca_struct));
 
 	initialise_paca(paca, cpu);
 #ifdef CONFIG_PPC_PSERIES
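
Every hunk in this file follows the same conversion: a memblock call that returned a physical address, wrapped in __va() and followed by an explicit memset(), becomes a single call to an API that returns an already-zeroed virtual address. A minimal before/after sketch of that pattern; the function names old_alloc()/new_alloc() are illustrative, not from the patch:

#include <linux/memblock.h>
#include <linux/string.h>

/* Before: memblock hands back a physical address, which the caller must
 * map with __va() and zero by hand. */
static void *__init old_alloc(unsigned long size, unsigned long align,
			      unsigned long limit)
{
	void *ptr = __va(memblock_alloc_base(size, align, limit));

	memset(ptr, 0, size);
	return ptr;
}

/* After: memblock_alloc_try_nid() returns a zeroed virtual address, or
 * NULL on failure, so both the __va() and the memset() disappear. */
static void *__init new_alloc(unsigned long size, unsigned long align,
			      unsigned long limit, int nid)
{
	void *ptr = memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
					   limit, nid);

	if (!ptr)
		panic("early allocation of %lu bytes failed", size);
	return ptr;
}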

--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c

@@ -459,8 +459,8 @@ void __init smp_setup_cpu_maps(void)
 
 	DBG("smp_setup_cpu_maps()\n");
 
-	cpu_to_phys_id = __va(memblock_phys_alloc(nr_cpu_ids * sizeof(u32), __alignof__(u32)));
-	memset(cpu_to_phys_id, 0, nr_cpu_ids * sizeof(u32));
+	cpu_to_phys_id = memblock_alloc(nr_cpu_ids * sizeof(u32),
+					__alignof__(u32));
 
 	for_each_node_by_type(dn, "cpu") {
 		const __be32 *intserv;
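
Where the allocation needs neither a node hint nor a range limit, as in the hunk above, the series uses plain memblock_alloc(), which likewise zeroes the memory and returns a virtual address. A one-function sketch of that simpler form; alloc_cpu_map() is a hypothetical wrapper, not from the patch:

#include <linux/memblock.h>

/* Hypothetical wrapper: memblock_alloc() already returns zeroed memory
 * at a virtual address, so no __va()/memset() pair is needed. */
static u32 *__init alloc_cpu_map(unsigned int nr_ids)
{
	return memblock_alloc(nr_ids * sizeof(u32), __alignof__(u32));
}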

--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c

@@ -902,8 +902,9 @@ static void __ref init_fallback_flush(void)
 	 * hardware prefetch runoff. We don't have a recipe for load patterns to
 	 * reliably avoid the prefetcher.
 	 */
-	l1d_flush_fallback_area = __va(memblock_alloc_base(l1d_size * 2, l1d_size, limit));
-	memset(l1d_flush_fallback_area, 0, l1d_size * 2);
+	l1d_flush_fallback_area = memblock_alloc_try_nid(l1d_size * 2,
+						l1d_size, MEMBLOCK_LOW_LIMIT,
+						limit, NUMA_NO_NODE);
 
 	for_each_possible_cpu(cpu) {
 		struct paca_struct *paca = paca_ptrs[cpu];