Merge branch 'akpm' (patches from Andrew)

Merge yet more updates from Andrew Morton:

 - the rest of ocfs2

 - various hotfixes, mainly MM

 - quite a bit of misc stuff - drivers, fork, exec, signals, etc.

 - printk updates

 - firmware

 - checkpatch

 - nilfs2

 - more kexec stuff than usual

 - rapidio updates

 - w1 things

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (111 commits)
  ipc: delete "nr_ipc_ns"
  kcov: allow more fine-grained coverage instrumentation
  init/Kconfig: add clarification for out-of-tree modules
  config: add android config fragments
  init/Kconfig: ban CONFIG_LOCALVERSION_AUTO with allmodconfig
  relay: add global mode support for buffer-only channels
  init: allow blacklisting of module_init functions
  w1:omap_hdq: fix regression
  w1: add helper macro module_w1_family
  w1: remove need for ida and use PLATFORM_DEVID_AUTO
  rapidio/switches: add driver for IDT gen3 switches
  powerpc/fsl_rio: apply changes for RIO spec rev 3
  rapidio: modify for rev.3 specification changes
  rapidio: change inbound window size type to u64
  rapidio/idt_gen2: fix locking warning
  rapidio: fix error handling in mbox request/release functions
  rapidio/tsi721_dma: advance queue processing from transfer submit call
  rapidio/tsi721: add messaging mbox selector parameter
  rapidio/tsi721: add PCIe MRRS override parameter
  rapidio/tsi721_dma: add channel mask and queue size parameters
  ...
Linus Torvalds committed on 2016-08-02 21:08:07 -04:00

206 changed files with 5750 additions and 2013 deletions


@@ -2216,6 +2216,10 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
* and reducing the surplus.
*/
spin_unlock(&hugetlb_lock);
/* yield cpu to avoid soft lockup */
cond_resched();
if (hstate_is_gigantic(h))
ret = alloc_fresh_gigantic_page(h, nodes_allowed);
else
@@ -4306,7 +4310,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
pte = (pte_t *)pmd_alloc(mm, pud, addr);
}
}
BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));
return pte;
}


@@ -442,11 +442,6 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object)
kasan_poison_shadow(object,
round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
KASAN_KMALLOC_REDZONE);
if (cache->flags & SLAB_KASAN) {
struct kasan_alloc_meta *alloc_info =
get_alloc_info(cache, object);
alloc_info->state = KASAN_STATE_INIT;
}
}
static inline int in_irqentry_text(unsigned long ptr)
@@ -510,6 +505,17 @@ struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
return (void *)object + cache->kasan_info.free_meta_offset;
}
void kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
{
struct kasan_alloc_meta *alloc_info;
if (!(cache->flags & SLAB_KASAN))
return;
alloc_info = get_alloc_info(cache, object);
__memset(alloc_info, 0, sizeof(*alloc_info));
}
void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
{
kasan_kmalloc(cache, object, cache->object_size, flags);
@@ -529,34 +535,26 @@ static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
bool kasan_slab_free(struct kmem_cache *cache, void *object)
{
s8 shadow_byte;
/* RCU slabs could be legally used after free within the RCU period */
if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
return false;
if (likely(cache->flags & SLAB_KASAN)) {
struct kasan_alloc_meta *alloc_info;
struct kasan_free_meta *free_info;
alloc_info = get_alloc_info(cache, object);
free_info = get_free_info(cache, object);
switch (alloc_info->state) {
case KASAN_STATE_ALLOC:
alloc_info->state = KASAN_STATE_QUARANTINE;
quarantine_put(free_info, cache);
set_track(&free_info->track, GFP_NOWAIT);
kasan_poison_slab_free(cache, object);
return true;
case KASAN_STATE_QUARANTINE:
case KASAN_STATE_FREE:
pr_err("Double free");
dump_stack();
break;
default:
break;
}
shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
kasan_report_double_free(cache, object, shadow_byte);
return true;
}
return false;
kasan_poison_slab_free(cache, object);
if (unlikely(!(cache->flags & SLAB_KASAN)))
return false;
set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
quarantine_put(get_free_info(cache, object), cache);
return true;
}
void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
@@ -565,7 +563,7 @@ void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
unsigned long redzone_start;
unsigned long redzone_end;
if (flags & __GFP_RECLAIM)
if (gfpflags_allow_blocking(flags))
quarantine_reduce();
if (unlikely(object == NULL))
@@ -579,14 +577,9 @@ void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
kasan_unpoison_shadow(object, size);
kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
KASAN_KMALLOC_REDZONE);
if (cache->flags & SLAB_KASAN) {
struct kasan_alloc_meta *alloc_info =
get_alloc_info(cache, object);
alloc_info->state = KASAN_STATE_ALLOC;
alloc_info->alloc_size = size;
set_track(&alloc_info->track, flags);
}
if (cache->flags & SLAB_KASAN)
set_track(&get_alloc_info(cache, object)->alloc_track, flags);
}
EXPORT_SYMBOL(kasan_kmalloc);
@@ -596,7 +589,7 @@ void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
unsigned long redzone_start;
unsigned long redzone_end;
if (flags & __GFP_RECLAIM)
if (gfpflags_allow_blocking(flags))
quarantine_reduce();
if (unlikely(ptr == NULL))
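
The kasan_slab_free() rewrite above drops the per-object state machine (KASAN_STATE_*) and instead decides "was this object already freed?" from its first shadow byte: values 0..KASAN_SHADOW_SCALE_SIZE-1 mean the object is at least partially accessible, anything else means it is already poisoned, i.e. a double free or an invalid free. A minimal user-space sketch of that predicate (the constants are illustrative stand-ins, not taken from kernel headers):

#include <stdio.h>

#define KASAN_SHADOW_SCALE_SIZE 8	/* bytes of memory covered per shadow byte */

/* Mirrors the new check in kasan_slab_free(): a live object's first shadow
 * byte is 0..7; negative values are poison patterns written at free time. */
static int already_poisoned(signed char shadow_byte)
{
	return shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE;
}

int main(void)
{
	signed char live = 0;			/* fully accessible object */
	signed char freed = (signed char)0xFB;	/* illustrative poison value */

	printf("live object  -> double free? %d\n", already_poisoned(live));
	printf("freed object -> double free? %d\n", already_poisoned(freed));
	return 0;
}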


@@ -59,13 +59,6 @@ struct kasan_global {
* Structures to keep alloc and free tracks *
*/
enum kasan_state {
KASAN_STATE_INIT,
KASAN_STATE_ALLOC,
KASAN_STATE_QUARANTINE,
KASAN_STATE_FREE
};
#define KASAN_STACK_DEPTH 64
struct kasan_track {
@@ -74,9 +67,8 @@ struct kasan_track {
};
struct kasan_alloc_meta {
struct kasan_track track;
u32 state : 2; /* enum kasan_state */
u32 alloc_size : 30;
struct kasan_track alloc_track;
struct kasan_track free_track;
};
struct qlist_node {
@@ -87,7 +79,6 @@ struct kasan_free_meta {
* Otherwise it might be used for the allocator freelist.
*/
struct qlist_node quarantine_link;
struct kasan_track track;
};
struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
@@ -108,6 +99,8 @@ static inline bool kasan_report_enabled(void)
void kasan_report(unsigned long addr, size_t size,
bool is_write, unsigned long ip);
void kasan_report_double_free(struct kmem_cache *cache, void *object,
s8 shadow);
#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB)
void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache);


@@ -144,13 +144,15 @@ static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache)
static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
{
void *object = qlink_to_object(qlink, cache);
struct kasan_alloc_meta *alloc_info = get_alloc_info(cache, object);
unsigned long flags;
local_irq_save(flags);
alloc_info->state = KASAN_STATE_FREE;
if (IS_ENABLED(CONFIG_SLAB))
local_irq_save(flags);
___cache_free(cache, object, _THIS_IP_);
local_irq_restore(flags);
if (IS_ENABLED(CONFIG_SLAB))
local_irq_restore(flags);
}
static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache)
@@ -196,7 +198,7 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
void quarantine_reduce(void)
{
size_t new_quarantine_size;
size_t new_quarantine_size, percpu_quarantines;
unsigned long flags;
struct qlist_head to_free = QLIST_INIT;
size_t size_to_free = 0;
@@ -214,7 +216,12 @@ void quarantine_reduce(void)
*/
new_quarantine_size = (READ_ONCE(totalram_pages) << PAGE_SHIFT) /
QUARANTINE_FRACTION;
new_quarantine_size -= QUARANTINE_PERCPU_SIZE * num_online_cpus();
percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus();
if (WARN_ONCE(new_quarantine_size < percpu_quarantines,
"Too little memory, disabling global KASAN quarantine.\n"))
new_quarantine_size = 0;
else
new_quarantine_size -= percpu_quarantines;
WRITE_ONCE(quarantine_size, new_quarantine_size);
last = global_quarantine.head;
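
The quarantine_reduce() hunk above replaces a bare subtraction with a guarded one: the quarantine budget and the per-CPU share are unsigned, so on a machine with very little memory the old code could underflow and compute an enormous global quarantine size. A small user-space sketch of the guarded form (the numbers are made up for illustration):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t total_budget = 4096;		/* totalram / QUARANTINE_FRACTION on a tiny box */
	size_t percpu_quarantines = 8192;	/* QUARANTINE_PERCPU_SIZE * nr_cpus */
	size_t global_size;

	if (total_budget < percpu_quarantines) {
		/* Without this check the subtraction below would wrap to ~SIZE_MAX. */
		printf("Too little memory, disabling global quarantine.\n");
		global_size = 0;
	} else {
		global_size = total_budget - percpu_quarantines;
	}
	printf("global quarantine size: %zu bytes\n", global_size);
	return 0;
}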


@@ -116,6 +116,26 @@ static inline bool init_task_stack_addr(const void *addr)
sizeof(init_thread_union.stack));
}
static DEFINE_SPINLOCK(report_lock);
static void kasan_start_report(unsigned long *flags)
{
/*
* Make sure we don't end up in loop.
*/
kasan_disable_current();
spin_lock_irqsave(&report_lock, *flags);
pr_err("==================================================================\n");
}
static void kasan_end_report(unsigned long *flags)
{
pr_err("==================================================================\n");
add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
spin_unlock_irqrestore(&report_lock, *flags);
kasan_enable_current();
}
static void print_track(struct kasan_track *track)
{
pr_err("PID = %u\n", track->pid);
@@ -129,37 +149,33 @@ static void print_track(struct kasan_track *track)
}
}
static void kasan_object_err(struct kmem_cache *cache, struct page *page,
void *object, char *unused_reason)
static void kasan_object_err(struct kmem_cache *cache, void *object)
{
struct kasan_alloc_meta *alloc_info = get_alloc_info(cache, object);
struct kasan_free_meta *free_info;
dump_stack();
pr_err("Object at %p, in cache %s\n", object, cache->name);
pr_err("Object at %p, in cache %s size: %d\n", object, cache->name,
cache->object_size);
if (!(cache->flags & SLAB_KASAN))
return;
switch (alloc_info->state) {
case KASAN_STATE_INIT:
pr_err("Object not allocated yet\n");
break;
case KASAN_STATE_ALLOC:
pr_err("Object allocated with size %u bytes.\n",
alloc_info->alloc_size);
pr_err("Allocation:\n");
print_track(&alloc_info->track);
break;
case KASAN_STATE_FREE:
case KASAN_STATE_QUARANTINE:
pr_err("Object freed, allocated with size %u bytes\n",
alloc_info->alloc_size);
free_info = get_free_info(cache, object);
pr_err("Allocation:\n");
print_track(&alloc_info->track);
pr_err("Deallocation:\n");
print_track(&free_info->track);
break;
}
pr_err("Allocated:\n");
print_track(&alloc_info->alloc_track);
pr_err("Freed:\n");
print_track(&alloc_info->free_track);
}
void kasan_report_double_free(struct kmem_cache *cache, void *object,
s8 shadow)
{
unsigned long flags;
kasan_start_report(&flags);
pr_err("BUG: Double free or freeing an invalid pointer\n");
pr_err("Unexpected shadow byte: 0x%hhX\n", shadow);
kasan_object_err(cache, object);
kasan_end_report(&flags);
}
static void print_address_description(struct kasan_access_info *info)
@@ -175,8 +191,7 @@ static void print_address_description(struct kasan_access_info *info)
struct kmem_cache *cache = page->slab_cache;
object = nearest_obj(cache, page,
(void *)info->access_addr);
kasan_object_err(cache, page, object,
"kasan: bad access detected");
kasan_object_err(cache, object);
return;
}
dump_page(page, "kasan: bad access detected");
@@ -241,19 +256,13 @@ static void print_shadow_for_address(const void *addr)
}
}
static DEFINE_SPINLOCK(report_lock);
static void kasan_report_error(struct kasan_access_info *info)
{
unsigned long flags;
const char *bug_type;
/*
* Make sure we don't end up in loop.
*/
kasan_disable_current();
spin_lock_irqsave(&report_lock, flags);
pr_err("==================================================================\n");
kasan_start_report(&flags);
if (info->access_addr <
kasan_shadow_to_mem((void *)KASAN_SHADOW_START)) {
if ((unsigned long)info->access_addr < PAGE_SIZE)
@@ -274,10 +283,8 @@ static void kasan_report_error(struct kasan_access_info *info)
print_address_description(info);
print_shadow_for_address(info->first_bad_addr);
}
pr_err("==================================================================\n");
add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
spin_unlock_irqrestore(&report_lock, flags);
kasan_enable_current();
kasan_end_report(&flags);
}
void kasan_report(unsigned long addr, size_t size,
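
kasan_start_report()/kasan_end_report() above factor out the framing that kasan_report_error() used to do inline: disable KASAN for the current task so that printing the report cannot itself trigger another report, and hold a lock so concurrent reports do not interleave. A rough user-space analogue of the pattern, with a pthread mutex and a thread-local flag standing in for report_lock and kasan_disable_current() (build with -lpthread):

#include <stdio.h>
#include <stdbool.h>
#include <pthread.h>

static pthread_mutex_t report_lock = PTHREAD_MUTEX_INITIALIZER;
static __thread bool checks_disabled;	/* analogous to current->kasan_depth */

static void start_report(void)
{
	checks_disabled = true;		/* make sure we don't end up in a loop */
	pthread_mutex_lock(&report_lock);
	puts("==================================================================");
}

static void end_report(void)
{
	puts("==================================================================");
	pthread_mutex_unlock(&report_lock);
	checks_disabled = false;
}

int main(void)
{
	start_report();
	puts("BUG: example report body");
	end_report();
	return 0;
}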


@@ -2559,6 +2559,15 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
return 0;
mctz = soft_limit_tree_node(pgdat->node_id);
/*
* Do not even bother to check the largest node if the root
* is empty. Do it lockless to prevent lock bouncing. Races
* are acceptable as soft limit is best effort anyway.
*/
if (RB_EMPTY_ROOT(&mctz->rb_root))
return 0;
/*
* This loop can run a while, specially if mem_cgroup's continuously
* keep exceeding their soft limit and putting the system under
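
The mem_cgroup_soft_limit_reclaim() hunk above adds a lockless fast path: peek at the per-node soft-limit tree and bail out if it is empty, without touching the lock, since a stale answer only costs one best-effort reclaim pass. A toy user-space sketch of the pattern (the tree is reduced to a counter purely for illustration):

#include <stdio.h>
#include <stdbool.h>
#include <pthread.h>

struct excess_tree {
	pthread_mutex_t lock;
	int nr_nodes;			/* stand-in for the rb_root */
};

static bool reclaim_pass(struct excess_tree *tree)
{
	/* Lockless peek: racing with insertions is fine, this is best effort. */
	if (tree->nr_nodes == 0)
		return false;

	pthread_mutex_lock(&tree->lock);
	/* ... pick the group with the largest excess and reclaim from it ... */
	pthread_mutex_unlock(&tree->lock);
	return true;
}

int main(void)
{
	struct excess_tree tree = { PTHREAD_MUTEX_INITIALIZER, 0 };

	printf("did any work? %s\n", reclaim_pass(&tree) ? "yes" : "no");
	return 0;
}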


@@ -2642,6 +2642,7 @@ int do_swap_page(struct fault_env *fe, pte_t orig_pte)
if (page == swapcache) {
do_page_add_anon_rmap(page, vma, fe->address, exclusive);
mem_cgroup_commit_charge(page, memcg, true, false);
activate_page(page);
} else { /* ksm created a completely new copy */
page_add_new_anon_rmap(page, vma, fe->address, false);
mem_cgroup_commit_charge(page, memcg, false, false);
@@ -3133,6 +3134,8 @@ static int do_fault_around(struct fault_env *fe, pgoff_t start_pgoff)
if (pmd_none(*fe->pmd)) {
fe->prealloc_pte = pte_alloc_one(fe->vma->vm_mm, fe->address);
if (!fe->prealloc_pte)
goto out;
smp_wmb(); /* See comment in __pte_alloc() */
}


@@ -2653,16 +2653,18 @@ static inline void verify_mm_writelocked(struct mm_struct *mm)
* anonymous maps. eventually we may be able to do some
* brk-specific accounting here.
*/
static int do_brk(unsigned long addr, unsigned long len)
static int do_brk(unsigned long addr, unsigned long request)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma, *prev;
unsigned long flags;
unsigned long flags, len;
struct rb_node **rb_link, *rb_parent;
pgoff_t pgoff = addr >> PAGE_SHIFT;
int error;
len = PAGE_ALIGN(len);
len = PAGE_ALIGN(request);
if (len < request)
return -ENOMEM;
if (!len)
return 0;
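
The do_brk() change above renames the raw argument to "request" and rejects it when PAGE_ALIGN() wraps: rounding a length within one page of ULONG_MAX up to a page boundary overflows, so the "aligned" length comes out smaller than what was asked for and the old code carried on with a bogus (possibly zero) len. A minimal user-space sketch of the failure mode, with PAGE_SIZE and PAGE_ALIGN redefined locally rather than taken from kernel headers:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long request = ~0UL - 100;	/* pathological brk request */
	unsigned long len = PAGE_ALIGN(request);/* addition wraps, len becomes 0 */

	if (len < request)			/* the new overflow guard */
		printf("overflow: len=%lu < request=%lu -> return -ENOMEM\n",
		       len, request);
	return 0;
}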


@@ -5276,7 +5276,7 @@ void __init setup_per_cpu_pageset(void)
setup_zone_pageset(zone);
}
static noinline __init_refok
static noinline __ref
int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
{
int i;
@@ -5903,7 +5903,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
}
}
static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
{
unsigned long __maybe_unused start = 0;
unsigned long __maybe_unused offset = 0;


@@ -1877,7 +1877,7 @@ static struct array_cache __percpu *alloc_kmem_cache_cpus(
return cpu_cache;
}
static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
{
if (slab_state >= FULL)
return enable_cpucache(cachep, gfp);
@@ -2604,9 +2604,11 @@ static void cache_init_objs(struct kmem_cache *cachep,
}
for (i = 0; i < cachep->num; i++) {
objp = index_to_obj(cachep, page, i);
kasan_init_slab_obj(cachep, objp);
/* constructor could break poison info */
if (DEBUG == 0 && cachep->ctor) {
objp = index_to_obj(cachep, page, i);
kasan_unpoison_object_data(cachep, objp);
cachep->ctor(objp);
kasan_poison_object_data(cachep, objp);


@@ -1384,6 +1384,7 @@ static void setup_object(struct kmem_cache *s, struct page *page,
void *object)
{
setup_object_debug(s, page, object);
kasan_init_slab_obj(s, object);
if (unlikely(s->ctor)) {
kasan_unpoison_object_data(s, object);
s->ctor(object);


@@ -36,7 +36,7 @@
* Uses the main allocators if they are available, else bootmem.
*/
static void * __init_refok __earlyonly_bootmem_alloc(int node,
static void * __ref __earlyonly_bootmem_alloc(int node,
unsigned long size,
unsigned long align,
unsigned long goal)


@@ -59,7 +59,7 @@ static inline void set_section_nid(unsigned long section_nr, int nid)
#endif
#ifdef CONFIG_SPARSEMEM_EXTREME
static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
static noinline struct mem_section __ref *sparse_index_alloc(int nid)
{
struct mem_section *section = NULL;
unsigned long array_size = SECTIONS_PER_ROOT *


@@ -2561,7 +2561,7 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
shrink_node_memcg(pgdat, memcg, sc, &lru_pages);
node_lru_pages += lru_pages;
if (!global_reclaim(sc))
if (memcg)
shrink_slab(sc->gfp_mask, pgdat->node_id,
memcg, sc->nr_scanned - scanned,
lru_pages);