Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "A bunch of fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  slub: mark the dangling ifdef #else of CONFIG_SLUB_DEBUG
  slub: avoid irqoff/on in bulk allocation
  slub: create new ___slab_alloc function that can be called with irqs disabled
  mm: fix up sparse warning in gfpflags_allow_blocking
  ocfs2: fix umask ignored issue
  PM/OPP: add entry in MAINTAINERS
  kernel/panic.c: turn off locks debug before releasing console lock
  kernel/signal.c: unexport sigsuspend()
  kasan: fix kmemleak false-positive in kasan_module_alloc()
  fat: fix fake_offset handling on error path
  mm/hugetlbfs: fix bugs in fallocate hole punch of areas with holes
  mm/page-writeback.c: initialize m_dirty to avoid compile warning
  various: fix pci_set_dma_mask return value checking
  mm: loosen MADV_NOHUGEPAGE to enable Qemu postcopy on s390
  mm: vmalloc: don't remove inexistent guard hole in remove_vm_area()
  tools/vm/page-types.c: support KPF_IDLE
  ncpfs: don't allow negative timeouts
  configfs: allow dynamic group creation
  MAINTAINERS: add Moritz as reviewer for FPGA Manager Framework
  slab.h: sprinkle __assume_aligned attributes

Author: Linus Torvalds
Date:   2015-11-21 10:49:13 -08:00

28 changed files with 283 additions and 111 deletions

--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2009,7 +2009,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
                 /*
                  * Be somewhat over-protective like KSM for now!
                  */
-                if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP))
+                if (*vm_flags & VM_NO_THP)
                         return -EINVAL;
                 *vm_flags &= ~VM_NOHUGEPAGE;
                 *vm_flags |= VM_HUGEPAGE;
@@ -2025,7 +2025,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
                 /*
                  * Be somewhat over-protective like KSM for now!
                  */
-                if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP))
+                if (*vm_flags & VM_NO_THP)
                         return -EINVAL;
                 *vm_flags &= ~VM_HUGEPAGE;
                 *vm_flags |= VM_NOHUGEPAGE;

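Note on the MADV_NOHUGEPAGE loosening above: with the VM_HUGEPAGE/VM_NOHUGEPAGE bits dropped from the checks, re-applying the same advice to a range that already carries it succeeds instead of failing with EINVAL, which is what QEMU postcopy on s390 relies on. A minimal userspace sketch of that behaviour (hypothetical demo assuming a THP-enabled kernel with this fix; not part of the patch):

/*
 * Hypothetical userspace demo, not part of the patch: on a kernel with
 * the fix above, calling MADV_NOHUGEPAGE on a range that is already
 * marked NOHUGEPAGE is a no-op instead of failing with EINVAL.
 */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 2 * 1024 * 1024;
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        /* First call sets VM_NOHUGEPAGE on the VMA. */
        if (madvise(p, len, MADV_NOHUGEPAGE))
                perror("madvise #1");

        /* Second call used to hit the old (VM_NOHUGEPAGE | VM_NO_THP)
         * check and return EINVAL; with the fix it succeeds. */
        if (madvise(p, len, MADV_NOHUGEPAGE))
                perror("madvise #2");

        munmap(p, len);
        return 0;
}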
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -19,6 +19,7 @@
 #include <linux/export.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/kmemleak.h>
 #include <linux/memblock.h>
 #include <linux/memory.h>
 #include <linux/mm.h>
@@ -444,6 +445,7 @@ int kasan_module_alloc(void *addr, size_t size)
         if (ret) {
                 find_vm_area(addr)->flags |= VM_KASAN;
+                kmemleak_ignore(ret);
                 return 0;
         }

--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1542,7 +1542,9 @@ static void balance_dirty_pages(struct address_space *mapping,
         for (;;) {
                 unsigned long now = jiffies;
                 unsigned long dirty, thresh, bg_thresh;
-                unsigned long m_dirty, m_thresh, m_bg_thresh;
+                unsigned long m_dirty = 0;        /* stop bogus uninit warnings */
+                unsigned long m_thresh = 0;
+                unsigned long m_bg_thresh = 0;
 
                 /*
                  * Unstable writes are a feature of certain networked

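Note on the m_dirty/m_thresh/m_bg_thresh change above: the variables are only written and only read under the same condition, so they can never actually be used uninitialised; the "= 0" initialisers just quiet a compiler that loses track of that correlation. A stripped-down illustration of the pattern (hypothetical code, not from the kernel; whether gcc warns without the initialiser depends on version and optimisation level):

/*
 * Hypothetical illustration, not kernel code: 'm' is written only when
 * p != NULL and read only when p != NULL, so it can never be used
 * uninitialised, yet a compiler that loses track of that correlation in
 * a large loop may emit -Wmaybe-uninitialized.  The "= 0" initialiser
 * (the equivalent of "m_dirty = 0" above) keeps it quiet.
 */
#include <stdio.h>

static long compute(const long *p, int rounds)
{
        long total = 0;

        for (int i = 0; i < rounds; i++) {
                long m = 0;             /* silences the bogus warning */

                if (p)
                        m = *p + i;     /* written only when p != NULL */

                total += i;             /* unrelated work in between */

                if (p)
                        total += m;     /* read only when p != NULL */
        }
        return total;
}

int main(void)
{
        long seed = 3;

        printf("%ld %ld\n", compute(&seed, 4), compute(NULL, 4));
        return 0;
}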
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1204,7 +1204,7 @@ unsigned long kmem_cache_flags(unsigned long object_size,
         return flags;
 }
-#else
+#else /* !CONFIG_SLUB_DEBUG */
 static inline void setup_object_debug(struct kmem_cache *s,
                         struct page *page, void *object) {}
@@ -2295,23 +2295,15 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
  * And if we were unable to get a new slab from the partial slab lists then
  * we need to allocate a new slab. This is the slowest path since it involves
  * a call to the page allocator and the setup of a new slab.
+ *
+ * Version of __slab_alloc to use when we know that interrupts are
+ * already disabled (which is the case for bulk allocation).
  */
-static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
                           unsigned long addr, struct kmem_cache_cpu *c)
 {
         void *freelist;
         struct page *page;
-        unsigned long flags;
-
-        local_irq_save(flags);
-#ifdef CONFIG_PREEMPT
-        /*
-         * We may have been preempted and rescheduled on a different
-         * cpu before disabling interrupts. Need to reload cpu area
-         * pointer.
-         */
-        c = this_cpu_ptr(s->cpu_slab);
-#endif
 
         page = c->page;
         if (!page)
@@ -2369,7 +2361,6 @@ load_freelist:
         VM_BUG_ON(!c->page->frozen);
         c->freelist = get_freepointer(s, freelist);
         c->tid = next_tid(c->tid);
-        local_irq_restore(flags);
         return freelist;
 
 new_slab:
@@ -2386,7 +2377,6 @@ new_slab:
         if (unlikely(!freelist)) {
                 slab_out_of_memory(s, gfpflags, node);
-                local_irq_restore(flags);
                 return NULL;
         }
@@ -2402,10 +2392,34 @@ new_slab:
         deactivate_slab(s, page, get_freepointer(s, freelist));
         c->page = NULL;
         c->freelist = NULL;
-        local_irq_restore(flags);
         return freelist;
 }
 
+/*
+ * Another one that disabled interrupt and compensates for possible
+ * cpu changes by refetching the per cpu area pointer.
+ */
+static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+                          unsigned long addr, struct kmem_cache_cpu *c)
+{
+        void *p;
+        unsigned long flags;
+
+        local_irq_save(flags);
+#ifdef CONFIG_PREEMPT
+        /*
+         * We may have been preempted and rescheduled on a different
+         * cpu before disabling interrupts. Need to reload cpu area
+         * pointer.
+         */
+        c = this_cpu_ptr(s->cpu_slab);
+#endif
+
+        p = ___slab_alloc(s, gfpflags, node, addr, c);
+        local_irq_restore(flags);
+        return p;
+}
+
 /*
  * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
  * have the fastpath folded into their functions. So no function call
@@ -2804,30 +2818,23 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
                 void *object = c->freelist;
 
                 if (unlikely(!object)) {
-                        local_irq_enable();
                         /*
                          * Invoking slow path likely have side-effect
                          * of re-populating per CPU c->freelist
                          */
-                        p[i] = __slab_alloc(s, flags, NUMA_NO_NODE,
+                        p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
                                             _RET_IP_, c);
-                        if (unlikely(!p[i])) {
-                                __kmem_cache_free_bulk(s, i, p);
-                                return false;
-                        }
-                        local_irq_disable();
+                        if (unlikely(!p[i]))
+                                goto error;
+
                         c = this_cpu_ptr(s->cpu_slab);
                         continue; /* goto for-loop */
                 }
 
                 /* kmem_cache debug support */
                 s = slab_pre_alloc_hook(s, flags);
-                if (unlikely(!s)) {
-                        __kmem_cache_free_bulk(s, i, p);
-                        c->tid = next_tid(c->tid);
-                        local_irq_enable();
-                        return false;
-                }
+                if (unlikely(!s))
+                        goto error;
 
                 c->freelist = get_freepointer(s, object);
                 p[i] = object;
@@ -2847,6 +2854,11 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
         }
 
         return true;
 
+error:
+        __kmem_cache_free_bulk(s, i, p);
+        local_irq_enable();
+        return false;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_bulk);

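Note on the slub changes above: the slow path is split into ___slab_alloc(), which assumes interrupts are already disabled, and a thin __slab_alloc() wrapper that saves/restores them for ordinary callers, so kmem_cache_alloc_bulk() can keep interrupts off across its whole loop instead of toggling them per object. A standalone toy sketch of the same shape, using a pthread mutex in place of local_irq_save() (illustrative names only, not kernel APIs):

/*
 * Standalone toy sketch (illustrative names, not kernel APIs): the same
 * split as __slab_alloc()/___slab_alloc(), with a pthread mutex standing
 * in for local_irq_save()/local_irq_restore().
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static int pool[16];
static size_t pool_top;

/* Core slow path: caller must already hold pool_lock (cf. ___slab_alloc). */
static int *pool_alloc_locked(void)
{
        if (pool_top == sizeof(pool) / sizeof(pool[0]))
                return NULL;
        return &pool[pool_top++];
}

/* Wrapper for ordinary callers: takes and drops the lock (cf. __slab_alloc). */
static int *pool_alloc(void)
{
        int *p;

        pthread_mutex_lock(&pool_lock);
        p = pool_alloc_locked();
        pthread_mutex_unlock(&pool_lock);
        return p;
}

/* Bulk path: one lock/unlock around the whole loop, calling the core
 * directly instead of toggling the lock per object
 * (cf. kmem_cache_alloc_bulk after this patch). */
static size_t pool_alloc_bulk(int **out, size_t n)
{
        size_t i;

        pthread_mutex_lock(&pool_lock);
        for (i = 0; i < n; i++) {
                out[i] = pool_alloc_locked();
                if (!out[i])
                        break;
        }
        pthread_mutex_unlock(&pool_lock);
        return i;
}

int main(void)
{
        int *objs[4];
        size_t got = pool_alloc_bulk(objs, 4);

        printf("bulk: %zu objects, single: %p\n", got, (void *)pool_alloc());
        return 0;
}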
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1443,7 +1443,6 @@ struct vm_struct *remove_vm_area(const void *addr)
                 vmap_debug_free_range(va->va_start, va->va_end);
                 kasan_free_shadow(vm);
                 free_unmap_vmap_area(va);
-                vm->size -= PAGE_SIZE;
 
                 return vm;
         }
@@ -1468,8 +1467,8 @@ static void __vunmap(const void *addr, int deallocate_pages)
                 return;
         }
 
-        debug_check_no_locks_freed(addr, area->size);
-        debug_check_no_obj_freed(addr, area->size);
+        debug_check_no_locks_freed(addr, get_vm_area_size(area));
+        debug_check_no_obj_freed(addr, get_vm_area_size(area));
 
         if (deallocate_pages) {
                 int i;
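
Note on the vmalloc hunks above: areas created with VM_NO_GUARD have no trailing guard page, so unconditionally subtracting PAGE_SIZE (the removed line) is wrong for them, while passing raw area->size to the debug checks includes the guard page when there is one; get_vm_area_size() handles both cases. A toy model of that size accounting (made-up names standing in for the kernel helper; not kernel code):

/*
 * Toy model with made-up names, not the kernel helper: report the usable
 * size of an area, which only differs from ->size when a guard page was
 * actually allocated (i.e. the NO_GUARD flag is clear).
 */
#include <stdio.h>

#define TOY_PAGE_SIZE   4096UL
#define TOY_VM_NO_GUARD 0x1UL

struct toy_vm_area {
        size_t size;            /* includes the guard page unless NO_GUARD */
        unsigned long flags;
};

static size_t toy_vm_area_size(const struct toy_vm_area *area)
{
        if (!(area->flags & TOY_VM_NO_GUARD))
                return area->size - TOY_PAGE_SIZE;
        return area->size;
}

int main(void)
{
        struct toy_vm_area guarded   = { .size = 5 * TOY_PAGE_SIZE, .flags = 0 };
        struct toy_vm_area unguarded = { .size = 4 * TOY_PAGE_SIZE, .flags = TOY_VM_NO_GUARD };

        /* Both have four usable pages; subtracting TOY_PAGE_SIZE
         * unconditionally (what the removed "vm->size -= PAGE_SIZE"
         * amounted to) would under-report the unguarded one. */
        printf("guarded:   %zu usable bytes\n", toy_vm_area_size(&guarded));
        printf("unguarded: %zu usable bytes\n", toy_vm_area_size(&unguarded));
        return 0;
}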