Merge branch 'master' into for-next
Pull linus#master to merge the PER_CPU_DEF_ATTRIBUTES and alpha build fix
changes. As alpha in the percpu tree uses the 'weak' attribute instead of
inline assembly, there is no need for the __used attribute.

Conflicts:
	arch/alpha/include/asm/percpu.h
	arch/mn10300/kernel/vmlinux.lds.S
	include/linux/percpu-defs.h
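As a hedged illustration of the 'weak' versus __used distinction the message refers to (generic C, not the kernel's actual percpu macros):

/* A static symbol referenced only from inline assembly would normally be
 * discarded as unused, so it needs the "used" attribute to survive. */
static int asm_referenced_var __attribute__((used));

/* A weak definition is an ordinary, linker-visible definition; it is emitted
 * regardless, so no "used" annotation is required to keep it alive. */
int __attribute__((weak)) percpu_style_var;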
@@ -86,10 +86,12 @@ show_pools(struct device *dev, struct device_attribute *attr, char *buf)
unsigned pages = 0;
unsigned blocks = 0;

spin_lock_irq(&pool->lock);
list_for_each_entry(page, &pool->page_list, page_list) {
pages++;
blocks += page->in_use;
}
spin_unlock_irq(&pool->lock);

/* per-pool info, no real statistics yet */
temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
mm/hugetlb.c (17 changed lines)
@@ -1985,7 +1985,7 @@ static struct page *hugetlbfs_pagecache_page(struct hstate *h,
}

static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *ptep, int write_access)
unsigned long address, pte_t *ptep, unsigned int flags)
{
struct hstate *h = hstate_vma(vma);
int ret = VM_FAULT_SIGBUS;
@@ -2053,7 +2053,7 @@ retry:
* any allocations necessary to record that reservation occur outside
* the spinlock.
*/
if (write_access && !(vma->vm_flags & VM_SHARED))
if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
if (vma_needs_reservation(h, vma, address) < 0) {
ret = VM_FAULT_OOM;
goto backout_unlocked;
@@ -2072,7 +2072,7 @@ retry:
&& (vma->vm_flags & VM_SHARED)));
set_huge_pte_at(mm, address, ptep, new_pte);

if (write_access && !(vma->vm_flags & VM_SHARED)) {
if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
/* Optimization, do the COW without a second fault */
ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
}
@@ -2091,7 +2091,7 @@ backout_unlocked:
}

int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, int write_access)
unsigned long address, unsigned int flags)
{
pte_t *ptep;
pte_t entry;
@@ -2112,7 +2112,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
mutex_lock(&hugetlb_instantiation_mutex);
entry = huge_ptep_get(ptep);
if (huge_pte_none(entry)) {
ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
ret = hugetlb_no_page(mm, vma, address, ptep, flags);
goto out_mutex;
}

@@ -2126,7 +2126,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
* page now as it is used to determine if a reservation has been
* consumed.
*/
if (write_access && !pte_write(entry)) {
if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) {
if (vma_needs_reservation(h, vma, address) < 0) {
ret = VM_FAULT_OOM;
goto out_mutex;
@@ -2143,7 +2143,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
goto out_page_table_lock;


if (write_access) {
if (flags & FAULT_FLAG_WRITE) {
if (!pte_write(entry)) {
ret = hugetlb_cow(mm, vma, address, ptep, entry,
pagecache_page);
@@ -2152,7 +2152,8 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
entry = pte_mkdirty(entry);
}
entry = pte_mkyoung(entry);
if (huge_ptep_set_access_flags(vma, address, ptep, entry, write_access))
if (huge_ptep_set_access_flags(vma, address, ptep, entry,
flags & FAULT_FLAG_WRITE))
update_mmu_cache(vma, address, entry);

out_page_table_lock:
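Illustrative sketch of the calling convention shown in the hugetlb hunks above (the helper name is hypothetical, not from the patch): the boolean write_access parameter becomes a flags bitmask tested against FAULT_FLAG_WRITE.

#include <linux/mm.h>

/* hypothetical helper: the new-style write-fault test */
static inline int fault_is_write(unsigned int flags)
{
	return flags & FAULT_FLAG_WRITE;	/* was: the write_access argument */
}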
mm/kmemleak.c (245 changed lines)
@@ -48,10 +48,10 @@
* scanned. This list is only modified during a scanning episode when the
* scan_mutex is held. At the end of a scan, the gray_list is always empty.
* Note that the kmemleak_object.use_count is incremented when an object is
* added to the gray_list and therefore cannot be freed
* - kmemleak_mutex (mutex): prevents multiple users of the "kmemleak" debugfs
* file together with modifications to the memory scanning parameters
* including the scan_thread pointer
* added to the gray_list and therefore cannot be freed. This mutex also
* prevents multiple users of the "kmemleak" debugfs file together with
* modifications to the memory scanning parameters including the scan_thread
* pointer
*
* The kmemleak_object structures have a use_count incremented or decremented
* using the get_object()/put_object() functions. When the use_count becomes
@@ -61,6 +61,8 @@
* structure.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
@@ -103,7 +105,6 @@
#define MAX_TRACE 16 /* stack trace length */
#define REPORTS_NR 50 /* maximum number of reported leaks */
#define MSECS_MIN_AGE 5000 /* minimum object age for reporting */
#define MSECS_SCAN_YIELD 10 /* CPU yielding period */
#define SECS_FIRST_SCAN 60 /* delay before the first scan */
#define SECS_SCAN_WAIT 600 /* subsequent auto scanning delay */

@@ -184,19 +185,16 @@ static atomic_t kmemleak_error = ATOMIC_INIT(0);
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

/* used for yielding the CPU to other tasks during scanning */
static unsigned long next_scan_yield;
static struct task_struct *scan_thread;
static unsigned long jiffies_scan_yield;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan;
/* mutex protecting the memory scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* mutex protecting the access to the /sys/kernel/debug/kmemleak file */
static DEFINE_MUTEX(kmemleak_mutex);

/* number of leaks reported (for limitation purposes) */
static int reported_leaks;
@@ -233,7 +231,7 @@ struct early_log {
};

/* early logging buffer and current position */
static struct early_log early_log[200];
static struct early_log early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE];
static int crt_early_log;

static void kmemleak_disable(void);
@@ -276,15 +274,6 @@ static int color_gray(const struct kmemleak_object *object)
return object->min_count != -1 && object->count >= object->min_count;
}

/*
* Objects are considered referenced if their color is gray and they have not
* been deleted.
*/
static int referenced_object(struct kmemleak_object *object)
{
return (object->flags & OBJECT_ALLOCATED) && color_gray(object);
}

/*
* Objects are considered unreferenced only if their color is white, they have
* not be deleted and have a minimum age to avoid false positives caused by
@@ -293,42 +282,28 @@ static int referenced_object(struct kmemleak_object *object)
static int unreferenced_object(struct kmemleak_object *object)
{
return (object->flags & OBJECT_ALLOCATED) && color_white(object) &&
time_is_before_eq_jiffies(object->jiffies + jiffies_min_age);
time_before_eq(object->jiffies + jiffies_min_age,
jiffies_last_scan);
}

/*
* Printing of the (un)referenced objects information, either to the seq file
* or to the kernel log. The print_referenced/print_unreferenced functions
* must be called with the object->lock held.
* Printing of the unreferenced objects information to the seq file. The
* print_unreferenced function must be called with the object->lock held.
*/
#define print_helper(seq, x...) do { \
struct seq_file *s = (seq); \
if (s) \
seq_printf(s, x); \
else \
pr_info(x); \
} while (0)

static void print_referenced(struct kmemleak_object *object)
{
pr_info("kmemleak: referenced object 0x%08lx (size %zu)\n",
object->pointer, object->size);
}

static void print_unreferenced(struct seq_file *seq,
struct kmemleak_object *object)
{
int i;

print_helper(seq, "kmemleak: unreferenced object 0x%08lx (size %zu):\n",
object->pointer, object->size);
print_helper(seq, " comm \"%s\", pid %d, jiffies %lu\n",
object->comm, object->pid, object->jiffies);
print_helper(seq, " backtrace:\n");
seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
object->pointer, object->size);
seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu\n",
object->comm, object->pid, object->jiffies);
seq_printf(seq, " backtrace:\n");

for (i = 0; i < object->trace_len; i++) {
void *ptr = (void *)object->trace[i];
print_helper(seq, " [<%p>] %pS\n", ptr, ptr);
seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
}
}

@@ -344,7 +319,7 @@ static void dump_object_info(struct kmemleak_object *object)
trace.nr_entries = object->trace_len;
trace.entries = object->trace;

pr_notice("kmemleak: Object 0x%08lx (size %zu):\n",
pr_notice("Object 0x%08lx (size %zu):\n",
object->tree_node.start, object->size);
pr_notice(" comm \"%s\", pid %d, jiffies %lu\n",
object->comm, object->pid, object->jiffies);
@@ -372,7 +347,7 @@ static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
object = prio_tree_entry(node, struct kmemleak_object,
tree_node);
if (!alias && object->pointer != ptr) {
kmemleak_warn("kmemleak: Found object by alias");
kmemleak_warn("Found object by alias");
object = NULL;
}
} else
@@ -467,8 +442,7 @@ static void create_object(unsigned long ptr, size_t size, int min_count,

object = kmem_cache_alloc(object_cache, gfp & GFP_KMEMLEAK_MASK);
if (!object) {
kmemleak_stop("kmemleak: Cannot allocate a kmemleak_object "
"structure\n");
kmemleak_stop("Cannot allocate a kmemleak_object structure\n");
return;
}

@@ -527,8 +501,8 @@ static void create_object(unsigned long ptr, size_t size, int min_count,
if (node != &object->tree_node) {
unsigned long flags;

kmemleak_stop("kmemleak: Cannot insert 0x%lx into the object "
"search tree (already existing)\n", ptr);
kmemleak_stop("Cannot insert 0x%lx into the object search tree "
"(already existing)\n", ptr);
object = lookup_object(ptr, 1);
spin_lock_irqsave(&object->lock, flags);
dump_object_info(object);
@@ -553,8 +527,10 @@ static void delete_object(unsigned long ptr)
write_lock_irqsave(&kmemleak_lock, flags);
object = lookup_object(ptr, 0);
if (!object) {
kmemleak_warn("kmemleak: Freeing unknown object at 0x%08lx\n",
#ifdef DEBUG
kmemleak_warn("Freeing unknown object at 0x%08lx\n",
ptr);
#endif
write_unlock_irqrestore(&kmemleak_lock, flags);
return;
}
@@ -570,8 +546,6 @@ static void delete_object(unsigned long ptr)
* cannot be freed when it is being scanned.
*/
spin_lock_irqsave(&object->lock, flags);
if (object->flags & OBJECT_REPORTED)
print_referenced(object);
object->flags &= ~OBJECT_ALLOCATED;
spin_unlock_irqrestore(&object->lock, flags);
put_object(object);
@@ -588,8 +562,7 @@ static void make_gray_object(unsigned long ptr)

object = find_and_get_object(ptr, 0);
if (!object) {
kmemleak_warn("kmemleak: Graying unknown object at 0x%08lx\n",
ptr);
kmemleak_warn("Graying unknown object at 0x%08lx\n", ptr);
return;
}

@@ -610,8 +583,7 @@ static void make_black_object(unsigned long ptr)

object = find_and_get_object(ptr, 0);
if (!object) {
kmemleak_warn("kmemleak: Blacking unknown object at 0x%08lx\n",
ptr);
kmemleak_warn("Blacking unknown object at 0x%08lx\n", ptr);
return;
}

@@ -634,21 +606,20 @@ static void add_scan_area(unsigned long ptr, unsigned long offset,

object = find_and_get_object(ptr, 0);
if (!object) {
kmemleak_warn("kmemleak: Adding scan area to unknown "
"object at 0x%08lx\n", ptr);
kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
ptr);
return;
}

area = kmem_cache_alloc(scan_area_cache, gfp & GFP_KMEMLEAK_MASK);
if (!area) {
kmemleak_warn("kmemleak: Cannot allocate a scan area\n");
kmemleak_warn("Cannot allocate a scan area\n");
goto out;
}

spin_lock_irqsave(&object->lock, flags);
if (offset + length > object->size) {
kmemleak_warn("kmemleak: Scan area larger than object "
"0x%08lx\n", ptr);
kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
dump_object_info(object);
kmem_cache_free(scan_area_cache, area);
goto out_unlock;
@@ -677,8 +648,7 @@ static void object_no_scan(unsigned long ptr)

object = find_and_get_object(ptr, 0);
if (!object) {
kmemleak_warn("kmemleak: Not scanning unknown object at "
"0x%08lx\n", ptr);
kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
return;
}

@@ -699,7 +669,8 @@ static void log_early(int op_type, const void *ptr, size_t size,
struct early_log *log;

if (crt_early_log >= ARRAY_SIZE(early_log)) {
kmemleak_stop("kmemleak: Early log buffer exceeded\n");
pr_warning("Early log buffer exceeded\n");
kmemleak_disable();
return;
}

@@ -810,21 +781,6 @@ void kmemleak_no_scan(const void *ptr)
}
EXPORT_SYMBOL(kmemleak_no_scan);

/*
* Yield the CPU so that other tasks get a chance to run. The yielding is
* rate-limited to avoid excessive number of calls to the schedule() function
* during memory scanning.
*/
static void scan_yield(void)
{
might_sleep();

if (time_is_before_eq_jiffies(next_scan_yield)) {
schedule();
next_scan_yield = jiffies + jiffies_scan_yield;
}
}

/*
* Memory scanning is a long process and it needs to be interruptable. This
* function checks whether such interrupt condition occured.
@@ -865,15 +821,6 @@ static void scan_block(void *_start, void *_end,
if (scan_should_stop())
break;

/*
* When scanning a memory block with a corresponding
* kmemleak_object, the CPU yielding is handled in the calling
* code since it holds the object->lock to avoid the block
* freeing.
*/
if (!scanned)
scan_yield();

object = find_and_get_object(pointer, 1);
if (!object)
continue;
@@ -955,6 +902,9 @@ static void kmemleak_scan(void)
struct kmemleak_object *object, *tmp;
struct task_struct *task;
int i;
int new_leaks = 0;

jiffies_last_scan = jiffies;

/* prepare the kmemleak_object's */
rcu_read_lock();
@@ -966,7 +916,7 @@ static void kmemleak_scan(void)
* 1 reference to any object at this point.
*/
if (atomic_read(&object->use_count) > 1) {
pr_debug("kmemleak: object->use_count = %d\n",
pr_debug("object->use_count = %d\n",
atomic_read(&object->use_count));
dump_object_info(object);
}
@@ -1036,7 +986,7 @@ static void kmemleak_scan(void)
*/
object = list_entry(gray_list.next, typeof(*object), gray_list);
while (&object->gray_list != &gray_list) {
scan_yield();
cond_resched();

/* may add new objects to the list */
if (!scan_should_stop())
@@ -1052,6 +1002,32 @@ static void kmemleak_scan(void)
object = tmp;
}
WARN_ON(!list_empty(&gray_list));

/*
* If scanning was stopped do not report any new unreferenced objects.
*/
if (scan_should_stop())
return;

/*
* Scanning result reporting.
*/
rcu_read_lock();
list_for_each_entry_rcu(object, &object_list, object_list) {
spin_lock_irqsave(&object->lock, flags);
if (unreferenced_object(object) &&
!(object->flags & OBJECT_REPORTED)) {
object->flags |= OBJECT_REPORTED;
new_leaks++;
}
spin_unlock_irqrestore(&object->lock, flags);
}
rcu_read_unlock();

if (new_leaks)
pr_info("%d new suspected memory leaks (see "
"/sys/kernel/debug/kmemleak)\n", new_leaks);

}

/*
@@ -1062,7 +1038,7 @@ static int kmemleak_scan_thread(void *arg)
{
static int first_run = 1;

pr_info("kmemleak: Automatic memory scanning thread started\n");
pr_info("Automatic memory scanning thread started\n");

/*
* Wait before the first scan to allow the system to fully initialize.
@@ -1073,49 +1049,25 @@ static int kmemleak_scan_thread(void *arg)
}

while (!kthread_should_stop()) {
struct kmemleak_object *object;
signed long timeout = jiffies_scan_wait;

mutex_lock(&scan_mutex);

kmemleak_scan();
reported_leaks = 0;

rcu_read_lock();
list_for_each_entry_rcu(object, &object_list, object_list) {
unsigned long flags;

if (reported_leaks >= REPORTS_NR)
break;
spin_lock_irqsave(&object->lock, flags);
if (!(object->flags & OBJECT_REPORTED) &&
unreferenced_object(object)) {
print_unreferenced(NULL, object);
object->flags |= OBJECT_REPORTED;
reported_leaks++;
} else if ((object->flags & OBJECT_REPORTED) &&
referenced_object(object)) {
print_referenced(object);
object->flags &= ~OBJECT_REPORTED;
}
spin_unlock_irqrestore(&object->lock, flags);
}
rcu_read_unlock();

mutex_unlock(&scan_mutex);

/* wait before the next scan */
while (timeout && !kthread_should_stop())
timeout = schedule_timeout_interruptible(timeout);
}

pr_info("kmemleak: Automatic memory scanning thread ended\n");
pr_info("Automatic memory scanning thread ended\n");

return 0;
}

/*
* Start the automatic memory scanning thread. This function must be called
* with the kmemleak_mutex held.
* with the scan_mutex held.
*/
void start_scan_thread(void)
{
@@ -1123,14 +1075,14 @@ void start_scan_thread(void)
return;
scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
if (IS_ERR(scan_thread)) {
pr_warning("kmemleak: Failed to create the scan thread\n");
pr_warning("Failed to create the scan thread\n");
scan_thread = NULL;
}
}

/*
* Stop the automatic memory scanning thread. This function must be called
* with the kmemleak_mutex held.
* with the scan_mutex held.
*/
void stop_scan_thread(void)
{
@@ -1150,10 +1102,8 @@ static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
struct kmemleak_object *object;
loff_t n = *pos;

if (!n) {
kmemleak_scan();
if (!n)
reported_leaks = 0;
}
if (reported_leaks >= REPORTS_NR)
return NULL;

@@ -1214,11 +1164,10 @@ static int kmemleak_seq_show(struct seq_file *seq, void *v)
unsigned long flags;

spin_lock_irqsave(&object->lock, flags);
if (!unreferenced_object(object))
goto out;
print_unreferenced(seq, object);
reported_leaks++;
out:
if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object)) {
print_unreferenced(seq, object);
reported_leaks++;
}
spin_unlock_irqrestore(&object->lock, flags);
return 0;
}
@@ -1237,13 +1186,10 @@ static int kmemleak_open(struct inode *inode, struct file *file)
if (!atomic_read(&kmemleak_enabled))
return -EBUSY;

ret = mutex_lock_interruptible(&kmemleak_mutex);
ret = mutex_lock_interruptible(&scan_mutex);
if (ret < 0)
goto out;
if (file->f_mode & FMODE_READ) {
ret = mutex_lock_interruptible(&scan_mutex);
if (ret < 0)
goto kmemleak_unlock;
ret = seq_open(file, &kmemleak_seq_ops);
if (ret < 0)
goto scan_unlock;
@@ -1252,8 +1198,6 @@ static int kmemleak_open(struct inode *inode, struct file *file)

scan_unlock:
mutex_unlock(&scan_mutex);
kmemleak_unlock:
mutex_unlock(&kmemleak_mutex);
out:
return ret;
}
@@ -1262,11 +1206,9 @@ static int kmemleak_release(struct inode *inode, struct file *file)
{
int ret = 0;

if (file->f_mode & FMODE_READ) {
if (file->f_mode & FMODE_READ)
seq_release(inode, file);
mutex_unlock(&scan_mutex);
}
mutex_unlock(&kmemleak_mutex);
mutex_unlock(&scan_mutex);

return ret;
}
@@ -1281,6 +1223,7 @@ static int kmemleak_release(struct inode *inode, struct file *file)
* scan=off - stop the automatic memory scanning thread
* scan=... - set the automatic memory scanning period in seconds (0 to
* disable it)
* scan - trigger a memory scan
*/
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
size_t size, loff_t *ppos)
@@ -1318,7 +1261,9 @@ static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
start_scan_thread();
}
} else
} else if (strncmp(buf, "scan", 4) == 0)
kmemleak_scan();
else
return -EINVAL;

/* ignore the rest of the buffer, only one command at a time */
@@ -1343,11 +1288,9 @@ static int kmemleak_cleanup_thread(void *arg)
{
struct kmemleak_object *object;

mutex_lock(&kmemleak_mutex);
stop_scan_thread();
mutex_unlock(&kmemleak_mutex);

mutex_lock(&scan_mutex);
stop_scan_thread();

rcu_read_lock();
list_for_each_entry_rcu(object, &object_list, object_list)
delete_object(object->pointer);
@@ -1367,7 +1310,7 @@ static void kmemleak_cleanup(void)
cleanup_thread = kthread_run(kmemleak_cleanup_thread, NULL,
"kmemleak-clean");
if (IS_ERR(cleanup_thread))
pr_warning("kmemleak: Failed to create the clean-up thread\n");
pr_warning("Failed to create the clean-up thread\n");
}

/*
@@ -1414,7 +1357,6 @@ void __init kmemleak_init(void)
int i;
unsigned long flags;

jiffies_scan_yield = msecs_to_jiffies(MSECS_SCAN_YIELD);
jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

@@ -1488,11 +1430,10 @@ static int __init kmemleak_late_init(void)
dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
&kmemleak_fops);
if (!dentry)
pr_warning("kmemleak: Failed to create the debugfs kmemleak "
"file\n");
mutex_lock(&kmemleak_mutex);
pr_warning("Failed to create the debugfs kmemleak file\n");
mutex_lock(&scan_mutex);
start_scan_thread();
mutex_unlock(&kmemleak_mutex);
mutex_unlock(&scan_mutex);

pr_info("Kernel memory leak detector initialized\n");
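A minimal sketch of the pr_fmt() convention the kmemleak hunks above rely on (assumed standard kernel behaviour, not code from the patch): defining pr_fmt before the includes makes every pr_*() call prepend the module name, which is why the literal "kmemleak: " prefixes are dropped from the messages.

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>

static void pr_fmt_example(void)
{
	/* emitted as "kmemleak: Early log buffer exceeded" when built as kmemleak.o */
	pr_warning("Early log buffer exceeded\n");
}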
mm/memory.c (34 changed lines)
@@ -1207,8 +1207,8 @@ static inline int use_zero_page(struct vm_area_struct *vma)


int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, int len, int flags,
struct page **pages, struct vm_area_struct **vmas)
unsigned long start, int nr_pages, int flags,
struct page **pages, struct vm_area_struct **vmas)
{
int i;
unsigned int vm_flags = 0;
@@ -1217,7 +1217,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
int ignore_sigkill = !!(flags & GUP_FLAGS_IGNORE_SIGKILL);

if (len <= 0)
if (nr_pages <= 0)
return 0;
/*
* Require read or write permissions.
@@ -1269,7 +1269,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
vmas[i] = gate_vma;
i++;
start += PAGE_SIZE;
len--;
nr_pages--;
continue;
}

@@ -1280,7 +1280,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,

if (is_vm_hugetlb_page(vma)) {
i = follow_hugetlb_page(mm, vma, pages, vmas,
&start, &len, i, write);
&start, &nr_pages, i, write);
continue;
}

@@ -1311,8 +1311,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
while (!(page = follow_page(vma, start, foll_flags))) {
int ret;

/* FOLL_WRITE matches FAULT_FLAG_WRITE! */
ret = handle_mm_fault(mm, vma, start, foll_flags & FOLL_WRITE);
ret = handle_mm_fault(mm, vma, start,
(foll_flags & FOLL_WRITE) ?
FAULT_FLAG_WRITE : 0);

if (ret & VM_FAULT_ERROR) {
if (ret & VM_FAULT_OOM)
return i ? i : -ENOMEM;
@@ -1355,9 +1357,9 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
vmas[i] = vma;
i++;
start += PAGE_SIZE;
len--;
} while (len && start < vma->vm_end);
} while (len);
nr_pages--;
} while (nr_pages && start < vma->vm_end);
} while (nr_pages);
return i;
}

@@ -1366,7 +1368,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
* @tsk: task_struct of target task
* @mm: mm_struct of target mm
* @start: starting user address
* @len: number of pages from start to pin
* @nr_pages: number of pages from start to pin
* @write: whether pages will be written to by the caller
* @force: whether to force write access even if user mapping is
* readonly. This will result in the page being COWed even
@@ -1378,7 +1380,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
* Or NULL if the caller does not require them.
*
* Returns number of pages pinned. This may be fewer than the number
* requested. If len is 0 or negative, returns 0. If no pages
* requested. If nr_pages is 0 or negative, returns 0. If no pages
* were pinned, returns -errno. Each page returned must be released
* with a put_page() call when it is finished with. vmas will only
* remain valid while mmap_sem is held.
@@ -1412,7 +1414,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
* See also get_user_pages_fast, for performance critical applications.
*/
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, int len, int write, int force,
unsigned long start, int nr_pages, int write, int force,
struct page **pages, struct vm_area_struct **vmas)
{
int flags = 0;
@@ -1422,9 +1424,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
if (force)
flags |= GUP_FLAGS_FORCE;

return __get_user_pages(tsk, mm,
start, len, flags,
pages, vmas);
return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
}

EXPORT_SYMBOL(get_user_pages);
@@ -2517,7 +2517,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
delayacct_set_flag(DELAYACCT_PF_SWAPIN);
page = lookup_swap_cache(entry);
if (!page) {
grab_swap_token(); /* Contend for token _before_ read-in */
grab_swap_token(mm); /* Contend for token _before_ read-in */
page = swapin_readahead(entry,
GFP_HIGHUSER_MOVABLE, vma, address);
if (!page) {
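A sketch of the flag translation made explicit in the __get_user_pages hunk above (the helper is hypothetical, not from the patch): the GUP-internal FOLL_WRITE bit is converted to FAULT_FLAG_WRITE before calling the fault handler, instead of relying on the two constants sharing a value.

#include <linux/mm.h>

/* hypothetical helper: map follow_page flags onto fault flags */
static inline unsigned int gup_to_fault_flags(unsigned int foll_flags)
{
	return (foll_flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0;
}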
mm/nommu.c (33 changed lines)
@@ -173,8 +173,8 @@ unsigned int kobjsize(const void *objp)
}

int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, int len, int flags,
struct page **pages, struct vm_area_struct **vmas)
unsigned long start, int nr_pages, int flags,
struct page **pages, struct vm_area_struct **vmas)
{
struct vm_area_struct *vma;
unsigned long vm_flags;
@@ -189,7 +189,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
vm_flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

for (i = 0; i < len; i++) {
for (i = 0; i < nr_pages; i++) {
vma = find_vma(mm, start);
if (!vma)
goto finish_or_fault;
@@ -224,7 +224,7 @@ finish_or_fault:
* - don't permit access to VMAs that don't support it, such as I/O mappings
*/
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, int len, int write, int force,
unsigned long start, int nr_pages, int write, int force,
struct page **pages, struct vm_area_struct **vmas)
{
int flags = 0;
@@ -234,12 +234,31 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
if (force)
flags |= GUP_FLAGS_FORCE;

return __get_user_pages(tsk, mm,
start, len, flags,
pages, vmas);
return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
}
EXPORT_SYMBOL(get_user_pages);

/**
* follow_pfn - look up PFN at a user virtual address
* @vma: memory mapping
* @address: user virtual address
* @pfn: location to store found PFN
*
* Only IO mappings and raw PFN mappings are allowed.
*
* Returns zero and the pfn at @pfn on success, -ve otherwise.
*/
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
unsigned long *pfn)
{
if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
return -EINVAL;

*pfn = address >> PAGE_SHIFT;
return 0;
}
EXPORT_SYMBOL(follow_pfn);

DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;
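A hypothetical caller of the follow_pfn() helper added above (illustrative only; the wrapper name is made up): it resolves a user virtual address inside a VM_IO/VM_PFNMAP mapping to a page frame number.

#include <linux/mm.h>

static int vaddr_to_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long *pfn)
{
	/* fails with -EINVAL unless the vma is an IO or raw PFN mapping */
	return follow_pfn(vma, addr, pfn);
}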
@@ -541,8 +541,11 @@ static void balance_dirty_pages(struct address_space *mapping)
* filesystems (i.e. NFS) in which data may have been
* written to the server's write cache, but has not yet
* been flushed to permanent storage.
* Only move pages to writeback if this bdi is over its
* threshold otherwise wait until the disk writes catch
* up.
*/
if (bdi_nr_reclaimable) {
if (bdi_nr_reclaimable > bdi_thresh) {
writeback_inodes(&wbc);
pages_written += write_chunk - wbc.nr_to_write;
get_dirty_limits(&background_thresh, &dirty_thresh,
@@ -1153,10 +1153,10 @@ again:
* properly detect and handle allocation failures.
*
* We most definitely don't want callers attempting to
* allocate greater than single-page units with
* allocate greater than order-1 page units with
* __GFP_NOFAIL.
*/
WARN_ON_ONCE(order > 0);
WARN_ON_ONCE(order > 1);
}
spin_lock_irqsave(&zone->lock, flags);
page = __rmqueue(zone, order, migratetype);
@@ -3026,7 +3026,7 @@ bad:
if (dzone == zone)
break;
kfree(zone_pcp(dzone, cpu));
zone_pcp(dzone, cpu) = NULL;
zone_pcp(dzone, cpu) = &boot_pageset[cpu];
}
return -ENOMEM;
}
@@ -3041,7 +3041,7 @@ static inline void free_zone_pagesets(int cpu)
/* Free per_cpu_pageset if it is slab allocated */
if (pset != &boot_pageset[cpu])
kfree(pset);
zone_pcp(zone, cpu) = NULL;
zone_pcp(zone, cpu) = &boot_pageset[cpu];
}
}

@@ -4032,6 +4032,8 @@ static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
int i, nid;
unsigned long usable_startpfn;
unsigned long kernelcore_node, kernelcore_remaining;
/* save the state before borrow the nodemask */
nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
unsigned long totalpages = early_calculate_totalpages();
int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);

@@ -4059,7 +4061,7 @@ static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)

/* If kernelcore was not specified, there is no ZONE_MOVABLE */
if (!required_kernelcore)
return;
goto out;

/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
find_usable_zone_for_movable();
@@ -4158,6 +4160,10 @@ restart:
for (nid = 0; nid < MAX_NUMNODES; nid++)
zone_movable_pfn[nid] =
roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);

out:
/* restore the node_state */
node_states[N_HIGH_MEMORY] = saved_node_state;
}

/* Any regular memory on that node ? */
@@ -4242,11 +4248,6 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
early_node_map[i].start_pfn,
early_node_map[i].end_pfn);

/*
* find_zone_movable_pfns_for_nodes/early_calculate_totalpages init
* that node_mask, clear it at first
*/
nodes_clear(node_states[N_HIGH_MEMORY]);
/* Initialise every node */
mminit_verify_pageflags_layout();
setup_nr_node_ids();
@@ -4659,7 +4660,7 @@ int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
if (!write || (ret == -EINVAL))
return ret;
for_each_zone(zone) {
for_each_populated_zone(zone) {
for_each_online_cpu(cpu) {
unsigned long high;
high = zone->present_pages / percpu_pagelist_fraction;
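A sketch of the sanity check tightened in the page allocator hunk above (hypothetical wrapper, not the patch's code): __GFP_NOFAIL requests larger than order 1 trigger a one-time warning, since such allocations may never succeed.

#include <linux/kernel.h>
#include <linux/gfp.h>

static void check_nofail_order(gfp_t gfp_mask, unsigned int order)
{
	if (gfp_mask & __GFP_NOFAIL)
		WARN_ON_ONCE(order > 1);	/* was: order > 0 */
}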
@@ -1558,6 +1558,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, int mode,
spin_lock_init(&info->lock);
info->flags = flags & VM_NORESERVE;
INIT_LIST_HEAD(&info->swaplist);
cache_no_acl(inode);

switch (mode & S_IFMT) {
default:
@@ -2388,7 +2389,6 @@ static void shmem_destroy_inode(struct inode *inode)
/* only struct inode is valid if it's an inline symlink */
mpol_free_shared_policy(&SHMEM_I(inode)->policy);
}
shmem_acl_destroy_inode(inode);
kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}

@@ -2397,10 +2397,6 @@ static void init_once(void *foo)
struct shmem_inode_info *p = (struct shmem_inode_info *) foo;

inode_init_once(&p->vfs_inode);
#ifdef CONFIG_TMPFS_POSIX_ACL
p->i_acl = NULL;
p->i_default_acl = NULL;
#endif
}

static int init_inodecache(void)
@@ -22,11 +22,11 @@ shmem_get_acl(struct inode *inode, int type)
spin_lock(&inode->i_lock);
switch(type) {
case ACL_TYPE_ACCESS:
acl = posix_acl_dup(SHMEM_I(inode)->i_acl);
acl = posix_acl_dup(inode->i_acl);
break;

case ACL_TYPE_DEFAULT:
acl = posix_acl_dup(SHMEM_I(inode)->i_default_acl);
acl = posix_acl_dup(inode->i_default_acl);
break;
}
spin_unlock(&inode->i_lock);
@@ -45,13 +45,13 @@ shmem_set_acl(struct inode *inode, int type, struct posix_acl *acl)
spin_lock(&inode->i_lock);
switch(type) {
case ACL_TYPE_ACCESS:
free = SHMEM_I(inode)->i_acl;
SHMEM_I(inode)->i_acl = posix_acl_dup(acl);
free = inode->i_acl;
inode->i_acl = posix_acl_dup(acl);
break;

case ACL_TYPE_DEFAULT:
free = SHMEM_I(inode)->i_default_acl;
SHMEM_I(inode)->i_default_acl = posix_acl_dup(acl);
free = inode->i_default_acl;
inode->i_default_acl = posix_acl_dup(acl);
break;
}
spin_unlock(&inode->i_lock);
@@ -154,23 +154,6 @@ shmem_acl_init(struct inode *inode, struct inode *dir)
return generic_acl_init(inode, dir, &shmem_acl_ops);
}

/**
* shmem_acl_destroy_inode - destroy acls hanging off the in-memory inode
*
* This is done before destroying the actual inode.
*/

void
shmem_acl_destroy_inode(struct inode *inode)
{
if (SHMEM_I(inode)->i_acl)
posix_acl_release(SHMEM_I(inode)->i_acl);
SHMEM_I(inode)->i_acl = NULL;
if (SHMEM_I(inode)->i_default_acl)
posix_acl_release(SHMEM_I(inode)->i_default_acl);
SHMEM_I(inode)->i_default_acl = NULL;
}

/**
* shmem_check_acl - check_acl() callback for generic_permission()
*/
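Illustrative before/after of the shmem ACL hunks above (hypothetical accessor, assuming the generic inode ACL fields are available): the cached ACL pointers move from the tmpfs-private shmem_inode_info into struct inode, so lookups read inode->i_acl under inode->i_lock.

#include <linux/fs.h>
#include <linux/posix_acl.h>

static struct posix_acl *get_cached_access_acl(struct inode *inode)
{
	struct posix_acl *acl;

	spin_lock(&inode->i_lock);
	acl = posix_acl_dup(inode->i_acl);	/* was: SHMEM_I(inode)->i_acl */
	spin_unlock(&inode->i_lock);
	return acl;
}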
mm/slub.c (10 changed lines)
@@ -1085,11 +1085,17 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
{
struct page *page;
struct kmem_cache_order_objects oo = s->oo;
gfp_t alloc_gfp;

flags |= s->allocflags;

page = alloc_slab_page(flags | __GFP_NOWARN | __GFP_NORETRY, node,
oo);
/*
* Let the initial higher-order allocation fail under memory pressure
* so we fall-back to the minimum order allocation.
*/
alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;

page = alloc_slab_page(alloc_gfp, node, oo);
if (unlikely(!page)) {
oo = s->min;
/*
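A minimal sketch of the gfp manipulation in the slub hunk above (hypothetical helper name): the first, higher-order attempt is made with __GFP_NOWARN | __GFP_NORETRY added and __GFP_NOFAIL stripped, so it can fail quietly before the code falls back to the minimum order.

#include <linux/gfp.h>

static gfp_t first_attempt_gfp(gfp_t flags)
{
	/* allow the speculative high-order attempt to fail without warning */
	return (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
}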
mm/thrash.c (32 changed lines)
@@ -26,47 +26,45 @@ static DEFINE_SPINLOCK(swap_token_lock);
struct mm_struct *swap_token_mm;
static unsigned int global_faults;

void grab_swap_token(void)
void grab_swap_token(struct mm_struct *mm)
{
int current_interval;

global_faults++;

current_interval = global_faults - current->mm->faultstamp;
current_interval = global_faults - mm->faultstamp;

if (!spin_trylock(&swap_token_lock))
return;

/* First come first served */
if (swap_token_mm == NULL) {
current->mm->token_priority = current->mm->token_priority + 2;
swap_token_mm = current->mm;
mm->token_priority = mm->token_priority + 2;
swap_token_mm = mm;
goto out;
}

if (current->mm != swap_token_mm) {
if (current_interval < current->mm->last_interval)
current->mm->token_priority++;
if (mm != swap_token_mm) {
if (current_interval < mm->last_interval)
mm->token_priority++;
else {
if (likely(current->mm->token_priority > 0))
current->mm->token_priority--;
if (likely(mm->token_priority > 0))
mm->token_priority--;
}
/* Check if we deserve the token */
if (current->mm->token_priority >
swap_token_mm->token_priority) {
current->mm->token_priority += 2;
swap_token_mm = current->mm;
if (mm->token_priority > swap_token_mm->token_priority) {
mm->token_priority += 2;
swap_token_mm = mm;
}
} else {
/* Token holder came in again! */
current->mm->token_priority += 2;
mm->token_priority += 2;
}

out:
current->mm->faultstamp = global_faults;
current->mm->last_interval = current_interval;
mm->faultstamp = global_faults;
mm->last_interval = current_interval;
spin_unlock(&swap_token_lock);
return;
}

/* Called on process exit. */
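A hypothetical caller-side view of the grab_swap_token() change above (the surrounding function is made up): the faulting mm is now passed in explicitly, matching the do_swap_page() hunk in mm/memory.c, instead of being read from current->mm inside the function.

#include <linux/mm_types.h>
#include <linux/swap.h>

static void swap_fault_path(struct mm_struct *mm)
{
	grab_swap_token(mm);	/* was: grab_swap_token(); */
}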
@@ -932,7 +932,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
continue;
if (__isolate_lru_page(cursor_page, mode, file) == 0) {
list_move(&cursor_page->lru, dst);
mem_cgroup_del_lru(page);
mem_cgroup_del_lru(cursor_page);
nr_taken++;
scan++;
}