Merge branch 'master' into next

Conflicts:
	fs/exec.c

Removed IMA changes (the IMA checks are now performed via may_open()).

Signed-off-by: James Morris <jmorris@namei.org>
Committer: James Morris
Date: 2009-05-22 18:40:59 +10:00

537 changed files with 8700 additions and 5829 deletions

mm/madvise.c

@@ -112,14 +112,6 @@ static long madvise_willneed(struct vm_area_struct * vma,
 	if (!file)
 		return -EBADF;
 
-	/*
-	 * Page cache readahead assumes page cache pages are order-0 which
-	 * is not the case for hugetlbfs. Do not give a bad return value
-	 * but ignore the advice.
-	 */
-	if (vma->vm_flags & VM_HUGETLB)
-		return 0;
-
 	if (file->f_mapping->a_ops->get_xip_mem) {
 		/* no bad return value, but ignore advice */
 		return 0;
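For context only, not part of this merge: a minimal userspace sketch of the call path that madvise_willneed() services. The file path is a placeholder, and the mapping must be non-empty for mmap() to succeed.

/* Userspace sketch: ask the kernel to read a mapped file ahead of time. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/example.dat", O_RDONLY);	/* placeholder path */
	struct stat st;

	if (fd < 0 || fstat(fd, &st) < 0 || st.st_size == 0)
		return 1;

	void *p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	/* Hands the range to madvise_willneed() in the kernel. */
	if (madvise(p, st.st_size, MADV_WILLNEED) != 0)
		perror("madvise");

	munmap(p, st.st_size);
	close(fd);
	return 0;
}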

mm/mmzone.c

@@ -6,6 +6,7 @@
 #include <linux/stddef.h>
+#include <linux/mm.h>
 #include <linux/mmzone.h>
 #include <linux/module.h>
@@ -72,3 +73,17 @@ struct zoneref *next_zones_zonelist(struct zoneref *z,
 	*zone = zonelist_zone(z);
 	return z;
 }
+
+#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
+int memmap_valid_within(unsigned long pfn,
+			struct page *page, struct zone *zone)
+{
+	if (page_to_pfn(page) != pfn)
+		return 0;
+
+	if (page_zone(page) != zone)
+		return 0;
+
+	return 1;
+}
+#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
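Illustration only, not kernel code: a toy userspace model of the check memmap_valid_within() performs, using stand-in struct definitions. A page reached through a pfn is trusted only if its recorded pfn and zone match the ones it was looked up with.

#include <stdbool.h>
#include <stdio.h>

struct zone_model { const char *name; };	/* stand-in for struct zone */

struct page_model {				/* stand-in for struct page */
	unsigned long pfn;			/* what page_to_pfn() would report */
	struct zone_model *zone;		/* what page_zone() would report */
};

static bool memmap_valid_within_model(unsigned long pfn,
				      struct page_model *page,
				      struct zone_model *zone)
{
	if (page->pfn != pfn)
		return false;
	if (page->zone != zone)
		return false;
	return true;
}

int main(void)
{
	struct zone_model normal = { "Normal" };
	struct page_model good = { 0x1234, &normal };
	struct page_model hole = { 0, NULL };	/* freed memmap: bogus linkage */

	printf("good page: %d\n", memmap_valid_within_model(0x1234, &good, &normal));
	printf("hole page: %d\n", memmap_valid_within_model(0x5678, &hole, &normal));
	return 0;
}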

mm/page-writeback.c

@@ -94,12 +94,12 @@ unsigned long vm_dirty_bytes;
 /*
  * The interval between `kupdate'-style writebacks
  */
-unsigned int dirty_writeback_interval = 5 * 100; /* sentiseconds */
+unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */
 
 /*
  * The longest time for which data is allowed to remain dirty
  */
-unsigned int dirty_expire_interval = 30 * 100; /* sentiseconds */
+unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */
 
 /*
  * Flag that makes the machine dump writes/reads and block dirtyings.
@@ -770,7 +770,7 @@ static void wb_kupdate(unsigned long arg)
 	sync_supers();
-	oldest_jif = jiffies - msecs_to_jiffies(dirty_expire_interval);
+	oldest_jif = jiffies - msecs_to_jiffies(dirty_expire_interval * 10);
 	start_jif = jiffies;
 	next_jif = start_jif + msecs_to_jiffies(dirty_writeback_interval * 10);
 	nr_to_write = global_page_state(NR_FILE_DIRTY) +
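Illustration only: both intervals are stored in centiseconds, so converting to jiffies goes centiseconds to milliseconds (the `* 10`) and then milliseconds to jiffies. A userspace sketch with an assumed HZ of 250 and a simplified round-up conversion in place of the kernel's msecs_to_jiffies():

#include <stdio.h>

#define HZ 250					/* assumed configuration value */

static unsigned long msecs_to_jiffies_sketch(unsigned long msecs)
{
	return (msecs + (1000 / HZ) - 1) / (1000 / HZ);
}

int main(void)
{
	unsigned int dirty_writeback_interval = 5 * 100;	/* centiseconds */
	unsigned int dirty_expire_interval = 30 * 100;		/* centiseconds */

	printf("kupdate period : %lu jiffies\n",
	       msecs_to_jiffies_sketch(dirty_writeback_interval * 10));
	printf("expire interval: %lu jiffies\n",
	       msecs_to_jiffies_sketch(dirty_expire_interval * 10));
	return 0;
}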

mm/pdflush.c

@@ -57,14 +57,6 @@ static DEFINE_SPINLOCK(pdflush_lock);
  */
 int nr_pdflush_threads = 0;
 
-/*
- * The max/min number of pdflush threads. R/W by sysctl at
- * /proc/sys/vm/nr_pdflush_threads_max/min
- */
-int nr_pdflush_threads_max __read_mostly = MAX_PDFLUSH_THREADS;
-int nr_pdflush_threads_min __read_mostly = MIN_PDFLUSH_THREADS;
-
 /*
  * The time at which the pdflush thread pool last went empty
  */
@@ -76,7 +68,7 @@ static unsigned long last_empty_jifs;
  * Thread pool management algorithm:
  *
  * - The minimum and maximum number of pdflush instances are bound
- *   by nr_pdflush_threads_min and nr_pdflush_threads_max.
+ *   by MIN_PDFLUSH_THREADS and MAX_PDFLUSH_THREADS.
  *
  * - If there have been no idle pdflush instances for 1 second, create
  *   a new one.
@@ -142,13 +134,14 @@ static int __pdflush(struct pdflush_work *my_work)
 		 * To throttle creation, we reset last_empty_jifs.
 		 */
 		if (time_after(jiffies, last_empty_jifs + 1 * HZ)) {
-			if (list_empty(&pdflush_list) &&
-			    nr_pdflush_threads < nr_pdflush_threads_max) {
-				last_empty_jifs = jiffies;
-				nr_pdflush_threads++;
-				spin_unlock_irq(&pdflush_lock);
-				start_one_pdflush_thread();
-				spin_lock_irq(&pdflush_lock);
+			if (list_empty(&pdflush_list)) {
+				if (nr_pdflush_threads < MAX_PDFLUSH_THREADS) {
+					last_empty_jifs = jiffies;
+					nr_pdflush_threads++;
+					spin_unlock_irq(&pdflush_lock);
+					start_one_pdflush_thread();
+					spin_lock_irq(&pdflush_lock);
+				}
 			}
 		}
@@ -160,7 +153,7 @@ static int __pdflush(struct pdflush_work *my_work)
 		 */
 		if (list_empty(&pdflush_list))
 			continue;
-		if (nr_pdflush_threads <= nr_pdflush_threads_min)
+		if (nr_pdflush_threads <= MIN_PDFLUSH_THREADS)
 			continue;
 		pdf = list_entry(pdflush_list.prev, struct pdflush_work, list);
 		if (time_after(jiffies, pdf->when_i_went_to_sleep + 1 * HZ)) {
@@ -266,9 +259,9 @@ static int __init pdflush_init(void)
 	 * Pre-set nr_pdflush_threads... If we fail to create,
 	 * the count will be decremented.
 	 */
-	nr_pdflush_threads = nr_pdflush_threads_min;
-	for (i = 0; i < nr_pdflush_threads_min; i++)
+	nr_pdflush_threads = MIN_PDFLUSH_THREADS;
+	for (i = 0; i < MIN_PDFLUSH_THREADS; i++)
 		start_one_pdflush_thread();
 	return 0;
 }
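Illustration only, not kernel code: a toy model of the pool-sizing rules in the thread-pool comment above; the count only grows while below MAX_PDFLUSH_THREADS and only shrinks while above MIN_PDFLUSH_THREADS. The constant values here are assumptions for the sketch.

#include <stdbool.h>
#include <stdio.h>

#define MIN_PDFLUSH_THREADS 2			/* assumed value */
#define MAX_PDFLUSH_THREADS 8			/* assumed value */

static int nr_threads = MIN_PDFLUSH_THREADS;

static void maybe_grow(bool pool_empty_for_1s)
{
	/* All workers were busy for a full second: add one, up to the max. */
	if (pool_empty_for_1s && nr_threads < MAX_PDFLUSH_THREADS)
		nr_threads++;
}

static void maybe_shrink(bool worker_idle_for_1s)
{
	/* A worker sat idle for a full second: drop one, down to the min. */
	if (worker_idle_for_1s && nr_threads > MIN_PDFLUSH_THREADS)
		nr_threads--;
}

int main(void)
{
	maybe_grow(true);
	maybe_grow(true);
	maybe_shrink(true);
	printf("threads: %d (bounded to [%d, %d])\n",
	       nr_threads, MIN_PDFLUSH_THREADS, MAX_PDFLUSH_THREADS);
	return 0;
}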

mm/slob.c

@@ -60,6 +60,7 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
+#include <linux/swap.h> /* struct reclaim_state */
 #include <linux/cache.h>
 #include <linux/init.h>
 #include <linux/module.h>
@@ -255,6 +256,8 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
 static void slob_free_pages(void *b, int order)
 {
+	if (current->reclaim_state)
+		current->reclaim_state->reclaimed_slab += 1 << order;
 	free_pages((unsigned long)b, order);
 }
@@ -407,7 +410,7 @@ static void slob_free(void *block, int size)
 		spin_unlock_irqrestore(&slob_lock, flags);
 		clear_slob_page(sp);
 		free_slob_page(sp);
-		free_page((unsigned long)b);
+		slob_free_pages(b, 0);
 		return;
 	}
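Illustration only: the accounting added above is in base pages, not bytes; an order-N block contributes 1 << N pages to reclaimed_slab. A tiny userspace sketch, assuming 4 KiB pages:

#include <stdio.h>

int main(void)
{
	const unsigned long page_size = 4096;	/* assumed page size */
	unsigned long reclaimed_slab = 0;	/* models reclaim_state->reclaimed_slab */

	for (int order = 0; order <= 3; order++) {
		unsigned long pages = 1UL << order;

		reclaimed_slab += pages;
		printf("order %d frees %lu page(s) = %lu KiB\n",
		       order, pages, pages * page_size / 1024);
	}
	printf("total accounted: %lu pages\n", reclaimed_slab);
	return 0;
}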

mm/slub.c

@@ -9,6 +9,7 @@
  */
 #include <linux/mm.h>
+#include <linux/swap.h> /* struct reclaim_state */
 #include <linux/module.h>
 #include <linux/bit_spinlock.h>
 #include <linux/interrupt.h>
@@ -1170,6 +1171,8 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	__ClearPageSlab(page);
 	reset_page_mapcount(page);
+	if (current->reclaim_state)
+		current->reclaim_state->reclaimed_slab += pages;
 	__free_pages(page, order);
 }
@@ -1909,7 +1912,7 @@ static inline int calculate_order(int size)
 	 * Doh this slab cannot be placed using slub_max_order.
 	 */
 	order = slab_order(size, 1, MAX_ORDER, 1);
-	if (order <= MAX_ORDER)
+	if (order < MAX_ORDER)
 		return order;
 	return -ENOSYS;
 }
@@ -2522,6 +2525,7 @@ __setup("slub_min_order=", setup_slub_min_order);
 static int __init setup_slub_max_order(char *str)
 {
 	get_option(&str, &slub_max_order);
+	slub_max_order = min(slub_max_order, MAX_ORDER - 1);
 
 	return 1;
 }
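Illustration only: valid allocation orders run from 0 to MAX_ORDER - 1, which is why slub_max_order is clamped to MAX_ORDER - 1 and calculate_order() now treats order == MAX_ORDER as a failure. A userspace sketch of the clamp, assuming MAX_ORDER is 11 and pages are 4 KiB:

#include <stdio.h>

#define MAX_ORDER 11				/* assumed configuration value */

static int clamp_slub_max_order(int requested)
{
	return requested < MAX_ORDER - 1 ? requested : MAX_ORDER - 1;
}

int main(void)
{
	const unsigned long page_size = 4096;	/* assumed page size */
	int requested = 13;			/* e.g. slub_max_order=13 on the command line */
	int order = clamp_slub_max_order(requested);

	printf("slub_max_order clamped from %d to %d\n", requested, order);
	printf("largest slab: %lu pages = %lu KiB\n",
	       1UL << order, (1UL << order) * page_size / 1024);
	return 0;
}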

mm/vmstat.c

@@ -509,22 +509,11 @@ static void pagetypeinfo_showblockcount_print(struct seq_file *m,
 			continue;
 		page = pfn_to_page(pfn);
-#ifdef CONFIG_ARCH_FLATMEM_HAS_HOLES
-		/*
-		 * Ordinarily, memory holes in flatmem still have a valid
-		 * memmap for the PFN range. However, an architecture for
-		 * embedded systems (e.g. ARM) can free up the memmap backing
-		 * holes to save memory on the assumption the memmap is
-		 * never used. The page_zone linkages are then broken even
-		 * though pfn_valid() returns true. Skip the page if the
-		 * linkages are broken. Even if this test passed, the impact
-		 * is that the counters for the movable type are off but
-		 * fragmentation monitoring is likely meaningless on small
-		 * systems.
-		 */
-		if (page_zone(page) != zone)
+
+		/* Watch for unexpected holes punched in the memmap */
+		if (!memmap_valid_within(pfn, page, zone))
 			continue;
-#endif
+
 		mtype = get_pageblock_migratetype(page);
 		if (mtype < MIGRATE_TYPES)
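Illustration only: pagetypeinfo_showblockcount_print() feeds /proc/pagetypeinfo, so the effect of this hunk is visible by reading that file (a kernel with the vmstat proc interface is assumed, and some systems restrict the file to root). A minimal reader:

#include <stdio.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/pagetypeinfo", "r");

	if (!f) {
		perror("/proc/pagetypeinfo");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}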