Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
mm/bounce.c
@@ -14,6 +14,7 @@
 #include <linux/hash.h>
 #include <linux/highmem.h>
 #include <linux/blktrace_api.h>
+#include <trace/block.h>
 #include <asm/tlbflush.h>
 
 #define POOL_SIZE 64
@@ -21,6 +22,8 @@
 
 static mempool_t *page_pool, *isa_page_pool;
 
+DEFINE_TRACE(block_bio_bounce);
+
 #ifdef CONFIG_HIGHMEM
 static __init int init_emergency_pool(void)
 {
@@ -222,7 +225,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 	if (!bio)
 		return;
 
-	blk_add_trace_bio(q, *bio_orig, BLK_TA_BOUNCE);
+	trace_block_bio_bounce(q, *bio_orig);
 
 	/*
 	 * at least one page was bounced, fill in possible non-highmem
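Bounce bios are now reported through a tracepoint rather than a direct blktrace call. A minimal sketch, not part of this commit (my_bounce_probe and my_probe_init are hypothetical), of attaching a probe via the register function that the DECLARE_TRACE() in <trace/block.h> generates for this tracepoint:

#include <linux/blkdev.h>
#include <trace/block.h>

/* called each time __blk_queue_bounce() bounces a bio */
static void my_bounce_probe(struct request_queue *q, struct bio *bio)
{
	pr_info("bounce: sector %llu\n",
		(unsigned long long)bio->bi_sector);
}

static int __init my_probe_init(void)
{
	return register_trace_block_bio_bounce(my_bounce_probe);
}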
70 mm/memory.c
@@ -669,6 +669,16 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	if (is_vm_hugetlb_page(vma))
 		return copy_hugetlb_page_range(dst_mm, src_mm, vma);
 
+	if (unlikely(is_pfn_mapping(vma))) {
+		/*
+		 * We do not free on error cases below as remove_vma
+		 * gets called on error from higher level routine
+		 */
+		ret = track_pfn_vma_copy(vma);
+		if (ret)
+			return ret;
+	}
+
 	/*
 	 * We need to invalidate the secondary MMU mappings only when
 	 * there could be a permission downgrade on the ptes of the
@@ -915,6 +925,9 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
 		if (vma->vm_flags & VM_ACCOUNT)
 			*nr_accounted += (end - start) >> PAGE_SHIFT;
 
+		if (unlikely(is_pfn_mapping(vma)))
+			untrack_pfn_vma(vma, 0, 0);
+
 		while (start != end) {
 			if (!tlb_start_valid) {
 				tlb_start = start;
@@ -1430,6 +1443,7 @@ out:
 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn)
 {
+	int ret;
 	/*
 	 * Technically, architectures with pte_special can avoid all these
 	 * restrictions (same for remap_pfn_range). However we would like
@@ -1444,7 +1458,15 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 
 	if (addr < vma->vm_start || addr >= vma->vm_end)
 		return -EFAULT;
-	return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+	if (track_pfn_vma_new(vma, vma->vm_page_prot, pfn, PAGE_SIZE))
+		return -EINVAL;
+
+	ret = insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+
+	if (ret)
+		untrack_pfn_vma(vma, pfn, PAGE_SIZE);
+
+	return ret;
 }
 EXPORT_SYMBOL(vm_insert_pfn);
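With this change vm_insert_pfn() gains a new failure mode: -EINVAL when track_pfn_vma_new() refuses the reservation. A minimal sketch of a typical caller, a hypothetical driver fault handler (mydrv_base_pfn is assumed, not from this commit):

static unsigned long mydrv_base_pfn;

static int mydrv_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long pfn = mydrv_base_pfn + vmf->pgoff;

	/* the new -EINVAL from track_pfn_vma_new() lands here too */
	if (vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn))
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;
}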
@@ -1575,14 +1597,17 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 	 * behaviour that some programs depend on. We mark the "original"
 	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
 	 */
-	if (is_cow_mapping(vma->vm_flags)) {
-		if (addr != vma->vm_start || end != vma->vm_end)
-			return -EINVAL;
+	if (addr == vma->vm_start && end == vma->vm_end)
 		vma->vm_pgoff = pfn;
-	}
+	else if (is_cow_mapping(vma->vm_flags))
+		return -EINVAL;
 
 	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
 
+	err = track_pfn_vma_new(vma, prot, pfn, PAGE_ALIGN(size));
+	if (err)
+		return -EINVAL;
+
 	BUG_ON(addr >= end);
 	pfn -= addr >> PAGE_SHIFT;
 	pgd = pgd_offset(mm, addr);
@@ -1594,6 +1619,10 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 		if (err)
 			break;
 	} while (pgd++, addr = next, addr != end);
+
+	if (err)
+		untrack_pfn_vma(vma, pfn, PAGE_ALIGN(size));
+
 	return err;
 }
 EXPORT_SYMBOL(remap_pfn_range);
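remap_pfn_range() picks up the same PAT bookkeeping: the reservation is taken once for the whole range and rolled back if the page tables cannot be filled. A minimal sketch of the usual mmap-time caller (hypothetical driver; mydrv_phys_base is assumed):

static resource_size_t mydrv_phys_base;

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn = (unsigned long)(mydrv_phys_base >> PAGE_SHIFT)
				+ vma->vm_pgoff;

	/* may now fail with -EINVAL if track_pfn_vma_new() refuses */
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}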
@@ -2865,9 +2894,9 @@ int in_gate_area_no_task(unsigned long addr)
 #endif	/* __HAVE_ARCH_GATE_AREA */
 
 #ifdef CONFIG_HAVE_IOREMAP_PROT
-static resource_size_t follow_phys(struct vm_area_struct *vma,
-			unsigned long address, unsigned int flags,
-			unsigned long *prot)
+int follow_phys(struct vm_area_struct *vma,
+		unsigned long address, unsigned int flags,
+		unsigned long *prot, resource_size_t *phys)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -2876,24 +2905,26 @@ static resource_size_t follow_phys(struct vm_area_struct *vma,
 	spinlock_t *ptl;
 	resource_size_t phys_addr = 0;
 	struct mm_struct *mm = vma->vm_mm;
+	int ret = -EINVAL;
 
-	VM_BUG_ON(!(vma->vm_flags & (VM_IO | VM_PFNMAP)));
+	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+		goto out;
 
 	pgd = pgd_offset(mm, address);
 	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-		goto no_page_table;
+		goto out;
 
 	pud = pud_offset(pgd, address);
 	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
-		goto no_page_table;
+		goto out;
 
 	pmd = pmd_offset(pud, address);
 	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
-		goto no_page_table;
+		goto out;
 
 	/* We cannot handle huge page PFN maps. Luckily they don't exist. */
 	if (pmd_huge(*pmd))
-		goto no_page_table;
+		goto out;
 
 	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
 	if (!ptep)
@@ -2908,13 +2939,13 @@ static resource_size_t follow_phys(struct vm_area_struct *vma,
 	phys_addr <<= PAGE_SHIFT; /* Shift here to avoid overflow on PAE */
 
 	*prot = pgprot_val(pte_pgprot(pte));
+	*phys = phys_addr;
+	ret = 0;
 
 unlock:
 	pte_unmap_unlock(ptep, ptl);
 out:
-	return phys_addr;
-no_page_table:
-	return 0;
+	return ret;
 }
 
 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
@@ -2925,12 +2956,7 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
 	void *maddr;
 	int offset = addr & (PAGE_SIZE-1);
 
-	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
-		return -EINVAL;
-
-	phys_addr = follow_phys(vma, addr, write, &prot);
-
-	if (!phys_addr)
+	if (follow_phys(vma, addr, write, &prot, &phys_addr))
 		return -EINVAL;
 
 	maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);
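follow_phys() changes from returning the physical address (with 0 doubling as failure) to returning an int error code with the address in an out-parameter, as generic_access_phys() above now relies on. A sketch of a caller under the new contract (mydrv_addr_to_phys is hypothetical):

static int mydrv_addr_to_phys(struct vm_area_struct *vma,
			      unsigned long addr, resource_size_t *phys)
{
	unsigned long prot;

	/* 0 on success; -EINVAL for a missing pte or a non-IO vma */
	return follow_phys(vma, addr, 0, &prot, phys);
}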
mm/mempolicy.c
@@ -1114,6 +1114,7 @@ asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
 		const unsigned long __user *old_nodes,
 		const unsigned long __user *new_nodes)
 {
+	const struct cred *cred = current_cred(), *tcred;
 	struct mm_struct *mm;
 	struct task_struct *task;
 	nodemask_t old;
@@ -1148,12 +1149,16 @@ asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
 	 * capabilities, superuser privileges or the same
 	 * userid as the target process.
 	 */
-	if ((current->euid != task->suid) && (current->euid != task->uid) &&
-	    (current->uid != task->suid) && (current->uid != task->uid) &&
+	rcu_read_lock();
+	tcred = __task_cred(task);
+	if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
+	    cred->uid != tcred->suid && cred->uid != tcred->uid &&
 	    !capable(CAP_SYS_NICE)) {
+		rcu_read_unlock();
 		err = -EPERM;
 		goto out;
 	}
+	rcu_read_unlock();
 
 	task_nodes = cpuset_mems_allowed(task);
 	/* Is the user allowed to access the target nodes? */
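This is part of the COW-credentials conversion: task->uid and friends move into struct cred, read through __task_cred() under RCU. The same pattern recurs in sys_move_pages() below. A minimal standalone sketch of the check (may_act_on is hypothetical):

static int may_act_on(struct task_struct *task)
{
	const struct cred *cred = current_cred(), *tcred;
	int match;

	rcu_read_lock();
	tcred = __task_cred(task);	/* RCU-protected snapshot */
	match = cred->euid == tcred->suid || cred->euid == tcred->uid ||
		cred->uid == tcred->suid || cred->uid == tcred->uid;
	rcu_read_unlock();

	return (match || capable(CAP_SYS_NICE)) ? 0 : -EPERM;
}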
11 mm/migrate.c
@@ -998,7 +998,7 @@ static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
 		unsigned long addr = (unsigned long)(*pages);
 		struct vm_area_struct *vma;
 		struct page *page;
-		int err;
+		int err = -EFAULT;
 
 		vma = find_vma(mm, addr);
 		if (!vma)
@@ -1075,6 +1075,7 @@ asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
 		const int __user *nodes,
 		int __user *status, int flags)
 {
+	const struct cred *cred = current_cred(), *tcred;
 	struct task_struct *task;
 	struct mm_struct *mm;
 	int err;
@@ -1105,12 +1106,16 @@ asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
 	 * capabilities, superuser privileges or the same
 	 * userid as the target process.
 	 */
-	if ((current->euid != task->suid) && (current->euid != task->uid) &&
-	    (current->uid != task->suid) && (current->uid != task->uid) &&
+	rcu_read_lock();
+	tcred = __task_cred(task);
+	if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
+	    cred->uid != tcred->suid && cred->uid != tcred->uid &&
 	    !capable(CAP_SYS_NICE)) {
+		rcu_read_unlock();
 		err = -EPERM;
 		goto out;
 	}
+	rcu_read_unlock();
 
 	err = security_task_movememory(task);
 	if (err)
45 mm/mlock.c
@@ -667,3 +667,48 @@ void user_shm_unlock(size_t size, struct user_struct *user)
 	spin_unlock(&shmlock_user_lock);
 	free_uid(user);
 }
+
+void *alloc_locked_buffer(size_t size)
+{
+	unsigned long rlim, vm, pgsz;
+	void *buffer = NULL;
+
+	pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+	down_write(&current->mm->mmap_sem);
+
+	rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
+	vm = current->mm->total_vm + pgsz;
+	if (rlim < vm)
+		goto out;
+
+	rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
+	vm = current->mm->locked_vm + pgsz;
+	if (rlim < vm)
+		goto out;
+
+	buffer = kzalloc(size, GFP_KERNEL);
+	if (!buffer)
+		goto out;
+
+	current->mm->total_vm += pgsz;
+	current->mm->locked_vm += pgsz;
+
+out:
+	up_write(&current->mm->mmap_sem);
+	return buffer;
+}
+
+void free_locked_buffer(void *buffer, size_t size)
+{
+	unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+	down_write(&current->mm->mmap_sem);
+
+	current->mm->total_vm -= pgsz;
+	current->mm->locked_vm -= pgsz;
+
+	up_write(&current->mm->mmap_sem);
+
+	kfree(buffer);
+}
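The new helpers charge a kzalloc'd buffer against RLIMIT_AS and RLIMIT_MEMLOCK under mmap_sem. A hypothetical caller (mydrv_do_work is not from this commit), showing that the free side must pass the same size so the accounting is undone exactly:

static int mydrv_do_work(size_t len)
{
	void *buf = alloc_locked_buffer(len);	/* charged to the rlimits */

	if (!buf)
		return -ENOMEM;
	/* ... fill and consume buf ... */
	free_locked_buffer(buf, len);		/* uncharges the same len */
	return 0;
}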
mm/oom_kill.c
@@ -128,8 +128,8 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
 	 * Superuser processes are usually more important, so we make it
 	 * less likely that we kill those.
 	 */
-	if (has_capability(p, CAP_SYS_ADMIN) ||
-	    has_capability(p, CAP_SYS_RESOURCE))
+	if (has_capability_noaudit(p, CAP_SYS_ADMIN) ||
+	    has_capability_noaudit(p, CAP_SYS_RESOURCE))
 		points /= 4;
 
 	/*
@@ -138,7 +138,7 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
 	 * tend to only have this flag set on applications they think
 	 * of as important.
 	 */
-	if (has_capability(p, CAP_SYS_RAWIO))
+	if (has_capability_noaudit(p, CAP_SYS_RAWIO))
 		points /= 4;
 
 	/*
@@ -299,9 +299,9 @@ static void dump_tasks(const struct mem_cgroup *mem)
 
 		task_lock(p);
 		printk(KERN_INFO "[%5d] %5d %5d %8lu %8lu %3d %3d %s\n",
-		       p->pid, p->uid, p->tgid, p->mm->total_vm,
-		       get_mm_rss(p->mm), (int)task_cpu(p), p->oomkilladj,
-		       p->comm);
+		       p->pid, __task_cred(p)->uid, p->tgid,
+		       p->mm->total_vm, get_mm_rss(p->mm), (int)task_cpu(p),
+		       p->oomkilladj, p->comm);
 		task_unlock(p);
 	} while_each_thread(g, p);
 }
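has_capability_noaudit() performs the same check as has_capability() but without emitting an audit record, which suits a heuristic like badness() that probes capabilities on every scanned task. A sketch of the pattern in isolation (scale_for_privilege is hypothetical):

static unsigned long scale_for_privilege(struct task_struct *p,
					 unsigned long points)
{
	/* advisory check only: no audit record on denial */
	if (has_capability_noaudit(p, CAP_SYS_ADMIN))
		points /= 4;
	return points;
}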
mm/shmem.c
@@ -1513,8 +1513,8 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
 	inode = new_inode(sb);
 	if (inode) {
 		inode->i_mode = mode;
-		inode->i_uid = current->fsuid;
-		inode->i_gid = current->fsgid;
+		inode->i_uid = current_fsuid();
+		inode->i_gid = current_fsgid();
 		inode->i_blocks = 0;
 		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
 		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
@@ -2278,8 +2278,8 @@ static int shmem_fill_super(struct super_block *sb,
 	sbinfo->max_blocks = 0;
 	sbinfo->max_inodes = 0;
 	sbinfo->mode = S_IRWXUGO | S_ISVTX;
-	sbinfo->uid = current->fsuid;
-	sbinfo->gid = current->fsgid;
+	sbinfo->uid = current_fsuid();
+	sbinfo->gid = current_fsgid();
 	sbinfo->mpol = NULL;
 	sb->s_fs_info = sbinfo;
 
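current->fsuid and current->fsgid likewise move behind credential accessors. A minimal sketch (mydrv_init_owner is hypothetical) of the same conversion anywhere new inodes take the caller's filesystem identity:

static void mydrv_init_owner(struct inode *inode)
{
	inode->i_uid = current_fsuid();	/* was: current->fsuid */
	inode->i_gid = current_fsgid();	/* was: current->fsgid */
}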
mm/slob.c
@@ -535,7 +535,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 	struct kmem_cache *c;
 
 	c = slob_alloc(sizeof(struct kmem_cache),
-		flags, ARCH_KMALLOC_MINALIGN, -1);
+		GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
 
 	if (c) {
 		c->name = name;
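The fix here: the 'flags' argument of kmem_cache_create() carries SLAB_* cache properties, not a gfp_t allocation mask, so slob must not forward it to the allocator; the metadata allocation itself uses GFP_KERNEL. A hypothetical caller showing why the two flag spaces differ (struct mydrv_obj is assumed):

struct mydrv_obj { int x; };

static struct kmem_cache *mydrv_cache;

static int __init mydrv_cache_init(void)
{
	/* SLAB_HWCACHE_ALIGN is a cache property, not an allocation mask */
	mydrv_cache = kmem_cache_create("mydrv_obj",
					sizeof(struct mydrv_obj), 0,
					SLAB_HWCACHE_ALIGN, NULL);
	return mydrv_cache ? 0 : -ENOMEM;
}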
mm/swapfile.c
@@ -1462,6 +1462,15 @@ static int __init procswaps_init(void)
 __initcall(procswaps_init);
 #endif /* CONFIG_PROC_FS */
 
+#ifdef MAX_SWAPFILES_CHECK
+static int __init max_swapfiles_check(void)
+{
+	MAX_SWAPFILES_CHECK();
+	return 0;
+}
+late_initcall(max_swapfiles_check);
+#endif
+
 /*
  * Written 01/25/92 by Simmule Turner, heavily changed by Linus.
  *
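MAX_SWAPFILES_CHECK() appears to be a compile-time (BUILD_BUG_ON-style) assertion; the late_initcall merely gives it a function body to expand in, so the check costs nothing at run time. The same pattern for an arbitrary invariant (my_invariants_check is hypothetical):

static int __init my_invariants_check(void)
{
	/* evaluated at compile time; the function itself does nothing */
	BUILD_BUG_ON(sizeof(unsigned long) < sizeof(void *));
	return 0;
}
late_initcall(my_invariants_check);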