uml: remove code made redundant by CHOOSE_MODE removal

This patch makes a number of simplifications enabled by the removal of
CHOOSE_MODE.  There were lots of functions that looked like

	int foo(args){
		foo_skas(args);
	}

The bodies of foo_skas are now folded into foo, and their declarations (and
sometimes entire header files) are deleted.

In addition, the union uml_pt_regs, which was a union between the tt and skas
register formats, is now a struct, with the tt-mode arm of the union being
removed.
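
Schematically (field lists elided; this is only meant to show the shape of
the change, not the literal header contents):

	/* before: one arm per mode */
	union uml_pt_regs {
		struct { ... } tt;	/* tt-mode layout, now gone */
		struct { ... } skas;	/* skas layout */
	};

	/* after: the skas layout as a plain struct */
	struct uml_pt_regs {
		...			/* regs[], is_user, ... */
	};

so accesses such as r->skas.regs and regs->skas.is_user in the hunks below
collapse to r->regs and regs->is_user.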

It turns out that usr2_handler was unused, so it is gone.

Signed-off-by: Jeff Dike <jdike@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Jeff Dike
2007-10-16 01:26:58 -07:00
committed by Linus Torvalds
Parent: ae2587e412
Commit: 77bf440031
63 changed files with 510 additions and 810 deletions


@@ -18,17 +18,31 @@
#include "irq_user.h"
#include "tlb.h"
#include "os.h"
#include "mode_kern.h"
#include "skas/skas.h"
void flush_thread(void)
{
void *data = NULL;
unsigned long end = proc_mm ? task_size : CONFIG_STUB_START;
int ret;
arch_flush_thread(&current->thread.arch);
flush_thread_skas();
ret = unmap(&current->mm->context.skas.id, 0, end, 1, &data);
if(ret){
printk("flush_thread - clearing address space failed, "
"err = %d\n", ret);
force_sig(SIGKILL, current);
}
__switch_mm(&current->mm->context.skas.id);
}
void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp)
{
start_thread_skas(regs, eip, esp);
set_fs(USER_DS);
PT_REGS_IP(regs) = eip;
PT_REGS_SP(regs) = esp;
}
#ifdef CONFIG_TTY_LOG


@@ -91,7 +91,7 @@ static struct irq_fd **last_irq_ptr = &active_fds;
extern void free_irqs(void);
void sigio_handler(int sig, union uml_pt_regs *regs)
void sigio_handler(int sig, struct uml_pt_regs *regs)
{
struct irq_fd *irq_fd;
int n;
@@ -344,7 +344,7 @@ int deactivate_all_fds(void)
* SMP cross-CPU interrupts have their own specific
* handlers).
*/
unsigned int do_IRQ(int irq, union uml_pt_regs *regs)
unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);
irq_enter();


@@ -10,7 +10,7 @@
#include "as-layout.h"
#include "init.h"
#include "kern.h"
#include "mode_kern.h"
#include "mem_user.h"
#include "os.h"
static int physmem_fd = -1;
@@ -61,7 +61,7 @@ static unsigned long kmem_top = 0;
unsigned long get_kmem_end(void)
{
if (kmem_top == 0)
kmem_top = kmem_end_skas;
kmem_top = host_task_size - 1024 * 1024;
return kmem_top;
}


@@ -43,8 +43,7 @@
#include "frame_kern.h"
#include "sigcontext.h"
#include "os.h"
#include "mode.h"
#include "mode_kern.h"
#include "skas.h"
/* This is a per-cpu array. A processor only modifies its entry and it only
* cares about its entry, so it's OK if another processor is modifying its
@@ -54,7 +53,8 @@ struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };
static inline int external_pid(struct task_struct *task)
{
return external_pid_skas(task);
/* FIXME: Need to look up userspace_pid by cpu */
return(userspace_pid[0]);
}
int pid_to_processor_id(int pid)
@@ -104,6 +104,8 @@ static inline void set_current(struct task_struct *task)
{ external_pid(task), task });
}
extern void arch_switch_to(struct task_struct *from, struct task_struct *to);
void *_switch_to(void *prev, void *next, void *last)
{
struct task_struct *from = prev;
@@ -114,7 +116,19 @@ void *_switch_to(void *prev, void *next, void *last)
do {
current->thread.saved_task = NULL;
switch_to_skas(prev, next);
/* XXX need to check runqueues[cpu].idle */
if(current->pid == 0)
switch_timers(0);
switch_threads(&from->thread.switch_buf,
&to->thread.switch_buf);
arch_switch_to(current->thread.prev_sched, current);
if(current->pid == 0)
switch_timers(1);
if(current->thread.saved_task)
show_regs(&(current->thread.regs));
next= current->thread.saved_task;
@@ -133,11 +147,6 @@ void interrupt_end(void)
do_signal();
}
void release_thread(struct task_struct *task)
{
release_thread_skas(task);
}
void exit_thread(void)
{
}
@@ -147,27 +156,95 @@ void *get_current(void)
return current;
}
extern void schedule_tail(struct task_struct *prev);
/* This is called magically, by its address being stuffed in a jmp_buf
* and being longjmp-d to.
*/
void new_thread_handler(void)
{
int (*fn)(void *), n;
void *arg;
if(current->thread.prev_sched != NULL)
schedule_tail(current->thread.prev_sched);
current->thread.prev_sched = NULL;
fn = current->thread.request.u.thread.proc;
arg = current->thread.request.u.thread.arg;
/* The return value is 1 if the kernel thread execs a process,
* 0 if it just exits
*/
n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
if(n == 1){
/* Handle any immediate reschedules or signals */
interrupt_end();
userspace(&current->thread.regs.regs);
}
else do_exit(0);
}
/* Called magically, see new_thread_handler above */
void fork_handler(void)
{
force_flush_all();
if(current->thread.prev_sched == NULL)
panic("blech");
schedule_tail(current->thread.prev_sched);
/* XXX: if interrupt_end() calls schedule, this call to
* arch_switch_to isn't needed. We could want to apply this to
* improve performance. -bb */
arch_switch_to(current->thread.prev_sched, current);
current->thread.prev_sched = NULL;
/* Handle any immediate reschedules or signals */
interrupt_end();
userspace(&current->thread.regs.regs);
}
int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
unsigned long stack_top, struct task_struct * p,
struct pt_regs *regs)
{
int ret;
void (*handler)(void);
int ret = 0;
p->thread = (struct thread_struct) INIT_THREAD;
ret = copy_thread_skas(nr, clone_flags, sp, stack_top, p, regs);
if (ret || !current->thread.forking)
goto out;
if(current->thread.forking){
memcpy(&p->thread.regs.regs, &regs->regs,
sizeof(p->thread.regs.regs));
REGS_SET_SYSCALL_RETURN(p->thread.regs.regs.regs, 0);
if(sp != 0)
REGS_SP(p->thread.regs.regs.regs) = sp;
clear_flushed_tls(p);
handler = fork_handler;
/*
* Set a new TLS for the child thread?
*/
if (clone_flags & CLONE_SETTLS)
ret = arch_copy_tls(p);
arch_copy_thread(&current->thread.arch, &p->thread.arch);
}
else {
init_thread_registers(&p->thread.regs.regs);
p->thread.request.u.thread = current->thread.request.u.thread;
handler = new_thread_handler;
}
new_thread(task_stack_page(p), &p->thread.switch_buf, handler);
if (current->thread.forking) {
clear_flushed_tls(p);
/*
* Set a new TLS for the child thread?
*/
if (clone_flags & CLONE_SETTLS)
ret = arch_copy_tls(p);
}
out:
return ret;
}
@@ -198,7 +275,8 @@ void default_idle(void)
void cpu_idle(void)
{
init_idle_skas();
cpu_tasks[current_thread->cpu].pid = os_getpid();
default_idle();
}
void *um_virt_to_phys(struct task_struct *task, unsigned long addr,


@@ -228,7 +228,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
#ifdef PTRACE_ARCH_PRCTL
case PTRACE_ARCH_PRCTL:
/* XXX Calls ptrace on the host - needs some SMP thinking */
ret = arch_prctl_skas(child, data, (void *) addr);
ret = arch_prctl(child, data, (void *) addr);
break;
#endif
default:
@@ -239,7 +239,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
return ret;
}
void send_sigtrap(struct task_struct *tsk, union uml_pt_regs *regs,
void send_sigtrap(struct task_struct *tsk, struct uml_pt_regs *regs,
int error_code)
{
struct siginfo info;
@@ -258,7 +258,7 @@ void send_sigtrap(struct task_struct *tsk, union uml_pt_regs *regs,
/* XXX Check PT_DTRACE vs TIF_SINGLESTEP for singlestepping check and
* PT_PTRACED vs TIF_SYSCALL_TRACE for syscall tracing check
*/
void syscall_trace(union uml_pt_regs *regs, int entryexit)
void syscall_trace(struct uml_pt_regs *regs, int entryexit)
{
int is_singlestep = (current->ptrace & PT_DTRACE) && entryexit;
int tracesysgood;


@@ -9,13 +9,30 @@
#include "kern_util.h"
#include "kern.h"
#include "os.h"
#include "mode.h"
#include "skas.h"
void (*pm_power_off)(void);
static void kill_off_processes(void)
{
kill_off_processes_skas();
if(proc_mm)
/*
* FIXME: need to loop over userspace_pids
*/
os_kill_ptraced_process(userspace_pid[0], 1);
else {
struct task_struct *p;
int pid, me;
me = os_getpid();
for_each_process(p){
if(p->mm == NULL)
continue;
pid = p->mm->context.skas.id.u.pid;
os_kill_ptraced_process(pid, 1);
}
}
}
void uml_cleanup(void)


@@ -23,7 +23,6 @@
#include "kern.h"
#include "frame_kern.h"
#include "sigcontext.h"
#include "mode.h"
EXPORT_SYMBOL(block_signals);
EXPORT_SYMBOL(unblock_signals);


@@ -3,7 +3,7 @@
# Licensed under the GPL
#
obj-y := clone.o exec.o mem.o mmu.o process.o syscall.o tlb.o uaccess.o
obj-y := clone.o mmu.o process.o syscall.o uaccess.o
# clone.o is in the stub, so it can't be built with profiling
# GCC hardened also auto-enables -fpic, but we need %ebx so it can't work ->


@@ -1,40 +0,0 @@
/*
* Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
* Licensed under the GPL
*/
#include "linux/kernel.h"
#include "asm/current.h"
#include "asm/page.h"
#include "asm/signal.h"
#include "asm/ptrace.h"
#include "asm/uaccess.h"
#include "asm/mmu_context.h"
#include "tlb.h"
#include "skas.h"
#include "um_mmu.h"
#include "os.h"
void flush_thread_skas(void)
{
void *data = NULL;
unsigned long end = proc_mm ? task_size : CONFIG_STUB_START;
int ret;
ret = unmap(&current->mm->context.skas.id, 0, end, 1, &data);
if(ret){
printk("flush_thread_skas - clearing address space failed, "
"err = %d\n", ret);
force_sig(SIGKILL, current);
}
switch_mm_skas(&current->mm->context.skas.id);
}
void start_thread_skas(struct pt_regs *regs, unsigned long eip,
unsigned long esp)
{
set_fs(USER_DS);
PT_REGS_IP(regs) = eip;
PT_REGS_SP(regs) = esp;
}


@@ -1,22 +0,0 @@
/*
* Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
* Licensed under the GPL
*/
#include "linux/mm.h"
#include "asm/pgtable.h"
#include "mem_user.h"
#include "skas.h"
unsigned long set_task_sizes_skas(unsigned long *task_size_out)
{
/* Round up to the nearest 4M */
unsigned long host_task_size = ROUND_4M((unsigned long)
&host_task_size);
if (!skas_needs_stub)
*task_size_out = host_task_size;
else *task_size_out = CONFIG_STUB_START & PGDIR_MASK;
return host_task_size;
}


@@ -71,7 +71,7 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
return(-ENOMEM);
}
int init_new_context_skas(struct task_struct *task, struct mm_struct *mm)
int init_new_context(struct task_struct *task, struct mm_struct *mm)
{
struct mmu_context_skas *from_mm = NULL;
struct mmu_context_skas *to_mm = &mm->context.skas;
@@ -137,7 +137,7 @@ int init_new_context_skas(struct task_struct *task, struct mm_struct *mm)
return ret;
}
void destroy_context_skas(struct mm_struct *mm)
void destroy_context(struct mm_struct *mm)
{
struct mmu_context_skas *mmu = &mm->context.skas;


@@ -18,129 +18,22 @@
#include "os.h"
#include "tlb.h"
#include "kern.h"
#include "mode.h"
#include "registers.h"
void switch_to_skas(void *prev, void *next)
{
struct task_struct *from, *to;
from = prev;
to = next;
/* XXX need to check runqueues[cpu].idle */
if(current->pid == 0)
switch_timers(0);
switch_threads(&from->thread.mode.skas.switch_buf,
&to->thread.mode.skas.switch_buf);
arch_switch_to_skas(current->thread.prev_sched, current);
if(current->pid == 0)
switch_timers(1);
}
extern void schedule_tail(struct task_struct *prev);
/* This is called magically, by its address being stuffed in a jmp_buf
* and being longjmp-d to.
*/
void new_thread_handler(void)
{
int (*fn)(void *), n;
void *arg;
if(current->thread.prev_sched != NULL)
schedule_tail(current->thread.prev_sched);
current->thread.prev_sched = NULL;
fn = current->thread.request.u.thread.proc;
arg = current->thread.request.u.thread.arg;
/* The return value is 1 if the kernel thread execs a process,
* 0 if it just exits
*/
n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
if(n == 1){
/* Handle any immediate reschedules or signals */
interrupt_end();
userspace(&current->thread.regs.regs);
}
else do_exit(0);
}
void release_thread_skas(struct task_struct *task)
{
}
/* Called magically, see new_thread_handler above */
void fork_handler(void)
{
force_flush_all();
if(current->thread.prev_sched == NULL)
panic("blech");
schedule_tail(current->thread.prev_sched);
/* XXX: if interrupt_end() calls schedule, this call to
* arch_switch_to_skas isn't needed. We could want to apply this to
* improve performance. -bb */
arch_switch_to_skas(current->thread.prev_sched, current);
current->thread.prev_sched = NULL;
/* Handle any immediate reschedules or signals */
interrupt_end();
userspace(&current->thread.regs.regs);
}
int copy_thread_skas(int nr, unsigned long clone_flags, unsigned long sp,
unsigned long stack_top, struct task_struct * p,
struct pt_regs *regs)
{
void (*handler)(void);
if(current->thread.forking){
memcpy(&p->thread.regs.regs.skas, &regs->regs.skas,
sizeof(p->thread.regs.regs.skas));
REGS_SET_SYSCALL_RETURN(p->thread.regs.regs.skas.regs, 0);
if(sp != 0) REGS_SP(p->thread.regs.regs.skas.regs) = sp;
handler = fork_handler;
arch_copy_thread(&current->thread.arch, &p->thread.arch);
}
else {
init_thread_registers(&p->thread.regs.regs);
p->thread.request.u.thread = current->thread.request.u.thread;
handler = new_thread_handler;
}
new_thread(task_stack_page(p), &p->thread.mode.skas.switch_buf,
handler);
return(0);
}
int new_mm(unsigned long stack)
{
int fd;
fd = os_open_file("/proc/mm", of_cloexec(of_write(OPENFLAGS())), 0);
if(fd < 0)
return(fd);
return fd;
if(skas_needs_stub)
map_stub_pages(fd, CONFIG_STUB_CODE, CONFIG_STUB_DATA, stack);
return(fd);
}
void init_idle_skas(void)
{
cpu_tasks[current_thread->cpu].pid = os_getpid();
default_idle();
return fd;
}
extern void start_kernel(void);
@@ -158,14 +51,14 @@ static int __init start_kernel_proc(void *unused)
cpu_online_map = cpumask_of_cpu(0);
#endif
start_kernel();
return(0);
return 0;
}
extern int userspace_pid[];
extern char cpu0_irqstack[];
int __init start_uml_skas(void)
int __init start_uml(void)
{
stack_protections((unsigned long) &cpu0_irqstack);
set_sigstack(cpu0_irqstack, THREAD_SIZE);
@@ -176,49 +69,14 @@ int __init start_uml_skas(void)
init_task.thread.request.u.thread.proc = start_kernel_proc;
init_task.thread.request.u.thread.arg = NULL;
return(start_idle_thread(task_stack_page(&init_task),
&init_task.thread.mode.skas.switch_buf));
}
int external_pid_skas(struct task_struct *task)
{
/* FIXME: Need to look up userspace_pid by cpu */
return(userspace_pid[0]);
}
int thread_pid_skas(struct task_struct *task)
{
/* FIXME: Need to look up userspace_pid by cpu */
return(userspace_pid[0]);
}
void kill_off_processes_skas(void)
{
if(proc_mm)
/*
* FIXME: need to loop over userspace_pids in
* kill_off_processes_skas
*/
os_kill_ptraced_process(userspace_pid[0], 1);
else {
struct task_struct *p;
int pid, me;
me = os_getpid();
for_each_process(p){
if(p->mm == NULL)
continue;
pid = p->mm->context.skas.id.u.pid;
os_kill_ptraced_process(pid, 1);
}
}
return start_idle_thread(task_stack_page(&init_task),
&init_task.thread.switch_buf);
}
unsigned long current_stub_stack(void)
{
if(current->mm == NULL)
return(0);
return 0;
return(current->mm->context.skas.id.stack);
return current->mm->context.skas.id.stack;
}


@@ -13,7 +13,7 @@
#include "kern_util.h"
#include "syscall.h"
void handle_syscall(union uml_pt_regs *r)
void handle_syscall(struct uml_pt_regs *r)
{
struct pt_regs *regs = container_of(r, struct pt_regs, regs);
long result;
@@ -37,7 +37,7 @@ void handle_syscall(union uml_pt_regs *r)
result = -ENOSYS;
else result = EXECUTE_SYSCALL(syscall, regs);
REGS_SET_SYSCALL_RETURN(r->skas.regs, result);
REGS_SET_SYSCALL_RETURN(r->regs, result);
syscall_trace(r, 1);
}


@@ -1,164 +0,0 @@
/*
* Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
* Copyright 2003 PathScale, Inc.
* Licensed under the GPL
*/
#include "linux/stddef.h"
#include "linux/sched.h"
#include "linux/mm.h"
#include "asm/page.h"
#include "asm/pgtable.h"
#include "asm/mmu.h"
#include "mem_user.h"
#include "mem.h"
#include "skas.h"
#include "os.h"
#include "tlb.h"
static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
int finished, void **flush)
{
struct host_vm_op *op;
int i, ret = 0;
for(i = 0; i <= last && !ret; i++){
op = &ops[i];
switch(op->type){
case MMAP:
ret = map(&mmu->skas.id, op->u.mmap.addr,
op->u.mmap.len, op->u.mmap.prot,
op->u.mmap.fd, op->u.mmap.offset, finished,
flush);
break;
case MUNMAP:
ret = unmap(&mmu->skas.id, op->u.munmap.addr,
op->u.munmap.len, finished, flush);
break;
case MPROTECT:
ret = protect(&mmu->skas.id, op->u.mprotect.addr,
op->u.mprotect.len, op->u.mprotect.prot,
finished, flush);
break;
default:
printk("Unknown op type %d in do_ops\n", op->type);
break;
}
}
return ret;
}
extern int proc_mm;
static void fix_range(struct mm_struct *mm, unsigned long start_addr,
unsigned long end_addr, int force)
{
if(!proc_mm && (end_addr > CONFIG_STUB_START))
end_addr = CONFIG_STUB_START;
fix_range_common(mm, start_addr, end_addr, force, do_ops);
}
void __flush_tlb_one_skas(unsigned long addr)
{
flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
}
void flush_tlb_range_skas(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
if(vma->vm_mm == NULL)
flush_tlb_kernel_range_common(start, end);
else fix_range(vma->vm_mm, start, end, 0);
}
void flush_tlb_mm_skas(struct mm_struct *mm)
{
unsigned long end;
/* Don't bother flushing if this address space is about to be
* destroyed.
*/
if(atomic_read(&mm->mm_users) == 0)
return;
end = proc_mm ? task_size : CONFIG_STUB_START;
fix_range(mm, 0, end, 0);
}
void force_flush_all_skas(void)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma = mm->mmap;
while(vma != NULL) {
fix_range(mm, vma->vm_start, vma->vm_end, 1);
vma = vma->vm_next;
}
}
void flush_tlb_page_skas(struct vm_area_struct *vma, unsigned long address)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
struct mm_struct *mm = vma->vm_mm;
void *flush = NULL;
int r, w, x, prot, err = 0;
struct mm_id *mm_id;
pgd = pgd_offset(mm, address);
if(!pgd_present(*pgd))
goto kill;
pud = pud_offset(pgd, address);
if(!pud_present(*pud))
goto kill;
pmd = pmd_offset(pud, address);
if(!pmd_present(*pmd))
goto kill;
pte = pte_offset_kernel(pmd, address);
r = pte_read(*pte);
w = pte_write(*pte);
x = pte_exec(*pte);
if (!pte_young(*pte)) {
r = 0;
w = 0;
} else if (!pte_dirty(*pte)) {
w = 0;
}
mm_id = &mm->context.skas.id;
prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
(x ? UM_PROT_EXEC : 0));
if(pte_newpage(*pte)){
if(pte_present(*pte)){
unsigned long long offset;
int fd;
fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
1, &flush);
}
else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
}
else if(pte_newprot(*pte))
err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);
if(err)
goto kill;
*pte = pte_mkuptodate(*pte);
return;
kill:
printk("Failed to flush page for address 0x%lx\n", address);
force_sig(SIGKILL, current);
}


@@ -20,7 +20,6 @@
#include "asm/uaccess.h"
#include "kern_util.h"
#include "sysdep/syscalls.h"
#include "mode_kern.h"
/* Unlocked, I don't care if this is a bit off */
int nsyscalls = 0;


@@ -18,7 +18,6 @@
#include "asm/param.h"
#include "asm/current.h"
#include "kern_util.h"
#include "mode.h"
#include "os.h"
int hz(void)
@@ -39,7 +38,7 @@ static unsigned long long prev_nsecs[NR_CPUS];
static long long delta[NR_CPUS]; /* Deviation per interval */
#endif
void timer_irq(union uml_pt_regs *regs)
void timer_irq(struct uml_pt_regs *regs)
{
unsigned long long ticks = 0;
#ifdef CONFIG_UML_REAL_TIME_CLOCK
@@ -175,13 +174,13 @@ int do_settimeofday(struct timespec *tv)
return 0;
}
void timer_handler(int sig, union uml_pt_regs *regs)
void timer_handler(int sig, struct uml_pt_regs *regs)
{
if(current_thread->cpu == 0)
timer_irq(regs);
local_irq_disable();
irq_enter();
update_process_times((regs)->skas.is_user);
update_process_times(regs->is_user);
irq_exit();
local_irq_enable();
}


@@ -8,12 +8,12 @@
#include "asm/pgalloc.h"
#include "asm/pgtable.h"
#include "asm/tlbflush.h"
#include "mode_kern.h"
#include "as-layout.h"
#include "tlb.h"
#include "mem.h"
#include "mem_user.h"
#include "os.h"
#include "skas.h"
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
unsigned int prot, struct host_vm_op *ops, int *index,
@@ -341,6 +341,71 @@ int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
return(updated);
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
struct mm_struct *mm = vma->vm_mm;
void *flush = NULL;
int r, w, x, prot, err = 0;
struct mm_id *mm_id;
address &= PAGE_MASK;
pgd = pgd_offset(mm, address);
if(!pgd_present(*pgd))
goto kill;
pud = pud_offset(pgd, address);
if(!pud_present(*pud))
goto kill;
pmd = pmd_offset(pud, address);
if(!pmd_present(*pmd))
goto kill;
pte = pte_offset_kernel(pmd, address);
r = pte_read(*pte);
w = pte_write(*pte);
x = pte_exec(*pte);
if (!pte_young(*pte)) {
r = 0;
w = 0;
} else if (!pte_dirty(*pte)) {
w = 0;
}
mm_id = &mm->context.skas.id;
prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
(x ? UM_PROT_EXEC : 0));
if(pte_newpage(*pte)){
if(pte_present(*pte)){
unsigned long long offset;
int fd;
fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
1, &flush);
}
else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
}
else if(pte_newprot(*pte))
err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);
if(err)
goto kill;
*pte = pte_mkuptodate(*pte);
return;
kill:
printk("Failed to flush page for address 0x%lx\n", address);
force_sig(SIGKILL, current);
}
pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
return(pgd_offset(mm, address));
@@ -387,21 +452,80 @@ void flush_tlb_kernel_vm(void)
void __flush_tlb_one(unsigned long addr)
{
__flush_tlb_one_skas(addr);
flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
}
static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
int finished, void **flush)
{
struct host_vm_op *op;
int i, ret = 0;
for(i = 0; i <= last && !ret; i++){
op = &ops[i];
switch(op->type){
case MMAP:
ret = map(&mmu->skas.id, op->u.mmap.addr,
op->u.mmap.len, op->u.mmap.prot,
op->u.mmap.fd, op->u.mmap.offset, finished,
flush);
break;
case MUNMAP:
ret = unmap(&mmu->skas.id, op->u.munmap.addr,
op->u.munmap.len, finished, flush);
break;
case MPROTECT:
ret = protect(&mmu->skas.id, op->u.mprotect.addr,
op->u.mprotect.len, op->u.mprotect.prot,
finished, flush);
break;
default:
printk("Unknown op type %d in do_ops\n", op->type);
break;
}
}
return ret;
}
static void fix_range(struct mm_struct *mm, unsigned long start_addr,
unsigned long end_addr, int force)
{
if(!proc_mm && (end_addr > CONFIG_STUB_START))
end_addr = CONFIG_STUB_START;
fix_range_common(mm, start_addr, end_addr, force, do_ops);
}
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
flush_tlb_range_skas(vma, start, end);
if(vma->vm_mm == NULL)
flush_tlb_kernel_range_common(start, end);
else fix_range(vma->vm_mm, start, end, 0);
}
void flush_tlb_mm(struct mm_struct *mm)
{
flush_tlb_mm_skas(mm);
unsigned long end;
/* Don't bother flushing if this address space is about to be
* destroyed.
*/
if(atomic_read(&mm->mm_users) == 0)
return;
end = proc_mm ? task_size : CONFIG_STUB_START;
fix_range(mm, 0, end, 0);
}
void force_flush_all(void)
{
force_flush_all_skas();
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma = mm->mmap;
while(vma != NULL) {
fix_range(mm, vma->vm_start, vma->vm_end, 1);
vma = vma->vm_next;
}
}


@@ -128,7 +128,7 @@ static void bad_segv(struct faultinfo fi, unsigned long ip)
force_sig_info(SIGSEGV, &si, current);
}
static void segv_handler(int sig, union uml_pt_regs *regs)
static void segv_handler(int sig, struct uml_pt_regs *regs)
{
struct faultinfo * fi = UPT_FAULTINFO(regs);
@@ -146,7 +146,7 @@ static void segv_handler(int sig, union uml_pt_regs *regs)
* give us bad data!
*/
unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
union uml_pt_regs *regs)
struct uml_pt_regs *regs)
{
struct siginfo si;
void *catcher;
@@ -214,7 +214,7 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
return 0;
}
void relay_signal(int sig, union uml_pt_regs *regs)
void relay_signal(int sig, struct uml_pt_regs *regs)
{
if (arch_handle_signal(sig, regs))
return;
@@ -230,14 +230,14 @@ void relay_signal(int sig, union uml_pt_regs *regs)
force_sig(sig, current);
}
static void bus_handler(int sig, union uml_pt_regs *regs)
static void bus_handler(int sig, struct uml_pt_regs *regs)
{
if (current->thread.fault_catcher != NULL)
do_longjmp(current->thread.fault_catcher, 1);
else relay_signal(sig, regs);
}
static void winch(int sig, union uml_pt_regs *regs)
static void winch(int sig, struct uml_pt_regs *regs)
{
do_IRQ(WINCH_IRQ, regs);
}


@@ -35,8 +35,6 @@
#include "initrd.h"
#include "init.h"
#include "os.h"
#include "mode_kern.h"
#include "mode.h"
#include "skas.h"
#define DEFAULT_COMMAND_LINE "root=98:0"
@@ -67,7 +65,8 @@ struct cpuinfo_um boot_cpu_data = {
unsigned long thread_saved_pc(struct task_struct *task)
{
return os_process_pc(thread_pid_skas(task));
/* FIXME: Need to look up userspace_pid by cpu */
return os_process_pc(userspace_pid[0]);
}
/* Changed in setup_arch, which is called in early boot */
@@ -253,6 +252,19 @@ EXPORT_SYMBOL(end_iomem);
extern char __binary_start;
static unsigned long set_task_sizes_skas(unsigned long *task_size_out)
{
/* Round up to the nearest 4M */
unsigned long host_task_size = ROUND_4M((unsigned long)
&host_task_size);
if (!skas_needs_stub)
*task_size_out = host_task_size;
else *task_size_out = CONFIG_STUB_START & PGDIR_MASK;
return host_task_size;
}
int __init linux_main(int argc, char **argv)
{
unsigned long avail, diff;
@@ -289,7 +301,7 @@ int __init linux_main(int argc, char **argv)
os_fill_handlinfo(handlinfo_kern);
brk_start = (unsigned long) sbrk(0);
before_mem_skas(brk_start);
/* Increase physical memory size for exec-shield users
so they actually get what they asked for. This should
add zero for non-exec shield users */
@@ -354,7 +366,7 @@ int __init linux_main(int argc, char **argv)
stack_protections((unsigned long) &init_thread_info);
os_flush_stdout();
return start_uml_skas();
return start_uml();
}
extern int uml_exitcode;