um: Remove SKAS3/4 support
Before we had SKAS0, UML had two modes of operation: TT (tracing thread) and SKAS3/4 (separate kernel address space). TT was known to be insecure and was removed a long time ago. SKAS3/4 required a few (3 or 4) patches on the host side which never went mainline. The last host patch is 10 years old. With SKAS0 mode (separate kernel address space using 0 host patches), the default since 2005, SKAS3/4 is obsolete and can be removed.

Signed-off-by: Richard Weinberger <richard@nod.at>
@@ -8,9 +8,6 @@
 #include <linux/sched.h>
 #include <linux/tracehook.h>
 #include <asm/uaccess.h>
-#include <skas_ptrace.h>
-
-
 
 void user_enable_single_step(struct task_struct *child)
 {
@@ -104,35 +101,6 @@ long arch_ptrace(struct task_struct *child, long request,
 		ret = ptrace_set_thread_area(child, addr, vp);
 		break;
 
-	case PTRACE_FAULTINFO: {
-		/*
-		 * Take the info from thread->arch->faultinfo,
-		 * but transfer max. sizeof(struct ptrace_faultinfo).
-		 * On i386, ptrace_faultinfo is smaller!
-		 */
-		ret = copy_to_user(p, &child->thread.arch.faultinfo,
-				   sizeof(struct ptrace_faultinfo)) ?
-			-EIO : 0;
-		break;
-	}
-
-#ifdef PTRACE_LDT
-	case PTRACE_LDT: {
-		struct ptrace_ldt ldt;
-
-		if (copy_from_user(&ldt, p, sizeof(ldt))) {
-			ret = -EIO;
-			break;
-		}
-
-		/*
-		 * This one is confusing, so just punt and return -EIO for
-		 * now
-		 */
-		ret = -EIO;
-		break;
-	}
-#endif
 	default:
 		ret = ptrace_request(child, request, addr, data);
 		if (ret == -EIO)
@@ -15,28 +15,21 @@ void (*pm_power_off)(void);
 
 static void kill_off_processes(void)
 {
-	if (proc_mm)
-		/*
-		 * FIXME: need to loop over userspace_pids
-		 */
-		os_kill_ptraced_process(userspace_pid[0], 1);
-	else {
-		struct task_struct *p;
-		int pid;
+	struct task_struct *p;
+	int pid;
 
-		read_lock(&tasklist_lock);
-		for_each_process(p) {
-			struct task_struct *t;
+	read_lock(&tasklist_lock);
+	for_each_process(p) {
+		struct task_struct *t;
 
-			t = find_lock_task_mm(p);
-			if (!t)
-				continue;
-			pid = t->mm->context.id.u.pid;
-			task_unlock(t);
-			os_kill_ptraced_process(pid, 1);
-		}
-		read_unlock(&tasklist_lock);
-	}
+		t = find_lock_task_mm(p);
+		if (!t)
+			continue;
+		pid = t->mm->context.id.u.pid;
+		task_unlock(t);
+		os_kill_ptraced_process(pid, 1);
+	}
+	read_unlock(&tasklist_lock);
 }
 
 void uml_cleanup(void)
@@ -54,35 +54,22 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
 	unsigned long stack = 0;
 	int ret = -ENOMEM;
 
-	if (skas_needs_stub) {
-		stack = get_zeroed_page(GFP_KERNEL);
-		if (stack == 0)
-			goto out;
-	}
+	stack = get_zeroed_page(GFP_KERNEL);
+	if (stack == 0)
+		goto out;
 
 	to_mm->id.stack = stack;
 	if (current->mm != NULL && current->mm != &init_mm)
 		from_mm = &current->mm->context;
 
-	if (proc_mm) {
-		ret = new_mm(stack);
-		if (ret < 0) {
-			printk(KERN_ERR "init_new_context_skas - "
-			       "new_mm failed, errno = %d\n", ret);
-			goto out_free;
-		}
-		to_mm->id.u.mm_fd = ret;
-	}
-	else {
-		if (from_mm)
-			to_mm->id.u.pid = copy_context_skas0(stack,
-							     from_mm->id.u.pid);
-		else to_mm->id.u.pid = start_userspace(stack);
+	if (from_mm)
+		to_mm->id.u.pid = copy_context_skas0(stack,
+						     from_mm->id.u.pid);
+	else to_mm->id.u.pid = start_userspace(stack);
 
-		if (to_mm->id.u.pid < 0) {
-			ret = to_mm->id.u.pid;
-			goto out_free;
-		}
-	}
+	if (to_mm->id.u.pid < 0) {
+		ret = to_mm->id.u.pid;
+		goto out_free;
+	}
 
 	ret = init_new_ldt(to_mm, from_mm);
@@ -105,9 +92,6 @@ void uml_setup_stubs(struct mm_struct *mm)
 {
 	int err, ret;
 
-	if (!skas_needs_stub)
-		return;
-
 	ret = init_stub_pte(mm, STUB_CODE,
 			    (unsigned long) &__syscall_stub_start);
 	if (ret)
@@ -154,25 +138,19 @@ void destroy_context(struct mm_struct *mm)
 {
 	struct mm_context *mmu = &mm->context;
 
-	if (proc_mm)
-		os_close_file(mmu->id.u.mm_fd);
-	else {
-		/*
-		 * If init_new_context wasn't called, this will be
-		 * zero, resulting in a kill(0), which will result in the
-		 * whole UML suddenly dying. Also, cover negative and
-		 * 1 cases, since they shouldn't happen either.
-		 */
-		if (mmu->id.u.pid < 2) {
-			printk(KERN_ERR "corrupt mm_context - pid = %d\n",
-			       mmu->id.u.pid);
-			return;
-		}
-		os_kill_ptraced_process(mmu->id.u.pid, 1);
-	}
+	/*
+	 * If init_new_context wasn't called, this will be
+	 * zero, resulting in a kill(0), which will result in the
+	 * whole UML suddenly dying. Also, cover negative and
+	 * 1 cases, since they shouldn't happen either.
+	 */
+	if (mmu->id.u.pid < 2) {
+		printk(KERN_ERR "corrupt mm_context - pid = %d\n",
+		       mmu->id.u.pid);
+		return;
+	}
+	os_kill_ptraced_process(mmu->id.u.pid, 1);
 
-	if (skas_needs_stub)
-		free_page(mmu->id.stack);
-
+	free_page(mmu->id.stack);
 	free_ldt(mmu);
 }
@@ -10,25 +10,6 @@
 #include <os.h>
 #include <skas.h>
 
-int new_mm(unsigned long stack)
-{
-	int fd, err;
-
-	fd = os_open_file("/proc/mm", of_cloexec(of_write(OPENFLAGS())), 0);
-	if (fd < 0)
-		return fd;
-
-	if (skas_needs_stub) {
-		err = map_stub_pages(fd, STUB_CODE, STUB_DATA, stack);
-		if (err) {
-			os_close_file(fd);
-			return err;
-		}
-	}
-
-	return fd;
-}
-
 extern void start_kernel(void);
 
 static int __init start_kernel_proc(void *unused)
@@ -55,14 +36,6 @@ int __init start_uml(void)
 {
 	stack_protections((unsigned long) &cpu0_irqstack);
 	set_sigstack(cpu0_irqstack, THREAD_SIZE);
-	if (proc_mm) {
-		userspace_pid[0] = start_userspace(0);
-		if (userspace_pid[0] < 0) {
-			printf("start_uml - start_userspace returned %d\n",
-			       userspace_pid[0]);
-			exit(1);
-		}
-	}
 
 	init_new_thread_signals();
 
@@ -220,7 +220,7 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
 		panic("Segfault with no mm");
 	}
 
-	if (SEGV_IS_FIXABLE(&fi) || SEGV_MAYBE_FIXABLE(&fi))
+	if (SEGV_IS_FIXABLE(&fi))
 		err = handle_page_fault(address, ip, is_write, is_user,
 					&si.si_code);
 	else {
@@ -268,7 +268,6 @@ int __init linux_main(int argc, char **argv)
 	unsigned long stack;
 	unsigned int i;
 	int add;
-	char * mode;
 
 	for (i = 1; i < argc; i++) {
 		if ((i == 1) && (argv[i][0] == ' '))
@@ -291,15 +290,6 @@ int __init linux_main(int argc, char **argv)
 	/* OS sanity checks that need to happen before the kernel runs */
 	os_early_checks();
 
-	can_do_skas();
-
-	if (proc_mm && ptrace_faultinfo)
-		mode = "SKAS3";
-	else
-		mode = "SKAS0";
-
-	printf("UML running in %s mode\n", mode);
-
 	brk_start = (unsigned long) sbrk(0);
 
 	/*