um: Remove SKAS3/4 support

Before SKAS0 was introduced, UML had two modes of operation:
TT (tracing thread) and SKAS3/4 (separated kernel address space).
TT was known to be insecure and was removed a long time ago.
SKAS3/4 required a few (3 or 4) patches on the host side which never
went mainline. The last host patch is now 10 years old.

With SKAS0 mode (separated kernel address space using 0 host patches),
the default since 2005, SKAS3/4 is obsolete and can be removed.
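
For background: modify_ldt is the host syscall that UML issues on the
guest's behalf, and with SKAS3/4 gone it is always driven through the
syscall stub, as in write_ldt_entry() below. A minimal userspace
sketch of the read side (func 0, i.e. READLDT; illustrative only,
not part of this patch, assuming an x86 Linux host) could look like:

  #include <asm/ldt.h>		/* LDT_ENTRIES, LDT_ENTRY_SIZE */
  #include <stdio.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  int main(void)
  {
  	static unsigned char buf[LDT_ENTRIES * LDT_ENTRY_SIZE];
  	long n;

  	/* func 0 (READLDT) fills buf and returns the byte count */
  	n = syscall(SYS_modify_ldt, 0, buf, sizeof(buf));
  	if (n < 0) {
  		perror("modify_ldt");
  		return 1;
  	}
  	printf("host LDT: %ld bytes (%ld entries)\n",
  	       n, n / LDT_ENTRY_SIZE);
  	return 0;
  }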

Signed-off-by: Richard Weinberger <richard@nod.at>
diff --git a/arch/x86/um/ldt.c b/arch/x86/um/ldt.c
index 8e08176..5c0b711 100644
--- a/arch/x86/um/ldt.c
+++ b/arch/x86/um/ldt.c
@@ -8,9 +8,7 @@
 #include <linux/slab.h>
 #include <asm/unistd.h>
 #include <os.h>
-#include <proc_mm.h>
 #include <skas.h>
-#include <skas_ptrace.h>
 #include <sysdep/tls.h>
 
 extern int modify_ldt(int func, void *ptr, unsigned long bytecount);
@@ -19,105 +17,20 @@
 		     struct user_desc *desc, void **addr, int done)
 {
 	long res;
-
-	if (proc_mm) {
-		/*
-		 * This is a special handling for the case, that the mm to
-		 * modify isn't current->active_mm.
-		 * If this is called directly by modify_ldt,
-		 *     (current->active_mm->context.skas.u == mm_idp)
-		 * will be true. So no call to __switch_mm(mm_idp) is done.
-		 * If this is called in case of init_new_ldt or PTRACE_LDT,
-		 * mm_idp won't belong to current->active_mm, but child->mm.
-		 * So we need to switch child's mm into our userspace, then
-		 * later switch back.
-		 *
-		 * Note: I'm unsure: should interrupts be disabled here?
-		 */
-		if (!current->active_mm || current->active_mm == &init_mm ||
-		    mm_idp != &current->active_mm->context.id)
-			__switch_mm(mm_idp);
+	void *stub_addr;
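+
+	/*
+	 * Copy the descriptor into the stub data page; the length is
+	 * rounded up to a multiple of sizeof(long).
+	 */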
+	res = syscall_stub_data(mm_idp, (unsigned long *)desc,
+				(sizeof(*desc) + sizeof(long) - 1) &
+				    ~(sizeof(long) - 1),
+				addr, &stub_addr);
+	if (!res) {
+		unsigned long args[] = { func,
+					 (unsigned long)stub_addr,
+					 sizeof(*desc),
+					 0, 0, 0 };
+		res = run_syscall_stub(mm_idp, __NR_modify_ldt, args,
+				       0, addr, done);
 	}
 
-	if (ptrace_ldt) {
-		struct ptrace_ldt ldt_op = (struct ptrace_ldt) {
-			.func = func,
-			.ptr = desc,
-			.bytecount = sizeof(*desc)};
-		u32 cpu;
-		int pid;
-
-		if (!proc_mm)
-			pid = mm_idp->u.pid;
-		else {
-			cpu = get_cpu();
-			pid = userspace_pid[cpu];
-		}
-
-		res = os_ptrace_ldt(pid, 0, (unsigned long) &ldt_op);
-
-		if (proc_mm)
-			put_cpu();
-	}
-	else {
-		void *stub_addr;
-		res = syscall_stub_data(mm_idp, (unsigned long *)desc,
-					(sizeof(*desc) + sizeof(long) - 1) &
-					    ~(sizeof(long) - 1),
-					addr, &stub_addr);
-		if (!res) {
-			unsigned long args[] = { func,
-						 (unsigned long)stub_addr,
-						 sizeof(*desc),
-						 0, 0, 0 };
-			res = run_syscall_stub(mm_idp, __NR_modify_ldt, args,
-					       0, addr, done);
-		}
-	}
-
-	if (proc_mm) {
-		/*
-		 * This is the second part of special handling, that makes
-		 * PTRACE_LDT possible to implement.
-		 */
-		if (current->active_mm && current->active_mm != &init_mm &&
-		    mm_idp != &current->active_mm->context.id)
-			__switch_mm(&current->active_mm->context.id);
-	}
-
-	return res;
-}
-
-static long read_ldt_from_host(void __user * ptr, unsigned long bytecount)
-{
-	int res, n;
-	struct ptrace_ldt ptrace_ldt = (struct ptrace_ldt) {
-			.func = 0,
-			.bytecount = bytecount,
-			.ptr = kmalloc(bytecount, GFP_KERNEL)};
-	u32 cpu;
-
-	if (ptrace_ldt.ptr == NULL)
-		return -ENOMEM;
-
-	/*
-	 * This is called from sys_modify_ldt only, so userspace_pid gives
-	 * us the right number
-	 */
-
-	cpu = get_cpu();
-	res = os_ptrace_ldt(userspace_pid[cpu], 0, (unsigned long) &ptrace_ldt);
-	put_cpu();
-	if (res < 0)
-		goto out;
-
-	n = copy_to_user(ptr, ptrace_ldt.ptr, res);
-	if (n != 0)
-		res = -EFAULT;
-
-  out:
-	kfree(ptrace_ldt.ptr);
-
 	return res;
 }
 
@@ -145,9 +58,6 @@
 		bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
 	err = bytecount;
 
-	if (ptrace_ldt)
-		return read_ldt_from_host(ptr, bytecount);
-
 	mutex_lock(&ldt->lock);
 	if (ldt->entry_count <= LDT_DIRECT_ENTRIES) {
 		size = LDT_ENTRY_SIZE*LDT_DIRECT_ENTRIES;
@@ -229,17 +139,11 @@
 			goto out;
 	}
 
-	if (!ptrace_ldt)
-		mutex_lock(&ldt->lock);
+	mutex_lock(&ldt->lock);
 
 	err = write_ldt_entry(mm_idp, func, &ldt_info, &addr, 1);
 	if (err)
 		goto out_unlock;
-	else if (ptrace_ldt) {
-		/* With PTRACE_LDT available, this is used as a flag only */
-		ldt->entry_count = 1;
-		goto out;
-	}
 
 	if (ldt_info.entry_number >= ldt->entry_count &&
 	    ldt_info.entry_number >= LDT_DIRECT_ENTRIES) {
@@ -393,91 +297,56 @@
 	int i;
 	long page, err=0;
 	void *addr = NULL;
-	struct proc_mm_op copy;
 
 
-	if (!ptrace_ldt)
-		mutex_init(&new_mm->arch.ldt.lock);
+	mutex_init(&new_mm->arch.ldt.lock);
 
 	if (!from_mm) {
 		memset(&desc, 0, sizeof(desc));
 		/*
-		 * We have to initialize a clean ldt.
+		 * Now we try to retrieve info about the LDT we
+		 * inherited from the host. All LDT entries found
+		 * will be reset in the following loop.
 		 */
-		if (proc_mm) {
-			/*
-			 * If the new mm was created using proc_mm, host's
-			 * default-ldt currently is assigned, which normally
-			 * contains the call-gates for lcall7 and lcall27.
-			 * To remove these gates, we simply write an empty
-			 * entry as number 0 to the host.
-			 */
-			err = write_ldt_entry(&new_mm->id, 1, &desc, &addr, 1);
-		}
-		else{
-			/*
-			 * Now we try to retrieve info about the ldt, we
-			 * inherited from the host. All ldt-entries found
-			 * will be reset in the following loop
-			 */
-			ldt_get_host_info();
-			for (num_p=host_ldt_entries; *num_p != -1; num_p++) {
-				desc.entry_number = *num_p;
-				err = write_ldt_entry(&new_mm->id, 1, &desc,
-						      &addr, *(num_p + 1) == -1);
-				if (err)
-					break;
-			}
+		ldt_get_host_info();
+		for (num_p = host_ldt_entries; *num_p != -1; num_p++) {
+			desc.entry_number = *num_p;
+			err = write_ldt_entry(&new_mm->id, 1, &desc,
+					      &addr, *(num_p + 1) == -1);
+			if (err)
+				break;
 		}
 		new_mm->arch.ldt.entry_count = 0;
 
 		goto out;
 	}
 
-	if (proc_mm) {
-		/*
-		 * We have a valid from_mm, so we now have to copy the LDT of
-		 * from_mm to new_mm, because using proc_mm an new mm with
-		 * an empty/default LDT was created in new_mm()
-		 */
-		copy = ((struct proc_mm_op) { .op 	= MM_COPY_SEGMENTS,
-					      .u 	=
-					      { .copy_segments =
-							from_mm->id.u.mm_fd } } );
-		i = os_write_file(new_mm->id.u.mm_fd, &copy, sizeof(copy));
-		if (i != sizeof(copy))
-			printk(KERN_ERR "new_mm : /proc/mm copy_segments "
-			       "failed, err = %d\n", -i);
-	}
-
-	if (!ptrace_ldt) {
-		/*
-		 * Our local LDT is used to supply the data for
-		 * modify_ldt(READLDT), if PTRACE_LDT isn't available,
-		 * i.e., we have to use the stub for modify_ldt, which
-		 * can't handle the big read buffer of up to 64kB.
-		 */
-		mutex_lock(&from_mm->arch.ldt.lock);
-		if (from_mm->arch.ldt.entry_count <= LDT_DIRECT_ENTRIES)
-			memcpy(new_mm->arch.ldt.u.entries, from_mm->arch.ldt.u.entries,
-			       sizeof(new_mm->arch.ldt.u.entries));
-		else {
-			i = from_mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;
-			while (i-->0) {
-				page = __get_free_page(GFP_KERNEL|__GFP_ZERO);
-				if (!page) {
-					err = -ENOMEM;
-					break;
-				}
-				new_mm->arch.ldt.u.pages[i] =
-					(struct ldt_entry *) page;
-				memcpy(new_mm->arch.ldt.u.pages[i],
-				       from_mm->arch.ldt.u.pages[i], PAGE_SIZE);
+	/*
+	 * Our local LDT is used to supply the data for
+	 * modify_ldt(READLDT): the syscall stub used for
+	 * modify_ldt can't handle the big read buffer of
+	 * up to 64kB.
+	 */
+	mutex_lock(&from_mm->arch.ldt.lock);
+	if (from_mm->arch.ldt.entry_count <= LDT_DIRECT_ENTRIES)
+		memcpy(new_mm->arch.ldt.u.entries, from_mm->arch.ldt.u.entries,
+		       sizeof(new_mm->arch.ldt.u.entries));
+	else {
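+		/*
+		 * The LDT no longer fits the direct entries and lives
+		 * on separately allocated pages, so deep-copy each page.
+		 */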
+		i = from_mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;
+		while (i-- > 0) {
+			page = __get_free_page(GFP_KERNEL|__GFP_ZERO);
+			if (!page) {
+				err = -ENOMEM;
+				break;
 			}
+			new_mm->arch.ldt.u.pages[i] =
+				(struct ldt_entry *) page;
+			memcpy(new_mm->arch.ldt.u.pages[i],
+			       from_mm->arch.ldt.u.pages[i], PAGE_SIZE);
 		}
-		new_mm->arch.ldt.entry_count = from_mm->arch.ldt.entry_count;
-		mutex_unlock(&from_mm->arch.ldt.lock);
 	}
+	new_mm->arch.ldt.entry_count = from_mm->arch.ldt.entry_count;
+	mutex_unlock(&from_mm->arch.ldt.lock);
 
     out:
 	return err;
@@ -488,7 +357,7 @@
 {
 	int i;
 
-	if (!ptrace_ldt && mm->arch.ldt.entry_count > LDT_DIRECT_ENTRIES) {
+	if (mm->arch.ldt.entry_count > LDT_DIRECT_ENTRIES) {
 		i = mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;
 		while (i-- > 0)
 			free_page((long) mm->arch.ldt.u.pages[i]);