Linux-2.6.12-rc2

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
new file mode 100644
index 0000000..ab79af8
--- /dev/null
+++ b/arch/s390/Kconfig
@@ -0,0 +1,480 @@
+#
+# For a description of the syntax of this configuration file,
+# see Documentation/kbuild/kconfig-language.txt.
+#
+
+config MMU
+	bool
+	default y
+
+config RWSEM_GENERIC_SPINLOCK
+	bool
+
+config RWSEM_XCHGADD_ALGORITHM
+	bool
+	default y
+
+config GENERIC_CALIBRATE_DELAY
+	bool
+	default y
+
+config GENERIC_BUST_SPINLOCK
+	bool
+
+mainmenu "Linux Kernel Configuration"
+
+config ARCH_S390
+	bool
+	default y
+
+config UID16
+	bool
+	default y
+	depends on ARCH_S390X = 'n'
+
+source "init/Kconfig"
+
+menu "Base setup"
+
+comment "Processor type and features"
+
+config ARCH_S390X
+	bool "64 bit kernel"
+	help
+	  Select this option if you have a 64 bit IBM zSeries machine
+	  and want to use the 64 bit addressing mode.
+
+config 64BIT
+	def_bool ARCH_S390X
+
+config ARCH_S390_31
+	bool
+	depends on ARCH_S390X = 'n'
+	default y
+
+config SMP
+	bool "Symmetric multi-processing support"
+	---help---
+	  This enables support for systems with more than one CPU. If you have
+	  a system with only one CPU, like most personal computers, say N. If
+	  you have a system with more than one CPU, say Y.
+
+	  If you say N here, the kernel will run on single and multiprocessor
+	  machines, but will use only one CPU of a multiprocessor machine. If
+	  you say Y here, the kernel will run on many, but not all,
+	  singleprocessor machines. On a singleprocessor machine, the kernel
+	  will run faster if you say N here.
+
+	  See also the <file:Documentation/smp.txt> and the SMP-HOWTO
+	  available at <http://www.tldp.org/docs.html#howto>.
+
+	  Even if you don't know what to do here, say Y.
+
+config NR_CPUS
+	int "Maximum number of CPUs (2-64)"
+	range 2 64
+	depends on SMP
+	default "32"
+	help
+	  This allows you to specify the maximum number of CPUs which this
+	  kernel will support.  The maximum supported value is 64 and the
+	  minimum value which makes sense is 2.
+
+	  This is purely to save memory - each supported CPU adds
+	  approximately sixteen kilobytes to the kernel image.
+
+config HOTPLUG_CPU
+	bool "Support for hot-pluggable CPUs"
+	depends on SMP
+	select HOTPLUG
+	default n
+	help
+	  Say Y here to be able to turn CPUs off and on. CPUs
+	  can be controlled through /sys/devices/system/cpu/cpu#.
+	  Say N if you want to disable CPU hotplug.
+
+config MATHEMU
+	bool "IEEE FPU emulation"
+	depends on MARCH_G5
+	help
+	  This option is required for IEEE compliant floating point arithmetic
+	  on older S/390 machines. Say Y unless you know your machine doesn't
+	  need this.
+
+config S390_SUPPORT
+	bool "Kernel support for 31 bit emulation"
+	depends on ARCH_S390X
+	help
+	  Select this option if you want to enable your system kernel to
+	  handle system-calls from ELF binaries for 31 bit ESA.  This option
+	  (and some other stuff like libraries and such) is needed for
+	  executing 31 bit applications.  It is safe to say "Y".
+
+config COMPAT
+	bool
+	depends on S390_SUPPORT
+	default y
+
+config SYSVIPC_COMPAT
+	bool
+	depends on COMPAT && SYSVIPC
+	default y
+
+config BINFMT_ELF32
+	tristate "Kernel support for 31 bit ELF binaries"
+	depends on S390_SUPPORT
+	help
+	  This allows you to run 32-bit Linux/ELF binaries on your zSeries
+	  in 64 bit mode. Everybody wants this; say Y.
+
+comment "Code generation options"
+
+choice
+	prompt "Processor type"
+	default MARCH_G5
+
+config MARCH_G5
+	bool "S/390 model G5 and G6"
+	depends on ARCH_S390_31
+	help
+	  Select this to build a 31 bit kernel that works
+	  on all S/390 and zSeries machines.
+
+config MARCH_Z900
+	bool "IBM eServer zSeries model z800 and z900"
+	help
+	  Select this to optimize for zSeries machines. This
+	  will enable some optimizations that are not available
+	  on older 31 bit only CPUs.
+
+config MARCH_Z990
+	bool "IBM eServer zSeries model z890 and z990"
+	help
+	  Select this to enable optimizations for model z890/z990.
+	  This will be slightly faster but does not work on
+	  older machines such as the z900.
+
+endchoice
+
+config PACK_STACK
+	bool "Pack kernel stack"
+	help
+	  This option enables the compiler option -mkernel-backchain if it
+	  is available. If the option is available the compiler supports
+	  the new stack layout which dramatically reduces the minimum stack
+	  frame size. With an old compiler a non-leaf function needs a
+	  minimum of 96 bytes on 31 bit and 160 bytes on 64 bit. With
+	  -mkernel-backchain the minimum size drops to 16 byte on 31 bit
+	  and 24 byte on 64 bit.
+
+	  Say Y if you are unsure.
+
+config SMALL_STACK
+	bool "Use 4kb/8kb for kernel stack instead of 8kb/16kb"
+	depends on PACK_STACK
+	help
+	  If you say Y here and the compiler supports the -mkernel-backchain
+	  option the kernel will use a smaller kernel stack size. For 31 bit
+	  the reduced size is 4kb instead of 8kb and for 64 bit it is 8kb
+	  instead of 16kb. This allows you to run more threads on a system and
+	  reduces the pressure on the memory management for higher order
+	  page allocations.
+
+	  Say N if you are unsure.
+
+
+config CHECK_STACK
+	bool "Detect kernel stack overflow"
+	help
+	  This option enables the compiler option -mstack-guard and
+	  -mstack-size if they are available. If the compiler supports them
+	  it will emit additional code to each function prolog to trigger
+	  an illegal operation if the kernel stack is about to overflow.
+
+	  Say N if you are unsure.
+
+config STACK_GUARD
+	int "Size of the guard area (128-1024)"
+	range 128 1024
+	depends on CHECK_STACK
+	default "256"
+	help
+	  This allows you to specify the size of the guard area at the lower
+	  end of the kernel stack. If the kernel stack points into the guard
+	  area on function entry an illegal operation is triggered. The size
+	  needs to be a power of 2. Please keep in mind that the size of an
+	  interrupt frame is 184 bytes for 31 bit and 328 bytes on 64 bit.
+	  The minimum size for the stack guard should be 256 for 31 bit and
+	  512 for 64 bit.
+
+config WARN_STACK
+	bool "Emit compiler warnings for function with broken stack usage"
+	help
+	  This option enables the compiler options -mwarn-framesize and
+	  -mwarn-dynamicstack. If the compiler supports these options it
+	  will generate warnings for functions which either use alloca or
+	  create a stack frame bigger than CONFIG_WARN_STACK_SIZE.
+
+	  Say N if you are unsure.
+
+config WARN_STACK_SIZE
+	int "Maximum frame size considered safe (128-2048)"
+	range 128 2048
+	depends on WARN_STACK
+	default "256"
+	help
+	  This allows you to specify the maximum frame size a function may
+	  have without the compiler complaining about it.
+
+comment "I/O subsystem configuration"
+
+config MACHCHK_WARNING
+	bool "Process warning machine checks"
+	help
+	  Select this option if you want the machine check handler on IBM S/390 or
+	  zSeries to process warning machine checks (e.g. on power failures).
+	  If unsure, say "Y".
+
+config QDIO
+	tristate "QDIO support"
+	---help---
+	  This driver provides the Queued Direct I/O base support for the
+	  IBM S/390 (G5 and G6) and eServer zSeries (z800, z890, z900 and z990).
+
+	  For details please refer to the documentation provided by IBM at
+	  <http://www10.software.ibm.com/developerworks/opensource/linux390>
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called qdio.
+
+	  If unsure, say Y.
+
+config QDIO_PERF_STATS
+	bool "Performance statistics in /proc"
+	depends on QDIO
+	help
+	  Say Y here to get performance statistics in /proc/qdio_perf
+
+	  If unsure, say N.
+
+config QDIO_DEBUG
+	bool "Extended debugging information"
+	depends on QDIO
+	help
+	  Say Y here to get extended debugging output in /proc/s390dbf/qdio...
+	  Warning: this option reduces the performance of the QDIO module.
+
+	  If unsure, say N.
+
+comment "Misc"
+
+config PREEMPT
+	bool "Preemptible Kernel"
+	help
+	  This option reduces the latency of the kernel when reacting to
+	  real-time or interactive events by allowing a low priority process to
+	  be preempted even if it is in kernel mode executing a system call.
+	  This allows applications to run more reliably even when the system is
+	  under load.
+
+	  Say N if you are unsure.
+
+config IPL
+	bool "Builtin IPL record support"
+	help
+	  If you want to use the produced kernel to IPL directly from a
+	  device, you have to merge a bootsector specific to the device
+	  into the first bytes of the kernel. You will have to select the
+	  IPL device.
+
+choice
+	prompt "IPL method generated into head.S"
+	depends on IPL
+	default IPL_TAPE
+	help
+	  Select "tape" if you want to IPL the image from a Tape.
+
+	  Select "vm_reader" if you are running under VM/ESA and want
+	  to IPL the image from the emulated card reader.
+
+config IPL_TAPE
+	bool "tape"
+
+config IPL_VM
+	bool "vm_reader"
+
+endchoice
+
+source "fs/Kconfig.binfmt"
+
+config PROCESS_DEBUG
+	bool "Show crashed user process info"
+	help
+	  Say Y to print all process fault locations to the console.  This is
+	  a debugging option; you probably do not want to set it unless you
+	  are an S390 port maintainer.
+
+config PFAULT
+	bool "Pseudo page fault support"
+	help
+	  Select this option, if you want to use PFAULT pseudo page fault
+	  handling under VM. If running native or in LPAR, this option
+	  has no effect. If your VM does not support PFAULT, PAGEEX
+	  pseudo page fault handling will be used.
+	  Note that VM 4.2 supports PFAULT but has a bug in its
+	  implementation that causes some problems.
+	  Everybody who wants to run Linux under VM != VM4.2 should select
+	  this option.
+
+config SHARED_KERNEL
+	bool "VM shared kernel support"
+	help
+	  Select this option, if you want to share the text segment of the
+	  Linux kernel between different VM guests. This reduces memory
+	  usage with lots of guests but greatly increases kernel size.
+	  You should only select this option if you know what you are
+	  doing and want to exploit this feature.
+
+config CMM
+	tristate "Cooperative memory management"
+	help
+	  Select this option, if you want to enable the kernel interface
+	  to reduce the memory size of the system. This is accomplished
+	  by allocating pages of memory and putting them "on hold". This only
+	  makes sense for a system running under VM where the unused pages
+	  will be reused by VM for other guest systems. The interface
+	  allows an external monitor to balance memory of many systems.
+	  Everybody who wants to run Linux under VM should select this
+	  option.
+
+config CMM_PROC
+	bool "/proc interface to cooperative memory management"
+	depends on CMM
+	help
+	  Select this option to enable the /proc interface to the
+	  cooperative memory management.
+
+config CMM_IUCV
+	bool "IUCV special message interface to cooperative memory management"
+	depends on CMM && (SMSGIUCV=y || CMM=SMSGIUCV)
+	help
+	  Select this option to enable the special message interface to
+	  the cooperative memory management.
+
+config VIRT_TIMER
+	bool "Virtual CPU timer support"
+	help
+	  This provides a kernel interface for virtual CPU timers.
+	  Default is disabled.
+
+config VIRT_CPU_ACCOUNTING
+	bool "Base user process accounting on virtual cpu timer"
+	depends on VIRT_TIMER
+	help
+	  Select this option to use CPU timer deltas to do user
+	  process accounting.
+
+config APPLDATA_BASE
+	bool "Linux - VM Monitor Stream, base infrastructure"
+	depends on PROC_FS && VIRT_TIMER=y
+	help
+	  This provides a kernel interface for creating and updating z/VM APPLDATA
+	  monitor records. The monitor records are updated at certain time
+	  intervals, once the timer is started.
+	  Writing 1 or 0 to /proc/appldata/timer starts(1) or stops(0) the timer,
+	  i.e. enables or disables monitoring on the Linux side.
+	  A custom interval value (in seconds) can be written to
+	  /proc/appldata/interval.
+
+	  Defaults are 60 seconds interval and timer off.
+	  The /proc entries can also be read from, showing the current settings.
+
+config APPLDATA_MEM
+	tristate "Monitor memory management statistics"
+	depends on APPLDATA_BASE
+	help
+	  This provides memory management related data to the Linux - VM Monitor
+	  Stream, like paging/swapping rate, memory utilisation, etc.
+	  Writing 1 or 0 to /proc/appldata/memory creates(1) or removes(0) a z/VM
+	  APPLDATA monitor record, i.e. enables or disables monitoring this record
+	  on the z/VM side.
+
+	  Default is disabled.
+	  The /proc entry can also be read from, showing the current settings.
+
+	  This can also be compiled as a module, which will be called
+	  appldata_mem.o.
+
+config APPLDATA_OS
+	tristate "Monitor OS statistics"
+	depends on APPLDATA_BASE
+	help
+	  This provides OS related data to the Linux - VM Monitor Stream, like
+	  CPU utilisation, etc.
+	  Writing 1 or 0 to /proc/appldata/os creates(1) or removes(0) a z/VM
+	  APPLDATA monitor record, i.e. enables or disables monitoring this record
+	  on the z/VM side.
+
+	  Default is disabled.
+	  This can also be compiled as a module, which will be called
+	  appldata_os.o.
+
+config APPLDATA_NET_SUM
+	tristate "Monitor overall network statistics"
+	depends on APPLDATA_BASE
+	help
+	  This provides network related data to the Linux - VM Monitor Stream,
+	  currently there is only a total sum of network I/O statistics, no
+	  per-interface data.
+	  Writing 1 or 0 to /proc/appldata/net_sum creates(1) or removes(0) a z/VM
+	  APPLDATA monitor record, i.e. enables or disables monitoring this record
+	  on the z/VM side.
+
+	  Default is disabled.
+	  This can also be compiled as a module, which will be called
+	  appldata_net_sum.o.
+
+config NO_IDLE_HZ
+	bool "No HZ timer ticks in idle"
+	help
+	  Switches the regular HZ timer off when the system is going idle.
+	  This helps z/VM to detect that the Linux system is idle. VM can
+	  then "swap-out" this guest which reduces memory usage. It also
+	  reduces the overhead of idle systems.
+
+	  The HZ timer can be switched on/off via /proc/sys/kernel/hz_timer.
+	  hz_timer=0 means HZ timer is disabled. hz_timer=1 means HZ
+	  timer is active.
+
+config NO_IDLE_HZ_INIT
+	bool "HZ timer in idle off by default"
+	depends on NO_IDLE_HZ
+	help
+	  The HZ timer is switched off in idle by default. That means the
+	  HZ timer is already disabled at boot time.
+
+endmenu
+
+config PCMCIA
+	bool
+	default n
+
+source "drivers/base/Kconfig"
+
+source "drivers/scsi/Kconfig"
+
+source "drivers/s390/Kconfig"
+
+source "net/Kconfig"
+
+source "fs/Kconfig"
+
+source "arch/s390/oprofile/Kconfig"
+
+source "arch/s390/Kconfig.debug"
+
+source "security/Kconfig"
+
+source "crypto/Kconfig"
+
+source "lib/Kconfig"
diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug
new file mode 100644
index 0000000..f53b6d5
--- /dev/null
+++ b/arch/s390/Kconfig.debug
@@ -0,0 +1,5 @@
+menu "Kernel hacking"
+
+source "lib/Kconfig.debug"
+
+endmenu
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
new file mode 100644
index 0000000..3cd8dd2
--- /dev/null
+++ b/arch/s390/Makefile
@@ -0,0 +1,116 @@
+#
+# s390/Makefile
+#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies. Remember to do have actions
+# for "archclean" and "archdep" for cleaning up and making dependencies for
+# this architecture
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1994 by Linus Torvalds
+#
+
+ifdef CONFIG_ARCH_S390_31
+LDFLAGS		:= -m elf_s390
+CFLAGS		+= -m31
+AFLAGS		+= -m31
+UTS_MACHINE	:= s390
+STACK_SIZE	:= 8192
+endif
+
+ifdef CONFIG_ARCH_S390X
+LDFLAGS		:= -m elf64_s390
+MODFLAGS	+= -fpic -D__PIC__
+CFLAGS		+= -m64
+AFLAGS		+= -m64
+UTS_MACHINE	:= s390x
+STACK_SIZE	:= 16384
+endif
+
+cflags-$(CONFIG_MARCH_G5)   += $(call cc-option,-march=g5)
+cflags-$(CONFIG_MARCH_Z900) += $(call cc-option,-march=z900)
+cflags-$(CONFIG_MARCH_Z990) += $(call cc-option,-march=z990)
+
+# old style option for packed stacks
+ifeq ($(call cc-option-yn,-mkernel-backchain),y)
+cflags-$(CONFIG_PACK_STACK)  += -mkernel-backchain -D__PACK_STACK
+aflags-$(CONFIG_PACK_STACK)  += -D__PACK_STACK
+cflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK
+aflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK
+ifdef CONFIG_SMALL_STACK
+STACK_SIZE := $(shell echo $$(($(STACK_SIZE)/2)) )
+endif
+endif
+
+# new style option for packed stacks
+ifeq ($(call cc-option-yn,-mpacked-stack),y)
+cflags-$(CONFIG_PACK_STACK)  += -mpacked-stack -D__PACK_STACK
+aflags-$(CONFIG_PACK_STACK)  += -D__PACK_STACK
+cflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK
+aflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK
+ifdef CONFIG_SMALL_STACK
+STACK_SIZE := $(shell echo $$(($(STACK_SIZE)/2)) )
+endif
+endif
+
+ifeq ($(call cc-option-yn,-mstack-size=8192 -mstack-guard=128),y)
+cflags-$(CONFIG_CHECK_STACK) += -mstack-size=$(STACK_SIZE)
+cflags-$(CONFIG_CHECK_STACK) += -mstack-guard=$(CONFIG_STACK_GUARD)
+endif
+
+ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y)
+cflags-$(CONFIG_WARN_STACK) += -mwarn-dynamicstack
+cflags-$(CONFIG_WARN_STACK) += -mwarn-framesize=$(CONFIG_WARN_STACK_SIZE)
+endif
+
+CFLAGS		+= -mbackchain -msoft-float $(cflags-y)
+CFLAGS		+= $(call cc-option,-finline-limit=10000)
+CFLAGS 		+= -pipe -fno-strength-reduce -Wno-sign-compare 
+AFLAGS		+= $(aflags-y)
+
+OBJCOPYFLAGS	:= -O binary
+LDFLAGS_vmlinux := -e start
+
+head-$(CONFIG_ARCH_S390_31)	+= arch/$(ARCH)/kernel/head.o
+head-$(CONFIG_ARCH_S390X)	+= arch/$(ARCH)/kernel/head64.o
+head-y				+= arch/$(ARCH)/kernel/init_task.o
+
+core-y		+= arch/$(ARCH)/mm/ arch/$(ARCH)/kernel/ arch/$(ARCH)/crypto/ \
+		   arch/$(ARCH)/appldata/
+libs-y		+= arch/$(ARCH)/lib/
+drivers-y	+= drivers/s390/
+drivers-$(CONFIG_MATHEMU) += arch/$(ARCH)/math-emu/
+
+# must be linked after kernel
+drivers-$(CONFIG_OPROFILE)	+= arch/s390/oprofile/
+
+boot		:= arch/$(ARCH)/boot
+
+all: image
+
+install: vmlinux
+	$(Q)$(MAKE) $(build)=$(boot) $@
+
+image: vmlinux
+	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
+archclean:
+	$(Q)$(MAKE) $(clean)=$(boot)
+
+prepare: include/asm-$(ARCH)/offsets.h
+
+arch/$(ARCH)/kernel/asm-offsets.s: include/asm include/linux/version.h \
+				   include/config/MARKER
+
+include/asm-$(ARCH)/offsets.h: arch/$(ARCH)/kernel/asm-offsets.s
+	$(call filechk,gen-asm-offsets)
+
+CLEAN_FILES += include/asm-$(ARCH)/offsets.h
+
+# Don't use tabs in echo arguments
+define archhelp
+  echo  '* image           - Kernel image for IPL ($(boot)/image)'
+endef
diff --git a/arch/s390/appldata/Makefile b/arch/s390/appldata/Makefile
new file mode 100644
index 0000000..99f1cf0
--- /dev/null
+++ b/arch/s390/appldata/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the Linux - z/VM Monitor Stream.
+#
+
+obj-$(CONFIG_APPLDATA_BASE) += appldata_base.o
+obj-$(CONFIG_APPLDATA_MEM) += appldata_mem.o
+obj-$(CONFIG_APPLDATA_OS) += appldata_os.o
+obj-$(CONFIG_APPLDATA_NET_SUM) += appldata_net_sum.o
diff --git a/arch/s390/appldata/appldata.h b/arch/s390/appldata/appldata.h
new file mode 100644
index 0000000..e806a89
--- /dev/null
+++ b/arch/s390/appldata/appldata.h
@@ -0,0 +1,59 @@
+/*
+ * arch/s390/appldata/appldata.h
+ *
+ * Definitions and interface for Linux - z/VM Monitor Stream.
+ *
+ * Copyright (C) 2003 IBM Corporation, IBM Deutschland Entwicklung GmbH.
+ *
+ * Author: Gerald Schaefer <geraldsc@de.ibm.com>
+ */
+
+//#define APPLDATA_DEBUG			/* Debug messages on/off */
+
+#define APPLDATA_MAX_REC_SIZE	  4024	/* Maximum size of the */
+					/* data buffer */
+#define APPLDATA_MAX_PROCS 100
+
+#define APPLDATA_PROC_NAME_LENGTH 16	/* Max. length of /proc name */
+
+#define APPLDATA_RECORD_MEM_ID		0x01	/* IDs to identify the */
+#define APPLDATA_RECORD_OS_ID		0x02	/* individual records, */
+#define APPLDATA_RECORD_NET_SUM_ID	0x03	/* must be < 256 !     */
+#define APPLDATA_RECORD_PROC_ID		0x04
+
+#define CTL_APPLDATA 		2120	/* sysctl IDs, must be unique */
+#define CTL_APPLDATA_TIMER 	2121
+#define CTL_APPLDATA_INTERVAL 	2122
+#define CTL_APPLDATA_MEM	2123
+#define CTL_APPLDATA_OS		2124
+#define CTL_APPLDATA_NET_SUM	2125
+#define CTL_APPLDATA_PROC	2126
+
+#define P_INFO(x...)	printk(KERN_INFO MY_PRINT_NAME " info: " x)
+#define P_ERROR(x...)	printk(KERN_ERR MY_PRINT_NAME " error: " x)
+#define P_WARNING(x...)	printk(KERN_WARNING MY_PRINT_NAME " status: " x)
+
+#ifdef APPLDATA_DEBUG
+#define P_DEBUG(x...)   printk(KERN_DEBUG MY_PRINT_NAME " debug: " x)
+#else
+#define P_DEBUG(x...)   do {} while (0)
+#endif
+
+struct appldata_ops {
+	struct list_head list;
+	struct ctl_table_header *sysctl_header;
+	struct ctl_table *ctl_table;
+	int    active;				/* monitoring status */
+
+	/* fill in from here */
+	unsigned int ctl_nr;			/* sysctl ID */
+	char name[APPLDATA_PROC_NAME_LENGTH];	/* name of /proc fs node */
+	unsigned char record_nr;		/* Record Nr. for Product ID */
+	void (*callback)(void *data);		/* callback function */
+	void *data;				/* record data */
+	unsigned int size;			/* size of record */
+	struct module *owner;			/* THIS_MODULE */
+};
+
+extern int appldata_register_ops(struct appldata_ops *ops);
+extern void appldata_unregister_ops(struct appldata_ops *ops);
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
new file mode 100644
index 0000000..01ae196
--- /dev/null
+++ b/arch/s390/appldata/appldata_base.c
@@ -0,0 +1,770 @@
+/*
+ * arch/s390/appldata/appldata_base.c
+ *
+ * Base infrastructure for Linux-z/VM Monitor Stream, Stage 1.
+ * Exports appldata_register_ops() and appldata_unregister_ops() for the
+ * data gathering modules.
+ *
+ * Copyright (C) 2003 IBM Corporation, IBM Deutschland Entwicklung GmbH.
+ *
+ * Author: Gerald Schaefer <geraldsc@de.ibm.com>
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/smp.h>
+#include <linux/interrupt.h>
+#include <linux/proc_fs.h>
+#include <linux/page-flags.h>
+#include <linux/swap.h>
+#include <linux/pagemap.h>
+#include <linux/sysctl.h>
+#include <asm/timer.h>
+//#include <linux/kernel_stat.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+
+#include "appldata.h"
+
+
+#define MY_PRINT_NAME	"appldata"		/* for debug messages, etc. */
+#define APPLDATA_CPU_INTERVAL	10000		/* default (CPU) time for
+						   sampling interval in
+						   milliseconds */
+
+#define TOD_MICRO	0x01000			/* nr. of TOD clock units
+						   for 1 microsecond */
+#ifndef CONFIG_ARCH_S390X
+
+#define APPLDATA_START_INTERVAL_REC 0x00   	/* Function codes for */
+#define APPLDATA_STOP_REC	    0x01	/* DIAG 0xDC	  */
+#define APPLDATA_GEN_EVENT_RECORD   0x02
+#define APPLDATA_START_CONFIG_REC   0x03
+
+#else
+
+#define APPLDATA_START_INTERVAL_REC 0x80
+#define APPLDATA_STOP_REC   	    0x81
+#define APPLDATA_GEN_EVENT_RECORD   0x82
+#define APPLDATA_START_CONFIG_REC   0x83
+
+#endif /* CONFIG_ARCH_S390X */
+
+
+/*
+ * Parameter list for DIAGNOSE X'DC'
+ */
+#ifndef CONFIG_ARCH_S390X
+struct appldata_parameter_list {
+	u16 diag;		/* The DIAGNOSE code X'00DC'          */
+	u8  function;		/* The function code for the DIAGNOSE */
+	u8  parlist_length;	/* Length of the parameter list       */
+	u32 product_id_addr;	/* Address of the 16-byte product ID  */
+	u16 reserved;
+	u16 buffer_length;	/* Length of the application data buffer  */
+	u32 buffer_addr;	/* Address of the application data buffer */
+};
+#else
+struct appldata_parameter_list {
+	u16 diag;
+	u8  function;
+	u8  parlist_length;
+	u32 unused01;
+	u16 reserved;
+	u16 buffer_length;
+	u32 unused02;
+	u64 product_id_addr;
+	u64 buffer_addr;
+};
+#endif /* CONFIG_ARCH_S390X */
+
+/*
+ * /proc entries (sysctl)
+ */
+static const char appldata_proc_name[APPLDATA_PROC_NAME_LENGTH] = "appldata";
+static int appldata_timer_handler(ctl_table *ctl, int write, struct file *filp,
+				  void __user *buffer, size_t *lenp, loff_t *ppos);
+static int appldata_interval_handler(ctl_table *ctl, int write,
+					 struct file *filp,
+					 void __user *buffer,
+					 size_t *lenp, loff_t *ppos);
+
+static struct ctl_table_header *appldata_sysctl_header;
+static struct ctl_table appldata_table[] = {
+	{
+		.ctl_name	= CTL_APPLDATA_TIMER,
+		.procname	= "timer",
+		.mode		= S_IRUGO | S_IWUSR,
+		.proc_handler	= &appldata_timer_handler,
+	},
+	{
+		.ctl_name	= CTL_APPLDATA_INTERVAL,
+		.procname	= "interval",
+		.mode		= S_IRUGO | S_IWUSR,
+		.proc_handler	= &appldata_interval_handler,
+	},
+	{ .ctl_name = 0 }
+};
+
+static struct ctl_table appldata_dir_table[] = {
+	{
+		.ctl_name	= CTL_APPLDATA,
+		.procname	= appldata_proc_name,
+		.maxlen		= 0,
+		.mode		= S_IRUGO | S_IXUGO,
+		.child		= appldata_table,
+	},
+	{ .ctl_name = 0 }
+};
+
+/*
+ * Timer
+ */
+DEFINE_PER_CPU(struct vtimer_list, appldata_timer);
+static atomic_t appldata_expire_count = ATOMIC_INIT(0);
+
+static DEFINE_SPINLOCK(appldata_timer_lock);
+static int appldata_interval = APPLDATA_CPU_INTERVAL;
+static int appldata_timer_active;
+
+/*
+ * Tasklet
+ */
+static struct tasklet_struct appldata_tasklet_struct;
+
+/*
+ * Ops list
+ */
+static DEFINE_SPINLOCK(appldata_ops_lock);
+static LIST_HEAD(appldata_ops_list);
+
+
+/************************* timer, tasklet, DIAG ******************************/
+/*
+ * appldata_timer_function()
+ *
+ * schedule tasklet and reschedule timer
+ */
+static void appldata_timer_function(unsigned long data, struct pt_regs *regs)
+{
+	P_DEBUG("   -= Timer =-\n");
+	P_DEBUG("CPU: %i, expire_count: %i\n", smp_processor_id(),
+		atomic_read(&appldata_expire_count));
+	if (atomic_dec_and_test(&appldata_expire_count)) {
+		atomic_set(&appldata_expire_count, num_online_cpus());
+		tasklet_schedule((struct tasklet_struct *) data);
+	}
+}
+
+/*
+ * appldata_tasklet_function()
+ *
+ * call data gathering function for each (active) module
+ */
+static void appldata_tasklet_function(unsigned long data)
+{
+	struct list_head *lh;
+	struct appldata_ops *ops;
+	int i;
+
+	P_DEBUG("  -= Tasklet =-\n");
+	i = 0;
+	spin_lock(&appldata_ops_lock);
+	list_for_each(lh, &appldata_ops_list) {
+		ops = list_entry(lh, struct appldata_ops, list);
+		P_DEBUG("list_for_each loop: %i) active = %u, name = %s\n",
+			++i, ops->active, ops->name);
+		if (ops->active == 1) {
+			ops->callback(ops->data);
+		}
+	}
+	spin_unlock(&appldata_ops_lock);
+}
+
+/*
+ * appldata_diag()
+ *
+ * prepare parameter list, issue DIAG 0xDC
+ */
+static int appldata_diag(char record_nr, u16 function, unsigned long buffer,
+			u16 length)
+{
+	unsigned long ry;
+	struct appldata_product_id {
+		char prod_nr[7];			/* product nr. */
+		char prod_fn[2];			/* product function */
+		char record_nr;				/* record nr. */
+		char version_nr[2];			/* version */
+		char release_nr[2];			/* release */
+		char mod_lvl[2];			/* modification lvl. */
+	} appldata_product_id = {
+	/* all strings are EBCDIC, record_nr is byte */
+		.prod_nr    = {0xD3, 0xC9, 0xD5, 0xE4,
+				0xE7, 0xD2, 0xD9},	/* "LINUXKR" */
+		.prod_fn    = {0xD5, 0xD3},		/* "NL" */
+		.record_nr  = record_nr,
+		.version_nr = {0xF2, 0xF6},		/* "26" */
+		.release_nr = {0xF0, 0xF1},		/* "01" */
+		.mod_lvl    = {0xF0, 0xF0},		/* "00" */
+	};
+	struct appldata_parameter_list appldata_parameter_list = {
+				.diag = 0xDC,
+				.function = function,
+				.parlist_length =
+					sizeof(appldata_parameter_list),
+				.buffer_length = length,
+				.product_id_addr =
+					(unsigned long) &appldata_product_id,
+				.buffer_addr = virt_to_phys((void *) buffer)
+	};
+
+	if (!MACHINE_IS_VM)
+		return -ENOSYS;
+	ry = -1;
+	asm volatile(
+			"diag %1,%0,0xDC\n\t"
+			: "=d" (ry) : "d" (&(appldata_parameter_list)) : "cc");
+	return (int) ry;
+}
+/********************** timer, tasklet, DIAG <END> ***************************/
+
+
+/****************************** /proc stuff **********************************/
+
+/*
+ * appldata_mod_vtimer_wrap()
+ *
+ * wrapper function for mod_virt_timer(), because smp_call_function_on()
+ * accepts only one parameter.
+ */
+static void __appldata_mod_vtimer_wrap(void *p) {
+	struct {
+		struct vtimer_list *timer;
+		u64    expires;
+	} *args = p;
+	mod_virt_timer(args->timer, args->expires);
+}
+
+#define APPLDATA_ADD_TIMER	0
+#define APPLDATA_DEL_TIMER	1
+#define APPLDATA_MOD_TIMER	2
+
+/*
+ * __appldata_vtimer_setup()
+ *
+ * Add, delete or modify virtual timers on all online cpus.
+ * The caller needs to get the appldata_timer_lock spinlock.
+ */
+static void
+__appldata_vtimer_setup(int cmd)
+{
+	u64 per_cpu_interval;
+	int i;
+
+	switch (cmd) {
+	case APPLDATA_ADD_TIMER:
+		if (appldata_timer_active)
+			break;
+		per_cpu_interval = (u64) (appldata_interval*1000 /
+					  num_online_cpus()) * TOD_MICRO;
+		for_each_online_cpu(i) {
+			per_cpu(appldata_timer, i).expires = per_cpu_interval;
+			smp_call_function_on(add_virt_timer_periodic,
+					     &per_cpu(appldata_timer, i),
+					     0, 1, i);
+		}
+		appldata_timer_active = 1;
+		P_INFO("Monitoring timer started.\n");
+		break;
+	case APPLDATA_DEL_TIMER:
+		for_each_online_cpu(i)
+			del_virt_timer(&per_cpu(appldata_timer, i));
+		if (!appldata_timer_active)
+			break;
+		appldata_timer_active = 0;
+		atomic_set(&appldata_expire_count, num_online_cpus());
+		P_INFO("Monitoring timer stopped.\n");
+		break;
+	case APPLDATA_MOD_TIMER:
+		per_cpu_interval = (u64) (appldata_interval*1000 /
+					  num_online_cpus()) * TOD_MICRO;
+		if (!appldata_timer_active)
+			break;
+		for_each_online_cpu(i) {
+			struct {
+				struct vtimer_list *timer;
+				u64    expires;
+			} args;
+			args.timer = &per_cpu(appldata_timer, i);
+			args.expires = per_cpu_interval;
+			smp_call_function_on(__appldata_mod_vtimer_wrap,
+					     &args, 0, 1, i);
+		}
+	}
+}
+
+/*
+ * appldata_timer_handler()
+ *
+ * Start/Stop timer, show status of timer (0 = not active, 1 = active)
+ */
+static int
+appldata_timer_handler(ctl_table *ctl, int write, struct file *filp,
+			   void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	int len;
+	char buf[2];
+
+	if (!*lenp || *ppos) {
+		*lenp = 0;
+		return 0;
+	}
+	if (!write) {
+		len = sprintf(buf, appldata_timer_active ? "1\n" : "0\n");
+		if (len > *lenp)
+			len = *lenp;
+		if (copy_to_user(buffer, buf, len))
+			return -EFAULT;
+		goto out;
+	}
+	len = *lenp;
+	if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
+		return -EFAULT;
+	spin_lock(&appldata_timer_lock);
+	if (buf[0] == '1')
+		__appldata_vtimer_setup(APPLDATA_ADD_TIMER);
+	else if (buf[0] == '0')
+		__appldata_vtimer_setup(APPLDATA_DEL_TIMER);
+	spin_unlock(&appldata_timer_lock);
+out:
+	*lenp = len;
+	*ppos += len;
+	return 0;
+}
+
+/*
+ * appldata_interval_handler()
+ *
+ * Set (CPU) timer interval for collection of data (in milliseconds), show
+ * current timer interval.
+ */
+static int
+appldata_interval_handler(ctl_table *ctl, int write, struct file *filp,
+			   void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	int len, interval;
+	char buf[16];
+
+	if (!*lenp || *ppos) {
+		*lenp = 0;
+		return 0;
+	}
+	if (!write) {
+		len = sprintf(buf, "%i\n", appldata_interval);
+		if (len > *lenp)
+			len = *lenp;
+		if (copy_to_user(buffer, buf, len))
+			return -EFAULT;
+		goto out;
+	}
+	len = *lenp;
+	if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len)) {
+		return -EFAULT;
+	}
+	sscanf(buf, "%i", &interval);
+	if (interval <= 0) {
+		P_ERROR("Timer CPU interval has to be > 0!\n");
+		return -EINVAL;
+	}
+
+	spin_lock(&appldata_timer_lock);
+	appldata_interval = interval;
+	__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
+	spin_unlock(&appldata_timer_lock);
+
+	P_INFO("Monitoring CPU interval set to %u milliseconds.\n",
+		 interval);
+out:
+	*lenp = len;
+	*ppos += len;
+	return 0;
+}
+
+/*
+ * appldata_generic_handler()
+ *
+ * Generic start/stop monitoring and DIAG, show status of
+ * monitoring (0 = not in process, 1 = in process)
+ */
+static int
+appldata_generic_handler(ctl_table *ctl, int write, struct file *filp,
+			   void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	struct appldata_ops *ops = NULL, *tmp_ops;
+	int rc, len, found;
+	char buf[2];
+	struct list_head *lh;
+
+	found = 0;
+	spin_lock_bh(&appldata_ops_lock);
+	list_for_each(lh, &appldata_ops_list) {
+		tmp_ops = list_entry(lh, struct appldata_ops, list);
+		if (&tmp_ops->ctl_table[2] == ctl) {
+			found = 1;
+		}
+	}
+	if (!found) {
+		spin_unlock_bh(&appldata_ops_lock);
+		return -ENODEV;
+	}
+	ops = ctl->data;
+	if (!try_module_get(ops->owner)) {	// protect this function
+		spin_unlock_bh(&appldata_ops_lock);
+		return -ENODEV;
+	}
+	spin_unlock_bh(&appldata_ops_lock);
+
+	if (!*lenp || *ppos) {
+		*lenp = 0;
+		module_put(ops->owner);
+		return 0;
+	}
+	if (!write) {
+		len = sprintf(buf, ops->active ? "1\n" : "0\n");
+		if (len > *lenp)
+			len = *lenp;
+		if (copy_to_user(buffer, buf, len)) {
+			module_put(ops->owner);
+			return -EFAULT;
+		}
+		goto out;
+	}
+	len = *lenp;
+	if (copy_from_user(buf, buffer,
+			   len > sizeof(buf) ? sizeof(buf) : len)) {
+		module_put(ops->owner);
+		return -EFAULT;
+	}
+
+	spin_lock_bh(&appldata_ops_lock);
+	if ((buf[0] == '1') && (ops->active == 0)) {
+		if (!try_module_get(ops->owner)) {	// protect tasklet
+			spin_unlock_bh(&appldata_ops_lock);
+			module_put(ops->owner);
+			return -ENODEV;
+		}
+		ops->active = 1;
+		ops->callback(ops->data);	// init record
+		rc = appldata_diag(ops->record_nr,
+					APPLDATA_START_INTERVAL_REC,
+					(unsigned long) ops->data, ops->size);
+		if (rc != 0) {
+			P_ERROR("START DIAG 0xDC for %s failed, "
+				"return code: %d\n", ops->name, rc);
+			module_put(ops->owner);
+			ops->active = 0;
+		} else {
+			P_INFO("Monitoring %s data enabled, "
+				"DIAG 0xDC started.\n", ops->name);
+		}
+	} else if ((buf[0] == '0') && (ops->active == 1)) {
+		ops->active = 0;
+		rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
+				(unsigned long) ops->data, ops->size);
+		if (rc != 0) {
+			P_ERROR("STOP DIAG 0xDC for %s failed, "
+				"return code: %d\n", ops->name, rc);
+		} else {
+			P_INFO("Monitoring %s data disabled, "
+				"DIAG 0xDC stopped.\n", ops->name);
+		}
+		module_put(ops->owner);
+	}
+	spin_unlock_bh(&appldata_ops_lock);
+out:
+	*lenp = len;
+	*ppos += len;
+	module_put(ops->owner);
+	return 0;
+}
+
+/*************************** /proc stuff <END> *******************************/
+
+
+/************************* module-ops management *****************************/
+/*
+ * appldata_register_ops()
+ *
+ * update ops list, register /proc/sys entries
+ */
int appldata_register_ops(struct appldata_ops *ops)
{
	struct list_head *lh;
	struct appldata_ops *tmp_ops;
	int i;

	i = 0;

	/* sanity check: record must fit into the maximum size accepted by
	   the monitor stream (APPLDATA_MAX_REC_SIZE) */
	if ((ops->size > APPLDATA_MAX_REC_SIZE) ||
		(ops->size < 0)){
		P_ERROR("Invalid size of %s record = %i, maximum = %i!\n",
			ops->name, ops->size, APPLDATA_MAX_REC_SIZE);
		return -ENOMEM;
	}
	/* the base sysctl IDs are reserved for appldata_base itself */
	if ((ops->ctl_nr == CTL_APPLDATA) ||
	    (ops->ctl_nr == CTL_APPLDATA_TIMER) ||
	    (ops->ctl_nr == CTL_APPLDATA_INTERVAL)) {
		P_ERROR("ctl_nr %i already in use!\n", ops->ctl_nr);
		return -EBUSY;
	}
	/* 4 entries: "appldata" dir node, terminator, the ops node,
	   terminator (entries [1] and [3] stay all-zero) */
	ops->ctl_table = kmalloc(4*sizeof(struct ctl_table), GFP_KERNEL);
	if (ops->ctl_table == NULL) {
		P_ERROR("Not enough memory for %s ctl_table!\n", ops->name);
		return -ENOMEM;
	}
	memset(ops->ctl_table, 0, 4*sizeof(struct ctl_table));

	/* reject duplicate names and sysctl IDs while holding the ops lock */
	spin_lock_bh(&appldata_ops_lock);
	list_for_each(lh, &appldata_ops_list) {
		tmp_ops = list_entry(lh, struct appldata_ops, list);
		P_DEBUG("register_ops loop: %i) name = %s, ctl = %i\n",
			++i, tmp_ops->name, tmp_ops->ctl_nr);
		P_DEBUG("Comparing %s (ctl %i) with %s (ctl %i)\n",
			tmp_ops->name, tmp_ops->ctl_nr, ops->name,
			ops->ctl_nr);
		if (strncmp(tmp_ops->name, ops->name,
				APPLDATA_PROC_NAME_LENGTH) == 0) {
			P_ERROR("Name \"%s\" already registered!\n", ops->name);
			kfree(ops->ctl_table);
			spin_unlock_bh(&appldata_ops_lock);
			return -EBUSY;
		}
		if (tmp_ops->ctl_nr == ops->ctl_nr) {
			P_ERROR("ctl_nr %i already registered!\n", ops->ctl_nr);
			kfree(ops->ctl_table);
			spin_unlock_bh(&appldata_ops_lock);
			return -EBUSY;
		}
	}
	list_add(&ops->list, &appldata_ops_list);
	spin_unlock_bh(&appldata_ops_lock);

	/* entry [0]: the shared /proc/sys/appldata directory node;
	   filling in the table after list_add is safe because the node only
	   becomes reachable via register_sysctl_table() below */
	ops->ctl_table[0].ctl_name = CTL_APPLDATA;
	ops->ctl_table[0].procname = appldata_proc_name;
	ops->ctl_table[0].maxlen   = 0;
	ops->ctl_table[0].mode     = S_IRUGO | S_IXUGO;
	ops->ctl_table[0].child    = &ops->ctl_table[2];

	ops->ctl_table[1].ctl_name = 0;

	/* entry [2]: the per-ops control file, served by
	   appldata_generic_handler (which looks ops up via ->data) */
	ops->ctl_table[2].ctl_name = ops->ctl_nr;
	ops->ctl_table[2].procname = ops->name;
	ops->ctl_table[2].mode     = S_IRUGO | S_IWUSR;
	ops->ctl_table[2].proc_handler = appldata_generic_handler;
	ops->ctl_table[2].data = ops;

	ops->ctl_table[3].ctl_name = 0;

	ops->sysctl_header = register_sysctl_table(ops->ctl_table,1);

	P_INFO("%s-ops registered!\n", ops->name);
	return 0;
}
+
+/*
+ * appldata_unregister_ops()
+ *
+ * update ops list, unregister /proc entries, stop DIAG if necessary
+ */
+void appldata_unregister_ops(struct appldata_ops *ops)
+{
+	spin_lock_bh(&appldata_ops_lock);
+	unregister_sysctl_table(ops->sysctl_header);
+	list_del(&ops->list);
+	kfree(ops->ctl_table);
+	ops->ctl_table = NULL;
+	spin_unlock_bh(&appldata_ops_lock);
+	P_INFO("%s-ops unregistered!\n", ops->name);
+}
+/********************** module-ops management <END> **************************/
+
+
+/******************************* init / exit *********************************/
+
+static void
+appldata_online_cpu(int cpu)
+{
+	init_virt_timer(&per_cpu(appldata_timer, cpu));
+	per_cpu(appldata_timer, cpu).function = appldata_timer_function;
+	per_cpu(appldata_timer, cpu).data = (unsigned long)
+		&appldata_tasklet_struct;
+	atomic_inc(&appldata_expire_count);
+	spin_lock(&appldata_timer_lock);
+	__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
+	spin_unlock(&appldata_timer_lock);
+}
+
/* Tear down the virtual CPU timer of one (offlined) cpu. */
static void
appldata_offline_cpu(int cpu)
{
	del_virt_timer(&per_cpu(appldata_timer, cpu));
	/* if this was the last cpu the tasklet was still waiting for,
	   fire it now and re-arm the countdown for the remaining
	   online cpus */
	if (atomic_dec_and_test(&appldata_expire_count)) {
		atomic_set(&appldata_expire_count, num_online_cpus());
		tasklet_schedule(&appldata_tasklet_struct);
	}
	spin_lock(&appldata_timer_lock);
	__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
	spin_unlock(&appldata_timer_lock);
}
+
+static int
+appldata_cpu_notify(struct notifier_block *self,
+		    unsigned long action, void *hcpu)
+{
+	switch (action) {
+	case CPU_ONLINE:
+		appldata_online_cpu((long) hcpu);
+		break;
+#ifdef CONFIG_HOTPLUG_CPU
+	case CPU_DEAD:
+		appldata_offline_cpu((long) hcpu);
+		break;
+#endif
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
/* hotplug notifier: keeps the per-cpu timers in sync with online cpus */
static struct notifier_block __devinitdata appldata_nb = {
	.notifier_call = appldata_cpu_notify,
};
+
+/*
+ * appldata_init()
+ *
+ * init timer and tasklet, register /proc entries
+ */
static int __init appldata_init(void)
{
	int i;

	P_DEBUG("sizeof(parameter_list) = %lu\n",
		sizeof(struct appldata_parameter_list));

	/* set up a virtual CPU timer on every cpu already online */
	for_each_online_cpu(i)
		appldata_online_cpu(i);

	/* Register cpu hotplug notifier */
	register_cpu_notifier(&appldata_nb);

	appldata_sysctl_header = register_sysctl_table(appldata_dir_table, 1);
#ifdef MODULE
	/* pin the /proc entries to this module so it cannot be unloaded
	   while one of them is open */
	appldata_dir_table[0].de->owner = THIS_MODULE;
	appldata_table[0].de->owner = THIS_MODULE;
	appldata_table[1].de->owner = THIS_MODULE;
#endif

	/* NOTE(review): the per-cpu timers above already reference
	   appldata_tasklet_struct before tasklet_init() runs; the timers
	   are not started yet at this point, so this looks safe - confirm */
	tasklet_init(&appldata_tasklet_struct, appldata_tasklet_function, 0);
	P_DEBUG("Base interface initialized.\n");
	return 0;
}
+
+/*
+ * appldata_exit()
+ *
+ * stop timer and tasklet, unregister /proc entries
+ */
+static void __exit appldata_exit(void)
+{
+	struct list_head *lh;
+	struct appldata_ops *ops;
+	int rc, i;
+
+	P_DEBUG("Unloading module ...\n");
+	/*
+	 * ops list should be empty, but just in case something went wrong...
+	 */
+	spin_lock_bh(&appldata_ops_lock);
+	list_for_each(lh, &appldata_ops_list) {
+		ops = list_entry(lh, struct appldata_ops, list);
+		rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
+				(unsigned long) ops->data, ops->size);
+		if (rc != 0) {
+			P_ERROR("STOP DIAG 0xDC for %s failed, "
+				"return code: %d\n", ops->name, rc);
+		}
+	}
+	spin_unlock_bh(&appldata_ops_lock);
+
+	for_each_online_cpu(i)
+		appldata_offline_cpu(i);
+
+	appldata_timer_active = 0;
+
+	unregister_sysctl_table(appldata_sysctl_header);
+
+	tasklet_kill(&appldata_tasklet_struct);
+	P_DEBUG("... module unloaded!\n");
+}
+/**************************** init / exit <END> ******************************/
+
+
+module_init(appldata_init);
+module_exit(appldata_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Gerald Schaefer");
+MODULE_DESCRIPTION("Linux-VM Monitor Stream, base infrastructure");
+
+EXPORT_SYMBOL_GPL(appldata_register_ops);
+EXPORT_SYMBOL_GPL(appldata_unregister_ops);
+
+#ifdef MODULE
+/*
+ * Kernel symbols needed by appldata_mem and appldata_os modules.
+ * However, if this file is compiled as a module (for testing only), these
+ * symbols are not exported. In this case, we define them locally and export
+ * those.
+ */
/*
 * Dummy replacement for the kernel's si_swapinfo (only used when this
 * file is built as a module for testing): mark both swap counters as
 * invalid by setting every bit.
 */
void si_swapinfo(struct sysinfo *val)
{
	val->freeswap = val->totalswap = -1ul;
}
+
/* dummy load averages / thread count: pre-biased so that the consumer in
   appldata_os.c, which adds FIXED_1/200 back, reports all bits set */
unsigned long avenrun[3] = {-1 - FIXED_1/200, -1 - FIXED_1/200,
				-1 - FIXED_1/200};
int nr_threads = -1;
+
+void get_full_page_state(struct page_state *ps)
+{
+	memset(ps, -1, sizeof(struct page_state));
+}
+
/* Dummy replacement: report an "unknown" run-queue length (all bits set). */
unsigned long nr_running(void)
{
	return ~0UL;
}
+
/* Dummy replacement: report an "unknown" iowait count (all bits set). */
unsigned long nr_iowait(void)
{
	return ~0UL;
}
+
+/*unsigned long nr_context_switches(void)
+{
+	return -1;
+}*/
+#endif /* MODULE */
+EXPORT_SYMBOL_GPL(si_swapinfo);
+EXPORT_SYMBOL_GPL(nr_threads);
+EXPORT_SYMBOL_GPL(avenrun);
+EXPORT_SYMBOL_GPL(get_full_page_state);
+EXPORT_SYMBOL_GPL(nr_running);
+EXPORT_SYMBOL_GPL(nr_iowait);
+//EXPORT_SYMBOL_GPL(nr_context_switches);
diff --git a/arch/s390/appldata/appldata_mem.c b/arch/s390/appldata/appldata_mem.c
new file mode 100644
index 0000000..462ee9a
--- /dev/null
+++ b/arch/s390/appldata/appldata_mem.c
@@ -0,0 +1,195 @@
+/*
+ * arch/s390/appldata/appldata_mem.c
+ *
+ * Data gathering module for Linux-VM Monitor Stream, Stage 1.
+ * Collects data related to memory management.
+ *
+ * Copyright (C) 2003 IBM Corporation, IBM Deutschland Entwicklung GmbH.
+ *
+ * Author: Gerald Schaefer <geraldsc@de.ibm.com>
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/kernel_stat.h>
+#include <asm/io.h>
+#include <linux/pagemap.h>
+#include <linux/swap.h>
+
+#include "appldata.h"
+
+
+#define MY_PRINT_NAME "appldata_mem"		/* for debug messages, etc. */
+#define P2K(x) ((x) << (PAGE_SHIFT - 10))	/* Converts #Pages to KB */
+
+/*
+ * Memory data
+ *
+ * This is accessed as binary data by z/VM. If changes to it can't be avoided,
+ * the structure version (product ID, see appldata_base.c) needs to be changed
+ * as well and all documentation and z/VM applications using it must be
+ * updated.
+ *
+ * The record layout is documented in the Linux for zSeries Device Drivers
+ * book:
+ * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml
+ */
struct appldata_mem_data {
	/* WARNING: z/VM reads this structure as raw binary - never reorder
	   or resize fields without bumping the structure version */
	u64 timestamp;
	u32 sync_count_1;       /* after VM collected the record data, */
	u32 sync_count_2;	/* sync_count_1 and sync_count_2 should be the
				   same. If not, the record has been updated on
				   the Linux side while VM was collecting the
				   (possibly corrupt) data */

	u64 pgpgin;		/* data read from disk  */
	u64 pgpgout;		/* data written to disk */
	u64 pswpin;		/* pages swapped in  */
	u64 pswpout;		/* pages swapped out */

	u64 sharedram;		/* sharedram is currently set to 0 */

	u64 totalram;		/* total main memory size */
	u64 freeram;		/* free main memory size  */
	u64 totalhigh;		/* total high memory size */
	u64 freehigh;		/* free high memory size  */

	u64 bufferram;		/* memory reserved for buffers, free cache */
	u64 cached;		/* size of (used) cache, w/o buffers */
	u64 totalswap;		/* total swap space size */
	u64 freeswap;		/* free swap space */

// New in 2.6 -->
	u64 pgalloc;		/* page allocations */
	u64 pgfault;		/* page faults (major+minor) */
	u64 pgmajfault;		/* page faults (major only) */
// <-- New in 2.6

} appldata_mem_data;
+
+
+static inline void appldata_debug_print(struct appldata_mem_data *mem_data)
+{
+	P_DEBUG("--- MEM - RECORD ---\n");
+	P_DEBUG("pgpgin     = %8lu KB\n", mem_data->pgpgin);
+	P_DEBUG("pgpgout    = %8lu KB\n", mem_data->pgpgout);
+	P_DEBUG("pswpin     = %8lu Pages\n", mem_data->pswpin);
+	P_DEBUG("pswpout    = %8lu Pages\n", mem_data->pswpout);
+	P_DEBUG("pgalloc    = %8lu \n", mem_data->pgalloc);
+	P_DEBUG("pgfault    = %8lu \n", mem_data->pgfault);
+	P_DEBUG("pgmajfault = %8lu \n", mem_data->pgmajfault);
+	P_DEBUG("sharedram  = %8lu KB\n", mem_data->sharedram);
+	P_DEBUG("totalram   = %8lu KB\n", mem_data->totalram);
+	P_DEBUG("freeram    = %8lu KB\n", mem_data->freeram);
+	P_DEBUG("totalhigh  = %8lu KB\n", mem_data->totalhigh);
+	P_DEBUG("freehigh   = %8lu KB\n", mem_data->freehigh);
+	P_DEBUG("bufferram  = %8lu KB\n", mem_data->bufferram);
+	P_DEBUG("cached     = %8lu KB\n", mem_data->cached);
+	P_DEBUG("totalswap  = %8lu KB\n", mem_data->totalswap);
+	P_DEBUG("freeswap   = %8lu KB\n", mem_data->freeswap);
+	P_DEBUG("sync_count_1 = %u\n", mem_data->sync_count_1);
+	P_DEBUG("sync_count_2 = %u\n", mem_data->sync_count_2);
+	P_DEBUG("timestamp    = %lX\n", mem_data->timestamp);
+}
+
+/*
+ * appldata_get_mem_data()
+ *
+ * gather memory data
+ */
static void appldata_get_mem_data(void *data)
{
	/*
	 * don't put large structures on the stack, we are
	 * serialized through the appldata_ops_lock and can use static
	 */
	static struct sysinfo val;
	static struct page_state ps;
	struct appldata_mem_data *mem_data;

	mem_data = data;
	/* sync counter protocol: z/VM compares sync_count_1 (incremented
	   here) with sync_count_2 (incremented at the end) to detect a
	   record that was updated while being collected */
	mem_data->sync_count_1++;

	get_full_page_state(&ps);
	/* halved - presumably converting 512-byte sectors to KB; confirm */
	mem_data->pgpgin     = ps.pgpgin >> 1;
	mem_data->pgpgout    = ps.pgpgout >> 1;
	mem_data->pswpin     = ps.pswpin;
	mem_data->pswpout    = ps.pswpout;
	/* sum allocations over all zones */
	mem_data->pgalloc    = ps.pgalloc_high + ps.pgalloc_normal +
			       ps.pgalloc_dma;
	mem_data->pgfault    = ps.pgfault;
	mem_data->pgmajfault = ps.pgmajfault;

	si_meminfo(&val);
	mem_data->sharedram = val.sharedram;
	/* P2K converts page counts to KB */
	mem_data->totalram  = P2K(val.totalram);
	mem_data->freeram   = P2K(val.freeram);
	mem_data->totalhigh = P2K(val.totalhigh);
	mem_data->freehigh  = P2K(val.freehigh);
	mem_data->bufferram = P2K(val.bufferram);
	mem_data->cached    = P2K(atomic_read(&nr_pagecache) - val.bufferram);

	si_swapinfo(&val);
	mem_data->totalswap = P2K(val.totalswap);
	mem_data->freeswap  = P2K(val.freeswap);

	mem_data->timestamp = get_clock();
	mem_data->sync_count_2++;
#ifdef APPLDATA_DEBUG
	appldata_debug_print(mem_data);
#endif
}
+
+
/* hooks this collector into the appldata base driver (appldata_base.c) */
static struct appldata_ops ops = {
	.ctl_nr    = CTL_APPLDATA_MEM,
	.name      = "mem",
	.record_nr = APPLDATA_RECORD_MEM_ID,
	.size	   = sizeof(struct appldata_mem_data),
	.callback  = &appldata_get_mem_data,
	.data      = &appldata_mem_data,
	.owner     = THIS_MODULE,
};
+
+
+/*
+ * appldata_mem_init()
+ *
+ * init_data, register ops
+ */
+static int __init appldata_mem_init(void)
+{
+	int rc;
+
+	P_DEBUG("sizeof(mem) = %lu\n", sizeof(struct appldata_mem_data));
+
+	rc = appldata_register_ops(&ops);
+	if (rc != 0) {
+		P_ERROR("Error registering ops, rc = %i\n", rc);
+	} else {
+		P_DEBUG("%s-ops registered!\n", ops.name);
+	}
+	return rc;
+}
+
+/*
+ * appldata_mem_exit()
+ *
+ * unregister ops
+ */
/* Module exit: detach the collector from the appldata base driver. */
static void __exit appldata_mem_exit(void)
{
	appldata_unregister_ops(&ops);
	P_DEBUG("%s-ops unregistered!\n", ops.name);
}
+
+
+module_init(appldata_mem_init);
+module_exit(appldata_mem_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Gerald Schaefer");
+MODULE_DESCRIPTION("Linux-VM Monitor Stream, MEMORY statistics");
diff --git a/arch/s390/appldata/appldata_net_sum.c b/arch/s390/appldata/appldata_net_sum.c
new file mode 100644
index 0000000..dd61638
--- /dev/null
+++ b/arch/s390/appldata/appldata_net_sum.c
@@ -0,0 +1,195 @@
+/*
+ * arch/s390/appldata/appldata_net_sum.c
+ *
+ * Data gathering module for Linux-VM Monitor Stream, Stage 1.
+ * Collects accumulated network statistics (Packets received/transmitted,
+ * dropped, errors, ...).
+ *
+ * Copyright (C) 2003 IBM Corporation, IBM Deutschland Entwicklung GmbH.
+ *
+ * Author: Gerald Schaefer <geraldsc@de.ibm.com>
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/kernel_stat.h>
+#include <linux/netdevice.h>
+
+#include "appldata.h"
+
+
+#define MY_PRINT_NAME	"appldata_net_sum"	/* for debug messages, etc. */
+
+
+/*
+ * Network data
+ *
+ * This is accessed as binary data by z/VM. If changes to it can't be avoided,
+ * the structure version (product ID, see appldata_base.c) needs to be changed
+ * as well and all documentation and z/VM applications using it must be updated.
+ *
+ * The record layout is documented in the Linux for zSeries Device Drivers
+ * book:
+ * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml
+ */
struct appldata_net_sum_data {
	/* WARNING: z/VM reads this structure as raw binary - never reorder
	   or resize fields without bumping the structure version */
	u64 timestamp;
	u32 sync_count_1;	/* after VM collected the record data, */
	u32 sync_count_2;	/* sync_count_1 and sync_count_2 should be the
				   same. If not, the record has been updated on
				   the Linux side while VM was collecting the
				   (possibly corrupt) data */

	u32 nr_interfaces;	/* nr. of network interfaces being monitored */

	u32 padding;		/* next value is 64-bit aligned, so these */
				/* 4 byte would be padded out by compiler */

	u64 rx_packets;		/* total packets received        */
	u64 tx_packets;		/* total packets transmitted     */
	u64 rx_bytes;		/* total bytes received          */
	u64 tx_bytes;		/* total bytes transmitted       */
	u64 rx_errors;		/* bad packets received          */
	u64 tx_errors;		/* packet transmit problems      */
	u64 rx_dropped;		/* no space in linux buffers     */
	u64 tx_dropped;		/* no space available in linux   */
	u64 collisions;		/* collisions while transmitting */
} appldata_net_sum_data;
+
+
+static inline void appldata_print_debug(struct appldata_net_sum_data *net_data)
+{
+	P_DEBUG("--- NET - RECORD ---\n");
+
+	P_DEBUG("nr_interfaces = %u\n", net_data->nr_interfaces);
+	P_DEBUG("rx_packets    = %8lu\n", net_data->rx_packets);
+	P_DEBUG("tx_packets    = %8lu\n", net_data->tx_packets);
+	P_DEBUG("rx_bytes      = %8lu\n", net_data->rx_bytes);
+	P_DEBUG("tx_bytes      = %8lu\n", net_data->tx_bytes);
+	P_DEBUG("rx_errors     = %8lu\n", net_data->rx_errors);
+	P_DEBUG("tx_errors     = %8lu\n", net_data->tx_errors);
+	P_DEBUG("rx_dropped    = %8lu\n", net_data->rx_dropped);
+	P_DEBUG("tx_dropped    = %8lu\n", net_data->tx_dropped);
+	P_DEBUG("collisions    = %8lu\n", net_data->collisions);
+
+	P_DEBUG("sync_count_1 = %u\n", net_data->sync_count_1);
+	P_DEBUG("sync_count_2 = %u\n", net_data->sync_count_2);
+	P_DEBUG("timestamp    = %lX\n", net_data->timestamp);
+}
+
+/*
+ * appldata_get_net_sum_data()
+ *
+ * gather accumulated network statistics
+ */
+static void appldata_get_net_sum_data(void *data)
+{
+	int i;
+	struct appldata_net_sum_data *net_data;
+	struct net_device *dev;
+	struct net_device_stats *stats;
+	unsigned long rx_packets, tx_packets, rx_bytes, tx_bytes, rx_errors,
+			tx_errors, rx_dropped, tx_dropped, collisions;
+
+	net_data = data;
+	net_data->sync_count_1++;
+
+	i = 0;
+	rx_packets = 0;
+	tx_packets = 0;
+	rx_bytes   = 0;
+	tx_bytes   = 0;
+	rx_errors  = 0;
+	tx_errors  = 0;
+	rx_dropped = 0;
+	tx_dropped = 0;
+	collisions = 0;
+	read_lock(&dev_base_lock);
+	for (dev = dev_base; dev != NULL; dev = dev->next) {
+		if (dev->get_stats == NULL) {
+			continue;
+		}
+		stats = dev->get_stats(dev);
+		rx_packets += stats->rx_packets;
+		tx_packets += stats->tx_packets;
+		rx_bytes   += stats->rx_bytes;
+		tx_bytes   += stats->tx_bytes;
+		rx_errors  += stats->rx_errors;
+		tx_errors  += stats->tx_errors;
+		rx_dropped += stats->rx_dropped;
+		tx_dropped += stats->tx_dropped;
+		collisions += stats->collisions;
+		i++;
+	}
+	read_unlock(&dev_base_lock);
+	net_data->nr_interfaces = i;
+	net_data->rx_packets = rx_packets;
+	net_data->tx_packets = tx_packets;
+	net_data->rx_bytes   = rx_bytes;
+	net_data->tx_bytes   = tx_bytes;
+	net_data->rx_errors  = rx_errors;
+	net_data->tx_errors  = tx_errors;
+	net_data->rx_dropped = rx_dropped;
+	net_data->tx_dropped = tx_dropped;
+	net_data->collisions = collisions;
+
+	net_data->timestamp = get_clock();
+	net_data->sync_count_2++;
+#ifdef APPLDATA_DEBUG
+	appldata_print_debug(net_data);
+#endif
+}
+
+
/* hooks this collector into the appldata base driver (appldata_base.c) */
static struct appldata_ops ops = {
	.ctl_nr    = CTL_APPLDATA_NET_SUM,
	.name	   = "net_sum",
	.record_nr = APPLDATA_RECORD_NET_SUM_ID,
	.size	   = sizeof(struct appldata_net_sum_data),
	.callback  = &appldata_get_net_sum_data,
	.data      = &appldata_net_sum_data,
	.owner     = THIS_MODULE,
};
+
+
+/*
+ * appldata_net_init()
+ *
+ * init data, register ops
+ */
+static int __init appldata_net_init(void)
+{
+	int rc;
+
+	P_DEBUG("sizeof(net) = %lu\n", sizeof(struct appldata_net_sum_data));
+
+	rc = appldata_register_ops(&ops);
+	if (rc != 0) {
+		P_ERROR("Error registering ops, rc = %i\n", rc);
+	} else {
+		P_DEBUG("%s-ops registered!\n", ops.name);
+	}
+	return rc;
+}
+
+/*
+ * appldata_net_exit()
+ *
+ * unregister ops
+ */
/* Module exit: detach the collector from the appldata base driver. */
static void __exit appldata_net_exit(void)
{
	appldata_unregister_ops(&ops);
	P_DEBUG("%s-ops unregistered!\n", ops.name);
}
+
+
+module_init(appldata_net_init);
+module_exit(appldata_net_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Gerald Schaefer");
+MODULE_DESCRIPTION("Linux-VM Monitor Stream, accumulated network statistics");
diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c
new file mode 100644
index 0000000..b83f074
--- /dev/null
+++ b/arch/s390/appldata/appldata_os.c
@@ -0,0 +1,241 @@
+/*
+ * arch/s390/appldata/appldata_os.c
+ *
+ * Data gathering module for Linux-VM Monitor Stream, Stage 1.
+ * Collects misc. OS related data (CPU utilization, running processes).
+ *
+ * Copyright (C) 2003 IBM Corporation, IBM Deutschland Entwicklung GmbH.
+ *
+ * Author: Gerald Schaefer <geraldsc@de.ibm.com>
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/kernel_stat.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <asm/smp.h>
+
+#include "appldata.h"
+
+
+#define MY_PRINT_NAME	"appldata_os"		/* for debug messages, etc. */
+#define LOAD_INT(x) ((x) >> FSHIFT)
+#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
+
+/*
+ * OS data
+ *
+ * This is accessed as binary data by z/VM. If changes to it can't be avoided,
+ * the structure version (product ID, see appldata_base.c) needs to be changed
+ * as well and all documentation and z/VM applications using it must be
+ * updated.
+ *
+ * The record layout is documented in the Linux for zSeries Device Drivers
+ * book:
+ * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml
+ */
/* one instance per (virtual) CPU, appended after struct appldata_os_data;
   layout is part of the z/VM monitor record - do not change */
struct appldata_os_per_cpu {
	u32 per_cpu_user;	/* timer ticks spent in user mode   */
	u32 per_cpu_nice;	/* ... spent with modified priority */
	u32 per_cpu_system;	/* ... spent in kernel mode         */
	u32 per_cpu_idle;	/* ... spent in idle mode           */

// New in 2.6 -->
	u32 per_cpu_irq;	/* ... spent in interrupts          */
	u32 per_cpu_softirq;	/* ... spent in softirqs            */
	u32 per_cpu_iowait;	/* ... spent while waiting for I/O  */
// <-- New in 2.6
};
+
struct appldata_os_data {
	/* WARNING: z/VM reads this structure as raw binary - never reorder
	   or resize fields without bumping the structure version */
	u64 timestamp;
	u32 sync_count_1;	/* after VM collected the record data, */
	u32 sync_count_2;	/* sync_count_1 and sync_count_2 should be the
				   same. If not, the record has been updated on
				   the Linux side while VM was collecting the
				   (possibly corrupt) data */

	u32 nr_cpus;		/* number of (virtual) CPUs        */
	u32 per_cpu_size;	/* size of the per-cpu data struct */
	u32 cpu_offset;		/* offset of the first per-cpu data struct */

	u32 nr_running;		/* number of runnable threads      */
	u32 nr_threads;		/* number of threads               */
	u32 avenrun[3];		/* average nr. of running processes during */
				/* the last 1, 5 and 15 minutes */

// New in 2.6 -->
	u32 nr_iowait;		/* number of blocked threads
				   (waiting for I/O)               */
// <-- New in 2.6

	/* per cpu data */
	struct appldata_os_per_cpu os_cpu[0];	/* variable-length tail;
						   room for NR_CPUS entries is
						   allocated in appldata_os_init() */
};
+
+static struct appldata_os_data *appldata_os_data;
+
+
+static inline void appldata_print_debug(struct appldata_os_data *os_data)
+{
+	int a0, a1, a2, i;
+
+	P_DEBUG("--- OS - RECORD ---\n");
+	P_DEBUG("nr_threads   = %u\n", os_data->nr_threads);
+	P_DEBUG("nr_running   = %u\n", os_data->nr_running);
+	P_DEBUG("nr_iowait    = %u\n", os_data->nr_iowait);
+	P_DEBUG("avenrun(int) = %8x / %8x / %8x\n", os_data->avenrun[0],
+		os_data->avenrun[1], os_data->avenrun[2]);
+	a0 = os_data->avenrun[0];
+	a1 = os_data->avenrun[1];
+	a2 = os_data->avenrun[2];
+	P_DEBUG("avenrun(float) = %d.%02d / %d.%02d / %d.%02d\n",
+		LOAD_INT(a0), LOAD_FRAC(a0), LOAD_INT(a1), LOAD_FRAC(a1),
+		LOAD_INT(a2), LOAD_FRAC(a2));
+
+	P_DEBUG("nr_cpus = %u\n", os_data->nr_cpus);
+	for (i = 0; i < os_data->nr_cpus; i++) {
+		P_DEBUG("cpu%u : user = %u, nice = %u, system = %u, "
+			"idle = %u, irq = %u, softirq = %u, iowait = %u\n",
+				i,
+				os_data->os_cpu[i].per_cpu_user,
+				os_data->os_cpu[i].per_cpu_nice,
+				os_data->os_cpu[i].per_cpu_system,
+				os_data->os_cpu[i].per_cpu_idle,
+				os_data->os_cpu[i].per_cpu_irq,
+				os_data->os_cpu[i].per_cpu_softirq,
+				os_data->os_cpu[i].per_cpu_iowait);
+	}
+
+	P_DEBUG("sync_count_1 = %u\n", os_data->sync_count_1);
+	P_DEBUG("sync_count_2 = %u\n", os_data->sync_count_2);
+	P_DEBUG("timestamp    = %lX\n", os_data->timestamp);
+}
+
+/*
+ * appldata_get_os_data()
+ *
+ * gather OS data
+ */
static void appldata_get_os_data(void *data)
{
	int i, j;
	struct appldata_os_data *os_data;

	os_data = data;
	/* sync counter protocol: z/VM compares sync_count_1 (incremented
	   here) with sync_count_2 (incremented at the end) to detect a
	   record that was updated while being collected */
	os_data->sync_count_1++;

	os_data->nr_cpus = num_online_cpus();

	os_data->nr_threads = nr_threads;
	os_data->nr_running = nr_running();
	os_data->nr_iowait  = nr_iowait();
	/* + FIXED_1/200 rounds the fixed-point load average to the nearest
	   1/100 (same rounding as /proc/loadavg) */
	os_data->avenrun[0] = avenrun[0] + (FIXED_1/200);
	os_data->avenrun[1] = avenrun[1] + (FIXED_1/200);
	os_data->avenrun[2] = avenrun[2] + (FIXED_1/200);

	/* pack the stats of the online cpus densely into os_cpu[0..n-1],
	   even if the cpu numbers themselves are sparse */
	j = 0;
	for_each_online_cpu(i) {
		os_data->os_cpu[j].per_cpu_user =
					kstat_cpu(i).cpustat.user;
		os_data->os_cpu[j].per_cpu_nice =
					kstat_cpu(i).cpustat.nice;
		os_data->os_cpu[j].per_cpu_system =
					kstat_cpu(i).cpustat.system;
		os_data->os_cpu[j].per_cpu_idle =
					kstat_cpu(i).cpustat.idle;
		os_data->os_cpu[j].per_cpu_irq =
					kstat_cpu(i).cpustat.irq;
		os_data->os_cpu[j].per_cpu_softirq =
					kstat_cpu(i).cpustat.softirq;
		os_data->os_cpu[j].per_cpu_iowait =
					kstat_cpu(i).cpustat.iowait;
		j++;
	}

	os_data->timestamp = get_clock();
	os_data->sync_count_2++;
#ifdef APPLDATA_DEBUG
	appldata_print_debug(os_data);
#endif
}
+
+
/* hooks this collector into the appldata base driver (appldata_base.c);
   .data and .size are filled in by appldata_os_init() once the record
   buffer has been allocated */
static struct appldata_ops ops = {
	.ctl_nr    = CTL_APPLDATA_OS,
	.name	   = "os",
	.record_nr = APPLDATA_RECORD_OS_ID,
	.callback  = &appldata_get_os_data,
	.owner     = THIS_MODULE,
};
+
+
+/*
+ * appldata_os_init()
+ *
+ * init data, register ops
+ */
static int __init appldata_os_init(void)
{
	int rc, size;

	/* worst-case size: header plus one per-cpu entry for every
	   possible cpu (NR_CPUS, not just the currently online ones) */
	size = sizeof(struct appldata_os_data) +
		(NR_CPUS * sizeof(struct appldata_os_per_cpu));
	if (size > APPLDATA_MAX_REC_SIZE) {
		P_ERROR("Size of record = %i, bigger than maximum (%i)!\n",
			size, APPLDATA_MAX_REC_SIZE);
		rc = -ENOMEM;
		goto out;
	}
	P_DEBUG("sizeof(os) = %i, sizeof(os_cpu) = %lu\n", size,
		sizeof(struct appldata_os_per_cpu));

	/* NOTE(review): allocated with GFP_DMA - presumably the diag 0xDC
	   record buffer has an addressing restriction; confirm */
	appldata_os_data = kmalloc(size, GFP_DMA);
	if (appldata_os_data == NULL) {
		P_ERROR("No memory for %s!\n", ops.name);
		rc = -ENOMEM;
		goto out;
	}
	memset(appldata_os_data, 0, size);

	/* record self-description so z/VM can locate the per-cpu array */
	appldata_os_data->per_cpu_size = sizeof(struct appldata_os_per_cpu);
	appldata_os_data->cpu_offset   = offsetof(struct appldata_os_data,
							os_cpu);
	P_DEBUG("cpu offset = %u\n", appldata_os_data->cpu_offset);

	ops.data = appldata_os_data;
	ops.size = size;
	rc = appldata_register_ops(&ops);
	if (rc != 0) {
		P_ERROR("Error registering ops, rc = %i\n", rc);
		kfree(appldata_os_data);
	} else {
		P_DEBUG("%s-ops registered!\n", ops.name);
	}
out:
	return rc;
}
+
+/*
+ * appldata_os_exit()
+ *
+ * unregister ops
+ */
/* Module exit: detach from the base driver and free the record buffer. */
static void __exit appldata_os_exit(void)
{
	appldata_unregister_ops(&ops);
	kfree(appldata_os_data);
	P_DEBUG("%s-ops unregistered!\n", ops.name);
}
+
+
+module_init(appldata_os_init);
+module_exit(appldata_os_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Gerald Schaefer");
+MODULE_DESCRIPTION("Linux-VM Monitor Stream, OS statistics");
diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
new file mode 100644
index 0000000..4d97eef
--- /dev/null
+++ b/arch/s390/boot/Makefile
@@ -0,0 +1,18 @@
+#
+# Makefile for the linux s390-specific parts of the memory manager.
+#
+
+COMPILE_VERSION := __linux_compile_version_id__`hostname |  \
+			tr -c '[0-9A-Za-z]' '_'`__`date | \
+			tr -c '[0-9A-Za-z]' '_'`_t
+
+EXTRA_CFLAGS  := -DCOMPILE_VERSION=$(COMPILE_VERSION) -gstabs -I.
+
+targets := image
+
+$(obj)/image: vmlinux FORCE
+	$(call if_changed,objcopy)
+
+install: $(CONFIGURE) $(obj)/image
+	sh -x  $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/image \
+	      System.map Kerntypes "$(INSTALL_PATH)"
diff --git a/arch/s390/boot/install.sh b/arch/s390/boot/install.sh
new file mode 100644
index 0000000..278a813
--- /dev/null
+++ b/arch/s390/boot/install.sh
@@ -0,0 +1,38 @@
+#!/bin/sh
+#
+# arch/s390x/boot/install.sh
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1995 by Linus Torvalds
+#
+# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin
+#
+# "make install" script for s390 architecture
+#
+# Arguments:
+#   $1 - kernel version
+#   $2 - kernel image file
+#   $3 - kernel map file
+#   $4 - default install path (blank if root directory)
+#
+
# User may have a custom install script

if [ -x ~/bin/installkernel ]; then exec ~/bin/installkernel "$@"; fi
if [ -x /sbin/installkernel ]; then exec /sbin/installkernel "$@"; fi

# Default install - same as make zlilo
# All path arguments are quoted so an install path containing spaces
# (or an empty $4 for the root directory) is handled correctly.

if [ -f "$4/vmlinuz" ]; then
	mv "$4/vmlinuz" "$4/vmlinuz.old"
fi

if [ -f "$4/System.map" ]; then
	mv "$4/System.map" "$4/System.old"
fi

cat "$2" > "$4/vmlinuz"
cp "$3" "$4/System.map"
diff --git a/arch/s390/crypto/Makefile b/arch/s390/crypto/Makefile
new file mode 100644
index 0000000..96a05e6
--- /dev/null
+++ b/arch/s390/crypto/Makefile
@@ -0,0 +1,8 @@
+#
+# Cryptographic API
+#
+
+obj-$(CONFIG_CRYPTO_SHA1_Z990) += sha1_z990.o
+obj-$(CONFIG_CRYPTO_DES_Z990) += des_z990.o des_check_key.o
+
+obj-$(CONFIG_CRYPTO_TEST) += crypt_z990_query.o
diff --git a/arch/s390/crypto/crypt_z990.h b/arch/s390/crypto/crypt_z990.h
new file mode 100644
index 0000000..4df660b
--- /dev/null
+++ b/arch/s390/crypto/crypt_z990.h
@@ -0,0 +1,374 @@
+/*
+ * Cryptographic API.
+ *
+ * Support for z990 cryptographic instructions.
+ *
+ *   Copyright (C) 2003 IBM Deutschland GmbH, IBM Corporation
+ *   Author(s): Thomas Spatzier (tspat@de.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _CRYPTO_ARCH_S390_CRYPT_Z990_H
+#define _CRYPTO_ARCH_S390_CRYPT_Z990_H
+
+#include <asm/errno.h>
+
+#define CRYPT_Z990_OP_MASK 0xFF00
+#define CRYPT_Z990_FUNC_MASK 0x00FF
+
+
+/*z990 cryptographic operations*/
+enum crypt_z990_operations {
+	CRYPT_Z990_KM   = 0x0100,
+	CRYPT_Z990_KMC  = 0x0200,
+	CRYPT_Z990_KIMD = 0x0300,
+	CRYPT_Z990_KLMD = 0x0400,
+	CRYPT_Z990_KMAC = 0x0500
+};
+
+/*function codes for KM (CIPHER MESSAGE) instruction*/
+enum crypt_z990_km_func {
+	KM_QUERY            = CRYPT_Z990_KM | 0,
+	KM_DEA_ENCRYPT      = CRYPT_Z990_KM | 1,
+	KM_DEA_DECRYPT      = CRYPT_Z990_KM | 1 | 0x80, //modifier bit->decipher
+	KM_TDEA_128_ENCRYPT = CRYPT_Z990_KM | 2,
+	KM_TDEA_128_DECRYPT = CRYPT_Z990_KM | 2 | 0x80,
+	KM_TDEA_192_ENCRYPT = CRYPT_Z990_KM | 3,
+	KM_TDEA_192_DECRYPT = CRYPT_Z990_KM | 3 | 0x80,
+};
+
+/*function codes for KMC (CIPHER MESSAGE WITH CHAINING) instruction*/
+enum crypt_z990_kmc_func {
+	KMC_QUERY            = CRYPT_Z990_KMC | 0,
+	KMC_DEA_ENCRYPT      = CRYPT_Z990_KMC | 1,
+	KMC_DEA_DECRYPT      = CRYPT_Z990_KMC | 1 | 0x80, //modifier bit->decipher
+	KMC_TDEA_128_ENCRYPT = CRYPT_Z990_KMC | 2,
+	KMC_TDEA_128_DECRYPT = CRYPT_Z990_KMC | 2 | 0x80,
+	KMC_TDEA_192_ENCRYPT = CRYPT_Z990_KMC | 3,
+	KMC_TDEA_192_DECRYPT = CRYPT_Z990_KMC | 3 | 0x80,
+};
+
+/*function codes for KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) instruction*/
+enum crypt_z990_kimd_func {
+	KIMD_QUERY   = CRYPT_Z990_KIMD | 0,
+	KIMD_SHA_1   = CRYPT_Z990_KIMD | 1,
+};
+
+/*function codes for KLMD (COMPUTE LAST MESSAGE DIGEST) instruction*/
+enum crypt_z990_klmd_func {
+	KLMD_QUERY   = CRYPT_Z990_KLMD | 0,
+	KLMD_SHA_1   = CRYPT_Z990_KLMD | 1,
+};
+
+/*function codes for KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) instruction*/
+enum crypt_z990_kmac_func {
+	KMAC_QUERY    = CRYPT_Z990_KMAC | 0,
+	KMAC_DEA      = CRYPT_Z990_KMAC | 1,
+	KMAC_TDEA_128 = CRYPT_Z990_KMAC | 2,
+	KMAC_TDEA_192 = CRYPT_Z990_KMAC | 3
+};
+
+/*status word for z990 crypto instructions' QUERY functions*/
+struct crypt_z990_query_status {
+	u64 high;
+	u64 low;
+};
+
+/*
+ * Standard fixup and ex_table sections for crypt_z990 inline functions.
+ * label 0: the z990 crypto operation
+ * label 1: just after 1 to catch illegal operation exception on non-z990
+ * label 6: the return point after fixup
+ * label 7: set error value if exception _in_ crypto operation
+ * label 8: set error value if illegal operation exception
+ * [ret] is the variable to receive the error code
+ * [ERR] is the error code value
+ */
+#ifndef __s390x__
+#define __crypt_z990_fixup \
+	".section .fixup,\"ax\" \n"	\
+	"7:	lhi	%0,%h[e1] \n"	\
+	"	bras	1,9f \n"	\
+	"	.long	6b \n"		\
+	"8:	lhi	%0,%h[e2] \n"	\
+	"	bras	1,9f \n"	\
+	"	.long	6b \n"		\
+	"9:	l	1,0(1) \n"	\
+	"	br	1 \n"		\
+	".previous \n"			\
+	".section __ex_table,\"a\" \n"	\
+	"	.align	4 \n"		\
+	"	.long	0b,7b \n"	\
+	"	.long	1b,8b \n"	\
+	".previous"
+#else /* __s390x__ */
+#define __crypt_z990_fixup \
+	".section .fixup,\"ax\" \n"	\
+	"7:	lhi	%0,%h[e1] \n"	\
+	"	jg	6b \n"		\
+	"8:	lhi	%0,%h[e2] \n"	\
+	"	jg	6b \n"		\
+	".previous\n"			\
+	".section __ex_table,\"a\" \n"	\
+	"	.align	8 \n"		\
+	"	.quad	0b,7b \n"	\
+	"	.quad	1b,8b \n"	\
+	".previous"
+#endif /* __s390x__ */
+
+/*
+ * Standard code for setting the result of z990 crypto instructions.
+ * %0: the register which will receive the result
+ * [result]: the register containing the result (e.g. second operand length
+ * to compute number of processed bytes).
+ */
+#ifndef __s390x__
+#define __crypt_z990_set_result \
+	"	lr	%0,%[result] \n"
+#else /* __s390x__ */
+#define __crypt_z990_set_result \
+	"	lgr	%0,%[result] \n"
+#endif
+
+/*
+ * Executes the KM (CIPHER MESSAGE) operation of the z990 CPU.
+ * @param func: the function code passed to KM; see crypt_z990_km_func
+ * @param param: address of parameter block; see POP for details on each func
+ * @param dest: address of destination memory area
+ * @param src: address of source memory area
+ * @param src_len: length of src operand in bytes
+ * @returns < zero for failure, 0 for the query func, number of processed bytes
+ * 	for encryption/decryption funcs
+ */
+static inline int
+crypt_z990_km(long func, void* param, u8* dest, const u8* src, long src_len)
+{
+	register long __func asm("0") = func & CRYPT_Z990_FUNC_MASK;
+	register void* __param asm("1") = param;
+	register u8* __dest asm("4") = dest;
+	register const u8* __src asm("2") = src;
+	register long __src_len asm("3") = src_len;
+	int ret;
+
+	ret = 0;
+	__asm__ __volatile__ (
+		"0:	.insn	rre,0xB92E0000,%1,%2 \n" //KM opcode
+		"1:	brc	1,0b \n" //handle partial completion
+		__crypt_z990_set_result
+		"6:	\n"
+		__crypt_z990_fixup
+		: "+d" (ret), "+a" (__dest), "+a" (__src),
+		  [result] "+d" (__src_len)
+		: [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func),
+		  "a" (__param)
+		: "cc", "memory"
+	);
+	if (ret >= 0 && func & CRYPT_Z990_FUNC_MASK){
+		ret = src_len - ret;
+	}
+	return ret;
+}
+
+/*
+ * Executes the KMC (CIPHER MESSAGE WITH CHAINING) operation of the z990 CPU.
+ * @param func: the function code passed to KMC; see crypt_z990_kmc_func
+ * @param param: address of parameter block; see POP for details on each func
+ * @param dest: address of destination memory area
+ * @param src: address of source memory area
+ * @param src_len: length of src operand in bytes
+ * @returns < zero for failure, 0 for the query func, number of processed bytes
+ * 	for encryption/decryption funcs
+ */
+static inline int
+crypt_z990_kmc(long func, void* param, u8* dest, const u8* src, long src_len)
+{
+	register long __func asm("0") = func & CRYPT_Z990_FUNC_MASK;
+	register void* __param asm("1") = param;
+	register u8* __dest asm("4") = dest;
+	register const u8* __src asm("2") = src;
+	register long __src_len asm("3") = src_len;
+	int ret;
+
+	ret = 0;
+	__asm__ __volatile__ (
+		"0:	.insn	rre,0xB92F0000,%1,%2 \n" //KMC opcode
+		"1:	brc	1,0b \n" //handle partial completion
+		__crypt_z990_set_result
+		"6:	\n"
+		__crypt_z990_fixup
+		: "+d" (ret), "+a" (__dest), "+a" (__src),
+		  [result] "+d" (__src_len)
+		: [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func),
+		  "a" (__param)
+		: "cc", "memory"
+	);
+	if (ret >= 0 && func & CRYPT_Z990_FUNC_MASK){
+		ret = src_len - ret;
+	}
+	return ret;
+}
+
+/*
+ * Executes the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) operation
+ * of the z990 CPU.
+ * @param func: the function code passed to KIMD; see crypt_z990_kimd_func
+ * @param param: address of parameter block; see POP for details on each func
+ * @param src: address of source memory area
+ * @param src_len: length of src operand in bytes
+ * @returns < zero for failure, 0 for the query func, number of processed bytes
+ * 	for digest funcs
+ */
+static inline int
+crypt_z990_kimd(long func, void* param, const u8* src, long src_len)
+{
+	register long __func asm("0") = func & CRYPT_Z990_FUNC_MASK;
+	register void* __param asm("1") = param;
+	register const u8* __src asm("2") = src;
+	register long __src_len asm("3") = src_len;
+	int ret;
+
+	ret = 0;
+	__asm__ __volatile__ (
+		"0:	.insn	rre,0xB93E0000,%1,%1 \n" //KIMD opcode
+		"1:	brc	1,0b \n" /*handle partial completion of kimd*/
+		__crypt_z990_set_result
+		"6:	\n"
+		__crypt_z990_fixup
+		: "+d" (ret), "+a" (__src), [result] "+d" (__src_len)
+		: [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func),
+		  "a" (__param)
+		: "cc", "memory"
+	);
+	if (ret >= 0 && (func & CRYPT_Z990_FUNC_MASK)){
+		ret = src_len - ret;
+	}
+	return ret;
+}
+
+/*
+ * Executes the KLMD (COMPUTE LAST MESSAGE DIGEST) operation of the z990 CPU.
+ * @param func: the function code passed to KLMD; see crypt_z990_klmd_func
+ * @param param: address of parameter block; see POP for details on each func
+ * @param src: address of source memory area
+ * @param src_len: length of src operand in bytes
+ * @returns < zero for failure, 0 for the query func, number of processed bytes
+ * 	for digest funcs
+ */
+static inline int
+crypt_z990_klmd(long func, void* param, const u8* src, long src_len)
+{
+	register long __func asm("0") = func & CRYPT_Z990_FUNC_MASK;
+	register void* __param asm("1") = param;
+	register const u8* __src asm("2") = src;
+	register long __src_len asm("3") = src_len;
+	int ret;
+
+	ret = 0;
+	__asm__ __volatile__ (
+		"0:	.insn	rre,0xB93F0000,%1,%1 \n" //KLMD opcode
+		"1:	brc	1,0b \n" /*handle partial completion of klmd*/
+		__crypt_z990_set_result
+		"6:	\n"
+		__crypt_z990_fixup
+		: "+d" (ret), "+a" (__src), [result] "+d" (__src_len)
+		: [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func),
+		  "a" (__param)
+		: "cc", "memory"
+	);
+	if (ret >= 0 && func & CRYPT_Z990_FUNC_MASK){
+		ret = src_len - ret;
+	}
+	return ret;
+}
+
+/*
+ * Executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) operation
+ * of the z990 CPU.
+ * @param func: the function code passed to KMAC; see crypt_z990_kmac_func
+ * @param param: address of parameter block; see POP for details on each func
+ * @param src: address of source memory area
+ * @param src_len: length of src operand in bytes
+ * @returns < zero for failure, 0 for the query func, number of processed bytes
+ * 	for digest funcs
+ */
+static inline int
+crypt_z990_kmac(long func, void* param, const u8* src, long src_len)
+{
+	register long __func asm("0") = func & CRYPT_Z990_FUNC_MASK;
+	register void* __param asm("1") = param;
+	register const u8* __src asm("2") = src;
+	register long __src_len asm("3") = src_len;
+	int ret;
+
+	ret = 0;
+	__asm__ __volatile__ (
+		"0:	.insn	rre,0xB91E0000,%5,%5 \n" //KMAC opcode
+		"1:	brc	1,0b \n" /*handle partial completion of kmac*/
+		__crypt_z990_set_result
+		"6:	\n"
+		__crypt_z990_fixup
+		: "+d" (ret), "+a" (__src), [result] "+d" (__src_len)
+		: [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func),
+		  "a" (__param)
+		: "cc", "memory"
+	);
+	if (ret >= 0 && func & CRYPT_Z990_FUNC_MASK){
+		ret = src_len - ret;
+	}
+	return ret;
+}
+
+/**
+ * Tests if a specific z990 crypto function is implemented on the machine.
+ * @param func:	the function code of the specific function; 0 if op in general
+ * @return	1 if func available; 0 if func or op in general not available
+ */
+static inline int
+crypt_z990_func_available(int func)
+{
+	int ret;
+
+	struct crypt_z990_query_status status = {
+		.high = 0,
+		.low = 0
+	};
+	switch (func & CRYPT_Z990_OP_MASK){
+		case CRYPT_Z990_KM:
+			ret = crypt_z990_km(KM_QUERY, &status, NULL, NULL, 0);
+			break;
+		case CRYPT_Z990_KMC:
+			ret = crypt_z990_kmc(KMC_QUERY, &status, NULL, NULL, 0);
+			break;
+		case CRYPT_Z990_KIMD:
+			ret = crypt_z990_kimd(KIMD_QUERY, &status, NULL, 0);
+			break;
+		case CRYPT_Z990_KLMD:
+			ret = crypt_z990_klmd(KLMD_QUERY, &status, NULL, 0);
+			break;
+		case CRYPT_Z990_KMAC:
+			ret = crypt_z990_kmac(KMAC_QUERY, &status, NULL, 0);
+			break;
+		default:
+			ret = 0;
+			return ret;
+	}
+	if (ret >= 0){
+		func &= CRYPT_Z990_FUNC_MASK;
+		func &= 0x7f; //mask modifier bit
+		if (func < 64){
+			ret = (status.high >> (64 - func - 1)) & 0x1;
+		} else {
+			ret = (status.low >> (128 - func - 1)) & 0x1;
+		}
+	} else {
+		ret = 0;
+	}
+	return ret;
+}
+
+
+#endif // _CRYPTO_ARCH_S390_CRYPT_Z990_H
diff --git a/arch/s390/crypto/crypt_z990_query.c b/arch/s390/crypto/crypt_z990_query.c
new file mode 100644
index 0000000..7133983
--- /dev/null
+++ b/arch/s390/crypto/crypt_z990_query.c
@@ -0,0 +1,111 @@
+/*
+ * Cryptographic API.
+ *
+ * Support for z990 cryptographic instructions.
+ * Testing module for querying processor crypto capabilities.
+ *
+ * Copyright (c) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Thomas Spatzier (tspat@de.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/errno.h>
+#include "crypt_z990.h"
+
+static void
+query_available_functions(void)
+{
+	printk(KERN_INFO "#####################\n");
+	//query available KM functions
+	printk(KERN_INFO "KM_QUERY: %d\n",
+			crypt_z990_func_available(KM_QUERY));
+	printk(KERN_INFO "KM_DEA: %d\n",
+			crypt_z990_func_available(KM_DEA_ENCRYPT));
+	printk(KERN_INFO "KM_TDEA_128: %d\n",
+			crypt_z990_func_available(KM_TDEA_128_ENCRYPT));
+	printk(KERN_INFO "KM_TDEA_192: %d\n",
+			crypt_z990_func_available(KM_TDEA_192_ENCRYPT));
+	//query available KMC functions
+	printk(KERN_INFO "KMC_QUERY: %d\n",
+			crypt_z990_func_available(KMC_QUERY));
+	printk(KERN_INFO "KMC_DEA: %d\n",
+			crypt_z990_func_available(KMC_DEA_ENCRYPT));
+	printk(KERN_INFO "KMC_TDEA_128: %d\n",
+			crypt_z990_func_available(KMC_TDEA_128_ENCRYPT));
+	printk(KERN_INFO "KMC_TDEA_192: %d\n",
+			crypt_z990_func_available(KMC_TDEA_192_ENCRYPT));
+	//query available KIMD functions
+	printk(KERN_INFO "KIMD_QUERY: %d\n",
+			crypt_z990_func_available(KIMD_QUERY));
+	printk(KERN_INFO "KIMD_SHA_1: %d\n",
+			crypt_z990_func_available(KIMD_SHA_1));
+	//query available KLMD functions
+	printk(KERN_INFO "KLMD_QUERY: %d\n",
+			crypt_z990_func_available(KLMD_QUERY));
+	printk(KERN_INFO "KLMD_SHA_1: %d\n",
+			crypt_z990_func_available(KLMD_SHA_1));
+	//query available KMAC functions
+	printk(KERN_INFO "KMAC_QUERY: %d\n",
+			crypt_z990_func_available(KMAC_QUERY));
+	printk(KERN_INFO "KMAC_DEA: %d\n",
+			crypt_z990_func_available(KMAC_DEA));
+	printk(KERN_INFO "KMAC_TDEA_128: %d\n",
+			crypt_z990_func_available(KMAC_TDEA_128));
+	printk(KERN_INFO "KMAC_TDEA_192: %d\n",
+			crypt_z990_func_available(KMAC_TDEA_192));
+}
+
+static int
+init(void)
+{
+	struct crypt_z990_query_status status = {
+		.high = 0,
+		.low = 0
+	};
+
+	printk(KERN_INFO "crypt_z990: querying available crypto functions\n");
+	crypt_z990_km(KM_QUERY, &status, NULL, NULL, 0);
+	printk(KERN_INFO "KM: %016llx %016llx\n",
+			(unsigned long long) status.high,
+			(unsigned long long) status.low);
+	status.high = status.low = 0;
+	crypt_z990_kmc(KMC_QUERY, &status, NULL, NULL, 0);
+	printk(KERN_INFO "KMC: %016llx %016llx\n",
+			(unsigned long long) status.high,
+			(unsigned long long) status.low);
+	status.high = status.low = 0;
+	crypt_z990_kimd(KIMD_QUERY, &status, NULL, 0);
+	printk(KERN_INFO "KIMD: %016llx %016llx\n",
+			(unsigned long long) status.high,
+			(unsigned long long) status.low);
+	status.high = status.low = 0;
+	crypt_z990_klmd(KLMD_QUERY, &status, NULL, 0);
+	printk(KERN_INFO "KLMD: %016llx %016llx\n",
+			(unsigned long long) status.high,
+			(unsigned long long) status.low);
+	status.high = status.low = 0;
+	crypt_z990_kmac(KMAC_QUERY, &status, NULL, 0);
+	printk(KERN_INFO "KMAC: %016llx %016llx\n",
+			(unsigned long long) status.high,
+			(unsigned long long) status.low);
+
+	query_available_functions();
+	return -1;
+}
+
+static void __exit
+cleanup(void)
+{
+}
+
+module_init(init);
+module_exit(cleanup);
+
+MODULE_LICENSE("GPL");
diff --git a/arch/s390/crypto/crypto_des.h b/arch/s390/crypto/crypto_des.h
new file mode 100644
index 0000000..c964b64
--- /dev/null
+++ b/arch/s390/crypto/crypto_des.h
@@ -0,0 +1,18 @@
+/*
+ * Cryptographic API.
+ *
+ * Function for checking keys for the DES and Triple DES Encryption
+ * algorithms.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+#ifndef __CRYPTO_DES_H__
+#define __CRYPTO_DES_H__
+
+extern int crypto_des_check_key(const u8*, unsigned int, u32*);
+
+#endif //__CRYPTO_DES_H__
diff --git a/arch/s390/crypto/des_check_key.c b/arch/s390/crypto/des_check_key.c
new file mode 100644
index 0000000..e3f5c5f
--- /dev/null
+++ b/arch/s390/crypto/des_check_key.c
@@ -0,0 +1,130 @@
+/*
+ * Cryptographic API.
+ *
+ * Function for checking keys for the DES and Triple DES Encryption
+ * algorithms.
+ *
+ * Originally released as descore by Dana L. How <how@isl.stanford.edu>.
+ * Modified by Raimar Falke <rf13@inf.tu-dresden.de> for the Linux-Kernel.
+ * Derived from Cryptoapi and Nettle implementations, adapted for in-place
+ * scatterlist interface.  Changed LGPL to GPL per section 3 of the LGPL.
+ *
+ * s390 Version:
+ *   Copyright (C) 2003 IBM Deutschland GmbH, IBM Corporation
+ *   Author(s): Thomas Spatzier (tspat@de.ibm.com)
+ *
+ * Derived from "crypto/des.c"
+ *   Copyright (c) 1992 Dana L. How.
+ *   Copyright (c) Raimar Falke <rf13@inf.tu-dresden.de>
+ *   Copyright (c) Gisle Sælensminde <gisle@ii.uib.no>
+ *   Copyright (C) 2001 Niels Möller.
+ *   Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/crypto.h>
+
+#define ROR(d,c,o)	((d) = (d) >> (c) | (d) << (o))
+
+static const u8 parity[] = {
+	8,1,0,8,0,8,8,0,0,8,8,0,8,0,2,8,0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,3,
+	0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0,8,0,0,8,0,8,8,0,0,8,8,0,8,0,0,8,
+	0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0,8,0,0,8,0,8,8,0,0,8,8,0,8,0,0,8,
+	8,0,0,8,0,8,8,0,0,8,8,0,8,0,0,8,0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0,
+	0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0,8,0,0,8,0,8,8,0,0,8,8,0,8,0,0,8,
+	8,0,0,8,0,8,8,0,0,8,8,0,8,0,0,8,0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0,
+	8,0,0,8,0,8,8,0,0,8,8,0,8,0,0,8,0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0,
+	4,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0,8,5,0,8,0,8,8,0,0,8,8,0,8,0,6,8,
+};
+
+/*
+ * RFC2451: Weak key checks SHOULD be performed.
+ */
+int
+crypto_des_check_key(const u8 *key, unsigned int keylen, u32 *flags)
+{
+	u32 n, w;
+
+	n  = parity[key[0]]; n <<= 4;
+	n |= parity[key[1]]; n <<= 4;
+	n |= parity[key[2]]; n <<= 4;
+	n |= parity[key[3]]; n <<= 4;
+	n |= parity[key[4]]; n <<= 4;
+	n |= parity[key[5]]; n <<= 4;
+	n |= parity[key[6]]; n <<= 4;
+	n |= parity[key[7]];
+	w = 0x88888888L;
+
+	if ((*flags & CRYPTO_TFM_REQ_WEAK_KEY)
+	    && !((n - (w >> 3)) & w)) {  /* 1 in 10^10 keys passes this test */
+		if (n < 0x41415151) {
+			if (n < 0x31312121) {
+				if (n < 0x14141515) {
+					/* 01 01 01 01 01 01 01 01 */
+					if (n == 0x11111111) goto weak;
+					/* 01 1F 01 1F 01 0E 01 0E */
+					if (n == 0x13131212) goto weak;
+				} else {
+					/* 01 E0 01 E0 01 F1 01 F1 */
+					if (n == 0x14141515) goto weak;
+					/* 01 FE 01 FE 01 FE 01 FE */
+					if (n == 0x16161616) goto weak;
+				}
+			} else {
+				if (n < 0x34342525) {
+					/* 1F 01 1F 01 0E 01 0E 01 */
+					if (n == 0x31312121) goto weak;
+					/* 1F 1F 1F 1F 0E 0E 0E 0E (?) */
+					if (n == 0x33332222) goto weak;
+				} else {
+					/* 1F E0 1F E0 0E F1 0E F1 */
+					if (n == 0x34342525) goto weak;
+					/* 1F FE 1F FE 0E FE 0E FE */
+					if (n == 0x36362626) goto weak;
+				}
+			}
+		} else {
+			if (n < 0x61616161) {
+				if (n < 0x44445555) {
+					/* E0 01 E0 01 F1 01 F1 01 */
+					if (n == 0x41415151) goto weak;
+					/* E0 1F E0 1F F1 0E F1 0E */
+					if (n == 0x43435252) goto weak;
+				} else {
+					/* E0 E0 E0 E0 F1 F1 F1 F1 (?) */
+					if (n == 0x44445555) goto weak;
+					/* E0 FE E0 FE F1 FE F1 FE */
+					if (n == 0x46465656) goto weak;
+				}
+			} else {
+				if (n < 0x64646565) {
+					/* FE 01 FE 01 FE 01 FE 01 */
+					if (n == 0x61616161) goto weak;
+					/* FE 1F FE 1F FE 0E FE 0E */
+					if (n == 0x63636262) goto weak;
+				} else {
+					/* FE E0 FE E0 FE F1 FE F1 */
+					if (n == 0x64646565) goto weak;
+					/* FE FE FE FE FE FE FE FE */
+					if (n == 0x66666666) goto weak;
+				}
+			}
+		}
+	}
+	return 0;
+weak:
+	*flags |= CRYPTO_TFM_RES_WEAK_KEY;
+	return -EINVAL;
+}
+
+EXPORT_SYMBOL(crypto_des_check_key);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Key Check function for DES &  DES3 Cipher Algorithms");
diff --git a/arch/s390/crypto/des_z990.c b/arch/s390/crypto/des_z990.c
new file mode 100644
index 0000000..813cf37
--- /dev/null
+++ b/arch/s390/crypto/des_z990.c
@@ -0,0 +1,284 @@
+/*
+ * Cryptographic API.
+ *
+ * z990 implementation of the DES Cipher Algorithm.
+ *
+ * Copyright (c) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Thomas Spatzier (tspat@de.ibm.com)
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <asm/scatterlist.h>
+#include <linux/crypto.h>
+#include "crypt_z990.h"
+#include "crypto_des.h"
+
+#define DES_BLOCK_SIZE 8
+#define DES_KEY_SIZE 8
+
+#define DES3_128_KEY_SIZE	(2 * DES_KEY_SIZE)
+#define DES3_128_BLOCK_SIZE	DES_BLOCK_SIZE
+
+#define DES3_192_KEY_SIZE	(3 * DES_KEY_SIZE)
+#define DES3_192_BLOCK_SIZE	DES_BLOCK_SIZE
+
+struct crypt_z990_des_ctx {
+	u8 iv[DES_BLOCK_SIZE];
+	u8 key[DES_KEY_SIZE];
+};
+
+struct crypt_z990_des3_128_ctx {
+	u8 iv[DES_BLOCK_SIZE];
+	u8 key[DES3_128_KEY_SIZE];
+};
+
+struct crypt_z990_des3_192_ctx {
+	u8 iv[DES_BLOCK_SIZE];
+	u8 key[DES3_192_KEY_SIZE];
+};
+
+static int
+des_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags)
+{
+	struct crypt_z990_des_ctx *dctx;
+	int ret;
+
+	dctx = ctx;
+	//test if key is valid (not a weak key)
+	ret = crypto_des_check_key(key, keylen, flags);
+	if (ret == 0){
+		memcpy(dctx->key, key, keylen);
+	}
+	return ret;
+}
+
+
+static void
+des_encrypt(void *ctx, u8 *dst, const u8 *src)
+{
+	struct crypt_z990_des_ctx *dctx;
+
+	dctx = ctx;
+	crypt_z990_km(KM_DEA_ENCRYPT, dctx->key, dst, src, DES_BLOCK_SIZE);
+}
+
+static void
+des_decrypt(void *ctx, u8 *dst, const u8 *src)
+{
+	struct crypt_z990_des_ctx *dctx;
+
+	dctx = ctx;
+	crypt_z990_km(KM_DEA_DECRYPT, dctx->key, dst, src, DES_BLOCK_SIZE);
+}
+
+static struct crypto_alg des_alg = {
+	.cra_name		=	"des",
+	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
+	.cra_blocksize		=	DES_BLOCK_SIZE,
+	.cra_ctxsize		=	sizeof(struct crypt_z990_des_ctx),
+	.cra_module		=	THIS_MODULE,
+	.cra_list		=	LIST_HEAD_INIT(des_alg.cra_list),
+	.cra_u			=	{ .cipher = {
+	.cia_min_keysize	=	DES_KEY_SIZE,
+	.cia_max_keysize	=	DES_KEY_SIZE,
+	.cia_setkey		= 	des_setkey,
+	.cia_encrypt		=	des_encrypt,
+	.cia_decrypt		=	des_decrypt } }
+};
+
+/*
+ * RFC2451:
+ *
+ *   For DES-EDE3, there is no known need to reject weak or
+ *   complementation keys.  Any weakness is obviated by the use of
+ *   multiple keys.
+ *
+ *   However, if the two  independent 64-bit keys are equal,
+ *   then the DES3 operation is simply the same as DES.
+ *   Implementers MUST reject keys that exhibit this property.
+ *
+ */
+static int
+des3_128_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags)
+{
+	int i, ret;
+	struct crypt_z990_des3_128_ctx *dctx;
+	const u8* temp_key = key;
+
+	dctx = ctx;
+	if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE))) {
+
+		*flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
+		return -EINVAL;
+	}
+	for (i = 0; i < 2; i++,	temp_key += DES_KEY_SIZE) {
+		ret = crypto_des_check_key(temp_key, DES_KEY_SIZE, flags);
+		if (ret < 0)
+			return ret;
+	}
+	memcpy(dctx->key, key, keylen);
+	return 0;
+}
+
+static void
+des3_128_encrypt(void *ctx, u8 *dst, const u8 *src)
+{
+	struct crypt_z990_des3_128_ctx *dctx;
+
+	dctx = ctx;
+	crypt_z990_km(KM_TDEA_128_ENCRYPT, dctx->key, dst, (void*)src,
+			DES3_128_BLOCK_SIZE);
+}
+
+static void
+des3_128_decrypt(void *ctx, u8 *dst, const u8 *src)
+{
+	struct crypt_z990_des3_128_ctx *dctx;
+
+	dctx = ctx;
+	crypt_z990_km(KM_TDEA_128_DECRYPT, dctx->key, dst, (void*)src,
+			DES3_128_BLOCK_SIZE);
+}
+
+static struct crypto_alg des3_128_alg = {
+	.cra_name		=	"des3_ede128",
+	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
+	.cra_blocksize		=	DES3_128_BLOCK_SIZE,
+	.cra_ctxsize		=	sizeof(struct crypt_z990_des3_128_ctx),
+	.cra_module		=	THIS_MODULE,
+	.cra_list		=	LIST_HEAD_INIT(des3_128_alg.cra_list),
+	.cra_u			=	{ .cipher = {
+	.cia_min_keysize	=	DES3_128_KEY_SIZE,
+	.cia_max_keysize	=	DES3_128_KEY_SIZE,
+	.cia_setkey		= 	des3_128_setkey,
+	.cia_encrypt		=	des3_128_encrypt,
+	.cia_decrypt		=	des3_128_decrypt } }
+};
+
+/*
+ * RFC2451:
+ *
+ *   For DES-EDE3, there is no known need to reject weak or
+ *   complementation keys.  Any weakness is obviated by the use of
+ *   multiple keys.
+ *
+ *   However, if the first two or last two independent 64-bit keys are
+ *   equal (k1 == k2 or k2 == k3), then the DES3 operation is simply the
+ *   same as DES.  Implementers MUST reject keys that exhibit this
+ *   property.
+ *
+ */
+static int
+des3_192_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags)
+{
+	int i, ret;
+	struct crypt_z990_des3_192_ctx *dctx;
+	const u8* temp_key;
+
+	dctx = ctx;
+	temp_key = key;
+	if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
+	    memcmp(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2],
+	    					DES_KEY_SIZE))) {
+
+		*flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
+		return -EINVAL;
+	}
+	for (i = 0; i < 3; i++, temp_key += DES_KEY_SIZE) {
+		ret = crypto_des_check_key(temp_key, DES_KEY_SIZE, flags);
+		if (ret < 0){
+			return ret;
+		}
+	}
+	memcpy(dctx->key, key, keylen);
+	return 0;
+}
+
+static void
+des3_192_encrypt(void *ctx, u8 *dst, const u8 *src)
+{
+	struct crypt_z990_des3_192_ctx *dctx;
+
+	dctx = ctx;
+	crypt_z990_km(KM_TDEA_192_ENCRYPT, dctx->key, dst, (void*)src,
+			DES3_192_BLOCK_SIZE);
+}
+
+static void
+des3_192_decrypt(void *ctx, u8 *dst, const u8 *src)
+{
+	struct crypt_z990_des3_192_ctx *dctx;
+
+	dctx = ctx;
+	crypt_z990_km(KM_TDEA_192_DECRYPT, dctx->key, dst, (void*)src,
+			DES3_192_BLOCK_SIZE);
+}
+
+static struct crypto_alg des3_192_alg = {
+	.cra_name		=	"des3_ede",
+	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
+	.cra_blocksize		=	DES3_192_BLOCK_SIZE,
+	.cra_ctxsize		=	sizeof(struct crypt_z990_des3_192_ctx),
+	.cra_module		=	THIS_MODULE,
+	.cra_list		=	LIST_HEAD_INIT(des3_192_alg.cra_list),
+	.cra_u			=	{ .cipher = {
+	.cia_min_keysize	=	DES3_192_KEY_SIZE,
+	.cia_max_keysize	=	DES3_192_KEY_SIZE,
+	.cia_setkey		= 	des3_192_setkey,
+	.cia_encrypt		=	des3_192_encrypt,
+	.cia_decrypt		=	des3_192_decrypt } }
+};
+
+
+
+static int
+init(void)
+{
+	int ret;
+
+	if (!crypt_z990_func_available(KM_DEA_ENCRYPT) ||
+	    !crypt_z990_func_available(KM_TDEA_128_ENCRYPT) ||
+	    !crypt_z990_func_available(KM_TDEA_192_ENCRYPT)){
+		return -ENOSYS;
+	}
+
+	ret = 0;
+	ret |= (crypto_register_alg(&des_alg) == 0)? 0:1;
+	ret |= (crypto_register_alg(&des3_128_alg) == 0)? 0:2;
+	ret |= (crypto_register_alg(&des3_192_alg) == 0)? 0:4;
+	if (ret){
+		crypto_unregister_alg(&des3_192_alg);
+		crypto_unregister_alg(&des3_128_alg);
+		crypto_unregister_alg(&des_alg);
+		return -EEXIST;
+	}
+
+	printk(KERN_INFO "crypt_z990: des_z990 loaded.\n");
+	return 0;
+}
+
+static void __exit
+fini(void)
+{
+	crypto_unregister_alg(&des3_192_alg);
+	crypto_unregister_alg(&des3_128_alg);
+	crypto_unregister_alg(&des_alg);
+}
+
+module_init(init);
+module_exit(fini);
+
+MODULE_ALIAS("des");
+MODULE_ALIAS("des3_ede");
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms");
diff --git a/arch/s390/crypto/sha1_z990.c b/arch/s390/crypto/sha1_z990.c
new file mode 100644
index 0000000..298174d
--- /dev/null
+++ b/arch/s390/crypto/sha1_z990.c
@@ -0,0 +1,167 @@
+/*
+ * Cryptographic API.
+ *
+ * z990 implementation of the SHA1 Secure Hash Algorithm.
+ *
+ * Derived from cryptoapi implementation, adapted for in-place
+ * scatterlist interface.  Originally based on the public domain
+ * implementation written by Steve Reid.
+ *
+ * s390 Version:
+ *   Copyright (C) 2003 IBM Deutschland GmbH, IBM Corporation
+ *   Author(s): Thomas Spatzier (tspat@de.ibm.com)
+ *
+ * Derived from "crypto/sha1.c"
+ *   Copyright (c) Alan Smithee.
+ *   Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
+ *   Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/crypto.h>
+#include <asm/scatterlist.h>
+#include <asm/byteorder.h>
+#include "crypt_z990.h"
+
+#define SHA1_DIGEST_SIZE	20
+#define SHA1_BLOCK_SIZE		64
+
+struct crypt_z990_sha1_ctx {
+        u64 count;
+        u32 state[5];
+	u32 buf_len;
+        u8 buffer[2 * SHA1_BLOCK_SIZE];
+};
+
+static void
+sha1_init(void *ctx)
+{
+	static const struct crypt_z990_sha1_ctx initstate = {
+		.state = {
+			0x67452301,
+			0xEFCDAB89,
+			0x98BADCFE,
+			0x10325476,
+			0xC3D2E1F0
+		},
+	};
+	memcpy(ctx, &initstate, sizeof(initstate));
+}
+
+static void
+sha1_update(void *ctx, const u8 *data, unsigned int len)
+{
+	struct crypt_z990_sha1_ctx *sctx;
+	long imd_len;
+
+	sctx = ctx;
+	sctx->count += len * 8; //message bit length
+
+	//anything in buffer yet? -> must be completed
+	if (sctx->buf_len && (sctx->buf_len + len) >= SHA1_BLOCK_SIZE) {
+		//complete full block and hash
+		memcpy(sctx->buffer + sctx->buf_len, data,
+				SHA1_BLOCK_SIZE - sctx->buf_len);
+		crypt_z990_kimd(KIMD_SHA_1, sctx->state, sctx->buffer,
+				SHA1_BLOCK_SIZE);
+		data += SHA1_BLOCK_SIZE - sctx->buf_len;
+		len -= SHA1_BLOCK_SIZE - sctx->buf_len;
+		sctx->buf_len = 0;
+	}
+
+	//rest of data contains full blocks?
+	imd_len = len & ~0x3ful;
+	if (imd_len){
+		crypt_z990_kimd(KIMD_SHA_1, sctx->state, data, imd_len);
+		data += imd_len;
+		len -= imd_len;
+	}
+	//anything left? store in buffer
+	if (len){
+		memcpy(sctx->buffer + sctx->buf_len , data, len);
+		sctx->buf_len += len;
+	}
+}
+
+
+static void
+pad_message(struct crypt_z990_sha1_ctx* sctx)
+{
+	int index;
+
+	index = sctx->buf_len;
+	sctx->buf_len = (sctx->buf_len < 56)?
+		SHA1_BLOCK_SIZE:2 * SHA1_BLOCK_SIZE;
+	//start pad with 1
+	sctx->buffer[index] = 0x80;
+	//pad with zeros
+	index++;
+	memset(sctx->buffer + index, 0x00, sctx->buf_len - index);
+	//append length
+	memcpy(sctx->buffer + sctx->buf_len - 8, &sctx->count,
+			sizeof sctx->count);
+}
+
+/* Add padding and return the message digest. */
+static void
+sha1_final(void* ctx, u8 *out)
+{
+	struct crypt_z990_sha1_ctx *sctx = ctx;
+
+	//must perform manual padding
+	pad_message(sctx);
+	crypt_z990_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, sctx->buf_len);
+	//copy digest to out
+	memcpy(out, sctx->state, SHA1_DIGEST_SIZE);
+	/* Wipe context */
+	memset(sctx, 0, sizeof *sctx);
+}
+
+static struct crypto_alg alg = {
+	.cra_name	=	"sha1",
+	.cra_flags	=	CRYPTO_ALG_TYPE_DIGEST,
+	.cra_blocksize	=	SHA1_BLOCK_SIZE,
+	.cra_ctxsize	=	sizeof(struct crypt_z990_sha1_ctx),
+	.cra_module	=	THIS_MODULE,
+	.cra_list       =       LIST_HEAD_INIT(alg.cra_list),
+	.cra_u		=	{ .digest = {
+	.dia_digestsize	=	SHA1_DIGEST_SIZE,
+	.dia_init   	= 	sha1_init,
+	.dia_update 	=	sha1_update,
+	.dia_final  	=	sha1_final } }
+};
+
+static int
+init(void)
+{
+	int ret = -ENOSYS;
+
+	if (crypt_z990_func_available(KIMD_SHA_1)){
+		ret = crypto_register_alg(&alg);
+		if (ret == 0){
+			printk(KERN_INFO "crypt_z990: sha1_z990 loaded.\n");
+		}
+	}
+	return ret;
+}
+
+static void __exit
+fini(void)
+{
+	crypto_unregister_alg(&alg);
+}
+
+module_init(init);
+module_exit(fini);
+
+MODULE_ALIAS("sha1");
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
new file mode 100644
index 0000000..1358b42
--- /dev/null
+++ b/arch/s390/defconfig
@@ -0,0 +1,589 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.11
+# Wed Mar  2 16:57:55 2005
+#
+CONFIG_MMU=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_ARCH_S390=y
+CONFIG_UID16=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_LOCK_KERNEL=y
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
+CONFIG_LOG_BUF_SHIFT=17
+CONFIG_HOTPLUG=y
+CONFIG_KOBJECT_UEVENT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+# CONFIG_TINY_SHMEM is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+# CONFIG_MODULE_UNLOAD is not set
+CONFIG_OBSOLETE_MODPARM=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+CONFIG_STOP_MACHINE=y
+
+#
+# Base setup
+#
+
+#
+# Processor type and features
+#
+# CONFIG_ARCH_S390X is not set
+# CONFIG_64BIT is not set
+CONFIG_ARCH_S390_31=y
+CONFIG_SMP=y
+CONFIG_NR_CPUS=32
+CONFIG_HOTPLUG_CPU=y
+CONFIG_MATHEMU=y
+
+#
+# Code generation options
+#
+CONFIG_MARCH_G5=y
+# CONFIG_MARCH_Z900 is not set
+# CONFIG_MARCH_Z990 is not set
+CONFIG_PACK_STACK=y
+# CONFIG_SMALL_STACK is not set
+# CONFIG_CHECK_STACK is not set
+# CONFIG_WARN_STACK is not set
+
+#
+# I/O subsystem configuration
+#
+CONFIG_MACHCHK_WARNING=y
+CONFIG_QDIO=y
+# CONFIG_QDIO_PERF_STATS is not set
+# CONFIG_QDIO_DEBUG is not set
+
+#
+# Misc
+#
+# CONFIG_PREEMPT is not set
+CONFIG_IPL=y
+# CONFIG_IPL_TAPE is not set
+CONFIG_IPL_VM=y
+CONFIG_BINFMT_ELF=y
+CONFIG_BINFMT_MISC=m
+# CONFIG_PROCESS_DEBUG is not set
+CONFIG_PFAULT=y
+# CONFIG_SHARED_KERNEL is not set
+# CONFIG_CMM is not set
+# CONFIG_VIRT_TIMER is not set
+CONFIG_NO_IDLE_HZ=y
+CONFIG_NO_IDLE_HZ_INIT=y
+# CONFIG_PCMCIA is not set
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI=y
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=y
+# CONFIG_CHR_DEV_OSST is not set
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=y
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+
+#
+# SCSI Transport Attributes
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+CONFIG_SCSI_FC_ATTRS=y
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_SCSI_SATA is not set
+# CONFIG_SCSI_DEBUG is not set
+CONFIG_ZFCP=y
+CONFIG_CCW=y
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=m
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_LBD is not set
+# CONFIG_CDROM_PKTCDVD is not set
+
+#
+# S/390 block device drivers
+#
+CONFIG_BLK_DEV_XPRAM=m
+# CONFIG_DCSSBLK is not set
+CONFIG_DASD=y
+CONFIG_DASD_PROFILE=y
+CONFIG_DASD_ECKD=y
+CONFIG_DASD_FBA=y
+CONFIG_DASD_DIAG=y
+# CONFIG_DASD_CMB is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+# CONFIG_MD_RAID10 is not set
+CONFIG_MD_RAID5=m
+# CONFIG_MD_RAID6 is not set
+CONFIG_MD_MULTIPATH=m
+# CONFIG_MD_FAULTY is not set
+# CONFIG_BLK_DEV_DM is not set
+
+#
+# Character device drivers
+#
+CONFIG_UNIX98_PTYS=y
+CONFIG_UNIX98_PTY_COUNT=2048
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+
+#
+# S/390 character device drivers
+#
+CONFIG_TN3270=y
+CONFIG_TN3270_TTY=y
+CONFIG_TN3270_FS=m
+CONFIG_TN3270_CONSOLE=y
+CONFIG_TN3215=y
+CONFIG_TN3215_CONSOLE=y
+CONFIG_CCW_CONSOLE=y
+CONFIG_SCLP=y
+CONFIG_SCLP_TTY=y
+CONFIG_SCLP_CONSOLE=y
+# CONFIG_SCLP_VT220_TTY is not set
+CONFIG_SCLP_CPI=m
+CONFIG_S390_TAPE=m
+
+#
+# S/390 tape interface support
+#
+CONFIG_S390_TAPE_BLOCK=y
+
+#
+# S/390 tape hardware support
+#
+CONFIG_S390_TAPE_34XX=m
+# CONFIG_VMLOGRDR is not set
+# CONFIG_MONREADER is not set
+# CONFIG_DCSS_SHM is not set
+
+#
+# Cryptographic devices
+#
+CONFIG_Z90CRYPT=m
+
+#
+# Networking support
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+# CONFIG_NETLINK_DEV is not set
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+# CONFIG_IP_PNP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_IP_TCPDIAG=y
+CONFIG_IP_TCPDIAG_IPV6=y
+CONFIG_IPV6=y
+# CONFIG_IPV6_PRIVACY is not set
+# CONFIG_INET6_AH is not set
+# CONFIG_INET6_ESP is not set
+# CONFIG_INET6_IPCOMP is not set
+# CONFIG_INET6_TUNNEL is not set
+# CONFIG_IPV6_TUNNEL is not set
+# CONFIG_NETFILTER is not set
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CLK_JIFFIES=y
+# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
+# CONFIG_NET_SCH_CLK_CPU is not set
+CONFIG_NET_SCH_CBQ=m
+# CONFIG_NET_SCH_HTB is not set
+# CONFIG_NET_SCH_HFSC is not set
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+# CONFIG_NET_SCH_NETEM is not set
+# CONFIG_NET_SCH_INGRESS is not set
+CONFIG_NET_QOS=y
+CONFIG_NET_ESTIMATOR=y
+CONFIG_NET_CLS=y
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_ROUTE=y
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+# CONFIG_CLS_U32_PERF is not set
+# CONFIG_NET_CLS_IND is not set
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+# CONFIG_NET_CLS_ACT is not set
+CONFIG_NET_CLS_POLICE=y
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=m
+CONFIG_BONDING=m
+CONFIG_EQUALIZER=m
+CONFIG_TUN=m
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+# CONFIG_MII is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+
+#
+# Ethernet (10000 Mbit)
+#
+
+#
+# Token Ring devices
+#
+# CONFIG_TR is not set
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+
+#
+# S/390 network device drivers
+#
+CONFIG_LCS=m
+CONFIG_CTC=m
+CONFIG_IUCV=m
+# CONFIG_NETIUCV is not set
+# CONFIG_SMSGIUCV is not set
+CONFIG_QETH=y
+
+#
+# Gigabit Ethernet default settings
+#
+# CONFIG_QETH_IPV6 is not set
+# CONFIG_QETH_PERF_STATS is not set
+CONFIG_CCWGROUP=y
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+
+#
+# XFS support
+#
+# CONFIG_XFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+# CONFIG_DEVFS_FS is not set
+# CONFIG_DEVPTS_FS_XATTR is not set
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_XATTR is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_DIRECTIO is not set
+CONFIG_NFSD=y
+CONFIG_NFSD_V3=y
+# CONFIG_NFSD_V4 is not set
+CONFIG_NFSD_TCP=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=y
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+CONFIG_IBM_PARTITION=y
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+
+#
+# Native Language Support
+#
+# CONFIG_NLS is not set
+
+#
+# Profiling support
+#
+# CONFIG_PROFILING is not set
+
+#
+# Kernel hacking
+#
+CONFIG_DEBUG_KERNEL=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_FS is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_MD4 is not set
+# CONFIG_CRYPTO_MD5 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA1_Z990 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_WP512 is not set
+# CONFIG_CRYPTO_DES is not set
+# CONFIG_CRYPTO_DES_Z990 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Hardware crypto devices
+#
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+CONFIG_CRC32=m
+# CONFIG_LIBCRC32C is not set
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
new file mode 100644
index 0000000..b41e0e1
--- /dev/null
+++ b/arch/s390/kernel/Makefile
@@ -0,0 +1,31 @@
+#
+# Makefile for the linux kernel.
+#
+
+EXTRA_AFLAGS	:= -traditional
+
+obj-y	:=  bitmap.o traps.o time.o process.o \
+            setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
+            semaphore.o s390_ext.o debug.o profile.o irq.o
+
+extra-$(CONFIG_ARCH_S390_31)	+= head.o 
+extra-$(CONFIG_ARCH_S390X)	+= head64.o 
+extra-y				+= init_task.o vmlinux.lds
+
+obj-$(CONFIG_MODULES)		+= s390_ksyms.o module.o
+obj-$(CONFIG_SMP)		+= smp.o
+
+obj-$(CONFIG_S390_SUPPORT)	+= compat_linux.o compat_signal.o \
+					compat_ioctl.o compat_wrapper.o \
+					compat_exec_domain.o
+obj-$(CONFIG_BINFMT_ELF32)	+= binfmt_elf32.o
+
+obj-$(CONFIG_ARCH_S390_31)	+= entry.o reipl.o
+obj-$(CONFIG_ARCH_S390X)	+= entry64.o reipl64.o
+
+obj-$(CONFIG_VIRT_TIMER)	+= vtime.o
+
+#
+# This is just to get the dependencies...
+#
+binfmt_elf32.o:	$(TOPDIR)/fs/binfmt_elf.c
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
new file mode 100644
index 0000000..3f7018e
--- /dev/null
+++ b/arch/s390/kernel/asm-offsets.c
@@ -0,0 +1,49 @@
+/*
+ * Generate definitions needed by assembly language modules.
+ * This code generates raw asm output which is post-processed to extract
+ * and format the required data.
+ */
+
+#include <linux/config.h>
+#include <linux/sched.h>
+
+/* Use marker if you need to separate the values later */
+
+#define DEFINE(sym, val, marker) \
+	asm volatile("\n->" #sym " %0 " #val " " #marker : : "i" (val))
+
+#define BLANK() asm volatile("\n->" : : )
+
+/*
+ * Not a real program: each DEFINE() emits a "->sym value" marker line
+ * into the compiler's assembly output, which kbuild post-processes into
+ * asm-offsets.h for use by the assembly entry code. The marker format is
+ * load-bearing; do not reorder or reformat casually.
+ */
+int main(void)
+{
+	DEFINE(__THREAD_info, offsetof(struct task_struct, thread_info),);
+	DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp),);
+	DEFINE(__THREAD_per, offsetof(struct task_struct, thread.per_info),);
+	DEFINE(__THREAD_mm_segment,
+	       offsetof(struct task_struct, thread.mm_segment),);
+	BLANK();
+	DEFINE(__TASK_pid, offsetof(struct task_struct, pid),);
+	BLANK();
+	DEFINE(__PER_atmid, offsetof(per_struct, lowcore.words.perc_atmid),);
+	DEFINE(__PER_address, offsetof(per_struct, lowcore.words.address),);
+	DEFINE(__PER_access_id, offsetof(per_struct, lowcore.words.access_id),);
+	BLANK();
+	DEFINE(__TI_task, offsetof(struct thread_info, task),);
+	DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain),);
+	DEFINE(__TI_flags, offsetof(struct thread_info, flags),);
+	DEFINE(__TI_cpu, offsetof(struct thread_info, cpu),);
+	DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count),);
+	BLANK();
+	DEFINE(__PT_ARGS, offsetof(struct pt_regs, args),);
+	DEFINE(__PT_PSW, offsetof(struct pt_regs, psw),);
+	DEFINE(__PT_GPRS, offsetof(struct pt_regs, gprs),);
+	DEFINE(__PT_ORIG_GPR2, offsetof(struct pt_regs, orig_gpr2),);
+	DEFINE(__PT_ILC, offsetof(struct pt_regs, ilc),);
+	DEFINE(__PT_TRAP, offsetof(struct pt_regs, trap),);
+	DEFINE(__PT_SIZE, sizeof(struct pt_regs),);
+	BLANK();
+	DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain),);
+	DEFINE(__SF_GPRS, offsetof(struct stack_frame, gprs),);
+	DEFINE(__SF_EMPTY, offsetof(struct stack_frame, empty1),);
+	return 0;
+}
diff --git a/arch/s390/kernel/binfmt_elf32.c b/arch/s390/kernel/binfmt_elf32.c
new file mode 100644
index 0000000..03ba589
--- /dev/null
+++ b/arch/s390/kernel/binfmt_elf32.c
@@ -0,0 +1,210 @@
+/*
+ * Support for 32-bit Linux for S390 ELF binaries.
+ *
+ * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Gerhard Tonn (ton@de.ibm.com)
+ *
+ * Heavily inspired by the 32-bit Sparc compat code which is
+ * Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek   (jj@ultra.linux.cz)
+ */
+
+#define __ASMS390_ELF_H
+
+#include <linux/time.h>
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS	ELFCLASS32
+#define ELF_DATA	ELFDATA2MSB
+#define ELF_ARCH	EM_S390
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) \
+	(((x)->e_machine == EM_S390 || (x)->e_machine == EM_S390_OLD) \
+         && (x)->e_ident[EI_CLASS] == ELF_CLASS)
+
+/* ELF register definitions */
+#define NUM_GPRS      16
+#define NUM_FPRS      16
+#define NUM_ACRS      16    
+
+/* For SVR4/S390 the function pointer to be registered with `atexit` is
+   passed in R14. */
+#define ELF_PLAT_INIT(_r, load_addr) \
+	do { \
+		_r->gprs[14] = 0; \
+	} while(0)
+
+#define USE_ELF_CORE_DUMP
+#define ELF_EXEC_PAGESIZE       4096
+
+/* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
+   use of this is to invoke "./ld.so someprog" to test out a new version of
+   the loader.  We need to make sure that it is out of the way of the program
+   that it will "exec", and that there is sufficient room for the brk.  */
+
+#define ELF_ET_DYN_BASE         (TASK_SIZE / 3 * 2)
+
+/* Wow, the "main" arch needs arch dependent functions too.. :) */
+
+/* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
+   now struct_user_regs, they are different) */
+
+#define ELF_CORE_COPY_REGS(pr_reg, regs) dump_regs32(regs, &pr_reg);
+
+#define ELF_CORE_COPY_TASK_REGS(tsk, regs) dump_task_regs32(tsk, regs)
+
+#define ELF_CORE_COPY_FPREGS(tsk, fpregs) dump_task_fpu(tsk, fpregs)
+
+/* This yields a mask that user programs can use to figure out what
+   instruction set this CPU supports. */
+
+#define ELF_HWCAP (0)
+
+/* This yields a string that ld.so will use to load implementation
+   specific libraries for optimization.  This is more specific in
+   intent than poking at uname or /proc/cpuinfo.
+
+   For the moment, we have only optimizations for the Intel generations,
+   but that could change... */
+
+#define ELF_PLATFORM (NULL)
+
+#define SET_PERSONALITY(ex, ibcs2)			\
+do {							\
+	if (ibcs2)                                      \
+		set_personality(PER_SVR4);              \
+	else if (current->personality != PER_LINUX32)   \
+		set_personality(PER_LINUX);             \
+	set_thread_flag(TIF_31BIT);			\
+} while (0)
+
+#include "compat_linux.h"
+
+typedef _s390_fp_regs32 elf_fpregset_t;
+
+typedef struct
+{
+	
+	_psw_t32	psw;
+	__u32		gprs[__NUM_GPRS]; 
+	__u32		acrs[__NUM_ACRS]; 
+	__u32		orig_gpr2;
+} s390_regs32;
+typedef s390_regs32 elf_gregset_t;
+
+/*
+ * Fill the 32-bit compat register set from the current task's pt_regs:
+ * the 64-bit PSW is narrowed by copying the first 4 bytes of the mask
+ * and the last 4 bytes of the address (big-endian, hence the +4 — i.e.
+ * the low half holds the 31-bit address). Access registers are read
+ * live from the CPU via save_access_regs(). Always returns 1 (success
+ * convention for ELF_CORE_COPY_REGS).
+ */
+static inline int dump_regs32(struct pt_regs *ptregs, elf_gregset_t *regs)
+{
+	int i;
+
+	memcpy(&regs->psw.mask, &ptregs->psw.mask, 4);
+	memcpy(&regs->psw.addr, (char *)&ptregs->psw.addr + 4, 4);
+	for (i = 0; i < NUM_GPRS; i++)
+		regs->gprs[i] = ptregs->gprs[i];
+	save_access_regs(regs->acrs);
+	regs->orig_gpr2 = ptregs->orig_gpr2;
+	return 1;
+}
+
+/*
+ * Like dump_regs32(), but for an arbitrary task: the PSW and general
+ * registers come from the task's saved pt_regs, and the access registers
+ * from the saved copy in thread.acrs rather than the live CPU (the task
+ * is presumably stopped — caller's responsibility). Always returns 1.
+ */
+static inline int dump_task_regs32(struct task_struct *tsk, elf_gregset_t *regs)
+{
+	struct pt_regs *ptregs = __KSTK_PTREGS(tsk);
+	int i;
+
+	memcpy(&regs->psw.mask, &ptregs->psw.mask, 4);
+	memcpy(&regs->psw.addr, (char *)&ptregs->psw.addr + 4, 4);
+	for (i = 0; i < NUM_GPRS; i++)
+		regs->gprs[i] = ptregs->gprs[i];
+	memcpy(regs->acrs, tsk->thread.acrs, sizeof(regs->acrs));
+	regs->orig_gpr2 = ptregs->orig_gpr2;
+	return 1;
+}
+
+/*
+ * Copy a task's floating point registers for a core dump: for the
+ * current task save them from the CPU, for any other task use the copy
+ * already saved in thread.fp_regs. Always returns 1.
+ */
+static inline int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
+{
+	if (tsk == current)
+		save_fp_regs((s390_fp_regs *) fpregs);
+	else
+		memcpy(fpregs, &tsk->thread.fp_regs, sizeof(elf_fpregset_t));
+	return 1;
+}
+
+#include <asm/processor.h>
+#include <linux/module.h>
+#include <linux/config.h>
+#include <linux/elfcore.h>
+#include <linux/binfmts.h>
+#include <linux/compat.h>
+
+#define elf_prstatus elf_prstatus32
+struct elf_prstatus32
+{
+	struct elf_siginfo pr_info;	/* Info associated with signal */
+	short	pr_cursig;		/* Current signal */
+	u32	pr_sigpend;	/* Set of pending signals */
+	u32	pr_sighold;	/* Set of held signals */
+	pid_t	pr_pid;
+	pid_t	pr_ppid;
+	pid_t	pr_pgrp;
+	pid_t	pr_sid;
+	struct compat_timeval pr_utime;	/* User time */
+	struct compat_timeval pr_stime;	/* System time */
+	struct compat_timeval pr_cutime;	/* Cumulative user time */
+	struct compat_timeval pr_cstime;	/* Cumulative system time */
+	elf_gregset_t pr_reg;	/* GP registers */
+	int pr_fpvalid;		/* True if math co-processor being used.  */
+};
+
+#define elf_prpsinfo elf_prpsinfo32
+struct elf_prpsinfo32
+{
+	char	pr_state;	/* numeric process state */
+	char	pr_sname;	/* char for pr_state */
+	char	pr_zomb;	/* zombie */
+	char	pr_nice;	/* nice val */
+	u32	pr_flag;	/* flags */
+	u16	pr_uid;
+	u16	pr_gid;
+	pid_t	pr_pid, pr_ppid, pr_pgrp, pr_sid;
+	/* Lots missing */
+	char	pr_fname[16];	/* filename of executable */
+	char	pr_psargs[ELF_PRARGSZ];	/* initial part of arg list */
+};
+
+#include <linux/highuid.h>
+
+#undef NEW_TO_OLD_UID
+#undef NEW_TO_OLD_GID
+#define NEW_TO_OLD_UID(uid) ((uid) > 65535) ? (u16)overflowuid : (u16)(uid)
+#define NEW_TO_OLD_GID(gid) ((gid) > 65535) ? (u16)overflowgid : (u16)(gid) 
+
+#define elf_addr_t	u32
+/*
+#define init_elf_binfmt init_elf32_binfmt
+*/
+
+#undef start_thread
+#define start_thread                    start_thread31 
+
+MODULE_DESCRIPTION("Binary format loader for compatibility with 32bit Linux for S390 binaries,"
+                   " Copyright 2000 IBM Corporation"); 
+MODULE_AUTHOR("Gerhard Tonn <ton@de.ibm.com>");
+
+#undef MODULE_DESCRIPTION
+#undef MODULE_AUTHOR
+
+#undef cputime_to_timeval
+#define cputime_to_timeval cputime_to_compat_timeval
+/*
+ * Split a cputime value into a 32-bit compat timeval.
+ * Assumes cputime_t counts microseconds — confirm against the s390
+ * asm/cputime.h definition in use.
+ */
+static __inline__ void
+cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
+{
+	value->tv_usec = cputime % 1000000;
+	value->tv_sec = cputime / 1000000;
+}
+
+#include "../../../fs/binfmt_elf.c"
+
diff --git a/arch/s390/kernel/bitmap.S b/arch/s390/kernel/bitmap.S
new file mode 100644
index 0000000..dfb41f9
--- /dev/null
+++ b/arch/s390/kernel/bitmap.S
@@ -0,0 +1,56 @@
+/*
+ *  arch/s390/kernel/bitmap.S
+ *    Bitmaps for set_bit, clear_bit, test_and_set_bit, ...
+ *    See include/asm-s390/{bitops.h|posix_types.h} for details
+ *
+ *  S390 version
+ *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ */
+
+# _oi_bitmap[n]: byte mask with only bit n set — operand table for the
+# OI (or-immediate) based set_bit family.
+         .globl _oi_bitmap
+_oi_bitmap:
+         .byte  0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80
+
+# _ni_bitmap[n]: byte mask with only bit n clear — operand table for the
+# NI (and-immediate) based clear_bit family.
+         .globl _ni_bitmap
+_ni_bitmap:
+         .byte  0xFE,0xFD,0xFB,0xF7,0xEF,0xDF,0xBF,0x7F
+
+         .globl _zb_findmap
+_zb_findmap:
+         .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
+         .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5
+         .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4 
+         .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6
+         .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
+         .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5
+         .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
+         .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,7
+         .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
+         .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5
+         .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
+         .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6
+         .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
+         .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5
+         .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
+         .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,8
+
+         .globl _sb_findmap
+_sb_findmap:
+         .byte  8,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+         .byte  4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+         .byte  5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+         .byte  4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+         .byte  6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+         .byte  4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+         .byte  5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+         .byte  4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+         .byte  7,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+         .byte  4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+         .byte  5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+         .byte  4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+         .byte  6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+         .byte  4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+         .byte  5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+         .byte  4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+
diff --git a/arch/s390/kernel/compat_exec_domain.c b/arch/s390/kernel/compat_exec_domain.c
new file mode 100644
index 0000000..71d27c4
--- /dev/null
+++ b/arch/s390/kernel/compat_exec_domain.c
@@ -0,0 +1,30 @@
+/*
+ * Support for 32-bit Linux for S390 personality.
+ *
+ * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Gerhard Tonn (ton@de.ibm.com)
+ *
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/personality.h>
+#include <linux/sched.h>
+
+/* Exec domain used to run 31-bit (PER_LINUX32) binaries on this kernel. */
+struct exec_domain s390_exec_domain;
+
+/*
+ * Register the "Linux/s390" exec domain, covering exactly the
+ * PER_LINUX32 personality and inheriting the default signal maps.
+ * Runs once at boot via __initcall; always reports success.
+ */
+static int __init
+s390_init (void)
+{
+	s390_exec_domain.name = "Linux/s390";
+	s390_exec_domain.handler = NULL;
+	s390_exec_domain.pers_low = PER_LINUX32;
+	s390_exec_domain.pers_high = PER_LINUX32;
+	s390_exec_domain.signal_map = default_exec_domain.signal_map;
+	s390_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
+	register_exec_domain(&s390_exec_domain);
+	return 0;
+}
+
+__initcall(s390_init);
diff --git a/arch/s390/kernel/compat_ioctl.c b/arch/s390/kernel/compat_ioctl.c
new file mode 100644
index 0000000..96571ff
--- /dev/null
+++ b/arch/s390/kernel/compat_ioctl.c
@@ -0,0 +1,73 @@
+/*
+ * ioctl32.c: Conversion between 32bit and 64bit native ioctls.
+ *
+ *  S390 version
+ *    Copyright (C) 2000-2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Gerhard Tonn (ton@de.ibm.com)
+ *               Arnd Bergmann (arndb@de.ibm.com)
+ *
+ * Original implementation from 32-bit Sparc compat code which is
+ * Copyright (C) 2000 Silicon Graphics, Inc.
+ * Written by Ulf Carlsson (ulfc@engr.sgi.com) 
+ */
+
+#include "compat_linux.h"
+#define INCLUDES
+#define CODE
+#include "../../../fs/compat_ioctl.c"
+#include <asm/dasd.h>
+#include <asm/tape390.h>
+
+/*
+ * Compat handler for ioctls whose argument is a 31-bit user pointer:
+ * widen it with compat_ptr() and forward to the native sys_ioctl().
+ * The struct file argument is unused here.
+ */
+static int do_ioctl32_pointer(unsigned int fd, unsigned int cmd,
+				unsigned long arg, struct file *f)
+{
+	return sys_ioctl(fd, cmd, (unsigned long)compat_ptr(arg));
+}
+
+/*
+ * Compat handler for ioctls whose argument is a plain integer value:
+ * pass it through to the native sys_ioctl() unchanged.
+ */
+static int do_ioctl32_ulong(unsigned int fd, unsigned int cmd,
+				unsigned long arg, struct file *f)
+{
+	return sys_ioctl(fd, cmd, arg);
+}
+
+#define COMPATIBLE_IOCTL(cmd)		HANDLE_IOCTL((cmd),(ioctl_trans_handler_t)do_ioctl32_pointer)
+#define ULONG_IOCTL(cmd)		HANDLE_IOCTL((cmd),(ioctl_trans_handler_t)do_ioctl32_ulong)
+#define HANDLE_IOCTL(cmd,handler)	{ (cmd), (ioctl_trans_handler_t)(handler), NULL },
+
+struct ioctl_trans ioctl_start[] = {
+/* architecture independent ioctls */
+#include <linux/compat_ioctl.h>
+#define DECLARES
+#include "../../../fs/compat_ioctl.c"
+
+/* s390 only ioctls */
+#if defined(CONFIG_DASD) || defined(CONFIG_DASD_MODULE)
+COMPATIBLE_IOCTL(DASDAPIVER)
+COMPATIBLE_IOCTL(BIODASDDISABLE)
+COMPATIBLE_IOCTL(BIODASDENABLE)
+COMPATIBLE_IOCTL(BIODASDRSRV)
+COMPATIBLE_IOCTL(BIODASDRLSE)
+COMPATIBLE_IOCTL(BIODASDSLCK)
+COMPATIBLE_IOCTL(BIODASDINFO)
+COMPATIBLE_IOCTL(BIODASDINFO2)
+COMPATIBLE_IOCTL(BIODASDFMT)
+COMPATIBLE_IOCTL(BIODASDPRRST)
+COMPATIBLE_IOCTL(BIODASDQUIESCE)
+COMPATIBLE_IOCTL(BIODASDRESUME)
+COMPATIBLE_IOCTL(BIODASDPRRD)
+COMPATIBLE_IOCTL(BIODASDPSRD)
+COMPATIBLE_IOCTL(BIODASDGATTR)
+COMPATIBLE_IOCTL(BIODASDSATTR)
+
+#endif
+
+#if defined(CONFIG_S390_TAPE) || defined(CONFIG_S390_TAPE_MODULE)
+COMPATIBLE_IOCTL(TAPE390_DISPLAY)
+#endif
+
+/* s390 doesn't need handlers here */
+COMPATIBLE_IOCTL(TIOCGSERIAL)
+COMPATIBLE_IOCTL(TIOCSSERIAL)
+};
+
+int ioctl_table_size = ARRAY_SIZE(ioctl_start);
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
new file mode 100644
index 0000000..6140562
--- /dev/null
+++ b/arch/s390/kernel/compat_linux.c
@@ -0,0 +1,1045 @@
+/*
+ *  arch/s390x/kernel/linux32.c
+ *
+ *  S390 version
+ *    Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ *               Gerhard Tonn (ton@de.ibm.com)   
+ *               Thomas Spatzier (tspat@de.ibm.com)
+ *
+ *  Conversion between 31bit and 64bit native syscalls.
+ *
+ * Heavily inspired by the 32-bit Sparc compat code which is 
+ * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ *
+ */
+
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/fs.h> 
+#include <linux/mm.h> 
+#include <linux/file.h> 
+#include <linux/signal.h>
+#include <linux/resource.h>
+#include <linux/times.h>
+#include <linux/utsname.h>
+#include <linux/timex.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/slab.h>
+#include <linux/uio.h>
+#include <linux/nfs_fs.h>
+#include <linux/quota.h>
+#include <linux/module.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/nfsd/nfsd.h>
+#include <linux/nfsd/cache.h>
+#include <linux/nfsd/xdr.h>
+#include <linux/nfsd/syscall.h>
+#include <linux/poll.h>
+#include <linux/personality.h>
+#include <linux/stat.h>
+#include <linux/filter.h>
+#include <linux/highmem.h>
+#include <linux/highuid.h>
+#include <linux/mman.h>
+#include <linux/ipv6.h>
+#include <linux/in.h>
+#include <linux/icmpv6.h>
+#include <linux/syscalls.h>
+#include <linux/sysctl.h>
+#include <linux/binfmts.h>
+#include <linux/compat.h>
+#include <linux/vfs.h>
+#include <linux/ptrace.h>
+
+#include <asm/types.h>
+#include <asm/ipc.h>
+#include <asm/uaccess.h>
+#include <asm/semaphore.h>
+
+#include <net/scm.h>
+#include <net/sock.h>
+
+#include "compat_linux.h"
+
+ 
+/*
+ * For this source file, we want overflow handling: 32-bit ids that do
+ * not fit into 16 bits are reported as the configured overflow id, and
+ * the 16-bit sentinel (u16)-1 maps back to the "unchanged" id -1.
+ *
+ * The conversion macros expand to a conditional expression; the whole
+ * expansion is parenthesized so the macros are safe to embed inside
+ * larger expressions (precedence hazard otherwise).
+ */
+
+#undef high2lowuid
+#undef high2lowgid
+#undef low2highuid
+#undef low2highgid
+#undef SET_UID16
+#undef SET_GID16
+#undef NEW_TO_OLD_UID
+#undef NEW_TO_OLD_GID
+#undef SET_OLDSTAT_UID
+#undef SET_OLDSTAT_GID
+#undef SET_STAT_UID
+#undef SET_STAT_GID
+
+#define high2lowuid(uid) (((uid) > 65535) ? (u16)overflowuid : (u16)(uid))
+#define high2lowgid(gid) (((gid) > 65535) ? (u16)overflowgid : (u16)(gid))
+#define low2highuid(uid) (((uid) == (u16)-1) ? (uid_t)-1 : (uid_t)(uid))
+#define low2highgid(gid) (((gid) == (u16)-1) ? (gid_t)-1 : (gid_t)(gid))
+#define SET_UID16(var, uid)	var = high2lowuid(uid)
+#define SET_GID16(var, gid)	var = high2lowgid(gid)
+#define NEW_TO_OLD_UID(uid)	high2lowuid(uid)
+#define NEW_TO_OLD_GID(gid)	high2lowgid(gid)
+#define SET_OLDSTAT_UID(stat, uid)	(stat).st_uid = high2lowuid(uid)
+#define SET_OLDSTAT_GID(stat, gid)	(stat).st_gid = high2lowgid(gid)
+#define SET_STAT_UID(stat, uid)		(stat).st_uid = high2lowuid(uid)
+#define SET_STAT_GID(stat, gid)		(stat).st_gid = high2lowgid(gid)
+
+/* 16-bit chown(2): widen the ids ((u16)-1 -> "unchanged") and defer
+   to the native syscall. */
+asmlinkage long sys32_chown16(const char * filename, u16 user, u16 group)
+{
+	uid_t uid = low2highuid(user);
+	gid_t gid = low2highgid(group);
+
+	return sys_chown(filename, uid, gid);
+}
+
+/* 16-bit lchown(2): widen the ids and defer to the native syscall. */
+asmlinkage long sys32_lchown16(const char * filename, u16 user, u16 group)
+{
+	uid_t uid = low2highuid(user);
+	gid_t gid = low2highgid(group);
+
+	return sys_lchown(filename, uid, gid);
+}
+
+/* 16-bit fchown(2): widen the ids and defer to the native syscall. */
+asmlinkage long sys32_fchown16(unsigned int fd, u16 user, u16 group)
+{
+	uid_t uid = low2highuid(user);
+	gid_t gid = low2highgid(group);
+
+	return sys_fchown(fd, uid, gid);
+}
+
+/* 16-bit setregid(2): widen both gids and defer to the native call. */
+asmlinkage long sys32_setregid16(u16 rgid, u16 egid)
+{
+	gid_t krgid = low2highgid(rgid);
+	gid_t kegid = low2highgid(egid);
+
+	return sys_setregid(krgid, kegid);
+}
+
+/* 16-bit setgid(2): plain zero-extension, no sentinel handling. */
+asmlinkage long sys32_setgid16(u16 gid)
+{
+	gid_t kgid = (gid_t)gid;
+
+	return sys_setgid(kgid);
+}
+
+/* 16-bit setreuid(2): widen both uids and defer to the native call. */
+asmlinkage long sys32_setreuid16(u16 ruid, u16 euid)
+{
+	uid_t kruid = low2highuid(ruid);
+	uid_t keuid = low2highuid(euid);
+
+	return sys_setreuid(kruid, keuid);
+}
+
+/* 16-bit setuid(2): plain zero-extension, no sentinel handling. */
+asmlinkage long sys32_setuid16(u16 uid)
+{
+	uid_t kuid = (uid_t)uid;
+
+	return sys_setuid(kuid);
+}
+
+/* 16-bit setresuid(2): widen all three uids and defer to the native call. */
+asmlinkage long sys32_setresuid16(u16 ruid, u16 euid, u16 suid)
+{
+	uid_t kruid = low2highuid(ruid);
+	uid_t keuid = low2highuid(euid);
+	uid_t ksuid = low2highuid(suid);
+
+	return sys_setresuid(kruid, keuid, ksuid);
+}
+
+/* 16-bit getresuid(2): narrow the current ids (overflowuid on ids that
+   do not fit) and store them out; stops at the first faulting store. */
+asmlinkage long sys32_getresuid16(u16 *ruid, u16 *euid, u16 *suid)
+{
+	int rc;
+
+	rc = put_user(high2lowuid(current->uid), ruid);
+	if (rc)
+		return rc;
+	rc = put_user(high2lowuid(current->euid), euid);
+	if (rc)
+		return rc;
+	return put_user(high2lowuid(current->suid), suid);
+}
+
+/* 16-bit setresgid(2): widen all three gids and defer to the native call. */
+asmlinkage long sys32_setresgid16(u16 rgid, u16 egid, u16 sgid)
+{
+	gid_t krgid = low2highgid(rgid);
+	gid_t kegid = low2highgid(egid);
+	gid_t ksgid = low2highgid(sgid);
+
+	return sys_setresgid(krgid, kegid, ksgid);
+}
+
+/* 16-bit getresgid(2): narrow the current ids (overflowgid on ids that
+   do not fit) and store them out; stops at the first faulting store. */
+asmlinkage long sys32_getresgid16(u16 *rgid, u16 *egid, u16 *sgid)
+{
+	int rc;
+
+	rc = put_user(high2lowgid(current->gid), rgid);
+	if (rc)
+		return rc;
+	rc = put_user(high2lowgid(current->egid), egid);
+	if (rc)
+		return rc;
+	return put_user(high2lowgid(current->sgid), sgid);
+}
+
+/* 16-bit setfsuid(2): plain zero-extension, no sentinel handling. */
+asmlinkage long sys32_setfsuid16(u16 uid)
+{
+	uid_t kuid = (uid_t)uid;
+
+	return sys_setfsuid(kuid);
+}
+
+/* 16-bit setfsgid(2): plain zero-extension, no sentinel handling. */
+asmlinkage long sys32_setfsgid16(u16 gid)
+{
+	gid_t kgid = (gid_t)gid;
+
+	return sys_setfsgid(kgid);
+}
+
+/* Copy every supplementary group of group_info to user space as a
+   truncated 16-bit gid.  Returns 0 or -EFAULT. */
+static int groups16_to_user(u16 *grouplist, struct group_info *group_info)
+{
+	int idx;
+	u16 g16;
+
+	for (idx = 0; idx < group_info->ngroups; idx++) {
+		g16 = (u16)GROUP_AT(group_info, idx);
+		if (put_user(g16, grouplist + idx))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+/* Read 16-bit gids from user space into group_info, zero-extending
+   each one.  Returns 0 or -EFAULT. */
+static int groups16_from_user(struct group_info *group_info, u16 *grouplist)
+{
+	int idx;
+	u16 g16;
+
+	for (idx = 0; idx < group_info->ngroups; idx++) {
+		if (get_user(g16, grouplist + idx))
+			return -EFAULT;
+		GROUP_AT(group_info, idx) = (gid_t)g16;
+	}
+
+	return 0;
+}
+
+/*
+ * getgroups(2) with 16-bit gids.  gidsetsize == 0 only reports the
+ * number of supplementary groups; the group_info reference is held
+ * across the user copy so the set cannot change underneath us.
+ */
+asmlinkage long sys32_getgroups16(int gidsetsize, u16 *grouplist)
+{
+	int i;
+
+	if (gidsetsize < 0)
+		return -EINVAL;
+
+	get_group_info(current->group_info);
+	i = current->group_info->ngroups;
+	if (gidsetsize) {
+		if (i > gidsetsize) {
+			i = -EINVAL;
+			goto out;
+		}
+		if (groups16_to_user(grouplist, current->group_info)) {
+			i = -EFAULT;
+			goto out;
+		}
+	}
+out:
+	put_group_info(current->group_info);
+	return i;
+}
+
+/*
+ * setgroups(2) with 16-bit gids: allocate a fresh group_info, fill it
+ * from user space, and install it.  The local reference is dropped on
+ * every path; set_current_groups() takes its own reference.
+ */
+asmlinkage long sys32_setgroups16(int gidsetsize, u16 *grouplist)
+{
+	struct group_info *group_info;
+	int retval;
+
+	if (!capable(CAP_SETGID))
+		return -EPERM;
+	if ((unsigned)gidsetsize > NGROUPS_MAX)
+		return -EINVAL;
+
+	group_info = groups_alloc(gidsetsize);
+	if (!group_info)
+		return -ENOMEM;
+	retval = groups16_from_user(group_info, grouplist);
+	if (retval) {
+		put_group_info(group_info);
+		return retval;
+	}
+
+	retval = set_current_groups(group_info);
+	put_group_info(group_info);
+
+	return retval;
+}
+
+/* 16-bit getuid(2): narrow, reporting overflowuid when it won't fit. */
+asmlinkage long sys32_getuid16(void)
+{
+	uid_t uid = current->uid;
+
+	return high2lowuid(uid);
+}
+
+/* 16-bit geteuid(2): narrow, reporting overflowuid when it won't fit. */
+asmlinkage long sys32_geteuid16(void)
+{
+	uid_t euid = current->euid;
+
+	return high2lowuid(euid);
+}
+
+/* 16-bit getgid(2): narrow, reporting overflowgid when it won't fit. */
+asmlinkage long sys32_getgid16(void)
+{
+	gid_t gid = current->gid;
+
+	return high2lowgid(gid);
+}
+
+/* 16-bit getegid(2): narrow, reporting overflowgid when it won't fit. */
+asmlinkage long sys32_getegid16(void)
+{
+	gid_t egid = current->egid;
+
+	return high2lowgid(egid);
+}
+
+/* 32-bit timeval and related flotsam.  */
+
+/*
+ * Copy a 32-bit timeval from user space into a kernel struct timeval.
+ * Returns non-zero on fault, 0 on success.
+ *
+ * Fix: the access_ok() check referenced an undeclared identifier
+ * 'tv32'; it must validate the user pointer 'i' that is actually
+ * dereferenced below (it slipped through because the function is
+ * static inline and currently has no caller in this file).
+ */
+static inline long get_tv32(struct timeval *o, struct compat_timeval *i)
+{
+	return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
+		(__get_user(o->tv_sec, &i->tv_sec) ||
+		 __get_user(o->tv_usec, &i->tv_usec)));
+}
+
+/* Copy a kernel struct timeval to a 32-bit user timeval.
+   Returns non-zero on fault, 0 on success. */
+static inline long put_tv32(struct compat_timeval *o, struct timeval *i)
+{
+	if (!access_ok(VERIFY_WRITE, o, sizeof(*o)))
+		return 1;
+	return (__put_user(i->tv_sec, &o->tv_sec) ||
+		__put_user(i->tv_usec, &o->tv_usec));
+}
+
+/*
+ * sys32_ipc() is the de-multiplexer for the SysV IPC calls in 32bit emulation.
+ *
+ * One syscall multiplexes semaphore, message-queue and shared-memory
+ * operations; 'ptr' (and for SEMTIMEDOP also 'third') carries a 31-bit
+ * user pointer that must go through compat_ptr().
+ *
+ * This is really horribly ugly.
+ */
+asmlinkage long sys32_ipc(u32 call, int first, int second, int third, u32 ptr)
+{
+	if (call >> 16)		/* hack for backward compatibility */
+		return -EINVAL;
+
+	call &= 0xffff;
+
+	switch (call) {
+	case SEMTIMEDOP:
+		return compat_sys_semtimedop(first, compat_ptr(ptr),
+					     second, compat_ptr(third));
+	case SEMOP:
+		/* struct sembuf is the same on 32 and 64bit :)) */
+		return sys_semtimedop(first, compat_ptr(ptr),
+				      second, NULL);
+	case SEMGET:
+		return sys_semget(first, second, third);
+	case SEMCTL:
+		return compat_sys_semctl(first, second, third,
+					 compat_ptr(ptr));
+	case MSGSND:
+		return compat_sys_msgsnd(first, second, third,
+					 compat_ptr(ptr));
+	case MSGRCV:
+		return compat_sys_msgrcv(first, second, 0, third,
+					 0, compat_ptr(ptr));
+	case MSGGET:
+		return sys_msgget((key_t) first, second);
+	case MSGCTL:
+		return compat_sys_msgctl(first, second, compat_ptr(ptr));
+	case SHMAT:
+		return compat_sys_shmat(first, second, third,
+					0, compat_ptr(ptr));
+	case SHMDT:
+		return sys_shmdt(compat_ptr(ptr));
+	case SHMGET:
+		return sys_shmget(first, (unsigned)second, third);
+	case SHMCTL:
+		return compat_sys_shmctl(first, second, compat_ptr(ptr));
+	}
+
+	return -ENOSYS;
+}
+
+/* truncate64(2): the 64-bit length arrives split in two registers;
+   a negative high half is rejected before recombining. */
+asmlinkage long sys32_truncate64(const char * path, unsigned long high, unsigned long low)
+{
+	if ((int)high < 0)
+		return -EINVAL;
+	else
+		return sys_truncate(path, (high << 32) | low);
+}
+
+/* ftruncate64(2): same split-register length recombination as
+   sys32_truncate64(). */
+asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long high, unsigned long low)
+{
+	if ((int)high < 0)
+		return -EINVAL;
+	else
+		return sys_ftruncate(fd, (high << 32) | low);
+}
+
+/*
+ * Fill a 32-bit struct compat_stat from a kernel struct kstat.
+ * Fails with -EOVERFLOW when dev/rdev do not fit the old encoding;
+ * returns non-zero (the OR of the put_user results) on a user fault.
+ */
+int cp_compat_stat(struct kstat *stat, struct compat_stat *statbuf)
+{
+	int err;
+
+	if (!old_valid_dev(stat->dev) || !old_valid_dev(stat->rdev))
+		return -EOVERFLOW;
+
+	err = put_user(old_encode_dev(stat->dev), &statbuf->st_dev);
+	err |= put_user(stat->ino, &statbuf->st_ino);
+	err |= put_user(stat->mode, &statbuf->st_mode);
+	err |= put_user(stat->nlink, &statbuf->st_nlink);
+	err |= put_user(high2lowuid(stat->uid), &statbuf->st_uid);
+	err |= put_user(high2lowgid(stat->gid), &statbuf->st_gid);
+	err |= put_user(old_encode_dev(stat->rdev), &statbuf->st_rdev);
+	err |= put_user(stat->size, &statbuf->st_size);
+	err |= put_user(stat->atime.tv_sec, &statbuf->st_atime);
+	err |= put_user(stat->atime.tv_nsec, &statbuf->st_atime_nsec);
+	err |= put_user(stat->mtime.tv_sec, &statbuf->st_mtime);
+	err |= put_user(stat->mtime.tv_nsec, &statbuf->st_mtime_nsec);
+	err |= put_user(stat->ctime.tv_sec, &statbuf->st_ctime);
+	err |= put_user(stat->ctime.tv_nsec, &statbuf->st_ctime_nsec);
+	err |= put_user(stat->blksize, &statbuf->st_blksize);
+	err |= put_user(stat->blocks, &statbuf->st_blocks);
+/* fixme
+	err |= put_user(0, &statbuf->__unused4[0]);
+	err |= put_user(0, &statbuf->__unused4[1]);
+*/
+	return err;
+}
+
+/* 31-bit user-space layout of struct sysinfo (filled field by field in
+   sys32_sysinfo below); do not reorder fields, the layout is ABI. */
+struct sysinfo32 {
+        s32 uptime;
+        u32 loads[3];
+        u32 totalram;
+        u32 freeram;
+        u32 sharedram;
+        u32 bufferram;
+        u32 totalswap;
+        u32 freeswap;
+        unsigned short procs;
+	unsigned short pads;
+	u32 totalhigh;
+	u32 freehigh;
+	unsigned int mem_unit;
+        char _f[8];
+};
+
+/*
+ * sysinfo(2) for 31-bit callers: run the native syscall against a
+ * kernel buffer (temporarily under KERNEL_DS so the kernel pointer
+ * passes the uaccess checks), then copy the result out field by field
+ * into the 32-bit layout.
+ */
+asmlinkage long sys32_sysinfo(struct sysinfo32 __user *info)
+{
+	struct sysinfo s;
+	int ret, err;
+	mm_segment_t old_fs = get_fs ();
+	
+	set_fs (KERNEL_DS);
+	ret = sys_sysinfo(&s);
+	set_fs (old_fs);
+	/* The first put_user validates the user pointer; the remaining
+	   stores may use the unchecked __put_user variant. */
+	err = put_user (s.uptime, &info->uptime);
+	err |= __put_user (s.loads[0], &info->loads[0]);
+	err |= __put_user (s.loads[1], &info->loads[1]);
+	err |= __put_user (s.loads[2], &info->loads[2]);
+	err |= __put_user (s.totalram, &info->totalram);
+	err |= __put_user (s.freeram, &info->freeram);
+	err |= __put_user (s.sharedram, &info->sharedram);
+	err |= __put_user (s.bufferram, &info->bufferram);
+	err |= __put_user (s.totalswap, &info->totalswap);
+	err |= __put_user (s.freeswap, &info->freeswap);
+	err |= __put_user (s.procs, &info->procs);
+	err |= __put_user (s.totalhigh, &info->totalhigh);
+	err |= __put_user (s.freehigh, &info->freehigh);
+	err |= __put_user (s.mem_unit, &info->mem_unit);
+	if (err)
+		return -EFAULT;
+	return ret;
+}
+
+/*
+ * sched_rr_get_interval(2): obtain the timeslice into a kernel
+ * timespec under KERNEL_DS, then convert it to the compat layout.
+ */
+asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
+				struct compat_timespec __user *interval)
+{
+	struct timespec t;
+	int ret;
+	mm_segment_t old_fs = get_fs ();
+	
+	set_fs (KERNEL_DS);
+	ret = sys_sched_rr_get_interval(pid, &t);
+	set_fs (old_fs);
+	if (put_compat_timespec(&t, interval))
+		return -EFAULT;
+	return ret;
+}
+
+/*
+ * rt_sigprocmask(2): a 64-bit kernel sigset word is split across two
+ * 32-bit compat words, so the mask is converted in and out around the
+ * native call (which runs under KERNEL_DS on kernel copies).  The
+ * switch cases below fall through intentionally so every configured
+ * _NSIG_WORDS word gets converted.
+ */
+asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
+			compat_sigset_t __user *oset, size_t sigsetsize)
+{
+	sigset_t s;
+	compat_sigset_t s32;
+	int ret;
+	mm_segment_t old_fs = get_fs();
+	
+	if (set) {
+		if (copy_from_user (&s32, set, sizeof(compat_sigset_t)))
+			return -EFAULT;
+		switch (_NSIG_WORDS) {
+		case 4: s.sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
+		case 3: s.sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
+		case 2: s.sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
+		case 1: s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
+		}
+	}
+	set_fs (KERNEL_DS);
+	ret = sys_rt_sigprocmask(how, set ? &s : NULL, oset ? &s : NULL, sigsetsize);
+	set_fs (old_fs);
+	if (ret) return ret;
+	if (oset) {
+		switch (_NSIG_WORDS) {
+		case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
+		case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
+		case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
+		case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
+		}
+		if (copy_to_user (oset, &s32, sizeof(compat_sigset_t)))
+			return -EFAULT;
+	}
+	return 0;
+}
+
+/*
+ * rt_sigpending(2): fetch the pending set into a kernel sigset under
+ * KERNEL_DS, then split each 64-bit word into two 32-bit compat words
+ * (the switch falls through on purpose) before copying out.
+ */
+asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
+				size_t sigsetsize)
+{
+	sigset_t s;
+	compat_sigset_t s32;
+	int ret;
+	mm_segment_t old_fs = get_fs();
+		
+	set_fs (KERNEL_DS);
+	ret = sys_rt_sigpending(&s, sigsetsize);
+	set_fs (old_fs);
+	if (!ret) {
+		switch (_NSIG_WORDS) {
+		case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
+		case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
+		case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
+		case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
+		}
+		if (copy_to_user (set, &s32, sizeof(compat_sigset_t)))
+			return -EFAULT;
+	}
+	return ret;
+}
+
+/*
+ * rt_sigqueueinfo(2): convert the compat siginfo to the native layout,
+ * then invoke the native syscall under KERNEL_DS.
+ */
+asmlinkage long
+sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo)
+{
+	siginfo_t info;
+	int ret;
+	mm_segment_t old_fs = get_fs();
+	
+	if (copy_siginfo_from_user32(&info, uinfo))
+		return -EFAULT;
+	set_fs (KERNEL_DS);
+	ret = sys_rt_sigqueueinfo(pid, sig, &info);
+	set_fs (old_fs);
+	return ret;
+}
+
+/*
+ * sys32_execve() executes a new program after the asm stub has set
+ * things up for us.  gpr2 carries the 31-bit filename pointer,
+ * gprs 3 and 4 the 31-bit argv/envp pointers.
+ */
+asmlinkage long
+sys32_execve(struct pt_regs regs)
+{
+        int error;
+        char * filename;
+
+        filename = getname(compat_ptr(regs.orig_gpr2));
+        error = PTR_ERR(filename);
+        if (IS_ERR(filename))
+                goto out;
+        error = compat_do_execve(filename, compat_ptr(regs.gprs[3]),
+				 compat_ptr(regs.gprs[4]), &regs);
+	if (error == 0)
+	{
+		/* Exec succeeded: drop the single-step ptrace flag and
+		   reset the FP control register for the new image (the
+		   asm zeroes r0 and loads it into the FPC). */
+		task_lock(current);
+		current->ptrace &= ~PT_DTRACE;
+		task_unlock(current);
+		current->thread.fp_regs.fpc=0;
+		__asm__ __volatile__
+		        ("sr  0,0\n\t"
+		         "sfpc 0,0\n\t"
+			 : : :"0");
+	}
+        putname(filename);
+out:
+        return error;
+}
+
+
+#ifdef CONFIG_MODULES
+
+/* init_module(2): thin pass-through to the native implementation. */
+asmlinkage long
+sys32_init_module(void __user *umod, unsigned long len,
+		const char __user *uargs)
+{
+	return sys_init_module(umod, len, uargs);
+}
+
+/* delete_module(2): thin pass-through to the native implementation. */
+asmlinkage long
+sys32_delete_module(const char __user *name_user, unsigned int flags)
+{
+	return sys_delete_module(name_user, flags);
+}
+
+#else /* CONFIG_MODULES */
+
+/* Module support not configured: both entry points report -ENOSYS. */
+asmlinkage long
+sys32_init_module(void __user *umod, unsigned long len,
+		const char __user *uargs)
+{
+	return -ENOSYS;
+}
+
+asmlinkage long
+sys32_delete_module(const char __user *name_user, unsigned int flags)
+{
+	return -ENOSYS;
+}
+
+#endif  /* CONFIG_MODULES */
+
+/* Translations due to time_t size differences.  Which affects all
+   sorts of things, like timeval and itimerval.  */
+
+extern struct timezone sys_tz;
+
+/* gettimeofday(2): narrow the current time to a 32-bit timeval and
+   copy the timezone out verbatim; each pointer may be NULL. */
+asmlinkage long sys32_gettimeofday(struct compat_timeval *tv, struct timezone *tz)
+{
+	struct timeval now;
+
+	if (tv) {
+		do_gettimeofday(&now);
+		if (put_tv32(tv, &now))
+			return -EFAULT;
+	}
+	if (tz && copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
+		return -EFAULT;
+	return 0;
+}
+
+/* Read a 32-bit user timeval and convert it to a kernel timespec
+   (microseconds scaled to nanoseconds).  Returns 0 or -EFAULT. */
+static inline long get_ts32(struct timespec *o, struct compat_timeval *i)
+{
+	long usec;
+
+	if (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
+	    __get_user(o->tv_sec, &i->tv_sec) ||
+	    __get_user(usec, &i->tv_usec))
+		return -EFAULT;
+	o->tv_nsec = usec * 1000;
+	return 0;
+}
+
+/*
+ * settimeofday(2): convert the 32-bit timeval to a kernel timespec and
+ * copy the timezone in, then call the common helper; either pointer
+ * may be NULL.
+ */
+asmlinkage long sys32_settimeofday(struct compat_timeval *tv, struct timezone *tz)
+{
+	struct timespec kts;
+	struct timezone ktz;
+
+ 	if (tv) {
+		if (get_ts32(&kts, tv))
+			return -EFAULT;
+	}
+	if (tz) {
+		if (copy_from_user(&ktz, tz, sizeof(ktz)))
+			return -EFAULT;
+	}
+
+	return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
+}
+
+/* pause(2) for 31-bit binaries: sleep interruptibly until a signal
+   arrives.  (Carried over from the sparc32 compat layer, per the
+   original comment.) */
+asmlinkage long sys32_pause(void)
+{
+	current->state = TASK_INTERRUPTIBLE;
+	schedule();
+	return -ERESTARTNOHAND;
+}
+
+/* pread64(2): the 64-bit file position arrives split across two u32
+   registers; reject counts that are negative as a 31-bit ssize_t. */
+asmlinkage long sys32_pread64(unsigned int fd, char *ubuf,
+				size_t count, u32 poshi, u32 poslo)
+{
+	loff_t pos = ((loff_t)AA(poshi) << 32) | AA(poslo);
+
+	if ((compat_ssize_t) count < 0)
+		return -EINVAL;
+	return sys_pread64(fd, ubuf, count, pos);
+}
+
+/* pwrite64(2): split-register position recombination, mirroring
+   sys32_pread64(). */
+asmlinkage long sys32_pwrite64(unsigned int fd, const char *ubuf,
+				size_t count, u32 poshi, u32 poslo)
+{
+	loff_t pos = ((loff_t)AA(poshi) << 32) | AA(poslo);
+
+	if ((compat_ssize_t) count < 0)
+		return -EINVAL;
+	return sys_pwrite64(fd, ubuf, count, pos);
+}
+
+/* readahead(2): recombine the split 64-bit offset and pass through. */
+asmlinkage compat_ssize_t sys32_readahead(int fd, u32 offhi, u32 offlo, s32 count)
+{
+	loff_t offset = ((loff_t)AA(offhi) << 32) | AA(offlo);
+
+	return sys_readahead(fd, offset, count);
+}
+
+/*
+ * sendfile(2) with a 32-bit off_t: stage the offset in a kernel
+ * variable (under KERNEL_DS) and copy it back afterwards.
+ *
+ * NOTE(review): the offset is copied back only when ret == 0, but
+ * sys_sendfile returns the number of bytes transferred, so a
+ * successful non-empty transfer would not propagate the updated
+ * offset - confirm the write-back condition is intended.
+ */
+asmlinkage long sys32_sendfile(int out_fd, int in_fd, compat_off_t *offset, size_t count)
+{
+	mm_segment_t old_fs = get_fs();
+	int ret;
+	off_t of;
+	
+	if (offset && get_user(of, offset))
+		return -EFAULT;
+		
+	set_fs(KERNEL_DS);
+	ret = sys_sendfile(out_fd, in_fd, offset ? &of : NULL, count);
+	set_fs(old_fs);
+	
+	if (!ret && offset && put_user(of, offset))
+		return -EFAULT;
+		
+	return ret;
+}
+
+/*
+ * sendfile64(2): the same KERNEL_DS staging trick with a 64-bit
+ * loff_t.
+ *
+ * NOTE(review): unlike sys32_sendfile above, the offset is written
+ * back unconditionally, even when the native call failed - confirm
+ * the two wrappers are intentionally different.
+ */
+asmlinkage long sys32_sendfile64(int out_fd, int in_fd,
+				compat_loff_t *offset, s32 count)
+{
+	mm_segment_t old_fs = get_fs();
+	int ret;
+	loff_t lof;
+	
+	if (offset && get_user(lof, offset))
+		return -EFAULT;
+		
+	set_fs(KERNEL_DS);
+	ret = sys_sendfile64(out_fd, in_fd, offset ? &lof : NULL, count);
+	set_fs(old_fs);
+	
+	if (offset && put_user(lof, offset))
+		return -EFAULT;
+		
+	return ret;
+}
+
+/* Handle adjtimex compatibility. */
+
+/* 31-bit user layout of struct timex; the unnamed s32 bitfields pad
+   the structure to the size user space expects. */
+struct timex32 {
+	u32 modes;
+	s32 offset, freq, maxerror, esterror;
+	s32 status, constant, precision, tolerance;
+	struct compat_timeval time;
+	s32 tick;
+	s32 ppsfreq, jitter, shift, stabil;
+	s32 jitcnt, calcnt, errcnt, stbcnt;
+	s32  :32; s32  :32; s32  :32; s32  :32;
+	s32  :32; s32  :32; s32  :32; s32  :32;
+	s32  :32; s32  :32; s32  :32; s32  :32;
+};
+
+extern int do_adjtimex(struct timex *);
+
+/*
+ * adjtimex(2): widen every field of the 31-bit struct timex32 into a
+ * kernel struct timex, let do_adjtimex() operate, and copy all fields
+ * back - the structure is read/write.  The first get_user/put_user of
+ * each chain validates the pointer; the rest use the unchecked
+ * __get_user/__put_user variants.
+ */
+asmlinkage long sys32_adjtimex(struct timex32 *utp)
+{
+	struct timex txc;
+	int ret;
+
+	memset(&txc, 0, sizeof(struct timex));
+
+	if(get_user(txc.modes, &utp->modes) ||
+	   __get_user(txc.offset, &utp->offset) ||
+	   __get_user(txc.freq, &utp->freq) ||
+	   __get_user(txc.maxerror, &utp->maxerror) ||
+	   __get_user(txc.esterror, &utp->esterror) ||
+	   __get_user(txc.status, &utp->status) ||
+	   __get_user(txc.constant, &utp->constant) ||
+	   __get_user(txc.precision, &utp->precision) ||
+	   __get_user(txc.tolerance, &utp->tolerance) ||
+	   __get_user(txc.time.tv_sec, &utp->time.tv_sec) ||
+	   __get_user(txc.time.tv_usec, &utp->time.tv_usec) ||
+	   __get_user(txc.tick, &utp->tick) ||
+	   __get_user(txc.ppsfreq, &utp->ppsfreq) ||
+	   __get_user(txc.jitter, &utp->jitter) ||
+	   __get_user(txc.shift, &utp->shift) ||
+	   __get_user(txc.stabil, &utp->stabil) ||
+	   __get_user(txc.jitcnt, &utp->jitcnt) ||
+	   __get_user(txc.calcnt, &utp->calcnt) ||
+	   __get_user(txc.errcnt, &utp->errcnt) ||
+	   __get_user(txc.stbcnt, &utp->stbcnt))
+		return -EFAULT;
+
+	ret = do_adjtimex(&txc);
+
+	if(put_user(txc.modes, &utp->modes) ||
+	   __put_user(txc.offset, &utp->offset) ||
+	   __put_user(txc.freq, &utp->freq) ||
+	   __put_user(txc.maxerror, &utp->maxerror) ||
+	   __put_user(txc.esterror, &utp->esterror) ||
+	   __put_user(txc.status, &utp->status) ||
+	   __put_user(txc.constant, &utp->constant) ||
+	   __put_user(txc.precision, &utp->precision) ||
+	   __put_user(txc.tolerance, &utp->tolerance) ||
+	   __put_user(txc.time.tv_sec, &utp->time.tv_sec) ||
+	   __put_user(txc.time.tv_usec, &utp->time.tv_usec) ||
+	   __put_user(txc.tick, &utp->tick) ||
+	   __put_user(txc.ppsfreq, &utp->ppsfreq) ||
+	   __put_user(txc.jitter, &utp->jitter) ||
+	   __put_user(txc.shift, &utp->shift) ||
+	   __put_user(txc.stabil, &utp->stabil) ||
+	   __put_user(txc.jitcnt, &utp->jitcnt) ||
+	   __put_user(txc.calcnt, &utp->calcnt) ||
+	   __put_user(txc.errcnt, &utp->errcnt) ||
+	   __put_user(txc.stbcnt, &utp->stbcnt))
+		ret = -EFAULT;
+
+	return ret;
+}
+
+#ifdef CONFIG_SYSCTL
+/* 31-bit layout of struct __sysctl_args: every pointer is a u32. */
+struct __sysctl_args32 {
+	u32 name;
+	int nlen;
+	u32 oldval;
+	u32 oldlenp;
+	u32 newval;
+	u32 newlen;
+	u32 __unused[4];
+};
+
+/*
+ * sysctl(2): do_sysctl() needs a native-width size_t for oldlenp, so
+ * an aligned slot inside the caller's own args->__unused area is used
+ * as writable scratch space (see the comment below).
+ */
+asmlinkage long sys32_sysctl(struct __sysctl_args32 *args)
+{
+	struct __sysctl_args32 tmp;
+	int error;
+	size_t oldlen, *oldlenp = NULL;
+	/* 8-byte-aligned address inside args->__unused, used as a
+	   native size_t in the caller's address space. */
+	unsigned long addr = (((long)&args->__unused[0]) + 7) & ~7;
+
+	if (copy_from_user(&tmp, args, sizeof(tmp)))
+		return -EFAULT;
+
+	if (tmp.oldval && tmp.oldlenp) {
+		/* Duh, this is ugly and might not work if sysctl_args
+		   is in read-only memory, but do_sysctl does indirectly
+		   a lot of uaccess in both directions and we'd have to
+		   basically copy the whole sysctl.c here, and
+		   glibc's __sysctl uses rw memory for the structure
+		   anyway.  */
+		if (get_user(oldlen, (u32 *)A(tmp.oldlenp)) ||
+		    put_user(oldlen, (size_t *)addr))
+			return -EFAULT;
+		oldlenp = (size_t *)addr;
+	}
+
+	lock_kernel();
+	error = do_sysctl((int *)A(tmp.name), tmp.nlen, (void *)A(tmp.oldval),
+			  oldlenp, (void *)A(tmp.newval), tmp.newlen);
+	unlock_kernel();
+	if (oldlenp) {
+		if (!error) {
+			if (get_user(oldlen, (size_t *)addr) ||
+			    put_user(oldlen, (u32 *)A(tmp.oldlenp)))
+				error = -EFAULT;
+		}
+		/* NOTE(review): the return value of copy_to_user is
+		   ignored here, so a fault restoring __unused goes
+		   unreported - confirm intended. */
+		copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
+	}
+	return error;
+}
+#endif
+
+/* 31-bit user-space stat64 layout.  Do not reorder fields: the layout
+   is user-visible ABI (copied out wholesale in cp_stat64 below). */
+struct stat64_emu31 {
+	unsigned long long  st_dev;
+	unsigned int    __pad1;
+#define STAT64_HAS_BROKEN_ST_INO        1
+	u32             __st_ino;
+	unsigned int    st_mode;
+	unsigned int    st_nlink;
+	u32             st_uid;
+	u32             st_gid;
+	unsigned long long  st_rdev;
+	unsigned int    __pad3;
+	long            st_size;
+	u32             st_blksize;
+	unsigned char   __pad4[4];
+	u32             __pad5;     /* future possible st_blocks high bits */
+	u32             st_blocks;  /* Number 512-byte blocks allocated. */
+	u32             st_atime;
+	u32             __pad6;
+	u32             st_mtime;
+	u32             __pad7;
+	u32             st_ctime;
+	u32             __pad8;     /* will be high 32 bits of ctime someday */
+	unsigned long   st_ino;
+};	
+
+/*
+ * Fill a zeroed stat64_emu31 from a kernel kstat (narrowing the wide
+ * fields) and copy it to user space in one go.
+ * Returns 0 on success, -EFAULT on fault.
+ */
+static int cp_stat64(struct stat64_emu31 *ubuf, struct kstat *stat)
+{
+	struct stat64_emu31 tmp;
+
+	memset(&tmp, 0, sizeof(tmp));
+
+	tmp.st_dev = huge_encode_dev(stat->dev);
+	tmp.st_ino = stat->ino;
+	tmp.__st_ino = (u32)stat->ino;
+	tmp.st_mode = stat->mode;
+	tmp.st_nlink = (unsigned int)stat->nlink;
+	tmp.st_uid = stat->uid;
+	tmp.st_gid = stat->gid;
+	tmp.st_rdev = huge_encode_dev(stat->rdev);
+	tmp.st_size = stat->size;
+	tmp.st_blksize = (u32)stat->blksize;
+	tmp.st_blocks = (u32)stat->blocks;
+	tmp.st_atime = (u32)stat->atime.tv_sec;
+	tmp.st_mtime = (u32)stat->mtime.tv_sec;
+	tmp.st_ctime = (u32)stat->ctime.tv_sec;
+
+	return copy_to_user(ubuf,&tmp,sizeof(tmp)) ? -EFAULT : 0; 
+}
+
+/* stat64(2): stat through the VFS, then convert to the 31-bit layout. */
+asmlinkage long sys32_stat64(char * filename, struct stat64_emu31 * statbuf)
+{
+	struct kstat stat;
+	int ret;
+
+	ret = vfs_stat(filename, &stat);
+	if (ret)
+		return ret;
+	return cp_stat64(statbuf, &stat);
+}
+
+/* lstat64(2): like sys32_stat64 but does not follow symlinks. */
+asmlinkage long sys32_lstat64(char * filename, struct stat64_emu31 * statbuf)
+{
+	struct kstat stat;
+	int ret;
+
+	ret = vfs_lstat(filename, &stat);
+	if (ret)
+		return ret;
+	return cp_stat64(statbuf, &stat);
+}
+
+/* fstat64(2): stat an open file descriptor, then convert the result. */
+asmlinkage long sys32_fstat64(unsigned long fd, struct stat64_emu31 * statbuf)
+{
+	struct kstat stat;
+	int ret;
+
+	ret = vfs_fstat(fd, &stat);
+	if (ret)
+		return ret;
+	return cp_stat64(statbuf, &stat);
+}
+
+/*
+ * Linux/i386 didn't use to be able to handle more than
+ * 4 system call parameters, so these system calls used a memory
+ * block for parameter passing..
+ */
+
+/* 31-bit layout of the mmap argument block (see comment above). */
+struct mmap_arg_struct_emu31 {
+	u32	addr;
+	u32	len;
+	u32	prot;
+	u32	flags;
+	u32	fd;
+	u32	offset;
+};
+
+/* common code for old and new mmaps */
+static inline long do_mmap2(
+	unsigned long addr, unsigned long len,
+	unsigned long prot, unsigned long flags,
+	unsigned long fd, unsigned long pgoff)
+{
+	struct file * file = NULL;
+	unsigned long error = -EBADF;
+
+	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+	if (!(flags & MAP_ANONYMOUS)) {
+		file = fget(fd);
+		if (!file)
+			goto out;
+	}
+
+	down_write(&current->mm->mmap_sem);
+	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+	/* A 31-bit task cannot address mappings at or above 2 GB, so
+	   undo the mapping rather than return an unusable address. */
+	if (!IS_ERR((void *) error) && error + len >= 0x80000000ULL) {
+		/* Result is out of bounds.  */
+		do_munmap(current->mm, addr, len);
+		error = -ENOMEM;
+	}
+	up_write(&current->mm->mmap_sem);
+
+	if (file)
+		fput(file);
+out:    
+	return error;
+}
+
+
+/*
+ * Old-style mmap(2): the argument block lives in user memory; the
+ * byte offset must be page aligned and is converted to a page offset
+ * for do_mmap2().
+ */
+asmlinkage unsigned long
+old32_mmap(struct mmap_arg_struct_emu31 *arg)
+{
+	struct mmap_arg_struct_emu31 a;
+	int error = -EFAULT;
+
+	if (copy_from_user(&a, arg, sizeof(a)))
+		goto out;
+
+	error = -EINVAL;
+	if (a.offset & ~PAGE_MASK)
+		goto out;
+
+	error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); 
+out:
+	return error;
+}
+
+/*
+ * mmap2(2): like old32_mmap() but the offset field already counts
+ * pages, so it is passed through unshifted.
+ */
+asmlinkage long 
+sys32_mmap2(struct mmap_arg_struct_emu31 *arg)
+{
+	struct mmap_arg_struct_emu31 a;
+	int error = -EFAULT;
+
+	if (copy_from_user(&a, arg, sizeof(a)))
+		goto out;
+	error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
+out:
+	return error;
+}
+
+/* read(2): reject counts that are negative as a 31-bit ssize_t, then
+   defer to the native syscall. */
+asmlinkage long sys32_read(unsigned int fd, char * buf, size_t count)
+{
+	if ((compat_ssize_t) count < 0)
+		return -EINVAL; 
+
+	return sys_read(fd, buf, count);
+}
+
+/* write(2): same 31-bit count check as sys32_read(). */
+asmlinkage long sys32_write(unsigned int fd, char * buf, size_t count)
+{
+	if ((compat_ssize_t) count < 0)
+		return -EINVAL; 
+
+	return sys_write(fd, buf, count);
+}
+
+/*
+ * clone(2) for 31-bit tasks: arguments arrive in registers; the
+ * pointer arguments are masked to 31 bits before use, and a NULL
+ * stack pointer means "reuse the caller's stack" (gpr15).
+ */
+asmlinkage long sys32_clone(struct pt_regs regs)
+{
+        unsigned long clone_flags;
+        unsigned long newsp;
+	int *parent_tidptr, *child_tidptr;
+
+        clone_flags = regs.gprs[3] & 0xffffffffUL;
+        newsp = regs.orig_gpr2 & 0x7fffffffUL;
+	parent_tidptr = (int *) (regs.gprs[4] & 0x7fffffffUL);
+	child_tidptr = (int *) (regs.gprs[5] & 0x7fffffffUL);
+        if (!newsp)
+                newsp = regs.gprs[15];
+        return do_fork(clone_flags, newsp, &regs, 0,
+		       parent_tidptr, child_tidptr);
+}
+
+/*
+ * Wrapper function for sys_timer_create.
+ */
+extern asmlinkage long
+sys_timer_create(clockid_t, struct sigevent *, timer_t *);
+
+/*
+ * timer_create(2): convert the compat sigevent, create the timer with
+ * the id staged in a kernel variable (under KERNEL_DS), then store the
+ * resulting timer id back to user space.
+ */
+asmlinkage long
+sys32_timer_create(clockid_t which_clock, struct compat_sigevent *se32,
+		timer_t *timer_id)
+{
+	struct sigevent se;
+	timer_t ktimer_id;
+	mm_segment_t old_fs;
+	long ret;
+
+	if (se32 == NULL)
+		return sys_timer_create(which_clock, NULL, timer_id);
+
+	if (get_compat_sigevent(&se, se32))
+		return -EFAULT;
+
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+	ret = sys_timer_create(which_clock, &se, &ktimer_id);
+	set_fs(old_fs);
+
+	if (!ret)
+		ret = put_user (ktimer_id, timer_id);
+
+	return ret;
+}
diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h
new file mode 100644
index 0000000..bf33dcf
--- /dev/null
+++ b/arch/s390/kernel/compat_linux.h
@@ -0,0 +1,197 @@
+#ifndef _ASM_S390X_S390_H
+#define _ASM_S390X_S390_H
+
+#include <linux/config.h>
+#include <linux/compat.h>
+#include <linux/socket.h>
+#include <linux/syscalls.h>
+#include <linux/nfs_fs.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/nfsd/nfsd.h>
+#include <linux/nfsd/export.h>
+
+/*
+ * A() masks off the high-order (31-bit addressing) bit of a 32-bit
+ * user pointer and widens it to 64 bits; AA() widens without masking.
+ */
+#define A(__x) ((unsigned long)((__x) & 0x7FFFFFFFUL))
+#define AA(__x)				\
+	((unsigned long)(__x))
+
+/* Now 32bit compatibility types */
+struct ipc_kludge_32 {
+        __u32   msgp;                           /* pointer              */
+        __s32   msgtyp;
+};
+
+/* 31-bit layout of the old-style struct sigaction. */
+struct old_sigaction32 {
+       __u32			sa_handler;	/* Really a pointer, but need to deal with 32 bits */
+       compat_old_sigset_t	sa_mask;	/* A 32 bit mask */
+       __u32			sa_flags;
+       __u32			sa_restorer;	/* Another 32 bit pointer */
+};
+ 
+/* 31-bit layout of siginfo_t; the union is padded to the same 128-byte
+   envelope as the native structure, with pointers narrowed to __u32. */
+typedef struct compat_siginfo {
+	int	si_signo;
+	int	si_errno;
+	int	si_code;
+
+	union {
+		int _pad[((128/sizeof(int)) - 3)];
+
+		/* kill() */
+		struct {
+			pid_t	_pid;	/* sender's pid */
+			uid_t	_uid;	/* sender's uid */
+		} _kill;
+
+		/* POSIX.1b timers */
+		struct {
+			timer_t _tid;		/* timer id */
+			int _overrun;		/* overrun count */
+			compat_sigval_t _sigval;	/* same as below */
+			int _sys_private;       /* not to be passed to user */
+		} _timer;
+
+		/* POSIX.1b signals */
+		struct {
+			pid_t			_pid;	/* sender's pid */
+			uid_t			_uid;	/* sender's uid */
+			compat_sigval_t		_sigval;
+		} _rt;
+
+		/* SIGCHLD */
+		struct {
+			pid_t			_pid;	/* which child */
+			uid_t			_uid;	/* sender's uid */
+			int			_status;/* exit code */
+			compat_clock_t		_utime;
+			compat_clock_t		_stime;
+		} _sigchld;
+
+		/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
+		struct {
+			__u32	_addr;	/* faulting insn/memory ref. - pointer */
+		} _sigfault;
+                          
+		/* SIGPOLL */
+		struct {
+			int	_band;	/* POLL_IN, POLL_OUT, POLL_MSG */
+			int	_fd;
+		} _sigpoll;
+	} _sifields;
+} compat_siginfo_t;
+
+/*
+ * How these fields are to be accessed: shorthand accessors into the
+ * _sifields union above, mirroring the native siginfo accessors.
+ */
+#define si_pid		_sifields._kill._pid
+#define si_uid		_sifields._kill._uid
+#define si_status	_sifields._sigchld._status
+#define si_utime	_sifields._sigchld._utime
+#define si_stime	_sifields._sigchld._stime
+#define si_value	_sifields._rt._sigval
+#define si_int		_sifields._rt._sigval.sival_int
+#define si_ptr		_sifields._rt._sigval.sival_ptr
+#define si_addr		_sifields._sigfault._addr
+#define si_band		_sifields._sigpoll._band
+#define si_fd		_sifields._sigpoll._fd    
+#define si_tid		_sifields._timer._tid
+#define si_overrun	_sifields._timer._overrun
+
+/* asm/sigcontext.h - 31-bit variants of the sigcontext register types
+   and the 32-bit PSW mask/address bit definitions. */
+typedef union
+{
+	__u64   d;
+	__u32   f; 
+} freg_t32;
+
+typedef struct
+{
+	unsigned int	fpc;
+	freg_t32	fprs[__NUM_FPRS];              
+} _s390_fp_regs32;
+
+typedef struct 
+{
+        __u32   mask;
+        __u32	addr;
+} _psw_t32 __attribute__ ((aligned(8)));
+
+#define PSW32_MASK_PER		0x40000000UL
+#define PSW32_MASK_DAT		0x04000000UL
+#define PSW32_MASK_IO		0x02000000UL
+#define PSW32_MASK_EXT		0x01000000UL
+#define PSW32_MASK_KEY		0x00F00000UL
+#define PSW32_MASK_MCHECK	0x00040000UL
+#define PSW32_MASK_WAIT		0x00020000UL
+#define PSW32_MASK_PSTATE	0x00010000UL
+#define PSW32_MASK_ASC		0x0000C000UL
+#define PSW32_MASK_CC		0x00003000UL
+#define PSW32_MASK_PM		0x00000f00UL
+
+#define PSW32_ADDR_AMODE31	0x80000000UL
+#define PSW32_ADDR_INSN		0x7FFFFFFFUL
+
+#define PSW32_BASE_BITS		0x00080000UL
+
+#define PSW32_ASC_PRIMARY	0x00000000UL
+#define PSW32_ASC_ACCREG	0x00004000UL
+#define PSW32_ASC_SECONDARY	0x00008000UL
+#define PSW32_ASC_HOME		0x0000C000UL
+
+#define PSW32_USER_BITS	(PSW32_BASE_BITS | PSW32_MASK_DAT | PSW32_ASC_HOME | \
+			 PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK | \
+			 PSW32_MASK_PSTATE)
+
+/* Keep CURRENT's PSW bits except condition code and program mask,
+   which are taken from NEW. */
+#define PSW32_MASK_MERGE(CURRENT,NEW) \
+        (((CURRENT) & ~(PSW32_MASK_CC|PSW32_MASK_PM)) | \
+         ((NEW) & (PSW32_MASK_CC|PSW32_MASK_PM)))
+
+
+/* 31-bit register set saved in a signal frame. */
+typedef struct
+{
+	_psw_t32	psw;
+	__u32		gprs[__NUM_GPRS];
+	__u32		acrs[__NUM_ACRS];
+} _s390_regs_common32;
+
+typedef struct
+{
+	_s390_regs_common32 regs;
+	_s390_fp_regs32     fpregs;
+} _sigregs32;
+
+/* Signal-frame geometry for 31-bit tasks. */
+#define _SIGCONTEXT_NSIG32	64
+#define _SIGCONTEXT_NSIG_BPW32	32
+#define __SIGNAL_FRAMESIZE32	96
+#define _SIGMASK_COPY_SIZE32	(sizeof(u32)*2)
+
+struct sigcontext32
+{
+	__u32	oldmask[_COMPAT_NSIG_WORDS];
+	__u32	sregs;				/* pointer */
+};
+
+/* asm/signal.h */
+struct sigaction32 {
+	__u32		sa_handler;		/* pointer */
+	__u32		sa_flags;
+        __u32		sa_restorer;		/* pointer */
+	compat_sigset_t	sa_mask;        /* mask last for extensibility */
+};
+
+/* 31-bit layout of stack_t for sigaltstack handling. */
+typedef struct {
+	__u32			ss_sp;		/* pointer */
+	int			ss_flags;
+	compat_size_t		ss_size;
+} stack_t32;
+
+/* asm/ucontext.h - 31-bit layout of struct ucontext. */
+struct ucontext32 {
+	__u32			uc_flags;
+	__u32			uc_link;	/* pointer */	
+	stack_t32		uc_stack;
+	_sigregs32		uc_mcontext;
+	compat_sigset_t		uc_sigmask;	/* mask last for extensibility */
+};
+
+#endif /* _ASM_S390X_S390_H */
diff --git a/arch/s390/kernel/compat_ptrace.h b/arch/s390/kernel/compat_ptrace.h
new file mode 100644
index 0000000..419aef9
--- /dev/null
+++ b/arch/s390/kernel/compat_ptrace.h
@@ -0,0 +1,83 @@
#ifndef _PTRACE32_H
#define _PTRACE32_H

#include "compat_linux.h"  /* needed for _psw_t32 */

/* 31-bit view of the PER (program-event-recording) control registers. */
typedef struct {
	__u32 cr[3];
} per_cr_words32;

/* 31-bit view of the PER fields in the lowcore.
   Offsets in the comments refer to the lowcore layout. */
typedef struct {
	__u16          perc_atmid;          /* 0x096 */
	__u32          address;             /* 0x098 */
	__u8           access_id;           /* 0x0a1 */
} per_lowcore_words32;

/* 31-bit view of per_struct: PER/debugging state exposed via ptrace. */
typedef struct {
	union {
		per_cr_words32   words;
	} control_regs;
	/*
	 * Use these flags instead of setting em_instruction_fetch
	 * directly they are used so that single stepping can be
	 * switched on & off while not affecting other tracing
	 */
	unsigned  single_step       : 1;
	unsigned  instruction_fetch : 1;
	unsigned                    : 30;
	/*
	 * These addresses are copied into cr10 & cr11 if single
	 * stepping is switched off
	 */
	__u32     starting_addr;
	__u32     ending_addr;
	union {
		per_lowcore_words32 words;
	} lowcore;
} per_struct32;

/* 31-bit register layout as seen by a 31-bit debugger via ptrace.
   Layout is user-space ABI - do not reorder. */
struct user_regs_struct32
{
	_psw_t32 psw;
	u32 gprs[NUM_GPRS];
	u32 acrs[NUM_ACRS];
	u32 orig_gpr2;
	s390_fp_regs fp_regs;
	/*
	 * These per registers are in here so that gdb can modify them
	 * itself as there is no "official" ptrace interface for hardware
	 * watchpoints. This is the way intel does it.
	 */
	per_struct32 per_info;
	u32  ieee_instruction_pointer; 
	/* Used to give failing instruction back to user for ieee exceptions */
};

/* 31-bit view of struct user (core-dump / PTRACE_PEEKUSR layout). */
struct user32 {
	/* We start with the registers, to mimic the way that "memory"
	   is returned from the ptrace(3,...) function.  */
	struct user_regs_struct32 regs; /* Where the registers are actually stored */
	/* The rest of this junk is to help gdb figure out what goes where */
	u32 u_tsize;		/* Text segment size (pages). */
	u32 u_dsize;	        /* Data segment size (pages). */
	u32 u_ssize;	        /* Stack segment size (pages). */
	u32 start_code;         /* Starting virtual address of text. */
	u32 start_stack;	/* Starting virtual address of stack area.
				   This is actually the bottom of the stack,
				   the top of the stack is always found in the
				   esp register.  */
	s32 signal;     	 /* Signal that caused the core dump. */
	u32 u_ar0;               /* Used by gdb to help find the values for */
	                         /* the registers. */
	u32 magic;		 /* To uniquely identify a core file */
	char u_comm[32];	 /* User command that was responsible */
};

/* Argument block for the PTRACE_PEEKUSR_AREA/POKEUSR_AREA emulation:
   all three fields are 31-bit user values. */
typedef struct
{
	__u32   len;
	__u32   kernel_addr;
	__u32   process_addr;
} ptrace_area_emu31;

#endif /* _PTRACE32_H */
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
new file mode 100644
index 0000000..d05d65a
--- /dev/null
+++ b/arch/s390/kernel/compat_signal.c
@@ -0,0 +1,648 @@
+/*
+ *  arch/s390/kernel/signal32.c
+ *
+ *  S390 version
+ *    Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
+ *               Gerhard Tonn (ton@de.ibm.com)                  
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  1997-11-28  Modified for POSIX.1b signals by Richard Henderson
+ */
+
+#include <linux/config.h>
+#include <linux/compat.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+#include <linux/tty.h>
+#include <linux/personality.h>
+#include <linux/binfmts.h>
+#include <asm/ucontext.h>
+#include <asm/uaccess.h>
+#include <asm/lowcore.h>
+#include "compat_linux.h"
+#include "compat_ptrace.h"
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
/* Non-RT 31-bit signal frame pushed on the user stack:
   standard 96-byte register save area (backchain), sigcontext,
   saved register state, signal number, and the svc return stub. */
typedef struct 
{
	__u8 callee_used_stack[__SIGNAL_FRAMESIZE32];
	struct sigcontext32 sc;
	_sigregs32 sregs;
	int signo;
	__u8 retcode[S390_SYSCALL_SIZE];
} sigframe32;

/* RT 31-bit signal frame: save area, svc return stub, then the
   compat siginfo and ucontext handed to the handler. */
typedef struct 
{
	__u8 callee_used_stack[__SIGNAL_FRAMESIZE32];
	__u8 retcode[S390_SYSCALL_SIZE];
	compat_siginfo_t info;
	struct ucontext32 uc;
} rt_sigframe32;
+
+asmlinkage int FASTCALL(do_signal(struct pt_regs *regs, sigset_t *oldset));
+
/*
 * Translate a native 64-bit siginfo_t into the 31-bit compat layout
 * at user address `to`.  Copies the generic head (si_signo, si_errno,
 * si_code) plus only the union member selected by si_code, never the
 * padding, so no kernel stack bytes leak to user space.
 * Returns 0 on success, non-zero if any user store faulted.
 */
int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
		return -EFAULT;

	/* If you change siginfo_t structure, please be sure
	   this code is fixed accordingly.
	   It should never copy any pad contained in the structure
	   to avoid security leaks, but must copy the generic
	   3 ints plus the relevant union member.  
	   This routine must convert siginfo from 64bit to 32bit as well
	   at the same time.  */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	if (from->si_code < 0)
		/* Negative si_code: user-generated siginfo, copy the raw pad. */
		err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
	else {
		/* The high 16 bits of si_code select the union member. */
		switch (from->si_code >> 16) {
		case __SI_RT >> 16: /* This is not generated by the kernel as of now.  */
		case __SI_MESGQ >> 16:
			err |= __put_user(from->si_int, &to->si_int);
			/* fallthrough */
		case __SI_KILL >> 16:
			err |= __put_user(from->si_pid, &to->si_pid);
			err |= __put_user(from->si_uid, &to->si_uid);
			break;
		case __SI_CHLD >> 16:
			err |= __put_user(from->si_pid, &to->si_pid);
			err |= __put_user(from->si_uid, &to->si_uid);
			err |= __put_user(from->si_utime, &to->si_utime);
			err |= __put_user(from->si_stime, &to->si_stime);
			err |= __put_user(from->si_status, &to->si_status);
			break;
		case __SI_FAULT >> 16:
			/* Fault address is narrowed into the 32-bit compat field. */
			err |= __put_user((unsigned long) from->si_addr,
					  &to->si_addr);
			break;
		case __SI_POLL >> 16:
			err |= __put_user(from->si_band, &to->si_band);
			err |= __put_user(from->si_fd, &to->si_fd);
			break;
		case __SI_TIMER >> 16:
			err |= __put_user(from->si_tid, &to->si_tid);
			err |= __put_user(from->si_overrun, &to->si_overrun);
			err |= __put_user(from->si_int, &to->si_int);
			break;
		default:
			break;
		}
	}
	return err;
}
+
/*
 * Translate a 31-bit compat siginfo at user address `from` into the
 * native 64-bit siginfo_t `to`.  Mirrors copy_siginfo_to_user32():
 * only the union member selected by si_code is converted.
 * Returns 0 on success, non-zero if any user load faulted.
 */
int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
{
	int err;
	u32 tmp;

	if (!access_ok (VERIFY_READ, from, sizeof(compat_siginfo_t)))
		return -EFAULT;

	err = __get_user(to->si_signo, &from->si_signo);
	err |= __get_user(to->si_errno, &from->si_errno);
	err |= __get_user(to->si_code, &from->si_code);

	if (to->si_code < 0)
		/* Negative si_code: user-generated siginfo, copy the raw pad. */
		err |= __copy_from_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
	else {
		/* The high 16 bits of si_code select the union member. */
		switch (to->si_code >> 16) {
		case __SI_RT >> 16: /* This is not generated by the kernel as of now.  */
		case __SI_MESGQ >> 16:
			err |= __get_user(to->si_int, &from->si_int);
			/* fallthrough */
		case __SI_KILL >> 16:
			err |= __get_user(to->si_pid, &from->si_pid);
			err |= __get_user(to->si_uid, &from->si_uid);
			break;
		case __SI_CHLD >> 16:
			err |= __get_user(to->si_pid, &from->si_pid);
			err |= __get_user(to->si_uid, &from->si_uid);
			err |= __get_user(to->si_utime, &from->si_utime);
			err |= __get_user(to->si_stime, &from->si_stime);
			err |= __get_user(to->si_status, &from->si_status);
			break;
		case __SI_FAULT >> 16:
			/* Widen the 31-bit address, stripping the amode bit. */
			err |= __get_user(tmp, &from->si_addr);
			to->si_addr = (void *)(u64) (tmp & PSW32_ADDR_INSN);
			break;
		case __SI_POLL >> 16:
			err |= __get_user(to->si_band, &from->si_band);
			err |= __get_user(to->si_fd, &from->si_fd);
			break;
		case __SI_TIMER >> 16:
			err |= __get_user(to->si_tid, &from->si_tid);
			err |= __get_user(to->si_overrun, &from->si_overrun);
			err |= __get_user(to->si_int, &from->si_int);
			break;
		default:
			break;
		}
	}
	return err;
}
+
+/*
+ * Atomically swap in the new signal mask, and wait for a signal.
+ */
/*
 * 31-bit sigsuspend: atomically install `mask` (old-style 32-bit set)
 * as the blocked mask and sleep until a handled signal is delivered.
 * history0/history1 are unused legacy arguments from the old ABI.
 * The user-visible syscall result is preset to -EINTR in gpr2;
 * do_signal() is given `saveset` so the original mask is restored
 * when a handler frame is set up.
 */
asmlinkage int
sys32_sigsuspend(struct pt_regs * regs,int history0, int history1, old_sigset_t mask)
{
	sigset_t saveset;

	mask &= _BLOCKABLE;	/* SIGKILL/SIGSTOP can never be blocked */
	spin_lock_irq(&current->sighand->siglock);
	saveset = current->blocked;
	siginitset(&current->blocked, mask);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	regs->gprs[2] = -EINTR;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		if (do_signal(regs, &saveset))
			return -EINTR;
	}
}
+
/*
 * 31-bit rt_sigsuspend: like sys32_sigsuspend() but takes a full
 * compat sigset.  Each 64-bit native sigset word is assembled from
 * two consecutive 32-bit compat words; the switch cases fall through
 * intentionally.
 * NOTE(review): sigsetsize is compared against the native
 * sizeof(sigset_t), not sizeof(compat_sigset_t) - on s390 both are
 * 8 bytes, so this is benign here; confirm if sizes ever diverge.
 */
asmlinkage int
sys32_rt_sigsuspend(struct pt_regs * regs, compat_sigset_t __user *unewset,
								size_t sigsetsize)
{
	sigset_t saveset, newset;
	compat_sigset_t set32;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&set32, unewset, sizeof(set32)))
		return -EFAULT;
	/* Merge pairs of 32-bit compat words into 64-bit sigset words. */
	switch (_NSIG_WORDS) {
	case 4: newset.sig[3] = set32.sig[6] + (((long)set32.sig[7]) << 32);
	case 3: newset.sig[2] = set32.sig[4] + (((long)set32.sig[5]) << 32);
	case 2: newset.sig[1] = set32.sig[2] + (((long)set32.sig[3]) << 32);
	case 1: newset.sig[0] = set32.sig[0] + (((long)set32.sig[1]) << 32);
	}
        sigdelsetmask(&newset, ~_BLOCKABLE);

        spin_lock_irq(&current->sighand->siglock);
        saveset = current->blocked;
        current->blocked = newset;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
        regs->gprs[2] = -EINTR;

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                schedule();
                if (do_signal(regs, &saveset))
                        return -EINTR;
        }
}
+
/*
 * 31-bit (old-style) sigaction: widen the 32-bit handler/restorer
 * pointers and old-style mask from the user's old_sigaction32 into a
 * native k_sigaction, apply it via do_sigaction(), and narrow the
 * previous action back into *oact.
 * Returns 0 or a negative errno.
 */
asmlinkage long
sys32_sigaction(int sig, const struct old_sigaction32 __user *act,
		 struct old_sigaction32 __user *oact)
{
        struct k_sigaction new_ka, old_ka;
	unsigned long sa_handler, sa_restorer;
        int ret;

        if (act) {
		compat_old_sigset_t mask;
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(sa_handler, &act->sa_handler) ||
		    __get_user(sa_restorer, &act->sa_restorer))
			return -EFAULT;
		new_ka.sa.sa_handler = (__sighandler_t) sa_handler;
		new_ka.sa.sa_restorer = (void (*)(void)) sa_restorer;
		/* Return values ignored: access_ok above already
		   validated the whole range. */
		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
		__get_user(mask, &act->sa_mask);
		siginitset(&new_ka.sa.sa_mask, mask);
        }

        ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		sa_handler = (unsigned long) old_ka.sa.sa_handler;
		sa_restorer = (unsigned long) old_ka.sa.sa_restorer;
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(sa_handler, &oact->sa_handler) ||
		    __put_user(sa_restorer, &oact->sa_restorer))
			return -EFAULT;
		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
		/* Old-style interface only reports the first mask word. */
		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
        }

	return ret;
}
+
+int
+do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact);
+
/*
 * 31-bit rt_sigaction: convert the compat sigaction32 (32-bit handler
 * pointer, sigset stored as an array of 32-bit words) to/from the
 * native k_sigaction around do_sigaction().
 * The switch cases fall through intentionally: each 64-bit native
 * sigset word corresponds to two consecutive 32-bit compat words.
 * Returns 0, -EINVAL for a wrong sigsetsize, or -EFAULT.
 */
asmlinkage long
sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
	   struct sigaction32 __user *oact,  size_t sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	unsigned long sa_handler;
	int ret;
	compat_sigset_t set32;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		ret = get_user(sa_handler, &act->sa_handler);
		ret |= __copy_from_user(&set32, &act->sa_mask,
					sizeof(compat_sigset_t));
		/* Merge pairs of 32-bit compat words into 64-bit words. */
		switch (_NSIG_WORDS) {
		case 4: new_ka.sa.sa_mask.sig[3] = set32.sig[6]
				| (((long)set32.sig[7]) << 32);
		case 3: new_ka.sa.sa_mask.sig[2] = set32.sig[4]
				| (((long)set32.sig[5]) << 32);
		case 2: new_ka.sa.sa_mask.sig[1] = set32.sig[2]
				| (((long)set32.sig[3]) << 32);
		case 1: new_ka.sa.sa_mask.sig[0] = set32.sig[0]
				| (((long)set32.sig[1]) << 32);
		}
		ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
		
		if (ret)
			return -EFAULT;
		new_ka.sa.sa_handler = (__sighandler_t) sa_handler;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		/* Split each 64-bit word back into two 32-bit compat words. */
		switch (_NSIG_WORDS) {
		case 4:
			set32.sig[7] = (old_ka.sa.sa_mask.sig[3] >> 32);
			set32.sig[6] = old_ka.sa.sa_mask.sig[3];
		case 3:
			set32.sig[5] = (old_ka.sa.sa_mask.sig[2] >> 32);
			set32.sig[4] = old_ka.sa.sa_mask.sig[2];
		case 2:
			set32.sig[3] = (old_ka.sa.sa_mask.sig[1] >> 32);
			set32.sig[2] = old_ka.sa.sa_mask.sig[1];
		case 1:
			set32.sig[1] = (old_ka.sa.sa_mask.sig[0] >> 32);
			set32.sig[0] = old_ka.sa.sa_mask.sig[0];
		}
		ret = put_user((unsigned long)old_ka.sa.sa_handler, &oact->sa_handler);
		ret |= __copy_to_user(&oact->sa_mask, &set32,
				      sizeof(compat_sigset_t));
		ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
	}

	return ret;
}
+
/*
 * 31-bit sigaltstack: widen the user's stack_t32 into a native
 * stack_t, call do_sigaltstack() under KERNEL_DS (kss/koss live in
 * kernel space, but do_sigaltstack expects __user pointers), then
 * narrow the previous stack description back to user space.
 * Returns do_sigaltstack()'s result or -EFAULT on a bad user buffer.
 */
asmlinkage long
sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss,
							struct pt_regs *regs)
{
	stack_t kss, koss;
	unsigned long ss_sp;
	int ret, err = 0;
	mm_segment_t old_fs = get_fs();

	if (uss) {
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
			return -EFAULT;
		err |= __get_user(ss_sp, &uss->ss_sp);
		err |= __get_user(kss.ss_size, &uss->ss_size);
		err |= __get_user(kss.ss_flags, &uss->ss_flags);
		if (err)
			return -EFAULT;
		kss.ss_sp = (void *) ss_sp;
	}

	/* Temporarily lift the user-space limit so do_sigaltstack can
	   access the kernel-resident kss/koss through __user pointers. */
	set_fs (KERNEL_DS);
	ret = do_sigaltstack((stack_t __user *) (uss ? &kss : NULL),
			     (stack_t __user *) (uoss ? &koss : NULL),
			     regs->gprs[15]);
	set_fs (old_fs);

	if (!ret && uoss) {
		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
			return -EFAULT;
		ss_sp = (unsigned long) koss.ss_sp;
		err |= __put_user(ss_sp, &uoss->ss_sp);
		err |= __put_user(koss.ss_size, &uoss->ss_size);
		err |= __put_user(koss.ss_flags, &uoss->ss_flags);
		if (err)
			return -EFAULT;
	}
	return ret;
}
+
/*
 * Store the current CPU state into a 31-bit user signal frame.
 * The 64-bit PSW is narrowed: the mask becomes PSW32_USER_BITS with
 * only CC and program mask taken from the live PSW (high word), and
 * the address gets the amode-31 bit set.  GPRs are truncated to
 * 32 bits; access and FP registers are copied verbatim.
 * Returns non-zero on a user-copy fault.
 */
static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
{
	_s390_regs_common32 regs32;
	int err, i;

	regs32.psw.mask = PSW32_MASK_MERGE(PSW32_USER_BITS,
					   (__u32)(regs->psw.mask >> 32));
	regs32.psw.addr = PSW32_ADDR_AMODE31 | (__u32) regs->psw.addr;
	for (i = 0; i < NUM_GPRS; i++)
		regs32.gprs[i] = (__u32) regs->gprs[i];
	save_access_regs(current->thread.acrs);
	memcpy(regs32.acrs, current->thread.acrs, sizeof(regs32.acrs));
	err = __copy_to_user(&sregs->regs, &regs32, sizeof(regs32));
	if (err)
		return err;
	save_fp_regs(&current->thread.fp_regs);
	/* s390_fp_regs and _s390_fp_regs32 are the same ! */
	return __copy_to_user(&sregs->fpregs, &current->thread.fp_regs,
			      sizeof(_s390_fp_regs32));
}
+
/*
 * Reload CPU state from a 31-bit user signal frame.
 * The 31-bit PSW mask from the frame is widened into the high word
 * of the 64-bit mask via PSW_MASK_MERGE (only the user-controllable
 * bits are taken over); the address has the amode bit stripped.
 * FPC is sanitized with FPC_VALID_MASK before the FP registers are
 * loaded.  Returns non-zero on a user-copy fault.
 */
static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
{
	_s390_regs_common32 regs32;
	int err, i;

	/* Always make any pending restarted system call return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	err = __copy_from_user(&regs32, &sregs->regs, sizeof(regs32));
	if (err)
		return err;
	regs->psw.mask = PSW_MASK_MERGE(regs->psw.mask,
				        (__u64)regs32.psw.mask << 32);
	regs->psw.addr = (__u64)(regs32.psw.addr & PSW32_ADDR_INSN);
	for (i = 0; i < NUM_GPRS; i++)
		regs->gprs[i] = (__u64) regs32.gprs[i];
	memcpy(current->thread.acrs, regs32.acrs, sizeof(current->thread.acrs));
	restore_access_regs(current->thread.acrs);

	err = __copy_from_user(&current->thread.fp_regs, &sregs->fpregs,
			       sizeof(_s390_fp_regs32));
	current->thread.fp_regs.fpc &= FPC_VALID_MASK;
	if (err)
		return err;

	restore_fp_regs(&current->thread.fp_regs);
	regs->trap = -1;	/* disable syscall checks */
	return 0;
}
+
/*
 * Return from a 31-bit non-RT signal handler.  The frame sits at the
 * user stack pointer (gpr15).  Restores the blocked mask from
 * sc.oldmask (first two 32-bit words only) and the register state
 * from the frame; the syscall "result" is simply the restored gpr2.
 * A corrupt frame kills the task with SIGSEGV.
 */
asmlinkage long sys32_sigreturn(struct pt_regs *regs)
{
	sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15];
	sigset_t set;

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
		goto badframe;

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	if (restore_sigregs32(regs, &frame->sregs))
		goto badframe;

	return regs->gprs[2];

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
+
/*
 * Return from a 31-bit RT signal handler.  Restores the blocked mask
 * and register state from the ucontext in the frame at gpr15, then
 * re-installs the (widened) alternate signal stack via
 * do_sigaltstack() under KERNEL_DS, ignoring its result.
 * A corrupt frame kills the task with SIGSEGV.
 */
asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs)
{
	rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15];
	sigset_t set;
	stack_t st;
	__u32 ss_sp;
	int err;
	mm_segment_t old_fs = get_fs();

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		goto badframe;

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	if (restore_sigregs32(regs, &frame->uc.uc_mcontext))
		goto badframe;

	/* Widen the 31-bit stack_t from the frame into a native one. */
	err = __get_user(ss_sp, &frame->uc.uc_stack.ss_sp);
	st.ss_sp = (void *) A((unsigned long)ss_sp);
	err |= __get_user(st.ss_size, &frame->uc.uc_stack.ss_size);
	err |= __get_user(st.ss_flags, &frame->uc.uc_stack.ss_flags);
	if (err)
		goto badframe; 

	/* It is more difficult to avoid calling this function than to
	   call it and ignore errors.  */
	set_fs (KERNEL_DS);
	do_sigaltstack((stack_t __user *)&st, NULL, regs->gprs[15]);
	set_fs (old_fs);

	return regs->gprs[2];

badframe:
        force_sig(SIGSEGV, current);
        return 0;
}
+
+/*
+ * Set up a signal frame.
+ */
+
+
+/*
+ * Determine which stack to use..
+ */
+static inline void __user *
+get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
+{
+	unsigned long sp;
+
+	/* Default to using normal stack */
+	sp = (unsigned long) A(regs->gprs[15]);
+
+	/* This is the X/Open sanctioned signal stack switching.  */
+	if (ka->sa.sa_flags & SA_ONSTACK) {
+		if (! on_sig_stack(sp))
+			sp = current->sas_ss_sp + current->sas_ss_size;
+	}
+
+	/* This is the legacy signal stack switching. */
+	else if (!user_mode(regs) &&
+		 !(ka->sa.sa_flags & SA_RESTORER) &&
+		 ka->sa.sa_restorer) {
+		sp = (unsigned long) ka->sa.sa_restorer;
+	}
+
+	return (void __user *)((sp - frame_size) & -8ul);
+}
+
+static inline int map_signal(int sig)
+{
+	if (current_thread_info()->exec_domain
+	    && current_thread_info()->exec_domain->signal_invmap
+	    && sig < 32)
+		return current_thread_info()->exec_domain->signal_invmap[sig];
+        else
+		return sig;
+}
+
/*
 * Build a 31-bit non-RT signal frame on the user stack and redirect
 * the thread to the handler: the frame carries the old sigmask, the
 * saved register state, the signal number and, when no SA_RESTORER
 * is given, a 2-byte svc sigreturn stub whose address becomes the
 * handler's return address (gpr14).  On entry to the handler:
 * gpr2 = (mapped) signal, gpr3 = &sigcontext, gpr4/gpr5 = trap
 * number / fault address (see comment below), gpr15 = new frame.
 * On any fault the task is killed with SIGSEGV.
 */
static void setup_frame32(int sig, struct k_sigaction *ka,
			sigset_t *set, struct pt_regs * regs)
{
	sigframe32 __user *frame = get_sigframe(ka, regs, sizeof(sigframe32));
	if (!access_ok(VERIFY_WRITE, frame, sizeof(sigframe32)))
		goto give_sigsegv;

	if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE32))
		goto give_sigsegv;

	if (save_sigregs32(regs, &frame->sregs))
		goto give_sigsegv;
	if (__put_user((unsigned long) &frame->sregs, &frame->sc.sregs))
		goto give_sigsegv;

	/* Set up to return from userspace.  If provided, use a stub
	   already in userspace.  */
	if (ka->sa.sa_flags & SA_RESTORER) {
		regs->gprs[14] = (__u64) ka->sa.sa_restorer;
	} else {
		regs->gprs[14] = (__u64) frame->retcode;
		if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn,
		               (u16 __user *)(frame->retcode)))
			goto give_sigsegv;
        }

	/* Set up backchain. */
	if (__put_user(regs->gprs[15], (unsigned int __user *) frame))
		goto give_sigsegv;

	/* Set up registers for signal handler */
	regs->gprs[15] = (__u64) frame;
	regs->psw.addr = (__u64) ka->sa.sa_handler;

	regs->gprs[2] = map_signal(sig);
	regs->gprs[3] = (__u64) &frame->sc;

	/* We forgot to include these in the sigcontext.
	   To avoid breaking binary compatibility, they are passed as args. */
	regs->gprs[4] = current->thread.trap_no;
	regs->gprs[5] = current->thread.prot_addr;

	/* Place signal number on stack to allow backtrace from handler.  */
	if (__put_user(regs->gprs[2], (int __user *) &frame->signo))
		goto give_sigsegv;
	return;

give_sigsegv:
	force_sigsegv(sig, current);
}
+
/*
 * Build a 31-bit RT signal frame (compat siginfo + ucontext) on the
 * user stack and redirect the thread to the handler.  On entry to the
 * handler: gpr2 = (mapped) signal, gpr3 = &siginfo, gpr4 = &ucontext,
 * gpr14 = sa_restorer or the in-frame svc rt_sigreturn stub,
 * gpr15 = new frame.  On any fault the task is killed with SIGSEGV.
 */
static void setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
			   sigset_t *set, struct pt_regs * regs)
{
	int err = 0;
	rt_sigframe32 __user *frame = get_sigframe(ka, regs, sizeof(rt_sigframe32));
	if (!access_ok(VERIFY_WRITE, frame, sizeof(rt_sigframe32)))
		goto give_sigsegv;

	if (copy_siginfo_to_user32(&frame->info, info))
		goto give_sigsegv;

	/* Create the ucontext.  */
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __put_user(0, &frame->uc.uc_link);
	err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->gprs[15]),
	                  &frame->uc.uc_stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
	err |= save_sigregs32(regs, &frame->uc.uc_mcontext);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
	if (err)
		goto give_sigsegv;

	/* Set up to return from userspace.  If provided, use a stub
	   already in userspace.  */
	if (ka->sa.sa_flags & SA_RESTORER) {
		regs->gprs[14] = (__u64) ka->sa.sa_restorer;
	} else {
		regs->gprs[14] = (__u64) frame->retcode;
		err |= __put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn,
		                  (u16 __user *)(frame->retcode));
	}

	/* Set up backchain. */
	if (__put_user(regs->gprs[15], (unsigned int __user *) frame))
		goto give_sigsegv;

	/* Set up registers for signal handler */
	regs->gprs[15] = (__u64) frame;
	regs->psw.addr = (__u64) ka->sa.sa_handler;

	regs->gprs[2] = map_signal(sig);
	regs->gprs[3] = (__u64) &frame->info;
	regs->gprs[4] = (__u64) &frame->uc;
	return;

give_sigsegv:
	force_sigsegv(sig, current);
}
+
+/*
+ * OK, we're invoking a handler
+ */	
+
/*
 * Deliver one signal to a 31-bit process: build the appropriate
 * frame (RT if the handler was registered with SA_SIGINFO) and,
 * unless SA_NODEFER is set, block the handler's sa_mask plus the
 * delivered signal for the duration of the handler.
 */
void
handle_signal32(unsigned long sig, struct k_sigaction *ka,
		siginfo_t *info, sigset_t *oldset, struct pt_regs * regs)
{
	/* Set up the stack frame */
	if (ka->sa.sa_flags & SA_SIGINFO)
		setup_rt_frame32(sig, ka, info, oldset, regs);
	else
		setup_frame32(sig, ka, oldset, regs);

	if (!(ka->sa.sa_flags & SA_NODEFER)) {
		spin_lock_irq(&current->sighand->siglock);
		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
		sigaddset(&current->blocked,sig);
		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
	}
}
+
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
new file mode 100644
index 0000000..7a607b1
--- /dev/null
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -0,0 +1,1443 @@
+/*
+*  arch/s390/kernel/sys_wrapper31.S
+*    wrapper for 31 bit compatible system calls.
+*
+*  S390 version
+*    Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+*    Author(s): Gerhard Tonn (ton@de.ibm.com),
+*               Thomas Spatzier (tspat@de.ibm.com)
+*/ 
+
# Argument-conversion convention used by every wrapper below: each
# 31-bit user argument is widened to the 64-bit register calling
# convention before branching to the native or compat handler:
#   lgfr  - sign-extend a signed 32-bit value
#   llgfr - zero-extend an unsigned 32-bit value
#   llgtr - load a 31-bit pointer (clears the 33 high bits)
# Commented-out "#sysXX_..._wrapper # void" lines mark syscalls that
# take no arguments and therefore need no wrapper.
	.globl  sys32_exit_wrapper 
sys32_exit_wrapper:
	lgfr	%r2,%r2			# int
	jg	sys_exit		# branch to sys_exit

	.globl  sys32_read_wrapper 
sys32_read_wrapper:
	llgfr	%r2,%r2			# unsigned int
	llgtr	%r3,%r3			# char *
	llgfr	%r4,%r4			# size_t
	jg	sys32_read		# branch to sys_read

	.globl  sys32_write_wrapper 
sys32_write_wrapper:
	llgfr	%r2,%r2			# unsigned int
	llgtr	%r3,%r3			# const char *
	llgfr	%r4,%r4			# size_t
	jg	sys32_write		# branch to system call

	.globl  sys32_open_wrapper 
sys32_open_wrapper:
	llgtr	%r2,%r2			# const char *
	lgfr	%r3,%r3			# int
	lgfr	%r4,%r4			# int
	jg	sys_open		# branch to system call

	.globl  sys32_close_wrapper 
sys32_close_wrapper:
	llgfr	%r2,%r2			# unsigned int
	jg	sys_close		# branch to system call

	.globl  sys32_creat_wrapper 
sys32_creat_wrapper:
	llgtr	%r2,%r2			# const char *
	lgfr	%r3,%r3			# int
	jg	sys_creat		# branch to system call

	.globl  sys32_link_wrapper 
sys32_link_wrapper:
	llgtr	%r2,%r2			# const char *
	llgtr	%r3,%r3			# const char *
	jg	sys_link		# branch to system call

	.globl  sys32_unlink_wrapper 
sys32_unlink_wrapper:
	llgtr	%r2,%r2			# const char *
	jg	sys_unlink		# branch to system call

	.globl  sys32_chdir_wrapper 
sys32_chdir_wrapper:
	llgtr	%r2,%r2			# const char *
	jg	sys_chdir		# branch to system call

	.globl  sys32_time_wrapper 
sys32_time_wrapper:
	llgtr	%r2,%r2			# int *
	jg	compat_sys_time		# branch to system call

	.globl  sys32_mknod_wrapper 
sys32_mknod_wrapper:
	llgtr	%r2,%r2			# const char *
	lgfr	%r3,%r3			# int 
	llgfr	%r4,%r4			# dev
	jg	sys_mknod		# branch to system call

	.globl  sys32_chmod_wrapper 
sys32_chmod_wrapper:
	llgtr	%r2,%r2			# const char *
	llgfr	%r3,%r3			# mode_t
	jg	sys_chmod		# branch to system call

	.globl  sys32_lchown16_wrapper 
sys32_lchown16_wrapper:
	llgtr	%r2,%r2			# const char *
	llgfr	%r3,%r3			# __kernel_old_uid_emu31_t 
	llgfr	%r4,%r4			# __kernel_old_uid_emu31_t 
	jg	sys32_lchown16		# branch to system call

	.globl  sys32_lseek_wrapper 
sys32_lseek_wrapper:
	llgfr	%r2,%r2			# unsigned int
	lgfr	%r3,%r3			# off_t
	llgfr	%r4,%r4			# unsigned int
	jg	sys_lseek		# branch to system call

#sys32_getpid_wrapper				# void 

	.globl  sys32_mount_wrapper 
sys32_mount_wrapper:
	llgtr	%r2,%r2			# char *
	llgtr	%r3,%r3			# char *
	llgtr	%r4,%r4			# char *
	llgfr	%r5,%r5			# unsigned long
	llgtr	%r6,%r6			# void *
	jg	compat_sys_mount	# branch to system call

	.globl  sys32_oldumount_wrapper 
sys32_oldumount_wrapper:
	llgtr	%r2,%r2			# char *
	jg	sys_oldumount		# branch to system call

	.globl  sys32_setuid16_wrapper 
sys32_setuid16_wrapper:
	llgfr	%r2,%r2			# __kernel_old_uid_emu31_t 
	jg	sys32_setuid16		# branch to system call

#sys32_getuid16_wrapper			# void 

	.globl  sys32_ptrace_wrapper 
sys32_ptrace_wrapper:
	lgfr	%r2,%r2			# long
	lgfr	%r3,%r3			# long
	llgtr	%r4,%r4			# long
	llgfr	%r5,%r5			# long
	jg	sys_ptrace		# branch to system call

	.globl  sys32_alarm_wrapper 
sys32_alarm_wrapper:
	llgfr	%r2,%r2			# unsigned int
	jg	sys_alarm		# branch to system call

#sys32_pause_wrapper			# void 
+
# 31-bit syscall wrappers, continued (see conversion convention at the
# top of this file: lgfr sign-extends, llgfr zero-extends, llgtr loads
# a 31-bit pointer).
	.globl  compat_sys_utime_wrapper 
compat_sys_utime_wrapper:
	llgtr	%r2,%r2			# char *
	llgtr	%r3,%r3			# struct compat_utimbuf *
	jg	compat_sys_utime	# branch to system call

	.globl  sys32_access_wrapper 
sys32_access_wrapper:
	llgtr	%r2,%r2			# const char *
	lgfr	%r3,%r3			# int
	jg	sys_access		# branch to system call

	.globl  sys32_nice_wrapper 
sys32_nice_wrapper:
	lgfr	%r2,%r2			# int
	jg	sys_nice		# branch to system call

#sys32_sync_wrapper			# void 

	.globl  sys32_kill_wrapper 
sys32_kill_wrapper:
	lgfr	%r2,%r2			# int
	lgfr	%r3,%r3			# int
	jg	sys_kill		# branch to system call

	.globl  sys32_rename_wrapper 
sys32_rename_wrapper:
	llgtr	%r2,%r2			# const char *
	llgtr	%r3,%r3			# const char *
	jg	sys_rename		# branch to system call

	.globl  sys32_mkdir_wrapper 
sys32_mkdir_wrapper:
	llgtr	%r2,%r2			# const char *
	lgfr	%r3,%r3			# int
	jg	sys_mkdir		# branch to system call

	.globl  sys32_rmdir_wrapper 
sys32_rmdir_wrapper:
	llgtr	%r2,%r2			# const char *
	jg	sys_rmdir		# branch to system call

	.globl  sys32_dup_wrapper 
sys32_dup_wrapper:
	llgfr	%r2,%r2			# unsigned int
	jg	sys_dup			# branch to system call

	.globl  sys32_pipe_wrapper 
sys32_pipe_wrapper:
	llgtr	%r2,%r2			# u32 *
	jg	sys_pipe		# branch to system call

	.globl  compat_sys_times_wrapper 
compat_sys_times_wrapper:
	llgtr	%r2,%r2			# struct compat_tms *
	jg	compat_sys_times	# branch to system call

	.globl  sys32_brk_wrapper 
sys32_brk_wrapper:
	llgtr	%r2,%r2			# unsigned long
	jg	sys_brk			# branch to system call

	.globl  sys32_setgid16_wrapper 
sys32_setgid16_wrapper:
	llgfr	%r2,%r2			# __kernel_old_gid_emu31_t 
	jg	sys32_setgid16		# branch to system call

#sys32_getgid16_wrapper			# void 

	.globl sys32_signal_wrapper
sys32_signal_wrapper:
	lgfr	%r2,%r2			# int 
	llgtr	%r3,%r3			# __sighandler_t
	jg	sys_signal

#sys32_geteuid16_wrapper		# void 

#sys32_getegid16_wrapper		# void 

	.globl  sys32_acct_wrapper 
sys32_acct_wrapper:
	llgtr	%r2,%r2			# char *
	jg	sys_acct		# branch to system call

	.globl  sys32_umount_wrapper 
sys32_umount_wrapper:
	llgtr	%r2,%r2			# char *
	lgfr	%r3,%r3			# int
	jg	sys_umount		# branch to system call

	.globl  compat_sys_ioctl_wrapper
compat_sys_ioctl_wrapper:
	llgfr	%r2,%r2			# unsigned int
	llgfr	%r3,%r3			# unsigned int
	llgfr	%r4,%r4			# unsigned int
	jg	compat_sys_ioctl	# branch to system call

	.globl  compat_sys_fcntl_wrapper 
compat_sys_fcntl_wrapper:
	llgfr	%r2,%r2			# unsigned int
	llgfr	%r3,%r3			# unsigned int 
	llgfr	%r4,%r4			# unsigned long
	jg	compat_sys_fcntl	# branch to system call

	.globl  sys32_setpgid_wrapper 
sys32_setpgid_wrapper:
	lgfr	%r2,%r2			# pid_t
	lgfr	%r3,%r3			# pid_t
	jg	sys_setpgid		# branch to system call

	.globl  sys32_umask_wrapper 
sys32_umask_wrapper:
	lgfr	%r2,%r2			# int
	jg	sys_umask		# branch to system call

	.globl  sys32_chroot_wrapper 
sys32_chroot_wrapper:
	llgtr	%r2,%r2			# char *
	jg	sys_chroot		# branch to system call

	.globl sys32_ustat_wrapper
sys32_ustat_wrapper:
	llgfr	%r2,%r2			# dev_t 
	llgtr	%r3,%r3			# struct ustat *
	jg	sys_ustat

	.globl  sys32_dup2_wrapper 
sys32_dup2_wrapper:
	llgfr	%r2,%r2			# unsigned int
	llgfr	%r3,%r3			# unsigned int
	jg	sys_dup2		# branch to system call

#sys32_getppid_wrapper			# void 

#sys32_getpgrp_wrapper			# void 

#sys32_setsid_wrapper			# void 

	.globl  sys32_sigaction_wrapper
sys32_sigaction_wrapper:
	lgfr	%r2,%r2			# int 
	llgtr	%r3,%r3			# const struct old_sigaction *
	llgtr	%r4,%r4			# struct old_sigaction32 *
	jg	sys32_sigaction		# branch to system call

	.globl  sys32_setreuid16_wrapper 
sys32_setreuid16_wrapper:
	llgfr	%r2,%r2			# __kernel_old_uid_emu31_t 
	llgfr	%r3,%r3			# __kernel_old_uid_emu31_t 
	jg	sys32_setreuid16	# branch to system call

	.globl  sys32_setregid16_wrapper 
sys32_setregid16_wrapper:
	llgfr	%r2,%r2			# __kernel_old_gid_emu31_t 
	llgfr	%r3,%r3			# __kernel_old_gid_emu31_t 
	jg	sys32_setregid16	# branch to system call
+
+#sys32_sigsuspend_wrapper		# done in sigsuspend_glue 
+
+	.globl  compat_sys_sigpending_wrapper 
+compat_sys_sigpending_wrapper:
+	llgtr	%r2,%r2			# compat_old_sigset_t *
+	jg	compat_sys_sigpending	# branch to system call
+
+	.globl  sys32_sethostname_wrapper 
+sys32_sethostname_wrapper:
+	llgtr	%r2,%r2			# char *
+	lgfr	%r3,%r3			# int
+	jg	sys_sethostname		# branch to system call
+
+	.globl  compat_sys_setrlimit_wrapper 
+compat_sys_setrlimit_wrapper:
+	llgfr	%r2,%r2			# unsigned int
+	llgtr	%r3,%r3			# struct rlimit_emu31 *
+	jg	compat_sys_setrlimit	# branch to system call
+
+	.globl  compat_sys_old_getrlimit_wrapper 
+compat_sys_old_getrlimit_wrapper:
+	llgfr	%r2,%r2			# unsigned int
+	llgtr	%r3,%r3			# struct rlimit_emu31 *
+	jg	compat_sys_old_getrlimit # branch to system call
+
+	.globl  compat_sys_getrlimit_wrapper 
+compat_sys_getrlimit_wrapper:
+	llgfr	%r2,%r2			# unsigned int
+	llgtr	%r3,%r3			# struct rlimit_emu31 *
+	jg	compat_sys_getrlimit	# branch to system call
+
+	.globl  sys32_mmap2_wrapper 
+sys32_mmap2_wrapper:
+	llgtr	%r2,%r2			# struct mmap_arg_struct_emu31 *
+	jg	sys32_mmap2			# branch to system call
+
+	.globl  compat_sys_getrusage_wrapper 
+compat_sys_getrusage_wrapper:
+	lgfr	%r2,%r2			# int
+	llgtr	%r3,%r3			# struct rusage_emu31 *
+	jg	compat_sys_getrusage	# branch to system call
+
+	.globl  sys32_gettimeofday_wrapper 
+sys32_gettimeofday_wrapper:
+	llgtr	%r2,%r2			# struct timeval_emu31 *
+	llgtr	%r3,%r3			# struct timezone *
+	jg	sys32_gettimeofday	# branch to system call
+
+	.globl  sys32_settimeofday_wrapper 
+sys32_settimeofday_wrapper:
+	llgtr	%r2,%r2			# struct timeval_emu31 *
+	llgtr	%r3,%r3			# struct timezone *
+	jg	sys32_settimeofday	# branch to system call
+
+	.globl  sys32_getgroups16_wrapper 
+sys32_getgroups16_wrapper:
+	lgfr	%r2,%r2			# int
+	llgtr	%r3,%r3			# __kernel_old_gid_emu31_t *
+	jg	sys32_getgroups16	# branch to system call
+
+	.globl  sys32_setgroups16_wrapper 
+sys32_setgroups16_wrapper:
+	lgfr	%r2,%r2			# int
+	llgtr	%r3,%r3			# __kernel_old_gid_emu31_t *
+	jg	sys32_setgroups16	# branch to system call
+
+	.globl  sys32_symlink_wrapper 
+sys32_symlink_wrapper:
+	llgtr	%r2,%r2			# const char *
+	llgtr	%r3,%r3			# const char *
+	jg	sys_symlink		# branch to system call
+
+	.globl  sys32_readlink_wrapper 
+sys32_readlink_wrapper:
+	llgtr	%r2,%r2			# const char *
+	llgtr	%r3,%r3			# char *
+	lgfr	%r4,%r4			# int
+	jg	sys_readlink		# branch to system call
+
+	.globl  sys32_uselib_wrapper 
+sys32_uselib_wrapper:
+	llgtr	%r2,%r2			# const char *
+	jg	sys_uselib		# branch to system call
+
+	.globl  sys32_swapon_wrapper 
+sys32_swapon_wrapper:
+	llgtr	%r2,%r2			# const char *
+	lgfr	%r3,%r3			# int
+	jg	sys_swapon		# branch to system call
+
+	.globl  sys32_reboot_wrapper 
+sys32_reboot_wrapper:
+	lgfr	%r2,%r2			# int
+	lgfr	%r3,%r3			# int
+	llgfr	%r4,%r4			# unsigned int
+	llgtr	%r5,%r5			# void *
+	jg	sys_reboot		# branch to system call
+
+	.globl  old32_readdir_wrapper 
+old32_readdir_wrapper:
+	llgfr	%r2,%r2			# unsigned int
+	llgtr	%r3,%r3			# void *
+	llgfr	%r4,%r4			# unsigned int
+	jg	compat_sys_old_readdir	# branch to system call
+
+	.globl  old32_mmap_wrapper 
+old32_mmap_wrapper:
+	llgtr	%r2,%r2			# struct mmap_arg_struct_emu31 *
+	jg	old32_mmap		# branch to system call
+
+	.globl  sys32_munmap_wrapper 
+sys32_munmap_wrapper:
+	llgfr	%r2,%r2			# unsigned long
+	llgfr	%r3,%r3			# size_t 
+	jg	sys_munmap		# branch to system call
+
+	.globl  sys32_truncate_wrapper 
+sys32_truncate_wrapper:
+	llgtr	%r2,%r2			# const char *
+	llgfr	%r3,%r3			# unsigned long
+	jg	sys_truncate		# branch to system call
+
+	.globl  sys32_ftruncate_wrapper 
+sys32_ftruncate_wrapper:
+	llgfr	%r2,%r2			# unsigned int
+	llgfr	%r3,%r3			# unsigned long
+	jg	sys_ftruncate		# branch to system call
+
# Compat wrappers, continued: widen 32-bit args (llgtr = user pointer,
# llgfr = unsigned, lgfr = signed) and tail-branch to the 64-bit handler.
	.globl  sys32_fchmod_wrapper 
sys32_fchmod_wrapper:
	llgfr	%r2,%r2			# unsigned int
	llgfr	%r3,%r3			# mode_t
	jg	sys_fchmod		# branch to system call

	.globl  sys32_fchown16_wrapper 
sys32_fchown16_wrapper:
	llgfr	%r2,%r2			# unsigned int
	llgfr	%r3,%r3			# compat_uid_t
	llgfr	%r4,%r4			# compat_gid_t
	jg	sys32_fchown16		# branch to system call

	.globl  sys32_getpriority_wrapper 
sys32_getpriority_wrapper:
	lgfr	%r2,%r2			# int
	lgfr	%r3,%r3			# int
	jg	sys_getpriority		# branch to system call

	.globl  sys32_setpriority_wrapper 
sys32_setpriority_wrapper:
	lgfr	%r2,%r2			# int
	lgfr	%r3,%r3			# int
	lgfr	%r4,%r4			# int
	jg	sys_setpriority		# branch to system call

	.globl  compat_sys_statfs_wrapper 
compat_sys_statfs_wrapper:
	llgtr	%r2,%r2			# char *
	llgtr	%r3,%r3			# struct compat_statfs *
	jg	compat_sys_statfs	# branch to system call

	.globl  compat_sys_fstatfs_wrapper 
compat_sys_fstatfs_wrapper:
	llgfr	%r2,%r2			# unsigned int
	llgtr	%r3,%r3			# struct compat_statfs *
	jg	compat_sys_fstatfs	# branch to system call

	.globl  compat_sys_socketcall_wrapper 
compat_sys_socketcall_wrapper:
	lgfr	%r2,%r2			# int
	llgtr	%r3,%r3			# u32 *
	jg	compat_sys_socketcall	# branch to system call

	.globl  sys32_syslog_wrapper 
sys32_syslog_wrapper:
	lgfr	%r2,%r2			# int
	llgtr	%r3,%r3			# char *
	lgfr	%r4,%r4			# int
	jg	sys_syslog		# branch to system call

	.globl  compat_sys_setitimer_wrapper 
compat_sys_setitimer_wrapper:
	lgfr	%r2,%r2			# int
	llgtr	%r3,%r3			# struct itimerval_emu31 *
	llgtr	%r4,%r4			# struct itimerval_emu31 *
	jg	compat_sys_setitimer	# branch to system call

	.globl  compat_sys_getitimer_wrapper 
compat_sys_getitimer_wrapper:
	lgfr	%r2,%r2			# int
	llgtr	%r3,%r3			# struct itimerval_emu31 *
	jg	compat_sys_getitimer	# branch to system call

	.globl  compat_sys_newstat_wrapper 
compat_sys_newstat_wrapper:
	llgtr	%r2,%r2			# char *
	llgtr	%r3,%r3			# struct stat_emu31 *
	jg	compat_sys_newstat	# branch to system call

	.globl  compat_sys_newlstat_wrapper 
compat_sys_newlstat_wrapper:
	llgtr	%r2,%r2			# char *
	llgtr	%r3,%r3			# struct stat_emu31 *
	jg	compat_sys_newlstat	# branch to system call

	.globl  compat_sys_newfstat_wrapper 
compat_sys_newfstat_wrapper:
	llgfr	%r2,%r2			# unsigned int
	llgtr	%r3,%r3			# struct stat_emu31 *
	jg	compat_sys_newfstat	# branch to system call

#sys32_vhangup_wrapper			# void 

	.globl  compat_sys_wait4_wrapper 
compat_sys_wait4_wrapper:
	lgfr	%r2,%r2			# pid_t
	llgtr	%r3,%r3			# unsigned int *
	lgfr	%r4,%r4			# int
	llgtr	%r5,%r5			# struct rusage *
	jg	compat_sys_wait4	# branch to system call

	.globl  sys32_swapoff_wrapper 
sys32_swapoff_wrapper:
	llgtr	%r2,%r2			# const char *
	jg	sys_swapoff		# branch to system call

	.globl  sys32_sysinfo_wrapper 
sys32_sysinfo_wrapper:
	llgtr	%r2,%r2			# struct sysinfo_emu31 *
	jg	sys32_sysinfo		# branch to system call

	.globl  sys32_ipc_wrapper 
sys32_ipc_wrapper:
	llgfr	%r2,%r2			# uint
	lgfr	%r3,%r3			# int
	lgfr	%r4,%r4			# int
	lgfr	%r5,%r5			# int
	llgfr	%r6,%r6			# u32
	jg	sys32_ipc		# branch to system call

	.globl  sys32_fsync_wrapper 
sys32_fsync_wrapper:
	llgfr	%r2,%r2			# unsigned int
	jg	sys_fsync		# branch to system call
+
+#sys32_sigreturn_wrapper		# done in sigreturn_glue 
+
+#sys32_clone_wrapper			# done in clone_glue 
+
# Compat wrappers, continued.  Note: uname and personality route to the
# s390x-specific variants (s390x_newuname / s390x_personality) which
# adjust the reported machine/personality for 31-bit callers.
	.globl  sys32_setdomainname_wrapper 
sys32_setdomainname_wrapper:
	llgtr	%r2,%r2			# char *
	lgfr	%r3,%r3			# int
	jg	sys_setdomainname	# branch to system call

	.globl  sys32_newuname_wrapper 
sys32_newuname_wrapper:
	llgtr	%r2,%r2			# struct new_utsname *
	jg	s390x_newuname		# branch to system call

	.globl  sys32_adjtimex_wrapper 
sys32_adjtimex_wrapper:
	llgtr	%r2,%r2			# struct timex_emu31 *
	jg	sys32_adjtimex		# branch to system call

	.globl  sys32_mprotect_wrapper 
sys32_mprotect_wrapper:
	llgtr	%r2,%r2			# unsigned long (actually a pointer)
	llgfr	%r3,%r3			# size_t
	llgfr	%r4,%r4			# unsigned long
	jg	sys_mprotect		# branch to system call

	.globl  compat_sys_sigprocmask_wrapper 
compat_sys_sigprocmask_wrapper:
	lgfr	%r2,%r2			# int
	llgtr	%r3,%r3			# compat_old_sigset_t *
	llgtr	%r4,%r4			# compat_old_sigset_t *
	jg	compat_sys_sigprocmask		# branch to system call

	.globl  sys32_init_module_wrapper 
sys32_init_module_wrapper:
	llgtr	%r2,%r2			# void *
	llgfr	%r3,%r3			# unsigned long
	llgtr	%r4,%r4			# char *
	jg	sys32_init_module	# branch to system call

	.globl  sys32_delete_module_wrapper 
sys32_delete_module_wrapper:
	llgtr	%r2,%r2			# const char *
	llgfr	%r3,%r3			# unsigned int
	jg	sys32_delete_module	# branch to system call

	.globl  sys32_quotactl_wrapper 
sys32_quotactl_wrapper:
	llgfr	%r2,%r2			# unsigned int
	llgtr	%r3,%r3			# const char *
	llgfr	%r4,%r4			# qid_t
	llgtr	%r5,%r5			# caddr_t
	jg	sys_quotactl		# branch to system call

	.globl  sys32_getpgid_wrapper 
sys32_getpgid_wrapper:
	lgfr	%r2,%r2			# pid_t
	jg	sys_getpgid		# branch to system call

	.globl  sys32_fchdir_wrapper 
sys32_fchdir_wrapper:
	llgfr	%r2,%r2			# unsigned int
	jg	sys_fchdir		# branch to system call

	.globl  sys32_bdflush_wrapper 
sys32_bdflush_wrapper:
	lgfr	%r2,%r2			# int
	lgfr	%r3,%r3			# long
	jg	sys_bdflush		# branch to system call

	.globl  sys32_sysfs_wrapper 
sys32_sysfs_wrapper:
	lgfr	%r2,%r2			# int
	llgfr	%r3,%r3			# unsigned long
	llgfr	%r4,%r4			# unsigned long
	jg	sys_sysfs		# branch to system call

	.globl  sys32_personality_wrapper 
sys32_personality_wrapper:
	llgfr	%r2,%r2			# unsigned long
	jg	s390x_personality	# branch to system call

	.globl  sys32_setfsuid16_wrapper 
sys32_setfsuid16_wrapper:
	llgfr	%r2,%r2			# __kernel_old_uid_emu31_t 
	jg	sys32_setfsuid16	# branch to system call

	.globl  sys32_setfsgid16_wrapper 
sys32_setfsgid16_wrapper:
	llgfr	%r2,%r2			# __kernel_old_gid_emu31_t 
	jg	sys32_setfsgid16	# branch to system call

	.globl  sys32_llseek_wrapper 
sys32_llseek_wrapper:
	llgfr	%r2,%r2			# unsigned int
	llgfr	%r3,%r3			# unsigned long
	llgfr	%r4,%r4			# unsigned long
	llgtr	%r5,%r5			# loff_t *
	llgfr	%r6,%r6			# unsigned int
	jg	sys_llseek		# branch to system call

	.globl  sys32_getdents_wrapper 
sys32_getdents_wrapper:
	llgfr	%r2,%r2			# unsigned int
	llgtr	%r3,%r3			# void *
	llgfr	%r4,%r4			# unsigned int
	jg	compat_sys_getdents	# branch to system call
+
# Compat wrappers, continued (select/IO-vector/scheduling group).
	.globl  compat_sys_select_wrapper
compat_sys_select_wrapper:
	lgfr	%r2,%r2			# int
	llgtr	%r3,%r3			# compat_fd_set *
	llgtr	%r4,%r4			# compat_fd_set *
	llgtr	%r5,%r5			# compat_fd_set *
	llgtr	%r6,%r6			# struct compat_timeval *
	jg	compat_sys_select	# branch to system call

	.globl  sys32_flock_wrapper 
sys32_flock_wrapper:
	llgfr	%r2,%r2			# unsigned int
	llgfr	%r3,%r3			# unsigned int
	jg	sys_flock		# branch to system call

	.globl  sys32_msync_wrapper 
sys32_msync_wrapper:
	llgfr	%r2,%r2			# unsigned long
	llgfr	%r3,%r3			# size_t
	lgfr	%r4,%r4			# int
	jg	sys_msync		# branch to system call

	.globl  compat_sys_readv_wrapper
compat_sys_readv_wrapper:
	lgfr	%r2,%r2			# int
	llgtr	%r3,%r3			# const struct compat_iovec *
	llgfr	%r4,%r4			# unsigned long
	jg	compat_sys_readv	# branch to system call

	.globl  compat_sys_writev_wrapper
compat_sys_writev_wrapper:
	lgfr	%r2,%r2			# int
	llgtr	%r3,%r3			# const struct compat_iovec *
	llgfr	%r4,%r4			# unsigned long
	jg	compat_sys_writev	# branch to system call

	.globl  sys32_getsid_wrapper 
sys32_getsid_wrapper:
	lgfr	%r2,%r2			# pid_t
	jg	sys_getsid		# branch to system call

	.globl  sys32_fdatasync_wrapper 
sys32_fdatasync_wrapper:
	llgfr	%r2,%r2			# unsigned int
	jg	sys_fdatasync		# branch to system call

#sys32_sysctl_wrapper			# tbd 

	.globl  sys32_mlock_wrapper 
sys32_mlock_wrapper:
	llgfr	%r2,%r2			# unsigned long
	llgfr	%r3,%r3			# size_t
	jg	sys_mlock		# branch to system call

	.globl  sys32_munlock_wrapper 
sys32_munlock_wrapper:
	llgfr	%r2,%r2			# unsigned long
	llgfr	%r3,%r3			# size_t
	jg	sys_munlock		# branch to system call

	.globl  sys32_mlockall_wrapper 
sys32_mlockall_wrapper:
	lgfr	%r2,%r2			# int
	jg	sys_mlockall		# branch to system call

#sys32_munlockall_wrapper		# void 

	.globl  sys32_sched_setparam_wrapper 
sys32_sched_setparam_wrapper:
	lgfr	%r2,%r2			# pid_t
	llgtr	%r3,%r3			# struct sched_param *
	jg	sys_sched_setparam	# branch to system call

	.globl  sys32_sched_getparam_wrapper 
sys32_sched_getparam_wrapper:
	lgfr	%r2,%r2			# pid_t
	llgtr	%r3,%r3			# struct sched_param *
	jg	sys_sched_getparam	# branch to system call

	.globl  sys32_sched_setscheduler_wrapper 
sys32_sched_setscheduler_wrapper:
	lgfr	%r2,%r2			# pid_t
	lgfr	%r3,%r3			# int
	llgtr	%r4,%r4			# struct sched_param *
	jg	sys_sched_setscheduler	# branch to system call

	.globl  sys32_sched_getscheduler_wrapper 
sys32_sched_getscheduler_wrapper:
	lgfr	%r2,%r2			# pid_t
	jg	sys_sched_getscheduler	# branch to system call

#sys32_sched_yield_wrapper		# void 

	.globl  sys32_sched_get_priority_max_wrapper 
sys32_sched_get_priority_max_wrapper:
	lgfr	%r2,%r2			# int
	jg	sys_sched_get_priority_max	# branch to system call

	.globl  sys32_sched_get_priority_min_wrapper 
sys32_sched_get_priority_min_wrapper:
	lgfr	%r2,%r2			# int
	jg	sys_sched_get_priority_min	# branch to system call
+
# Compat wrappers, continued (timers, 16-bit uid/gid triples, rt signals).
	.globl  sys32_sched_rr_get_interval_wrapper 
sys32_sched_rr_get_interval_wrapper:
	lgfr	%r2,%r2			# pid_t
	llgtr	%r3,%r3			# struct compat_timespec *
	jg	sys32_sched_rr_get_interval	# branch to system call

	.globl  compat_sys_nanosleep_wrapper 
compat_sys_nanosleep_wrapper:
	llgtr	%r2,%r2			# struct compat_timespec *
	llgtr	%r3,%r3			# struct compat_timespec *
	jg	compat_sys_nanosleep		# branch to system call

	.globl  sys32_mremap_wrapper 
sys32_mremap_wrapper:
	llgfr	%r2,%r2			# unsigned long
	llgfr	%r3,%r3			# unsigned long
	llgfr	%r4,%r4			# unsigned long
	llgfr	%r5,%r5			# unsigned long
	llgfr	%r6,%r6			# unsigned long
	jg	sys_mremap		# branch to system call

	.globl  sys32_setresuid16_wrapper 
sys32_setresuid16_wrapper:
	llgfr	%r2,%r2			# __kernel_old_uid_emu31_t 
	llgfr	%r3,%r3			# __kernel_old_uid_emu31_t 
	llgfr	%r4,%r4			# __kernel_old_uid_emu31_t 
	jg	sys32_setresuid16	# branch to system call

	.globl  sys32_getresuid16_wrapper 
sys32_getresuid16_wrapper:
	llgtr	%r2,%r2			# __kernel_old_uid_emu31_t *
	llgtr	%r3,%r3			# __kernel_old_uid_emu31_t *
	llgtr	%r4,%r4			# __kernel_old_uid_emu31_t *
	jg	sys32_getresuid16	# branch to system call

	.globl  sys32_poll_wrapper 
sys32_poll_wrapper:
	llgtr	%r2,%r2			# struct pollfd * 
	llgfr	%r3,%r3			# unsigned int 
	lgfr	%r4,%r4			# long 
	jg	sys_poll		# branch to system call

	.globl  compat_sys_nfsservctl_wrapper
compat_sys_nfsservctl_wrapper:
	lgfr	%r2,%r2			# int 
	llgtr	%r3,%r3			# struct compat_nfsctl_arg*
	llgtr	%r4,%r4			# union compat_nfsctl_res*
	jg	compat_sys_nfsservctl	# branch to system call

	.globl  sys32_setresgid16_wrapper 
sys32_setresgid16_wrapper:
	llgfr	%r2,%r2			# __kernel_old_gid_emu31_t 
	llgfr	%r3,%r3			# __kernel_old_gid_emu31_t 
	llgfr	%r4,%r4			# __kernel_old_gid_emu31_t 
	jg	sys32_setresgid16	# branch to system call

	.globl  sys32_getresgid16_wrapper 
sys32_getresgid16_wrapper:
	llgtr	%r2,%r2			# __kernel_old_gid_emu31_t *
	llgtr	%r3,%r3			# __kernel_old_gid_emu31_t *
	llgtr	%r4,%r4			# __kernel_old_gid_emu31_t *
	jg	sys32_getresgid16	# branch to system call

	.globl  sys32_prctl_wrapper 
sys32_prctl_wrapper:
	lgfr	%r2,%r2			# int
	llgfr	%r3,%r3			# unsigned long
	llgfr	%r4,%r4			# unsigned long
	llgfr	%r5,%r5			# unsigned long
	llgfr	%r6,%r6			# unsigned long
	jg	sys_prctl		# branch to system call

#sys32_rt_sigreturn_wrapper		# done in rt_sigreturn_glue 

	.globl  sys32_rt_sigaction_wrapper 
sys32_rt_sigaction_wrapper:
	lgfr	%r2,%r2			# int
	llgtr	%r3,%r3			# const struct sigaction_emu31 *
	llgtr	%r4,%r4			# const struct sigaction_emu31 *
	llgfr	%r5,%r5			# size_t
	jg	sys32_rt_sigaction	# branch to system call

	.globl  sys32_rt_sigprocmask_wrapper 
sys32_rt_sigprocmask_wrapper:
	lgfr	%r2,%r2			# int
	llgtr	%r3,%r3			# old_sigset_emu31 *
	llgtr	%r4,%r4			# old_sigset_emu31 *
	llgfr	%r5,%r5			# size_t
	jg	sys32_rt_sigprocmask	# branch to system call

	.globl  sys32_rt_sigpending_wrapper 
sys32_rt_sigpending_wrapper:
	llgtr	%r2,%r2			# sigset_emu31 *
	llgfr	%r3,%r3			# size_t
	jg	sys32_rt_sigpending	# branch to system call

	.globl  compat_sys_rt_sigtimedwait_wrapper
compat_sys_rt_sigtimedwait_wrapper:
	llgtr	%r2,%r2			# const sigset_emu31_t *
	llgtr	%r3,%r3			# siginfo_emu31_t *
	llgtr	%r4,%r4			# const struct compat_timespec *
	llgfr	%r5,%r5			# size_t
	jg	compat_sys_rt_sigtimedwait	# branch to system call

	.globl  sys32_rt_sigqueueinfo_wrapper 
sys32_rt_sigqueueinfo_wrapper:
	lgfr	%r2,%r2			# int
	lgfr	%r3,%r3			# int
	llgtr	%r4,%r4			# siginfo_emu31_t *
	jg	sys32_rt_sigqueueinfo	# branch to system call
+
+#sys32_rt_sigsuspend_wrapper		# done in rt_sigsuspend_glue 
+
# Compat wrappers, continued.  pread64/pwrite64 pass the 64-bit offset as
# two u32 halves; the sys32_* C helpers reassemble it.
	.globl  sys32_pread64_wrapper 
sys32_pread64_wrapper:
	llgfr	%r2,%r2			# unsigned int
	llgtr	%r3,%r3			# char *
	llgfr	%r4,%r4			# size_t
	llgfr	%r5,%r5			# u32
	llgfr	%r6,%r6			# u32
	jg	sys32_pread64		# branch to system call

	.globl  sys32_pwrite64_wrapper 
sys32_pwrite64_wrapper:
	llgfr	%r2,%r2			# unsigned int
	llgtr	%r3,%r3			# const char *
	llgfr	%r4,%r4			# size_t
	llgfr	%r5,%r5			# u32
	llgfr	%r6,%r6			# u32
	jg	sys32_pwrite64		# branch to system call

	.globl  sys32_chown16_wrapper 
sys32_chown16_wrapper:
	llgtr	%r2,%r2			# const char *
	llgfr	%r3,%r3			# __kernel_old_uid_emu31_t 
	llgfr	%r4,%r4			# __kernel_old_gid_emu31_t 
	jg	sys32_chown16		# branch to system call

	.globl  sys32_getcwd_wrapper 
sys32_getcwd_wrapper:
	llgtr	%r2,%r2			# char *
	llgfr	%r3,%r3			# unsigned long
	jg	sys_getcwd		# branch to system call

	.globl  sys32_capget_wrapper 
sys32_capget_wrapper:
	llgtr	%r2,%r2			# cap_user_header_t
	llgtr	%r3,%r3			# cap_user_data_t
	jg	sys_capget		# branch to system call

	.globl  sys32_capset_wrapper 
sys32_capset_wrapper:
	llgtr	%r2,%r2			# cap_user_header_t
	llgtr	%r3,%r3			# const cap_user_data_t
	jg	sys_capset		# branch to system call

	.globl sys32_sigaltstack_wrapper
sys32_sigaltstack_wrapper:
	llgtr	%r2,%r2			# const stack_emu31_t * 
	llgtr	%r3,%r3			# stack_emu31_t * 
	jg	sys32_sigaltstack	# branch to system call

	.globl  sys32_sendfile_wrapper 
sys32_sendfile_wrapper:
	lgfr	%r2,%r2			# int
	lgfr	%r3,%r3			# int
	llgtr	%r4,%r4			# __kernel_off_emu31_t *
	llgfr	%r5,%r5			# size_t
	jg	sys32_sendfile		# branch to system call

#sys32_vfork_wrapper			# done in vfork_glue 

	.globl  sys32_truncate64_wrapper 
sys32_truncate64_wrapper:
	llgtr	%r2,%r2			# const char *
	llgfr	%r3,%r3			# unsigned long
	llgfr	%r4,%r4			# unsigned long
	jg	sys32_truncate64	# branch to system call

	.globl  sys32_ftruncate64_wrapper 
sys32_ftruncate64_wrapper:
	llgfr	%r2,%r2			# unsigned int
	llgfr	%r3,%r3			# unsigned long
	llgfr	%r4,%r4			# unsigned long
	jg	sys32_ftruncate64	# branch to system call

	.globl sys32_lchown_wrapper	
sys32_lchown_wrapper:
	llgtr	%r2,%r2			# const char *
	llgfr	%r3,%r3			# uid_t
	llgfr	%r4,%r4			# gid_t
	jg	sys_lchown		# branch to system call

#sys32_getuid_wrapper			# void			 
#sys32_getgid_wrapper			# void 
#sys32_geteuid_wrapper			# void 
#sys32_getegid_wrapper			# void 

	.globl sys32_setreuid_wrapper
sys32_setreuid_wrapper:
	llgfr	%r2,%r2			# uid_t
	llgfr	%r3,%r3			# uid_t
	jg	sys_setreuid		# branch to system call

	.globl sys32_setregid_wrapper
sys32_setregid_wrapper:
	llgfr	%r2,%r2			# gid_t
	llgfr	%r3,%r3			# gid_t
	jg	sys_setregid		# branch to system call

	.globl  sys32_getgroups_wrapper 
sys32_getgroups_wrapper:
	lgfr	%r2,%r2			# int
	llgtr	%r3,%r3			# gid_t *
	jg	sys_getgroups		# branch to system call
+
# Compat wrappers, continued (32-bit uid/gid syscalls and 64-bit stat
# family; these need no sys32 helper because uid_t/gid_t are already
# 32 bits wide in both ABIs).
	.globl  sys32_setgroups_wrapper 
sys32_setgroups_wrapper:
	lgfr	%r2,%r2			# int
	llgtr	%r3,%r3			# gid_t *
	jg	sys_setgroups		# branch to system call

	.globl sys32_fchown_wrapper	
sys32_fchown_wrapper:
	llgfr	%r2,%r2			# unsigned int
	llgfr	%r3,%r3			# uid_t
	llgfr	%r4,%r4			# gid_t
	jg	sys_fchown		# branch to system call

	.globl sys32_setresuid_wrapper	
sys32_setresuid_wrapper:
	llgfr	%r2,%r2			# uid_t
	llgfr	%r3,%r3			# uid_t
	llgfr	%r4,%r4			# uid_t
	jg	sys_setresuid		# branch to system call

	.globl sys32_getresuid_wrapper	
sys32_getresuid_wrapper:
	llgtr	%r2,%r2			# uid_t *
	llgtr	%r3,%r3			# uid_t *
	llgtr	%r4,%r4			# uid_t *
	jg	sys_getresuid		# branch to system call

	.globl sys32_setresgid_wrapper	
sys32_setresgid_wrapper:
	llgfr	%r2,%r2			# gid_t
	llgfr	%r3,%r3			# gid_t
	llgfr	%r4,%r4			# gid_t
	jg	sys_setresgid		# branch to system call

	.globl sys32_getresgid_wrapper	
sys32_getresgid_wrapper:
	llgtr	%r2,%r2			# gid_t *
	llgtr	%r3,%r3			# gid_t *
	llgtr	%r4,%r4			# gid_t *
	jg	sys_getresgid		# branch to system call

	.globl sys32_chown_wrapper	
sys32_chown_wrapper:
	llgtr	%r2,%r2			# const char *
	llgfr	%r3,%r3			# uid_t
	llgfr	%r4,%r4			# gid_t
	jg	sys_chown		# branch to system call

	.globl sys32_setuid_wrapper	
sys32_setuid_wrapper:
	llgfr	%r2,%r2			# uid_t
	jg	sys_setuid		# branch to system call

	.globl sys32_setgid_wrapper	
sys32_setgid_wrapper:
	llgfr	%r2,%r2			# gid_t
	jg	sys_setgid		# branch to system call

	.globl sys32_setfsuid_wrapper	
sys32_setfsuid_wrapper:
	llgfr	%r2,%r2			# uid_t
	jg	sys_setfsuid		# branch to system call

	.globl sys32_setfsgid_wrapper	
sys32_setfsgid_wrapper:
	llgfr	%r2,%r2			# gid_t
	jg	sys_setfsgid		# branch to system call

	.globl  sys32_pivot_root_wrapper 
sys32_pivot_root_wrapper:
	llgtr	%r2,%r2			# const char *
	llgtr	%r3,%r3			# const char *
	jg	sys_pivot_root		# branch to system call

	.globl  sys32_mincore_wrapper 
sys32_mincore_wrapper:
	llgfr	%r2,%r2			# unsigned long
	llgfr	%r3,%r3			# size_t
	llgtr	%r4,%r4			# unsigned char *
	jg	sys_mincore		# branch to system call

	.globl  sys32_madvise_wrapper 
sys32_madvise_wrapper:
	llgfr	%r2,%r2			# unsigned long
	llgfr	%r3,%r3			# size_t
	lgfr	%r4,%r4			# int
	jg	sys_madvise		# branch to system call

	.globl  sys32_getdents64_wrapper 
sys32_getdents64_wrapper:
	llgfr	%r2,%r2			# unsigned int
	llgtr	%r3,%r3			# void *
	llgfr	%r4,%r4			# unsigned int
	jg	sys_getdents64		# branch to system call

	.globl  compat_sys_fcntl64_wrapper 
compat_sys_fcntl64_wrapper:
	llgfr	%r2,%r2			# unsigned int
	llgfr	%r3,%r3			# unsigned int 
	llgfr	%r4,%r4			# unsigned long
	jg	compat_sys_fcntl64	# branch to system call

	.globl	sys32_stat64_wrapper
sys32_stat64_wrapper:
	llgtr	%r2,%r2			# char *
	llgtr	%r3,%r3			# struct stat64 *
	jg	sys32_stat64		# branch to system call

	.globl	sys32_lstat64_wrapper
sys32_lstat64_wrapper:
	llgtr	%r2,%r2			# char *
	llgtr	%r3,%r3			# struct stat64 *
	jg	sys32_lstat64		# branch to system call

	.globl	sys32_stime_wrapper
sys32_stime_wrapper:
	llgtr	%r2,%r2			# long *
	jg	compat_sys_stime	# branch to system call

	.globl  sys32_sysctl_wrapper
sys32_sysctl_wrapper:
	llgtr   %r2,%r2                 # struct __sysctl_args32 *
	jg      sys32_sysctl
+
# Compat wrappers, continued (fstat64, futex, extended attributes).
	.globl	sys32_fstat64_wrapper
sys32_fstat64_wrapper:
	llgfr	%r2,%r2			# unsigned long
	llgtr	%r3,%r3			# struct stat64 *
	jg	sys32_fstat64		# branch to system call

	.globl  compat_sys_futex_wrapper 
compat_sys_futex_wrapper:
	llgtr	%r2,%r2			# u32 *
	lgfr	%r3,%r3			# int
	lgfr	%r4,%r4			# int
	llgtr	%r5,%r5			# struct compat_timespec *
	llgtr	%r6,%r6			# u32 *
	# 6th C argument (val3) arrives on the caller's stack as a 32-bit
	# value; sign-extend it into the 64-bit stack argument slot.
	lgf	%r0,164(%r15)		# int
	stg	%r0,160(%r15)
	jg	compat_sys_futex	# branch to system call

	.globl	sys32_setxattr_wrapper
sys32_setxattr_wrapper:
	llgtr	%r2,%r2			# char *
	llgtr	%r3,%r3			# char *
	llgtr	%r4,%r4			# void *
	llgfr	%r5,%r5			# size_t
	lgfr	%r6,%r6			# int
	jg	sys_setxattr

	.globl	sys32_lsetxattr_wrapper
sys32_lsetxattr_wrapper:
	llgtr	%r2,%r2			# char *
	llgtr	%r3,%r3			# char *
	llgtr	%r4,%r4			# void *
	llgfr	%r5,%r5			# size_t
	lgfr	%r6,%r6			# int
	jg	sys_lsetxattr

	.globl	sys32_fsetxattr_wrapper
sys32_fsetxattr_wrapper:
	lgfr	%r2,%r2			# int
	llgtr	%r3,%r3			# char *
	llgtr	%r4,%r4			# void *
	llgfr	%r5,%r5			# size_t
	lgfr	%r6,%r6			# int
	jg	sys_fsetxattr

	.globl	sys32_getxattr_wrapper
sys32_getxattr_wrapper:
	llgtr	%r2,%r2			# char *
	llgtr	%r3,%r3			# char *
	llgtr	%r4,%r4			# void *
	llgfr	%r5,%r5			# size_t
	jg	sys_getxattr

	.globl	sys32_lgetxattr_wrapper
sys32_lgetxattr_wrapper:
	llgtr	%r2,%r2			# char *
	llgtr	%r3,%r3			# char *
	llgtr	%r4,%r4			# void *
	llgfr	%r5,%r5			# size_t
	jg	sys_lgetxattr

	.globl	sys32_fgetxattr_wrapper
sys32_fgetxattr_wrapper:
	lgfr	%r2,%r2			# int
	llgtr	%r3,%r3			# char *
	llgtr	%r4,%r4			# void *
	llgfr	%r5,%r5			# size_t
	jg	sys_fgetxattr

	.globl	sys32_listxattr_wrapper
sys32_listxattr_wrapper:
	llgtr	%r2,%r2			# char *
	llgtr	%r3,%r3			# char *
	llgfr	%r4,%r4			# size_t
	jg	sys_listxattr

	.globl	sys32_llistxattr_wrapper
sys32_llistxattr_wrapper:
	llgtr	%r2,%r2			# char *
	llgtr	%r3,%r3			# char *
	llgfr	%r4,%r4			# size_t
	jg	sys_llistxattr

	.globl	sys32_flistxattr_wrapper
sys32_flistxattr_wrapper:
	lgfr	%r2,%r2			# int
	llgtr	%r3,%r3			# char *
	llgfr	%r4,%r4			# size_t
	jg	sys_flistxattr

	.globl	sys32_removexattr_wrapper
sys32_removexattr_wrapper:
	llgtr	%r2,%r2			# char *
	llgtr	%r3,%r3			# char *
	jg	sys_removexattr

	.globl	sys32_lremovexattr_wrapper
sys32_lremovexattr_wrapper:
	llgtr	%r2,%r2			# char *
	llgtr	%r3,%r3			# char *
	jg	sys_lremovexattr

	.globl	sys32_fremovexattr_wrapper
sys32_fremovexattr_wrapper:
	lgfr	%r2,%r2			# int
	llgtr	%r3,%r3			# char *
	jg	sys_fremovexattr
+
# Compat wrappers, continued.  lookup_dcookie and fadvise64 receive a
# 64-bit value split into two 32-bit registers and reassemble it here
# with sllg + (32-bit) or before branching.
	.globl	sys32_sched_setaffinity_wrapper
sys32_sched_setaffinity_wrapper:
	lgfr	%r2,%r2			# int
	llgfr	%r3,%r3			# unsigned int
	llgtr	%r4,%r4			# unsigned long *
	jg	compat_sys_sched_setaffinity

	.globl	sys32_sched_getaffinity_wrapper
sys32_sched_getaffinity_wrapper:
	lgfr	%r2,%r2			# int
	llgfr	%r3,%r3			# unsigned int
	llgtr	%r4,%r4			# unsigned long *
	jg	compat_sys_sched_getaffinity

	.globl  sys32_exit_group_wrapper
sys32_exit_group_wrapper:
	lgfr	%r2,%r2			# int
	jg	sys_exit_group		# branch to system call

	.globl  sys32_set_tid_address_wrapper
sys32_set_tid_address_wrapper:
	llgtr	%r2,%r2			# int *
	jg	sys_set_tid_address	# branch to system call

	.globl  sys_epoll_create_wrapper
sys_epoll_create_wrapper:
	lgfr	%r2,%r2			# int
	jg	sys_epoll_create	# branch to system call

	.globl  sys_epoll_ctl_wrapper
sys_epoll_ctl_wrapper:
	lgfr	%r2,%r2			# int
	lgfr	%r3,%r3			# int
	lgfr	%r4,%r4			# int
	llgtr	%r5,%r5			# struct epoll_event *
	jg	sys_epoll_ctl		# branch to system call

	.globl  sys_epoll_wait_wrapper
sys_epoll_wait_wrapper:
	lgfr	%r2,%r2			# int
	llgtr	%r3,%r3			# struct epoll_event *
	lgfr	%r4,%r4			# int
	lgfr	%r5,%r5			# int
	jg	sys_epoll_wait		# branch to system call

	.globl	sys32_lookup_dcookie_wrapper
sys32_lookup_dcookie_wrapper:
	sllg	%r2,%r2,32		# get high word of 64bit dcookie
	or	%r2,%r3			# get low word of 64bit dcookie
	llgtr	%r3,%r4			# char *
	llgfr	%r4,%r5			# size_t
	jg	sys_lookup_dcookie

	.globl	sys32_fadvise64_wrapper
sys32_fadvise64_wrapper:
	lgfr	%r2,%r2			# int
	sllg	%r3,%r3,32		# get high word of 64bit loff_t
	or	%r3,%r4			# get low word of 64bit loff_t
	llgfr	%r4,%r5			# size_t (unsigned long)
	lgfr	%r5,%r6			# int
	jg	sys_fadvise64

	.globl	sys32_fadvise64_64_wrapper
sys32_fadvise64_64_wrapper:
	llgtr	%r2,%r2			# struct fadvise64_64_args *
	jg	s390_fadvise64_64

	.globl	sys32_clock_settime_wrapper
sys32_clock_settime_wrapper:
	lgfr	%r2,%r2			# clockid_t (int)
	llgtr	%r3,%r3			# struct compat_timespec *
	jg	compat_sys_clock_settime

	.globl	sys32_clock_gettime_wrapper
sys32_clock_gettime_wrapper:
	lgfr	%r2,%r2			# clockid_t (int)
	llgtr	%r3,%r3			# struct compat_timespec *
	jg	compat_sys_clock_gettime

	.globl	sys32_clock_getres_wrapper
sys32_clock_getres_wrapper:
	lgfr	%r2,%r2			# clockid_t (int)
	llgtr	%r3,%r3			# struct compat_timespec *
	jg	compat_sys_clock_getres

	.globl	sys32_clock_nanosleep_wrapper
sys32_clock_nanosleep_wrapper:
	lgfr	%r2,%r2			# clockid_t (int)
	lgfr	%r3,%r3			# int
	llgtr	%r4,%r4			# struct compat_timespec *
	llgtr	%r5,%r5			# struct compat_timespec *
	jg	compat_sys_clock_nanosleep

	.globl	sys32_timer_create_wrapper
sys32_timer_create_wrapper:
	lgfr	%r2,%r2			# timer_t (int)
	llgtr	%r3,%r3			# struct compat_sigevent *
	llgtr	%r4,%r4			# timer_t *
	jg	sys32_timer_create

	.globl	sys32_timer_settime_wrapper
sys32_timer_settime_wrapper:
	lgfr	%r2,%r2			# timer_t (int)
	lgfr	%r3,%r3			# int
	llgtr	%r4,%r4			# struct compat_itimerspec *
	llgtr	%r5,%r5			# struct compat_itimerspec *
	jg	compat_sys_timer_settime

	.globl	sys32_timer_gettime_wrapper
sys32_timer_gettime_wrapper:
	lgfr	%r2,%r2			# timer_t (int)
	llgtr	%r3,%r3			# struct compat_itimerspec *
	jg	compat_sys_timer_gettime

	.globl	sys32_timer_getoverrun_wrapper
sys32_timer_getoverrun_wrapper:
	lgfr	%r2,%r2			# timer_t (int)
	jg	sys_timer_getoverrun

	.globl	sys32_timer_delete_wrapper
sys32_timer_delete_wrapper:
	lgfr	%r2,%r2			# timer_t (int)
	jg	sys_timer_delete
+
# Compat wrappers, continued (aio, statfs64, POSIX mqueues, keys, waitid).
	.globl	sys32_io_setup_wrapper
sys32_io_setup_wrapper:
	llgfr	%r2,%r2			# unsigned int
	llgtr	%r3,%r3			# u32 *
	jg	compat_sys_io_setup

	.globl	sys32_io_destroy_wrapper
sys32_io_destroy_wrapper:
	llgfr	%r2,%r2			# (aio_context_t) u32
	jg	sys_io_destroy

	.globl	sys32_io_getevents_wrapper
sys32_io_getevents_wrapper:
	llgfr	%r2,%r2			# (aio_context_t) u32
	lgfr	%r3,%r3			# long
	lgfr	%r4,%r4			# long
	llgtr	%r5,%r5			# struct io_event *
	llgtr	%r6,%r6			# struct compat_timespec *
	jg	compat_sys_io_getevents

	.globl	sys32_io_submit_wrapper
sys32_io_submit_wrapper:
	llgfr	%r2,%r2			# (aio_context_t) u32
	lgfr	%r3,%r3			# long
	llgtr	%r4,%r4			# struct iocb **
	jg	compat_sys_io_submit

	.globl	sys32_io_cancel_wrapper
sys32_io_cancel_wrapper:
	llgfr	%r2,%r2			# (aio_context_t) u32
	llgtr	%r3,%r3			# struct iocb *
	llgtr	%r4,%r4			# struct io_event *
	jg	sys_io_cancel

	.globl compat_sys_statfs64_wrapper
compat_sys_statfs64_wrapper:
	llgtr	%r2,%r2			# const char *
	llgfr	%r3,%r3			# compat_size_t
	llgtr	%r4,%r4			# struct compat_statfs64 *
	jg	compat_sys_statfs64

	.globl compat_sys_fstatfs64_wrapper
compat_sys_fstatfs64_wrapper:
	llgfr	%r2,%r2			# unsigned int fd
	llgfr	%r3,%r3			# compat_size_t
	llgtr	%r4,%r4			# struct compat_statfs64 *
	jg	compat_sys_fstatfs64

	.globl	compat_sys_mq_open_wrapper
compat_sys_mq_open_wrapper:
	llgtr	%r2,%r2			# const char *
	lgfr	%r3,%r3			# int
	llgfr	%r4,%r4			# mode_t
	llgtr	%r5,%r5			# struct compat_mq_attr *
	jg	compat_sys_mq_open

	.globl	sys32_mq_unlink_wrapper
sys32_mq_unlink_wrapper:
	llgtr	%r2,%r2			# const char *
	jg	sys_mq_unlink

	.globl	compat_sys_mq_timedsend_wrapper
compat_sys_mq_timedsend_wrapper:
	lgfr	%r2,%r2			# mqd_t
	llgtr	%r3,%r3			# const char *
	llgfr	%r4,%r4			# size_t
	llgfr	%r5,%r5			# unsigned int
	llgtr	%r6,%r6			# const struct compat_timespec *
	jg	compat_sys_mq_timedsend

	.globl	compat_sys_mq_timedreceive_wrapper
compat_sys_mq_timedreceive_wrapper:
	lgfr	%r2,%r2			# mqd_t
	llgtr	%r3,%r3			# char *
	llgfr	%r4,%r4			# size_t
	llgtr	%r5,%r5			# unsigned int *
	llgtr	%r6,%r6			# const struct compat_timespec *
	jg	compat_sys_mq_timedreceive

	.globl	compat_sys_mq_notify_wrapper
compat_sys_mq_notify_wrapper:
	lgfr	%r2,%r2			# mqd_t
	llgtr	%r3,%r3			# struct compat_sigevent *
	jg	compat_sys_mq_notify

	.globl	compat_sys_mq_getsetattr_wrapper
compat_sys_mq_getsetattr_wrapper:
	lgfr	%r2,%r2			# mqd_t
	llgtr	%r3,%r3			# struct compat_mq_attr *
	llgtr	%r4,%r4			# struct compat_mq_attr *
	jg	compat_sys_mq_getsetattr

	.globl	compat_sys_add_key_wrapper
compat_sys_add_key_wrapper:
	llgtr	%r2,%r2			# const char *
	llgtr	%r3,%r3			# const char *
	llgtr	%r4,%r4			# const void *
	llgfr	%r5,%r5			# size_t
	llgfr	%r6,%r6			# (key_serial_t) u32
	jg	sys_add_key

	.globl	compat_sys_request_key_wrapper
compat_sys_request_key_wrapper:
	llgtr	%r2,%r2			# const char *
	llgtr	%r3,%r3			# const char *
	llgtr	%r4,%r4			# const void *
	llgfr	%r5,%r5			# (key_serial_t) u32
	jg	sys_request_key

	.globl	sys32_remap_file_pages_wrapper
sys32_remap_file_pages_wrapper:
	llgfr	%r2,%r2			# unsigned long
	llgfr	%r3,%r3			# unsigned long
	llgfr	%r4,%r4			# unsigned long
	llgfr	%r5,%r5			# unsigned long
	llgfr	%r6,%r6			# unsigned long
	jg	sys_remap_file_pages

	.globl	compat_sys_waitid_wrapper
compat_sys_waitid_wrapper:
	lgfr	%r2,%r2			# int
	lgfr	%r3,%r3			# pid_t
	llgtr	%r4,%r4			# siginfo_emu31_t *
	lgfr	%r5,%r5			# int
	llgtr	%r6,%r6			# struct rusage_emu31 *
	jg	compat_sys_waitid
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
new file mode 100644
index 0000000..44df8dc
--- /dev/null
+++ b/arch/s390/kernel/cpcmd.c
@@ -0,0 +1,111 @@
+/*
+ *  arch/s390/kernel/cpcmd.c
+ *
+ *  S390 version
+ *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ *               Christian Borntraeger (cborntra@de.ibm.com),
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <asm/ebcdic.h>
+#include <asm/cpcmd.h>
+#include <asm/system.h>
+
+static DEFINE_SPINLOCK(cpcmd_lock);
+static char cpcmd_buf[240];
+
+/*
+ * __cpcmd: issue a CP command to the z/VM hypervisor via diagnose X'08'.
+ * The command is copied into the static buffer, translated to EBCDIC and
+ * handed to CP; a response, if requested, is translated back to ASCII.
+ * The caller of __cpcmd has to ensure that the response buffer is below 2 GB.
+ */
+void __cpcmd(char *cmd, char *response, int rlen)
+{
+	const int mask = 0x40000000L;	/* response-buffer flag or'ed into the length */
+	unsigned long flags;
+	int cmdlen;
+
+	spin_lock_irqsave(&cpcmd_lock, flags);
+	cmdlen = strlen(cmd);
+	BUG_ON(cmdlen > 240);
+	/*
+	 * Use a length-bounded memcpy, not strcpy: cpcmd_buf is exactly 240
+	 * bytes, so a 240-character command would overflow it by the
+	 * terminating NUL.  The diagnose receives the explicit length in a
+	 * register and does not need a terminator.
+	 */
+	memcpy(cpcmd_buf, cmd, cmdlen);
+	ASCEBC(cpcmd_buf, cmdlen);
+
+	if (response != NULL && rlen > 0) {
+		memset(response, 0, rlen);
+#ifndef CONFIG_ARCH_S390X
+		asm volatile ("LRA   2,0(%0)\n\t"
+                              "LR    4,%1\n\t"
+                              "O     4,%4\n\t"
+                              "LRA   3,0(%2)\n\t"
+                              "LR    5,%3\n\t"
+                              ".long 0x83240008 # Diagnose X'08'\n\t"
+                              : /* no output */
+                              : "a" (cpcmd_buf), "d" (cmdlen),
+                                "a" (response), "d" (rlen), "m" (mask)
+                              : "cc", "2", "3", "4", "5" );
+#else /* CONFIG_ARCH_S390X */
+                /* 64 bit: switch to 31-bit addressing mode around the diagnose */
+                asm volatile ("   lrag  2,0(%0)\n"
+                              "   lgr   4,%1\n"
+                              "   o     4,%4\n"
+                              "   lrag  3,0(%2)\n"
+                              "   lgr   5,%3\n"
+                              "   sam31\n"
+                              "   .long 0x83240008 # Diagnose X'08'\n"
+                              "   sam64"
+                              : /* no output */
+                              : "a" (cpcmd_buf), "d" (cmdlen),
+                                "a" (response), "d" (rlen), "m" (mask)
+                              : "cc", "2", "3", "4", "5" );
+#endif /* CONFIG_ARCH_S390X */
+                EBCASC(response, rlen);
+        } else {
+		/* fire-and-forget variant: no response buffer passed to CP */
+#ifndef CONFIG_ARCH_S390X
+                asm volatile ("LRA   2,0(%0)\n\t"
+                              "LR    3,%1\n\t"
+                              ".long 0x83230008 # Diagnose X'08'\n\t"
+                              : /* no output */
+                              : "a" (cpcmd_buf), "d" (cmdlen)
+                              : "2", "3"  );
+#else /* CONFIG_ARCH_S390X */
+                asm volatile ("   lrag  2,0(%0)\n"
+                              "   lgr   3,%1\n"
+                              "   sam31\n"
+                              "   .long 0x83230008 # Diagnose X'08'\n"
+                              "   sam64"
+                              : /* no output */
+                              : "a" (cpcmd_buf), "d" (cmdlen)
+                              : "2", "3"  );
+#endif /* CONFIG_ARCH_S390X */
+        }
+	spin_unlock_irqrestore(&cpcmd_lock, flags);
+}
+
+EXPORT_SYMBOL(__cpcmd);
+
+#ifdef CONFIG_ARCH_S390X
+/*
+ * cpcmd: wrapper around __cpcmd that tolerates response buffers above
+ * 2 GB by bouncing through a GFP_DMA (below 2 GB) allocation, since the
+ * diagnose runs in 31-bit addressing mode.  On allocation failure the
+ * command is NOT issued and the response buffer is left untouched.
+ */
+void cpcmd(char *cmd, char *response, int rlen)
+{
+	char *lowbuf;
+	/* buffer already below 2 GB (or no buffer at all): call directly */
+	if ((rlen == 0) || (response == NULL)
+	    || !((unsigned long)response >> 31))
+		__cpcmd(cmd, response, rlen);
+	else {
+		lowbuf = kmalloc(rlen, GFP_KERNEL | GFP_DMA);
+		if (!lowbuf) {
+			printk(KERN_WARNING
+				"cpcmd: could not allocate response buffer\n");
+			return;
+		}
+		__cpcmd(cmd, lowbuf, rlen);
+		memcpy(response, lowbuf, rlen);
+		kfree(lowbuf);
+	}
+}
+
+EXPORT_SYMBOL(cpcmd);
+#endif		/* CONFIG_ARCH_S390X */
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
new file mode 100644
index 0000000..91f8ce5
--- /dev/null
+++ b/arch/s390/kernel/debug.c
@@ -0,0 +1,1286 @@
+/*
+ *  arch/s390/kernel/debug.c
+ *   S/390 debug facility
+ *
+ *    Copyright (C) 1999, 2000 IBM Deutschland Entwicklung GmbH,
+ *                             IBM Corporation
+ *    Author(s): Michael Holzheu (holzheu@de.ibm.com),
+ *               Holger Smolinski (Holger.Smolinski@de.ibm.com)
+ *
+ *    Bugreports to: <Linux390@de.ibm.com>
+ */
+
+#include <linux/config.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/sysctl.h>
+#include <asm/uaccess.h>
+#include <asm/semaphore.h>
+
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <asm/debug.h>
+
+#define DEBUG_PROLOG_ENTRY -1
+
+/* typedefs */
+
+/*
+ * Per-open-file state for a debug-feature proc file: remembers how far
+ * the reader has consumed the snapshot taken at open() time.
+ */
+typedef struct file_private_info {
+	loff_t offset;			/* offset of last read in file */
+	int    act_area;                /* number of last formated area */
+	int    act_entry;               /* last formated entry (offset */
+                                        /* relative to beginning of last */
+                                        /* formated area) */ 
+	size_t act_entry_offset;        /* up to this offset we copied */
+					/* in last read the last formated */
+					/* entry to userland */
+	char   temp_buf[2048];		/* buffer for output */
+	debug_info_t *debug_info_org;   /* original debug information */
+	debug_info_t *debug_info_snap;	/* snapshot of debug information */
+	struct debug_view *view;	/* used view of debug info */
+} file_private_info_t;
+
+/*
+ * Record layout used by the sprintf view: the format string pointer is
+ * stored first, followed by its arguments, each widened to a long.
+ */
+typedef struct
+{
+	char *string;
+	/*
+	 * This assumes that all args are converted into longs;
+	 * on s390 this is the case for all types of parameter
+	 * except of floats, and long long (32 bit)
+	 */
+	long args[0];
+} debug_sprintf_entry_t;
+
+
+extern void tod_to_timeval(uint64_t todval, struct timeval *xtime);
+
+/* internal function prototyes */
+
+static int debug_init(void);
+static ssize_t debug_output(struct file *file, char __user *user_buf,
+			    size_t user_len, loff_t * offset);
+static ssize_t debug_input(struct file *file, const char __user *user_buf,
+			   size_t user_len, loff_t * offset);
+static int debug_open(struct inode *inode, struct file *file);
+static int debug_close(struct inode *inode, struct file *file);
+static debug_info_t*  debug_info_create(char *name, int page_order, int nr_areas, int buf_size);
+static void debug_info_get(debug_info_t *);
+static void debug_info_put(debug_info_t *);
+static int debug_prolog_level_fn(debug_info_t * id,
+				 struct debug_view *view, char *out_buf);
+static int debug_input_level_fn(debug_info_t * id, struct debug_view *view,
+				struct file *file, const char __user *user_buf,
+				size_t user_buf_size, loff_t * offset);
+static int debug_input_flush_fn(debug_info_t * id, struct debug_view *view,
+                                struct file *file, const char __user *user_buf,
+                                size_t user_buf_size, loff_t * offset);
+static int debug_hex_ascii_format_fn(debug_info_t * id, struct debug_view *view,
+                                char *out_buf, const char *in_buf);
+static int debug_raw_format_fn(debug_info_t * id,
+				 struct debug_view *view, char *out_buf,
+				 const char *in_buf);
+static int debug_raw_header_fn(debug_info_t * id, struct debug_view *view,
+                         int area, debug_entry_t * entry, char *out_buf);
+
+static int debug_sprintf_format_fn(debug_info_t * id, struct debug_view *view,
+				   char *out_buf, debug_sprintf_entry_t *curr_event);
+
+/* globals */
+
+/* view: entries as raw binary, header plus data, no formatting */
+struct debug_view debug_raw_view = {
+	"raw",
+	NULL,
+	&debug_raw_header_fn,
+	&debug_raw_format_fn,
+	NULL,
+	NULL
+};
+
+/* view: default header plus hex/ascii dump of the entry data */
+struct debug_view debug_hex_ascii_view = {
+	"hex_ascii",
+	NULL,
+	&debug_dflt_header_fn,
+	&debug_hex_ascii_format_fn,
+	NULL,
+	NULL
+};
+
+/* view: read/write the current debug level (input-capable) */
+struct debug_view debug_level_view = {
+	"level",
+	&debug_prolog_level_fn,
+	NULL,
+	NULL,
+	&debug_input_level_fn,
+	NULL
+};
+
+/* view: write-only trigger to flush one or all areas */
+struct debug_view debug_flush_view = {
+        "flush",
+        NULL,
+        NULL,
+        NULL,
+        &debug_input_flush_fn,
+        NULL
+};
+
+/* view: format entries written via debug_sprintf_event/exception */
+struct debug_view debug_sprintf_view = {
+	"sprintf",
+	NULL,
+	&debug_dflt_header_fn,
+	(debug_format_proc_t*)&debug_sprintf_format_fn,
+	NULL,
+	NULL
+};
+
+
+unsigned int debug_feature_version = __DEBUG_FEATURE_VERSION;
+
+/* static globals */
+
+static debug_info_t *debug_area_first = NULL;
+static debug_info_t *debug_area_last = NULL;
+DECLARE_MUTEX(debug_lock);
+
+static int initialized;
+
+static struct file_operations debug_file_ops = {
+	.owner	 = THIS_MODULE,
+	.read    = debug_output,
+	.write   = debug_input,	
+	.open    = debug_open,
+	.release = debug_close,
+};
+
+static struct proc_dir_entry *debug_proc_root_entry;
+
+/* functions */
+
+/*
+ * debug_info_alloc
+ * - alloc new debug-info: the descriptor itself, the per-area write
+ *   position array, the area pointer array and one zeroed buffer of
+ *   (PAGE_SIZE << page_order) bytes per area
+ * - returns NULL if any allocation fails; partial allocations are
+ *   unwound via the goto chain at the bottom
+ * - NOTE(review): GFP_ATOMIC throughout — presumably so callers may
+ *   hold locks; confirm against call sites
+ */
+
+static debug_info_t*  debug_info_alloc(char *name, int page_order,
+                                        int nr_areas, int buf_size)
+{
+	debug_info_t* rc;
+	int i;
+
+	/* alloc everything */
+
+	rc = (debug_info_t*) kmalloc(sizeof(debug_info_t), GFP_ATOMIC);
+	if(!rc)
+		goto fail_malloc_rc;
+	rc->active_entry = (int*)kmalloc(nr_areas * sizeof(int), GFP_ATOMIC);
+	if(!rc->active_entry)
+		goto fail_malloc_active_entry;
+	memset(rc->active_entry, 0, nr_areas * sizeof(int));
+	rc->areas = (debug_entry_t **) kmalloc(nr_areas *
+						sizeof(debug_entry_t *),
+						GFP_ATOMIC);
+	if (!rc->areas)
+		goto fail_malloc_areas;
+	for (i = 0; i < nr_areas; i++) {
+		rc->areas[i] = (debug_entry_t *) __get_free_pages(GFP_ATOMIC,
+								page_order);
+		if (!rc->areas[i]) {
+			/* free the areas allocated so far, then unwind */
+			for (i--; i >= 0; i--) {
+				free_pages((unsigned long) rc->areas[i],
+						page_order);
+			}
+			goto fail_malloc_areas2;
+		} else {
+			memset(rc->areas[i], 0, PAGE_SIZE << page_order);
+		}
+	}
+
+	/* initialize members */
+
+	spin_lock_init(&rc->lock);
+	rc->page_order  = page_order;
+	rc->nr_areas    = nr_areas;
+	rc->active_area = 0;
+	rc->level       = DEBUG_DEFAULT_LEVEL;
+	rc->buf_size    = buf_size;
+	rc->entry_size  = sizeof(debug_entry_t) + buf_size;
+	strlcpy(rc->name, name, sizeof(rc->name));
+	memset(rc->views, 0, DEBUG_MAX_VIEWS * sizeof(struct debug_view *));
+#ifdef CONFIG_PROC_FS
+	memset(rc->proc_entries, 0 ,DEBUG_MAX_VIEWS *
+		sizeof(struct proc_dir_entry*));
+#endif /* CONFIG_PROC_FS */
+	atomic_set(&(rc->ref_count), 0);
+
+	return rc;
+
+fail_malloc_areas2:
+	kfree(rc->areas);
+fail_malloc_areas:
+	kfree(rc->active_entry);
+fail_malloc_active_entry:
+	kfree(rc);
+fail_malloc_rc:
+	return NULL;
+}
+
+/*
+ * debug_info_free
+ * - release all storage held by a debug-info: every per-area page
+ *   buffer, both bookkeeping arrays, and the descriptor itself
+ */
+
+static void debug_info_free(debug_info_t* db_info)
+{
+	int area;
+
+	for (area = db_info->nr_areas - 1; area >= 0; area--)
+		free_pages((unsigned long) db_info->areas[area],
+			   db_info->page_order);
+	kfree(db_info->areas);
+	kfree(db_info->active_entry);
+	kfree(db_info);
+}
+
+/*
+ * debug_info_create
+ * - allocate a new debug-info, create its proc directory, link it to
+ *   the end of the global list and take the initial reference
+ * - returns NULL if the allocation fails
+ */
+
+static debug_info_t*  debug_info_create(char *name, int page_order, 
+                                        int nr_areas, int buf_size)
+{
+	debug_info_t* rc;
+
+	rc = debug_info_alloc(name, page_order, nr_areas, buf_size);
+	if (!rc)
+		return NULL;
+
+	/* create proc root directory */
+	rc->proc_root_entry = proc_mkdir(rc->name, debug_proc_root_entry);
+
+	/* append new element to end of linked list */
+	rc->next = NULL;
+	rc->prev = debug_area_last;
+	if (debug_area_last)
+		debug_area_last->next = rc;
+	else
+		debug_area_first = rc;
+	debug_area_last = rc;
+
+	debug_info_get(rc);
+	return rc;
+}
+
+/*
+ * debug_info_copy
+ * - allocate a second debug-info with the same geometry and duplicate
+ *   all area contents into it; returns NULL on allocation failure
+ */
+
+static debug_info_t* debug_info_copy(debug_info_t* in)
+{
+	debug_info_t* rc;
+	int area;
+
+	rc = debug_info_alloc(in->name, in->page_order,
+				in->nr_areas, in->buf_size);
+	if (!rc)
+		return NULL;
+	for (area = 0; area < in->nr_areas; area++)
+		memcpy(rc->areas[area], in->areas[area],
+			PAGE_SIZE << in->page_order);
+	return rc;
+}
+
+/*
+ * debug_info_get
+ * - take an additional reference on a debug-info; NULL is ignored
+ */
+
+static void debug_info_get(debug_info_t * db_info)
+{
+	if (!db_info)
+		return;
+	atomic_inc(&db_info->ref_count);
+}
+
+/*
+ * debug_info_put:
+ * - decreases reference count for debug-info and frees it if necessary:
+ *   on the last put all registered view proc entries and the proc
+ *   directory are removed, the debug-info is unlinked from the global
+ *   list and its memory is released
+ */
+
+static void debug_info_put(debug_info_t *db_info)
+{
+	int i;
+
+	if (!db_info)
+		return;
+	if (atomic_dec_and_test(&db_info->ref_count)) {
+#ifdef DEBUG
+		printk(KERN_INFO "debug: freeing debug area %p (%s)\n",
+		       db_info, db_info->name);
+#endif
+		/* remove the proc file of every still-registered view */
+		for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
+			if (db_info->views[i] == NULL)
+				continue;
+#ifdef CONFIG_PROC_FS
+			remove_proc_entry(db_info->proc_entries[i]->name,
+					  db_info->proc_root_entry);
+#endif
+		}
+#ifdef CONFIG_PROC_FS
+		remove_proc_entry(db_info->proc_root_entry->name,
+				  debug_proc_root_entry);
+#endif
+		/* unlink from the doubly linked list of debug areas */
+		if(db_info == debug_area_first)
+			debug_area_first = db_info->next;
+		if(db_info == debug_area_last)
+			debug_area_last = db_info->prev;
+		if(db_info->prev) db_info->prev->next = db_info->next;
+		if(db_info->next) db_info->next->prev = db_info->prev;
+		debug_info_free(db_info);
+	}
+}
+
+/*
+ * debug_format_entry:
+ * - format one debug entry into p_info->temp_buf and return the number
+ *   of bytes written
+ * - for the pseudo entry DEBUG_PROLOG_ENTRY only the view's prolog is
+ *   emitted; an entry with a zero timestamp is treated as empty and
+ *   produces no output
+ */
+
+static int debug_format_entry(file_private_info_t *p_info)
+{
+	debug_info_t *id_org    = p_info->debug_info_org;
+	debug_info_t *id_snap   = p_info->debug_info_snap;
+	struct debug_view *view = p_info->view;
+	debug_entry_t *act_entry;
+	size_t len = 0;
+	if(p_info->act_entry == DEBUG_PROLOG_ENTRY){
+		/* print prolog */
+        	if (view->prolog_proc)
+                	len += view->prolog_proc(id_org, view,p_info->temp_buf);
+		goto out;
+	}
+
+	/* act_entry is a byte offset into the snapshot's current area */
+	act_entry = (debug_entry_t *) ((char*)id_snap->areas[p_info->act_area] +
+					p_info->act_entry);
+                        
+	if (act_entry->id.stck == 0LL)
+			goto out;  /* empty entry */
+	if (view->header_proc)
+		len += view->header_proc(id_org, view, p_info->act_area, 
+					act_entry, p_info->temp_buf + len);
+	if (view->format_proc)
+		len += view->format_proc(id_org, view, p_info->temp_buf + len,
+						DEBUG_DATA(act_entry));
+      out:
+        return len;
+}
+
+/*
+ * debug_next_entry:
+ * - advance p_info to the next entry of the snapshot: from the prolog
+ *   pseudo entry to entry 0, then through each area in order
+ * - returns 1 when all areas have been consumed, 0 otherwise
+ */
+
+extern inline int debug_next_entry(file_private_info_t *p_info)
+{
+	debug_info_t *id = p_info->debug_info_snap;
+	if(p_info->act_entry == DEBUG_PROLOG_ENTRY){
+		p_info->act_entry = 0;
+		goto out;
+	}
+	/* step one entry; wrap to the next area when past the last full
+	 * entry that fits into the current one */
+	if ((p_info->act_entry += id->entry_size)
+		> ((PAGE_SIZE << (id->page_order)) 
+		- id->entry_size)){
+
+		/* next area */
+		p_info->act_entry = 0;
+        	p_info->act_area++;
+        	if(p_info->act_area >= id->nr_areas)
+			return 1;
+	}
+out:
+	return 0;	
+}
+
+/*
+ * debug_output:
+ * - called for user read()
+ * - copies formated debug entries to the user buffer
+ * - only strictly sequential reads are supported: a read at any offset
+ *   other than the one recorded from the previous read returns -EPIPE
+ */
+
+static ssize_t debug_output(struct file *file,		/* file descriptor */
+			    char __user *user_buf,	/* user buffer */
+			    size_t  len,		/* length of buffer */
+			    loff_t *offset)	      /* offset in the file */
+{
+	size_t count = 0;
+	size_t entry_offset, size = 0;
+	file_private_info_t *p_info;
+
+	p_info = ((file_private_info_t *) file->private_data);
+	if (*offset != p_info->offset) 
+		return -EPIPE;
+	if(p_info->act_area >= p_info->debug_info_snap->nr_areas)
+		return 0;
+
+	/* resume inside the entry that was only partially copied last time */
+	entry_offset = p_info->act_entry_offset;
+
+	while(count < len){
+		size = debug_format_entry(p_info);
+		/* NOTE(review): assumes entry_offset <= size here (both are
+		 * size_t, so the subtraction would wrap otherwise) — holds as
+		 * long as the view formats deterministically; confirm */
+		size = min((len - count), (size - entry_offset));
+
+		if(size){
+			if (copy_to_user(user_buf + count, 
+					p_info->temp_buf + entry_offset, size))
+			return -EFAULT;
+		}
+		count += size;
+		entry_offset = 0;
+		if(count != len)
+			if(debug_next_entry(p_info)) 
+				goto out;
+	}
+out:
+	/* remember where this read stopped for the sequential-read check
+	 * and for resuming mid-entry */
+	p_info->offset           = *offset + count;
+	p_info->act_entry_offset = size;	
+	*offset = p_info->offset;
+	return count;
+}
+
+/*
+ * debug_input:
+ * - called for user write()
+ * - forwards the data to the input function of the view, if the view
+ *   provides one; otherwise the write is rejected with -EPERM
+ */
+
+static ssize_t debug_input(struct file *file,
+			   const char __user *user_buf, size_t length,
+			   loff_t *offset)
+{
+	file_private_info_t *p_info;
+	int rc;
+
+	down(&debug_lock);
+	p_info = (file_private_info_t *) file->private_data;
+	if (!p_info->view->input_proc) {
+		up(&debug_lock);
+		return -EPERM;
+	}
+	rc = p_info->view->input_proc(p_info->debug_info_org, p_info->view,
+				      file, user_buf, length, offset);
+	up(&debug_lock);
+	return rc;		/* number of input characters */
+}
+
+/*
+ * debug_open:
+ * - called for user open()
+ * - locates the debug-info/view pair that owns the opened proc entry,
+ *   takes a consistent snapshot of the debug areas, and stores the read
+ *   state in the file's private_data
+ */
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+	int i = 0, rc = 0;
+	file_private_info_t *p_info;
+	debug_info_t *debug_info, *debug_info_snapshot;
+
+#ifdef DEBUG
+	printk("debug_open\n");
+#endif
+	down(&debug_lock);
+
+	/* find debug log and view by matching the proc entry of the inode */
+
+	debug_info = debug_area_first;
+	while(debug_info != NULL){
+		for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
+			if (debug_info->views[i] == NULL)
+				continue;
+			else if (debug_info->proc_entries[i] ==
+				 PDE(file->f_dentry->d_inode)) {
+				goto found;	/* found view ! */
+			}
+		}
+		debug_info = debug_info->next;
+	}
+	/* no entry found */
+	rc = -EINVAL;
+	goto out;
+
+      found:
+
+	/* make snapshot of current debug areas to get it consistent */
+
+	debug_info_snapshot = debug_info_copy(debug_info);
+
+	if(!debug_info_snapshot){
+#ifdef DEBUG
+		printk(KERN_ERR "debug_open: debug_info_copy failed (out of mem)\n");
+#endif
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	if ((file->private_data =
+	     kmalloc(sizeof(file_private_info_t), GFP_ATOMIC)) == 0) {
+#ifdef DEBUG
+		printk(KERN_ERR "debug_open: kmalloc failed\n");
+#endif
+		debug_info_free(debug_info_snapshot);	
+		rc = -ENOMEM;
+		goto out;
+	}
+	/* start reading at the prolog pseudo entry of area 0 */
+	p_info = (file_private_info_t *) file->private_data;
+	p_info->offset = 0;
+	p_info->debug_info_snap = debug_info_snapshot;
+	p_info->debug_info_org  = debug_info;
+	p_info->view = debug_info->views[i];
+	p_info->act_area = 0;
+	p_info->act_entry = DEBUG_PROLOG_ENTRY;
+	p_info->act_entry_offset = 0;
+
+	/* keep the original debug-info alive while the file is open */
+	debug_info_get(debug_info);
+
+      out:
+	up(&debug_lock);
+	return rc;
+}
+
+/*
+ * debug_close:
+ * - called for user close()
+ * - releases the snapshot, drops the reference on the original
+ *   debug-info and frees the private_data of the file handle
+ */
+
+static int debug_close(struct inode *inode, struct file *file)
+{
+	file_private_info_t *p_info = file->private_data;
+
+#ifdef DEBUG
+	printk("debug_close\n");
+#endif
+	debug_info_free(p_info->debug_info_snap);
+	debug_info_put(p_info->debug_info_org);
+	kfree(p_info);
+	return 0;		/* success */
+}
+
+/*
+ * debug_register:
+ * - creates and initializes debug area for the caller and registers
+ *   the built-in "level" and "flush" views on it
+ * - returns handle for debug area, or NULL on failure
+ */
+
+debug_info_t *debug_register
+    (char *name, int page_order, int nr_areas, int buf_size) 
+{
+	debug_info_t *rc;
+
+	if (!initialized)
+		BUG();
+	down(&debug_lock);
+
+	/* create new debug_info */
+	rc = debug_info_create(name, page_order, nr_areas, buf_size);
+	if (rc) {
+		debug_register_view(rc, &debug_level_view);
+		debug_register_view(rc, &debug_flush_view);
+#ifdef DEBUG
+		printk(KERN_INFO
+		       "debug: reserved %d areas of %d pages for debugging %s\n",
+		       nr_areas, 1 << page_order, rc->name);
+#endif
+	} else {
+		printk(KERN_ERR "debug: debug_register failed for %s\n",name);
+	}
+	up(&debug_lock);
+	return rc;
+}
+
+/*
+ * debug_unregister:
+ * - give back debug area; drops the reference taken at registration
+ *   (NULL handle is a no-op)
+ */
+
+void debug_unregister(debug_info_t * id)
+{
+	if (!id)
+		return;
+	down(&debug_lock);
+#ifdef DEBUG
+	printk(KERN_INFO "debug: unregistering %s\n", id->name);
+#endif
+	debug_info_put(id);
+	up(&debug_lock);
+}
+
+/*
+ * debug_set_level:
+ * - set actual debug level; DEBUG_OFF_LEVEL switches logging off,
+ *   values outside 0..DEBUG_MAX_LEVEL are rejected with a message
+ */
+
+void debug_set_level(debug_info_t* id, int new_level)
+{
+	unsigned long flags;
+
+	if (!id)
+		return;
+	spin_lock_irqsave(&id->lock, flags);
+	if (new_level == DEBUG_OFF_LEVEL) {
+		id->level = DEBUG_OFF_LEVEL;
+		printk(KERN_INFO "debug: %s: switched off\n",id->name);
+	} else if (new_level < 0 || new_level > DEBUG_MAX_LEVEL) {
+		printk(KERN_INFO
+			"debug: %s: level %i is out of range (%i - %i)\n",
+			id->name, new_level, 0, DEBUG_MAX_LEVEL);
+	} else {
+		id->level = new_level;
+#ifdef DEBUG
+		printk(KERN_INFO
+			"debug: %s: new level %i\n",id->name,id->level);
+#endif
+	}
+	spin_unlock_irqrestore(&id->lock, flags);
+}
+
+
+/*
+ * proceed_active_entry:
+ * - set active entry to next in the ring buffer
+ */
+
+extern inline void proceed_active_entry(debug_info_t * id)
+{
+	if ((id->active_entry[id->active_area] += id->entry_size)
+	    > ((PAGE_SIZE << (id->page_order)) - id->entry_size))
+		id->active_entry[id->active_area] = 0;
+}
+
+/*
+ * proceed_active_area:
+ * - set active area to next in the ring buffer, wrapping back to
+ *   area 0 after the last one
+ */
+
+extern inline void proceed_active_area(debug_info_t * id)
+{
+	id->active_area = (id->active_area + 1) % id->nr_areas;
+}
+
+/*
+ * get_active_entry:
+ * - return the address of the current write position inside the
+ *   active area
+ */
+
+extern inline debug_entry_t *get_active_entry(debug_info_t * id)
+{
+	char *area = (char *) id->areas[id->active_area];
+
+	return (debug_entry_t *) (area + id->active_entry[id->active_area]);
+}
+
+/*
+ * debug_finish_entry:
+ * - set timestamp (TOD clock), caller address, cpu number, level and
+ *   exception flag on the entry, then advance the write position;
+ *   exception entries additionally switch to the next area
+ */
+
+extern inline void debug_finish_entry(debug_info_t * id, debug_entry_t* active,
+		int level, int exception)
+{
+	STCK(active->id.stck);
+	active->id.fields.cpuid = smp_processor_id();
+	active->caller = __builtin_return_address(0);
+	active->id.fields.exception = exception;
+	active->id.fields.level     = level;
+	proceed_active_entry(id);
+	if(exception)
+		proceed_active_area(id);
+}
+
+static int debug_stoppable=1;
+static int debug_active=1;
+
+#define CTL_S390DBF 5677
+#define CTL_S390DBF_STOPPABLE 5678
+#define CTL_S390DBF_ACTIVE 5679
+
+/*
+ * proc handler for the running debug_active sysctl
+ * always allow read, allow write only if debug_stoppable is set or
+ * if debug_active is already off
+ */
+static int s390dbf_procactive(ctl_table *table, int write, struct file *filp,
+			      void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	if (write && !debug_stoppable && debug_active)
+		return 0;
+	return proc_dointvec(table, write, filp, buffer, lenp, ppos);
+}
+
+
+static struct ctl_table s390dbf_table[] = {
+	{
+		/* whether user space may switch the debug feature off */
+		.ctl_name	= CTL_S390DBF_STOPPABLE,
+		.procname	= "debug_stoppable",
+		.data		= &debug_stoppable,
+		.maxlen		= sizeof(int),
+		.mode		= S_IRUGO | S_IWUSR,
+		.proc_handler	= &proc_dointvec,
+		.strategy	= &sysctl_intvec,
+	},
+	{
+		/* on/off state; writes go through s390dbf_procactive */
+		.ctl_name	= CTL_S390DBF_ACTIVE,
+		.procname	= "debug_active",
+		.data		= &debug_active,
+		.maxlen		= sizeof(int),
+		.mode		= S_IRUGO | S_IWUSR,
+		.proc_handler	= &s390dbf_procactive,
+		.strategy	= &sysctl_intvec,
+	},
+	{ .ctl_name = 0 }
+};
+
+/* parent directory entry for the s390dbf sysctls above */
+static struct ctl_table s390dbf_dir_table[] = {
+	{
+		.ctl_name	= CTL_S390DBF,
+		.procname	= "s390dbf",
+		.maxlen		= 0,
+		.mode		= S_IRUGO | S_IXUGO,
+		.child		= s390dbf_table,
+	},
+	{ .ctl_name = 0 }
+};
+
+struct ctl_table_header *s390dbf_sysctl_header;
+
+/* deactivate the debug feature, unless stopping has been disabled via
+ * the debug_stoppable sysctl */
+void debug_stop_all(void)
+{
+	if (!debug_stoppable)
+		return;
+	debug_active = 0;
+}
+
+
+/*
+ * debug_event_common:
+ * - write debug entry with given size; data longer than the area's
+ *   buf_size is truncated, shorter data is zero-padded
+ * - returns the written entry, or NULL when the feature is inactive
+ */
+
+debug_entry_t *debug_event_common(debug_info_t * id, int level, const void *buf,
+			          int len)
+{
+	unsigned long flags;
+	debug_entry_t *active;
+
+	if (!debug_active)
+		return NULL;
+	spin_lock_irqsave(&id->lock, flags);
+	active = get_active_entry(id);
+	memset(DEBUG_DATA(active), 0, id->buf_size);
+	memcpy(DEBUG_DATA(active), buf, min(len, id->buf_size));
+	debug_finish_entry(id, active, level, 0);
+	spin_unlock_irqrestore(&id->lock, flags);
+
+	return active;
+}
+
+/*
+ * debug_exception_common:
+ * - write debug entry with given size and switch to next debug area
+ *   (the exception flag set in debug_finish_entry triggers the switch)
+ * - returns the written entry, or NULL when the feature is inactive
+ */
+
+debug_entry_t *debug_exception_common(debug_info_t * id, int level, 
+                                      const void *buf, int len)
+{
+	unsigned long flags;
+	debug_entry_t *active;
+
+	if (!debug_active)
+		return NULL;
+	spin_lock_irqsave(&id->lock, flags);
+	active = get_active_entry(id);
+	memset(DEBUG_DATA(active), 0, id->buf_size);
+	memcpy(DEBUG_DATA(active), buf, min(len, id->buf_size));
+	debug_finish_entry(id, active, level, 1);
+	spin_unlock_irqrestore(&id->lock, flags);
+
+	return active;
+}
+
+/*
+ * counts arguments in format string for sprintf view
+ * (every '%' character is counted, so "%%" contributes as well)
+ */
+
+extern inline int debug_count_numargs(char *string)
+{
+	int numargs = 0;
+	char c;
+
+	while ((c = *string++) != '\0')
+		if (c == '%')
+			numargs++;
+	return numargs;
+}
+
+/*
+ * debug_sprintf_event:
+ * - store the format string POINTER plus its arguments (widened to
+ *   long) in the entry; the string itself is not copied, so callers
+ *   must pass a string that stays valid (e.g. a constant)
+ * - arguments beyond what fits into buf_size are dropped
+ */
+
+debug_entry_t *debug_sprintf_event(debug_info_t* id,
+                                   int level,char *string,...)
+{
+	va_list   ap;
+	int numargs,idx;
+	unsigned long flags;
+	debug_sprintf_entry_t *curr_event;
+	debug_entry_t *active;
+
+	if((!id) || (level > id->level))
+		return NULL;
+	if (!debug_active)
+		return NULL;
+	numargs=debug_count_numargs(string);
+
+	spin_lock_irqsave(&id->lock, flags);
+	active = get_active_entry(id);
+	curr_event=(debug_sprintf_entry_t *) DEBUG_DATA(active);
+	va_start(ap,string);
+	curr_event->string=string;
+	/* one long is used by the string pointer, hence the -1 */
+	for(idx=0;idx<min(numargs,(int)(id->buf_size / sizeof(long))-1);idx++)
+		curr_event->args[idx]=va_arg(ap,long);
+	va_end(ap);
+	debug_finish_entry(id, active, level, 0);
+	spin_unlock_irqrestore(&id->lock, flags);
+
+	return active;
+}
+
+/*
+ * debug_sprintf_exception:
+ * - like debug_sprintf_event, but marks the entry as an exception,
+ *   which also switches to the next debug area
+ * - the format string pointer is stored, not copied: callers must pass
+ *   a string that stays valid (e.g. a constant)
+ */
+
+debug_entry_t *debug_sprintf_exception(debug_info_t* id,
+                                       int level,char *string,...)
+{
+	va_list   ap;
+	int numargs,idx;
+	unsigned long flags;
+	debug_sprintf_entry_t *curr_event;
+	debug_entry_t *active;
+
+	if((!id) || (level > id->level))
+		return NULL;
+	if (!debug_active)
+		return NULL;
+
+	numargs=debug_count_numargs(string);
+
+	spin_lock_irqsave(&id->lock, flags);
+	active = get_active_entry(id);
+	curr_event=(debug_sprintf_entry_t *)DEBUG_DATA(active);
+	va_start(ap,string);
+	curr_event->string=string;
+	/* one long is used by the string pointer, hence the -1 */
+	for(idx=0;idx<min(numargs,(int)(id->buf_size / sizeof(long))-1);idx++)
+		curr_event->args[idx]=va_arg(ap,long);
+	va_end(ap);
+	debug_finish_entry(id, active, level, 1);
+	spin_unlock_irqrestore(&id->lock, flags);
+
+	return active;
+}
+
+/*
+ * debug_init:
+ * - is called exactly once to initialize the debug feature: registers
+ *   the s390dbf sysctl table and creates the proc root directory
+ */
+
+static int __init debug_init(void)
+{
+	int rc = 0;
+
+	s390dbf_sysctl_header = register_sysctl_table(s390dbf_dir_table, 1);
+	down(&debug_lock);
+#ifdef CONFIG_PROC_FS
+	debug_proc_root_entry = proc_mkdir(DEBUG_DIR_ROOT, NULL);
+#endif /* CONFIG_PROC_FS */
+	printk(KERN_INFO "debug: Initialization complete\n");
+	/* debug_register() BUGs if called before this is set */
+	initialized = 1;
+	up(&debug_lock);
+
+	return rc;
+}
+
+/*
+ * debug_register_view:
+ * - attach a view to a debug-info and create its proc file; the file
+ *   mode is derived from which callbacks the view provides (readable
+ *   if it can format output, writable if it has an input function)
+ * - returns 0 on success, -1 on failure
+ */
+
+int debug_register_view(debug_info_t * id, struct debug_view *view)
+{
+	int rc = 0;
+	int i;
+	unsigned long flags;
+	mode_t mode = S_IFREG;
+	struct proc_dir_entry *pde;
+
+	if (!id)
+		goto out;
+	if (view->prolog_proc || view->format_proc || view->header_proc)
+		mode |= S_IRUSR;
+	if (view->input_proc)
+		mode |= S_IWUSR;
+	pde = create_proc_entry(view->name, mode, id->proc_root_entry);
+	if (!pde){
+		printk(KERN_WARNING "debug: create_proc_entry() failed! Cannot register view %s/%s\n", id->name,view->name);
+		rc = -1;
+		goto out;
+	}
+
+	/* find a free view slot; undo the proc entry if none is left */
+	spin_lock_irqsave(&id->lock, flags);
+	for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
+		if (id->views[i] == NULL)
+			break;
+	}
+	if (i == DEBUG_MAX_VIEWS) {
+		printk(KERN_WARNING "debug: cannot register view %s/%s\n",
+			id->name,view->name);
+		printk(KERN_WARNING 
+			"debug: maximum number of views reached (%i)!\n", i);
+		remove_proc_entry(pde->name, id->proc_root_entry);
+		rc = -1;
+	}
+	else {
+		id->views[i] = view;
+		pde->proc_fops = &debug_file_ops;
+		id->proc_entries[i] = pde;
+	}
+	spin_unlock_irqrestore(&id->lock, flags);
+      out:
+	return rc;
+}
+
+/*
+ * debug_unregister_view:
+ * - detach a view from a debug-info and remove its proc file
+ * - returns 0 on success, -1 if the view was not registered
+ */
+
+int debug_unregister_view(debug_info_t * id, struct debug_view *view)
+{
+	int rc = 0;
+	int i;
+	unsigned long flags;
+
+	if (!id)
+		goto out;
+	spin_lock_irqsave(&id->lock, flags);
+	for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
+		if (id->views[i] == view)
+			break;
+	}
+	if (i == DEBUG_MAX_VIEWS)
+		rc = -1;
+	else {
+#ifdef CONFIG_PROC_FS
+		remove_proc_entry(id->proc_entries[i]->name,
+				  id->proc_root_entry);
+#endif
+		id->views[i] = NULL;
+		rc = 0;
+	}
+	spin_unlock_irqrestore(&id->lock, flags);
+      out:
+	return rc;
+}
+
+/*
+ * functions for debug-views
+ ***********************************
+*/
+
+/*
+ * prints out actual debug level ("-" when logging is switched off)
+ */
+
+static int debug_prolog_level_fn(debug_info_t * id,
+				 struct debug_view *view, char *out_buf)
+{
+	if (id->level == -1)
+		return sprintf(out_buf,"-\n");
+	return sprintf(out_buf, "%i\n", id->level);
+}
+
+/*
+ * reads new debug level from user space: a single digit sets that
+ * level, '-' switches logging off; only the first write ( *offset == 0 )
+ * is interpreted, and the offset is advanced in every case
+ */
+
+static int debug_input_level_fn(debug_info_t * id, struct debug_view *view,
+				struct file *file, const char __user *user_buf,
+				size_t in_buf_size, loff_t * offset)
+{
+	char input_buf[1];
+	int rc = in_buf_size;
+
+	if (*offset != 0)
+		goto out;
+	if (copy_from_user(input_buf, user_buf, 1)){
+		rc = -EFAULT;
+		goto out;
+	}
+	if (isdigit(input_buf[0])) {
+		int new_level = ((int) input_buf[0] - (int) '0');
+		debug_set_level(id, new_level);
+	} else if(input_buf[0] == '-') {
+		debug_set_level(id, DEBUG_OFF_LEVEL);
+	} else {
+		printk(KERN_INFO "debug: level `%c` is not valid\n",
+		       input_buf[0]);
+	}
+      out:
+	*offset += in_buf_size;
+	return rc;		/* number of input characters */
+}
+
+
+/*
+ * flushes debug areas: either a single area (0 <= area < nr_areas)
+ * or all of them (DEBUG_FLUSH_ALL); a NULL id is a no-op
+ */
+
+void debug_flush(debug_info_t* id, int area)
+{
+	unsigned long flags;
+	int i;
+
+	if (!id)
+		return;
+	spin_lock_irqsave(&id->lock, flags);
+	if (area == DEBUG_FLUSH_ALL) {
+		/* clear every area and restart writing at area 0 */
+		id->active_area = 0;
+		memset(id->active_entry, 0, id->nr_areas * sizeof(int));
+		for (i = 0; i < id->nr_areas; i++)
+			memset(id->areas[i], 0, PAGE_SIZE << id->page_order);
+		printk(KERN_INFO "debug: %s: all areas flushed\n",id->name);
+	} else if (area >= 0 && area < id->nr_areas) {
+		id->active_entry[area] = 0;
+		memset(id->areas[area], 0, PAGE_SIZE << id->page_order);
+		printk(KERN_INFO
+			"debug: %s: area %i has been flushed\n",
+			id->name, area);
+	} else {
+		printk(KERN_INFO
+			"debug: %s: area %i cannot be flushed (range: %i - %i)\n",
+			id->name, area, 0, id->nr_areas-1);
+	}
+	spin_unlock_irqrestore(&id->lock, flags);
+}
+
+/*
+ * view function: flushes debug areas; a digit flushes that area,
+ * '-' flushes all of them; only the first write is interpreted and
+ * the offset is advanced in every case
+ */
+
+static int debug_input_flush_fn(debug_info_t * id, struct debug_view *view,
+                                struct file *file, const char __user *user_buf,
+                                size_t in_buf_size, loff_t * offset)
+{
+	int rc = in_buf_size;
+	char input_buf[1];
+
+	if (*offset == 0) {
+		if (copy_from_user(input_buf, user_buf, 1)) {
+			rc = -EFAULT;
+		} else if (input_buf[0] == '-') {
+			debug_flush(id, DEBUG_FLUSH_ALL);
+		} else if (isdigit(input_buf[0])) {
+			debug_flush(id, (int) input_buf[0] - (int) '0');
+		} else {
+			printk(KERN_INFO "debug: area `%c` is not valid\n", input_buf[0]);
+		}
+	}
+	*offset += in_buf_size;
+	return rc;              /* number of input characters */
+}
+
+/*
+ * prints debug header in raw format
+ */
+
+int debug_raw_header_fn(debug_info_t * id, struct debug_view *view,
+                         int area, debug_entry_t * entry, char *out_buf) /* copy the binary entry header unmodified; returns bytes written */
+{
+        int rc;
+
+	rc = sizeof(debug_entry_t);
+	memcpy(out_buf,entry,sizeof(debug_entry_t));
+        return rc;
+}
+
+/*
+ * prints debug data in raw format
+ */
+
+static int debug_raw_format_fn(debug_info_t * id, struct debug_view *view,
+			       char *out_buf, const char *in_buf) /* copy the entry payload verbatim; returns bytes written */
+{
+	int rc;
+
+	rc = id->buf_size;
+	memcpy(out_buf, in_buf, id->buf_size);
+	return rc;
+}
+
+/*
+ * prints debug data in hex/ascii format
+ */
+
+static int debug_hex_ascii_format_fn(debug_info_t * id, struct debug_view *view,
+		    		  char *out_buf, const char *in_buf) /* render payload as "xx xx ... | ascii", newline-terminated */
+{
+	int i, rc = 0;
+
+	for (i = 0; i < id->buf_size; i++) { /* hex dump, one "xx " per byte */
+                rc += sprintf(out_buf + rc, "%02x ",
+                              ((unsigned char *) in_buf)[i]);
+        }
+	rc += sprintf(out_buf + rc, "| ");
+	for (i = 0; i < id->buf_size; i++) { /* ascii column: '.' for non-printables */
+		unsigned char c = in_buf[i];
+		if (!isprint(c))
+			rc += sprintf(out_buf + rc, ".");
+		else
+			rc += sprintf(out_buf + rc, "%c", c);
+	}
+	rc += sprintf(out_buf + rc, "\n");
+	return rc; /* total characters produced */
+}
+
+/*
+ * prints header for debug entry
+ */
+
+int debug_dflt_header_fn(debug_info_t * id, struct debug_view *view,
+			 int area, debug_entry_t * entry, char *out_buf) /* format "area time level exc cpu caller" prefix for one entry */
+{
+	struct timeval time_val;
+	unsigned long long time;
+	char *except_str;
+	unsigned long caller;
+	int rc = 0;
+	unsigned int level;
+
+	level = entry->id.fields.level;
+	time = entry->id.stck; /* TOD clock value sampled when the event was logged */
+	/* adjust todclock to 1970 */
+	time -= 0x8126d60e46000000LL - (0x3c26700LL * 1000000 * 4096);
+	tod_to_timeval(time, &time_val);
+
+	if (entry->id.fields.exception) /* '*' marks exception entries, '-' normal ones */
+		except_str = "*";
+	else
+		except_str = "-";
+	caller = ((unsigned long) entry->caller) & PSW_ADDR_INSN; /* strip PSW mode bits from the saved caller address */
+	rc += sprintf(out_buf, "%02i %011lu:%06lu %1u %1s %02i %p  ",
+		      area, time_val.tv_sec, time_val.tv_usec, level,
+		      except_str, entry->id.fields.cpuid, (void *) caller);
+	return rc;
+}
+
+/*
+ * prints debug data sprintf-formated:
+ * debug_sprinf_event/exception calls must be used together with this view
+ */
+
+#define DEBUG_SPRINTF_MAX_ARGS 10
+
+int debug_sprintf_format_fn(debug_info_t * id, struct debug_view *view,
+                            char *out_buf, debug_sprintf_entry_t *curr_event) /* expand a stored sprintf-style event into text; returns length */
+{
+	int num_longs, num_used_args = 0,i, rc = 0;
+	int index[DEBUG_SPRINTF_MAX_ARGS];
+
+	/* count of longs fit into one entry */
+	num_longs = id->buf_size /  sizeof(long); 
+
+	if(num_longs < 1)
+		goto out; /* bufsize of entry too small */
+	if(num_longs == 1) {
+		/* no args, we use only the string */
+		strcpy(out_buf, curr_event->string);
+		rc = strlen(curr_event->string);
+		goto out;
+	}
+
+	/* number of arguments used for sprintf (without the format string) */
+	num_used_args   = min(DEBUG_SPRINTF_MAX_ARGS, (num_longs - 1));
+
+	memset(index,0, DEBUG_SPRINTF_MAX_ARGS * sizeof(int)); /* unused slots fall back to args[0] */
+
+	for(i = 0; i < num_used_args; i++) /* NOTE(review): index[] is the identity map — the indirection looks redundant */
+		index[i] = i;
+
+	rc =  sprintf(out_buf, curr_event->string, curr_event->args[index[0]], /* format string comes from the kernel caller of debug_sprintf_event, not user space */
+		curr_event->args[index[1]], curr_event->args[index[2]],
+		curr_event->args[index[3]], curr_event->args[index[4]],
+		curr_event->args[index[5]], curr_event->args[index[6]],
+		curr_event->args[index[7]], curr_event->args[index[8]],
+		curr_event->args[index[9]]);
+
+out:
+
+	return rc;
+}
+
+/*
+ * clean up module
+ */
+void __exit debug_exit(void) /* module cleanup: remove the proc entry and unregister the sysctl table */
+{
+#ifdef DEBUG
+	printk("debug_cleanup_module: \n");
+#endif
+#ifdef CONFIG_PROC_FS
+	remove_proc_entry(debug_proc_root_entry->name, NULL);
+#endif /* CONFIG_PROC_FS */
+	unregister_sysctl_table(s390dbf_sysctl_header);
+	return;
+}
+
+/*
+ * module definitions
+ */
+core_initcall(debug_init); /* initialize the debug feature early during boot */
+module_exit(debug_exit);
+MODULE_LICENSE("GPL");
+
+EXPORT_SYMBOL(debug_register); /* public s390 debug-feature API for other modules */
+EXPORT_SYMBOL(debug_unregister); 
+EXPORT_SYMBOL(debug_set_level);
+EXPORT_SYMBOL(debug_stop_all);
+EXPORT_SYMBOL(debug_register_view);
+EXPORT_SYMBOL(debug_unregister_view);
+EXPORT_SYMBOL(debug_event_common);
+EXPORT_SYMBOL(debug_exception_common);
+EXPORT_SYMBOL(debug_hex_ascii_view); /* predefined views */
+EXPORT_SYMBOL(debug_raw_view);
+EXPORT_SYMBOL(debug_dflt_header_fn);
+EXPORT_SYMBOL(debug_sprintf_view);
+EXPORT_SYMBOL(debug_sprintf_exception);
+EXPORT_SYMBOL(debug_sprintf_event);
diff --git a/arch/s390/kernel/ebcdic.c b/arch/s390/kernel/ebcdic.c
new file mode 100644
index 0000000..bb0f973
--- /dev/null
+++ b/arch/s390/kernel/ebcdic.c
@@ -0,0 +1,400 @@
+/*
+ *  arch/s390/kernel/ebcdic.c
+ *    ECBDIC -> ASCII, ASCII -> ECBDIC,
+ *    upper to lower case (EBCDIC) conversion tables.
+ *
+ *  S390 version
+ *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *               Martin Peschke <peschke@fh-brandenburg.de>
+ */
+
+#include <linux/module.h>
+#include <asm/types.h>
+
+/*
+ * ASCII (IBM PC 437)  -> EBCDIC 037
+ */
+__u8 _ascebc[256] = /* ASCII (IBM PC 437) -> EBCDIC 037; codes without a mapping become 0x3F (EBCDIC SUB) */
+{
+ /*00 NUL   SOH   STX   ETX   EOT   ENQ   ACK   BEL */
+     0x00, 0x01, 0x02, 0x03, 0x37, 0x2D, 0x2E, 0x2F,
+ /*08  BS    HT    LF    VT    FF    CR    SO    SI */
+ /*              ->NL                               */
+     0x16, 0x05, 0x15, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ /*10 DLE   DC1   DC2   DC3   DC4   NAK   SYN   ETB */
+     0x10, 0x11, 0x12, 0x13, 0x3C, 0x3D, 0x32, 0x26,
+ /*18 CAN    EM   SUB   ESC    FS    GS    RS    US */
+ /*                               ->IGS ->IRS ->IUS */
+     0x18, 0x19, 0x3F, 0x27, 0x22, 0x1D, 0x1E, 0x1F,
+ /*20  SP     !     "     #     $     %     &     ' */
+     0x40, 0x5A, 0x7F, 0x7B, 0x5B, 0x6C, 0x50, 0x7D,
+ /*28   (     )     *     +     ,     -    .      / */
+     0x4D, 0x5D, 0x5C, 0x4E, 0x6B, 0x60, 0x4B, 0x61,
+ /*30   0     1     2     3     4     5     6     7 */
+     0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
+ /*38   8     9     :     ;     <     =     >     ? */
+     0xF8, 0xF9, 0x7A, 0x5E, 0x4C, 0x7E, 0x6E, 0x6F,
+ /*40   @     A     B     C     D     E     F     G */
+     0x7C, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
+ /*48   H     I     J     K     L     M     N     O */
+     0xC8, 0xC9, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6,
+ /*50   P     Q     R     S     T     U     V     W */
+     0xD7, 0xD8, 0xD9, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6,
+ /*58   X     Y     Z     [     \     ]     ^     _ */
+     0xE7, 0xE8, 0xE9, 0xBA, 0xE0, 0xBB, 0xB0, 0x6D,
+ /*60   `     a     b     c     d     e     f     g */
+     0x79, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ /*68   h     i     j     k     l     m     n     o */
+     0x88, 0x89, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96,
+ /*70   p     q     r     s     t     u     v     w */
+     0x97, 0x98, 0x99, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6,
+ /*78   x     y     z     {     |     }     ~    DL */
+     0xA7, 0xA8, 0xA9, 0xC0, 0x4F, 0xD0, 0xA1, 0x07,
+ /*80*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*88*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*90*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*98*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*A0*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*A8*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*B0*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*B8*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*C0*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*C8*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*D0*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*D8*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*E0        sz						*/
+     0x3F, 0x59, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*E8*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*F0*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*F8*/
+     0x90, 0x3F, 0x3F, 0x3F, 0x3F, 0xEA, 0x3F, 0xFF
+};
+
+/*
+ * EBCDIC 037 -> ASCII (IBM PC 437)
+ */
+__u8 _ebcasc[256] = /* EBCDIC 037 -> ASCII (IBM PC 437); unmapped/control codes become 0x07 (BEL) */
+{
+ /* 0x00   NUL   SOH   STX   ETX  *SEL    HT  *RNL   DEL */
+          0x00, 0x01, 0x02, 0x03, 0x07, 0x09, 0x07, 0x7F,
+ /* 0x08   -GE  -SPS  -RPT    VT    FF    CR    SO    SI */
+          0x07, 0x07, 0x07, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ /* 0x10   DLE   DC1   DC2   DC3  -RES   -NL    BS  -POC
+                                  -ENP  ->LF             */
+          0x10, 0x11, 0x12, 0x13, 0x07, 0x0A, 0x08, 0x07,
+ /* 0x18   CAN    EM  -UBS  -CU1  -IFS  -IGS  -IRS  -ITB
+                                                    -IUS */
+          0x18, 0x19, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
+ /* 0x20   -DS  -SOS    FS  -WUS  -BYP    LF   ETB   ESC
+                                  -INP                   */
+          0x07, 0x07, 0x1C, 0x07, 0x07, 0x0A, 0x17, 0x1B,
+ /* 0x28   -SA  -SFE   -SM  -CSP  -MFA   ENQ   ACK   BEL
+                       -SW                               */ 
+          0x07, 0x07, 0x07, 0x07, 0x07, 0x05, 0x06, 0x07,
+ /* 0x30  ----  ----   SYN   -IR   -PP  -TRN  -NBS   EOT */
+          0x07, 0x07, 0x16, 0x07, 0x07, 0x07, 0x07, 0x04,
+ /* 0x38  -SBS   -IT  -RFF  -CU3   DC4   NAK  ----   SUB */
+          0x07, 0x07, 0x07, 0x07, 0x14, 0x15, 0x07, 0x1A,
+ /* 0x40    SP   RSP           ä              ----       */
+          0x20, 0xFF, 0x83, 0x84, 0x85, 0xA0, 0x07, 0x86,
+ /* 0x48                       .     <     (     +     | */
+          0x87, 0xA4, 0x9B, 0x2E, 0x3C, 0x28, 0x2B, 0x7C,
+ /* 0x50     &                                      ---- */
+          0x26, 0x82, 0x88, 0x89, 0x8A, 0xA1, 0x8C, 0x07,
+ /* 0x58           ß     !     $     *     )     ;       */
+          0x8D, 0xE1, 0x21, 0x24, 0x2A, 0x29, 0x3B, 0xAA,
+ /* 0x60     -     /  ----     Ä  ----  ----  ----       */
+          0x2D, 0x2F, 0x07, 0x8E, 0x07, 0x07, 0x07, 0x8F,
+ /* 0x68              ----     ,     %     _     >     ? */ 
+          0x80, 0xA5, 0x07, 0x2C, 0x25, 0x5F, 0x3E, 0x3F,
+ /* 0x70  ----        ----  ----  ----  ----  ----  ---- */
+          0x07, 0x90, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
+ /* 0x78     *     `     :     #     @     '     =     " */
+          0x70, 0x60, 0x3A, 0x23, 0x40, 0x27, 0x3D, 0x22,
+ /* 0x80     *     a     b     c     d     e     f     g */
+          0x07, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ /* 0x88     h     i              ----  ----  ----       */
+          0x68, 0x69, 0xAE, 0xAF, 0x07, 0x07, 0x07, 0xF1,
+ /* 0x90     °     j     k     l     m     n     o     p */
+          0xF8, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70,
+ /* 0x98     q     r                    ----        ---- */
+          0x71, 0x72, 0xA6, 0xA7, 0x91, 0x07, 0x92, 0x07,
+ /* 0xA0           ~     s     t     u     v     w     x */
+          0xE6, 0x7E, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
+ /* 0xA8     y     z              ----  ----  ----  ---- */
+          0x79, 0x7A, 0xAD, 0xAB, 0x07, 0x07, 0x07, 0x07,
+ /* 0xB0     ^                    ----     §  ----       */
+          0x5E, 0x9C, 0x9D, 0xFA, 0x07, 0x07, 0x07, 0xAC,
+ /* 0xB8        ----     [     ]  ----  ----  ----  ---- */
+          0xAB, 0x07, 0x5B, 0x5D, 0x07, 0x07, 0x07, 0x07,
+ /* 0xC0     {     A     B     C     D     E     F     G */
+          0x7B, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ /* 0xC8     H     I  ----           ö              ---- */
+          0x48, 0x49, 0x07, 0x93, 0x94, 0x95, 0xA2, 0x07,
+ /* 0xD0     }     J     K     L     M     N     O     P */
+          0x7D, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50,
+ /* 0xD8     Q     R  ----           ü                   */
+          0x51, 0x52, 0x07, 0x96, 0x81, 0x97, 0xA3, 0x98,
+ /* 0xE0     \           S     T     U     V     W     X */
+          0x5C, 0xF6, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
+ /* 0xE8     Y     Z        ----     Ö  ----  ----  ---- */
+          0x59, 0x5A, 0xFD, 0x07, 0x99, 0x07, 0x07, 0x07,
+ /* 0xF0     0     1     2     3     4     5     6     7 */
+          0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ /* 0xF8     8     9  ----  ----     Ü  ----  ----  ---- */
+          0x38, 0x39, 0x07, 0x07, 0x9A, 0x07, 0x07, 0x07
+};
+
+
+/*
+ * ASCII (IBM PC 437)  -> EBCDIC 500
+ */
+__u8 _ascebc_500[256] = /* ASCII (IBM PC 437) -> EBCDIC 500; differs from 037 mainly in !, [, ], ^, | placement; unmapped -> 0x3F */
+{
+ /*00 NUL   SOH   STX   ETX   EOT   ENQ   ACK   BEL */
+     0x00, 0x01, 0x02, 0x03, 0x37, 0x2D, 0x2E, 0x2F,
+ /*08  BS    HT    LF    VT    FF    CR    SO    SI */
+ /*              ->NL                               */
+     0x16, 0x05, 0x15, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ /*10 DLE   DC1   DC2   DC3   DC4   NAK   SYN   ETB */
+     0x10, 0x11, 0x12, 0x13, 0x3C, 0x3D, 0x32, 0x26,
+ /*18 CAN    EM   SUB   ESC    FS    GS    RS    US */
+ /*                               ->IGS ->IRS ->IUS */
+     0x18, 0x19, 0x3F, 0x27, 0x22, 0x1D, 0x1E, 0x1F,
+ /*20  SP     !     "     #     $     %     &     ' */
+     0x40, 0x4F, 0x7F, 0x7B, 0x5B, 0x6C, 0x50, 0x7D,
+ /*28   (     )     *     +     ,     -    .      / */
+     0x4D, 0x5D, 0x5C, 0x4E, 0x6B, 0x60, 0x4B, 0x61,
+ /*30   0     1     2     3     4     5     6     7 */
+     0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
+ /*38   8     9     :     ;     <     =     >     ? */
+     0xF8, 0xF9, 0x7A, 0x5E, 0x4C, 0x7E, 0x6E, 0x6F,
+ /*40   @     A     B     C     D     E     F     G */
+     0x7C, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
+ /*48   H     I     J     K     L     M     N     O */
+     0xC8, 0xC9, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6,
+ /*50   P     Q     R     S     T     U     V     W */
+     0xD7, 0xD8, 0xD9, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6,
+ /*58   X     Y     Z     [     \     ]     ^     _ */
+     0xE7, 0xE8, 0xE9, 0x4A, 0xE0, 0x5A, 0x5F, 0x6D,
+ /*60   `     a     b     c     d     e     f     g */
+     0x79, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ /*68   h     i     j     k     l     m     n     o */
+     0x88, 0x89, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96,
+ /*70   p     q     r     s     t     u     v     w */
+     0x97, 0x98, 0x99, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6,
+ /*78   x     y     z     {     |     }     ~    DL */
+     0xA7, 0xA8, 0xA9, 0xC0, 0xBB, 0xD0, 0xA1, 0x07,
+ /*80*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*88*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*90*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*98*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*A0*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*A8*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*B0*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*B8*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*C0*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*C8*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*D0*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*D8*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*E0        sz						*/
+     0x3F, 0x59, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*E8*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*F0*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*F8*/
+     0x90, 0x3F, 0x3F, 0x3F, 0x3F, 0xEA, 0x3F, 0xFF
+};
+
+/*
+ * EBCDIC 500 -> ASCII (IBM PC 437)
+ */
+__u8 _ebcasc_500[256] = /* EBCDIC 500 -> ASCII (IBM PC 437); unmapped/control codes become 0x07 (BEL) */
+{
+ /* 0x00   NUL   SOH   STX   ETX  *SEL    HT  *RNL   DEL */
+          0x00, 0x01, 0x02, 0x03, 0x07, 0x09, 0x07, 0x7F,
+ /* 0x08   -GE  -SPS  -RPT    VT    FF    CR    SO    SI */
+          0x07, 0x07, 0x07, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ /* 0x10   DLE   DC1   DC2   DC3  -RES   -NL    BS  -POC
+                                  -ENP  ->LF             */
+          0x10, 0x11, 0x12, 0x13, 0x07, 0x0A, 0x08, 0x07,
+ /* 0x18   CAN    EM  -UBS  -CU1  -IFS  -IGS  -IRS  -ITB
+                                                    -IUS */
+          0x18, 0x19, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
+ /* 0x20   -DS  -SOS    FS  -WUS  -BYP    LF   ETB   ESC
+                                  -INP                   */
+          0x07, 0x07, 0x1C, 0x07, 0x07, 0x0A, 0x17, 0x1B,
+ /* 0x28   -SA  -SFE   -SM  -CSP  -MFA   ENQ   ACK   BEL
+                       -SW                               */ 
+          0x07, 0x07, 0x07, 0x07, 0x07, 0x05, 0x06, 0x07,
+ /* 0x30  ----  ----   SYN   -IR   -PP  -TRN  -NBS   EOT */
+          0x07, 0x07, 0x16, 0x07, 0x07, 0x07, 0x07, 0x04,
+ /* 0x38  -SBS   -IT  -RFF  -CU3   DC4   NAK  ----   SUB */
+          0x07, 0x07, 0x07, 0x07, 0x14, 0x15, 0x07, 0x1A,
+ /* 0x40    SP   RSP           ä              ----       */
+          0x20, 0xFF, 0x83, 0x84, 0x85, 0xA0, 0x07, 0x86,
+ /* 0x48                 [     .     <     (     +     ! */
+          0x87, 0xA4, 0x5B, 0x2E, 0x3C, 0x28, 0x2B, 0x21,
+ /* 0x50     &                                      ---- */
+          0x26, 0x82, 0x88, 0x89, 0x8A, 0xA1, 0x8C, 0x07,
+ /* 0x58           ß     ]     $     *     )     ;     ^ */
+          0x8D, 0xE1, 0x5D, 0x24, 0x2A, 0x29, 0x3B, 0x5E,
+ /* 0x60     -     /  ----     Ä  ----  ----  ----       */
+          0x2D, 0x2F, 0x07, 0x8E, 0x07, 0x07, 0x07, 0x8F,
+ /* 0x68              ----     ,     %     _     >     ? */ 
+          0x80, 0xA5, 0x07, 0x2C, 0x25, 0x5F, 0x3E, 0x3F,
+ /* 0x70  ----        ----  ----  ----  ----  ----  ---- */
+          0x07, 0x90, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
+ /* 0x78     *     `     :     #     @     '     =     " */
+          0x70, 0x60, 0x3A, 0x23, 0x40, 0x27, 0x3D, 0x22,
+ /* 0x80     *     a     b     c     d     e     f     g */
+          0x07, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ /* 0x88     h     i              ----  ----  ----       */
+          0x68, 0x69, 0xAE, 0xAF, 0x07, 0x07, 0x07, 0xF1,
+ /* 0x90     °     j     k     l     m     n     o     p */
+          0xF8, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70,
+ /* 0x98     q     r                    ----        ---- */
+          0x71, 0x72, 0xA6, 0xA7, 0x91, 0x07, 0x92, 0x07,
+ /* 0xA0           ~     s     t     u     v     w     x */
+          0xE6, 0x7E, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
+ /* 0xA8     y     z              ----  ----  ----  ---- */
+          0x79, 0x7A, 0xAD, 0xAB, 0x07, 0x07, 0x07, 0x07,
+ /* 0xB0                          ----     §  ----       */
+          0x9B, 0x9C, 0x9D, 0xFA, 0x07, 0x07, 0x07, 0xAC,
+ /* 0xB8        ----           |  ----  ----  ----  ---- */
+          0xAB, 0x07, 0xAA, 0x7C, 0x07, 0x07, 0x07, 0x07,
+ /* 0xC0     {     A     B     C     D     E     F     G */
+          0x7B, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ /* 0xC8     H     I  ----           ö              ---- */
+          0x48, 0x49, 0x07, 0x93, 0x94, 0x95, 0xA2, 0x07,
+ /* 0xD0     }     J     K     L     M     N     O     P */
+          0x7D, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50,
+ /* 0xD8     Q     R  ----           ü                   */
+          0x51, 0x52, 0x07, 0x96, 0x81, 0x97, 0xA3, 0x98,
+ /* 0xE0     \           S     T     U     V     W     X */
+          0x5C, 0xF6, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
+ /* 0xE8     Y     Z        ----     Ö  ----  ----  ---- */
+          0x59, 0x5A, 0xFD, 0x07, 0x99, 0x07, 0x07, 0x07,
+ /* 0xF0     0     1     2     3     4     5     6     7 */
+          0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ /* 0xF8     8     9  ----  ----     Ü  ----  ----  ---- */
+          0x38, 0x39, 0x07, 0x07, 0x9A, 0x07, 0x07, 0x07
+};
+
+
+/*
+ * EBCDIC 037/500 conversion table:
+ * from upper to lower case
+ */
+__u8 _ebc_tolower[256] = /* EBCDIC upper->lower; identity except letter rows. NOTE(review): entries 0x9E->0x9C and 0xAC-0xAE->0x8C-0x8E look irregular — verify against codepage 037/500 */
+{
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+	0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+	0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F,
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+	0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F,
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+	0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F,
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+	0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F,
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+	0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F,
+	0x60, 0x61, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+	0x48, 0x49, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
+	0x70, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+	0x58, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F,
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+	0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D, 0x8E, 0x8F,
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+	0x98, 0x99, 0x9A, 0x9B, 0x9C, 0x9D, 0x9C, 0x9F,
+	0xA0, 0xA1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7,
+	0xA8, 0xA9, 0xAA, 0xAB, 0x8C, 0x8D, 0x8E, 0xAF,
+	0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7,
+	0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE, 0xBF,
+	0xC0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+	0x88, 0x89, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF,
+	0xD0, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+	0x98, 0x99, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xDF,
+	0xE0, 0xE1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7,
+	0xA8, 0xA9, 0xEA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF,
+	0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
+	0xF8, 0xF9, 0xFA, 0xDB, 0xDC, 0xDD, 0xDE, 0xFF
+};
+
+
+/*
+ * EBCDIC 037/500 conversion table:
+ * from lower to upper case
+ */
+__u8 _ebc_toupper[256] = /* EBCDIC lower->upper; identity except letter rows. NOTE(review): entry 0x9C->0x9E looks irregular — verify against codepage 037/500 */
+{
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+	0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+	0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F,
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+	0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F,
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+	0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F,
+	0x40, 0x41, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+	0x68, 0x69, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F,
+	0x50, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+	0x78, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F,
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+	0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+	0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F,
+	0x80, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
+	0xC8, 0xC9, 0x8A, 0x8B, 0xAC, 0xAD, 0xAE, 0x8F,
+	0x90, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7,
+	0xD8, 0xD9, 0x9A, 0x9B, 0x9E, 0x9D, 0x9E, 0x9F,
+	0xA0, 0xA1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
+	0xE8, 0xE9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xAF,
+	0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7,
+	0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE, 0xBF,
+	0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
+	0xC8, 0xC9, 0xCA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
+	0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7,
+	0xD8, 0xD9, 0xDA, 0xFB, 0xFC, 0xFD, 0xFE, 0xDF,
+	0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
+	0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
+	0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
+	0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF
+};
+
+EXPORT_SYMBOL(_ascebc_500); /* conversion tables are used by modular device drivers */
+EXPORT_SYMBOL(_ebcasc_500);
+EXPORT_SYMBOL(_ascebc);
+EXPORT_SYMBOL(_ebcasc);
+EXPORT_SYMBOL(_ebc_tolower);
+EXPORT_SYMBOL(_ebc_toupper);
+
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
new file mode 100644
index 0000000..c0e09b3
--- /dev/null
+++ b/arch/s390/kernel/entry.S
@@ -0,0 +1,868 @@
+/*
+ *  arch/s390/kernel/entry.S
+ *    S390 low-level entry points.
+ *
+ *  S390 version
+ *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ *               Hartmut Penner (hp@de.ibm.com),
+ *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
+ */
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <linux/config.h>
+#include <asm/cache.h>
+#include <asm/lowcore.h>
+#include <asm/errno.h>
+#include <asm/ptrace.h>
+#include <asm/thread_info.h>
+#include <asm/offsets.h>
+#include <asm/unistd.h>
+#include <asm/page.h>
+
+/*
+ * Stack layout for the system_call stack entry.
+ * The first few entries are identical to the user_regs_struct.
+ */
+SP_PTREGS    =  STACK_FRAME_OVERHEAD	# struct pt_regs lives just above the std stack frame
+SP_ARGS      =  STACK_FRAME_OVERHEAD + __PT_ARGS
+SP_PSW       =  STACK_FRAME_OVERHEAD + __PT_PSW
+SP_R0        =  STACK_FRAME_OVERHEAD + __PT_GPRS	# gprs are 4 bytes apart (31-bit kernel)
+SP_R1        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 4
+SP_R2        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 8
+SP_R3        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 12
+SP_R4        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 16
+SP_R5        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 20
+SP_R6        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 24
+SP_R7        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 28
+SP_R8        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 32
+SP_R9        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 36
+SP_R10       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 40
+SP_R11       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 44
+SP_R12       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 48
+SP_R13       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 52
+SP_R14       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 56
+SP_R15       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 60
+SP_ORIG_R2   =  STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
+SP_ILC       =  STACK_FRAME_OVERHEAD + __PT_ILC
+SP_TRAP      =  STACK_FRAME_OVERHEAD + __PT_TRAP
+SP_SIZE      =  STACK_FRAME_OVERHEAD + __PT_SIZE
+
+_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+		 _TIF_RESTART_SVC | _TIF_SINGLE_STEP )
+_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NEED_RESCHED)
+
+STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER	# kernel stack size is a power-of-two number of pages
+STACK_SIZE  = 1 << STACK_SHIFT
+
+#define BASED(name) name-system_call(%r13)
+
+/*
+ * Register usage in interrupt handlers:
+ *    R9  - pointer to current task structure
+ *    R13 - pointer to literal pool
+ *    R14 - return register for function calls
+ *    R15 - kernel stack pointer
+ */
+
+	.macro  STORE_TIMER lc_offset	# store cpu timer to lowcore when virtual cpu accounting is on
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	stpt	\lc_offset
+#endif
+	.endm
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	.macro  UPDATE_VTIME lc_from,lc_to,lc_sum	# 64-bit: lc_sum += lc_from - lc_to, using a 32-bit register pair
+	lm	%r10,%r11,\lc_from
+	sl	%r10,\lc_to
+	sl	%r11,\lc_to+4
+	bc	3,BASED(0f)	# cc 2|3: no borrow from low word -> skip fixup
+	sl	%r10,BASED(.Lc_1)	# propagate borrow into high word
+0:	al	%r10,\lc_sum
+	al	%r11,\lc_sum+4
+	bc	12,BASED(1f)	# cc 0|1: no carry from low word -> skip fixup
+	al	%r10,BASED(.Lc_1)	# propagate carry into high word
+1:	stm	%r10,%r11,\lc_sum
+	.endm
+#endif
+
+	.macro	SAVE_ALL_BASE savearea	# save %r12-%r15 to lowcore and set up the literal base
+	stm	%r12,%r15,\savearea
+	l	%r13,__LC_SVC_NEW_PSW+4	# load &system_call to %r13
+	.endm
+
+	.macro	SAVE_ALL psworg,savearea,sync	# pick a kernel stack and build a pt_regs frame on it
+	la	%r12,\psworg	# r12 = address of old psw in lowcore
+	.if	\sync
+	tm	\psworg+1,0x01		# test problem state bit
+	bz	BASED(2f)		# skip stack setup save
+	l	%r15,__LC_KERNEL_STACK	# problem state -> load ksp
+	.else
+	tm	\psworg+1,0x01		# test problem state bit
+	bnz	BASED(1f)		# from user -> load async stack
+	clc	\psworg+4(4),BASED(.Lcritical_end)	# interrupted inside the critical section ?
+	bhe	BASED(0f)
+	clc	\psworg+4(4),BASED(.Lcritical_start)
+	bl	BASED(0f)
+	l	%r14,BASED(.Lcleanup_critical)	# yes: let cleanup_critical fix up state first
+	basr	%r14,%r14
+	tm	0(%r12),0x01		# retest problem state after cleanup
+	bnz	BASED(1f)
+0:	l	%r14,__LC_ASYNC_STACK	# are we already on the async stack ?
+	slr	%r14,%r15
+	sra	%r14,STACK_SHIFT
+	be	BASED(2f)
+1:	l	%r15,__LC_ASYNC_STACK
+	.endif
+#ifdef CONFIG_CHECK_STACK
+	b	BASED(3f)
+2:	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD	# sp inside the guard area ? (local label 2 reused; 2f above binds to this one)
+	bz	BASED(stack_overflow)
+3:
+#endif
+2:	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
+	mvc	SP_PSW(8,%r15),0(%r12)	# move user PSW to stack
+	la	%r12,\psworg	# NOTE(review): r12 appears to already hold \psworg; low half doubles as the trap indication below
+	st	%r2,SP_ORIG_R2(%r15)	# store original content of gpr 2
+	icm	%r12,12,__LC_SVC_ILC	# insert ILC into the two high bytes of r12
+	stm	%r0,%r11,SP_R0(%r15)	# store gprs %r0-%r11 to kernel stack
+	st	%r12,SP_ILC(%r15)	# stores ILC (high half) and trap id (low half) together
+	mvc	SP_R12(16,%r15),\savearea # move %r12-%r15 to stack
+	la	%r12,0
+	st	%r12,__SF_BACKCHAIN(%r15)	# clear back chain
+	.endm
+
+	.macro  RESTORE_ALL sync	# reload user registers and leave via lpsw
+	mvc	__LC_RETURN_PSW(8),SP_PSW(%r15) # move user PSW to lowcore
+	.if !\sync
+	ni	__LC_RETURN_PSW+1,0xfd	# clear wait state bit
+	.endif
+	lm	%r0,%r15,SP_R0(%r15)	# load gprs 0-15 of user
+	STORE_TIMER __LC_EXIT_TIMER
+	lpsw	__LC_RETURN_PSW		# back to caller
+	.endm
+
+/*
+ * Scheduler resume function, called by switch_to
+ *  gpr2 = (task_struct *) prev
+ *  gpr3 = (task_struct *) next
+ * Returns:
+ *  gpr2 = prev
+ */
+        .globl  __switch_to
+__switch_to:
+        basr    %r1,0	# establish a base register for relative addressing
+__switch_to_base:
+	tm	__THREAD_per(%r3),0xe8		# new process is using per ?
+	bz	__switch_to_noper-__switch_to_base(%r1)	# if not we're fine
+        stctl   %c9,%c11,__SF_EMPTY(%r15)	# We are using per stuff
+        clc     __THREAD_per(12,%r3),__SF_EMPTY(%r15)
+        be      __switch_to_noper-__switch_to_base(%r1)	# we got away w/o bashing TLB's
+        lctl    %c9,%c11,__THREAD_per(%r3)	# Nope we didn't
+__switch_to_noper:
+        stm     %r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task
+	st	%r15,__THREAD_ksp(%r2)	# store kernel stack to prev->tss.ksp
+	l	%r15,__THREAD_ksp(%r3)	# load kernel stack from next->tss.ksp
+	lm	%r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task
+	st	%r3,__LC_CURRENT	# __LC_CURRENT = current task struct
+	lctl	%c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
+	l	%r3,__THREAD_info(%r3)  # load thread_info from task struct
+	st	%r3,__LC_THREAD_INFO
+	ahi	%r3,STACK_SIZE	# kernel stack grows down from thread_info + STACK_SIZE
+	st	%r3,__LC_KERNEL_STACK	# __LC_KERNEL_STACK = new kernel stack
+	br	%r14
+
+__critical_start:
+/*
+ * SVC interrupt handler routine. System calls are synchronous events and
+ * are executed with interrupts enabled.
+ */
+
+	.globl  system_call
+system_call:
+	STORE_TIMER __LC_SYNC_ENTER_TIMER	# svc interrupt entry point (svc new psw targets here)
+sysc_saveall:
+	SAVE_ALL_BASE __LC_SAVE_AREA
+        SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+	lh	%r7,0x8a	  # get svc number from lowcore
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+sysc_vtime:
+	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
+	bz	BASED(sysc_do_svc)
+	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
+sysc_stime:
+	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+sysc_update:
+	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+#endif
+sysc_do_svc:
+	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
+	sla	%r7,2             # *4 and test for svc 0
+	bnz	BASED(sysc_nr_ok) # svc number > 0
+	# svc 0: system call number in %r1
+	cl	%r1,BASED(.Lnr_syscalls)	# reject out-of-range indirect svc numbers
+	bnl	BASED(sysc_nr_ok)
+	lr	%r7,%r1           # copy svc number to %r7
+	sla	%r7,2             # *4
+sysc_nr_ok:
+	mvc	SP_ARGS(4,%r15),SP_R7(%r15)
+sysc_do_restart:
+	tm	__TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
+        l       %r8,sys_call_table-system_call(%r7,%r13) # get system call addr.
+        bnz     BASED(sysc_tracesys)
+        basr    %r14,%r8          # call sys_xxxx
+        st      %r2,SP_R2(%r15)   # store return value (change R2 on stack)
+                                  # ATTENTION: check sys_execve_glue before
+                                  # changing anything here !!
+
+sysc_return:
+	tm	SP_PSW+1(%r15),0x01	# returning to user ?
+	bno	BASED(sysc_leave)
+	tm	__TI_flags+3(%r9),_TIF_WORK_SVC
+	bnz	BASED(sysc_work)  # there is work to do (signals etc.)
+sysc_leave:
+        RESTORE_ALL 1
+
+#
+# recheck if there is more work to do
+#
+sysc_work_loop:
+	tm	__TI_flags+3(%r9),_TIF_WORK_SVC
+	bz	BASED(sysc_leave)      # there is no work to do
+#
+# One of the work bits is on. Find out which one.
+#
+sysc_work:
+	tm	__TI_flags+3(%r9),_TIF_NEED_RESCHED	# priority order: reschedule, signal, restart, single-step
+	bo	BASED(sysc_reschedule)
+	tm	__TI_flags+3(%r9),_TIF_SIGPENDING
+	bo	BASED(sysc_sigpending)
+	tm	__TI_flags+3(%r9),_TIF_RESTART_SVC
+	bo	BASED(sysc_restart)
+	tm	__TI_flags+3(%r9),_TIF_SINGLE_STEP
+	bo	BASED(sysc_singlestep)
+	b	BASED(sysc_leave)
+
+#
+# _TIF_NEED_RESCHED is set, call schedule
+#	
+sysc_reschedule:        
+        l       %r1,BASED(.Lschedule)
+	la      %r14,BASED(sysc_work_loop)	# return address: recheck the work flags afterwards
+	br      %r1		       # call scheduler
+
+#
+# _TIF_SIGPENDING is set, call do_signal
+#
+sysc_sigpending:     
+	ni	__TI_flags+3(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
+        la      %r2,SP_PTREGS(%r15)    # load pt_regs
+        sr      %r3,%r3                # clear *oldset
+        l       %r1,BASED(.Ldo_signal)
+	basr	%r14,%r1               # call do_signal
+	tm	__TI_flags+3(%r9),_TIF_RESTART_SVC	# do_signal may request a syscall restart
+	bo	BASED(sysc_restart)
+	tm	__TI_flags+3(%r9),_TIF_SINGLE_STEP
+	bo	BASED(sysc_singlestep)
+	b	BASED(sysc_leave)      # out of here, do NOT recheck
+
+#
+# _TIF_RESTART_SVC is set, set up registers and restart svc
+#
+sysc_restart:
+	ni	__TI_flags+3(%r9),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
+	l	%r7,SP_R2(%r15)        # load new svc number
+	sla	%r7,2	# scale to table offset
+	mvc	SP_R2(4,%r15),SP_ORIG_R2(%r15) # restore first argument
+	lm	%r2,%r6,SP_R2(%r15)    # load svc arguments
+	b	BASED(sysc_do_restart) # restart svc
+
+#
+# _TIF_SINGLE_STEP is set, call do_single_step
+#
+sysc_singlestep:
+	ni	__TI_flags+3(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
+	mvi	SP_TRAP+1(%r15),0x28	# set trap indication to pgm check
+	la	%r2,SP_PTREGS(%r15)	# address of register-save area
+	l	%r1,BASED(.Lhandle_per)	# load adr. of per handler
+	la	%r14,BASED(sysc_return)	# load adr. of system return
+	br	%r1			# branch to do_single_step
+
+__critical_end:
+
+#
+# call trace before and after sys_call
+#
+sysc_tracesys:
+        l       %r1,BASED(.Ltrace)
+	la	%r2,SP_PTREGS(%r15)    # load pt_regs
+	la	%r3,0                  # 2nd arg 0: entering the syscall
+	srl	%r7,2                  # table offset back to svc number
+	st	%r7,SP_R2(%r15)
+	basr	%r14,%r1               # call syscall_trace
+	clc	SP_R2(4,%r15),BASED(.Lnr_syscalls)
+	bnl	BASED(sysc_tracenogo)  # number out of range -> skip the call
+	l	%r7,SP_R2(%r15)        # strace might have changed the 
+	sll	%r7,2                  #  system call
+	l	%r8,sys_call_table-system_call(%r7,%r13)
+sysc_tracego:
+	lm	%r3,%r6,SP_R3(%r15)
+	l	%r2,SP_ORIG_R2(%r15)
+	basr	%r14,%r8          # call sys_xxx
+	st	%r2,SP_R2(%r15)   # store return value
+sysc_tracenogo:
+	tm	__TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
+        bz      BASED(sysc_return)
+	l	%r1,BASED(.Ltrace)
+	la	%r2,SP_PTREGS(%r15)    # load pt_regs
+	la	%r3,1                  # 2nd arg 1: leaving the syscall
+	la	%r14,BASED(sysc_return)
+	br	%r1
+
+#
+# a new process exits the kernel with ret_from_fork
+#
+        .globl  ret_from_fork
+ret_from_fork:
+	l	%r13,__LC_SVC_NEW_PSW+4	# base register (= &system_call)
+	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
+	tm	SP_PSW+1(%r15),0x01	# forking a kernel thread ?
+	bo	BASED(0f)
+	st	%r15,SP_R15(%r15)	# store stack pointer for new kthread
+0:	l       %r1,BASED(.Lschedtail)
+	basr    %r14,%r1		# call schedule_tail
+        stosm   __SF_EMPTY(%r15),0x03     # reenable interrupts
+	b	BASED(sysc_return)
+
+#
+# clone, fork, vfork, exec and sigreturn need glue,
+# because they all expect pt_regs as parameter,
+# but are called with different parameter.
+# return-address is set up above
+#
+sys_clone_glue: 
+        la      %r2,SP_PTREGS(%r15)    # load pt_regs
+        l       %r1,BASED(.Lclone)
+        br      %r1                   # branch to sys_clone
+
+sys_fork_glue:  
+        la      %r2,SP_PTREGS(%r15)    # load pt_regs
+        l       %r1,BASED(.Lfork)
+        br      %r1                   # branch to sys_fork
+
+sys_vfork_glue: 
+        la      %r2,SP_PTREGS(%r15)    # load pt_regs
+        l       %r1,BASED(.Lvfork)
+        br      %r1                   # branch to sys_vfork
+
+sys_execve_glue:        
+        la      %r2,SP_PTREGS(%r15)   # load pt_regs
+        l       %r1,BASED(.Lexecve)
+	lr      %r12,%r14             # save return address
+        basr    %r14,%r1              # call sys_execve
+        ltr     %r2,%r2               # check if execve failed
+        bnz     0(%r12)               # it did fail -> store result in gpr2
+        b       4(%r12)               # SKIP ST 2,SP_R2(15) after BASR 14,8
+                                      # in system_call/sysc_tracesys
+
+sys_sigreturn_glue:     
+        la      %r2,SP_PTREGS(%r15)   # load pt_regs as parameter
+        l       %r1,BASED(.Lsigreturn)
+        br      %r1                   # branch to sys_sigreturn
+
+sys_rt_sigreturn_glue:     
+        la      %r2,SP_PTREGS(%r15)   # load pt_regs as parameter
+        l       %r1,BASED(.Lrt_sigreturn)
+        br      %r1                   # branch to sys_rt_sigreturn
+
+#
+# sigsuspend and rt_sigsuspend need pt_regs as an additional
+# parameter and they have to skip the store of %r2 into the
+# user register %r2 because the return value was set in 
+# sigsuspend and rt_sigsuspend already and must not be overwritten!
+#
+
+sys_sigsuspend_glue:    
+        lr      %r5,%r4               # move mask back
+        lr      %r4,%r3               # move history1 parameter
+        lr      %r3,%r2               # move history0 parameter
+        la      %r2,SP_PTREGS(%r15)   # load pt_regs as first parameter
+        l       %r1,BASED(.Lsigsuspend)
+	la      %r14,4(%r14)          # skip store of return value
+        br      %r1                   # branch to sys_sigsuspend
+
+sys_rt_sigsuspend_glue: 
+        lr      %r4,%r3               # move sigsetsize parameter
+        lr      %r3,%r2               # move unewset parameter
+        la      %r2,SP_PTREGS(%r15)   # load pt_regs as first parameter
+        l       %r1,BASED(.Lrt_sigsuspend)
+	la      %r14,4(%r14)          # skip store of return value
+        br      %r1                   # branch to sys_rt_sigsuspend
+
+sys_sigaltstack_glue:
+        la      %r4,SP_PTREGS(%r15)   # load pt_regs as parameter
+        l       %r1,BASED(.Lsigaltstack)
+        br      %r1                   # branch to sys_sigaltstack
+
+
+/*
+ * Program check handler routine
+ */
+
+        .globl  pgm_check_handler
+pgm_check_handler:
+/*
+ * First we need to check for a special case:
+ * Single stepping an instruction that disables the PER event mask will
+ * cause a PER event AFTER the mask has been set. Example: SVC or LPSW.
+ * For a single stepped SVC the program check handler gets control after
+ * the SVC new PSW has been loaded. But we want to execute the SVC first and
+ * then handle the PER event. Therefore we update the SVC old PSW to point
+ * to the pgm_check_handler and branch to the SVC handler after we checked
+ * if we have to load the kernel stack register.
+ * For every other possible cause for PER event without the PER mask set
+ * we just ignore the PER event (FIXME: is there anything we have to do
+ * for LPSW?).
+ */
+	STORE_TIMER __LC_SYNC_ENTER_TIMER
+	SAVE_ALL_BASE __LC_SAVE_AREA
+        tm      __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
+        bnz     BASED(pgm_per)           # got per exception -> special case
+	SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
+	bz	BASED(pgm_no_vtime)
+	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
+	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+pgm_no_vtime:
+#endif
+	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
+        l       %r3,__LC_PGM_ILC         # load program interruption code
+	la	%r8,0x7f
+	nr	%r8,%r3			# isolate low 7 bits of the int. code
+pgm_do_call:
+        l       %r7,BASED(.Ljump_table)
+        sll     %r8,2			# code * 4 = offset into jump table
+        l       %r7,0(%r8,%r7)		 # load address of handler routine
+        la      %r2,SP_PTREGS(%r15)	 # address of register-save area
+	la      %r14,BASED(sysc_return)
+	br      %r7			 # branch to interrupt-handler
+
+#
+# handle per exception
+#
+pgm_per:
+        tm      __LC_PGM_OLD_PSW,0x40    # test if per event recording is on
+        bnz     BASED(pgm_per_std)       # ok, normal per event from user space
+# ok its one of the special cases, now we need to find out which one
+        clc     __LC_PGM_OLD_PSW(8),__LC_SVC_NEW_PSW
+        be      BASED(pgm_svcper)
+# no interesting special case, ignore PER event
+        lm      %r12,%r15,__LC_SAVE_AREA
+	lpsw    0x28			# resume via pgm check old PSW
+
+#
+# Normal per exception
+#
+pgm_per_std:
+	SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
+	bz	BASED(pgm_no_vtime2)
+	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
+	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+pgm_no_vtime2:
+#endif
+	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
+	l	%r1,__TI_task(%r9)
+	mvc	__THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
+	mvc	__THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
+	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
+	oi	__TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
+	l	%r3,__LC_PGM_ILC	 # load program interruption code
+	la	%r8,0x7f
+	nr	%r8,%r3                  # clear per-event-bit and ilc
+	be	BASED(sysc_return)       # only per or per+check ?
+	b	BASED(pgm_do_call)
+
+#
+# it was a single stepped SVC that is causing all the trouble
+#
+pgm_svcper:
+	SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
+	bz	BASED(pgm_no_vtime3)
+	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
+	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+pgm_no_vtime3:
+#endif
+	lh	%r7,0x8a		# get svc number from lowcore
+	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
+	l	%r1,__TI_task(%r9)
+	mvc	__THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
+	mvc	__THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
+	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
+	oi	__TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
+	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
+	b	BASED(sysc_do_svc)
+
+/*
+ * IO interrupt handler routine
+ */
+
+        .globl io_int_handler
+io_int_handler:
+	STORE_TIMER __LC_ASYNC_ENTER_TIMER
+	stck	__LC_INT_CLOCK
+	SAVE_ALL_BASE __LC_SAVE_AREA+16
+        SAVE_ALL __LC_IO_OLD_PSW,__LC_SAVE_AREA+16,0
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
+	bz	BASED(io_no_vtime)
+	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
+	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
+io_no_vtime:
+#endif
+	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
+        l       %r1,BASED(.Ldo_IRQ)        # load address of do_IRQ
+        la      %r2,SP_PTREGS(%r15) # address of register-save area
+        basr    %r14,%r1          # branch to standard irq handler
+
+io_return:
+        tm      SP_PSW+1(%r15),0x01    # returning to user ?
+#ifdef CONFIG_PREEMPT
+	bno     BASED(io_preempt)      # no -> check for preemptive scheduling
+#else
+        bno     BASED(io_leave)        # no-> skip resched & signal
+#endif
+	tm	__TI_flags+3(%r9),_TIF_WORK_INT
+	bnz	BASED(io_work)         # there is work to do (signals etc.)
+io_leave:
+        RESTORE_ALL 0
+
+#ifdef CONFIG_PREEMPT
+io_preempt:
+	icm	%r0,15,__TI_precount(%r9)	# preempt count != 0 ?
+	bnz     BASED(io_leave)		# yes -> preemption is disabled
+	l	%r1,SP_R15(%r15)	# build frame on interrupted stack
+	s	%r1,BASED(.Lc_spsize)
+	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
+        xc      __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
+	lr	%r15,%r1
+io_resume_loop:
+	tm	__TI_flags+3(%r9),_TIF_NEED_RESCHED
+	bno	BASED(io_leave)
+	mvc     __TI_precount(4,%r9),BASED(.Lc_pactive)	# count = PREEMPT_ACTIVE
+        stosm   __SF_EMPTY(%r15),0x03  # reenable interrupts
+        l       %r1,BASED(.Lschedule)
+	basr	%r14,%r1	       # call schedule
+        stnsm   __SF_EMPTY(%r15),0xfc  # disable I/O and ext. interrupts
+	xc      __TI_precount(4,%r9),__TI_precount(%r9)	# count = 0
+	b	BASED(io_resume_loop)
+#endif
+
+#
+# switch to kernel stack, then check the TIF bits
+#
+io_work:
+	l	%r1,__LC_KERNEL_STACK
+	s	%r1,BASED(.Lc_spsize)
+	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
+        xc      __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
+	lr	%r15,%r1
+#
+# One of the work bits is on. Find out which one.
+# Checked are: _TIF_SIGPENDING and _TIF_NEED_RESCHED
+#
+io_work_loop:
+	tm	__TI_flags+3(%r9),_TIF_NEED_RESCHED
+	bo	BASED(io_reschedule)
+	tm	__TI_flags+3(%r9),_TIF_SIGPENDING
+	bo	BASED(io_sigpending)
+	b	BASED(io_leave)
+
+#
+# _TIF_NEED_RESCHED is set, call schedule
+#	
+io_reschedule:        
+        l       %r1,BASED(.Lschedule)
+        stosm   __SF_EMPTY(%r15),0x03  # reenable interrupts
+	basr    %r14,%r1	       # call scheduler
+        stnsm   __SF_EMPTY(%r15),0xfc  # disable I/O and ext. interrupts
+	tm	__TI_flags+3(%r9),_TIF_WORK_INT
+	bz	BASED(io_leave)        # there is no work to do
+	b	BASED(io_work_loop)
+
+#
+# _TIF_SIGPENDING is set, call do_signal
+#
+io_sigpending:     
+        stosm   __SF_EMPTY(%r15),0x03  # reenable interrupts
+        la      %r2,SP_PTREGS(%r15)    # load pt_regs
+        sr      %r3,%r3                # clear *oldset
+        l       %r1,BASED(.Ldo_signal)
+	basr    %r14,%r1	       # call do_signal
+        stnsm   __SF_EMPTY(%r15),0xfc  # disable I/O and ext. interrupts
+	b	BASED(io_leave)        # out of here, do NOT recheck
+
+/*
+ * External interrupt handler routine
+ */
+
+        .globl  ext_int_handler
+ext_int_handler:
+	STORE_TIMER __LC_ASYNC_ENTER_TIMER
+	stck	__LC_INT_CLOCK
+	SAVE_ALL_BASE __LC_SAVE_AREA+16
+        SAVE_ALL __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16,0
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
+	bz	BASED(ext_no_vtime)
+	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
+	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
+ext_no_vtime:
+#endif
+	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
+	la	%r2,SP_PTREGS(%r15)    # address of register-save area
+	lh	%r3,__LC_EXT_INT_CODE  # get interruption code
+	l	%r1,BASED(.Ldo_extint)
+	basr	%r14,%r1               # call do_extint
+	b	BASED(io_return)       # share exit path with I/O interrupts
+
+/*
+ * Machine check handler routines
+ */
+
+        .globl mcck_int_handler
+mcck_int_handler:
+	STORE_TIMER __LC_ASYNC_ENTER_TIMER
+	SAVE_ALL_BASE __LC_SAVE_AREA+32
+        SAVE_ALL __LC_MCK_OLD_PSW,__LC_SAVE_AREA+32,0
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
+	bz	BASED(mcck_no_vtime)
+	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
+	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
+mcck_no_vtime:
+#endif
+	l       %r1,BASED(.Ls390_mcck)
+	basr    %r14,%r1	  # call machine check handler
+mcck_return:
+        RESTORE_ALL 0
+
+#ifdef CONFIG_SMP
+/*
+ * Restart interruption handler, kick starter for additional CPUs
+ */
+        .globl restart_int_handler
+restart_int_handler:
+        l       %r15,__LC_SAVE_AREA+60 # load ksp
+        lctl    %c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs
+        lam     %a0,%a15,__LC_AREGS_SAVE_AREA
+        lm      %r6,%r15,__SF_GPRS(%r15) # load registers from clone
+        stosm   __SF_EMPTY(%r15),0x04    # now we can turn dat on
+        basr    %r14,0                 # get current address ...
+        l       %r14,restart_addr-.(%r14) # ... to address restart_addr
+        br      %r14                   # branch to start_secondary
+restart_addr:
+        .long   start_secondary
+#else
+/*
+ * If we do not run with SMP enabled, let the new CPU crash ...
+ */
+        .globl restart_int_handler
+restart_int_handler:
+        basr    %r1,0                  # get current address
+restart_base:
+        lpsw    restart_crash-restart_base(%r1) # load disabled wait PSW
+        .align 8
+restart_crash:
+        .long  0x000a0000,0x00000000   # disabled wait PSW, address 0
+restart_go:
+#endif
+
+#ifdef CONFIG_CHECK_STACK
+/*
+ * The synchronous or the asynchronous stack overflowed. We are dead.
+ * No need to properly save the registers, we are going to panic anyway.
+ * Setup a pt_regs so that show_trace can provide a good call trace.
+ */
+stack_overflow:
+	l	%r15,__LC_PANIC_STACK	# change to panic stack
+	sl	%r15,BASED(.Lc_spsize)
+	mvc	SP_PSW(8,%r15),0(%r12)	# move user PSW to stack
+	stm	%r0,%r11,SP_R0(%r15)	# store gprs %r0-%r11 to kernel stack
+	la	%r1,__LC_SAVE_AREA
+	ch	%r12,BASED(.L0x020)	# old psw addr == __LC_SVC_OLD_PSW ?
+	be	BASED(0f)
+	ch	%r12,BASED(.L0x028)	# old psw addr == __LC_PGM_OLD_PSW ?
+	be	BASED(0f)
+	la	%r1,__LC_SAVE_AREA+16	# async entry used save area +16
+0:	mvc	SP_R12(16,%r15),0(%r1)	# move %r12-%r15 to stack
+        xc      __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear back chain
+	l	%r1,BASED(1f)		# branch to kernel_stack_overflow
+        la      %r2,SP_PTREGS(%r15)	# load pt_regs
+	br	%r1
+1:	.long  kernel_stack_overflow
+#endif
+
+# cleanup_critical is called (from SAVE_ALL) when an asynchronous interrupt
+# arrived while the PSW was inside the __critical_start/__critical_end
+# section.  4(%r12) is the address half of the interrupted PSW; each table
+# below gives the [start,end) of one half-finished region and the matching
+# cleanup routine rolls the state to a restartable point, leaving the
+# adjusted PSW in __LC_RETURN_PSW and repointing %r12 there.
+cleanup_table_system_call:
+	.long	system_call + 0x80000000, sysc_do_svc + 0x80000000
+cleanup_table_sysc_return:
+	.long	sysc_return + 0x80000000, sysc_leave + 0x80000000
+cleanup_table_sysc_leave:
+	.long	sysc_leave + 0x80000000, sysc_work_loop + 0x80000000
+cleanup_table_sysc_work_loop:
+	.long	sysc_work_loop + 0x80000000, sysc_reschedule + 0x80000000
+
+cleanup_critical:
+	clc	4(4,%r12),BASED(cleanup_table_system_call)
+	bl	BASED(0f)
+	clc	4(4,%r12),BASED(cleanup_table_system_call+4)
+	bl	BASED(cleanup_system_call)
+0:
+	clc	4(4,%r12),BASED(cleanup_table_sysc_return)
+	bl	BASED(0f)
+	clc	4(4,%r12),BASED(cleanup_table_sysc_return+4)
+	bl	BASED(cleanup_sysc_return)
+0:
+	clc	4(4,%r12),BASED(cleanup_table_sysc_leave)
+	bl	BASED(0f)
+	clc	4(4,%r12),BASED(cleanup_table_sysc_leave+4)
+	bl	BASED(cleanup_sysc_leave)
+0:
+	clc	4(4,%r12),BASED(cleanup_table_sysc_work_loop)
+	bl	BASED(0f)
+	clc	4(4,%r12),BASED(cleanup_table_sysc_work_loop+4)
+	# NOTE(review): the work_loop range also uses cleanup_sysc_leave,
+	# not cleanup_sysc_return — verify this is intended.
+	bl	BASED(cleanup_sysc_leave)
+0:
+	br	%r14
+
+# redo the interrupted parts of the system call entry sequence, then
+# resume at sysc_do_svc.  cleanup_system_call_insn marks how far we got.
+cleanup_system_call:
+	mvc	__LC_RETURN_PSW(8),0(%r12)
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4)
+	bh	BASED(0f)
+	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
+0:	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8)
+	bhe	BASED(cleanup_vtime)
+#endif
+	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn)
+	bh	BASED(0f)
+	mvc	__LC_SAVE_AREA(16),__LC_SAVE_AREA+16
+0:	st	%r13,__LC_SAVE_AREA+20
+	SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+	st	%r15,__LC_SAVE_AREA+28
+	lh	%r7,0x8a		# reload svc number from lowcore
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+cleanup_vtime:
+	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12)
+	bhe	BASED(cleanup_stime)
+	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
+	bz	BASED(cleanup_novtime)
+	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
+cleanup_stime:
+	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+16)
+	bh	BASED(cleanup_update)
+	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+cleanup_update:
+	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+cleanup_novtime:
+#endif
+	mvc	__LC_RETURN_PSW+4(4),BASED(cleanup_table_system_call+4)
+	la	%r12,__LC_RETURN_PSW
+	br	%r14
+cleanup_system_call_insn:
+	.long	sysc_saveall + 0x80000000
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	.long   system_call + 0x80000000
+	.long   sysc_vtime + 0x80000000
+	.long   sysc_stime + 0x80000000
+	.long   sysc_update + 0x80000000
+#endif
+
+# interrupted between sysc_return and sysc_leave: restart at sysc_return
+cleanup_sysc_return:
+	mvc	__LC_RETURN_PSW(4),0(%r12)
+	mvc	__LC_RETURN_PSW+4(4),BASED(cleanup_table_sysc_return)
+	la	%r12,__LC_RETURN_PSW
+	br	%r14
+
+# interrupted inside RESTORE_ALL: if registers were already reloaded,
+# redo the final state from the stack frame; otherwise nothing to undo.
+cleanup_sysc_leave:
+	clc	4(4,%r12),BASED(cleanup_sysc_leave_insn)
+	be	BASED(0f)
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	mvc	__LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
+	clc	4(4,%r12),BASED(cleanup_sysc_leave_insn+4)
+	be	BASED(0f)
+#endif
+	mvc	__LC_RETURN_PSW(8),SP_PSW(%r15)
+	mvc	__LC_SAVE_AREA+16(16),SP_R12(%r15)
+	lm	%r0,%r11,SP_R0(%r15)
+	l	%r15,SP_R15(%r15)
+0:	la	%r12,__LC_RETURN_PSW
+	br	%r14
+cleanup_sysc_leave_insn:
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	.long	sysc_leave + 14 + 0x80000000
+#endif
+	.long	sysc_leave + 10 + 0x80000000
+
+/*
+ * Integer constants
+ */
+               .align 4
+.Lc_spsize:    .long  SP_SIZE
+.Lc_overhead:  .long  STACK_FRAME_OVERHEAD
+.Lc_pactive:   .long  PREEMPT_ACTIVE
+.Lnr_syscalls: .long  NR_syscalls
+# low-core old-PSW addresses, compared against %r12 (e.g. in stack_overflow)
+.L0x018:       .short 0x018
+.L0x020:       .short 0x020
+.L0x028:       .short 0x028
+.L0x030:       .short 0x030
+.L0x038:       .short 0x038
+.Lc_1:         .long  1
+
+/*
+ * Symbol constants (addresses reached via BASED() loads)
+ */
+.Ls390_mcck:   .long  s390_do_machine_check
+.Ldo_IRQ:      .long  do_IRQ
+.Ldo_extint:   .long  do_extint
+.Ldo_signal:   .long  do_signal
+.Lhandle_per:  .long  do_single_step
+.Ljump_table:  .long  pgm_check_table
+.Lschedule:    .long  schedule
+.Lclone:       .long  sys_clone
+.Lexecve:      .long  sys_execve
+.Lfork:        .long  sys_fork
+.Lrt_sigreturn:.long  sys_rt_sigreturn
+.Lrt_sigsuspend:
+               .long  sys_rt_sigsuspend
+.Lsigreturn:   .long  sys_sigreturn
+.Lsigsuspend:  .long  sys_sigsuspend
+.Lsigaltstack: .long  sys_sigaltstack
+.Ltrace:       .long  syscall_trace
+.Lvfork:       .long  sys_vfork
+.Lschedtail:   .long  schedule_tail
+
+.Lcritical_start:
+               .long  __critical_start + 0x80000000
+.Lcritical_end:
+               .long  __critical_end + 0x80000000
+.Lcleanup_critical:
+               .long  cleanup_critical
+
+#define SYSCALL(esa,esame,emu)	.long esa
+	.globl  sys_call_table
+sys_call_table:
+#include "syscalls.S"
+#undef SYSCALL
+
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
new file mode 100644
index 0000000..51527ab
--- /dev/null
+++ b/arch/s390/kernel/entry64.S
@@ -0,0 +1,881 @@
+/*
+ *  arch/s390/kernel/entry64.S
+ *    S390 low-level entry points (64 bit).
+ *
+ *  S390 version
+ *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ *               Hartmut Penner (hp@de.ibm.com),
+ *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
+ */
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <linux/config.h>
+#include <asm/cache.h>
+#include <asm/lowcore.h>
+#include <asm/errno.h>
+#include <asm/ptrace.h>
+#include <asm/thread_info.h>
+#include <asm/offsets.h>
+#include <asm/unistd.h>
+#include <asm/page.h>
+
+/*
+ * Stack layout for the system_call stack entry.
+ * The first few entries are identical to the user_regs_struct.
+ */
+SP_PTREGS    =  STACK_FRAME_OVERHEAD
+SP_ARGS      =  STACK_FRAME_OVERHEAD + __PT_ARGS
+SP_PSW       =  STACK_FRAME_OVERHEAD + __PT_PSW
+SP_R0        =  STACK_FRAME_OVERHEAD + __PT_GPRS
+SP_R1        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 8
+SP_R2        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 16
+SP_R3        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 24
+SP_R4        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 32
+SP_R5        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 40
+SP_R6        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 48
+SP_R7        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 56
+SP_R8        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 64
+SP_R9        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 72
+SP_R10       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 80
+SP_R11       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 88
+SP_R12       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 96
+SP_R13       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 104
+SP_R14       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 112
+SP_R15       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 120
+SP_ORIG_R2   =  STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
+SP_ILC       =  STACK_FRAME_OVERHEAD + __PT_ILC
+SP_TRAP      =  STACK_FRAME_OVERHEAD + __PT_TRAP
+SP_SIZE      =  STACK_FRAME_OVERHEAD + __PT_SIZE
+
+STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
+STACK_SIZE  = 1 << STACK_SHIFT
+
+# TIF bits that need handling before returning to user space
+_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+		 _TIF_RESTART_SVC | _TIF_SINGLE_STEP )
+_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NEED_RESCHED)
+
+/* %r13 always holds the address of system_call (set by SAVE_ALL_BASE) */
+#define BASED(name) name-system_call(%r13)
+
+	.macro  STORE_TIMER lc_offset
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	stpt	\lc_offset		# store cpu timer into lowcore field
+#endif
+	.endm
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	# \lc_sum += \lc_from - \lc_to  (accumulate elapsed cpu time)
+	.macro  UPDATE_VTIME lc_from,lc_to,lc_sum
+	lg	%r10,\lc_from
+	slg	%r10,\lc_to
+	alg	%r10,\lc_sum
+	stg	%r10,\lc_sum
+	.endm
+#endif
+
+/*
+ * Register usage in interrupt handlers:
+ *    R9  - pointer to current task structure
+ *    R13 - pointer to literal pool
+ *    R14 - return register for function calls
+ *    R15 - kernel stack pointer
+ */
+
+        .macro  SAVE_ALL_BASE savearea
+	stmg	%r12,%r15,\savearea
+	larl	%r13,system_call	# base register for BASED()
+	.endm
+
+	# Set up the kernel stack frame and save all registers.
+	# sync=1: synchronous entry (svc/pgm); use kernel stack if from user.
+	# sync=0: asynchronous entry (irq);   use async stack; may first run
+	#         cleanup_critical if we interrupted the critical section.
+        .macro  SAVE_ALL psworg,savearea,sync
+	la	%r12,\psworg
+	.if	\sync
+	tm	\psworg+1,0x01		# test problem state bit
+	jz	2f			# skip stack setup save
+	lg	%r15,__LC_KERNEL_STACK	# problem state -> load ksp
+	.else
+	tm	\psworg+1,0x01		# test problem state bit
+	jnz	1f			# from user -> load async stack
+	clc	\psworg+8(8),BASED(.Lcritical_end)
+	jhe	0f
+	clc	\psworg+8(8),BASED(.Lcritical_start)
+	jl	0f
+	brasl	%r14,cleanup_critical	# fix half-done critical section
+	tm	0(%r12),0x01		# retest problem state after cleanup
+	jnz	1f
+0:	lg	%r14,__LC_ASYNC_STACK	# are we already on the async. stack ?
+	slgr	%r14,%r15
+	srag	%r14,%r14,STACK_SHIFT
+	jz	2f
+1:	lg	%r15,__LC_ASYNC_STACK	# load async stack
+	.endif
+#ifdef CONFIG_CHECK_STACK
+	j	3f
+2:	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD	# sp in guard area ?
+	jz	stack_overflow
+3:
+#endif
+2:	aghi    %r15,-SP_SIZE		# make room for registers & psw
+	mvc     SP_PSW(16,%r15),0(%r12)	# move user PSW to stack
+	la	%r12,\psworg
+	stg	%r2,SP_ORIG_R2(%r15)	# store original content of gpr 2
+	icm	%r12,12,__LC_SVC_ILC
+	stmg	%r0,%r11,SP_R0(%r15)	# store gprs %r0-%r11 to kernel stack
+	st	%r12,SP_ILC(%r15)
+	mvc	SP_R12(32,%r15),\savearea # move %r12-%r15 to stack
+	la	%r12,0
+	stg	%r12,__SF_BACKCHAIN(%r15)	# clear back chain
+        .endm
+
+	# Reload registers from the stack frame and return to the
+	# interrupted context.  sync=0 additionally clears the wait bit.
+	.macro	RESTORE_ALL sync
+	mvc	__LC_RETURN_PSW(16),SP_PSW(%r15) # move user PSW to lowcore
+	.if !\sync
+	ni	__LC_RETURN_PSW+1,0xfd	# clear wait state bit
+	.endif
+	lmg	%r0,%r15,SP_R0(%r15)	# load gprs 0-15 of user
+	STORE_TIMER __LC_EXIT_TIMER
+	lpswe	__LC_RETURN_PSW		# back to caller
+	.endm
+
+/*
+ * Scheduler resume function, called by switch_to
+ *  gpr2 = (task_struct *) prev
+ *  gpr3 = (task_struct *) next
+ * Returns:
+ *  gpr2 = prev
+ */
+        .globl  __switch_to
+__switch_to:
+	tm	__THREAD_per+4(%r3),0xe8 # is the new process using per ?
+	jz	__switch_to_noper		# if not we're fine
+        stctg   %c9,%c11,__SF_EMPTY(%r15)# We are using per stuff
+        clc     __THREAD_per(24,%r3),__SF_EMPTY(%r15)
+        je      __switch_to_noper            # we got away without bashing TLB's
+        lctlg   %c9,%c11,__THREAD_per(%r3)	# Nope we didn't
+__switch_to_noper:
+        stmg    %r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task
+	stg	%r15,__THREAD_ksp(%r2)	# store kernel stack to prev->tss.ksp
+	lg	%r15,__THREAD_ksp(%r3)	# load kernel stack from next->tss.ksp
+        lmg     %r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task
+	stg	%r3,__LC_CURRENT	# __LC_CURRENT = current task struct
+	lctl	%c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
+	lg	%r3,__THREAD_info(%r3)  # load thread_info from task struct
+	stg	%r3,__LC_THREAD_INFO
+	aghi	%r3,STACK_SIZE		# stack top = thread_info + STACK_SIZE
+	stg	%r3,__LC_KERNEL_STACK	# __LC_KERNEL_STACK = new kernel stack
+	br	%r14
+
+__critical_start:
+/*
+ * SVC interrupt handler routine. System calls are synchronous events and
+ * are executed with interrupts enabled.
+ */
+
+	.globl  system_call
+system_call:
+	STORE_TIMER __LC_SYNC_ENTER_TIMER
+sysc_saveall:
+	SAVE_ALL_BASE __LC_SAVE_AREA
+        SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+	llgh    %r7,__LC_SVC_INT_CODE # get svc number from lowcore
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+sysc_vtime:
+	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
+	jz	sysc_do_svc
+	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
+sysc_stime:
+	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+sysc_update:
+	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+#endif
+sysc_do_svc:
+	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
+        slag    %r7,%r7,2         # *4 and test for svc 0
+	jnz	sysc_nr_ok
+	# svc 0: system call number in %r1
+	cl	%r1,BASED(.Lnr_syscalls)
+	jnl	sysc_nr_ok		# out of range -> keep %r7 = 0 (svc 0)
+	lgfr	%r7,%r1           # clear high word in r1
+	slag    %r7,%r7,2         # svc 0: system call number in %r1
+sysc_nr_ok:
+	mvc	SP_ARGS(8,%r15),SP_R7(%r15)
+sysc_do_restart:
+	larl    %r10,sys_call_table
+#ifdef CONFIG_S390_SUPPORT
+        tm      SP_PSW+3(%r15),0x01  # are we running in 31 bit mode ?
+        jo      sysc_noemu
+	larl    %r10,sys_call_table_emu  # use 31 bit emulation system calls
+sysc_noemu:
+#endif
+	tm	__TI_flags+7(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
+        lgf     %r8,0(%r7,%r10)   # load address of system call routine
+        jnz     sysc_tracesys
+        basr    %r14,%r8          # call sys_xxxx
+        stg     %r2,SP_R2(%r15)   # store return value (change R2 on stack)
+                                  # ATTENTION: check sys_execve_glue before
+                                  # changing anything here !!
+
+sysc_return:
+        tm      SP_PSW+1(%r15),0x01    # returning to user ?
+        jno     sysc_leave
+	tm	__TI_flags+7(%r9),_TIF_WORK_SVC
+	jnz	sysc_work         # there is work to do (signals etc.)
+sysc_leave:
+        RESTORE_ALL 1
+
+#
+# recheck if there is more work to do
+#
+sysc_work_loop:
+	tm	__TI_flags+7(%r9),_TIF_WORK_SVC
+	jz	sysc_leave        # there is no work to do
+#
+# One of the work bits is on. Find out which one.
+# Checked in priority order: reschedule, signal, svc restart, single step.
+#
+sysc_work:
+	tm	__TI_flags+7(%r9),_TIF_NEED_RESCHED
+	jo	sysc_reschedule
+	tm	__TI_flags+7(%r9),_TIF_SIGPENDING
+	jo	sysc_sigpending
+	tm	__TI_flags+7(%r9),_TIF_RESTART_SVC
+	jo	sysc_restart
+	tm	__TI_flags+7(%r9),_TIF_SINGLE_STEP
+	jo	sysc_singlestep
+	j	sysc_leave
+
+#
+# _TIF_NEED_RESCHED is set, call schedule
+#	
+sysc_reschedule:        
+	larl    %r14,sysc_work_loop
+        jg      schedule            # return point is sysc_work_loop
+
+#
+# _TIF_SIGPENDING is set, call do_signal
+#
+sysc_sigpending:     
+	ni	__TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
+        la      %r2,SP_PTREGS(%r15) # load pt_regs
+        sgr     %r3,%r3           # clear *oldset
+	brasl	%r14,do_signal    # call do_signal
+	tm	__TI_flags+7(%r9),_TIF_RESTART_SVC
+	jo	sysc_restart
+	tm	__TI_flags+7(%r9),_TIF_SINGLE_STEP
+	jo	sysc_singlestep
+	j	sysc_leave        # out of here, do NOT recheck
+
+#
+# _TIF_RESTART_SVC is set, set up registers and restart svc
+#
+sysc_restart:
+	ni	__TI_flags+7(%r9),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
+	lg	%r7,SP_R2(%r15)        # load new svc number
+        slag    %r7,%r7,2              # *4
+	mvc	SP_R2(8,%r15),SP_ORIG_R2(%r15) # restore first argument
+	lmg	%r2,%r6,SP_R2(%r15)    # load svc arguments
+	j	sysc_do_restart        # restart svc
+
+#
+# _TIF_SINGLE_STEP is set, call do_single_step
+#
+sysc_singlestep:
+	ni	__TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
+	lhi	%r0,__LC_PGM_OLD_PSW
+	sth	%r0,SP_TRAP(%r15)	# set trap indication to pgm check
+	la	%r2,SP_PTREGS(%r15)	# address of register-save area
+	larl	%r14,sysc_return	# load adr. of system return
+	jg	do_single_step		# branch to do_single_step
+
+
+# end of the critical section handled by cleanup_critical
+__critical_end:
+
+#
+# call syscall_trace before and after system call
+# special linkage: %r12 contains the return address for trace_svc
+#
+sysc_tracesys:
+	la	%r2,SP_PTREGS(%r15)    # load pt_regs
+	la	%r3,0                  # 2nd arg 0: entering the syscall
+	srl	%r7,2                  # table offset back to svc number
+	stg     %r7,SP_R2(%r15)
+        brasl   %r14,syscall_trace
+	lghi	%r0,NR_syscalls
+	clg	%r0,SP_R2(%r15)
+	jnh	sysc_tracenogo         # number out of range -> skip the call
+	lg	%r7,SP_R2(%r15)   # strace might have changed the
+	sll     %r7,2             #  system call
+	lgf	%r8,0(%r7,%r10)
+sysc_tracego:
+	lmg     %r3,%r6,SP_R3(%r15)
+	lg      %r2,SP_ORIG_R2(%r15)
+        basr    %r14,%r8            # call sys_xxx
+        stg     %r2,SP_R2(%r15)     # store return value
+sysc_tracenogo:
+	tm	__TI_flags+7(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
+        jz      sysc_return
+	la	%r2,SP_PTREGS(%r15)    # load pt_regs
+	la	%r3,1                  # 2nd arg 1: leaving the syscall
+	larl	%r14,sysc_return    # return point is sysc_return
+	jg	syscall_trace
+
+#
+# a new process exits the kernel with ret_from_fork
+#
+        .globl  ret_from_fork
+ret_from_fork:
+	lg	%r13,__LC_SVC_NEW_PSW+8	# base register (= &system_call)
+	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
+	tm	SP_PSW+1(%r15),0x01	# forking a kernel thread ?
+	jo	0f
+	stg	%r15,SP_R15(%r15)	# store stack pointer for new kthread
+0:	brasl   %r14,schedule_tail
+        stosm   24(%r15),0x03     # reenable interrupts
+	j	sysc_return
+
+#
+# clone, fork, vfork, exec and sigreturn need glue,
+# because they all expect pt_regs as parameter,
+# but are called with different parameter.
+# return-address is set up above
+#
+sys_clone_glue: 
+        la      %r2,SP_PTREGS(%r15)    # load pt_regs
+        jg      sys_clone              # branch to sys_clone
+
+#ifdef CONFIG_S390_SUPPORT
+sys32_clone_glue: 
+        la      %r2,SP_PTREGS(%r15)    # load pt_regs
+        jg      sys32_clone            # branch to sys32_clone
+#endif
+
+sys_fork_glue:  
+        la      %r2,SP_PTREGS(%r15)    # load pt_regs
+        jg      sys_fork               # branch to sys_fork
+
+sys_vfork_glue: 
+        la      %r2,SP_PTREGS(%r15)    # load pt_regs
+        jg      sys_vfork              # branch to sys_vfork
+
+sys_execve_glue:        
+        la      %r2,SP_PTREGS(%r15)   # load pt_regs
+	lgr     %r12,%r14             # save return address
+        brasl   %r14,sys_execve       # call sys_execve
+        ltgr    %r2,%r2               # check if execve failed
+        bnz     0(%r12)               # it did fail -> store result in gpr2
+        b       6(%r12)               # SKIP STG 2,SP_R2(15) in
+                                      # system_call/sysc_tracesys
+#ifdef CONFIG_S390_SUPPORT
+sys32_execve_glue:        
+        la      %r2,SP_PTREGS(%r15)   # load pt_regs
+	lgr     %r12,%r14             # save return address
+        brasl   %r14,sys32_execve     # call sys32_execve
+        ltgr    %r2,%r2               # check if execve failed
+        bnz     0(%r12)               # it did fail -> store result in gpr2
+        b       6(%r12)               # SKIP STG 2,SP_R2(15) in
+                                      # system_call/sysc_tracesys
+#endif
+
+sys_sigreturn_glue:     
+        la      %r2,SP_PTREGS(%r15)   # load pt_regs as parameter
+        jg      sys_sigreturn         # branch to sys_sigreturn
+
+#ifdef CONFIG_S390_SUPPORT
+sys32_sigreturn_glue:     
+        la      %r2,SP_PTREGS(%r15)   # load pt_regs as parameter
+        jg      sys32_sigreturn       # branch to sys32_sigreturn
+#endif
+
+sys_rt_sigreturn_glue:     
+        la      %r2,SP_PTREGS(%r15)   # load pt_regs as parameter
+        jg      sys_rt_sigreturn      # branch to sys_rt_sigreturn
+
+#ifdef CONFIG_S390_SUPPORT
+sys32_rt_sigreturn_glue:     
+        la      %r2,SP_PTREGS(%r15)   # load pt_regs as parameter
+        jg      sys32_rt_sigreturn    # branch to sys32_sigreturn
+#endif
+
+#
+# sigsuspend and rt_sigsuspend need pt_regs as an additional
+# parameter and they have to skip the store of %r2 into the
+# user register %r2 because the return value was set in 
+# sigsuspend and rt_sigsuspend already and must not be overwritten!
+#
+
+sys_sigsuspend_glue:    
+        lgr     %r5,%r4               # move mask back
+        lgr     %r4,%r3               # move history1 parameter
+        lgr     %r3,%r2               # move history0 parameter
+        la      %r2,SP_PTREGS(%r15)   # load pt_regs as first parameter
+	la      %r14,6(%r14)          # skip store of return value
+        jg      sys_sigsuspend        # branch to sys_sigsuspend
+
+#ifdef CONFIG_S390_SUPPORT
+sys32_sigsuspend_glue:    
+	llgfr	%r4,%r4               # unsigned long			
+        lgr     %r5,%r4               # move mask back
+	lgfr	%r3,%r3               # int			
+        lgr     %r4,%r3               # move history1 parameter
+	lgfr	%r2,%r2               # int			
+        lgr     %r3,%r2               # move history0 parameter
+        la      %r2,SP_PTREGS(%r15)   # load pt_regs as first parameter
+	la      %r14,6(%r14)          # skip store of return value
+        jg      sys32_sigsuspend      # branch to sys32_sigsuspend
+#endif
+
+sys_rt_sigsuspend_glue: 
+        lgr     %r4,%r3               # move sigsetsize parameter
+        lgr     %r3,%r2               # move unewset parameter
+        la      %r2,SP_PTREGS(%r15)   # load pt_regs as first parameter
+	la      %r14,6(%r14)          # skip store of return value
+        jg      sys_rt_sigsuspend     # branch to sys_rt_sigsuspend
+
+#ifdef CONFIG_S390_SUPPORT
+sys32_rt_sigsuspend_glue: 
+	llgfr	%r3,%r3               # size_t			
+        lgr     %r4,%r3               # move sigsetsize parameter
+	llgtr	%r2,%r2               # sigset_emu31_t *
+        lgr     %r3,%r2               # move unewset parameter
+        la      %r2,SP_PTREGS(%r15)   # load pt_regs as first parameter
+	la      %r14,6(%r14)          # skip store of return value
+        jg      sys32_rt_sigsuspend   # branch to sys32_rt_sigsuspend
+#endif
+
+sys_sigaltstack_glue:
+        la      %r4,SP_PTREGS(%r15)   # load pt_regs as parameter
+        jg      sys_sigaltstack       # branch to sys_sigreturn
+
+#ifdef CONFIG_S390_SUPPORT
+sys32_sigaltstack_glue:
+        la      %r4,SP_PTREGS(%r15)   # load pt_regs as parameter
+        jg      sys32_sigaltstack_wrapper # branch to sys_sigreturn
+#endif
+
+/*
+ * Program check handler routine
+ */
+
+        .globl  pgm_check_handler
+pgm_check_handler:
+/*
+ * First we need to check for a special case:
+ * Single stepping an instruction that disables the PER event mask will
+ * cause a PER event AFTER the mask has been set. Example: SVC or LPSW.
+ * For a single stepped SVC the program check handler gets control after
+ * the SVC new PSW has been loaded. But we want to execute the SVC first and
+ * then handle the PER event. Therefore we update the SVC old PSW to point
+ * to the pgm_check_handler and branch to the SVC handler after we checked
+ * if we have to load the kernel stack register.
+ * For every other possible cause for PER event without the PER mask set
+ * we just ignore the PER event (FIXME: is there anything we have to do
+ * for LPSW?).
+ */
+	STORE_TIMER __LC_SYNC_ENTER_TIMER
+	SAVE_ALL_BASE __LC_SAVE_AREA
+        tm      __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
+        jnz     pgm_per                  # got per exception -> special case
+	SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
+	jz	pgm_no_vtime
+	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
+	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+pgm_no_vtime:
+#endif
+	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
+	lgf     %r3,__LC_PGM_ILC	 # load program interruption code
+	lghi	%r8,0x7f		 # isolate low 7 bits of int. code
+	ngr	%r8,%r3
+pgm_do_call:
+        sll     %r8,3			 # *8: pgm_check_table has quad entries
+        larl    %r1,pgm_check_table
+        lg      %r1,0(%r8,%r1)		 # load address of handler routine
+        la      %r2,SP_PTREGS(%r15)	 # address of register-save area
+	larl	%r14,sysc_return	 # handler returns to sysc_return
+        br      %r1			 # branch to interrupt-handler
+
+#
+# handle per exception
+#
+pgm_per:
+        tm      __LC_PGM_OLD_PSW,0x40    # test if per event recording is on
+        jnz     pgm_per_std              # ok, normal per event from user space
+# ok, it's one of the special cases, now we need to find out which one
+        clc     __LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW
+        je      pgm_svcper		 # old psw == svc new psw: stepped svc
+# no interesting special case, ignore PER event
+	lmg	%r12,%r15,__LC_SAVE_AREA
+	lpswe   __LC_PGM_OLD_PSW	 # resume at point of interruption
+
+#
+# Normal per exception
+#
+pgm_per_std:
+	SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
+	jz	pgm_no_vtime2
+	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
+	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+pgm_no_vtime2:
+#endif
+	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
+	lg	%r1,__TI_task(%r9)
+	mvc	__THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
+	mvc	__THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
+	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
+	oi	__TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
+	lgf     %r3,__LC_PGM_ILC	 # load program interruption code
+	lghi	%r8,0x7f
+	ngr	%r8,%r3			 # clear per-event-bit and ilc
+	je	sysc_return		 # result zero: per event only -> done
+	j	pgm_do_call		 # else handle the pgm check as well
+
+#
+# it was a single stepped SVC that is causing all the trouble
+#
+pgm_svcper:
+	SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
+	jz	pgm_no_vtime3
+	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
+	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+pgm_no_vtime3:
+#endif
+	llgh    %r7,__LC_SVC_INT_CODE	# get svc number from lowcore
+	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
+	lg	%r1,__TI_task(%r9)
+	mvc	__THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
+	mvc	__THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
+	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
+	oi	__TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
+	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
+	j	sysc_do_svc		# do the svc; step handled on exit
+
+/*
+ * IO interrupt handler routine
+ */
+        .globl io_int_handler
+io_int_handler:
+	STORE_TIMER __LC_ASYNC_ENTER_TIMER
+	stck	__LC_INT_CLOCK		# record interrupt entry clock
+	SAVE_ALL_BASE __LC_SAVE_AREA+32
+        SAVE_ALL __LC_IO_OLD_PSW,__LC_SAVE_AREA+32,0
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
+	jz	io_no_vtime
+	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
+	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
+io_no_vtime:
+#endif
+	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
+        la      %r2,SP_PTREGS(%r15)    # address of register-save area
+	brasl   %r14,do_IRQ            # call standard irq handler
+
+io_return:
+        tm      SP_PSW+1(%r15),0x01    # returning to user ?
+#ifdef CONFIG_PREEMPT
+	jno     io_preempt             # no -> check for preemptive scheduling
+#else
+        jno     io_leave               # no-> skip resched & signal
+#endif
+	tm	__TI_flags+7(%r9),_TIF_WORK_INT
+	jnz	io_work                # there is work to do (signals etc.)
+io_leave:
+        RESTORE_ALL 0
+
+#ifdef CONFIG_PREEMPT
+io_preempt:
+	icm	%r0,15,__TI_precount(%r9)	# preempt_count != 0 ?
+	jnz     io_leave			# yes -> no preemption
+	# switch to kernel stack
+	lg	%r1,SP_R15(%r15)
+	aghi	%r1,-SP_SIZE
+	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
+        xc      __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
+	lgr	%r15,%r1
+io_resume_loop:
+	tm	__TI_flags+7(%r9),_TIF_NEED_RESCHED
+	jno	io_leave
+	larl    %r1,.Lc_pactive
+	mvc     __TI_precount(4,%r9),0(%r1)	# preempt_count = PREEMPT_ACTIVE
+        stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
+	brasl   %r14,schedule          # call schedule
+        stnsm   __SF_EMPTY(%r15),0xfc   # disable I/O and ext. interrupts
+	xc      __TI_precount(4,%r9),__TI_precount(%r9)	# preempt_count = 0
+	j	io_resume_loop
+#endif
+
+#
+# switch to kernel stack, then check TIF bits
+#
+io_work:
+	lg	%r1,__LC_KERNEL_STACK
+	aghi	%r1,-SP_SIZE		# make room for a pt_regs frame
+	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
+        xc      __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
+	lgr	%r15,%r1
+#
+# One of the work bits is on. Find out which one.
+# Checked are: _TIF_SIGPENDING and _TIF_NEED_RESCHED
+#
+io_work_loop:
+	tm	__TI_flags+7(%r9),_TIF_NEED_RESCHED
+	jo	io_reschedule
+	tm	__TI_flags+7(%r9),_TIF_SIGPENDING
+	jo	io_sigpending
+	j	io_leave
+
+#
+# _TIF_NEED_RESCHED is set, call schedule
+#	
+io_reschedule:        
+	stosm   __SF_EMPTY(%r15),0x03	# reenable interrupts
+	brasl   %r14,schedule		# call scheduler
+	stnsm   __SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
+	tm	__TI_flags+7(%r9),_TIF_WORK_INT
+	jz	io_leave		# there is no work to do
+	j	io_work_loop		# recheck the remaining work bits
+
+#
+# _TIF_SIGPENDING is set, call do_signal
+#
+io_sigpending:     
+	stosm   __SF_EMPTY(%r15),0x03	# reenable interrupts
+	la      %r2,SP_PTREGS(%r15)	# load pt_regs (1st arg of do_signal)
+	slgr    %r3,%r3			# clear *oldset
+	brasl	%r14,do_signal		# call do_signal
+	stnsm   __SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
+	j	sysc_leave		# out of here, do NOT recheck
+
+/*
+ * External interrupt handler routine
+ */
+        .globl  ext_int_handler
+ext_int_handler:
+	STORE_TIMER __LC_ASYNC_ENTER_TIMER
+	stck	__LC_INT_CLOCK		# record interrupt entry clock
+	SAVE_ALL_BASE __LC_SAVE_AREA+32
+        SAVE_ALL __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32,0
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
+	jz	ext_no_vtime
+	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
+	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
+ext_no_vtime:
+#endif
+	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
+	la	%r2,SP_PTREGS(%r15)    # address of register-save area
+	llgh	%r3,__LC_EXT_INT_CODE  # get interruption code
+	brasl   %r14,do_extint
+	j	io_return		# share work-check/exit path with I/O
+
+/*
+ * Machine check handler routines
+ */
+        .globl mcck_int_handler
+mcck_int_handler:
+	STORE_TIMER __LC_ASYNC_ENTER_TIMER
+	SAVE_ALL_BASE __LC_SAVE_AREA+64
+        SAVE_ALL __LC_MCK_OLD_PSW,__LC_SAVE_AREA+64,0
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
+	jz	mcck_no_vtime
+	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
+	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
+mcck_no_vtime:
+#endif
+	brasl   %r14,s390_do_machine_check	# C machine check handler
+mcck_return:
+        RESTORE_ALL 0
+
+#ifdef CONFIG_SMP
+/*
+ * Restart interruption handler, kick starter for additional CPUs
+ */
+        .globl restart_int_handler
+restart_int_handler:
+        lg      %r15,__LC_SAVE_AREA+120 # load ksp
+        lghi    %r10,__LC_CREGS_SAVE_AREA
+        lctlg   %c0,%c15,0(%r10) # get new ctl regs
+        lghi    %r10,__LC_AREGS_SAVE_AREA
+        lam     %a0,%a15,0(%r10)
+        lmg     %r6,%r15,__SF_GPRS(%r15) # load registers from clone
+        stosm   __SF_EMPTY(%r15),0x04    # now we can turn dat on
+	jg      start_secondary
+#else
+/*
+ * If we do not run with SMP enabled, let the new CPU crash ...
+ */
+        .globl restart_int_handler
+restart_int_handler:
+        basr    %r1,0
+restart_base:
+        lpswe   restart_crash-restart_base(%r1)	# load the crash psw below
+        .align 8
+restart_crash:
+        .long  0x000a0000,0x00000000,0x00000000,0x00000000
+restart_go:
+#endif
+
+#ifdef CONFIG_CHECK_STACK
+/*
+ * The synchronous or the asynchronous stack overflowed. We are dead.
+ * No need to properly save the registers, we are going to panic anyway.
+ * Setup a pt_regs so that show_trace can provide a good call trace.
+ */
+stack_overflow:
+	lg	%r15,__LC_PANIC_STACK	# change to panic stack
+	aghi	%r15,-SP_SIZE		# reserve pt_regs frame on panic stack
+	mvc	SP_PSW(16,%r15),0(%r12)	# move user PSW to stack
+	stmg	%r0,%r11,SP_R0(%r15)	# store gprs %r0-%r11 to kernel stack
+	la	%r1,__LC_SAVE_AREA
+	chi	%r12,__LC_SVC_OLD_PSW	# sync entries saved %r12-%r15 at
+	je	0f			#  __LC_SAVE_AREA, async entries
+	chi	%r12,__LC_PGM_OLD_PSW	#  at __LC_SAVE_AREA+16
+	je	0f
+	la	%r1,__LC_SAVE_AREA+16
+0:	mvc	SP_R12(32,%r15),0(%r1)  # move %r12-%r15 to stack
+        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain
+        la      %r2,SP_PTREGS(%r15)	# load pt_regs
+	jg	kernel_stack_overflow
+#endif
+
+cleanup_table_system_call:		# [start,end) pairs of critical ranges
+	.quad	system_call, sysc_do_svc
+cleanup_table_sysc_return:
+	.quad	sysc_return, sysc_leave
+cleanup_table_sysc_leave:
+	.quad	sysc_leave, sysc_work_loop
+cleanup_table_sysc_work_loop:
+	.quad	sysc_work_loop, sysc_reschedule
+
+cleanup_critical:			# %r12 -> old psw of the interruption
+	clc	8(8,%r12),BASED(cleanup_table_system_call)
+	jl	0f
+	clc	8(8,%r12),BASED(cleanup_table_system_call+8)
+	jl	cleanup_system_call	# interrupted inside system_call entry
+0:
+	clc	8(8,%r12),BASED(cleanup_table_sysc_return)
+	jl	0f
+	clc	8(8,%r12),BASED(cleanup_table_sysc_return+8)
+	jl	cleanup_sysc_return	# interrupted in sysc_return
+0:
+	clc	8(8,%r12),BASED(cleanup_table_sysc_leave)
+	jl	0f
+	clc	8(8,%r12),BASED(cleanup_table_sysc_leave+8)
+	jl	cleanup_sysc_leave	# interrupted in sysc_leave
+0:
+	clc	8(8,%r12),BASED(cleanup_table_sysc_work_loop)
+	jl	0f
+	clc	8(8,%r12),BASED(cleanup_table_sysc_work_loop+8)
+	jl	cleanup_sysc_leave	# interrupted in sysc_work_loop
+0:
+	br	%r14
+
+cleanup_system_call:
+	mvc	__LC_RETURN_PSW(16),0(%r12)	# default: resume at old psw
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	clc	__LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+8)
+	jh	0f
+	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
+0:	clc	__LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+16)
+	jhe	cleanup_vtime		# past sysc_vtime: regs already saved
+#endif
+	clc	__LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn)
+	jh	0f
+	mvc	__LC_SAVE_AREA(32),__LC_SAVE_AREA+32
+0:	stg	%r13,__LC_SAVE_AREA+40
+	SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+	stg	%r15,__LC_SAVE_AREA+56
+	llgh	%r7,__LC_SVC_INT_CODE	# redo the svc number load
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+cleanup_vtime:
+	clc	__LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24)
+	jhe	cleanup_stime
+	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
+	jz	cleanup_novtime
+	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
+cleanup_stime:
+	clc	__LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+32)
+	jh	cleanup_update
+	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+cleanup_update:
+	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+cleanup_novtime:
+#endif
+	mvc	__LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8)	# -> sysc_do_svc
+	la	%r12,__LC_RETURN_PSW
+	br	%r14
+cleanup_system_call_insn:
+	.quad	sysc_saveall
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	.quad   system_call
+	.quad   sysc_vtime
+	.quad   sysc_stime
+	.quad   sysc_update
+#endif
+
+cleanup_sysc_return:
+	mvc	__LC_RETURN_PSW(8),0(%r12)	# keep psw mask of interruption
+	mvc	__LC_RETURN_PSW+8(8),BASED(cleanup_table_sysc_return)	# restart at sysc_return
+	la	%r12,__LC_RETURN_PSW
+	br	%r14
+
+cleanup_sysc_leave:
+	clc	8(8,%r12),BASED(cleanup_sysc_leave_insn)	# at final lpswe ?
+	je	0f
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	mvc	__LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
+	clc	8(8,%r12),BASED(cleanup_sysc_leave_insn+8)
+	je	0f
+#endif
+	mvc	__LC_RETURN_PSW(16),SP_PSW(%r15)	# redo the register restore
+	mvc	__LC_SAVE_AREA+32(32),SP_R12(%r15)
+	lmg	%r0,%r11,SP_R0(%r15)
+	lg	%r15,SP_R15(%r15)
+0:	la	%r12,__LC_RETURN_PSW
+	br	%r14
+cleanup_sysc_leave_insn:
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	.quad	sysc_leave + 16
+#endif
+	.quad	sysc_leave + 12
+
+/*
+ * Integer constants
+ */
+               .align 4
+.Lconst:
+.Lc_pactive:   .long  PREEMPT_ACTIVE
+.Lnr_syscalls: .long  NR_syscalls
+.L0x0130:      .short 0x130
+.L0x0140:      .short 0x140
+.L0x0150:      .short 0x150
+.L0x0160:      .short 0x160
+.L0x0170:      .short 0x170
+.Lcritical_start:
+               .quad  __critical_start
+.Lcritical_end:
+               .quad  __critical_end
+
+#define SYSCALL(esa,esame,emu)	.long esame
+	.globl  sys_call_table
+sys_call_table:		# 32-bit entries, loaded with lgf (see sysc_tracesys)
+#include "syscalls.S"
+#undef SYSCALL
+
+#ifdef CONFIG_S390_SUPPORT
+
+#define SYSCALL(esa,esame,emu)	.long emu
+	.globl  sys_call_table_emu
+sys_call_table_emu:	# table used for 31 bit emulation (compat) svcs
+#include "syscalls.S"
+#undef SYSCALL
+#endif
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
new file mode 100644
index 0000000..b804c55
--- /dev/null
+++ b/arch/s390/kernel/head.S
@@ -0,0 +1,772 @@
+/*
+ *  arch/s390/kernel/head.S
+ *
+ *  S390 version
+ *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Hartmut Penner (hp@de.ibm.com),
+ *               Martin Schwidefsky (schwidefsky@de.ibm.com),
+ *               Rob van der Heij (rvdhei@iae.nl)
+ *
+ * There are 5 different IPL methods
+ *  1) load the image directly into ram at address 0 and do an PSW restart
+ *  2) linload will load the image from address 0x10000 to memory 0x10000
+ *     and start the code thru LPSW 0x0008000080010000 (VM only, deprecated)
+ *  3) generate the tape ipl header, store the generated image on a tape
+ *     and ipl from it
+ *     In case of SL tape you need to IPL 5 times to get past VOL1 etc
+ *  4) generate the vm reader ipl header, move the generated image to the
+ *     VM reader (use option NOH!) and do a ipl from reader (VM only)
+ *  5) direct call of start by the SALIPL loader
+ *  We use the cpuid to distinguish between VM and native ipl
+ *  params for kernel are pushed to 0x10400 (see setup.h)
+
+    Changes: 
+    Oct 25 2000 <rvdheij@iae.nl>
+	added code to skip HDR and EOF to allow SL tape IPL (5 retries)
+	changed first CCW from rewind to backspace block
+
+ */
+
+#include <linux/config.h>
+#include <asm/setup.h>
+#include <asm/lowcore.h>
+#include <asm/offsets.h>
+#include <asm/thread_info.h>
+#include <asm/page.h>
+
+#ifndef CONFIG_IPL
+        .org   0
+        .long  0x00080000,0x80000000+startup   # Just a restart PSW
+#else
+#ifdef CONFIG_IPL_TAPE
+#define IPL_BS 1024
+        .org   0
+        .long  0x00080000,0x80000000+iplstart  # The first 24 bytes are loaded
+        .long  0x27000000,0x60000001           # by ipl to addresses 0-23.
+        .long  0x02000000,0x20000000+IPL_BS    # (a PSW and two CCWs).
+        .long  0x00000000,0x00000000           # external old psw
+        .long  0x00000000,0x00000000           # svc old psw
+        .long  0x00000000,0x00000000           # program check old psw
+        .long  0x00000000,0x00000000           # machine check old psw
+        .long  0x00000000,0x00000000           # io old psw
+        .long  0x00000000,0x00000000
+        .long  0x00000000,0x00000000
+        .long  0x00000000,0x00000000
+        .long  0x000a0000,0x00000058           # external new psw
+        .long  0x000a0000,0x00000060           # svc new psw
+        .long  0x000a0000,0x00000068           # program check new psw
+        .long  0x000a0000,0x00000070           # machine check new psw
+        .long  0x00080000,0x80000000+.Lioint   # io new psw
+
+        .org   0x100
+#
+# subroutine for loading from tape
+# Parameters:	
+#  R1 = device number
+#  R2 = load address
+.Lloader:	
+        st    %r14,.Lldret
+        la    %r3,.Lorbread                    # r3 = address of orb 
+	la    %r5,.Lirb                        # r5 = address of irb
+        st    %r2,.Lccwread+4                  # initialize CCW data addresses
+        lctl  %c6,%c6,.Lcr6                    # set IO subclass mask
+        slr   %r2,%r2                          # total size = 0
+.Lldlp:
+        la    %r6,3                            # 3 retries
+.Lssch:
+        ssch  0(%r3)                           # load chunk of IPL_BS bytes
+        bnz   .Llderr
+.Lw4end:
+        bas   %r14,.Lwait4io
+        tm    8(%r5),0x82                      # do we have a problem ?
+        bnz   .Lrecov
+        slr   %r7,%r7
+        icm   %r7,3,10(%r5)                    # get residual count
+        lcr   %r7,%r7
+        la    %r7,IPL_BS(%r7)                  # IPL_BS-residual=#bytes read
+        ar    %r2,%r7                          # add to total size
+        tm    8(%r5),0x01                      # found a tape mark ?
+        bnz   .Ldone
+        l     %r0,.Lccwread+4                  # update CCW data addresses
+        ar    %r0,%r7
+        st    %r0,.Lccwread+4                
+        b     .Lldlp
+.Ldone:
+        l     %r14,.Lldret
+        br    %r14                             # r2 contains the total size
+.Lrecov:
+        bas   %r14,.Lsense                     # do the sensing
+        bct   %r6,.Lssch                       # dec. retry count & branch
+        b     .Llderr
+#
+# Sense subroutine
+#
+.Lsense:
+        st    %r14,.Lsnsret
+        la    %r7,.Lorbsense              
+        ssch  0(%r7)                           # start sense command
+        bnz   .Llderr
+        bas   %r14,.Lwait4io
+        l     %r14,.Lsnsret
+        tm    8(%r5),0x82                      # do we have a problem ?
+        bnz   .Llderr
+        br    %r14
+#
+# Wait for interrupt subroutine
+#
+.Lwait4io:
+        lpsw  .Lwaitpsw                        # enabled wait for io int.
+.Lioint:
+        c     %r1,0xb8                         # compare subchannel number
+        bne   .Lwait4io
+        tsch  0(%r5)                           # store interrupt response block
+        slr   %r0,%r0
+        tm    8(%r5),0x82                      # do we have a problem ?
+        bnz   .Lwtexit
+        tm    8(%r5),0x04                      # got device end ?
+        bz    .Lwait4io
+.Lwtexit:
+        br    %r14
+.Llderr:
+        lpsw  .Lcrash              
+
+        .align 8
+.Lorbread:
+	.long  0x00000000,0x0080ff00,.Lccwread
+        .align 8
+.Lorbsense:
+        .long  0x00000000,0x0080ff00,.Lccwsense
+        .align 8
+.Lccwread:
+        .long  0x02200000+IPL_BS,0x00000000
+.Lccwsense:
+        .long  0x04200001,0x00000000
+.Lwaitpsw:
+	.long  0x020a0000,0x80000000+.Lioint
+
+.Lirb:	.long  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+.Lcr6:  .long  0xff000000
+        .align 8
+.Lcrash:.long  0x000a0000,0x00000000
+.Lldret:.long  0
+.Lsnsret: .long 0
+#endif  /* CONFIG_IPL_TAPE */
+
+#ifdef CONFIG_IPL_VM
+#define IPL_BS 0x730
+        .org   0
+        .long  0x00080000,0x80000000+iplstart  # The first 24 bytes are loaded
+        .long  0x02000018,0x60000050           # by ipl to addresses 0-23.
+        .long  0x02000068,0x60000050           # (a PSW and two CCWs).
+        .fill  80-24,1,0x40                    # bytes 24-79 are discarded !!
+        .long  0x020000f0,0x60000050           # The next 160 byte are loaded
+        .long  0x02000140,0x60000050           # to addresses 0x18-0xb7
+        .long  0x02000190,0x60000050           # They form the continuation
+        .long  0x020001e0,0x60000050           # of the CCW program started
+        .long  0x02000230,0x60000050           # by ipl and load the range
+        .long  0x02000280,0x60000050           # 0x0f0-0x730 from the image
+        .long  0x020002d0,0x60000050           # to the range 0x0f0-0x730
+        .long  0x02000320,0x60000050           # in memory. At the end of
+        .long  0x02000370,0x60000050           # the channel program the PSW
+        .long  0x020003c0,0x60000050           # at location 0 is loaded.
+        .long  0x02000410,0x60000050           # Initial processing starts
+        .long  0x02000460,0x60000050           # at 0xf0 = iplstart.
+        .long  0x020004b0,0x60000050
+        .long  0x02000500,0x60000050
+        .long  0x02000550,0x60000050
+        .long  0x020005a0,0x60000050
+        .long  0x020005f0,0x60000050
+        .long  0x02000640,0x60000050
+        .long  0x02000690,0x60000050
+        .long  0x020006e0,0x20000050
+
+        .org   0xf0
+#
+# subroutine for loading cards from the reader
+#
+.Lloader:	
+	la    %r3,.Lorb                        # r3 = address of orb
+	la    %r5,.Lirb                        # r5 = address of irb
+        la    %r6,.Lccws              
+        la    %r7,20                           # 20 CCWs in .Lccws
+.Linit:
+        st    %r2,4(%r6)                       # initialize CCW data addresses
+        la    %r2,0x50(%r2)
+        la    %r6,8(%r6)
+        bct   7,.Linit                         # decrement %r7 (plain 7), loop
+
+        lctl  %c6,%c6,.Lcr6                    # set IO subclass mask
+	slr   %r2,%r2                          # total size = 0
+.Lldlp:
+        ssch  0(%r3)                           # load chunk of 1600 bytes
+        bnz   .Llderr
+.Lwait4irq:
+        mvc   __LC_IO_NEW_PSW(8),.Lnewpsw      # set up IO interrupt psw
+        lpsw  .Lwaitpsw              
+.Lioint:
+        c     %r1,0xb8                         # compare subchannel number
+	bne   .Lwait4irq
+	tsch  0(%r5)                           # store interrupt response block
+
+	slr   %r0,%r0
+	ic    %r0,8(%r5)                       # get device status
+	chi   %r0,8                            # channel end ?
+	be    .Lcont
+	chi   %r0,12                           # channel end + device end ?
+	be    .Lcont
+
+        l     %r0,4(%r5)
+        s     %r0,8(%r3)                       # r0/8 = number of ccws executed
+        mhi   %r0,10                           # *10 = number of bytes in ccws
+        lh    %r3,10(%r5)                      # get residual count
+        sr    %r0,%r3                          # #ccws*80-residual=#bytes read
+	ar    %r2,%r0
+	
+        br    %r14                             # r2 contains the total size
+
+.Lcont:
+	ahi   %r2,0x640                        # add 0x640 to total size
+        la    %r6,.Lccws             
+        la    %r7,20                           # 20 CCWs in .Lccws
+.Lincr:
+        l     %r0,4(%r6)                       # update CCW data addresses
+        ahi   %r0,0x640
+        st    %r0,4(%r6)
+        ahi   %r6,8
+        bct   7,.Lincr                         # decrement %r7 (plain 7), loop
+
+        b     .Lldlp
+.Llderr:
+        lpsw  .Lcrash              
+
+        .align 8
+.Lorb:	.long  0x00000000,0x0080ff00,.Lccws
+.Lirb:	.long  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+.Lcr6:  .long  0xff000000
+.Lloadp:.long  0,0
+        .align 8
+.Lcrash:.long  0x000a0000,0x00000000
+.Lnewpsw:
+        .long  0x00080000,0x80000000+.Lioint
+.Lwaitpsw:
+        .long  0x020a0000,0x80000000+.Lioint
+
+        .align 8
+.Lccws: .rept  19
+        .long  0x02600050,0x00000000
+        .endr
+        .long  0x02200050,0x00000000
+#endif  /* CONFIG_IPL_VM */
+
+iplstart:
+        lh    %r1,0xb8                         # test if subchannel number
+        bct   %r1,.Lnoload                     #  is valid
+	l     %r1,0xb8                         # load ipl subchannel number
+        la    %r2,IPL_BS                       # load start address
+        bas   %r14,.Lloader                    # load rest of ipl image
+        l     %r12,.Lparm                      # pointer to parameter area
+        st    %r1,IPL_DEVICE-PARMAREA(%r12)    # store ipl device number
+
+#
+# load parameter file from ipl device
+#
+.Lagain1:
+ 	l     %r2,INITRD_START-PARMAREA(%r12)  # use ramdisk location as temp
+        bas   %r14,.Lloader                    # load parameter file
+        ltr   %r2,%r2                          # got anything ?
+        bz    .Lnopf
+	chi   %r2,895                          # truncate to max cmdline len
+	bnh   .Lnotrunc
+	la    %r2,895
+.Lnotrunc:
+	l     %r4,INITRD_START-PARMAREA(%r12)
+	clc   0(3,%r4),.L_hdr		       # if it is HDRx
+	bz    .Lagain1			       # skip dataset header
+	clc   0(3,%r4),.L_eof		       # if it is EOFx
+	bz    .Lagain1			       # skip dataset trailer
+        la    %r5,0(%r4,%r2)
+        lr    %r3,%r2
+.Lidebc:
+        tm    0(%r5),0x80                      # high order bit set ?
+        bo    .Ldocv                           #  yes -> convert from EBCDIC
+        ahi   %r5,-1
+        bct   %r3,.Lidebc
+        b     .Lnocv
+.Ldocv:
+        l     %r3,.Lcvtab
+        tr    0(256,%r4),0(%r3)                # convert parameters to ascii
+        tr    256(256,%r4),0(%r3)
+        tr    512(256,%r4),0(%r3)
+        tr    768(122,%r4),0(%r3)
+.Lnocv: la    %r3,COMMAND_LINE-PARMAREA(%r12)  # load adr. of command line
+	mvc   0(256,%r3),0(%r4)
+	mvc   256(256,%r3),256(%r4)
+	mvc   512(256,%r3),512(%r4)
+	mvc   768(122,%r3),768(%r4)
+        slr   %r0,%r0
+        b     .Lcntlp
+.Ldelspc:
+        ic    %r0,0(%r2,%r3)
+        chi   %r0,0x20                         # is it a space ?
+        be    .Lcntlp
+        ahi   %r2,1                            # keep the last non-space
+        b     .Leolp
+.Lcntlp:
+        brct  %r2,.Ldelspc                     # strip trailing spaces
+.Leolp:
+        slr   %r0,%r0
+        stc   %r0,0(%r2,%r3)                   # terminate buffer
+.Lnopf:
+
+#
+# load ramdisk from ipl device
+#	
+.Lagain2:
+ 	l     %r2,INITRD_START-PARMAREA(%r12)  # load adr. of ramdisk
+        bas   %r14,.Lloader                    # load ramdisk
+ 	st    %r2,INITRD_SIZE-PARMAREA(%r12)   # store size of ramdisk
+        ltr   %r2,%r2
+        bnz   .Lrdcont
+        st    %r2,INITRD_START-PARMAREA(%r12)  # no ramdisk found, null it
+.Lrdcont:
+	l     %r2,INITRD_START-PARMAREA(%r12)
+
+	clc   0(3,%r2),.L_hdr		       # skip HDRx and EOFx 
+	bz    .Lagain2
+	clc   0(3,%r2),.L_eof
+	bz    .Lagain2
+
+#ifdef CONFIG_IPL_VM
+#
+# reset files in VM reader
+#
+        stidp __LC_CPUID                       # store cpuid
+	tm    __LC_CPUID,0xff                  # running VM ?
+	bno   .Lnoreset
+        la    %r2,.Lreset                      # CP command string below
+        lhi   %r3,26                           # length of the command
+        .long 0x83230008                       # diag 2,3,x'0008': CP command
+.Lnoreset:
+#endif
+	
+#
+# everything loaded, go for it
+#
+.Lnoload:
+        l     %r1,.Lstartup
+        br    %r1
+
+.Lparm:	.long  PARMAREA
+.Lstartup: .long startup
+.Lcvtab:.long  _ebcasc                         # ebcdic to ascii table
+.Lreset:.byte  0xc3,0xc8,0xc1,0xd5,0xc7,0xc5,0x40,0xd9,0xc4,0xd9,0x40
+        .byte  0xc1,0xd3,0xd3,0x40,0xd2,0xc5,0xc5,0xd7,0x40,0xd5,0xd6
+        .byte  0xc8,0xd6,0xd3,0xc4             # "change rdr all keep nohold"
+.L_eof: .long  0xc5d6c600       /* C'EOF' */
+.L_hdr: .long  0xc8c4d900       /* C'HDR' */
+
+#endif  /* CONFIG_IPL */
+
+#
+# SALIPL loader support. Based on a patch by Rob van der Heij.
+# This entry point is called directly from the SALIPL loader and
+# doesn't need a builtin ipl record.
+#
+        .org  0x800
+	.globl start
+start:
+	stm   %r0,%r15,0x07b0		# store registers
+	basr  %r12,%r0			# get base address
+.base:
+	l     %r11,.parm
+	l     %r8,.cmd			# pointer to command buffer
+
+	ltr   %r9,%r9			# do we have SALIPL parameters?
+	bp    .sk8x8
+
+	mvc   0(64,%r8),0x00b0		# copy saved registers
+	xc    64(240-64,%r8),0(%r8)	# remainder of buffer
+	tr    0(64,%r8),.lowcase	# fold case via .lowcase table
+	b     .gotr
+.sk8x8:
+	mvc   0(240,%r8),0(%r9)		# copy iplparms into buffer
+.gotr:
+	l     %r10,.tbl			# EBCDIC to ASCII table
+	tr    0(240,%r8),0(%r10)
+	stidp __LC_CPUID		# Are we running on VM maybe
+	cli   __LC_CPUID,0xff
+	bnz   .test
+	.long 0x83300060		# diag 3,0,x'0060' - storage size
+	b     .done
+.test:
+	mvc   0x68(8),.pgmnw		# set up pgm check handler
+	l     %r2,.fourmeg
+	lr    %r3,%r2
+	bctr  %r3,%r0			# 4M-1
+.loop:  iske  %r0,%r3			# probe storage key; pgm check -> .pgmx
+	ar    %r3,%r2			# NOTE(review): no branch back to .loop
+.pgmx:					#  here; upstream has 'bp .loop' - verify
+	sr    %r3,%r2			# last probe failed: step back 4M
+	la    %r3,1(%r3)		# memory size = last good addr + 1
+.done:
+        l     %r1,.memsize
+	st    %r3,0(%r1)		# store detected size in memory_size
+	slr   %r0,%r0
+	st    %r0,INITRD_SIZE-PARMAREA(%r11)	# no initrd for SALIPL boot
+	st    %r0,INITRD_START-PARMAREA(%r11)
+	j     startup                   # continue with startup
+.tbl:	.long _ebcasc			# translate table
+.cmd:	.long COMMAND_LINE		# address of command line buffer
+.parm:	.long PARMAREA
+.memsize: .long memory_size
+.fourmeg: .long 0x00400000      	# 4M
+.pgmnw:	.long 0x00080000,.pgmx
+.lowcase:
+	.byte 0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07 
+	.byte 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f
+	.byte 0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17 
+	.byte 0x18,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f
+	.byte 0x20,0x21,0x22,0x23,0x24,0x25,0x26,0x27 
+	.byte 0x28,0x29,0x2a,0x2b,0x2c,0x2d,0x2e,0x2f
+	.byte 0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37 
+	.byte 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f
+	.byte 0x40,0x41,0x42,0x43,0x44,0x45,0x46,0x47 
+	.byte 0x48,0x49,0x4a,0x4b,0x4c,0x4d,0x4e,0x4f
+	.byte 0x50,0x51,0x52,0x53,0x54,0x55,0x56,0x57 
+	.byte 0x58,0x59,0x5a,0x5b,0x5c,0x5d,0x5e,0x5f
+	.byte 0x60,0x61,0x62,0x63,0x64,0x65,0x66,0x67 
+	.byte 0x68,0x69,0x6a,0x6b,0x6c,0x6d,0x6e,0x6f
+	.byte 0x70,0x71,0x72,0x73,0x74,0x75,0x76,0x77 
+	.byte 0x78,0x79,0x7a,0x7b,0x7c,0x7d,0x7e,0x7f
+
+	.byte 0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87 
+	.byte 0x88,0x89,0x8a,0x8b,0x8c,0x8d,0x8e,0x8f
+	.byte 0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97 
+	.byte 0x98,0x99,0x9a,0x9b,0x9c,0x9d,0x9e,0x9f
+	.byte 0xa0,0xa1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7 
+	.byte 0xa8,0xa9,0xaa,0xab,0xac,0xad,0xae,0xaf
+	.byte 0xb0,0xb1,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7 
+	.byte 0xb8,0xb9,0xba,0xbb,0xbc,0xbd,0xbe,0xbf
+	.byte 0xc0,0x81,0x82,0x83,0x84,0x85,0x86,0x87	# .abcdefg 
+	.byte 0x88,0x89,0xca,0xcb,0xcc,0xcd,0xce,0xcf	# hi
+	.byte 0xd0,0x91,0x92,0x93,0x94,0x95,0x96,0x97 	# .jklmnop
+	.byte 0x98,0x99,0xda,0xdb,0xdc,0xdd,0xde,0xdf	# qr
+	.byte 0xe0,0xe1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7	# ..stuvwx
+	.byte 0xa8,0xa9,0xea,0xeb,0xec,0xed,0xee,0xef	# yz
+	.byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7 
+	.byte 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff
+
+#
+# startup-code at 0x10000, running in real mode
+# this is called either by the ipl loader or directly by PSW restart
+# or linload or SALIPL
+#
+        .org  0x10000
+startup:basr  %r13,0                     # get base
+.LPG1:  lctl  %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
+	la    %r12,_pstart-.LPG1(%r13)   # pointer to parameter area
+					 # move IPL device to lowcore
+        mvc   __LC_IPLDEV(4),IPL_DEVICE-PARMAREA(%r12)
+	
+#
+# clear bss memory
+#
+        l     %r2,.Lbss_bgn-.LPG1(%r13) # start of bss
+        l     %r3,.Lbss_end-.LPG1(%r13) # end of bss
+        sr    %r3,%r2                   # length of bss
+        sr    %r4,%r4                   #
+        sr    %r5,%r5                   # set src,length and pad to zero
+        sr    %r0,%r0                   #
+        mvcle %r2,%r4,0                 # clear mem
+        jo    .-4                       # branch back, if not finished
+
+	l     %r2,.Lrcp-.LPG1(%r13)	# Read SCP forced command word
+.Lservicecall:
+	stosm .Lpmask-.LPG1(%r13),0x01	# authorize ext interrupts
+
+	stctl %r0, %r0,.Lcr-.LPG1(%r13)	# get cr0
+	la    %r1,0x200			# set bit 22
+	o     %r1,.Lcr-.LPG1(%r13)	# or old cr0 with r1
+	st    %r1,.Lcr-.LPG1(%r13)
+	lctl  %r0, %r0,.Lcr-.LPG1(%r13)	# load modified cr0
+
+	mvc   __LC_EXT_NEW_PSW(8),.Lpcext-.LPG1(%r13) # set postcall psw
+	la    %r1, .Lsclph-.LPG1(%r13)
+	a     %r1,__LC_EXT_NEW_PSW+4	# set handler
+	st    %r1,__LC_EXT_NEW_PSW+4
+
+	la    %r4,_pstart-.LPG1(%r13)	# %r4 is our index for sccb stuff
+	la    %r1, .Lsccb-PARMAREA(%r4)	# our sccb
+	.insn rre,0xb2200000,%r2,%r1	# service call
+	ipm   %r1
+	srl   %r1,28			# get cc code
+	xr    %r3, %r3
+	chi   %r1,3
+	be    .Lfchunk-.LPG1(%r13)	# leave
+	chi   %r1,2
+	be    .Lservicecall-.LPG1(%r13)
+	lpsw  .Lwaitsclp-.LPG1(%r13)
+.Lsclph:
+	lh    %r1,.Lsccbr-PARMAREA(%r4)
+	chi   %r1,0x10			# 0x0010 is the success code
+	je    .Lprocsccb		# let's process the sccb
+	chi   %r1,0x1f0
+	bne   .Lfchunk-.LPG1(%r13)	# unhandled error code
+	c     %r2, .Lrcp-.LPG1(%r13)	# Did we try Read SCP forced
+	bne   .Lfchunk-.LPG1(%r13)	# if no, give up
+	l     %r2, .Lrcp2-.LPG1(%r13)	# try with Read SCP
+	b     .Lservicecall-.LPG1(%r13)
+.Lprocsccb:
+	lh    %r1,.Lscpincr1-PARMAREA(%r4) # use this one if != 0
+	chi   %r1,0x00
+	jne   .Lscnd
+	l     %r1,.Lscpincr2-PARMAREA(%r4) # otherwise use this one
+.Lscnd:
+	xr    %r3,%r3			# same logic
+	ic    %r3,.Lscpa1-PARMAREA(%r4)
+	chi   %r3,0x00
+	jne   .Lcompmem
+	l     %r3,.Lscpa2-PARMAREA(%r13)
+.Lcompmem:
+	mr    %r2,%r1			# mem in MB on 128-bit
+	l     %r1,.Lonemb-.LPG1(%r13)
+	mr    %r2,%r1			# mem size in bytes in %r3
+	b     .Lfchunk-.LPG1(%r13)
+
+.Lpmask:
+	.byte 0
+.align 8
+.Lpcext:.long  0x00080000,0x80000000
+.Lcr:
+	.long 0x00			# place holder for cr0
+.Lwaitsclp:
+	.long 0x020A0000
+	.long .Lsclph
+.Lrcp:
+	.int 0x00120001			# Read SCP forced code
+.Lrcp2:
+	.int 0x00020001			# Read SCP code
+.Lonemb:
+	.int 0x100000
+.Lfchunk:
+
+#
+# find memory chunks.
+#
+	lr    %r9,%r3			 # end of mem
+	mvc   __LC_PGM_NEW_PSW(8),.Lpcmem-.LPG1(%r13)
+	la    %r1,1                      # test in increments of 128KB
+	sll   %r1,17
+	l     %r3,.Lmchunk-.LPG1(%r13)   # get pointer to memory_chunk array
+	slr   %r4,%r4                    # set start of chunk to zero
+	slr   %r5,%r5                    # set end of chunk to zero
+	slr   %r6,%r6			 # set access code to zero
+	la    %r10, MEMORY_CHUNKS	 # number of chunks
+.Lloop:
+	tprot 0(%r5),0			 # test protection of first byte
+	ipm   %r7
+	srl   %r7,28
+	clr   %r6,%r7			 # compare cc with last access code
+	be    .Lsame-.LPG1(%r13)
+	b     .Lchkmem-.LPG1(%r13)
+.Lsame:
+	ar    %r5,%r1			 # add 128KB to end of chunk
+	bno   .Lloop-.LPG1(%r13)	 # r1 < 0x80000000 -> loop
+.Lchkmem:				 # > 2GB or tprot got a program check
+	clr   %r4,%r5			 # chunk size > 0?
+	be    .Lchkloop-.LPG1(%r13)
+	st    %r4,0(%r3)		 # store start address of chunk
+	lr    %r0,%r5
+	slr   %r0,%r4
+	st    %r0,4(%r3)		 # store size of chunk
+	st    %r6,8(%r3)		 # store type of chunk
+	la    %r3,12(%r3)
+	l     %r4,.Lmemsize-.LPG1(%r13)	 # address of variable memory_size
+	st    %r5,0(%r4)		 # store last end to memory size
+	ahi   %r10,-1			 # update chunk number
+.Lchkloop:
+	lr    %r6,%r7			 # set access code to last cc
+	# we got an exception or we're starting a new
+	# chunk , we must check if we should
+	# still try to find valid memory (if we detected
+	# the amount of available storage), and if we
+	# have chunks left
+	xr    %r0,%r0
+	clr   %r0,%r9			 # did we detect memory?
+	je    .Ldonemem			 # if not, leave
+	chi   %r10,0			 # do we have chunks left?
+	je    .Ldonemem
+	alr   %r5,%r1			 # add 128KB to end of chunk
+	lr    %r4,%r5			 # potential new chunk
+	clr    %r5,%r9			 # should we go on?
+	jl     .Lloop
+.Ldonemem:		
+        l      %r12,.Lmflags-.LPG1(%r13) # get address of machine_flags
+#
+# find out if we are running under VM
+#
+        stidp  __LC_CPUID               # store cpuid
+	tm     __LC_CPUID,0xff          # running under VM ?
+	bno    .Lnovm-.LPG1(%r13)
+        oi     3(%r12),1                # set VM flag
+.Lnovm:
+        lh     %r0,__LC_CPUID+4         # get cpu version
+        chi    %r0,0x7490               # running on a P/390 ?
+        bne    .Lnop390-.LPG1(%r13)
+        oi     3(%r12),4                # set P/390 flag
+.Lnop390:
+
+#
+# find out if we have an IEEE fpu
+#
+        mvc    __LC_PGM_NEW_PSW(8),.Lpcfpu-.LPG1(%r13)
+	efpc   %r0,0                    # test IEEE extract fpc instruction
+        oi     3(%r12),2                # set IEEE fpu flag
+.Lchkfpu:
+
+#
+# find out if we have the CSP instruction
+#
+       mvc    __LC_PGM_NEW_PSW(8),.Lpccsp-.LPG1(%r13)
+       la     %r0,0
+       lr     %r1,%r0
+       la     %r2,4
+       csp    %r0,%r2                   # Test CSP instruction
+       oi     3(%r12),8                 # set CSP flag
+.Lchkcsp:
+
+#
+# find out if we have the MVPG instruction
+#
+       mvc    __LC_PGM_NEW_PSW(8),.Lpcmvpg-.LPG1(%r13)
+       sr     %r0,%r0
+       la     %r1,0
+       la     %r2,0
+       mvpg   %r1,%r2                   # Test MVPG instruction
+       oi     3(%r12),16                # set MVPG flag
+.Lchkmvpg:
+
+#
+# find out if we have the IDTE instruction
+#
+	mvc	__LC_PGM_NEW_PSW(8),.Lpcidte-.LPG1(%r13)
+	.long	0xb2b10000		# store facility list
+	tm	0xc8,0x08		# check bit for clearing-by-ASCE
+	bno	.Lchkidte-.LPG1(%r13)
+	lhi	%r1,2094
+	lhi	%r2,0
+	.long	0xb98e2001
+	oi	3(%r12),0x80		# set IDTE flag
+.Lchkidte:
+
+        lpsw  .Lentry-.LPG1(13)         # jump to _stext in primary-space,
+                                        # virtual and never return ...
+        .align 8
+.Lentry:.long  0x00080000,0x80000000 + _stext
+.Lctl:  .long  0x04b50002               # cr0: various things
+        .long  0                        # cr1: primary space segment table
+        .long  .Lduct                   # cr2: dispatchable unit control table
+        .long  0                        # cr3: instruction authorization
+        .long  0                        # cr4: instruction authorization
+        .long  0xffffffff               # cr5: primary-aste origin
+        .long  0                        # cr6:  I/O interrupts
+        .long  0                        # cr7:  secondary space segment table
+        .long  0                        # cr8:  access registers translation
+        .long  0                        # cr9:  tracing off
+        .long  0                        # cr10: tracing off
+        .long  0                        # cr11: tracing off
+        .long  0                        # cr12: tracing off
+        .long  0                        # cr13: home space segment table
+        .long  0xc0000000               # cr14: machine check handling off
+        .long  0                        # cr15: linkage stack operations
+.Lpcmem:.long  0x00080000,0x80000000 + .Lchkmem
+.Lpcfpu:.long  0x00080000,0x80000000 + .Lchkfpu
+.Lpccsp:.long  0x00080000,0x80000000 + .Lchkcsp
+.Lpcmvpg:.long 0x00080000,0x80000000 + .Lchkmvpg
+.Lpcidte:.long 0x00080000,0x80000000 + .Lchkidte
+.Lmemsize:.long memory_size
+.Lmchunk:.long memory_chunk
+.Lmflags:.long machine_flags
+.Lbss_bgn:  .long  __bss_start
+.Lbss_end:  .long  _end
+
+	.org PARMAREA-64
+.Lduct:	.long 0,0,0,0,0,0,0,0
+	.long 0,0,0,0,0,0,0,0
+
+#
+# params at 10400 (setup.h)
+#
+	.org   PARMAREA
+	.global _pstart
+_pstart:	
+        .long  0,0                      # IPL_DEVICE
+        .long  0,RAMDISK_ORIGIN         # INITRD_START
+        .long  0,RAMDISK_SIZE           # INITRD_SIZE
+
+        .org   COMMAND_LINE
+    	.byte  "root=/dev/ram0 ro"
+        .byte  0
+	.org   0x11000
+.Lsccb:
+	.hword 0x1000			# length, one page
+	.byte 0x00,0x00,0x00
+	.byte 0x80			# variable response bit set
+.Lsccbr:
+	.hword 0x00			# response code
+.Lscpincr1:
+	.hword 0x00
+.Lscpa1:
+	.byte 0x00
+	.fill 89,1,0
+.Lscpa2:
+	.int 0x00
+.Lscpincr2:
+	.quad 0x00
+	.fill 3984,1,0
+	.org 0x12000
+	.global _pend
+_pend:	
+
+#ifdef CONFIG_SHARED_KERNEL
+	.org   0x100000
+#endif
+
+#
+# startup-code, running in virtual mode
+#
+        .globl _stext
+_stext:	basr  %r13,0                    # get base
+.LPG2:
+#
+# Setup stack
+#
+        l     %r15,.Linittu-.LPG2(%r13)
+	mvc   __LC_CURRENT(4),__TI_task(%r15)
+        ahi   %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE
+        st    %r15,__LC_KERNEL_STACK    # set end of kernel stack
+        ahi   %r15,-96
+        xc    __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
+
+# check control registers
+        stctl  %c0,%c15,0(%r15)
+	oi     2(%r15),0x20             # enable sigp external interrupts
+	oi     0(%r15),0x10             # switch on low address protection
+        lctl   %c0,%c15,0(%r15)
+
+#
+        lam    0,15,.Laregs-.LPG2(%r13) # load access regs needed by uaccess
+        l      %r14,.Lstart-.LPG2(%r13)
+        basr   %r14,%r14                # call start_kernel
+#
+# We returned from start_kernel ?!? PANIK
+#
+        basr  %r13,0
+	lpsw  .Ldw-.(%r13)           # load disabled wait psw
+#
+            .align 8
+.Ldw:	    .long  0x000a0000,0x00000000
+.Linittu:   .long  init_thread_union
+.Lstart:    .long  start_kernel
+.Laregs:    .long  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
new file mode 100644
index 0000000..8366793
--- /dev/null
+++ b/arch/s390/kernel/head64.S
@@ -0,0 +1,769 @@
+/*
+ *  arch/s390/kernel/head.S
+ *
+ *  S390 version
+ *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Hartmut Penner (hp@de.ibm.com),
+ *               Martin Schwidefsky (schwidefsky@de.ibm.com),
+ *               Rob van der Heij (rvdhei@iae.nl)
+ *
+ * There are 5 different IPL methods
+ *  1) load the image directly into ram at address 0 and do an PSW restart
+ *  2) linload will load the image from address 0x10000 to memory 0x10000
+ *     and start the code thru LPSW 0x0008000080010000 (VM only, deprecated)
+ *  3) generate the tape ipl header, store the generated image on a tape
+ *     and ipl from it
+ *     In case of SL tape you need to IPL 5 times to get past VOL1 etc
+ *  4) generate the vm reader ipl header, move the generated image to the
+ *     VM reader (use option NOH!) and do a ipl from reader (VM only)
+ *  5) direct call of start by the SALIPL loader
+ *  We use the cpuid to distinguish between VM and native ipl
+ *  params for kernel are pushed to 0x10400 (see setup.h)
+
+    Changes: 
+    Okt 25 2000 <rvdheij@iae.nl>
+	added code to skip HDR and EOF to allow SL tape IPL (5 retries)
+	changed first CCW from rewind to backspace block
+
+ */
+
+#include <linux/config.h>
+#include <asm/setup.h>
+#include <asm/lowcore.h>
+#include <asm/offsets.h>
+#include <asm/thread_info.h>
+#include <asm/page.h>
+
+#ifndef CONFIG_IPL
+        .org   0
+        .long  0x00080000,0x80000000+startup   # Just a restart PSW
+#else
+#ifdef CONFIG_IPL_TAPE
+#define IPL_BS 1024
+        .org   0
+        .long  0x00080000,0x80000000+iplstart  # The first 24 bytes are loaded
+        .long  0x27000000,0x60000001           # by ipl to addresses 0-23.
+        .long  0x02000000,0x20000000+IPL_BS    # (a PSW and two CCWs).
+        .long  0x00000000,0x00000000           # external old psw
+        .long  0x00000000,0x00000000           # svc old psw
+        .long  0x00000000,0x00000000           # program check old psw
+        .long  0x00000000,0x00000000           # machine check old psw
+        .long  0x00000000,0x00000000           # io old psw
+        .long  0x00000000,0x00000000
+        .long  0x00000000,0x00000000
+        .long  0x00000000,0x00000000
+        .long  0x000a0000,0x00000058           # external new psw
+        .long  0x000a0000,0x00000060           # svc new psw
+        .long  0x000a0000,0x00000068           # program check new psw
+        .long  0x000a0000,0x00000070           # machine check new psw
+        .long  0x00080000,0x80000000+.Lioint   # io new psw
+
+        .org   0x100
+#
+# subroutine for loading from tape
+# Parameters:
+#  R1 = device number
+#  R2 = load address
+.Lloader:	
+        st    %r14,.Lldret
+        la    %r3,.Lorbread                    # r3 = address of orb 
+	la    %r5,.Lirb                        # r5 = address of irb
+        st    %r2,.Lccwread+4                  # initialize CCW data addresses
+        lctl  %c6,%c6,.Lcr6               
+        slr   %r2,%r2
+.Lldlp:
+        la    %r6,3                            # 3 retries
+.Lssch:
+        ssch  0(%r3)                           # load chunk of IPL_BS bytes
+        bnz   .Llderr
+.Lw4end:
+        bas   %r14,.Lwait4io
+        tm    8(%r5),0x82                      # do we have a problem ?
+        bnz   .Lrecov
+        slr   %r7,%r7
+        icm   %r7,3,10(%r5)                    # get residual count
+        lcr   %r7,%r7
+        la    %r7,IPL_BS(%r7)                  # IPL_BS-residual=#bytes read
+        ar    %r2,%r7                          # add to total size
+        tm    8(%r5),0x01                      # found a tape mark ?
+        bnz   .Ldone
+        l     %r0,.Lccwread+4                  # update CCW data addresses
+        ar    %r0,%r7
+        st    %r0,.Lccwread+4                
+        b     .Lldlp
+.Ldone:
+        l     %r14,.Lldret
+        br    %r14                             # r2 contains the total size
+.Lrecov:
+        bas   %r14,.Lsense                     # do the sensing
+        bct   %r6,.Lssch                       # dec. retry count & branch
+        b     .Llderr
+#
+# Sense subroutine
+#
+.Lsense:
+        st    %r14,.Lsnsret
+        la    %r7,.Lorbsense              
+        ssch  0(%r7)                           # start sense command
+        bnz   .Llderr
+        bas   %r14,.Lwait4io
+        l     %r14,.Lsnsret
+        tm    8(%r5),0x82                      # do we have a problem ?
+        bnz   .Llderr
+        br    %r14
+#
+# Wait for interrupt subroutine
+#
+.Lwait4io:
+        lpsw  .Lwaitpsw                 
+.Lioint:
+        c     %r1,0xb8                         # compare subchannel number
+        bne   .Lwait4io
+        tsch  0(%r5)
+        slr   %r0,%r0
+        tm    8(%r5),0x82                      # do we have a problem ?
+        bnz   .Lwtexit
+        tm    8(%r5),0x04                      # got device end ?
+        bz    .Lwait4io
+.Lwtexit:
+        br    %r14
+.Llderr:
+        lpsw  .Lcrash              
+
+        .align 8
+.Lorbread:
+	.long  0x00000000,0x0080ff00,.Lccwread
+        .align 8
+.Lorbsense:
+        .long  0x00000000,0x0080ff00,.Lccwsense
+        .align 8
+.Lccwread:
+        .long  0x02200000+IPL_BS,0x00000000
+.Lccwsense:
+        .long  0x04200001,0x00000000
+.Lwaitpsw:
+	.long  0x020a0000,0x80000000+.Lioint
+
+.Lirb:	.long  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+.Lcr6:  .long  0xff000000
+        .align 8
+.Lcrash:.long  0x000a0000,0x00000000
+.Lldret:.long  0
+.Lsnsret: .long 0
+#endif  /* CONFIG_IPL_TAPE */
+
+#ifdef CONFIG_IPL_VM
+#define IPL_BS 0x730
+        .org   0
+        .long  0x00080000,0x80000000+iplstart  # The first 24 bytes are loaded
+        .long  0x02000018,0x60000050           # by ipl to addresses 0-23.
+        .long  0x02000068,0x60000050           # (a PSW and two CCWs).
+        .fill  80-24,1,0x40                    # bytes 24-79 are discarded !!
+        .long  0x020000f0,0x60000050           # The next 160 byte are loaded
+        .long  0x02000140,0x60000050           # to addresses 0x18-0xb7
+        .long  0x02000190,0x60000050           # They form the continuation
+        .long  0x020001e0,0x60000050           # of the CCW program started
+        .long  0x02000230,0x60000050           # by ipl and load the range
+        .long  0x02000280,0x60000050           # 0x0f0-0x730 from the image
+        .long  0x020002d0,0x60000050           # to the range 0x0f0-0x730
+        .long  0x02000320,0x60000050           # in memory. At the end of
+        .long  0x02000370,0x60000050           # the channel program the PSW
+        .long  0x020003c0,0x60000050           # at location 0 is loaded.
+        .long  0x02000410,0x60000050           # Initial processing starts
+        .long  0x02000460,0x60000050           # at 0xf0 = iplstart.
+        .long  0x020004b0,0x60000050
+        .long  0x02000500,0x60000050
+        .long  0x02000550,0x60000050
+        .long  0x020005a0,0x60000050
+        .long  0x020005f0,0x60000050
+        .long  0x02000640,0x60000050
+        .long  0x02000690,0x60000050
+        .long  0x020006e0,0x20000050
+
+        .org   0xf0
+#
+# subroutine for loading cards from the reader
+#
+.Lloader:	
+	la    %r3,.Lorb                        # r3 = address of orb
+	la    %r5,.Lirb                        # r5 = address of irb
+        la    %r6,.Lccws              
+        la    %r7,20
+.Linit:
+        st    %r2,4(%r6)                       # initialize CCW data addresses
+        la    %r2,0x50(%r2)
+        la    %r6,8(%r6)
+        bct   7,.Linit
+
+        lctl  %c6,%c6,.Lcr6                    # set IO subclass mask
+	slr   %r2,%r2
+.Lldlp:
+        ssch  0(%r3)                           # load chunk of 1600 bytes
+        bnz   .Llderr
+.Lwait4irq:
+        mvc   0x78(8),.Lnewpsw                 # set up IO interrupt psw
+        lpsw  .Lwaitpsw              
+.Lioint:
+        c     %r1,0xb8                         # compare subchannel number
+	bne   .Lwait4irq
+	tsch  0(%r5)
+
+	slr   %r0,%r0
+	ic    %r0,8(%r5)                       # get device status
+	chi   %r0,8                            # channel end ?
+	be    .Lcont
+	chi   %r0,12                           # channel end + device end ?
+	be    .Lcont
+
+        l     %r0,4(%r5)
+        s     %r0,8(%r3)                       # r0/8 = number of ccws executed
+        mhi   %r0,10                           # *10 = number of bytes in ccws
+        lh    %r3,10(%r5)                      # get residual count
+        sr    %r0,%r3                          # #ccws*80-residual=#bytes read
+	ar    %r2,%r0
+	
+        br    %r14                             # r2 contains the total size
+
+.Lcont:
+	ahi   %r2,0x640                        # add 0x640 to total size
+        la    %r6,.Lccws             
+        la    %r7,20
+.Lincr:
+        l     %r0,4(%r6)                       # update CCW data addresses
+        ahi   %r0,0x640
+        st    %r0,4(%r6)
+        ahi   %r6,8
+        bct   7,.Lincr
+
+        b     .Lldlp
+.Llderr:
+        lpsw  .Lcrash              
+
+        .align 8
+.Lorb:	.long  0x00000000,0x0080ff00,.Lccws
+.Lirb:	.long  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+.Lcr6:  .long  0xff000000
+.Lloadp:.long  0,0
+        .align 8
+.Lcrash:.long  0x000a0000,0x00000000
+.Lnewpsw:
+        .long  0x00080000,0x80000000+.Lioint
+.Lwaitpsw:
+        .long  0x020a0000,0x80000000+.Lioint
+
+        .align 8
+.Lccws: .rept  19
+        .long  0x02600050,0x00000000
+        .endr
+        .long  0x02200050,0x00000000
+#endif  /* CONFIG_IPL_VM */
+
+iplstart:
+        lh    %r1,0xb8                         # test if subchannel number
+        bct   %r1,.Lnoload                     #  is valid
+	l     %r1,0xb8                         # load ipl subchannel number
+        la    %r2,IPL_BS                       # load start address
+        bas   %r14,.Lloader                    # load rest of ipl image
+        larl  %r12,_pstart                     # pointer to parameter area
+        st    %r1,IPL_DEVICE+4-PARMAREA(%r12)  # store ipl device number
+
+#
+# load parameter file from ipl device
+#
+.Lagain1:
+ 	l     %r2,INITRD_START+4-PARMAREA(%r12)# use ramdisk location as temp
+        bas   %r14,.Lloader                    # load parameter file
+        ltr   %r2,%r2                          # got anything ?
+        bz    .Lnopf
+	chi   %r2,895
+	bnh   .Lnotrunc
+	la    %r2,895
+.Lnotrunc:
+	l     %r4,INITRD_START+4-PARMAREA(%r12)
+ 	clc   0(3,%r4),.L_hdr		       # if it is HDRx
+ 	bz    .Lagain1			       # skip dataset header
+ 	clc   0(3,%r4),.L_eof		       # if it is EOFx
+ 	bz    .Lagain1			       # skip dataset trailer
+        la    %r5,0(%r4,%r2)
+        lr    %r3,%r2
+.Lidebc:
+        tm    0(%r5),0x80                      # high order bit set ?
+        bo    .Ldocv                           #  yes -> convert from EBCDIC
+        ahi   %r5,-1
+        bct   %r3,.Lidebc
+        b     .Lnocv
+.Ldocv:
+        l     %r3,.Lcvtab
+        tr    0(256,%r4),0(%r3)                # convert parameters to ascii
+        tr    256(256,%r4),0(%r3)
+        tr    512(256,%r4),0(%r3)
+        tr    768(122,%r4),0(%r3)
+.Lnocv: la    %r3,COMMAND_LINE-PARMAREA(%r12)  # load adr. of command line
+	mvc   0(256,%r3),0(%r4)
+	mvc   256(256,%r3),256(%r4)
+	mvc   512(256,%r3),512(%r4)
+	mvc   768(122,%r3),768(%r4)
+        slr   %r0,%r0
+        b     .Lcntlp
+.Ldelspc:
+        ic    %r0,0(%r2,%r3)
+        chi   %r0,0x20                         # is it a space ?
+        be    .Lcntlp
+        ahi   %r2,1
+        b     .Leolp
+.Lcntlp:
+        brct  %r2,.Ldelspc
+.Leolp:
+        slr   %r0,%r0
+        stc   %r0,0(%r2,%r3)                   # terminate buffer
+.Lnopf:
+
+#
+# load ramdisk from ipl device
+#
+.Lagain2:
+ 	l     %r2,INITRD_START+4-PARMAREA(%r12)# load adr. of ramdisk
+        bas   %r14,.Lloader                    # load ramdisk
+ 	st    %r2,INITRD_SIZE+4-PARMAREA(%r12) # store size of ramdisk
+        ltr   %r2,%r2
+        bnz   .Lrdcont
+        st    %r2,INITRD_START+4-PARMAREA(%r12)# no ramdisk found, null it
+.Lrdcont:
+	l     %r2,INITRD_START+4-PARMAREA(%r12)
+	clc   0(3,%r2),.L_hdr		       # skip HDRx and EOFx 
+	bz    .Lagain2
+	clc   0(3,%r2),.L_eof
+	bz    .Lagain2
+
+#ifdef CONFIG_IPL_VM
+#
+# reset files in VM reader
+#
+        stidp __LC_CPUID                       # store cpuid
+	tm    __LC_CPUID,0xff                  # running VM ?
+	bno   .Lnoreset
+        la    %r2,.Lreset              
+        lhi   %r3,26
+        .long 0x83230008
+.Lnoreset:
+#endif
+	
+#
+# everything loaded, go for it
+#
+.Lnoload:
+        l     %r1,.Lstartup
+        br    %r1
+
+.Lstartup: .long startup
+.Lcvtab:.long  _ebcasc                         # ebcdic to ascii table
+.Lreset:.byte  0xc3,0xc8,0xc1,0xd5,0xc7,0xc5,0x40,0xd9,0xc4,0xd9,0x40
+        .byte  0xc1,0xd3,0xd3,0x40,0xd2,0xc5,0xc5,0xd7,0x40,0xd5,0xd6
+        .byte  0xc8,0xd6,0xd3,0xc4             # "change rdr all keep nohold"
+.L_eof: .long  0xc5d6c600       /* C'EOF' */
+.L_hdr: .long  0xc8c4d900       /* C'HDR' */
+#endif  /* CONFIG_IPL */
+
+#
+# SALIPL loader support. Based on a patch by Rob van der Heij.
+# This entry point is called directly from the SALIPL loader and
+# doesn't need a builtin ipl record.
+#
+        .org  0x800
+	.globl start
+start:
+	stm   %r0,%r15,0x07b0		# store registers
+	basr  %r12,%r0
+.base:
+	l     %r11,.parm
+	l     %r8,.cmd			# pointer to command buffer
+
+	ltr   %r9,%r9			# do we have SALIPL parameters?
+	bp    .sk8x8
+
+	mvc   0(64,%r8),0x00b0		# copy saved registers
+	xc    64(240-64,%r8),0(%r8)	# remainder of buffer
+	tr    0(64,%r8),.lowcase	
+	b     .gotr
+.sk8x8:
+	mvc   0(240,%r8),0(%r9)		# copy iplparms into buffer
+.gotr:
+	l     %r10,.tbl			# EBCDIC to ASCII table
+	tr    0(240,%r8),0(%r10)
+	stidp __LC_CPUID		# Are we running on VM maybe
+	cli   __LC_CPUID,0xff
+	bnz   .test
+	.long 0x83300060		# diag 3,0,x'0060' - storage size
+	b     .done
+.test:
+	mvc   0x68(8),.pgmnw		# set up pgm check handler
+	l     %r2,.fourmeg
+	lr    %r3,%r2
+	bctr  %r3,%r0			# 4M-1
+.loop:  iske  %r0,%r3
+	ar    %r3,%r2
+.pgmx:
+	sr    %r3,%r2
+	la    %r3,1(%r3)
+.done:
+	l     %r1,.memsize
+	st    %r3,4(%r1)
+	slr   %r0,%r0
+	st    %r0,INITRD_SIZE+4-PARMAREA(%r11)
+	st    %r0,INITRD_START+4-PARMAREA(%r11)
+	j     startup                   # continue with startup
+.tbl:	.long _ebcasc			# translate table
+.cmd:	.long COMMAND_LINE		# address of command line buffer
+.parm:	.long PARMAREA
+.fourmeg: .long 0x00400000      	# 4M
+.pgmnw:	.long 0x00080000,.pgmx
+.memsize: .long memory_size
+.lowcase:
+	.byte 0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07 
+	.byte 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f
+	.byte 0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17 
+	.byte 0x18,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f
+	.byte 0x20,0x21,0x22,0x23,0x24,0x25,0x26,0x27 
+	.byte 0x28,0x29,0x2a,0x2b,0x2c,0x2d,0x2e,0x2f
+	.byte 0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37 
+	.byte 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f
+	.byte 0x40,0x41,0x42,0x43,0x44,0x45,0x46,0x47 
+	.byte 0x48,0x49,0x4a,0x4b,0x4c,0x4d,0x4e,0x4f
+	.byte 0x50,0x51,0x52,0x53,0x54,0x55,0x56,0x57 
+	.byte 0x58,0x59,0x5a,0x5b,0x5c,0x5d,0x5e,0x5f
+	.byte 0x60,0x61,0x62,0x63,0x64,0x65,0x66,0x67 
+	.byte 0x68,0x69,0x6a,0x6b,0x6c,0x6d,0x6e,0x6f
+	.byte 0x70,0x71,0x72,0x73,0x74,0x75,0x76,0x77 
+	.byte 0x78,0x79,0x7a,0x7b,0x7c,0x7d,0x7e,0x7f
+
+	.byte 0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87 
+	.byte 0x88,0x89,0x8a,0x8b,0x8c,0x8d,0x8e,0x8f
+	.byte 0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97 
+	.byte 0x98,0x99,0x9a,0x9b,0x9c,0x9d,0x9e,0x9f
+	.byte 0xa0,0xa1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7 
+	.byte 0xa8,0xa9,0xaa,0xab,0xac,0xad,0xae,0xaf
+	.byte 0xb0,0xb1,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7 
+	.byte 0xb8,0xb9,0xba,0xbb,0xbc,0xbd,0xbe,0xbf
+	.byte 0xc0,0x81,0x82,0x83,0x84,0x85,0x86,0x87	# .abcdefg 
+	.byte 0x88,0x89,0xca,0xcb,0xcc,0xcd,0xce,0xcf	# hi
+	.byte 0xd0,0x91,0x92,0x93,0x94,0x95,0x96,0x97 	# .jklmnop
+	.byte 0x98,0x99,0xda,0xdb,0xdc,0xdd,0xde,0xdf	# qr
+	.byte 0xe0,0xe1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7	# ..stuvwx
+	.byte 0xa8,0xa9,0xea,0xeb,0xec,0xed,0xee,0xef	# yz
+	.byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7 
+	.byte 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff
+
+#
+# startup-code at 0x10000, running in real mode
+# this is called either by the ipl loader or directly by PSW restart
+# or linload or SALIPL
+#
+        .org  0x10000
+startup:basr  %r13,0                     # get base
+.LPG1:  sll   %r13,1                     # remove high order bit
+        srl   %r13,1
+        lhi   %r1,1                      # mode 1 = esame
+        slr   %r0,%r0                    # set cpuid to zero
+        sigp  %r1,%r0,0x12               # switch to esame mode
+	sam64				 # switch to 64 bit mode
+	lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
+	larl  %r12,_pstart               # pointer to parameter area
+					 # move IPL device to lowcore
+        mvc   __LC_IPLDEV(4),IPL_DEVICE+4-PARMAREA(%r12)
+
+#
+# clear bss memory
+#
+	larl  %r2,__bss_start           # start of bss segment
+        larl  %r3,_end                  # end of bss segment
+        sgr   %r3,%r2                   # length of bss
+        sgr   %r4,%r4                   #
+        sgr   %r5,%r5                   # set src,length and pad to zero
+        mvcle %r2,%r4,0                 # clear mem
+        jo    .-4                       # branch back, if not finished
+
+	l     %r2,.Lrcp-.LPG1(%r13)	# Read SCP forced command word
+.Lservicecall:
+	stosm .Lpmask-.LPG1(%r13),0x01	# authorize ext interrupts
+
+	stctg %r0,%r0,.Lcr-.LPG1(%r13)	# get cr0
+	la    %r1,0x200			# set bit 22
+	og    %r1,.Lcr-.LPG1(%r13)	# or old cr0 with r1
+	stg   %r1,.Lcr-.LPG1(%r13)
+	lctlg %r0,%r0,.Lcr-.LPG1(%r13)	# load modified cr0
+
+	mvc   __LC_EXT_NEW_PSW(8),.Lpcmsk-.LPG1(%r13) # set postcall psw
+	larl  %r1,.Lsclph
+	stg   %r1,__LC_EXT_NEW_PSW+8	# set handler
+
+	larl  %r4,_pstart		# %r4 is our index for sccb stuff
+	la    %r1,.Lsccb-PARMAREA(%r4)	# our sccb
+	.insn rre,0xb2200000,%r2,%r1	# service call
+	ipm   %r1
+	srl   %r1,28			# get cc code
+	xr    %r3,%r3
+	chi   %r1,3
+	be    .Lfchunk-.LPG1(%r13)	# leave
+	chi   %r1,2
+	be    .Lservicecall-.LPG1(%r13)
+	lpsw  .Lwaitsclp-.LPG1(%r13)
+.Lsclph:
+	lh    %r1,.Lsccbr-PARMAREA(%r4)
+	chi   %r1,0x10			# 0x0010 is the success code
+	je    .Lprocsccb		# let's process the sccb
+	chi   %r1,0x1f0
+	bne   .Lfchunk-.LPG1(%r13)	# unhandled error code
+	c     %r2,.Lrcp-.LPG1(%r13)	# Did we try Read SCP forced
+	bne   .Lfchunk-.LPG1(%r13)	# if no, give up
+	l     %r2,.Lrcp2-.LPG1(%r13)	# try with Read SCP
+	b     .Lservicecall-.LPG1(%r13)
+.Lprocsccb:
+	lh    %r1,.Lscpincr1-PARMAREA(%r4) # use this one if != 0
+	chi   %r1,0x00
+	jne   .Lscnd
+	lg    %r1,.Lscpincr2-PARMAREA(%r4) # otherwise use this one
+.Lscnd:
+	xr    %r3,%r3			# same logic
+	ic    %r3,.Lscpa1-PARMAREA(%r4)
+	chi   %r3,0x00
+	jne   .Lcompmem
+	l     %r3,.Lscpa2-PARMAREA(%r13)
+.Lcompmem:
+	mlgr  %r2,%r1			# mem in MB on 128-bit
+	l     %r1,.Lonemb-.LPG1(%r13)
+	mlgr  %r2,%r1			# mem size in bytes in %r3
+	b     .Lfchunk-.LPG1(%r13)
+
+.Lpmask:
+	.byte 0
+	.align 8
+.Lcr:
+	.quad 0x00  # place holder for cr0
+.Lwaitsclp:
+	.long 0x020A0000
+	.quad .Lsclph
+.Lrcp:
+	.int 0x00120001 # Read SCP forced code
+.Lrcp2:
+	.int 0x00020001 # Read SCP code
+.Lonemb:
+	.int 0x100000
+
+.Lfchunk:
+					 # set program check new psw mask
+	mvc   __LC_PGM_NEW_PSW(8),.Lpcmsk-.LPG1(%r13)
+
+#
+# find memory chunks.
+#
+	lgr   %r9,%r3			 # end of mem
+	larl  %r1,.Lchkmem               # set program check address
+	stg   %r1,__LC_PGM_NEW_PSW+8
+	la    %r1,1                      # test in increments of 128KB
+	sllg  %r1,%r1,17
+	larl  %r3,memory_chunk
+	slgr  %r4,%r4                    # set start of chunk to zero
+	slgr  %r5,%r5                    # set end of chunk to zero
+	slr  %r6,%r6			 # set access code to zero
+	la    %r10,MEMORY_CHUNKS	 # number of chunks
+.Lloop:
+	tprot 0(%r5),0			 # test protection of first byte
+	ipm   %r7
+	srl   %r7,28
+	clr   %r6,%r7			 # compare cc with last access code
+	je    .Lsame
+	j     .Lchkmem
+.Lsame:
+	algr  %r5,%r1			 # add 128KB to end of chunk
+					 # no need to check here,
+	brc   12,.Lloop			 # this is the same chunk
+.Lchkmem:				 # > 16EB or tprot got a program check
+	clgr  %r4,%r5			 # chunk size > 0?
+	je    .Lchkloop
+	stg   %r4,0(%r3)		 # store start address of chunk
+	lgr   %r0,%r5
+	slgr  %r0,%r4
+	stg   %r0,8(%r3)		 # store size of chunk
+	st    %r6,20(%r3)		 # store type of chunk
+	la    %r3,24(%r3)
+	larl  %r8,memory_size
+	stg   %r5,0(%r8)                 # store memory size
+	ahi   %r10,-1			 # update chunk number
+.Lchkloop:
+	lr    %r6,%r7			 # set access code to last cc
+	# we got an exception or we're starting a new
+	# chunk , we must check if we should
+	# still try to find valid memory (if we detected
+	# the amount of available storage), and if we
+	# have chunks left
+	lghi  %r4,1
+	sllg  %r4,%r4,31
+	clgr  %r5,%r4
+	je    .Lhsaskip
+	xr    %r0, %r0
+	clgr  %r0, %r9			 # did we detect memory?
+	je    .Ldonemem			 # if not, leave
+	chi   %r10, 0			 # do we have chunks left?
+	je    .Ldonemem
+.Lhsaskip:
+	algr  %r5,%r1			 # add 128KB to end of chunk
+	lgr   %r4,%r5			 # potential new chunk
+	clgr  %r5,%r9			 # should we go on?
+	jl    .Lloop
+.Ldonemem:		
+
+	larl  %r12,machine_flags
+#
+# find out if we are running under VM
+#
+        stidp  __LC_CPUID               # store cpuid
+	tm     __LC_CPUID,0xff          # running under VM ?
+	bno    0f-.LPG1(%r13)
+        oi     7(%r12),1                # set VM flag
+0:      lh     %r0,__LC_CPUID+4         # get cpu version
+        chi    %r0,0x7490               # running on a P/390 ?
+        bne    1f-.LPG1(%r13)
+        oi     7(%r12),4                # set P/390 flag
+1:
+
+#
+# find out if we have the MVPG instruction
+#
+	la     %r1,0f-.LPG1(%r13)       # set program check address
+	stg    %r1,__LC_PGM_NEW_PSW+8
+	sgr    %r0,%r0
+	lghi   %r1,0
+	lghi   %r2,0
+	mvpg   %r1,%r2                  # test MVPG instruction
+	oi     7(%r12),16               # set MVPG flag
+0:
+
+#
+# find out if the diag 0x44 works in 64 bit mode
+#
+	la     %r1,0f-.LPG1(%r13)	# set program check address
+	stg    %r1,__LC_PGM_NEW_PSW+8
+	mvc    __LC_DIAG44_OPCODE(8),.Lnop-.LPG1(%r13)
+	diag   0,0,0x44			# test diag 0x44
+	oi     7(%r12),32		# set diag44 flag
+	mvc    __LC_DIAG44_OPCODE(8),.Ldiag44-.LPG1(%r13)
+0:	
+
+#
+# find out if we have the IDTE instruction
+#
+	la     %r1,0f-.LPG1(%r13)	# set program check address
+	stg    %r1,__LC_PGM_NEW_PSW+8
+	.long	0xb2b10000		# store facility list
+	tm	0xc8,0x08		# check bit for clearing-by-ASCE
+	bno	0f-.LPG1(%r13)
+	lhi	%r1,2094
+	lhi	%r2,0
+	.long	0xb98e2001
+	oi	7(%r12),0x80		# set IDTE flag
+0:
+
+        lpswe .Lentry-.LPG1(13)         # jump to _stext in primary-space,
+                                        # virtual and never return ...
+        .align 16
+.Lentry:.quad  0x0000000180000000,_stext
+.Lctl:  .quad  0x04b50002               # cr0: various things
+        .quad  0                        # cr1: primary space segment table
+        .quad  .Lduct                   # cr2: dispatchable unit control table
+        .quad  0                        # cr3: instruction authorization
+        .quad  0                        # cr4: instruction authorization
+        .quad  0xffffffffffffffff       # cr5: primary-aste origin
+        .quad  0                        # cr6:  I/O interrupts
+        .quad  0                        # cr7:  secondary space segment table
+        .quad  0                        # cr8:  access registers translation
+        .quad  0                        # cr9:  tracing off
+        .quad  0                        # cr10: tracing off
+        .quad  0                        # cr11: tracing off
+        .quad  0                        # cr12: tracing off
+        .quad  0                        # cr13: home space segment table
+        .quad  0xc0000000               # cr14: machine check handling off
+        .quad  0                        # cr15: linkage stack operations
+.Lpcmsk:.quad  0x0000000180000000
+.L4malign:.quad 0xffffffffffc00000
+.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8
+.Lnop:	.long  0x07000700
+.Ldiag44:.long 0x83000044
+
+	.org PARMAREA-64
+.Lduct:	.long 0,0,0,0,0,0,0,0
+	.long 0,0,0,0,0,0,0,0
+
+#
+# params at 10400 (setup.h)
+#
+	.org   PARMAREA
+	.global _pstart
+_pstart:
+	.quad  0                        # IPL_DEVICE
+        .quad  RAMDISK_ORIGIN           # INITRD_START
+        .quad  RAMDISK_SIZE             # INITRD_SIZE
+
+        .org   COMMAND_LINE
+    	.byte  "root=/dev/ram0 ro"
+        .byte  0
+	.org   0x11000
+.Lsccb:
+	.hword 0x1000			# length, one page
+	.byte 0x00,0x00,0x00
+	.byte 0x80			# variable response bit set
+.Lsccbr:
+	.hword 0x00			# response code
+.Lscpincr1:
+	.hword 0x00
+.Lscpa1:
+	.byte 0x00
+	.fill 89,1,0
+.Lscpa2:
+	.int 0x00
+.Lscpincr2:
+	.quad 0x00
+	.fill 3984,1,0
+	.org 0x12000
+	.global _pend
+_pend:	
+
+#ifdef CONFIG_SHARED_KERNEL
+	.org   0x100000
+#endif
+	
+#
+# startup-code, running in virtual mode
+#
+        .globl _stext
+_stext:	basr  %r13,0                    # get base
+.LPG2:
+#
+# Setup stack
+#
+	larl  %r15,init_thread_union
+	lg    %r14,__TI_task(%r15)      # cache current in lowcore
+	stg   %r14,__LC_CURRENT
+        aghi  %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE
+        stg   %r15,__LC_KERNEL_STACK    # set end of kernel stack
+        aghi  %r15,-160                 # reserve a 160 byte stack frame
+        xc    __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
+
+# check control registers
+        stctg  %c0,%c15,0(%r15)
+	oi     6(%r15),0x20             # enable sigp external interrupts
+	oi     4(%r15),0x10             # switch on low address protection
+        lctlg  %c0,%c15,0(%r15)
+
+#
+        lam    0,15,.Laregs-.LPG2(%r13) # load access regs needed by uaccess
+        brasl  %r14,start_kernel        # go to C code
+#
+# We returned from start_kernel ?!? PANIC
+#
+        basr  %r13,0
+	lpswe .Ldw-.(%r13)           # load disabled wait psw
+#
+            .align 8
+.Ldw:       .quad  0x0002000180000000,0x0000000000000000
+.Laregs:    .long  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+
diff --git a/arch/s390/kernel/init_task.c b/arch/s390/kernel/init_task.c
new file mode 100644
index 0000000..d73a740
--- /dev/null
+++ b/arch/s390/kernel/init_task.c
@@ -0,0 +1,44 @@
+/*
+ *  arch/s390/kernel/init_task.c
+ *
+ *  S390 version
+ *
+ *  Derived from "arch/i386/kernel/init_task.c"
+ */
+
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/init_task.h>
+#include <linux/mqueue.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+
+/* Boot-time instances of the per-process bookkeeping structures. */
+static struct fs_struct init_fs = INIT_FS;
+static struct files_struct init_files = INIT_FILES;
+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+/* The address space used by the initial task (and kernel threads). */
+struct mm_struct init_mm = INIT_MM(init_mm);
+
+EXPORT_SYMBOL(init_mm);
+
+/*
+ * Initial thread structure.
+ *
+ * We need to make sure that this is 8192-byte aligned due to the
+ * way process stacks are handled. This is done by having a special
+ * "init_task" linker map entry..
+ */
+union thread_union init_thread_union 
+	__attribute__((__section__(".data.init_task"))) =
+		{ INIT_THREAD_INFO(init_task) };
+
+/*
+ * Initial task structure.
+ *
+ * All other task structs will be allocated on slabs in fork.c
+ */
+struct task_struct init_task = INIT_TASK(init_task);
+
+EXPORT_SYMBOL(init_task);
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
new file mode 100644
index 0000000..480b6a5
--- /dev/null
+++ b/arch/s390/kernel/irq.c
@@ -0,0 +1,105 @@
+/*
+ *  arch/s390/kernel/irq.c
+ *
+ *  S390 version
+ *    Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ *
+ * This file contains interrupt related functions.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kernel_stat.h>
+#include <linux/interrupt.h>
+#include <linux/seq_file.h>
+#include <linux/cpu.h>
+
+/*
+ * show_interrupts is needed by /proc/interrupts.
+ */
+/*
+ * show_interrupts is needed by /proc/interrupts.
+ * Row 0 additionally prints the per-CPU header line; each row up to
+ * NR_IRQS prints one interrupt class with its counts.
+ */
+int show_interrupts(struct seq_file *p, void *v)
+{
+	static const char *intrclass_names[] = { "EXT", "I/O", };
+	int irq = *(loff_t *) v;
+	int cpu;
+
+	if (irq == 0) {
+		seq_puts(p, "           ");
+		for_each_online_cpu(cpu)
+			seq_printf(p, "CPU%d       ", cpu);
+		seq_putc(p, '\n');
+	}
+
+	if (irq >= NR_IRQS)
+		return 0;
+
+	seq_printf(p, "%s: ", intrclass_names[irq]);
+#ifndef CONFIG_SMP
+	seq_printf(p, "%10u ", kstat_irqs(irq));
+#else
+	for_each_online_cpu(cpu)
+		seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[irq]);
+#endif
+	seq_putc(p, '\n');
+	return 0;
+}
+
+/*
+ * For compatibility only. S/390 specific setup of interrupts et al. is
+ * done much later in init_channel_subsystem().
+ */
+void __init init_IRQ(void)
+{
+	/* nothing to do */
+}
+
+/*
+ * Switch to the asynchronous interrupt stack for softirq execution.
+ */
+extern void __do_softirq(void);
+
+asmlinkage void do_softirq(void)
+{
+	unsigned long flags, old, new;
+
+	/* Never run softirqs while still inside interrupt context. */
+	if (in_interrupt())
+		return;
+
+	local_irq_save(flags);
+
+	account_system_vtime(current);
+
+	local_bh_disable();
+
+	if (local_softirq_pending()) {
+		/* Get current stack pointer. */
+		asm volatile("la %0,0(15)" : "=a" (old));
+		/* Check against async. stack address range. */
+		new = S390_lowcore.async_stack;
+		if (((new - old) >> (PAGE_SHIFT + THREAD_ORDER)) != 0) {
+			/* Need to switch to the async. stack. */
+			new -= STACK_FRAME_OVERHEAD;
+			((struct stack_frame *) new)->back_chain = old;
+
+			/* Point %r15 at the async stack, call
+			 * __do_softirq, then restore the old %r15. */
+			asm volatile("   la    15,0(%0)\n"
+				     "   basr  14,%2\n"
+				     "   la    15,0(%1)\n"
+				     : : "a" (new), "a" (old),
+				         "a" (__do_softirq)
+				     : "0", "1", "2", "3", "4", "5", "14",
+				       "cc", "memory" );
+		} else
+			/* We are already on the async stack. */
+			__do_softirq();
+	}
+
+	account_system_vtime(current);
+
+	__local_bh_enable();
+
+	local_irq_restore(flags);
+}
+
+EXPORT_SYMBOL(do_softirq);
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
new file mode 100644
index 0000000..607d506
--- /dev/null
+++ b/arch/s390/kernel/module.c
@@ -0,0 +1,405 @@
+/*
+ *  arch/s390/kernel/module.c - Kernel module help for s390.
+ *
+ *  S390 version
+ *    Copyright (C) 2002, 2003 IBM Deutschland Entwicklung GmbH,
+ *			       IBM Corporation
+ *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
+ *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ *  based on i386 version
+ *    Copyright (C) 2001 Rusty Russell.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#include <linux/module.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(fmt , ...)
+#endif
+
+#ifndef CONFIG_ARCH_S390X
+#define PLT_ENTRY_SIZE 12
+#else /* CONFIG_ARCH_S390X */
+#define PLT_ENTRY_SIZE 20
+#endif /* CONFIG_ARCH_S390X */
+
+/*
+ * Allocate space for a module.  A zero-byte request yields NULL
+ * rather than a zero-sized vmalloc area.
+ */
+void *module_alloc(unsigned long size)
+{
+	return size ? vmalloc(size) : NULL;
+}
+
+/* Free memory returned from module_alloc. */
+void module_free(struct module *mod, void *module_region)
+{
+	/* vfree() tolerates NULL, so no check is needed here. */
+	vfree(module_region);
+	/* FIXME: If module_region == mod->init_region, trim exception
+           table entries. */
+}
+
+/*
+ * First pass over one RELA section: reserve GOT and PLT space for each
+ * symbol that is the target of a GOT- resp. PLT-type relocation.  The
+ * sizes accumulated in me->arch are consumed by
+ * module_frob_arch_sections().
+ */
+static inline void
+check_rela(Elf_Rela *rela, struct module *me)
+{
+	struct mod_arch_syminfo *info;
+
+	info = me->arch.syminfo + ELF_R_SYM (rela->r_info);
+	switch (ELF_R_TYPE (rela->r_info)) {
+	case R_390_GOT12:	/* 12 bit GOT offset.  */
+	case R_390_GOT16:	/* 16 bit GOT offset.  */
+	case R_390_GOT20:	/* 20 bit GOT offset.  */
+	case R_390_GOT32:	/* 32 bit GOT offset.  */
+	case R_390_GOT64:	/* 64 bit GOT offset.  */
+	case R_390_GOTENT:	/* 32 bit PC rel. to GOT entry shifted by 1. */
+	case R_390_GOTPLT12:	/* 12 bit offset to jump slot.	*/
+	case R_390_GOTPLT16:	/* 16 bit offset to jump slot.  */
+	case R_390_GOTPLT20:	/* 20 bit offset to jump slot.  */
+	case R_390_GOTPLT32:	/* 32 bit offset to jump slot.  */
+	case R_390_GOTPLT64:	/* 64 bit offset to jump slot.	*/
+	case R_390_GOTPLTENT:	/* 32 bit rel. offset to jump slot >> 1. */
+		/* Reserve one GOT slot per distinct symbol. */
+		if (info->got_offset == -1UL) {
+			info->got_offset = me->arch.got_size;
+			me->arch.got_size += sizeof(void*);
+		}
+		break;
+	case R_390_PLT16DBL:	/* 16 bit PC rel. PLT shifted by 1.  */
+	case R_390_PLT32DBL:	/* 32 bit PC rel. PLT shifted by 1.  */
+	case R_390_PLT32:	/* 32 bit PC relative PLT address.  */
+	case R_390_PLT64:	/* 64 bit PC relative PLT address.  */
+	case R_390_PLTOFF16:	/* 16 bit offset from GOT to PLT. */
+	case R_390_PLTOFF32:	/* 32 bit offset from GOT to PLT. */
+	case R_390_PLTOFF64:	/* 64 bit offset from GOT to PLT. */
+		/* Reserve one PLT entry per distinct symbol. */
+		if (info->plt_offset == -1UL) {
+			info->plt_offset = me->arch.plt_size;
+			me->arch.plt_size += PLT_ENTRY_SIZE;
+		}
+		break;
+	case R_390_COPY:
+	case R_390_GLOB_DAT:
+	case R_390_JMP_SLOT:
+	case R_390_RELATIVE:
+		/* Only needed if we want to support loading of 
+		   modules linked with -shared. */
+		break;
+	}
+}
+
+/*
+ * Account for GOT and PLT relocations. We can't add sections for
+ * got and plt but we can increase the core module size.
+ *
+ * Returns 0 on success, -ENOEXEC if the image has no symbol table,
+ * -ENOMEM if the per-symbol bookkeeping cannot be allocated (freed
+ * again in module_finalize()).
+ */
+int
+module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
+			  char *secstrings, struct module *me)
+{
+	Elf_Shdr *symtab;
+	Elf_Sym *symbols;
+	Elf_Rela *rela;
+	char *strings;
+	int nrela, i, j;
+
+	/* Find the symbol table; its sh_link names the string table. */
+	symtab = NULL;
+	for (i = 0; i < hdr->e_shnum; i++)
+		if (sechdrs[i].sh_type == SHT_SYMTAB)
+			symtab = sechdrs + i;
+	if (!symtab) {
+		printk(KERN_ERR "module %s: no symbol table\n", me->name);
+		return -ENOEXEC;
+	}
+
+	/* Allocate one syminfo structure per symbol. */
+	me->arch.nsyms = symtab->sh_size / sizeof(Elf_Sym);
+	me->arch.syminfo = vmalloc(me->arch.nsyms *
+				   sizeof(struct mod_arch_syminfo));
+	if (!me->arch.syminfo)
+		return -ENOMEM;
+	symbols = (void *) hdr + symtab->sh_offset;
+	strings = (void *) hdr + sechdrs[symtab->sh_link].sh_offset;
+	for (i = 0; i < me->arch.nsyms; i++) {
+		if (symbols[i].st_shndx == SHN_UNDEF &&
+		    strcmp(strings + symbols[i].st_name,
+			   "_GLOBAL_OFFSET_TABLE_") == 0)
+			/* "Define" it as absolute. */
+			symbols[i].st_shndx = SHN_ABS;
+		me->arch.syminfo[i].got_offset = -1UL;
+		me->arch.syminfo[i].plt_offset = -1UL;
+		me->arch.syminfo[i].got_initialized = 0;
+		me->arch.syminfo[i].plt_initialized = 0;
+	}
+
+	/* Search for got/plt relocations. */
+	me->arch.got_size = me->arch.plt_size = 0;
+	for (i = 0; i < hdr->e_shnum; i++) {
+		if (sechdrs[i].sh_type != SHT_RELA)
+			continue;
+		nrela = sechdrs[i].sh_size / sizeof(Elf_Rela);
+		rela = (void *) hdr + sechdrs[i].sh_offset;
+		for (j = 0; j < nrela; j++)
+			check_rela(rela + j, me);
+	}
+
+	/* Increase core size by size of got & plt and set start
+	   offsets for got and plt. */
+	me->core_size = ALIGN(me->core_size, 4);
+	me->arch.got_offset = me->core_size;
+	me->core_size += me->arch.got_size;
+	me->arch.plt_offset = me->core_size;
+	me->core_size += me->arch.plt_size;
+	return 0;
+}
+
+/*
+ * s390 modules are linked with RELA relocations only; plain REL
+ * sections are rejected.
+ */
+int
+apply_relocate(Elf_Shdr *sechdrs, const char *strtab, unsigned int symindex,
+	       unsigned int relsec, struct module *me)
+{
+	printk(KERN_ERR "module %s: RELOCATION unsupported\n", me->name);
+	return -ENOEXEC;
+}
+
+/*
+ * Apply a single RELA relocation at base + r_offset, materializing GOT
+ * slots and PLT stubs (in the areas reserved by
+ * module_frob_arch_sections()) on first use of a symbol.  Returns 0 on
+ * success or -ENOEXEC for an unknown relocation type.
+ */
+static inline int
+apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, 
+	   struct module *me)
+{
+	struct mod_arch_syminfo *info;
+	Elf_Addr loc, val;
+	int r_type, r_sym;
+
+	/* This is where to make the change */
+	loc = base + rela->r_offset;
+	/* This is the symbol it is referring to.  Note that all
+	   undefined symbols have been resolved.  */
+	r_sym = ELF_R_SYM(rela->r_info);
+	r_type = ELF_R_TYPE(rela->r_info);
+	info = me->arch.syminfo + r_sym;
+	val = symtab[r_sym].st_value;
+
+	switch (r_type) {
+	case R_390_8:		/* Direct 8 bit.   */
+	case R_390_12:		/* Direct 12 bit.  */
+	case R_390_16:		/* Direct 16 bit.  */
+	case R_390_20:		/* Direct 20 bit.  */
+	case R_390_32:		/* Direct 32 bit.  */
+	case R_390_64:		/* Direct 64 bit.  */
+		val += rela->r_addend;
+		if (r_type == R_390_8)
+			*(unsigned char *) loc = val;
+		else if (r_type == R_390_12)
+			*(unsigned short *) loc = (val & 0xfff) |
+				(*(unsigned short *) loc & 0xf000);
+		else if (r_type == R_390_16)
+			*(unsigned short *) loc = val;
+		else if (r_type == R_390_20)
+			/* 20 bit displacement: low 12 bits in bits 16-27,
+			   high 8 bits in bits 8-15 of the field. */
+			*(unsigned int *) loc =
+				(*(unsigned int *) loc & 0xf00000ff) |
+				(val & 0xfff) << 16 | (val & 0xff000) >> 4;
+		else if (r_type == R_390_32)
+			*(unsigned int *) loc = val;
+		else if (r_type == R_390_64)
+			*(unsigned long *) loc = val;
+		break;
+	case R_390_PC16:	/* PC relative 16 bit.  */
+	case R_390_PC16DBL:	/* PC relative 16 bit shifted by 1.  */
+	case R_390_PC32DBL:	/* PC relative 32 bit shifted by 1.  */
+	case R_390_PC32:	/* PC relative 32 bit.  */
+	case R_390_PC64:	/* PC relative 64 bit.	*/
+		val += rela->r_addend - loc;
+		if (r_type == R_390_PC16)
+			*(unsigned short *) loc = val;
+		else if (r_type == R_390_PC16DBL)
+			*(unsigned short *) loc = val >> 1;
+		else if (r_type == R_390_PC32DBL)
+			*(unsigned int *) loc = val >> 1;
+		else if (r_type == R_390_PC32)
+			*(unsigned int *) loc = val;
+		else if (r_type == R_390_PC64)
+			*(unsigned long *) loc = val;
+		break;
+	case R_390_GOT12:	/* 12 bit GOT offset.  */
+	case R_390_GOT16:	/* 16 bit GOT offset.  */
+	case R_390_GOT20:	/* 20 bit GOT offset.  */
+	case R_390_GOT32:	/* 32 bit GOT offset.  */
+	case R_390_GOT64:	/* 64 bit GOT offset.  */
+	case R_390_GOTENT:	/* 32 bit PC rel. to GOT entry shifted by 1. */
+	case R_390_GOTPLT12:	/* 12 bit offset to jump slot.	*/
+	case R_390_GOTPLT20:	/* 20 bit offset to jump slot.  */
+	case R_390_GOTPLT16:	/* 16 bit offset to jump slot.  */
+	case R_390_GOTPLT32:	/* 32 bit offset to jump slot.  */
+	case R_390_GOTPLT64:	/* 64 bit offset to jump slot.	*/
+	case R_390_GOTPLTENT:	/* 32 bit rel. offset to jump slot >> 1. */
+		/* Fill in the symbol's GOT slot on first use. */
+		if (info->got_initialized == 0) {
+			Elf_Addr *gotent;
+
+			gotent = me->module_core + me->arch.got_offset +
+				info->got_offset;
+			*gotent = val;
+			info->got_initialized = 1;
+		}
+		val = info->got_offset + rela->r_addend;
+		if (r_type == R_390_GOT12 ||
+		    r_type == R_390_GOTPLT12)
+			*(unsigned short *) loc = (val & 0xfff) |
+				(*(unsigned short *) loc & 0xf000);
+		else if (r_type == R_390_GOT16 ||
+			 r_type == R_390_GOTPLT16)
+			*(unsigned short *) loc = val;
+		else if (r_type == R_390_GOT20 ||
+			 r_type == R_390_GOTPLT20)
+			*(unsigned int *) loc =
+				(*(unsigned int *) loc & 0xf00000ff) |
+				(val & 0xfff) << 16 | (val & 0xff000) >> 4;
+		else if (r_type == R_390_GOT32 ||
+			 r_type == R_390_GOTPLT32)
+			*(unsigned int *) loc = val;
+		else if (r_type == R_390_GOTENT ||
+			 r_type == R_390_GOTPLTENT)
+			*(unsigned int *) loc =
+				(val + (Elf_Addr) me->module_core - loc) >> 1;
+		else if (r_type == R_390_GOT64 ||
+			 r_type == R_390_GOTPLT64)
+			*(unsigned long *) loc = val;
+		break;
+	case R_390_PLT16DBL:	/* 16 bit PC rel. PLT shifted by 1.  */
+	case R_390_PLT32DBL:	/* 32 bit PC rel. PLT shifted by 1.  */
+	case R_390_PLT32:	/* 32 bit PC relative PLT address.  */
+	case R_390_PLT64:	/* 64 bit PC relative PLT address.  */
+	case R_390_PLTOFF16:	/* 16 bit offset from GOT to PLT. */
+	case R_390_PLTOFF32:	/* 32 bit offset from GOT to PLT. */
+	case R_390_PLTOFF64:	/* 64 bit offset from GOT to PLT. */
+		/* Emit the symbol's PLT stub on first use. */
+		if (info->plt_initialized == 0) {
+			unsigned int *ip;
+			ip = me->module_core + me->arch.plt_offset +
+				info->plt_offset;
+#ifndef CONFIG_ARCH_S390X
+			ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
+			ip[1] = 0x100607f1;
+			ip[2] = val;
+#else /* CONFIG_ARCH_S390X */
+			ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
+			ip[1] = 0x100a0004;
+			ip[2] = 0x07f10000;
+			ip[3] = (unsigned int) (val >> 32);
+			ip[4] = (unsigned int) val;
+#endif /* CONFIG_ARCH_S390X */
+			info->plt_initialized = 1;
+		}
+		/* PLTOFF: offset relative to GOT; others: PC relative. */
+		if (r_type == R_390_PLTOFF16 ||
+		    r_type == R_390_PLTOFF32
+		    || r_type == R_390_PLTOFF64
+			)
+			val = me->arch.plt_offset - me->arch.got_offset +
+				info->plt_offset + rela->r_addend;
+		else
+			val =  (Elf_Addr) me->module_core +
+				me->arch.plt_offset + info->plt_offset + 
+				rela->r_addend - loc;
+		if (r_type == R_390_PLT16DBL)
+			*(unsigned short *) loc = val >> 1;
+		else if (r_type == R_390_PLTOFF16)
+			*(unsigned short *) loc = val;
+		else if (r_type == R_390_PLT32DBL)
+			*(unsigned int *) loc = val >> 1;
+		else if (r_type == R_390_PLT32 ||
+			 r_type == R_390_PLTOFF32)
+			*(unsigned int *) loc = val;
+		else if (r_type == R_390_PLT64 ||
+			 r_type == R_390_PLTOFF64)
+			*(unsigned long *) loc = val;
+		break;
+	case R_390_GOTOFF16:	/* 16 bit offset to GOT.  */
+	case R_390_GOTOFF32:	/* 32 bit offset to GOT.  */
+	case R_390_GOTOFF64:	/* 64 bit offset to GOT. */
+		val = val + rela->r_addend -
+			((Elf_Addr) me->module_core + me->arch.got_offset);
+		if (r_type == R_390_GOTOFF16)
+			*(unsigned short *) loc = val;
+		else if (r_type == R_390_GOTOFF32)
+			*(unsigned int *) loc = val;
+		else if (r_type == R_390_GOTOFF64)
+			*(unsigned long *) loc = val;
+		break;
+	case R_390_GOTPC:	/* 32 bit PC relative offset to GOT. */
+	case R_390_GOTPCDBL:	/* 32 bit PC rel. off. to GOT shifted by 1. */
+		val = (Elf_Addr) me->module_core + me->arch.got_offset +
+			rela->r_addend - loc;
+		if (r_type == R_390_GOTPC)
+			*(unsigned int *) loc = val;
+		else if (r_type == R_390_GOTPCDBL)
+			*(unsigned int *) loc = val >> 1;
+		break;
+	case R_390_COPY:
+	case R_390_GLOB_DAT:	/* Create GOT entry.  */
+	case R_390_JMP_SLOT:	/* Create PLT entry.  */
+	case R_390_RELATIVE:	/* Adjust by program base.  */
+		/* Only needed if we want to support loading of 
+		   modules linked with -shared. */
+		break;
+	default:
+		printk(KERN_ERR "module %s: Unknown relocation: %u\n",
+		       me->name, r_type);
+		return -ENOEXEC;
+	}
+	return 0;
+}
+
+/*
+ * Apply every RELA relocation of one section, using the GOT/PLT areas
+ * reserved by module_frob_arch_sections().
+ */
+int
+apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
+		   unsigned int symindex, unsigned int relsec,
+		   struct module *me)
+{
+	Elf_Sym *symbols;
+	Elf_Rela *rela, *rela_end;
+	Elf_Addr base;
+	int rc;
+
+	DEBUGP("Applying relocate section %u to %u\n",
+	       relsec, sechdrs[relsec].sh_info);
+	base = sechdrs[sechdrs[relsec].sh_info].sh_addr;
+	symbols = (Elf_Sym *) sechdrs[symindex].sh_addr;
+	rela = (Elf_Rela *) sechdrs[relsec].sh_addr;
+	rela_end = rela + sechdrs[relsec].sh_size / sizeof(Elf_Rela);
+
+	for (; rela < rela_end; rela++) {
+		rc = apply_rela(rela, base, symbols, me);
+		if (rc)
+			return rc;
+	}
+	return 0;
+}
+
+/*
+ * All relocations are done; the per-symbol GOT/PLT bookkeeping
+ * allocated in module_frob_arch_sections() is no longer needed.
+ */
+int module_finalize(const Elf_Ehdr *hdr,
+		    const Elf_Shdr *sechdrs,
+		    struct module *me)
+{
+	vfree(me->arch.syminfo);
+	return 0;
+}
+
+/* No arch-specific state to tear down when a module is unloaded. */
+void module_arch_cleanup(struct module *mod)
+{
+}
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
new file mode 100644
index 0000000..7aea25d
--- /dev/null
+++ b/arch/s390/kernel/process.c
@@ -0,0 +1,416 @@
+/*
+ *  arch/s390/kernel/process.c
+ *
+ *  S390 version
+ *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ *               Hartmut Penner (hp@de.ibm.com),
+ *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
+ *
+ *  Derived from "arch/i386/kernel/process.c"
+ *    Copyright (C) 1995, Linus Torvalds
+ */
+
+/*
+ * This file handles the architecture-dependent parts of process handling..
+ */
+
+#include <linux/config.h>
+#include <linux/compiler.h>
+#include <linux/cpu.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/reboot.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/irq.h>
+#include <asm/timer.h>
+
+asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
+
+/*
+ * Return the saved PC of a blocked thread; used by kernel/sched.c.
+ * resume() in entry.S does not create a new stack frame, it just
+ * stores the registers %r6-%r15 into the frame given by schedule().
+ * We therefore walk the backchain once to find the frame in which
+ * schedule() stored its return address.
+ */
+unsigned long thread_saved_pc(struct task_struct *tsk)
+{
+	struct stack_frame *sf;
+
+	sf = (struct stack_frame *) tsk->thread.ksp;
+	sf = (struct stack_frame *) sf->back_chain;
+	return sf->gprs[8];
+}
+
+/*
+ * Notifier chain for code that needs to know about CPUs going idle
+ * (CPU_IDLE) and becoming busy again (CPU_NOT_IDLE).
+ */
+static struct notifier_block *idle_chain;
+
+/* Add a callback to the idle notifier chain. */
+int register_idle_notifier(struct notifier_block *nb)
+{
+	return notifier_chain_register(&idle_chain, nb);
+}
+EXPORT_SYMBOL(register_idle_notifier);
+
+/* Remove a callback added with register_idle_notifier(). */
+int unregister_idle_notifier(struct notifier_block *nb)
+{
+	return notifier_chain_unregister(&idle_chain, nb);
+}
+EXPORT_SYMBOL(unregister_idle_notifier);
+
+/*
+ * Monitor call handler: undo what default_idle() set up (monitor call
+ * class 0) and tell the idle notifier chain that this CPU is busy again.
+ */
+void do_monitor_call(struct pt_regs *regs, long interruption_code)
+{
+	/* disable monitor call class 0 */
+	__ctl_clear_bit(8, 15);
+
+	notifier_call_chain(&idle_chain, CPU_NOT_IDLE,
+			    (void *)(long) smp_processor_id());
+}
+
+/*
+ * The idle loop on a S390...
+ */
+void default_idle(void)
+{
+	psw_t wait_psw;
+	unsigned long reg;
+	int cpu, rc;
+
+	/* If work is already pending, run the scheduler instead. */
+	local_irq_disable();
+        if (need_resched()) {
+		local_irq_enable();
+                schedule();
+                return;
+        }
+
+	/* CPU is going idle. */
+	cpu = smp_processor_id();
+	rc = notifier_call_chain(&idle_chain, CPU_IDLE, (void *)(long) cpu);
+	if (rc != NOTIFY_OK && rc != NOTIFY_DONE)
+		BUG();
+	if (rc != NOTIFY_OK) {
+		/* rc == NOTIFY_DONE: skip the wait state this time. */
+		local_irq_enable();
+		return;
+	}
+
+	/* enable monitor call class 0 */
+	__ctl_set_bit(8, 15);
+
+#ifdef CONFIG_HOTPLUG_CPU
+	if (cpu_is_offline(smp_processor_id()))
+		cpu_die();
+#endif
+
+	/* 
+	 * Wait for external, I/O or machine check interrupt and
+	 * switch off machine check bit after the wait has ended.
+	 */
+	wait_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK | PSW_MASK_WAIT |
+		PSW_MASK_IO | PSW_MASK_EXT;
+#ifndef CONFIG_ARCH_S390X
+	/* Store the continuation address into the wait PSW and load it;
+	 * after the wait, load a second PSW with the wait and machine
+	 * check bits (0x06 in mask byte 1) switched off. */
+	asm volatile (
+		"    basr %0,0\n"
+		"0:  la   %0,1f-0b(%0)\n"
+		"    st   %0,4(%1)\n"
+		"    oi   4(%1),0x80\n"
+		"    lpsw 0(%1)\n"
+		"1:  la   %0,2f-1b(%0)\n"
+		"    st   %0,4(%1)\n"
+		"    oi   4(%1),0x80\n"
+		"    ni   1(%1),0xf9\n"
+		"    lpsw 0(%1)\n"
+		"2:"
+		: "=&a" (reg) : "a" (&wait_psw) : "memory", "cc" );
+#else /* CONFIG_ARCH_S390X */
+	/* Same as above, with 64 bit PSW handling (larl/stg/lpswe). */
+	asm volatile (
+		"    larl  %0,0f\n"
+		"    stg   %0,8(%1)\n"
+		"    lpswe 0(%1)\n"
+		"0:  larl  %0,1f\n"
+		"    stg   %0,8(%1)\n"
+		"    ni    1(%1),0xf9\n"
+		"    lpswe 0(%1)\n"
+		"1:"
+		: "=&a" (reg) : "a" (&wait_psw) : "memory", "cc" );
+#endif /* CONFIG_ARCH_S390X */
+}
+
+/* The architecture idle loop: call default_idle() forever. */
+void cpu_idle(void)
+{
+	while (1)
+		default_idle();
+}
+
+/* Print CPU number, task identity and the register state for debugging. */
+void show_regs(struct pt_regs *regs)
+{
+	struct task_struct *tsk = current;
+
+        printk("CPU:    %d    %s\n", tsk->thread_info->cpu, print_tainted());
+        printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
+	       current->comm, current->pid, (void *) tsk,
+	       (void *) tsk->thread.ksp);
+
+	show_registers(regs);
+	/* Show stack backtrace if pt_regs is from kernel mode */
+	if (!(regs->psw.mask & PSW_MASK_PSTATE))
+		show_trace(0,(unsigned long *) regs->gprs[15]);
+}
+
+extern void kernel_thread_starter(void);
+
+/*
+ * Trampoline for kernel threads: move the argument from %r10 to %r2,
+ * call the thread function in %r9, and if it returns branch to the
+ * address in %r11 (do_exit) with exit code 0 in %r2.
+ */
+__asm__(".align 4\n"
+	"kernel_thread_starter:\n"
+	"    la    2,0(10)\n"
+	"    basr  14,9\n"
+	"    la    2,0\n"
+	"    br    11\n");
+
+/* Create a kernel thread running fn(arg); returns do_fork()'s result. */
+int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+{
+	struct pt_regs regs;
+
+	memset(&regs, 0, sizeof(regs));
+	regs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_IO | PSW_MASK_EXT;
+	regs.psw.addr = (unsigned long) kernel_thread_starter | PSW_ADDR_AMODE;
+	regs.gprs[9] = (unsigned long) fn;
+	regs.gprs[10] = (unsigned long) arg;
+	regs.gprs[11] = (unsigned long) do_exit;
+	regs.orig_gpr2 = -1;
+
+	/* Ok, create the new process.. */
+	return do_fork(flags | CLONE_VM | CLONE_UNTRACED,
+		       0, &regs, 0, NULL, NULL);
+}
+
+/*
+ * Free current thread data structures etc..
+ */
+void exit_thread(void)
+{
+	/* nothing arch-specific to release */
+}
+
+/* Drop the task's FPU state: clear the used-math and TIF_USEDFPU flags. */
+void flush_thread(void)
+{
+	clear_used_math();
+	clear_tsk_thread_flag(current, TIF_USEDFPU);
+}
+
+/* No arch-specific resources to release for a dead task. */
+void release_thread(struct task_struct *dead_task)
+{
+}
+
+/*
+ * Set up the kernel stack and thread structure of a newly forked task:
+ * build a fake stack frame so that resume() will "return" into
+ * ret_from_fork with the child's registers in place.
+ */
+int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp,
+	unsigned long unused,
+        struct task_struct * p, struct pt_regs * regs)
+{
+        struct fake_frame
+          {
+	    struct stack_frame sf;
+            struct pt_regs childregs;
+          } *frame;
+
+	/* Place the fake frame at the top of the child's kernel stack. */
+        frame = ((struct fake_frame *)
+		 (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
+        p->thread.ksp = (unsigned long) frame;
+	/* Store access registers to kernel stack of new process. */
+        frame->childregs = *regs;
+	frame->childregs.gprs[2] = 0;	/* child returns 0 on fork. */
+        frame->childregs.gprs[15] = new_stackp;
+        frame->sf.back_chain = 0;
+
+        /* new return point is ret_from_fork */
+        frame->sf.gprs[8] = (unsigned long) ret_from_fork;
+
+        /* fake return stack for resume(), don't go back to schedule */
+        frame->sf.gprs[9] = (unsigned long) frame;
+
+	/* Save access registers to new thread structure. */
+	save_access_regs(&p->thread.acrs[0]);
+
+#ifndef CONFIG_ARCH_S390X
+        /*
+	 * save fprs to current->thread.fp_regs to merge them with
+	 * the emulated registers and then copy the result to the child.
+	 */
+	save_fp_regs(&current->thread.fp_regs);
+	memcpy(&p->thread.fp_regs, &current->thread.fp_regs,
+	       sizeof(s390_fp_regs));
+        p->thread.user_seg = __pa((unsigned long) p->mm->pgd) | _SEGMENT_TABLE;
+	/* Set a new TLS ?  */
+	if (clone_flags & CLONE_SETTLS)
+		p->thread.acrs[0] = regs->gprs[6];
+#else /* CONFIG_ARCH_S390X */
+	/* Save the fpu registers to new thread structure. */
+	save_fp_regs(&p->thread.fp_regs);
+        p->thread.user_seg = __pa((unsigned long) p->mm->pgd) | _REGION_TABLE;
+	/* Set a new TLS ?  */
+	if (clone_flags & CLONE_SETTLS) {
+		if (test_thread_flag(TIF_31BIT)) {
+			p->thread.acrs[0] = (unsigned int) regs->gprs[6];
+		} else {
+			/* 64 bit TLS value is split across two access regs. */
+			p->thread.acrs[0] = (unsigned int)(regs->gprs[6] >> 32);
+			p->thread.acrs[1] = (unsigned int) regs->gprs[6];
+		}
+	}
+#endif /* CONFIG_ARCH_S390X */
+	/* start new process with ar4 pointing to the correct address space */
+	p->thread.mm_segment = get_fs();
+        /* Don't copy debug registers */
+        memset(&p->thread.per_info,0,sizeof(p->thread.per_info));
+
+        return 0;
+}
+
+/*
+ * fork() system call: the child keeps running on the same user
+ * stack pointer (gpr 15) as the parent.
+ */
+asmlinkage long sys_fork(struct pt_regs regs)
+{
+	return do_fork(SIGCHLD, regs.gprs[15], &regs, 0, NULL, NULL);
+}
+
+/*
+ * clone() system call.  The clone flags arrive in gpr 3, the new
+ * user stack pointer in orig_gpr2 and the parent/child tid pointers
+ * in gprs 4 and 5.  A zero stack pointer means "keep using the
+ * current user stack" (gpr 15).
+ */
+asmlinkage long sys_clone(struct pt_regs regs)
+{
+	unsigned long flags = regs.gprs[3];
+	unsigned long stack = regs.orig_gpr2;
+
+	if (stack == 0)
+		stack = regs.gprs[15];
+	return do_fork(flags, stack, &regs, 0,
+		       (int __user *) regs.gprs[4],
+		       (int __user *) regs.gprs[5]);
+}
+
+/*
+ * This is trivial, and on the face of it looks like it
+ * could equally well be done in user mode.
+ *
+ * Not so, for quite unobvious reasons - register pressure.
+ * In user mode vfork() cannot have a stack frame, and if
+ * done by calling the "clone()" system call directly, you
+ * do not have enough call-clobbered registers to hold all
+ * the information you need.
+ */
+asmlinkage long sys_vfork(struct pt_regs regs)
+{
+	/* Share the address space (CLONE_VM) and block the parent
+	 * until the child releases it (CLONE_VFORK). */
+	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
+		       regs.gprs[15], &regs, 0, NULL, NULL);
+}
+
+/*
+ * sys_execve() executes a new program.
+ *
+ * The path name arrives in orig_gpr2, argv in gpr 3 and envp in
+ * gpr 4.  On success the PT_DTRACE flag is cleared and the floating
+ * point control is reset for the fresh program image.
+ */
+asmlinkage long sys_execve(struct pt_regs regs)
+{
+        int error;
+        char * filename;
+
+        filename = getname((char __user *) regs.orig_gpr2);
+        error = PTR_ERR(filename);
+        if (IS_ERR(filename))
+                goto out;
+        error = do_execve(filename, (char __user * __user *) regs.gprs[3],
+			  (char __user * __user *) regs.gprs[4], &regs);
+	if (error == 0) {
+		/* The new image is no longer single-stepped via exec. */
+		task_lock(current);
+		current->ptrace &= ~PT_DTRACE;
+		task_unlock(current);
+		/* Start with a clean FP control register (also in
+		 * hardware, if the machine has the IEEE facility). */
+		current->thread.fp_regs.fpc = 0;
+		if (MACHINE_HAS_IEEE)
+			asm volatile("sfpc %0,%0" : : "d" (0));
+	}
+        putname(filename);
+out:
+        return error;
+}
+
+
+/*
+ * fill in the FPU structure for a core dump.
+ *
+ * Always returns 1 to indicate that the registers were dumped.
+ */
+int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs)
+{
+#ifndef CONFIG_ARCH_S390X
+        /*
+	 * save fprs to current->thread.fp_regs to merge them with
+	 * the emulated registers and then copy the result to the dump.
+	 */
+	save_fp_regs(&current->thread.fp_regs);
+	memcpy(fpregs, &current->thread.fp_regs, sizeof(s390_fp_regs));
+#else /* CONFIG_ARCH_S390X */
+	save_fp_regs(fpregs);
+#endif /* CONFIG_ARCH_S390X */
+	return 1;
+}
+
+/*
+ * fill in the user structure for a core dump..
+ *
+ * Sizes are in pages: u_tsize covers the text up to end_code,
+ * u_dsize the data up to brk (minus the text), u_ssize the stack
+ * from the page-aligned stack pointer up to TASK_SIZE.
+ */
+void dump_thread(struct pt_regs * regs, struct user * dump)
+{
+
+/* changed the size calculations - should hopefully work better. lbt */
+	dump->magic = CMAGIC;
+	dump->start_code = 0;
+	dump->start_stack = regs->gprs[15] & ~(PAGE_SIZE - 1);
+	dump->u_tsize = current->mm->end_code >> PAGE_SHIFT;
+	dump->u_dsize = (current->mm->brk + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	dump->u_dsize -= dump->u_tsize;
+	dump->u_ssize = 0;
+	if (dump->start_stack < TASK_SIZE)
+		dump->u_ssize = (TASK_SIZE - dump->start_stack) >> PAGE_SHIFT;
+	memcpy(&dump->regs, regs, sizeof(s390_regs));
+	dump_fpu (regs, &dump->regs.fp_regs);
+	dump->regs.per_info = current->thread.per_info;
+}
+
+/*
+ * Walk the kernel stack of a sleeping task and return the first
+ * return address that lies outside the scheduler.  Returns 0 if the
+ * task is running, is the current task, or if no such address is
+ * found within the first 16 stack frames.
+ */
+unsigned long get_wchan(struct task_struct *p)
+{
+	struct stack_frame *sf, *low, *high;
+	unsigned long return_address;
+	int count;
+
+	if (!p || p == current || p->state == TASK_RUNNING || !p->thread_info)
+		return 0;
+	/* Valid frames must lie within the task's kernel stack. */
+	low = (struct stack_frame *) p->thread_info;
+	high = (struct stack_frame *)
+		((unsigned long) p->thread_info + THREAD_SIZE) - 1;
+	sf = (struct stack_frame *) (p->thread.ksp & PSW_ADDR_INSN);
+	if (sf <= low || sf > high)
+		return 0;
+	for (count = 0; count < 16; count++) {
+		/* Follow the back chain to the caller's frame. */
+		sf = (struct stack_frame *) (sf->back_chain & PSW_ADDR_INSN);
+		if (sf <= low || sf > high)
+			return 0;
+		return_address = sf->gprs[8] & PSW_ADDR_INSN;
+		if (!in_sched_functions(return_address))
+			return return_address;
+	}
+	return 0;
+}
+
diff --git a/arch/s390/kernel/profile.c b/arch/s390/kernel/profile.c
new file mode 100644
index 0000000..7ba777e
--- /dev/null
+++ b/arch/s390/kernel/profile.c
@@ -0,0 +1,20 @@
+/*
+ * arch/s390/kernel/profile.c
+ *
+ * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Thomas Spatzier (tspat@de.ibm.com)
+ *
+ */
+#include <linux/proc_fs.h>
+#include <linux/profile.h>
+
+static struct proc_dir_entry * root_irq_dir;
+
+/*
+ * Create the /proc/irq directory and populate it with the profiling
+ * cpu mask entry (/proc/irq/prof_cpu_mask).
+ */
+void init_irq_proc(void)
+{
+	/* create /proc/irq; NULL (not 0) selects the proc root */
+	root_irq_dir = proc_mkdir("irq", NULL);
+	if (!root_irq_dir)
+		/* don't create prof_cpu_mask in the proc root by accident */
+		return;
+
+	/* create /proc/irq/prof_cpu_mask */
+	create_prof_cpu_mask(root_irq_dir);
+}
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
new file mode 100644
index 0000000..647233c
--- /dev/null
+++ b/arch/s390/kernel/ptrace.c
@@ -0,0 +1,738 @@
+/*
+ *  arch/s390/kernel/ptrace.c
+ *
+ *  S390 version
+ *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
+ *               Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ *  Based on PowerPC version 
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Derived from "arch/m68k/kernel/ptrace.c"
+ *  Copyright (C) 1994 by Hamish Macdonald
+ *  Taken from linux/kernel/ptrace.c and modified for M680x0.
+ *  linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
+ *
+ * Modified by Cort Dougan (cort@cs.nmt.edu) 
+ *
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License.  See the file README.legal in the main directory of
+ * this archive for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/user.h>
+#include <linux/security.h>
+#include <linux/audit.h>
+
+#include <asm/segment.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+#ifdef CONFIG_S390_SUPPORT
+#include "compat_ptrace.h"
+#endif
+
+/*
+ * Recompute the PER (program event recording) control register
+ * values in the thread structure from the current debug settings,
+ * and switch the PER bit in the task's psw on or off accordingly.
+ * Single stepping is implemented as instruction-fetch events over
+ * the whole (31 or 64 bit) address space.
+ */
+static void
+FixPerRegisters(struct task_struct *task)
+{
+	struct pt_regs *regs;
+	per_struct *per_info;
+
+	regs = __KSTK_PTREGS(task);
+	per_info = (per_struct *) &task->thread.per_info;
+	per_info->control_regs.bits.em_instruction_fetch =
+		per_info->single_step | per_info->instruction_fetch;
+	
+	if (per_info->single_step) {
+		/* Trap every instruction fetch in the address range. */
+		per_info->control_regs.bits.starting_addr = 0;
+#ifdef CONFIG_S390_SUPPORT
+		if (test_thread_flag(TIF_31BIT))
+			per_info->control_regs.bits.ending_addr = 0x7fffffffUL;
+		else
+#endif
+			per_info->control_regs.bits.ending_addr = PSW_ADDR_INSN;
+	} else {
+		per_info->control_regs.bits.starting_addr =
+			per_info->starting_addr;
+		per_info->control_regs.bits.ending_addr =
+			per_info->ending_addr;
+	}
+	/*
+	 * if any of the control reg tracing bits are on 
+	 * we switch on per in the psw
+	 */
+	if (per_info->control_regs.words.cr[0] & PER_EM_MASK)
+		regs->psw.mask |= PSW_MASK_PER;
+	else
+		regs->psw.mask &= ~PSW_MASK_PER;
+
+	/* Mirror the storage alteration event mask into the
+	 * storage alteration space control bit. */
+	if (per_info->control_regs.bits.em_storage_alteration)
+		per_info->control_regs.bits.storage_alt_space_ctl = 1;
+	else
+		per_info->control_regs.bits.storage_alt_space_ctl = 0;
+}
+
+/*
+ * Enable hardware (PER) single stepping for a traced task.
+ */
+void
+set_single_step(struct task_struct *task)
+{
+	task->thread.per_info.single_step = 1;
+	FixPerRegisters(task);
+}
+
+/*
+ * Disable hardware (PER) single stepping for a traced task.
+ */
+void
+clear_single_step(struct task_struct *task)
+{
+	task->thread.per_info.single_step = 0;
+	FixPerRegisters(task);
+}
+
+/*
+ * Called by kernel/ptrace.c when detaching..
+ *
+ * Make sure single step bits etc are not set, so the child resumes
+ * at full speed after the tracer lets go.
+ */
+void
+ptrace_disable(struct task_struct *child)
+{
+	/* make sure the single step bit is not set. */
+	clear_single_step(child);
+}
+
+#ifndef CONFIG_ARCH_S390X
+# define __ADDR_MASK 3
+#else
+# define __ADDR_MASK 7
+#endif
+
+/*
+ * Read the word at offset addr from the user area of a process. The
+ * trouble here is that the information is littered over different
+ * locations. The process registers are found on the kernel stack,
+ * the floating point stuff and the trace settings are stored in
+ * the task structure. In addition the different structures in
+ * struct user contain pad bytes that should be read as zeroes.
+ * Lovely...
+ *
+ * Returns 0 and stores the word at the user address 'data' on
+ * success, -EIO for a misaligned or out-of-range addr.
+ */
+static int
+peek_user(struct task_struct *child, addr_t addr, addr_t data)
+{
+	/* dummy is only used for offset calculations relative to
+	 * struct user; it is never dereferenced. */
+	struct user *dummy = NULL;
+	addr_t offset, tmp;
+
+	/*
+	 * Stupid gdb peeks/pokes the access registers in 64 bit with
+	 * an alignment of 4. Programmers from hell...
+	 */
+	if ((addr & 3) || addr > sizeof(struct user) - __ADDR_MASK)
+		return -EIO;
+
+	if (addr < (addr_t) &dummy->regs.acrs) {
+		/*
+		 * psw and gprs are stored on the stack
+		 */
+		tmp = *(addr_t *)((addr_t) &__KSTK_PTREGS(child)->psw + addr);
+		if (addr == (addr_t) &dummy->regs.psw.mask)
+			/* Remove per bit from user psw. */
+			tmp &= ~PSW_MASK_PER;
+
+	} else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
+		/*
+		 * access registers are stored in the thread structure
+		 */
+		offset = addr - (addr_t) &dummy->regs.acrs;
+		tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);
+
+	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
+		/*
+		 * orig_gpr2 is stored on the kernel stack
+		 */
+		tmp = (addr_t) __KSTK_PTREGS(child)->orig_gpr2;
+
+	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
+		/* 
+		 * floating point regs. are stored in the thread structure
+		 */
+		offset = addr - (addr_t) &dummy->regs.fp_regs;
+		tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset);
+
+	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
+		/*
+		 * per_info is found in the thread structure
+		 */
+		offset = addr - (addr_t) &dummy->regs.per_info;
+		tmp = *(addr_t *)((addr_t) &child->thread.per_info + offset);
+
+	} else
+		/* pad bytes at the end of struct user read as zero */
+		tmp = 0;
+
+	return put_user(tmp, (addr_t __user *) data);
+}
+
+/*
+ * Write a word to the user area of a process at location addr. This
+ * operation does have an additional problem compared to peek_user.
+ * Stores to the program status word and on the floating point
+ * control register needs to get checked for validity.
+ *
+ * Returns 0 on success, -EIO for a misaligned or out-of-range addr,
+ * -EINVAL for an invalid psw mask or FP control value.  Writes past
+ * the per_info area (pad bytes) are silently ignored.
+ */
+static int
+poke_user(struct task_struct *child, addr_t addr, addr_t data)
+{
+	/* dummy is only used for offset calculations relative to
+	 * struct user; it is never dereferenced. */
+	struct user *dummy = NULL;
+	addr_t offset;
+
+	/*
+	 * Stupid gdb peeks/pokes the access registers in 64 bit with
+	 * an alignment of 4. Programmers from hell indeed...
+	 */
+	if ((addr & 3) || addr > sizeof(struct user) - __ADDR_MASK)
+		return -EIO;
+
+	if (addr < (addr_t) &dummy->regs.acrs) {
+		/*
+		 * psw and gprs are stored on the stack
+		 */
+		if (addr == (addr_t) &dummy->regs.psw.mask &&
+#ifdef CONFIG_S390_SUPPORT
+		    data != PSW_MASK_MERGE(PSW_USER32_BITS, data) &&
+#endif
+		    data != PSW_MASK_MERGE(PSW_USER_BITS, data))
+			/* Invalid psw mask. */
+			return -EINVAL;
+#ifndef CONFIG_ARCH_S390X
+		if (addr == (addr_t) &dummy->regs.psw.addr)
+			/* I'd like to reject addresses without the
+			   high order bit but older gdb's rely on it */
+			data |= PSW_ADDR_AMODE;
+#endif
+		*(addr_t *)((addr_t) &__KSTK_PTREGS(child)->psw + addr) = data;
+
+	} else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
+		/*
+		 * access registers are stored in the thread structure
+		 */
+		offset = addr - (addr_t) &dummy->regs.acrs;
+		*(addr_t *)((addr_t) &child->thread.acrs + offset) = data;
+
+	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
+		/*
+		 * orig_gpr2 is stored on the kernel stack
+		 */
+		__KSTK_PTREGS(child)->orig_gpr2 = data;
+
+	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
+		/*
+		 * floating point regs. are stored in the thread structure
+		 */
+		if (addr == (addr_t) &dummy->regs.fp_regs.fpc &&
+		    (data & ~FPC_VALID_MASK) != 0)
+			return -EINVAL;
+		offset = addr - (addr_t) &dummy->regs.fp_regs;
+		*(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data;
+
+	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
+		/*
+		 * per_info is found in the thread structure 
+		 */
+		offset = addr - (addr_t) &dummy->regs.per_info;
+		*(addr_t *)((addr_t) &child->thread.per_info + offset) = data;
+
+	}
+
+	/* debug settings may have changed - refresh the PER registers */
+	FixPerRegisters(child);
+	return 0;
+}
+
+/*
+ * Handle the native ptrace requests: peek/poke of text, data and
+ * the user area, including the s390 specific PTRACE_PEEKUSR_AREA /
+ * PTRACE_POKEUSR_AREA block transfers.  Everything else is handed
+ * to the generic ptrace_request.
+ */
+static int
+do_ptrace_normal(struct task_struct *child, long request, long addr, long data)
+{
+	unsigned long tmp;
+	ptrace_area parea; 
+	int copied, ret;
+
+	switch (request) {
+	case PTRACE_PEEKTEXT:
+	case PTRACE_PEEKDATA:
+		/* Remove high order bit from address (only for 31 bit). */
+		addr &= PSW_ADDR_INSN;
+		/* read word at location addr. */
+		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
+		if (copied != sizeof(tmp))
+			return -EIO;
+		return put_user(tmp, (unsigned long __user *) data);
+
+	case PTRACE_PEEKUSR:
+		/* read the word at location addr in the USER area. */
+		return peek_user(child, addr, data);
+
+	case PTRACE_POKETEXT:
+	case PTRACE_POKEDATA:
+		/* Remove high order bit from address (only for 31 bit). */
+		addr &= PSW_ADDR_INSN;
+		/* write the word at location addr. */
+		copied = access_process_vm(child, addr, &data, sizeof(data),1);
+		if (copied != sizeof(data))
+			return -EIO;
+		return 0;
+
+	case PTRACE_POKEUSR:
+		/* write the word at location addr in the USER area */
+		return poke_user(child, addr, data);
+
+	case PTRACE_PEEKUSR_AREA:
+	case PTRACE_POKEUSR_AREA:
+		/* addr points to a ptrace_area descriptor in the
+		 * tracer's address space describing the transfer. */
+		if (copy_from_user(&parea, (void __user *) addr,
+							sizeof(parea)))
+			return -EFAULT;
+		addr = parea.kernel_addr;
+		data = parea.process_addr;
+		copied = 0;
+		/* transfer the area one word at a time */
+		while (copied < parea.len) {
+			if (request == PTRACE_PEEKUSR_AREA)
+				ret = peek_user(child, addr, data);
+			else {
+				addr_t tmp;
+				if (get_user (tmp, (addr_t __user *) data))
+					return -EFAULT;
+				ret = poke_user(child, addr, tmp);
+			}
+			if (ret)
+				return ret;
+			addr += sizeof(unsigned long);
+			data += sizeof(unsigned long);
+			copied += sizeof(unsigned long);
+		}
+		return 0;
+	}
+	return ptrace_request(child, request, addr, data);
+}
+
+#ifdef CONFIG_S390_SUPPORT
+/*
+ * Now the fun part starts... a 31 bit program running in the
+ * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
+ * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
+ * to handle, the difference to the 64 bit versions of the requests
+ * is that the access is done in multiples of 4 byte instead of
+ * 8 bytes (sizeof(unsigned long) on 31/64 bit).
+ * The ugly part are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
+ * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
+ * is a 31 bit program too, the content of struct user can be
+ * emulated. A 31 bit program peeking into the struct user of
+ * a 64 bit program is a no-no.
+ */
+
+/*
+ * Same as peek_user but for a 31 bit program.
+ *
+ * Reads one 32 bit word of the emulated struct user32 of a 31 bit
+ * traced task and stores it at the user address 'data'.  Returns 0
+ * on success, -EIO for a misaligned or out-of-range addr or a
+ * non-31-bit child.
+ */
+static int
+peek_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
+{
+	/* dummy pointers are used for offset calculations only and
+	 * are never dereferenced. */
+	struct user32 *dummy32 = NULL;
+	per_struct32 *dummy_per32 = NULL;
+	addr_t offset;
+	__u32 tmp;
+
+	/*
+	 * Bound the access by the 31 bit layout (struct user32), the
+	 * same check poke_user_emu31 uses.  The 64 bit struct user
+	 * is larger and would let the tracer peek past the emulated
+	 * area.
+	 */
+	if (!test_thread_flag(TIF_31BIT) ||
+	    (addr & 3) || addr > sizeof(struct user32) - 3)
+		return -EIO;
+
+	if (addr < (addr_t) &dummy32->regs.acrs) {
+		/*
+		 * psw and gprs are stored on the stack
+		 */
+		if (addr == (addr_t) &dummy32->regs.psw.mask) {
+			/* Fake a 31 bit psw mask. */
+			tmp = (__u32)(__KSTK_PTREGS(child)->psw.mask >> 32);
+			tmp = PSW32_MASK_MERGE(PSW32_USER_BITS, tmp);
+		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
+			/* Fake a 31 bit psw address. */
+			tmp = (__u32) __KSTK_PTREGS(child)->psw.addr |
+				PSW32_ADDR_AMODE31;
+		} else {
+			/* gpr 0-15: low halves of the 64 bit registers */
+			tmp = *(__u32 *)((addr_t) &__KSTK_PTREGS(child)->psw +
+					 addr*2 + 4);
+		}
+	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
+		/*
+		 * access registers are stored in the thread structure
+		 */
+		offset = addr - (addr_t) &dummy32->regs.acrs;
+		tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);
+
+	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
+		/*
+		 * orig_gpr2 is stored on the kernel stack
+		 */
+		tmp = *(__u32*)((addr_t) &__KSTK_PTREGS(child)->orig_gpr2 + 4);
+
+	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
+		/*
+		 * floating point regs. are stored in the thread structure 
+		 */
+	        offset = addr - (addr_t) &dummy32->regs.fp_regs;
+		tmp = *(__u32 *)((addr_t) &child->thread.fp_regs + offset);
+
+	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
+		/*
+		 * per_info is found in the thread structure
+		 */
+		offset = addr - (addr_t) &dummy32->regs.per_info;
+		/* This is magic. See per_struct and per_struct32. */
+		if ((offset >= (addr_t) &dummy_per32->control_regs &&
+		     offset < (addr_t) (&dummy_per32->control_regs + 1)) ||
+		    (offset >= (addr_t) &dummy_per32->starting_addr &&
+		     offset <= (addr_t) &dummy_per32->ending_addr) ||
+		    offset == (addr_t) &dummy_per32->lowcore.words.address)
+			offset = offset*2 + 4;
+		else
+			offset = offset*2;
+		tmp = *(__u32 *)((addr_t) &child->thread.per_info + offset);
+
+	} else
+		/* pad bytes at the end of struct user32 read as zero */
+		tmp = 0;
+
+	return put_user(tmp, (__u32 __user *) data);
+}
+
+/*
+ * Same as poke_user but for a 31 bit program.
+ *
+ * Writes one 32 bit word into the emulated struct user32 of a 31
+ * bit traced task.  Returns 0 on success, -EIO for misaligned or
+ * out-of-range addresses, -EINVAL for an invalid psw mask or FP
+ * control value.  Writes past the per_info area are silently
+ * ignored.
+ */
+static int
+poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
+{
+	/* dummy pointers are used for offset calculations only and
+	 * are never dereferenced. */
+	struct user32 *dummy32 = NULL;
+	per_struct32 *dummy_per32 = NULL;
+	addr_t offset;
+	__u32 tmp;
+
+	if (!test_thread_flag(TIF_31BIT) ||
+	    (addr & 3) || addr > sizeof(struct user32) - 3)
+		return -EIO;
+
+	tmp = (__u32) data;
+
+	if (addr < (addr_t) &dummy32->regs.acrs) {
+		/*
+		 * psw, gprs, acrs and orig_gpr2 are stored on the stack
+		 */
+		if (addr == (addr_t) &dummy32->regs.psw.mask) {
+			/* Build a 64 bit psw mask from 31 bit mask. */
+			if (tmp != PSW32_MASK_MERGE(PSW32_USER_BITS, tmp))
+				/* Invalid psw mask. */
+				return -EINVAL;
+			__KSTK_PTREGS(child)->psw.mask =
+				PSW_MASK_MERGE(PSW_USER32_BITS, (__u64) tmp << 32);
+		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
+			/* Build a 64 bit psw address from 31 bit address. */
+			__KSTK_PTREGS(child)->psw.addr = 
+				(__u64) tmp & PSW32_ADDR_INSN;
+		} else {
+			/* gpr 0-15: only the low halves are written */
+			*(__u32*)((addr_t) &__KSTK_PTREGS(child)->psw
+				  + addr*2 + 4) = tmp;
+		}
+	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
+		/*
+		 * access registers are stored in the thread structure
+		 */
+		offset = addr - (addr_t) &dummy32->regs.acrs;
+		*(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;
+
+	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
+		/*
+		 * orig_gpr2 is stored on the kernel stack
+		 */
+		*(__u32*)((addr_t) &__KSTK_PTREGS(child)->orig_gpr2 + 4) = tmp;
+
+	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
+		/*
+		 * floating point regs. are stored in the thread structure 
+		 */
+		if (addr == (addr_t) &dummy32->regs.fp_regs.fpc &&
+		    (tmp & ~FPC_VALID_MASK) != 0)
+			/* Invalid floating point control. */
+			return -EINVAL;
+	        offset = addr - (addr_t) &dummy32->regs.fp_regs;
+		*(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp;
+
+	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
+		/*
+		 * per_info is found in the thread structure.
+		 */
+		offset = addr - (addr_t) &dummy32->regs.per_info;
+		/*
+		 * This is magic. See per_struct and per_struct32.
+		 * By incident the offsets in per_struct are exactly
+		 * twice the offsets in per_struct32 for all fields.
+		 * The 8 byte fields need special handling though,
+		 * because the second half (bytes 4-7) is needed and
+		 * not the first half.
+		 */
+		if ((offset >= (addr_t) &dummy_per32->control_regs &&
+		     offset < (addr_t) (&dummy_per32->control_regs + 1)) ||
+		    (offset >= (addr_t) &dummy_per32->starting_addr &&
+		     offset <= (addr_t) &dummy_per32->ending_addr) ||
+		    offset == (addr_t) &dummy_per32->lowcore.words.address)
+			offset = offset*2 + 4;
+		else
+			offset = offset*2;
+		*(__u32 *)((addr_t) &child->thread.per_info + offset) = tmp;
+
+	}
+
+	/* debug settings may have changed - refresh the PER registers */
+	FixPerRegisters(child);
+	return 0;
+}
+
+/*
+ * 31 bit emulation variant of do_ptrace_normal: transfers are done
+ * in 4 byte units and the compat layouts of siginfo and the user
+ * area are used.
+ */
+static int
+do_ptrace_emu31(struct task_struct *child, long request, long addr, long data)
+{
+	unsigned int tmp;  /* 4 bytes !! */
+	ptrace_area_emu31 parea; 
+	int copied, ret;
+
+	switch (request) {
+	case PTRACE_PEEKTEXT:
+	case PTRACE_PEEKDATA:
+		/* read word at location addr. */
+		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
+		if (copied != sizeof(tmp))
+			return -EIO;
+		return put_user(tmp, (unsigned int __user *) data);
+
+	case PTRACE_PEEKUSR:
+		/* read the word at location addr in the USER area. */
+		return peek_user_emu31(child, addr, data);
+
+	case PTRACE_POKETEXT:
+	case PTRACE_POKEDATA:
+		/* write the word at location addr. */
+		tmp = data;
+		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 1);
+		if (copied != sizeof(tmp))
+			return -EIO;
+		return 0;
+
+	case PTRACE_POKEUSR:
+		/* write the word at location addr in the USER area */
+		return poke_user_emu31(child, addr, data);
+
+	case PTRACE_PEEKUSR_AREA:
+	case PTRACE_POKEUSR_AREA:
+		/* addr points to a 31 bit ptrace_area descriptor in
+		 * the tracer's address space */
+		if (copy_from_user(&parea, (void __user *) addr,
+							sizeof(parea)))
+			return -EFAULT;
+		addr = parea.kernel_addr;
+		data = parea.process_addr;
+		copied = 0;
+		/* transfer the area in 4 byte units */
+		while (copied < parea.len) {
+			if (request == PTRACE_PEEKUSR_AREA)
+				ret = peek_user_emu31(child, addr, data);
+			else {
+				__u32 tmp;
+				if (get_user (tmp, (__u32 __user *) data))
+					return -EFAULT;
+				ret = poke_user_emu31(child, addr, tmp);
+			}
+			if (ret)
+				return ret;
+			addr += sizeof(unsigned int);
+			data += sizeof(unsigned int);
+			copied += sizeof(unsigned int);
+		}
+		return 0;
+	case PTRACE_GETEVENTMSG:
+		return put_user((__u32) child->ptrace_message,
+				(unsigned int __user *) data);
+	case PTRACE_GETSIGINFO:
+		if (child->last_siginfo == NULL)
+			return -EINVAL;
+		return copy_siginfo_to_user32((compat_siginfo_t __user *) data,
+					      child->last_siginfo);
+	case PTRACE_SETSIGINFO:
+		if (child->last_siginfo == NULL)
+			return -EINVAL;
+		return copy_siginfo_from_user32(child->last_siginfo,
+						(compat_siginfo_t __user *) data);
+	}
+	return ptrace_request(child, request, addr, data);
+}
+#endif
+
+#define PT32_IEEE_IP 0x13c
+
+/*
+ * Dispatcher for ptrace requests once the child task is known.
+ * Handles requests common to 31 and 64 bit children and hands the
+ * layout-dependent rest to do_ptrace_normal or do_ptrace_emu31.
+ */
+static int
+do_ptrace(struct task_struct *child, long request, long addr, long data)
+{
+	int ret;
+
+	if (request == PTRACE_ATTACH)
+		return ptrace_attach(child);
+
+	/*
+	 * Special cases to get/store the ieee instructions pointer.
+	 * Allowed without being attached when operating on oneself.
+	 */
+	if (child == current) {
+		if (request == PTRACE_PEEKUSR && addr == PT_IEEE_IP)
+			return peek_user(child, addr, data);
+		if (request == PTRACE_POKEUSR && addr == PT_IEEE_IP)
+			return poke_user(child, addr, data);
+#ifdef CONFIG_S390_SUPPORT
+		if (request == PTRACE_PEEKUSR &&
+		    addr == PT32_IEEE_IP && test_thread_flag(TIF_31BIT))
+			return peek_user_emu31(child, addr, data);
+		if (request == PTRACE_POKEUSR &&
+		    addr == PT32_IEEE_IP && test_thread_flag(TIF_31BIT))
+			return poke_user_emu31(child, addr, data);
+#endif
+	}
+
+	/* All remaining requests require an attached, stopped child. */
+	ret = ptrace_check_attach(child, request == PTRACE_KILL);
+	if (ret < 0)
+		return ret;
+
+	switch (request) {
+	case PTRACE_SYSCALL:
+		/* continue and stop at next (return from) syscall */
+	case PTRACE_CONT:
+		/* restart after signal. */
+		if ((unsigned long) data >= _NSIG)
+			return -EIO;
+		if (request == PTRACE_SYSCALL)
+			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+		else
+			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+		child->exit_code = data;
+		/* make sure the single step bit is not set. */
+		clear_single_step(child);
+		wake_up_process(child);
+		return 0;
+
+	case PTRACE_KILL:
+		/*
+		 * make the child exit.  Best I can do is send it a sigkill. 
+		 * perhaps it should be put in the status that it wants to 
+		 * exit.
+		 */
+		if (child->exit_state == EXIT_ZOMBIE) /* already dead */
+			return 0;
+		child->exit_code = SIGKILL;
+		/* make sure the single step bit is not set. */
+		clear_single_step(child);
+		wake_up_process(child);
+		return 0;
+
+	case PTRACE_SINGLESTEP:
+		/* set the trap flag. */
+		if ((unsigned long) data >= _NSIG)
+			return -EIO;
+		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+		child->exit_code = data;
+		/* When a signal is delivered the TIF_SINGLE_STEP flag is
+		 * used instead of hardware PER -- NOTE(review): confirm
+		 * against the entry/signal code. */
+		if (data)
+			set_tsk_thread_flag(child, TIF_SINGLE_STEP);
+		else
+			set_single_step(child);
+		/* give it a chance to run. */
+		wake_up_process(child);
+		return 0;
+
+	case PTRACE_DETACH:
+		/* detach a process that was attached. */
+		return ptrace_detach(child, data);
+
+
+	/* Do requests that differ for 31/64 bit */
+	default:
+#ifdef CONFIG_S390_SUPPORT
+		if (test_thread_flag(TIF_31BIT))
+			return do_ptrace_emu31(child, request, addr, data);
+#endif
+		return do_ptrace_normal(child, request, addr, data);
+	}
+	/* Not reached.  */
+	return -EIO;
+}
+
+/*
+ * ptrace system call entry: handle PTRACE_TRACEME, refuse to mess
+ * with init (pid 1), then look up the target task, take a reference
+ * on it and dispatch to do_ptrace.
+ */
+asmlinkage long
+sys_ptrace(long request, long pid, long addr, long data)
+{
+	struct task_struct *child;
+	int ret;
+
+	lock_kernel();
+
+	if (request == PTRACE_TRACEME) {
+		/* are we already being traced? */
+		ret = -EPERM;
+		if (current->ptrace & PT_PTRACED)
+			goto out;
+		ret = security_ptrace(current->parent, current);
+		if (ret)
+			goto out;
+		/* set the ptrace bit in the process flags. */
+		current->ptrace |= PT_PTRACED;
+		goto out;
+	}
+
+	ret = -EPERM;
+	if (pid == 1)		/* you may not mess with init */
+		goto out;
+
+	ret = -ESRCH;
+	read_lock(&tasklist_lock);
+	child = find_task_by_pid(pid);
+	if (child)
+		/* hold a reference so the task can't vanish under us */
+		get_task_struct(child);
+	read_unlock(&tasklist_lock);
+	if (!child)
+		goto out;
+
+	ret = do_ptrace(child, request, addr, data);
+
+	put_task_struct(child);
+out:
+	unlock_kernel();
+	return ret;
+}
+
+/*
+ * Called on syscall entry (entryexit == 0) and exit (entryexit != 0)
+ * to feed the audit subsystem and, when TIF_SYSCALL_TRACE is set and
+ * the task is ptraced, to stop the task with a SIGTRAP notification
+ * for the tracer.
+ */
+asmlinkage void
+syscall_trace(struct pt_regs *regs, int entryexit)
+{
+	if (unlikely(current->audit_context)) {
+		if (!entryexit)
+			/* syscall number in gpr 2, arguments in
+			 * orig_gpr2 and gprs 3-5 */
+			audit_syscall_entry(current, regs->gprs[2],
+					    regs->orig_gpr2, regs->gprs[3],
+					    regs->gprs[4], regs->gprs[5]);
+		else
+			audit_syscall_exit(current, regs->gprs[2]);
+	}
+	if (!test_thread_flag(TIF_SYSCALL_TRACE))
+		return;
+	if (!(current->ptrace & PT_PTRACED))
+		return;
+	/* with PT_TRACESYSGOOD set, 0x80 lets the tracer distinguish
+	 * syscall stops from real SIGTRAPs */
+	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
+				 ? 0x80 : 0));
+
+	/*
+	 * this isn't the same as continuing with a signal, but it will do
+	 * for normal use.  strace only continues with a signal if the
+	 * stopping signal is not SIGTRAP.  -brl
+	 */
+	if (current->exit_code) {
+		send_sig(current->exit_code, current, 1);
+		current->exit_code = 0;
+	}
+}
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
new file mode 100644
index 0000000..658e5ac
--- /dev/null
+++ b/arch/s390/kernel/reipl.S
@@ -0,0 +1,78 @@
+/*
+ *  arch/s390/kernel/reipl.S
+ *
+ *  S390 version
+ *    Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com)
+ */
+
+#include <asm/lowcore.h>
+
+/*
+ * do_reipl - re-IPL (reboot) from the subchannel whose id is passed
+ * in %r2.  Runs with interrupts disabled: it modifies and enables
+ * the subchannel, starts the IPL channel program (retrying the ssch
+ * up to 5 times), waits for the completing I/O interrupt, checks
+ * the interruption response block, then resets the prefix register,
+ * stores the subchannel id and loads the new psw from absolute
+ * address 0.  On unexpected status a disabled wait psw (.Ldispsw)
+ * with the failing address in its second word is loaded.
+ * NOTE(review): description inferred from the code below; confirm
+ * against the z/Architecture Principles of Operation.
+ */
+		.globl	do_reipl
+do_reipl:	basr	%r13,0
+.Lpg0:		lpsw	.Lnewpsw-.Lpg0(%r13)
+.Lpg1:		lctl	%c6,%c6,.Lall-.Lpg0(%r13)
+                stctl   %c0,%c0,.Lctlsave-.Lpg0(%r13)
+                ni      .Lctlsave-.Lpg0(%r13),0xef
+                lctl    %c0,%c0,.Lctlsave-.Lpg0(%r13)
+                lr      %r1,%r2
+        	mvc     __LC_PGM_NEW_PSW(8),.Lpcnew-.Lpg0(%r13)
+                stsch   .Lschib-.Lpg0(%r13)                                    
+	        oi      .Lschib+5-.Lpg0(%r13),0x84 
+.Lecs:  	xi      .Lschib+27-.Lpg0(%r13),0x01 
+        	msch    .Lschib-.Lpg0(%r13) 
+                lhi     %r0,5
+.Lssch:		ssch	.Liplorb-.Lpg0(%r13)           
+		jz	.L001
+                brct    %r0,.Lssch  
+		bas	%r14,.Ldisab-.Lpg0(%r13)
+.L001:		mvc	__LC_IO_NEW_PSW(8),.Lionew-.Lpg0(%r13)	
+.Ltpi:		lpsw	.Lwaitpsw-.Lpg0(%r13)          
+.Lcont:		c	%r1,__LC_SUBCHANNEL_ID
+		jnz	.Ltpi
+		clc	__LC_IO_INT_PARM(4),.Liplorb-.Lpg0(%r13)
+		jnz	.Ltpi
+		tsch	.Liplirb-.Lpg0(%r13)           
+		tm	.Liplirb+9-.Lpg0(%r13),0xbf
+                jz      .L002
+                bas     %r14,.Ldisab-.Lpg0(%r13)    
+.L002:		tm	.Liplirb+8-.Lpg0(%r13),0xf3    
+                jz      .L003
+                bas     %r14,.Ldisab-.Lpg0(%r13)	
+.L003:		spx	.Lnull-.Lpg0(%r13)
+		st 	%r1,__LC_SUBCHANNEL_ID
+                lpsw 	0
+		sigp    0,0,0(6)               
+.Ldisab:	st      %r14,.Ldispsw+4-.Lpg0(%r13)
+		lpsw	.Ldispsw-.Lpg0(%r13)
+/* Literal pool: control masks, psws, IPL channel program and I/O blocks. */
+                .align 	8
+.Lall:		.long	0xff000000
+.Lnull:		.long   0x00000000
+.Lctlsave:      .long   0x00000000
+                .align 	8
+.Lnewpsw:	.long   0x00080000,0x80000000+.Lpg1
+.Lpcnew:  	.long   0x00080000,0x80000000+.Lecs
+.Lionew:	.long   0x00080000,0x80000000+.Lcont
+.Lwaitpsw:	.long	0x020a0000,0x00000000+.Ltpi
+.Ldispsw:	.long   0x000a0000,0x00000000
+.Liplccws:	.long   0x02000000,0x60000018
+		.long   0x08000008,0x20000001
+.Liplorb:	.long	0x0049504c,0x0040ff80
+		.long	0x00000000+.Liplccws
+.Lschib:        .long   0x00000000,0x00000000
+		.long   0x00000000,0x00000000
+		.long   0x00000000,0x00000000
+		.long   0x00000000,0x00000000
+		.long   0x00000000,0x00000000
+		.long   0x00000000,0x00000000
+.Liplirb:	.long	0x00000000,0x00000000
+		.long	0x00000000,0x00000000
+		.long	0x00000000,0x00000000
+		.long	0x00000000,0x00000000
+		.long	0x00000000,0x00000000
+		.long	0x00000000,0x00000000
+		.long	0x00000000,0x00000000
+		.long	0x00000000,0x00000000
+	
+
+	
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S
new file mode 100644
index 0000000..4d090d6
--- /dev/null
+++ b/arch/s390/kernel/reipl64.S
@@ -0,0 +1,96 @@
+/*
+ *  arch/s390/kernel/reipl64.S
+ *
+ *  S390 version
+ *    Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com)
+	         Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
+ */
+
+#include <asm/lowcore.h>
+# do_reipl: re-IPL (reboot) the machine.  The IPL device id arrives in
+# %r2 (first argument register) and is kept in %r1 for the duration;
+# the code runs with a disabled PSW, starts the IPL channel program on
+# that subchannel and finally drops back to ESA mode before loading
+# the new PSW from absolute address 0.
+		.globl	do_reipl
+do_reipl:	basr	%r13,0
+.Lpg0:		lpswe   .Lnewpsw-.Lpg0(%r13)
+# from here on we run disabled for interrupts (see .Lnewpsw mask)
+.Lpg1:		lctlg	%c6,%c6,.Lall-.Lpg0(%r13)
+                stctg   %c0,%c0,.Lctlsave-.Lpg0(%r13)
+                ni      .Lctlsave+4-.Lpg0(%r13),0xef
+                lctlg   %c0,%c0,.Lctlsave-.Lpg0(%r13)
+                lgr     %r1,%r2
+        	mvc     __LC_PGM_NEW_PSW(16),.Lpcnew-.Lpg0(%r13)
+                stsch   .Lschib-.Lpg0(%r13)                                    
+	        oi      .Lschib+5-.Lpg0(%r13),0x84 
+.Lecs:  	xi      .Lschib+27-.Lpg0(%r13),0x01 
+        	msch    .Lschib-.Lpg0(%r13) 
+# start the IPL channel program; retry up to 5 times before giving up
+	        lghi    %r0,5
+.Lssch:		ssch	.Liplorb-.Lpg0(%r13)           
+		jz	.L001
+		brct    %r0,.Lssch   
+		bas	%r14,.Ldisab-.Lpg0(%r13)
+# wait for the I/O interrupt of our subchannel, ignore all others
+.L001:		mvc	__LC_IO_NEW_PSW(16),.Lionew-.Lpg0(%r13)	
+.Ltpi:		lpswe	.Lwaitpsw-.Lpg0(%r13)          
+.Lcont:		c	%r1,__LC_SUBCHANNEL_ID
+		jnz	.Ltpi
+		clc	__LC_IO_INT_PARM(4),.Liplorb-.Lpg0(%r13)
+		jnz	.Ltpi
+# inspect the IRB status bytes; any unexpected status -> disabled wait
+		tsch	.Liplirb-.Lpg0(%r13)           
+		tm	.Liplirb+9-.Lpg0(%r13),0xbf
+                jz      .L002
+                bas     %r14,.Ldisab-.Lpg0(%r13)    
+.L002:		tm	.Liplirb+8-.Lpg0(%r13),0xf3    
+                jz      .L003
+                bas     %r14,.Ldisab-.Lpg0(%r13)	
+.L003:		spx	.Lnull-.Lpg0(%r13)
+		st 	%r1,__LC_SUBCHANNEL_ID
+                lhi     %r1,0            # mode 0 = esa
+                slr     %r0,%r0          # set cpuid to zero
+                sigp    %r1,%r0,0x12     # switch to esa mode
+                lpsw 	0
+# error exit: load a disabled wait PSW whose address is the caller
+.Ldisab:	sll    %r14,1
+		srl    %r14,1            # need to kill hi bit to avoid specification exceptions.
+		st     %r14,.Ldispsw+12-.Lpg0(%r13)
+		lpswe	.Ldispsw-.Lpg0(%r13)
+                .align 	8
+.Lall:		.quad	0x00000000ff000000
+.Lctlsave:      .quad   0x0000000000000000
+.Lnull:		.long   0x0000000000000000
+                .align 	16
+/*
+ * These addresses have to be 31 bit otherwise
+ * the sigp will throw a specification exception
+ * when switching to ESA mode as bit 31 would be set
+ * in the ESA psw.
+ * Bit 31 of the addresses has to be 0 for the
+ * 31bit lpswe instruction, a fact they appear to have
+ * omitted from the pop.
+ */
+.Lnewpsw:	.quad   0x0000000080000000
+		.quad   .Lpg1
+.Lpcnew:	.quad   0x0000000080000000
+	  	.quad   .Lecs
+.Lionew:	.quad   0x0000000080000000
+		.quad   .Lcont
+.Lwaitpsw:	.quad	0x0202000080000000
+		.quad   .Ltpi
+.Ldispsw:	.quad   0x0002000080000000
+		.quad   0x0000000000000000
+.Liplccws:	.long   0x02000000,0x60000018
+		.long   0x08000008,0x20000001
+.Liplorb:	.long	0x0049504c,0x0040ff80
+		.long	0x00000000+.Liplccws
+.Lschib:        .long   0x00000000,0x00000000
+		.long   0x00000000,0x00000000
+		.long   0x00000000,0x00000000
+		.long   0x00000000,0x00000000
+		.long   0x00000000,0x00000000
+		.long   0x00000000,0x00000000
+.Liplirb:	.long	0x00000000,0x00000000
+		.long	0x00000000,0x00000000
+		.long	0x00000000,0x00000000
+		.long	0x00000000,0x00000000
+		.long	0x00000000,0x00000000
+		.long	0x00000000,0x00000000
+		.long	0x00000000,0x00000000
+		.long	0x00000000,0x00000000
+	
+
+	
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c
new file mode 100644
index 0000000..3bdd38e
--- /dev/null
+++ b/arch/s390/kernel/s390_ext.c
@@ -0,0 +1,135 @@
+/*
+ *  arch/s390/kernel/s390_ext.c
+ *
+ *  S390 version
+ *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com),
+ *               Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/kernel_stat.h>
+#include <linux/interrupt.h>
+
+#include <asm/lowcore.h>
+#include <asm/s390_ext.h>
+#include <asm/irq.h>
+
+/*
+ * Simple hash strategy: index = code & 0xff;
+ * ext_int_hash[index] is the start of the list for all external interrupts
+ * that hash to this index. With the current set of external interrupts 
+ * (0x1202 external call, 0x1004 cpu timer, 0x2401 hwc console, 0x4000
+ * iucv and 0x2603 pfault) this is always the first element. 
+ */
+ext_int_info_t *ext_int_hash[256] = { 0, };	/* one chain head per hashed code */
+
+/*
+ * Register a handler for external interrupt "code" by allocating a new
+ * hash-list entry and inserting it at the head of its chain.
+ * Returns 0 on success or -ENOMEM if the entry cannot be allocated.
+ */
+int register_external_interrupt(__u16 code, ext_int_handler_t handler)
+{
+        ext_int_info_t *p;
+        int index;
+
+	/* no cast needed on kmalloc in C; taking the size from *p keeps
+	   it in sync with the pointer's type */
+	p = kmalloc(sizeof(*p), GFP_ATOMIC);
+        if (p == NULL)
+                return -ENOMEM;
+        p->code = code;
+        p->handler = handler;
+        index = code & 0xff;
+        p->next = ext_int_hash[index];
+        ext_int_hash[index] = p;
+        return 0;
+}
+
+/*
+ * Same as register_external_interrupt(), but for early boot: the
+ * caller supplies the (usually statically allocated) ext_int_info_t,
+ * so no allocation is performed.  Returns 0 or -EINVAL.
+ */
+int register_early_external_interrupt(__u16 code, ext_int_handler_t handler,
+				      ext_int_info_t *p)
+{
+	int idx;
+
+	if (!p)
+		return -EINVAL;
+	idx = code & 0xff;
+	p->handler = handler;
+	p->code = code;
+	/* link in at the head of the chain for this hash index */
+	p->next = ext_int_hash[idx];
+	ext_int_hash[idx] = p;
+	return 0;
+}
+
+/*
+ * Remove and free the hash-list entry matching "code" and "handler".
+ * Returns 0 on success or -ENOENT if no such registration exists.
+ */
+int unregister_external_interrupt(__u16 code, ext_int_handler_t handler)
+{
+	ext_int_info_t **link, *entry;
+
+	/* walk the chain via the address of each ->next pointer so the
+	   head and interior cases unlink identically */
+	for (link = &ext_int_hash[code & 0xff]; (entry = *link) != NULL;
+	     link = &entry->next) {
+		if (entry->code == code && entry->handler == handler) {
+			*link = entry->next;
+			kfree(entry);
+			return 0;
+		}
+	}
+	return -ENOENT;
+}
+
+/*
+ * Undo a register_early_external_interrupt().  The caller owns the
+ * ext_int_info_t, so the entry is unlinked but NOT freed here.
+ * Returns 0 on success, -EINVAL if p does not match code/handler,
+ * -ENOENT if p is not on its hash chain.
+ */
+int unregister_early_external_interrupt(__u16 code, ext_int_handler_t handler,
+					ext_int_info_t *p)
+{
+	ext_int_info_t *q;
+	int index;
+
+	if (p == NULL || p->code != code || p->handler != handler)
+		return -EINVAL;
+	index = code & 0xff;
+	q = ext_int_hash[index];
+	if (p != q) {
+		/* find the predecessor of p on the chain */
+		while (q != NULL) {
+			if (q->next == p)
+				break;
+			q = q->next;
+		}
+		if (q == NULL)
+			return -ENOENT;
+		q->next = p->next;
+	} else
+		/* p is the chain head */
+		ext_int_hash[index] = p->next;
+	return 0;
+}
+
+/*
+ * External interrupt entry: called from the low-level interrupt
+ * handler with the interruption code.  Invokes every handler that
+ * registered for this code on the matching hash chain.
+ */
+void do_extint(struct pt_regs *regs, unsigned short code)
+{
+        ext_int_info_t *p;
+        int index;
+
+	irq_enter();
+	/* monitor call instruction; NOTE(review): presumably a hook
+	   for monitoring/tracing tools - confirm before relying on it */
+	asm volatile ("mc 0,0");
+	if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer)
+		/**
+		 * Make sure that the i/o interrupt did not "overtake"
+		 * the last HZ timer interrupt.
+		 */
+		account_ticks(regs);
+	kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
+        index = code & 0xff;
+	/* multiple registrations may hash to this index; match on code */
+	for (p = ext_int_hash[index]; p; p = p->next) {
+		if (likely(p->code == code)) {
+			if (likely(p->handler))
+				p->handler(regs, code);
+		}
+	}
+	irq_exit();
+}
+
+EXPORT_SYMBOL(register_external_interrupt);
+EXPORT_SYMBOL(unregister_external_interrupt);
+
diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c
new file mode 100644
index 0000000..11fd6d5
--- /dev/null
+++ b/arch/s390/kernel/s390_ksyms.c
@@ -0,0 +1,65 @@
+/*
+ *  arch/s390/kernel/s390_ksyms.c
+ *
+ *  S390 version
+ */
+#include <linux/config.h>
+#include <linux/highuid.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/syscalls.h>
+#include <linux/interrupt.h>
+#include <linux/ioctl32.h>
+#include <asm/checksum.h>
+#include <asm/cpcmd.h>
+#include <asm/delay.h>
+#include <asm/pgalloc.h>
+#include <asm/setup.h>
+#ifdef CONFIG_IP_MULTICAST
+#include <net/arp.h>
+#endif
+
+/*
+ * memory management helpers (defined in arch assembler/lib code)
+ */
+EXPORT_SYMBOL(_oi_bitmap);
+EXPORT_SYMBOL(_ni_bitmap);
+EXPORT_SYMBOL(_zb_findmap);
+EXPORT_SYMBOL(_sb_findmap);
+EXPORT_SYMBOL(__copy_from_user_asm);
+EXPORT_SYMBOL(__copy_to_user_asm);
+EXPORT_SYMBOL(__copy_in_user_asm);
+EXPORT_SYMBOL(__clear_user_asm);
+EXPORT_SYMBOL(__strncpy_from_user_asm);
+EXPORT_SYMBOL(__strnlen_user_asm);
+EXPORT_SYMBOL(diag10);
+EXPORT_SYMBOL(default_storage_key);
+
+/*
+ * semaphore ops (see arch/s390/kernel/semaphore.c)
+ */
+EXPORT_SYMBOL(__up);
+EXPORT_SYMBOL(__down);
+EXPORT_SYMBOL(__down_interruptible);
+
+/*
+ * binfmt_elf loader 
+ */
+extern int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs);
+EXPORT_SYMBOL(dump_fpu);
+EXPORT_SYMBOL(overflowuid);
+EXPORT_SYMBOL(overflowgid);
+EXPORT_SYMBOL(empty_zero_page);
+
+/*
+ * misc.
+ */
+EXPORT_SYMBOL(machine_flags);
+EXPORT_SYMBOL(__udelay);
+EXPORT_SYMBOL(kernel_thread);
+EXPORT_SYMBOL(csum_fold);
+EXPORT_SYMBOL(console_mode);
+EXPORT_SYMBOL(console_devno);
+EXPORT_SYMBOL(console_irq);
+EXPORT_SYMBOL(sys_wait4);
diff --git a/arch/s390/kernel/semaphore.c b/arch/s390/kernel/semaphore.c
new file mode 100644
index 0000000..8dfb690
--- /dev/null
+++ b/arch/s390/kernel/semaphore.c
@@ -0,0 +1,108 @@
+/*
+ *  linux/arch/s390/kernel/semaphore.c
+ *
+ *  S390 version
+ *    Copyright (C) 1998-2000 IBM Corporation
+ *    Author(s): Martin Schwidefsky
+ *
+ *  Derived from "linux/arch/i386/kernel/semaphore.c
+ *    Copyright (C) 1999, Linus Torvalds
+ *
+ */
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+
+#include <asm/semaphore.h>
+
+/*
+ * Atomically update sem->count. Equivalent to:
+ *   old_val = sem->count.counter;
+ *   new_val = ((old_val >= 0) ? old_val : 0) + incr;
+ *   sem->count.counter = new_val;
+ *   return old_val;
+ */
+static inline int __sem_update_count(struct semaphore *sem, int incr)
+{
+	int old_val, new_val;
+
+	/* compare-and-swap loop: if another cpu changed sem->count
+	   between the load and the cs, reload and retry */
+        __asm__ __volatile__("   l     %0,0(%3)\n"
+                             "0: ltr   %1,%0\n"
+			     "   jhe   1f\n"
+			     "   lhi   %1,0\n"
+			     "1: ar    %1,%4\n"
+                             "   cs    %0,%1,0(%3)\n"
+                             "   jl    0b\n"
+                             : "=&d" (old_val), "=&d" (new_val),
+			       "=m" (sem->count)
+			     : "a" (&sem->count), "d" (incr), "m" (sem->count)
+			     : "cc" );
+	return old_val;
+}
+
+/*
+ * The inline function up() incremented count but the result
+ * was <= 0. This indicates that some process is waiting on
+ * the semaphore. The semaphore is free and we'll wake the
+ * first sleeping process, so we set count to 1 unless some
+ * other cpu has called up in the meantime in which case
+ * we just increment count by 1.
+ */
+void __up(struct semaphore *sem)
+{
+	/* clamp a negative count to 0, add 1, then wake one waiter */
+	__sem_update_count(sem, 1);
+	wake_up(&sem->wait);
+}
+
+/*
+ * The inline function down() decremented count and the result
+ * was < 0. The wait loop will atomically test and update the
+ * semaphore counter following the rules:
+ *   count > 0: decrement count, wake up queue and exit.
+ *   count <= 0: set count to -1, go to sleep.
+ */
+void __sched __down(struct semaphore * sem)
+{
+	struct task_struct *tsk = current;
+	DECLARE_WAITQUEUE(wait, tsk);
+
+	__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+	add_wait_queue_exclusive(&sem->wait, &wait);
+	/* __sem_update_count returns the OLD value: positive means the
+	   semaphore was free and is now ours */
+	while (__sem_update_count(sem, -1) <= 0) {
+		schedule();
+		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+	}
+	remove_wait_queue(&sem->wait, &wait);
+	__set_task_state(tsk, TASK_RUNNING);
+	/* NOTE(review): trailing wake_up presumably forwards a wakeup
+	   that raced with our acquisition to the next sleeper - confirm */
+	wake_up(&sem->wait);
+}
+
+/*
+ * Same as __down() with an additional test for signals.
+ * If a signal is pending the count is updated as follows:
+ *   count > 0: wake up queue and exit.
+ *   count <= 0: set count to 0, wake up queue and exit.
+ */
+int __sched __down_interruptible(struct semaphore * sem)
+{
+	int retval = 0;
+	struct task_struct *tsk = current;
+	DECLARE_WAITQUEUE(wait, tsk);
+
+	__set_task_state(tsk, TASK_INTERRUPTIBLE);
+	add_wait_queue_exclusive(&sem->wait, &wait);
+	while (__sem_update_count(sem, -1) <= 0) {
+		if (signal_pending(current)) {
+			/* incr=0 resets a negative count to 0 so later
+			   uppers/downers see a consistent state */
+			__sem_update_count(sem, 0);
+			retval = -EINTR;
+			break;
+		}
+		schedule();
+		set_task_state(tsk, TASK_INTERRUPTIBLE);
+	}
+	remove_wait_queue(&sem->wait, &wait);
+	__set_task_state(tsk, TASK_RUNNING);
+	wake_up(&sem->wait);
+	return retval;
+}
+
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
new file mode 100644
index 0000000..c879c40
--- /dev/null
+++ b/arch/s390/kernel/setup.c
@@ -0,0 +1,632 @@
+/*
+ *  arch/s390/kernel/setup.c
+ *
+ *  S390 version
+ *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Hartmut Penner (hp@de.ibm.com),
+ *               Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ *  Derived from "arch/i386/kernel/setup.c"
+ *    Copyright (C) 1995, Linus Torvalds
+ */
+
+/*
+ * This file handles the architecture-dependent parts of initialization
+ */
+
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/tty.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/initrd.h>
+#include <linux/bootmem.h>
+#include <linux/root_dev.h>
+#include <linux/console.h>
+#include <linux/seq_file.h>
+#include <linux/kernel_stat.h>
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/smp.h>
+#include <asm/mmu_context.h>
+#include <asm/cpcmd.h>
+#include <asm/lowcore.h>
+#include <asm/irq.h>
+
+/*
+ * Machine setup..
+ */
+unsigned int console_mode = 0;
+unsigned int console_devno = -1;
+unsigned int console_irq = -1;
+unsigned long memory_size = 0;
+unsigned long machine_flags = 0;
+unsigned int default_storage_key = 0;
+/* physical memory layout; filled in before setup_arch() runs
+   (NOTE(review): presumably by early boot code - not visible here) */
+struct {
+	unsigned long addr, size, type;
+} memory_chunk[MEMORY_CHUNKS] = { { 0 } };
+#define CHUNK_READ_WRITE 0
+#define CHUNK_READ_ONLY 1
+volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
+
+/*
+ * Setup options
+ */
+extern int _text,_etext, _edata, _end;
+
+/*
+ * This is set up by the setup-routine at boot-time
+ * for S390 need to find out, what we have to setup
+ * using address 0x10400 ...
+ */
+
+#include <asm/setup.h>
+
+static char command_line[COMMAND_LINE_SIZE] = { 0, };
+
+/* /proc/iomem entries for the kernel text and data segments */
+static struct resource code_resource = {
+	.name  = "Kernel code",
+	.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
+};
+
+static struct resource data_resource = {
+	.name = "Kernel data",
+	.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
+};
+
+/*
+ * cpu_init() initializes state that is per-CPU.
+ */
+void __devinit cpu_init (void)
+{
+        int addr = hard_smp_processor_id();
+
+        /*
+         * Store processor id in lowcore (used e.g. in timer_interrupt)
+         */
+        asm volatile ("stidp %0": "=m" (S390_lowcore.cpu_data.cpu_id));
+        S390_lowcore.cpu_data.cpu_addr = addr;
+
+        /*
+         * Force FPU initialization:
+         */
+        clear_thread_flag(TIF_USEDFPU);
+        clear_used_math();
+
+	/* this cpu runs on init_mm; it must not enter here with a
+	   user mm already attached */
+	atomic_inc(&init_mm.mm_count);
+	current->active_mm = &init_mm;
+        if (current->mm)
+                BUG();
+        enter_lazy_tlb(&init_mm, current);
+}
+
+/*
+ * VM halt and poweroff setup routines.
+ * CP commands copied from the "vmhalt="/"vmpoff=" kernel parameters;
+ * an empty string means "no command".
+ */
+char vmhalt_cmd[128] = "";
+char vmpoff_cmd[128] = "";
+
+/*
+ * Copy at most n characters from src to dst, dropping any '"'
+ * characters on the way.  The result is always NUL terminated, so
+ * dst must provide room for n + 1 bytes (both callers pass a
+ * 128-byte buffer with n = 127).  The original relied on the
+ * destination being pre-zeroed and never wrote a terminator itself.
+ */
+static inline void strncpy_skip_quote(char *dst, char *src, int n)
+{
+        int sx, dx;
+
+        dx = 0;
+        for (sx = 0; src[sx] != 0; sx++) {
+                if (src[sx] == '"') continue;
+                dst[dx++] = src[sx];
+                if (dx >= n) break;
+        }
+        dst[dx] = 0;	/* guarantee a valid string even for short src */
+}
+
+/* "vmhalt=<cmd>": CP command issued at halt time when running under
+   VM (see do_machine_halt_nonsmp). */
+static int __init vmhalt_setup(char *str)
+{
+        strncpy_skip_quote(vmhalt_cmd, str, 127);
+        vmhalt_cmd[127] = 0;
+        return 1;
+}
+
+__setup("vmhalt=", vmhalt_setup);
+
+/* "vmpoff=<cmd>": CP command issued at power-off time when running
+   under VM (see do_machine_power_off_nonsmp). */
+static int __init vmpoff_setup(char *str)
+{
+        strncpy_skip_quote(vmpoff_cmd, str, 127);
+        vmpoff_cmd[127] = 0;
+        return 1;
+}
+
+__setup("vmpoff=", vmpoff_setup);
+
+/*
+ * condev= and conmode= setup parameter.
+ */
+
+/* "condev=<devno>": select the console device number (0..0xffff). */
+static int __init condev_setup(char *str)
+{
+	int vdev;
+
+	vdev = simple_strtoul(str, &str, 0);
+	if (vdev >= 0 && vdev < 65536) {
+		console_devno = vdev;
+		/* invalidate any previously known console subchannel */
+		console_irq = -1;
+	}
+	return 1;
+}
+
+__setup("condev=", condev_setup);
+
+/* "conmode=": choose the console driver.  The strncmp lengths include
+   the terminating NUL, so the argument must match exactly ("hwc",
+   "sclp", "3215" or "3270" with nothing appended). */
+static int __init conmode_setup(char *str)
+{
+#if defined(CONFIG_SCLP_CONSOLE)
+	if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0)
+                SET_CONSOLE_SCLP;
+#endif
+#if defined(CONFIG_TN3215_CONSOLE)
+	if (strncmp(str, "3215", 5) == 0)
+		SET_CONSOLE_3215;
+#endif
+#if defined(CONFIG_TN3270_CONSOLE)
+	if (strncmp(str, "3270", 5) == 0)
+		SET_CONSOLE_3270;
+#endif
+        return 1;
+}
+
+__setup("conmode=", conmode_setup);
+
+/*
+ * Pick a default console mode when none was given on the command
+ * line.  Under VM the CP "QUERY CONSOLE"/"QUERY TERM" output is
+ * parsed for the console device and terminal mode; on a P/390 a
+ * 3215/3270 console is preferred, otherwise the SCLP console is used.
+ */
+static void __init conmode_default(void)
+{
+	char query_buffer[1024];
+	char *ptr;
+
+        if (MACHINE_IS_VM) {
+		__cpcmd("QUERY CONSOLE", query_buffer, 1024);
+		console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
+		ptr = strstr(query_buffer, "SUBCHANNEL =");
+		/* guard against unexpected QUERY CONSOLE output; the
+		 * original dereferenced ptr unconditionally */
+		if (ptr != NULL)
+			console_irq = simple_strtoul(ptr + 13, NULL, 16);
+		__cpcmd("QUERY TERM", query_buffer, 1024);
+		ptr = strstr(query_buffer, "CONMODE");
+		/*
+		 * Set the conmode to 3215 so that the device recognition 
+		 * will set the cu_type of the console to 3215. If the
+		 * conmode is 3270 and we don't set it back then both
+		 * 3215 and the 3270 driver will try to access the console
+		 * device (3215 as console and 3270 as normal tty).
+		 */
+		__cpcmd("TERM CONMODE 3215", NULL, 0);
+		if (ptr == NULL) {
+#if defined(CONFIG_SCLP_CONSOLE)
+			SET_CONSOLE_SCLP;
+#endif
+			return;
+		}
+		if (strncmp(ptr + 8, "3270", 4) == 0) {
+#if defined(CONFIG_TN3270_CONSOLE)
+			SET_CONSOLE_3270;
+#elif defined(CONFIG_TN3215_CONSOLE)
+			SET_CONSOLE_3215;
+#elif defined(CONFIG_SCLP_CONSOLE)
+			SET_CONSOLE_SCLP;
+#endif
+		} else if (strncmp(ptr + 8, "3215", 4) == 0) {
+#if defined(CONFIG_TN3215_CONSOLE)
+			SET_CONSOLE_3215;
+#elif defined(CONFIG_TN3270_CONSOLE)
+			SET_CONSOLE_3270;
+#elif defined(CONFIG_SCLP_CONSOLE)
+			SET_CONSOLE_SCLP;
+#endif
+		}
+        } else if (MACHINE_IS_P390) {
+#if defined(CONFIG_TN3215_CONSOLE)
+		SET_CONSOLE_3215;
+#elif defined(CONFIG_TN3270_CONSOLE)
+		SET_CONSOLE_3270;
+#endif
+	} else {
+#if defined(CONFIG_SCLP_CONSOLE)
+		SET_CONSOLE_SCLP;
+#endif
+	}
+}
+
+#ifdef CONFIG_SMP
+/* On SMP the actual restart/halt/power-off work is done in smp.c. */
+extern void machine_restart_smp(char *);
+extern void machine_halt_smp(void);
+extern void machine_power_off_smp(void);
+
+void (*_machine_restart)(char *command) = machine_restart_smp;
+void (*_machine_halt)(void) = machine_halt_smp;
+void (*_machine_power_off)(void) = machine_power_off_smp;
+#else
+/*
+ * Reboot, halt and power_off routines for non SMP.
+ */
+extern void reipl(unsigned long devno);
+/* Restart: under VM let CP re-IPL, natively re-IPL from the ipl
+   device saved in the lowcore. */
+static void do_machine_restart_nonsmp(char * __unused)
+{
+	if (MACHINE_IS_VM)
+		cpcmd ("IPL", NULL, 0);
+	else
+		reipl (0x10000 | S390_lowcore.ipl_device);
+}
+
+/* Halt: optionally run the vmhalt= CP command, then stop this cpu. */
+static void do_machine_halt_nonsmp(void)
+{
+        if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
+                cpcmd(vmhalt_cmd, NULL, 0);
+        signal_processor(smp_processor_id(), sigp_stop_and_store_status);
+}
+
+/* Power off: optionally run the vmpoff= CP command, then stop this cpu. */
+static void do_machine_power_off_nonsmp(void)
+{
+        if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
+                cpcmd(vmpoff_cmd, NULL, 0);
+        signal_processor(smp_processor_id(), sigp_stop_and_store_status);
+}
+
+void (*_machine_restart)(char *command) = do_machine_restart_nonsmp;
+void (*_machine_halt)(void) = do_machine_halt_nonsmp;
+void (*_machine_power_off)(void) = do_machine_power_off_nonsmp;
+#endif
+
+/*
+ * Reboot, halt and power_off stubs. They just call _machine_restart,
+ * _machine_halt or _machine_power_off. 
+ */
+
+void machine_restart(char *command)
+{
+	/* flush pending console output before going down */
+	console_unblank();
+	_machine_restart(command);
+}
+
+EXPORT_SYMBOL(machine_restart);
+
+void machine_halt(void)
+{
+	console_unblank();
+	_machine_halt();
+}
+
+EXPORT_SYMBOL(machine_halt);
+
+void machine_power_off(void)
+{
+	console_unblank();
+	_machine_power_off();
+}
+
+EXPORT_SYMBOL(machine_power_off);
+
+/*
+ * Setup function called from init/main.c just after the banner
+ * was printed.
+ */
+extern char _pstart, _pend, _stext;
+
+/*
+ * setup_arch - architecture specific boot-time initialization.
+ *
+ * Parses the boot command line, sets up the bootmem allocator over
+ * the detected memory chunks, registers memory resources, builds the
+ * boot cpu's lowcore, switches on paging and picks the default
+ * console mode.
+ */
+void __init setup_arch(char **cmdline_p)
+{
+        unsigned long bootmap_size;
+        unsigned long memory_start, memory_end;
+        char c = ' ', cn, *to = command_line, *from = COMMAND_LINE;
+	unsigned long start_pfn, end_pfn;
+        static unsigned int smptrap=0;
+        unsigned long delay = 0;
+	struct _lowcore *lc;
+	int i;
+
+        if (smptrap)
+                return;
+        smptrap=1;
+
+        /*
+         * print what head.S has found out about the machine 
+         */
+#ifndef CONFIG_ARCH_S390X
+	printk((MACHINE_IS_VM) ?
+	       "We are running under VM (31 bit mode)\n" :
+	       "We are running native (31 bit mode)\n");
+	printk((MACHINE_HAS_IEEE) ?
+	       "This machine has an IEEE fpu\n" :
+	       "This machine has no IEEE fpu\n");
+#else /* CONFIG_ARCH_S390X */
+	printk((MACHINE_IS_VM) ?
+	       "We are running under VM (64 bit mode)\n" :
+	       "We are running native (64 bit mode)\n");
+#endif /* CONFIG_ARCH_S390X */
+
+        ROOT_DEV = Root_RAM0;
+        memory_start = (unsigned long) &_end;    /* fixit if use $CODELO etc*/
+#ifndef CONFIG_ARCH_S390X
+	/* NOTE(review): ~0x400000UL clears only bit 22; true 4MB
+	   alignment would mask with ~0x3fffffUL - confirm intent */
+	memory_end = memory_size & ~0x400000UL;  /* align memory end to 4MB */
+        /*
+         * We need some free virtual space to be able to do vmalloc.
+         * On a machine with 2GB memory we make sure that we have at
+         * least 128 MB free space for vmalloc.
+         */
+        if (memory_end > 1920*1024*1024)
+                memory_end = 1920*1024*1024;
+#else /* CONFIG_ARCH_S390X */
+	memory_end = memory_size & ~0x200000UL;  /* detected in head.s */
+#endif /* CONFIG_ARCH_S390X */
+        init_mm.start_code = PAGE_OFFSET;
+        init_mm.end_code = (unsigned long) &_etext;
+        init_mm.end_data = (unsigned long) &_edata;
+        init_mm.brk = (unsigned long) &_end;
+
+	code_resource.start = (unsigned long) &_text;
+	code_resource.end = (unsigned long) &_etext - 1;
+	data_resource.start = (unsigned long) &_etext;
+	data_resource.end = (unsigned long) &_edata - 1;
+
+        /* Save unparsed command line copy for /proc/cmdline */
+        memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
+        saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
+
+        /* Parse the command line: handle mem= and ipldelay= here and
+         * copy the rest, collapsing whitespace, into command_line. */
+        for (;;) {
+                /*
+                 * "mem=XXX[kKmM]" sets memsize 
+                 */
+                if (c == ' ' && strncmp(from, "mem=", 4) == 0) {
+                        memory_end = simple_strtoul(from+4, &from, 0);
+                        if ( *from == 'K' || *from == 'k' ) {
+                                memory_end = memory_end << 10;
+                                from++;
+                        } else if ( *from == 'M' || *from == 'm' ) {
+                                memory_end = memory_end << 20;
+                                from++;
+                        }
+                }
+                /*
+                 * "ipldelay=XXX[sm]" sets ipl delay in seconds or minutes
+                 */
+                if (c == ' ' && strncmp(from, "ipldelay=", 9) == 0) {
+                        delay = simple_strtoul(from+9, &from, 0);
+			if (*from == 's' || *from == 'S') {
+				delay = delay*1000000;
+				from++;
+			} else if (*from == 'm' || *from == 'M') {
+				delay = delay*60*1000000;
+				from++;
+			}
+			/* now wait for the requested amount of time */
+			udelay(delay);
+                }
+                cn = *(from++);
+                if (!cn)
+                        break;
+                if (cn == '\n')
+                        cn = ' ';  /* replace newlines with space */
+		if (cn == 0x0d)
+			cn = ' ';  /* replace 0x0d with space */
+                if (cn == ' ' && c == ' ')
+                        continue;  /* remove additional spaces */
+                c = cn;
+                /* leave room for the terminating '\0'; the original
+                 * compared against COMMAND_LINE_SIZE and could write
+                 * the terminator one byte past the buffer */
+                if (to - command_line >= COMMAND_LINE_SIZE - 1)
+                        break;
+                *(to++) = c;
+        }
+        if (c == ' ' && to > command_line) to--;
+        *to = '\0';
+        *cmdline_p = command_line;
+
+	/*
+	 * partially used pages are not usable - thus
+	 * we are rounding upwards:
+	 */
+	start_pfn = (__pa(&_end) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	end_pfn = max_pfn = memory_end >> PAGE_SHIFT;
+
+	/*
+	 * Initialize the boot-time allocator (with low memory only):
+	 */
+	bootmap_size = init_bootmem(start_pfn, end_pfn);
+
+	/*
+	 * Register RAM areas with the bootmem allocator.
+	 */
+	for (i = 0; i < 16 && memory_chunk[i].size > 0; i++) {
+		unsigned long start_chunk, end_chunk;
+
+		if (memory_chunk[i].type != CHUNK_READ_WRITE)
+			continue;
+		start_chunk = (memory_chunk[i].addr + PAGE_SIZE - 1);
+		start_chunk >>= PAGE_SHIFT;
+		end_chunk = (memory_chunk[i].addr + memory_chunk[i].size);
+		end_chunk >>= PAGE_SHIFT;
+		if (start_chunk < start_pfn)
+			start_chunk = start_pfn;
+		if (end_chunk > end_pfn)
+			end_chunk = end_pfn;
+		if (start_chunk < end_chunk)
+			free_bootmem(start_chunk << PAGE_SHIFT,
+				     (end_chunk - start_chunk) << PAGE_SHIFT);
+	}
+
+        /*
+         * Reserve the bootmem bitmap itself as well. We do this in two
+         * steps (first step was init_bootmem()) because this catches
+         * the (very unlikely) case of us accidentally initializing the
+         * bootmem allocator with an invalid RAM area.
+         */
+        reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+        if (INITRD_START) {
+		if (INITRD_START + INITRD_SIZE <= memory_end) {
+			reserve_bootmem(INITRD_START, INITRD_SIZE);
+			initrd_start = INITRD_START;
+			initrd_end = initrd_start + INITRD_SIZE;
+		} else {
+                        /* report INITRD_START; the original printed the
+                         * still-unset initrd_start variable instead */
+                        printk("initrd extends beyond end of memory "
+                               "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
+                               INITRD_START + INITRD_SIZE, memory_end);
+                        initrd_start = initrd_end = 0;
+		}
+        }
+#endif
+
+	/* Publish each memory chunk in the iomem tree and nest the
+	 * kernel text/data resources inside the chunk containing them. */
+	for (i = 0; i < 16 && memory_chunk[i].size > 0; i++) {
+		struct resource *res;
+
+		res = alloc_bootmem_low(sizeof(struct resource));
+		res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
+
+		switch (memory_chunk[i].type) {
+		case CHUNK_READ_WRITE:
+			res->name = "System RAM";
+			break;
+		case CHUNK_READ_ONLY:
+			res->name = "System ROM";
+			res->flags |= IORESOURCE_READONLY;
+			break;
+		default:
+			res->name = "reserved";
+		}
+		res->start = memory_chunk[i].addr;
+		res->end = memory_chunk[i].addr +  memory_chunk[i].size - 1;
+		request_resource(&iomem_resource, res);
+		request_resource(res, &code_resource);
+		request_resource(res, &data_resource);
+	}
+
+        /*
+         * Setup lowcore for boot cpu
+         */
+#ifndef CONFIG_ARCH_S390X
+	lc = (struct _lowcore *) __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0);
+	memset(lc, 0, PAGE_SIZE);
+#else /* CONFIG_ARCH_S390X */
+	lc = (struct _lowcore *) __alloc_bootmem(2*PAGE_SIZE, 2*PAGE_SIZE, 0);
+	memset(lc, 0, 2*PAGE_SIZE);
+#endif /* CONFIG_ARCH_S390X */
+	lc->restart_psw.mask = PSW_BASE_BITS;
+	lc->restart_psw.addr =
+		PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
+	lc->external_new_psw.mask = PSW_KERNEL_BITS;
+	lc->external_new_psw.addr =
+		PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
+	lc->svc_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_IO | PSW_MASK_EXT;
+	lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
+	lc->program_new_psw.mask = PSW_KERNEL_BITS;
+	lc->program_new_psw.addr =
+		PSW_ADDR_AMODE | (unsigned long)pgm_check_handler;
+	lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
+	lc->mcck_new_psw.addr =
+		PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
+	lc->io_new_psw.mask = PSW_KERNEL_BITS;
+	lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
+	lc->ipl_device = S390_lowcore.ipl_device;
+	lc->jiffy_timer = -1LL;
+	lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE;
+	lc->async_stack = (unsigned long)
+		__alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE;
+#ifdef CONFIG_CHECK_STACK
+	lc->panic_stack = (unsigned long)
+		__alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE;
+#endif
+	lc->current_task = (unsigned long) init_thread_union.thread_info.task;
+	lc->thread_info = (unsigned long) &init_thread_union;
+#ifdef CONFIG_ARCH_S390X
+	if (MACHINE_HAS_DIAG44)
+		lc->diag44_opcode = 0x83000044;
+	else
+		lc->diag44_opcode = 0x07000700;
+#endif /* CONFIG_ARCH_S390X */
+	set_prefix((u32)(unsigned long) lc);
+        cpu_init();
+        __cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr;
+
+	/*
+	 * Create kernel page tables and switch to virtual addressing.
+	 */
+        paging_init();
+
+        /* Setup default console */
+	conmode_default();
+}
+
+/* Print one cpu's identification (version/ident/machine as stored by
+   the stidp in cpu_init()). */
+void print_cpu_info(struct cpuinfo_S390 *cpuinfo)
+{
+   printk("cpu %d "
+#ifdef CONFIG_SMP
+           "phys_idx=%d "
+#endif
+           "vers=%02X ident=%06X machine=%04X unused=%04X\n",
+           cpuinfo->cpu_nr,
+#ifdef CONFIG_SMP
+           cpuinfo->cpu_addr,
+#endif
+           cpuinfo->cpu_id.version,
+           cpuinfo->cpu_id.ident,
+           cpuinfo->cpu_id.machine,
+           cpuinfo->cpu_id.unused);
+}
+
+/*
+ * show_cpuinfo - Get information on one CPU for use by procfs.
+ */
+
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+        struct cpuinfo_S390 *cpuinfo;
+	unsigned long n = (unsigned long) v - 1;	/* c_start hands us pos+1 */
+
+	if (!n) {
+		/* header printed once, before the first cpu entry */
+		seq_printf(m, "vendor_id       : IBM/S390\n"
+			       "# processors    : %i\n"
+			       "bogomips per cpu: %lu.%02lu\n",
+			       num_online_cpus(), loops_per_jiffy/(500000/HZ),
+			       (loops_per_jiffy/(5000/HZ))%100);
+	}
+	if (cpu_online(n)) {
+#ifdef CONFIG_SMP
+		/* a remote cpu's data lives in that cpu's lowcore */
+		if (smp_processor_id() == n)
+			cpuinfo = &S390_lowcore.cpu_data;
+		else
+			cpuinfo = &lowcore_ptr[n]->cpu_data;
+#else
+		cpuinfo = &S390_lowcore.cpu_data;
+#endif
+		seq_printf(m, "processor %li: "
+			       "version = %02X,  "
+			       "identification = %06X,  "
+			       "machine = %04X\n",
+			       n, cpuinfo->cpu_id.version,
+			       cpuinfo->cpu_id.ident,
+			       cpuinfo->cpu_id.machine);
+	}
+        return 0;
+}
+
+/* seq_file start: map position 0..NR_CPUS-1 to the opaque non-NULL
+   token pos+1 (0 would terminate the iteration). */
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+	if (*pos >= NR_CPUS)
+		return NULL;
+	return (void *)(unsigned long)(*pos + 1);
+}
+/* seq_file next: advance the position and redo the range check. */
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	*pos += 1;
+	return c_start(m, pos);
+}
+/* seq_file stop: nothing to release. */
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+/* iterator used for /proc/cpuinfo */
+struct seq_operations cpuinfo_op = {
+	.start	= c_start,
+	.next	= c_next,
+	.stop	= c_stop,
+	.show	= show_cpuinfo,
+};
+
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
new file mode 100644
index 0000000..610c1d0
--- /dev/null
+++ b/arch/s390/kernel/signal.c
@@ -0,0 +1,527 @@
+/*
+ *  arch/s390/kernel/signal.c
+ *
+ *  S390 version
+ *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
+ *
+ *    Based on Intel version
+ * 
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  1997-11-28  Modified for POSIX.1b signals by Richard Henderson
+ */
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+#include <linux/tty.h>
+#include <linux/personality.h>
+#include <linux/binfmts.h>
+#include <asm/ucontext.h>
+#include <asm/uaccess.h>
+#include <asm/lowcore.h>
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+
+/*
+ * Layout of the frame pushed on the user stack for a plain (non-RT)
+ * signal: register save area for the interrupted context, sigcontext,
+ * saved user registers, the signal number (kept for backtraces) and a
+ * stub that re-enters the kernel via sigreturn.
+ */
+typedef struct 
+{
+	__u8 callee_used_stack[__SIGNAL_FRAMESIZE];
+	struct sigcontext sc;
+	_sigregs sregs;
+	int signo;
+	__u8 retcode[S390_SYSCALL_SIZE];	/* svc __NR_sigreturn stub */
+} rt_sigframe_doc_dummy_comment_removed;
+
+/*
+ * Atomically swap in the new signal mask, and wait for a signal.
+ */
+/*
+ * sys_sigsuspend - classic sigsuspend(2).
+ *
+ * Atomically installs 'mask' (with SIGKILL/SIGSTOP forced deliverable)
+ * as the blocked set and sleeps until a handled signal arrives.  The
+ * original blocked set is kept in 'saveset' and passed to do_signal so
+ * the handler runs against the caller's mask; gpr2 is preset to -EINTR,
+ * the value the interrupted caller will see.  history0/history1 are
+ * ABI padding from the old syscall convention.
+ */
+asmlinkage int
+sys_sigsuspend(struct pt_regs * regs, int history0, int history1,
+	       old_sigset_t mask)
+{
+	sigset_t saveset;
+
+	mask &= _BLOCKABLE;
+	spin_lock_irq(&current->sighand->siglock);
+	saveset = current->blocked;
+	siginitset(&current->blocked, mask);
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+	regs->gprs[2] = -EINTR;
+
+	while (1) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule();
+		/* Only return once a signal was actually delivered. */
+		if (do_signal(regs, &saveset))
+			return -EINTR;
+	}
+}
+
+/*
+ * sys_rt_sigsuspend - POSIX.1b sigsuspend with a full sigset_t.
+ *
+ * Same protocol as sys_sigsuspend, but the new mask is copied in from
+ * user space and its size is validated against the kernel sigset_t.
+ */
+asmlinkage long
+sys_rt_sigsuspend(struct pt_regs *regs, sigset_t __user *unewset,
+						size_t sigsetsize)
+{
+	sigset_t saveset, newset;
+
+	/* XXX: Don't preclude handling different sized sigset_t's.  */
+	if (sigsetsize != sizeof(sigset_t))
+		return -EINVAL;
+
+	if (copy_from_user(&newset, unewset, sizeof(newset)))
+		return -EFAULT;
+	/* SIGKILL and SIGSTOP can never be blocked. */
+	sigdelsetmask(&newset, ~_BLOCKABLE);
+
+	spin_lock_irq(&current->sighand->siglock);
+	saveset = current->blocked;
+	current->blocked = newset;
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+	regs->gprs[2] = -EINTR;
+
+	while (1) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule();
+		if (do_signal(regs, &saveset))
+			return -EINTR;
+	}
+}
+
+/*
+ * sys_sigaction - old-style sigaction(2) with old_sigaction layout.
+ *
+ * Converts the user's old_sigaction to a k_sigaction, calls the
+ * generic do_sigaction, and converts the previous action back.
+ * NOTE(review): the sa_flags/sa_mask __get_user/__put_user return
+ * values are deliberately unchecked after the access_ok + first two
+ * fetches succeed — this mirrors the i386 implementation of the era.
+ */
+asmlinkage long
+sys_sigaction(int sig, const struct old_sigaction __user *act,
+	      struct old_sigaction __user *oact)
+{
+	struct k_sigaction new_ka, old_ka;
+	int ret;
+
+	if (act) {
+		old_sigset_t mask;
+		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
+		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
+		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+			return -EFAULT;
+		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
+		__get_user(mask, &act->sa_mask);
+		siginitset(&new_ka.sa.sa_mask, mask);
+	}
+
+	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+	if (!ret && oact) {
+		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
+		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
+		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+			return -EFAULT;
+		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+	}
+
+	return ret;
+}
+
+/*
+ * sys_sigaltstack - install/query the alternate signal stack.
+ * gpr15 is the user stack pointer, needed by do_sigaltstack to decide
+ * whether the task is currently executing on the alternate stack.
+ */
+asmlinkage long
+sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
+					struct pt_regs *regs)
+{
+	return do_sigaltstack(uss, uoss, regs->gprs[15]);
+}
+
+
+
+/*
+ * save_sigregs - dump the interrupted user context into a user-space
+ * _sigregs area (PSW + gprs, access regs, fp regs).
+ * Returns non-zero on fault.
+ */
+static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
+{
+	unsigned long old_mask = regs->psw.mask;
+	int err;
+  
+	save_access_regs(current->thread.acrs);
+
+	/* Copy a 'clean' PSW mask to the user to avoid leaking
+	   information about whether PER is currently on.  */
+	regs->psw.mask = PSW_MASK_MERGE(PSW_USER_BITS, regs->psw.mask);
+	err = __copy_to_user(&sregs->regs.psw, &regs->psw,
+			     sizeof(sregs->regs.psw)+sizeof(sregs->regs.gprs));
+	regs->psw.mask = old_mask;
+	if (err != 0)
+		return err;
+	err = __copy_to_user(&sregs->regs.acrs, current->thread.acrs,
+			     sizeof(sregs->regs.acrs));
+	if (err != 0)
+		return err;
+	/* 
+	 * We have to store the fp registers to current->thread.fp_regs
+	 * to merge them with the emulated registers.
+	 */
+	save_fp_regs(&current->thread.fp_regs);
+	return __copy_to_user(&sregs->fpregs, &current->thread.fp_regs,
+			      sizeof(s390_fp_regs));
+}
+
+/*
+ * restore_sigregs - reload user context from a _sigregs area at
+ * sigreturn time.  Returns positive number on error.
+ */
+static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
+{
+	unsigned long old_mask = regs->psw.mask;
+	int err;
+
+	/* Always make any pending restarted system call return -EINTR */
+	current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+	err = __copy_from_user(&regs->psw, &sregs->regs.psw,
+			       sizeof(sregs->regs.psw)+sizeof(sregs->regs.gprs));
+	/* Merge the saved user PSW bits with the privileged ones the
+	   user must not be able to change. */
+	regs->psw.mask = PSW_MASK_MERGE(old_mask, regs->psw.mask);
+	regs->psw.addr |= PSW_ADDR_AMODE;
+	if (err)
+		return err;
+	err = __copy_from_user(&current->thread.acrs, &sregs->regs.acrs,
+			       sizeof(sregs->regs.acrs));
+	if (err)
+		return err;
+	restore_access_regs(current->thread.acrs);
+
+	err = __copy_from_user(&current->thread.fp_regs, &sregs->fpregs,
+			       sizeof(s390_fp_regs));
+	/* Strip invalid bits from the user-supplied fp control word. */
+	current->thread.fp_regs.fpc &= FPC_VALID_MASK;
+	if (err)
+		return err;
+
+	restore_fp_regs(&current->thread.fp_regs);
+	regs->trap = -1;	/* disable syscall checks */
+	return 0;
+}
+
+/*
+ * sys_sigreturn - return from a non-RT signal handler.
+ *
+ * gpr15 points at the sigframe that setup_frame built.  Restores the
+ * blocked set and the saved registers; on any fault the frame is
+ * considered corrupt and the task gets SIGSEGV.
+ */
+asmlinkage long sys_sigreturn(struct pt_regs *regs)
+{
+	sigframe __user *frame = (sigframe __user *)regs->gprs[15];
+	sigset_t set;
+
+	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+		goto badframe;
+	if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE))
+		goto badframe;
+
+	sigdelsetmask(&set, ~_BLOCKABLE);
+	spin_lock_irq(&current->sighand->siglock);
+	current->blocked = set;
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+
+	if (restore_sigregs(regs, &frame->sregs))
+		goto badframe;
+
+	/* gpr2 carries the interrupted context's return value. */
+	return regs->gprs[2];
+
+badframe:
+	force_sig(SIGSEGV, current);
+	return 0;
+}
+
+/*
+ * sys_rt_sigreturn - return from an RT signal handler.
+ *
+ * Like sys_sigreturn but restores from the ucontext inside the
+ * rt_sigframe and also re-installs the saved sigaltstack settings.
+ */
+asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
+{
+	rt_sigframe __user *frame = (rt_sigframe __user *)regs->gprs[15];
+	sigset_t set;
+
+	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+		goto badframe;
+	if (__copy_from_user(&set.sig, &frame->uc.uc_sigmask, sizeof(set)))
+		goto badframe;
+
+	sigdelsetmask(&set, ~_BLOCKABLE);
+	spin_lock_irq(&current->sighand->siglock);
+	current->blocked = set;
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+
+	if (restore_sigregs(regs, &frame->uc.uc_mcontext))
+		goto badframe;
+
+	/* It is more difficult to avoid calling this function than to
+	   call it and ignore errors.  */
+	do_sigaltstack(&frame->uc.uc_stack, NULL, regs->gprs[15]);
+	return regs->gprs[2];
+
+badframe:
+	force_sig(SIGSEGV, current);
+	return 0;
+}
+
+/*
+ * Set up a signal frame.
+ */
+
+
+/*
+ * Determine which stack to use..
+ */
+/*
+ * get_sigframe - choose where on the user stack to build the signal
+ * frame and return an 8-byte aligned pointer frame_size bytes below it.
+ */
+static inline void __user *
+get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
+{
+	unsigned long sp;
+
+	/* Default to using normal stack */
+	sp = regs->gprs[15];
+
+	/* This is the X/Open sanctioned signal stack switching.  */
+	if (ka->sa.sa_flags & SA_ONSTACK) {
+		/* Switch only if not already on the alternate stack. */
+		if (! sas_ss_flags(sp))
+			sp = current->sas_ss_sp + current->sas_ss_size;
+	}
+
+	/* This is the legacy signal stack switching. */
+	else if (!user_mode(regs) &&
+		 !(ka->sa.sa_flags & SA_RESTORER) &&
+		 ka->sa.sa_restorer) {
+		sp = (unsigned long) ka->sa.sa_restorer;
+	}
+
+	/* Round down to a doubleword boundary. */
+	return (void __user *)((sp - frame_size) & -8ul);
+}
+
+/*
+ * Translate a kernel signal number through the exec domain's inverse
+ * signal map, if one is installed; signals >= 32 are never remapped.
+ */
+static inline int map_signal(int sig)
+{
+	struct exec_domain *ed = current_thread_info()->exec_domain;
+
+	if (ed && ed->signal_invmap && sig < 32)
+		return ed->signal_invmap[sig];
+	return sig;
+}
+
+/*
+ * setup_frame - build a non-RT signal frame on the user stack and
+ * redirect the user PSW into the handler.
+ *
+ * On return the user context is: gpr15 = frame, gpr14 = return stub,
+ * gpr2 = (mapped) signal number, gpr3 = &sigcontext, gpr4/gpr5 =
+ * trap number / fault address (passed as args because they are not
+ * part of the sigcontext ABI).  Any fault raises SIGSEGV.
+ */
+static void setup_frame(int sig, struct k_sigaction *ka,
+			sigset_t *set, struct pt_regs * regs)
+{
+	sigframe __user *frame;
+
+	frame = get_sigframe(ka, regs, sizeof(sigframe));
+	if (!access_ok(VERIFY_WRITE, frame, sizeof(sigframe)))
+		goto give_sigsegv;
+
+	if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE))
+		goto give_sigsegv;
+
+	if (save_sigregs(regs, &frame->sregs))
+		goto give_sigsegv;
+	if (__put_user(&frame->sregs, &frame->sc.sregs))
+		goto give_sigsegv;
+
+	/* Set up to return from userspace.  If provided, use a stub
+	   already in userspace.  */
+	if (ka->sa.sa_flags & SA_RESTORER) {
+                regs->gprs[14] = (unsigned long)
+			ka->sa.sa_restorer | PSW_ADDR_AMODE;
+	} else {
+		/* Otherwise write an "svc __NR_sigreturn" stub into the
+		   frame itself and return through it. */
+                regs->gprs[14] = (unsigned long)
+			frame->retcode | PSW_ADDR_AMODE;
+		if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn,
+	                       (u16 __user *)(frame->retcode)))
+			goto give_sigsegv;
+	}
+
+	/* Set up backchain. */
+	if (__put_user(regs->gprs[15], (addr_t __user *) frame))
+		goto give_sigsegv;
+
+	/* Set up registers for signal handler */
+	regs->gprs[15] = (unsigned long) frame;
+	regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
+
+	regs->gprs[2] = map_signal(sig);
+	regs->gprs[3] = (unsigned long) &frame->sc;
+
+	/* We forgot to include these in the sigcontext.
+	   To avoid breaking binary compatibility, they are passed as args. */
+	regs->gprs[4] = current->thread.trap_no;
+	regs->gprs[5] = current->thread.prot_addr;
+
+	/* Place signal number on stack to allow backtrace from handler.  */
+	if (__put_user(regs->gprs[2], (int __user *) &frame->signo))
+		goto give_sigsegv;
+	return;
+
+give_sigsegv:
+	force_sigsegv(sig, current);
+}
+
+/*
+ * setup_rt_frame - build an RT signal frame (siginfo + ucontext) on
+ * the user stack and redirect the user PSW into the handler.
+ *
+ * Handler entry convention: gpr2 = (mapped) signal number,
+ * gpr3 = &siginfo, gpr4 = &ucontext.  Any fault raises SIGSEGV.
+ */
+static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+			   sigset_t *set, struct pt_regs * regs)
+{
+	int err = 0;
+	rt_sigframe __user *frame;
+
+	frame = get_sigframe(ka, regs, sizeof(rt_sigframe));
+	if (!access_ok(VERIFY_WRITE, frame, sizeof(rt_sigframe)))
+		goto give_sigsegv;
+
+	if (copy_siginfo_to_user(&frame->info, info))
+		goto give_sigsegv;
+
+	/* Create the ucontext.  */
+	err |= __put_user(0, &frame->uc.uc_flags);
+	err |= __put_user(0, &frame->uc.uc_link);
+	err |= __put_user((void *)current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
+	err |= __put_user(sas_ss_flags(regs->gprs[15]),
+			  &frame->uc.uc_stack.ss_flags);
+	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+	err |= save_sigregs(regs, &frame->uc.uc_mcontext);
+	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+	if (err)
+		goto give_sigsegv;
+
+	/* Set up to return from userspace.  If provided, use a stub
+	   already in userspace.  */
+	if (ka->sa.sa_flags & SA_RESTORER) {
+                regs->gprs[14] = (unsigned long)
+			ka->sa.sa_restorer | PSW_ADDR_AMODE;
+	} else {
+		/* Otherwise return through an "svc __NR_rt_sigreturn"
+		   stub written into the frame itself. */
+                regs->gprs[14] = (unsigned long)
+			frame->retcode | PSW_ADDR_AMODE;
+		err |= __put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn,
+	                          (u16 __user *)(frame->retcode));
+	}
+
+	/* Set up backchain. */
+	if (__put_user(regs->gprs[15], (addr_t __user *) frame))
+		goto give_sigsegv;
+
+	/* Set up registers for signal handler */
+	regs->gprs[15] = (unsigned long) frame;
+	regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
+
+	regs->gprs[2] = map_signal(sig);
+	regs->gprs[3] = (unsigned long) &frame->info;
+	regs->gprs[4] = (unsigned long) &frame->uc;
+	return;
+
+give_sigsegv:
+	force_sigsegv(sig, current);
+}
+
+/*
+ * OK, we're invoking a handler
+ */	
+
+/*
+ * handle_signal - deliver one signal: build the appropriate frame
+ * (RT iff SA_SIGINFO) and, unless SA_NODEFER is set, block the
+ * handler's sa_mask plus the signal itself for the handler's duration.
+ */
+static void
+handle_signal(unsigned long sig, struct k_sigaction *ka,
+	      siginfo_t *info, sigset_t *oldset, struct pt_regs * regs)
+{
+	/* Set up the stack frame */
+	if (ka->sa.sa_flags & SA_SIGINFO)
+		setup_rt_frame(sig, ka, info, oldset, regs);
+	else
+		setup_frame(sig, ka, oldset, regs);
+
+	if (!(ka->sa.sa_flags & SA_NODEFER)) {
+		spin_lock_irq(&current->sighand->siglock);
+		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+		sigaddset(&current->blocked,sig);
+		recalc_sigpending();
+		spin_unlock_irq(&current->sighand->siglock);
+	}
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ *
+ * Note that we go through the signals twice: once to check the signals that
+ * the kernel can handle, and then we build all the user-level signal handling
+ * stack-frames in one go after that.
+ */
+/*
+ * do_signal - main signal delivery entry, called on return to user
+ * space.  Returns non-zero if a signal was delivered.
+ */
+int do_signal(struct pt_regs *regs, sigset_t *oldset)
+{
+	unsigned long retval = 0, continue_addr = 0, restart_addr = 0;
+	siginfo_t info;
+	int signr;
+	struct k_sigaction ka;
+
+	/*
+	 * We want the common case to go fast, which
+	 * is why we may in certain cases get here from
+	 * kernel mode. Just return without doing anything
+	 * if so.
+	 */
+	if (!user_mode(regs))
+		return 1;
+
+	if (!oldset)
+		oldset = &current->blocked;
+
+	/* Are we from a system call? */
+	if (regs->trap == __LC_SVC_OLD_PSW) {
+		continue_addr = regs->psw.addr;
+		/* The svc instruction length (ilc) gives the restart
+		   address: re-execute the svc itself. */
+		restart_addr = continue_addr - regs->ilc;
+		retval = regs->gprs[2];
+
+		/* Prepare for system call restart.  We do this here so that a
+		   debugger will see the already changed PSW. */
+		if (retval == -ERESTARTNOHAND ||
+		    retval == -ERESTARTSYS ||
+		    retval == -ERESTARTNOINTR) {
+			regs->gprs[2] = regs->orig_gpr2;
+			regs->psw.addr = restart_addr;
+		} else if (retval == -ERESTART_RESTARTBLOCK) {
+			regs->gprs[2] = -EINTR;
+		}
+	}
+
+	/* Get signal to deliver.  When running under ptrace, at this point
+	   the debugger may change all our registers ... */
+	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+
+	/* Depending on the signal settings we may need to revert the
+	   decision to restart the system call. */
+	if (signr > 0 && regs->psw.addr == restart_addr) {
+		if (retval == -ERESTARTNOHAND
+		    || (retval == -ERESTARTSYS
+			 && !(current->sighand->action[signr-1].sa.sa_flags
+			      & SA_RESTART))) {
+			regs->gprs[2] = -EINTR;
+			regs->psw.addr = continue_addr;
+		}
+	}
+
+	if (signr > 0) {
+		/* Whee!  Actually deliver the signal.  */
+#ifdef CONFIG_S390_SUPPORT
+		/* 31-bit compat tasks use the compat frame layout. */
+		if (test_thread_flag(TIF_31BIT)) {
+			extern void handle_signal32(unsigned long sig,
+						    struct k_sigaction *ka,
+						    siginfo_t *info,
+						    sigset_t *oldset,
+						    struct pt_regs *regs);
+			handle_signal32(signr, &ka, &info, oldset, regs);
+			return 1;
+	        }
+#endif
+		handle_signal(signr, &ka, &info, oldset, regs);
+		return 1;
+	}
+
+	/* Restart a different system call. */
+	if (retval == -ERESTART_RESTARTBLOCK
+	    && regs->psw.addr == continue_addr) {
+		regs->gprs[2] = __NR_restart_syscall;
+		set_thread_flag(TIF_RESTART_SVC);
+	}
+	return 0;
+}
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
new file mode 100644
index 0000000..fdfcf04
--- /dev/null
+++ b/arch/s390/kernel/smp.c
@@ -0,0 +1,840 @@
+/*
+ *  arch/s390/kernel/smp.c
+ *
+ *  S390 version
+ *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
+ *               Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *               Heiko Carstens (heiko.carstens@de.ibm.com)
+ *
+ *  based on other smp stuff by 
+ *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
+ *    (c) 1998 Ingo Molnar
+ *
+ * We work with logical cpu numbering everywhere we can. The only
+ * functions using the real cpu address (got from STAP) are the sigp
+ * functions. For all other functions we use the identity mapping.
+ * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
+ * used e.g. to find the idle task belonging to a logical cpu. Every array
+ * in the kernel is sorted by the logical cpu number and not by the physical
+ * one which is causing all the confusion with __cpu_logical_map and
+ * cpu_number_map in other architectures.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/kernel_stat.h>
+#include <linux/smp_lock.h>
+
+#include <linux/delay.h>
+#include <linux/cache.h>
+#include <linux/interrupt.h>
+#include <linux/cpu.h>
+
+#include <asm/sigp.h>
+#include <asm/pgalloc.h>
+#include <asm/irq.h>
+#include <asm/s390_ext.h>
+#include <asm/cpcmd.h>
+#include <asm/tlbflush.h>
+
+/* prototypes */
+
+extern volatile int __cpu_logical_map[];
+
+/*
+ * An array with a pointer the lowcore of every CPU.
+ */
+
+/* Per-cpu lowcore (prefix page) pointers, indexed by logical cpu. */
+struct _lowcore *lowcore_ptr[NR_CPUS];
+
+cpumask_t cpu_online_map;
+cpumask_t cpu_possible_map;
+
+/* Idle task of each cpu, filled in by smp_create_idle(). */
+static struct task_struct *current_set[NR_CPUS];
+
+EXPORT_SYMBOL(cpu_online_map);
+
+/*
+ * Reboot, halt and power_off routines for SMP.
+ */
+extern char vmhalt_cmd[];
+extern char vmpoff_cmd[];
+
+extern void reipl(unsigned long devno);
+
+static void smp_ext_bitcall(int, ec_bit_sig);
+static void smp_ext_bitcall_others(ec_bit_sig);
+
+/*
+ * Structure and data for smp_call_function(). This is designed to minimise
+ * static memory requirements. It also looks cleaner.
+ */
+static DEFINE_SPINLOCK(call_lock);
+
+struct call_data_struct {
+	void (*func) (void *info);	/* function to run on each cpu */
+	void *info;			/* opaque argument for func */
+	atomic_t started;		/* cpus that picked up the call */
+	atomic_t finished;		/* cpus done (only if wait set) */
+	int wait;
+};
+
+/* Current cross-call descriptor, protected by call_lock. */
+static struct call_data_struct * call_data;
+
+/*
+ * 'Call function' interrupt callback
+ */
+/*
+ * 'Call function' interrupt callback: run the function published in
+ * call_data on this cpu.  'started' is bumped before the call so the
+ * sender can proceed; 'finished' only when it asked to wait.
+ */
+static void do_call_function(void)
+{
+	void (*func) (void *info) = call_data->func;
+	void *info = call_data->info;
+	int wait = call_data->wait;
+
+	atomic_inc(&call_data->started);
+	(*func)(info);
+	if (wait)
+		atomic_inc(&call_data->finished);
+}
+
+/*
+ * this function sends a 'generic call function' IPI to all other CPUs
+ * in the system.
+ */
+
+int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
+			int wait)
+/*
+ * [SUMMARY] Run a function on all other CPUs.
+ * <func> The function to run. This must be fast and non-blocking.
+ * <info> An arbitrary pointer to pass to the function.
+ * <nonatomic> currently unused.
+ * <wait> If true, wait (atomically) until function has completed on other CPUs.
+ * [RETURNS] 0 on success, else a negative status code. Does not return until
+ * remote CPUs are nearly ready to execute <<func>> or are or have executed.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+{
+	struct call_data_struct data;
+	int cpus = num_online_cpus()-1;
+
+	/* Nothing to do on a uniprocessor. */
+	if (cpus <= 0)
+		return 0;
+
+	/* Can deadlock when called with interrupts disabled */
+	WARN_ON(irqs_disabled());
+
+	data.func = func;
+	data.info = info;
+	atomic_set(&data.started, 0);
+	data.wait = wait;
+	if (wait)
+		atomic_set(&data.finished, 0);
+
+	/* call_lock serializes use of the single call_data slot. */
+	spin_lock(&call_lock);
+	call_data = &data;
+	/* Send a message to all other CPUs and wait for them to respond */
+        smp_ext_bitcall_others(ec_call_function);
+
+	/* Wait for response */
+	while (atomic_read(&data.started) != cpus)
+		cpu_relax();
+
+	if (wait)
+		while (atomic_read(&data.finished) != cpus)
+			cpu_relax();
+	spin_unlock(&call_lock);
+
+	return 0;
+}
+
+/*
+ * Call a function on one CPU
+ * cpu : the CPU the function should be executed on
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler. You may call it from a bottom half.
+ *
+ * It is guaranteed that the called function runs on the specified CPU,
+ * preemption is disabled.
+ */
+/*
+ * smp_call_function_on - run func(info) on one specific cpu.
+ * Calls directly (with preemption disabled) when the target is the
+ * current cpu; otherwise uses the external-call machinery and spins
+ * until the target has started (and, if 'wait', finished) the call.
+ * Returns 0 on success, -EINVAL if the cpu is not online.
+ */
+int smp_call_function_on(void (*func) (void *info), void *info,
+			 int nonatomic, int wait, int cpu)
+{
+	struct call_data_struct data;
+	int curr_cpu;
+
+	if (!cpu_online(cpu))
+		return -EINVAL;
+
+	/* disable preemption for local function call */
+	curr_cpu = get_cpu();
+
+	if (curr_cpu == cpu) {
+		/* direct call to function */
+		func(info);
+		put_cpu();
+		return 0;
+	}
+
+	data.func = func;
+	data.info = info;
+	atomic_set(&data.started, 0);
+	data.wait = wait;
+	if (wait)
+		atomic_set(&data.finished, 0);
+
+	/* _bh variant: callable from bottom-half context. */
+	spin_lock_bh(&call_lock);
+	call_data = &data;
+	smp_ext_bitcall(cpu, ec_call_function);
+
+	/* Wait for response */
+	while (atomic_read(&data.started) != 1)
+		cpu_relax();
+
+	if (wait)
+		while (atomic_read(&data.finished) != 1)
+			cpu_relax();
+
+	spin_unlock_bh(&call_lock);
+	put_cpu();
+	return 0;
+}
+EXPORT_SYMBOL(smp_call_function_on);
+
+/* Stop every other online cpu via sigp, retrying while busy. */
+static inline void do_send_stop(void)
+{
+        int cpu, rc;
+
+        /* stop all processors */
+	for_each_online_cpu(cpu) {
+		if (cpu == smp_processor_id())
+			continue;
+		do {
+			rc = signal_processor(cpu, sigp_stop);
+		} while (rc == sigp_busy);
+	}
+}
+
+/* Have every other cpu store its status into its lowcore. */
+static inline void do_store_status(void)
+{
+        int cpu, rc;
+
+        /* store status of all processors in their lowcores (real 0) */
+	for_each_online_cpu(cpu) {
+		if (cpu == smp_processor_id())
+			continue;
+		do {
+			rc = signal_processor_p(
+				(__u32)(unsigned long) lowcore_ptr[cpu], cpu,
+				sigp_store_status_at_address);
+		} while(rc == sigp_busy);
+        }
+}
+
+/*
+ * this function sends a 'stop' sigp to all other CPUs in the system.
+ * it goes straight through.
+ */
+void smp_send_stop(void)
+{
+        /* write magic number to zero page (absolute 0) */
+	lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;
+
+	/* stop other processors. */
+	do_send_stop();
+
+	/* store status of other processors. */
+	do_store_status();
+}
+
+/*
+ * Reboot, halt and power_off routines for SMP.
+ */
+
+/*
+ * Restart callback, run on every cpu via on_each_cpu().  The first
+ * cpu to win the cpuid race performs the re-IPL; all others stop
+ * themselves.  atomic_compare_and_swap returns 0 on success here,
+ * so a non-zero result means another cpu is already in charge.
+ */
+static void do_machine_restart(void * __unused)
+{
+	int cpu;
+	static atomic_t cpuid = ATOMIC_INIT(-1);
+
+	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid))
+		signal_processor(smp_processor_id(), sigp_stop);
+
+	/* Wait for all other cpus to enter stopped state */
+	for_each_online_cpu(cpu) {
+		if (cpu == smp_processor_id())
+			continue;
+		while(!smp_cpu_not_running(cpu))
+			cpu_relax();
+	}
+
+	/* Store status of other cpus. */
+	do_store_status();
+
+	/*
+	 * Finally call reipl. Because we waited for all other
+	 * cpus to enter this function we know that they do
+	 * not hold any s390irq-locks (the cpus have been
+	 * interrupted by an external interrupt and s390irq
+	 * locks are always held disabled).
+	 */
+	if (MACHINE_IS_VM)
+		cpcmd ("IPL", NULL, 0);
+	else
+		reipl (0x10000 | S390_lowcore.ipl_device);
+}
+
+/* Arch hook for machine_restart on SMP: fan out to all cpus. */
+void machine_restart_smp(char * __unused) 
+{
+        on_each_cpu(do_machine_restart, NULL, 0, 0);
+}
+
+/*
+ * Park this cpu forever: mask external (cr0) and I/O (cr6) interrupt
+ * subclasses, then loop in enabled wait state.
+ */
+static void do_wait_for_stop(void)
+{
+	unsigned long cr[16];
+
+	__ctl_store(cr, 0, 15);
+	cr[0] &= ~0xffff;
+	cr[6] = 0;
+	__ctl_load(cr, 0, 15);
+	for (;;)
+		enabled_wait();
+}
+
+/*
+ * Halt callback, run on every cpu.  The cpuid-race winner stops all
+ * other cpus, issues the optional VM halt command and stops itself
+ * with status; every cpu ends up parked in do_wait_for_stop().
+ */
+static void do_machine_halt(void * __unused)
+{
+	static atomic_t cpuid = ATOMIC_INIT(-1);
+
+	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
+		smp_send_stop();
+		if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
+			cpcmd(vmhalt_cmd, NULL, 0);
+		signal_processor(smp_processor_id(),
+				 sigp_stop_and_store_status);
+	}
+	do_wait_for_stop();
+}
+
+/* Arch hook for machine_halt on SMP. */
+void machine_halt_smp(void)
+{
+        on_each_cpu(do_machine_halt, NULL, 0, 0);
+}
+
+/* Power-off callback; identical to halt except for the VM command. */
+static void do_machine_power_off(void * __unused)
+{
+	static atomic_t cpuid = ATOMIC_INIT(-1);
+
+	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
+		smp_send_stop();
+		if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
+			cpcmd(vmpoff_cmd, NULL, 0);
+		signal_processor(smp_processor_id(),
+				 sigp_stop_and_store_status);
+	}
+	do_wait_for_stop();
+}
+
+/* Arch hook for machine_power_off on SMP. */
+void machine_power_off_smp(void)
+{
+        on_each_cpu(do_machine_power_off, NULL, 0, 0);
+}
+
+/*
+ * This is the main routine where commands issued by other
+ * cpus are handled.
+ */
+
+/*
+ * External-call (0x1202) interrupt handler: atomically fetch and
+ * clear the signal bits other cpus set in our lowcore and dispatch.
+ */
+void do_ext_call_interrupt(struct pt_regs *regs, __u16 code)
+{
+        unsigned long bits;
+
+        /*
+         * handle bit signal external calls
+         *
+         * For the ec_schedule signal we have to do nothing. All the work
+         * is done automatically when we return from the interrupt.
+         */
+	bits = xchg(&S390_lowcore.ext_call_fast, 0);
+
+	if (test_bit(ec_call_function, &bits)) 
+		do_call_function();
+}
+
+/*
+ * Send an external call sigp to another cpu and return without waiting
+ * for its completion.
+ */
+/*
+ * Send an external call sigp to another cpu and return without waiting
+ * for its completion.  Retries while the target reports sigp_busy.
+ */
+static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
+{
+        /*
+         * Set signaling bit in lowcore of target cpu and kick it
+         */
+	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
+	while(signal_processor(cpu, sigp_external_call) == sigp_busy)
+		udelay(10);
+}
+
+/*
+ * Send an external call sigp to every other cpu in the system and
+ * return without waiting for its completion.
+ */
+static void smp_ext_bitcall_others(ec_bit_sig sig)
+{
+        int cpu;
+
+	for_each_online_cpu(cpu) {
+		if (cpu == smp_processor_id())
+                        continue;
+                /*
+                 * Set signaling bit in lowcore of target cpu and kick it
+                 */
+		set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
+		while (signal_processor(cpu, sigp_external_call) == sigp_busy)
+			udelay(10);
+        }
+}
+
+#ifndef CONFIG_ARCH_S390X
+/*
+ * this function sends a 'purge tlb' signal to another CPU.
+ */
+/* Per-cpu callback for smp_ptlb_all: flush this cpu's TLB. */
+void smp_ptlb_callback(void *info)
+{
+	local_flush_tlb();
+}
+
+/* Flush the TLB on every cpu (31-bit kernels only). */
+void smp_ptlb_all(void)
+{
+        on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
+}
+EXPORT_SYMBOL(smp_ptlb_all);
+#endif /* ! CONFIG_ARCH_S390X */
+
+/*
+ * this function sends a 'reschedule' IPI to another CPU.
+ * it goes straight through and wastes no time serializing
+ * anything. Worst case is that we lose a reschedule ...
+ */
+/*
+ * Kick a cpu into rescheduling.  The ec_schedule bit itself needs no
+ * handler work; returning from the external interrupt is enough.
+ */
+void smp_send_reschedule(int cpu)
+{
+        smp_ext_bitcall(cpu, ec_schedule);
+}
+
+/*
+ * parameter area for the set/clear control bit callbacks
+ */
+typedef struct
+{
+	__u16 start_ctl;	/* first control register to modify */
+	__u16 end_ctl;		/* last control register to modify */
+	unsigned long orvals[16];	/* bits to set, per register */
+	unsigned long andvals[16];	/* bits to keep, per register */
+} ec_creg_mask_parms;
+
+/*
+ * callback for setting/clearing control bits
+ */
+/*
+ * Callback for setting/clearing control bits on this cpu: for each
+ * register in [start_ctl, end_ctl] apply the and/or masks from the
+ * ec_creg_mask_parms passed in info.
+ */
+void smp_ctl_bit_callback(void *info) {
+	ec_creg_mask_parms *pp;
+	unsigned long cregs[16];
+	int i;
+	
+	pp = (ec_creg_mask_parms *) info;
+	__ctl_store(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl);
+	for (i = pp->start_ctl; i <= pp->end_ctl; i++)
+		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
+	__ctl_load(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl);
+}
+
+/*
+ * Set a bit in a control register of all cpus
+ */
+/*
+ * smp_ctl_set_bit - set bit 'bit' of control register 'cr' on all cpus.
+ * Remote cpus apply the mask via smp_ctl_bit_callback; the local cpu
+ * uses __ctl_set_bit directly, with preemption disabled so we cannot
+ * migrate between the cross-call and the local update.
+ */
+void smp_ctl_set_bit(int cr, int bit) {
+        ec_creg_mask_parms parms;
+
+	parms.start_ctl = cr;
+	parms.end_ctl = cr;
+	/* Shift an unsigned long: control registers are 64 bits wide on
+	 * 64 bit kernels, and a plain int "1 << bit" would be undefined
+	 * behaviour for bit numbers >= 31. */
+	parms.orvals[cr] = 1UL << bit;
+	parms.andvals[cr] = -1L;
+	preempt_disable();
+	smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
+        __ctl_set_bit(cr, bit);
+	preempt_enable();
+}
+
+/*
+ * Clear a bit in a control register of all cpus
+ */
+/*
+ * smp_ctl_clear_bit - clear bit 'bit' of control register 'cr' on all
+ * cpus.  Mirror image of smp_ctl_set_bit.
+ */
+void smp_ctl_clear_bit(int cr, int bit) {
+        ec_creg_mask_parms parms;
+
+	parms.start_ctl = cr;
+	parms.end_ctl = cr;
+	parms.orvals[cr] = 0;
+	/* 1UL (not 1L) avoids undefined behaviour when shifting into the
+	 * sign bit for high control-register bit numbers. */
+	parms.andvals[cr] = ~(1UL << bit);
+	preempt_disable();
+	smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
+        __ctl_clear_bit(cr, bit);
+	preempt_enable();
+}
+
+/*
+ * Lets check how many CPUs we have.
+ */
+
+/*
+ * smp_check_cpus - probe physical cpu addresses 0..65535 with
+ * sigp_sense, build __cpu_logical_map, and populate cpu_present_map
+ * and cpu_possible_map.  The boot cpu is always logical cpu 0.
+ */
+void
+__init smp_check_cpus(unsigned int max_cpus)
+{
+	int cpu, num_cpus;
+	__u16 boot_cpu_addr;
+
+	/*
+	 * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
+	 */
+
+	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
+	current_thread_info()->cpu = 0;
+	num_cpus = 1;
+	for (cpu = 0; cpu <= 65535 && num_cpus < max_cpus; cpu++) {
+		if ((__u16) cpu == boot_cpu_addr)
+			continue;
+		/* Map the candidate before sensing it via its logical id. */
+		__cpu_logical_map[num_cpus] = (__u16) cpu;
+		if (signal_processor(num_cpus, sigp_sense) ==
+		    sigp_not_operational)
+			continue;
+		cpu_set(num_cpus, cpu_present_map);
+		num_cpus++;
+	}
+
+	/* Allow hotplug up to max_cpus, not just the detected count. */
+	for (cpu = 1; cpu < max_cpus; cpu++)
+		cpu_set(cpu, cpu_possible_map);
+
+	printk("Detected %d CPU's\n",(int) num_cpus);
+	printk("Boot cpu address %2X\n", boot_cpu_addr);
+}
+
+/*
+ *      Activate a secondary processor.
+ */
+extern void init_cpu_timer(void);
+extern void init_cpu_vtimer(void);
+extern int pfault_init(void);
+extern void pfault_fini(void);
+
+/*
+ * start_secondary - entry point of a freshly started secondary cpu.
+ * Initializes the cpu, its timers and pfault handling, marks itself
+ * online, enables interrupts and enters the idle loop.  Never returns.
+ */
+int __devinit start_secondary(void *cpuvoid)
+{
+        /* Setup the cpu */
+        cpu_init();
+        /* init per CPU timer */
+        init_cpu_timer();
+#ifdef CONFIG_VIRT_TIMER
+        init_cpu_vtimer();
+#endif
+#ifdef CONFIG_PFAULT
+	/* Enable pfault pseudo page faults on this cpu. */
+	pfault_init();
+#endif
+	/* Mark this cpu as online */
+	cpu_set(smp_processor_id(), cpu_online_map);
+	/* Switch on interrupts */
+	local_irq_enable();
+        /* Print info about this processor */
+        print_cpu_info(&S390_lowcore.cpu_data);
+        /* cpu_idle will call schedule for us */
+        cpu_idle();
+        return 0;
+}
+
+/*
+ * smp_create_idle - fork the idle task for a cpu and remember it in
+ * current_set[] for __cpu_up.  Panics on failure (boot-time only).
+ */
+static void __init smp_create_idle(unsigned int cpu)
+{
+	struct task_struct *p;
+
+	/*
+	 *  don't care about the psw and regs settings since we'll never
+	 *  reschedule the forked task.
+	 */
+	p = fork_idle(cpu);
+	if (IS_ERR(p))
+		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
+	current_set[cpu] = p;
+}
+
+/* Reserving and releasing of CPUs */
+
+/* Protects smp_cpu_reserved[]; reservation keeps a cpu from being
+ * taken offline by __cpu_disable. */
+static DEFINE_SPINLOCK(smp_reserve_lock);
+static int smp_cpu_reserved[NR_CPUS];
+
+/*
+ * smp_get_cpu - reserve a cpu out of cpu_mask, preferring one that is
+ * already reserved (refcount bump).  Returns the cpu number, or
+ * -ENODEV if no online cpu in the mask is available.
+ */
+int
+smp_get_cpu(cpumask_t cpu_mask)
+{
+	unsigned long flags;
+	int cpu;
+
+	spin_lock_irqsave(&smp_reserve_lock, flags);
+	/* Try to find an already reserved cpu. */
+	for_each_cpu_mask(cpu, cpu_mask) {
+		if (smp_cpu_reserved[cpu] != 0) {
+			smp_cpu_reserved[cpu]++;
+			/* Found one. */
+			goto out;
+		}
+	}
+	/* Reserve a new cpu from cpu_mask. */
+	for_each_cpu_mask(cpu, cpu_mask) {
+		if (cpu_online(cpu)) {
+			smp_cpu_reserved[cpu]++;
+			goto out;
+		}
+	}
+	cpu = -ENODEV;
+out:
+	spin_unlock_irqrestore(&smp_reserve_lock, flags);
+	return cpu;
+}
+
+/* smp_put_cpu - drop one reference taken by smp_get_cpu. */
+void
+smp_put_cpu(int cpu)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&smp_reserve_lock, flags);
+	smp_cpu_reserved[cpu]--;
+	spin_unlock_irqrestore(&smp_reserve_lock, flags);
+}
+
+/*
+ * cpu_stopped - sigp-sense the cpu and test the 'stopped' status bit
+ * (0x40) in the stored status word.  Returns 1 if stopped, else 0.
+ */
+static inline int
+cpu_stopped(int cpu)
+{
+	__u32 status;
+
+	/* Check for stopped state */
+	if (signal_processor_ps(&status, 0, cpu, sigp_sense) == sigp_status_stored) {
+		if (status & 0x40)
+			return 1;
+	}
+	return 0;
+}
+
+/* Upping and downing of CPUs */
+
+/*
+ * __cpu_up - bring logical cpu 'cpu' online: find a stopped physical
+ * cpu for it, point its prefix register at the prepared lowcore, set
+ * up its initial kernel stack frame and restart it.  Spins until
+ * start_secondary marks the cpu online.
+ */
+int
+__cpu_up(unsigned int cpu)
+{
+	struct task_struct *idle;
+        struct _lowcore    *cpu_lowcore;
+	struct stack_frame *sf;
+        sigp_ccode          ccode;
+	int                 curr_cpu;
+
+	/* Scan physical addresses for a stopped cpu to claim. */
+	for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
+		__cpu_logical_map[cpu] = (__u16) curr_cpu;
+		if (cpu_stopped(cpu))
+			break;
+	}
+
+	if (!cpu_stopped(cpu))
+		return -ENODEV;
+
+	/* Make the new cpu's lowcore its prefix page. */
+	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
+				   cpu, sigp_set_prefix);
+	if (ccode){
+		printk("sigp_set_prefix failed for cpu %d "
+		       "with condition code %d\n",
+		       (int) cpu, (int) ccode);
+		return -EIO;
+	}
+
+	idle = current_set[cpu];
+        cpu_lowcore = lowcore_ptr[cpu];
+	cpu_lowcore->kernel_stack = (unsigned long)
+		idle->thread_info + (THREAD_SIZE);
+	/* Carve an initial pt_regs + stack frame off the kernel stack. */
+	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
+				     - sizeof(struct pt_regs)
+				     - sizeof(struct stack_frame));
+	memset(sf, 0, sizeof(struct stack_frame));
+	sf->gprs[9] = (unsigned long) sf;
+	cpu_lowcore->save_area[15] = (unsigned long) sf;
+	__ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
+	__asm__ __volatile__("stam  0,15,0(%0)"
+			     : : "a" (&cpu_lowcore->access_regs_save_area)
+			     : "memory");
+	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
+        cpu_lowcore->current_task = (unsigned long) idle;
+        cpu_lowcore->cpu_data.cpu_nr = cpu;
+	/* Make the lowcore stores visible before restarting the cpu. */
+	eieio();
+	signal_processor(cpu,sigp_restart);
+
+	while (!cpu_online(cpu))
+		cpu_relax();
+	return 0;
+}
+
+/*
+ * __cpu_disable - take the current cpu out of service for hotplug.
+ * Fails with -EBUSY while the cpu is reserved (smp_get_cpu).  Masks
+ * external, I/O and most machine-check interrupt subclasses locally
+ * by calling smp_ctl_bit_callback directly on this cpu.
+ */
+int
+__cpu_disable(void)
+{
+	unsigned long flags;
+	ec_creg_mask_parms cr_parms;
+
+	spin_lock_irqsave(&smp_reserve_lock, flags);
+	if (smp_cpu_reserved[smp_processor_id()] != 0) {
+		spin_unlock_irqrestore(&smp_reserve_lock, flags);
+		return -EBUSY;
+	}
+
+#ifdef CONFIG_PFAULT
+	/* Disable pfault pseudo page faults on this cpu. */
+	pfault_fini();
+#endif
+
+	/* disable all external interrupts */
+
+	cr_parms.start_ctl = 0;
+	cr_parms.end_ctl = 0;
+	cr_parms.orvals[0] = 0;
+	cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 |
+				1<<11 | 1<<10 | 1<< 6 | 1<< 4);
+	smp_ctl_bit_callback(&cr_parms);
+
+	/* disable all I/O interrupts */
+
+	cr_parms.start_ctl = 6;
+	cr_parms.end_ctl = 6;
+	cr_parms.orvals[6] = 0;
+	cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 |
+				1<<27 | 1<<26 | 1<<25 | 1<<24);
+	smp_ctl_bit_callback(&cr_parms);
+
+	/* disable most machine checks */
+
+	cr_parms.start_ctl = 14;
+	cr_parms.end_ctl = 14;
+	cr_parms.orvals[14] = 0;
+	cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24);
+	smp_ctl_bit_callback(&cr_parms);
+
+	spin_unlock_irqrestore(&smp_reserve_lock, flags);
+	return 0;
+}
+
+void
+__cpu_die(unsigned int cpu)
+{
+	/* Busy-wait until the target cpu has really stopped running. */
+	for (;;) {
+		if (smp_cpu_not_running(cpu))
+			break;
+		cpu_relax();
+	}
+	printk("Processor %d spun down\n", cpu);
+}
+
+void
+cpu_die(void)
+{
+	/* Drop the idle task, then stop this cpu; this never returns. */
+	idle_task_exit();
+	signal_processor(smp_processor_id(), sigp_stop);
+	BUG();
+	while (1)
+		;
+}
+
+/*
+ *	Cycle through the processors and setup structures.
+ */
+
+/*
+ * Allocate lowcore pages and interrupt stacks for all possible cpus,
+ * register the 0x1202 external interrupt (cross-cpu signalling) and
+ * create the idle tasks for the secondary cpus.
+ */
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+	unsigned long stack;
+	unsigned int cpu;
+        int i;
+
+        /* request the 0x1202 external interrupt */
+        if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0)
+                panic("Couldn't request external interrupt 0x1202");
+        smp_check_cpus(max_cpus);
+        memset(lowcore_ptr,0,sizeof(lowcore_ptr));  
+        /*
+         *  Initialize prefix pages and stacks for all possible cpus
+         */
+	print_cpu_info(&S390_lowcore.cpu_data);
+
+        for(i = 0; i < NR_CPUS; i++) {
+		if (!cpu_possible(i))
+			continue;
+		/* Lowcore needs GFP_DMA memory; one page, two on 64 bit. */
+		lowcore_ptr[i] = (struct _lowcore *)
+			__get_free_pages(GFP_KERNEL|GFP_DMA, 
+					sizeof(void*) == 8 ? 1 : 0);
+		stack = __get_free_pages(GFP_KERNEL,ASYNC_ORDER);
+		if (lowcore_ptr[i] == NULL || stack == 0ULL)
+			panic("smp_boot_cpus failed to allocate memory\n");
+
+		/* Start from a copy of the boot cpu's lowcore contents. */
+		*(lowcore_ptr[i]) = S390_lowcore;
+		lowcore_ptr[i]->async_stack = stack + (ASYNC_SIZE);
+#ifdef CONFIG_CHECK_STACK
+		/* Extra stack used by the stack overflow check. */
+		stack = __get_free_pages(GFP_KERNEL,0);
+		if (stack == 0ULL)
+			panic("smp_boot_cpus failed to allocate memory\n");
+		lowcore_ptr[i]->panic_stack = stack + (PAGE_SIZE);
+#endif
+	}
+	/* Switch the boot cpu to its newly allocated lowcore, too. */
+	set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);
+
+	/* Fork an idle task for every secondary cpu. */
+	for_each_cpu(cpu)
+		if (cpu != smp_processor_id())
+			smp_create_idle(cpu);
+}
+
+/* Hook up the boot cpu (always logical cpu 0) in the cpu maps. */
+void __devinit smp_prepare_boot_cpu(void)
+{
+	BUG_ON(smp_processor_id() != 0);
+
+	/* Mark cpu 0 online/present/possible and set up its per-cpu data. */
+	cpu_set(0, cpu_online_map);
+	cpu_set(0, cpu_present_map);
+	cpu_set(0, cpu_possible_map);
+	S390_lowcore.percpu_offset = __per_cpu_offset[0];
+	current_set[0] = current;
+}
+
+void smp_cpus_done(unsigned int max_cpus)
+{
+	/* Every cpu that is possible is also considered present. */
+	cpu_present_map = cpu_possible_map;
+}
+
+/*
+ * the frequency of the profiling timer can be changed
+ * by writing a multiplier value into /proc/profile.
+ *
+ * usually you want to run this on all CPUs ;)
+ */
+/* The multiplier is ignored on s390; report success regardless. */
+int setup_profiling_timer(unsigned int multiplier)
+{
+        return 0;
+}
+
+/* One sysfs cpu device per cpu. */
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
+
+/*
+ * Register a cpu device for every cpu so it shows up in sysfs.
+ * Registration failures are logged but not fatal.
+ */
+static int __init topology_init(void)
+{
+	int cpu;
+	int ret;
+
+	for_each_cpu(cpu) {
+		ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
+		if (ret)
+			printk(KERN_WARNING "topology_init: register_cpu %d "
+			       "failed (%d)\n", cpu, ret);
+	}
+	return 0;
+}
+
+subsys_initcall(topology_init);
+
+/* SMP primitives exported for use by modules. */
+EXPORT_SYMBOL(cpu_possible_map);
+EXPORT_SYMBOL(lowcore_ptr);
+EXPORT_SYMBOL(smp_ctl_set_bit);
+EXPORT_SYMBOL(smp_ctl_clear_bit);
+EXPORT_SYMBOL(smp_call_function);
+EXPORT_SYMBOL(smp_get_cpu);
+EXPORT_SYMBOL(smp_put_cpu);
+
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
new file mode 100644
index 0000000..efe6b83
--- /dev/null
+++ b/arch/s390/kernel/sys_s390.c
@@ -0,0 +1,270 @@
+/*
+ *  arch/s390/kernel/sys_s390.c
+ *
+ *  S390 version
+ *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ *               Thomas Spatzier (tspat@de.ibm.com)
+ *
+ *  Derived from "arch/i386/kernel/sys_i386.c"
+ *
+ *  This file contains various random system calls that
+ *  have a non-standard calling sequence on the Linux/s390
+ *  platform.
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/stat.h>
+#include <linux/syscalls.h>
+#include <linux/mman.h>
+#include <linux/file.h>
+#include <linux/utsname.h>
+#ifdef CONFIG_ARCH_S390X
+#include <linux/personality.h>
+#endif /* CONFIG_ARCH_S390X */
+
+#include <asm/uaccess.h>
+#include <asm/ipc.h>
+
+/*
+ * sys_pipe() is the normal C calling standard for creating
+ * a pipe. It's not the way Unix traditionally does this, though.
+ */
+asmlinkage long sys_pipe(unsigned long __user *fildes)
+{
+	int fds[2];
+	int rc;
+
+	rc = do_pipe(fds);
+	if (rc)
+		return rc;
+	/* Hand both new descriptors back to user space. */
+	if (copy_to_user(fildes, fds, 2*sizeof(int)))
+		rc = -EFAULT;
+	return rc;
+}
+
+/* common code for old and new mmaps */
+static inline long do_mmap2(
+	unsigned long addr, unsigned long len,
+	unsigned long prot, unsigned long flags,
+	unsigned long fd, unsigned long pgoff)
+{
+	struct file *file = NULL;
+	long rc;
+
+	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+	if (!(flags & MAP_ANONYMOUS)) {
+		/* A file backed mapping needs a reference on the file. */
+		file = fget(fd);
+		if (!file)
+			return -EBADF;
+	}
+
+	down_write(&current->mm->mmap_sem);
+	rc = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+	up_write(&current->mm->mmap_sem);
+
+	if (file)
+		fput(file);
+	return rc;
+}
+
+/*
+ * Perform the select(nd, in, out, ex, tv) and mmap() system
+ * calls. Linux for S/390 isn't able to handle more than 5
+ * system call parameters, so these system calls used a memory
+ * block for parameter passing..
+ */
+
+/*
+ * Parameter block shared by old_mmap() and sys_mmap2().  Linux for
+ * S/390 cannot pass more than 5 syscall parameters in registers, so
+ * user space hands over a pointer to this struct instead.
+ */
+struct mmap_arg_struct {
+	unsigned long addr;
+	unsigned long len;
+	unsigned long prot;
+	unsigned long flags;
+	unsigned long fd;
+	unsigned long offset;
+};
+
+/*
+ * mmap2: arguments come in a single user space parameter block and,
+ * unlike old_mmap, the offset is given in units of pages.
+ */
+asmlinkage long sys_mmap2(struct mmap_arg_struct __user *arg)
+{
+	struct mmap_arg_struct a;
+	long error = -EFAULT;
+
+	if (copy_from_user(&a, arg, sizeof(a)))
+		goto out;
+	/*
+	 * do_mmap2() returns a long; the previous int intermediate
+	 * truncated mapping addresses above 4GB on 64 bit kernels.
+	 */
+	error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
+out:
+	return error;
+}
+
+asmlinkage long old_mmap(struct mmap_arg_struct __user *arg)
+{
+	struct mmap_arg_struct a;
+
+	/* Fetch the parameter block from user space. */
+	if (copy_from_user(&a, arg, sizeof(a)))
+		return -EFAULT;
+
+	/* The old interface takes a byte offset; it must be page aligned. */
+	if (a.offset & ~PAGE_MASK)
+		return -EINVAL;
+
+	return do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd,
+			a.offset >> PAGE_SHIFT);
+}
+
+#ifndef CONFIG_ARCH_S390X
+/*
+ * Parameter block for old_select().  The pointers refer to user space
+ * memory; tag them __user for sparse address space checking, matching
+ * the rest of this file.
+ */
+struct sel_arg_struct {
+	unsigned long n;
+	fd_set __user *inp, *outp, *exp;
+	struct timeval __user *tvp;
+};
+
+/* Legacy select: all five arguments arrive in one user space block. */
+asmlinkage long old_select(struct sel_arg_struct __user *arg)
+{
+	struct sel_arg_struct a;
+
+	if (copy_from_user(&a, arg, sizeof(a)))
+		return -EFAULT;
+	/* sys_select() does the appropriate kernel locking */
+	return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
+
+}
+#endif /* CONFIG_ARCH_S390X */
+
+/*
+ * sys_ipc() is the de-multiplexer for the SysV IPC calls..
+ *
+ * This is really horribly ugly.
+ *
+ * Returns the result of the dispatched call, -EINVAL/-EFAULT for a
+ * bad indirect argument block, or -ENOSYS for an unknown call number.
+ */
+asmlinkage long sys_ipc(uint call, int first, unsigned long second,
+				  unsigned long third, void __user *ptr)
+{
+	struct ipc_kludge tmp;
+	int ret;
+
+	switch (call) {
+	case SEMOP:
+		/* semop is semtimedop without a timeout. */
+		return sys_semtimedop(first, (struct sembuf __user *)ptr,
+				      (unsigned)second, NULL);
+	case SEMTIMEDOP:
+		return sys_semtimedop(first, (struct sembuf __user *)ptr,
+				      (unsigned)second,
+				      (const struct timespec __user *) third);
+	case SEMGET:
+		return sys_semget(first, (int)second, third);
+	case SEMCTL: {
+		union semun fourth;
+
+		/* The semun argument is passed indirectly through ptr. */
+		if (!ptr)
+			return -EINVAL;
+		if (get_user(fourth.__pad, (void __user * __user *) ptr))
+			return -EFAULT;
+		return sys_semctl(first, (int)second, third, fourth);
+	}
+	case MSGSND:
+		return sys_msgsnd(first, (struct msgbuf __user *) ptr,
+				  (size_t)second, third);
+	case MSGRCV:
+		/* msgp and msgtyp are passed in an ipc_kludge block. */
+		if (!ptr)
+			return -EINVAL;
+		if (copy_from_user(&tmp, (struct ipc_kludge __user *) ptr,
+				   sizeof (struct ipc_kludge)))
+			return -EFAULT;
+		return sys_msgrcv(first, tmp.msgp,
+				  (size_t)second, tmp.msgtyp, third);
+	case MSGGET:
+		return sys_msgget((key_t)first, (int)second);
+	case MSGCTL:
+		return sys_msgctl(first, (int)second,
+				  (struct msqid_ds __user *)ptr);
+	case SHMAT: {
+		ulong raddr;
+
+		ret = do_shmat(first, (char __user *)ptr,
+			       (int)second, &raddr);
+		if (ret)
+			return ret;
+		/* The attach address is returned through *third. */
+		return put_user(raddr, (ulong __user *) third);
+	}
+	case SHMDT:
+		return sys_shmdt((char __user *)ptr);
+	case SHMGET:
+		return sys_shmget(first, (size_t)second, third);
+	case SHMCTL:
+		return sys_shmctl(first, (int)second,
+				  (struct shmid_ds __user *) ptr);
+	default:
+		return -ENOSYS;
+	}
+}
+
+#ifdef CONFIG_ARCH_S390X
+asmlinkage long s390x_newuname(struct new_utsname __user *name)
+{
+	int rc;
+
+	rc = sys_newuname(name);
+	/*
+	 * A task with the 31 bit emulation personality sees itself as
+	 * "s390"; patch the machine string in the user buffer.
+	 */
+	if (!rc && current->personality == PER_LINUX32) {
+		if (copy_to_user(name->machine, "s390\0\0\0\0", 8))
+			rc = -EFAULT;
+	}
+	return rc;
+}
+
+asmlinkage long s390x_personality(unsigned long personality)
+{
+	int rc;
+
+	/*
+	 * A PER_LINUX32 task asking for PER_LINUX really means the
+	 * 32 bit emulation personality; translate on the way in and
+	 * back out so the caller keeps seeing plain PER_LINUX.
+	 */
+	if (personality == PER_LINUX && current->personality == PER_LINUX32)
+		personality = PER_LINUX32;
+	rc = sys_personality(personality);
+	if (rc == PER_LINUX32)
+		rc = PER_LINUX;
+
+	return rc;
+}
+#endif /* CONFIG_ARCH_S390X */
+
+/*
+ * Wrapper function for sys_fadvise64/fadvise64_64
+ */
+#ifndef CONFIG_ARCH_S390X
+
+asmlinkage long
+s390_fadvise64(int fd, u32 offset_high, u32 offset_low, size_t len, int advice)
+{
+	u64 offset;
+
+	/* Reassemble the 64 bit offset from its two 32 bit halves. */
+	offset = ((u64) offset_high << 32) | offset_low;
+	return sys_fadvise64(fd, offset, len, advice);
+}
+
+#endif
+
+/*
+ * Parameter block for s390_fadvise64_64(); the four arguments are
+ * passed indirectly via a user space pointer.
+ */
+struct fadvise64_64_args {
+	int fd;
+	long long offset;
+	long long len;
+	int advice;
+};
+
+asmlinkage long
+s390_fadvise64_64(struct fadvise64_64_args __user *args)
+{
+	struct fadvise64_64_args a;
+
+	/* All four arguments arrive in a user space parameter block. */
+	if (copy_from_user(&a, args, sizeof(a)))
+		return -EFAULT;
+	return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice);
+}
+
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
new file mode 100644
index 0000000..5159386
--- /dev/null
+++ b/arch/s390/kernel/syscalls.S
@@ -0,0 +1,292 @@
+/*
+ * definitions for sys_call_table, each line represents an
+ * entry in the table in the form 
+ * SYSCALL(31 bit syscall, 64 bit syscall, 31 bit emulated syscall)
+ *
+ * this file is meant to be included from entry.S and entry64.S
+ */
+
+/* Entry for a call not implemented here: all three slots go to sys_ni_syscall. */
+#define NI_SYSCALL SYSCALL(sys_ni_syscall,sys_ni_syscall,sys_ni_syscall)
+
+NI_SYSCALL							/* 0 */
+SYSCALL(sys_exit,sys_exit,sys32_exit_wrapper)
+SYSCALL(sys_fork_glue,sys_fork_glue,sys_fork_glue)
+SYSCALL(sys_read,sys_read,sys32_read_wrapper)
+SYSCALL(sys_write,sys_write,sys32_write_wrapper)
+SYSCALL(sys_open,sys_open,sys32_open_wrapper)			/* 5 */
+SYSCALL(sys_close,sys_close,sys32_close_wrapper)
+SYSCALL(sys_restart_syscall,sys_restart_syscall,sys_restart_syscall)
+SYSCALL(sys_creat,sys_creat,sys32_creat_wrapper)
+SYSCALL(sys_link,sys_link,sys32_link_wrapper)
+SYSCALL(sys_unlink,sys_unlink,sys32_unlink_wrapper)		/* 10 */
+SYSCALL(sys_execve_glue,sys_execve_glue,sys32_execve_glue)
+SYSCALL(sys_chdir,sys_chdir,sys32_chdir_wrapper)
+SYSCALL(sys_time,sys_ni_syscall,sys32_time_wrapper)		/* old time syscall */
+SYSCALL(sys_mknod,sys_mknod,sys32_mknod_wrapper)
+SYSCALL(sys_chmod,sys_chmod,sys32_chmod_wrapper)		/* 15 */
+SYSCALL(sys_lchown16,sys_ni_syscall,sys32_lchown16_wrapper)	/* old lchown16 syscall*/
+NI_SYSCALL							/* old break syscall holder */
+NI_SYSCALL							/* old stat syscall holder */
+SYSCALL(sys_lseek,sys_lseek,sys32_lseek_wrapper)
+SYSCALL(sys_getpid,sys_getpid,sys_getpid)			/* 20 */
+SYSCALL(sys_mount,sys_mount,sys32_mount_wrapper)
+SYSCALL(sys_oldumount,sys_oldumount,sys32_oldumount_wrapper)
+SYSCALL(sys_setuid16,sys_ni_syscall,sys32_setuid16_wrapper)	/* old setuid16 syscall*/
+SYSCALL(sys_getuid16,sys_ni_syscall,sys32_getuid16)		/* old getuid16 syscall*/
+SYSCALL(sys_stime,sys_ni_syscall,sys32_stime_wrapper)		/* 25 old stime syscall */
+SYSCALL(sys_ptrace,sys_ptrace,sys32_ptrace_wrapper)
+SYSCALL(sys_alarm,sys_alarm,sys32_alarm_wrapper)
+NI_SYSCALL							/* old fstat syscall */
+SYSCALL(sys_pause,sys_pause,sys32_pause)
+SYSCALL(sys_utime,sys_utime,compat_sys_utime_wrapper)		/* 30 */
+NI_SYSCALL							/* old stty syscall */
+NI_SYSCALL							/* old gtty syscall */
+SYSCALL(sys_access,sys_access,sys32_access_wrapper)
+SYSCALL(sys_nice,sys_nice,sys32_nice_wrapper)
+NI_SYSCALL							/* 35 old ftime syscall */
+SYSCALL(sys_sync,sys_sync,sys_sync)
+SYSCALL(sys_kill,sys_kill,sys32_kill_wrapper)
+SYSCALL(sys_rename,sys_rename,sys32_rename_wrapper)
+SYSCALL(sys_mkdir,sys_mkdir,sys32_mkdir_wrapper)
+SYSCALL(sys_rmdir,sys_rmdir,sys32_rmdir_wrapper)		/* 40 */
+SYSCALL(sys_dup,sys_dup,sys32_dup_wrapper)
+SYSCALL(sys_pipe,sys_pipe,sys32_pipe_wrapper)
+SYSCALL(sys_times,sys_times,compat_sys_times_wrapper)
+NI_SYSCALL							/* old prof syscall */
+SYSCALL(sys_brk,sys_brk,sys32_brk_wrapper)			/* 45 */
+SYSCALL(sys_setgid16,sys_ni_syscall,sys32_setgid16_wrapper)	/* old setgid16 syscall*/
+SYSCALL(sys_getgid16,sys_ni_syscall,sys32_getgid16)		/* old getgid16 syscall*/
+SYSCALL(sys_signal,sys_signal,sys32_signal_wrapper)
+SYSCALL(sys_geteuid16,sys_ni_syscall,sys32_geteuid16)		/* old geteuid16 syscall */
+SYSCALL(sys_getegid16,sys_ni_syscall,sys32_getegid16)		/* 50 old getegid16 syscall */
+SYSCALL(sys_acct,sys_acct,sys32_acct_wrapper)
+SYSCALL(sys_umount,sys_umount,sys32_umount_wrapper)
+NI_SYSCALL							/* old lock syscall */
+SYSCALL(sys_ioctl,sys_ioctl,compat_sys_ioctl_wrapper)
+SYSCALL(sys_fcntl,sys_fcntl,compat_sys_fcntl_wrapper)		/* 55 */
+NI_SYSCALL							/* intel mpx syscall */
+SYSCALL(sys_setpgid,sys_setpgid,sys32_setpgid_wrapper)
+NI_SYSCALL							/* old ulimit syscall */
+NI_SYSCALL							/* old uname syscall */
+SYSCALL(sys_umask,sys_umask,sys32_umask_wrapper)		/* 60 */
+SYSCALL(sys_chroot,sys_chroot,sys32_chroot_wrapper)
+SYSCALL(sys_ustat,sys_ustat,sys32_ustat_wrapper)
+SYSCALL(sys_dup2,sys_dup2,sys32_dup2_wrapper)
+SYSCALL(sys_getppid,sys_getppid,sys_getppid)
+SYSCALL(sys_getpgrp,sys_getpgrp,sys_getpgrp)			/* 65 */
+SYSCALL(sys_setsid,sys_setsid,sys_setsid)
+SYSCALL(sys_sigaction,sys_sigaction,sys32_sigaction_wrapper)
+NI_SYSCALL							/* old sgetmask syscall*/
+NI_SYSCALL							/* old ssetmask syscall*/
+SYSCALL(sys_setreuid16,sys_ni_syscall,sys32_setreuid16_wrapper)	/* old setreuid16 syscall */
+SYSCALL(sys_setregid16,sys_ni_syscall,sys32_setregid16_wrapper)	/* old setregid16 syscall */
+SYSCALL(sys_sigsuspend_glue,sys_sigsuspend_glue,sys32_sigsuspend_glue)
+SYSCALL(sys_sigpending,sys_sigpending,compat_sys_sigpending_wrapper)
+SYSCALL(sys_sethostname,sys_sethostname,sys32_sethostname_wrapper)
+SYSCALL(sys_setrlimit,sys_setrlimit,compat_sys_setrlimit_wrapper)	/* 75 */
+SYSCALL(sys_old_getrlimit,sys_getrlimit,compat_sys_old_getrlimit_wrapper)
+SYSCALL(sys_getrusage,sys_getrusage,compat_sys_getrusage_wrapper)
+SYSCALL(sys_gettimeofday,sys_gettimeofday,sys32_gettimeofday_wrapper)
+SYSCALL(sys_settimeofday,sys_settimeofday,sys32_settimeofday_wrapper)
+SYSCALL(sys_getgroups16,sys_ni_syscall,sys32_getgroups16_wrapper)	/* 80 old getgroups16 syscall */
+SYSCALL(sys_setgroups16,sys_ni_syscall,sys32_setgroups16_wrapper)	/* old setgroups16 syscall */
+NI_SYSCALL							/* old select syscall */
+SYSCALL(sys_symlink,sys_symlink,sys32_symlink_wrapper)
+NI_SYSCALL							/* old lstat syscall */
+SYSCALL(sys_readlink,sys_readlink,sys32_readlink_wrapper)	/* 85 */
+SYSCALL(sys_uselib,sys_uselib,sys32_uselib_wrapper)
+SYSCALL(sys_swapon,sys_swapon,sys32_swapon_wrapper)
+SYSCALL(sys_reboot,sys_reboot,sys32_reboot_wrapper)
+SYSCALL(sys_ni_syscall,sys_ni_syscall,old32_readdir_wrapper)	/* old readdir syscall */
+SYSCALL(old_mmap,old_mmap,old32_mmap_wrapper)			/* 90 */
+SYSCALL(sys_munmap,sys_munmap,sys32_munmap_wrapper)
+SYSCALL(sys_truncate,sys_truncate,sys32_truncate_wrapper)
+SYSCALL(sys_ftruncate,sys_ftruncate,sys32_ftruncate_wrapper)
+SYSCALL(sys_fchmod,sys_fchmod,sys32_fchmod_wrapper)
+SYSCALL(sys_fchown16,sys_ni_syscall,sys32_fchown16_wrapper)	/* 95 old fchown16 syscall*/
+SYSCALL(sys_getpriority,sys_getpriority,sys32_getpriority_wrapper)
+SYSCALL(sys_setpriority,sys_setpriority,sys32_setpriority_wrapper)
+NI_SYSCALL							/* old profil syscall */
+SYSCALL(sys_statfs,sys_statfs,compat_sys_statfs_wrapper)
+SYSCALL(sys_fstatfs,sys_fstatfs,compat_sys_fstatfs_wrapper)	/* 100 */
+NI_SYSCALL							/* ioperm for i386 */
+SYSCALL(sys_socketcall,sys_socketcall,compat_sys_socketcall_wrapper)
+SYSCALL(sys_syslog,sys_syslog,sys32_syslog_wrapper)
+SYSCALL(sys_setitimer,sys_setitimer,compat_sys_setitimer_wrapper)
+SYSCALL(sys_getitimer,sys_getitimer,compat_sys_getitimer_wrapper)	/* 105 */
+SYSCALL(sys_newstat,sys_newstat,compat_sys_newstat_wrapper)
+SYSCALL(sys_newlstat,sys_newlstat,compat_sys_newlstat_wrapper)
+SYSCALL(sys_newfstat,sys_newfstat,compat_sys_newfstat_wrapper)
+NI_SYSCALL							/* old uname syscall */
+SYSCALL(sys_lookup_dcookie,sys_lookup_dcookie,sys32_lookup_dcookie_wrapper)	/* 110 */
+SYSCALL(sys_vhangup,sys_vhangup,sys_vhangup)
+NI_SYSCALL							/* old "idle" system call */
+NI_SYSCALL							/* vm86old for i386 */
+SYSCALL(sys_wait4,sys_wait4,compat_sys_wait4_wrapper)
+SYSCALL(sys_swapoff,sys_swapoff,sys32_swapoff_wrapper)		/* 115 */
+SYSCALL(sys_sysinfo,sys_sysinfo,sys32_sysinfo_wrapper)
+SYSCALL(sys_ipc,sys_ipc,sys32_ipc_wrapper)
+SYSCALL(sys_fsync,sys_fsync,sys32_fsync_wrapper)
+SYSCALL(sys_sigreturn_glue,sys_sigreturn_glue,sys32_sigreturn_glue)
+SYSCALL(sys_clone_glue,sys_clone_glue,sys32_clone_glue)		/* 120 */
+SYSCALL(sys_setdomainname,sys_setdomainname,sys32_setdomainname_wrapper)
+SYSCALL(sys_newuname,s390x_newuname,sys32_newuname_wrapper)
+NI_SYSCALL							/* modify_ldt for i386 */
+SYSCALL(sys_adjtimex,sys_adjtimex,sys32_adjtimex_wrapper)
+SYSCALL(sys_mprotect,sys_mprotect,sys32_mprotect_wrapper)	/* 125 */
+SYSCALL(sys_sigprocmask,sys_sigprocmask,compat_sys_sigprocmask_wrapper)
+NI_SYSCALL							/* old "create module" */
+SYSCALL(sys_init_module,sys_init_module,sys32_init_module_wrapper)
+SYSCALL(sys_delete_module,sys_delete_module,sys32_delete_module_wrapper)
+NI_SYSCALL							/* 130: old get_kernel_syms */
+SYSCALL(sys_quotactl,sys_quotactl,sys32_quotactl_wrapper)
+SYSCALL(sys_getpgid,sys_getpgid,sys32_getpgid_wrapper)
+SYSCALL(sys_fchdir,sys_fchdir,sys32_fchdir_wrapper)
+SYSCALL(sys_bdflush,sys_bdflush,sys32_bdflush_wrapper)
+SYSCALL(sys_sysfs,sys_sysfs,sys32_sysfs_wrapper)		/* 135 */
+SYSCALL(sys_personality,s390x_personality,sys32_personality_wrapper)
+NI_SYSCALL							/* for afs_syscall */
+SYSCALL(sys_setfsuid16,sys_ni_syscall,sys32_setfsuid16_wrapper)	/* old setfsuid16 syscall */
+SYSCALL(sys_setfsgid16,sys_ni_syscall,sys32_setfsgid16_wrapper)	/* old setfsgid16 syscall */
+SYSCALL(sys_llseek,sys_llseek,sys32_llseek_wrapper)		/* 140 */
+SYSCALL(sys_getdents,sys_getdents,sys32_getdents_wrapper)
+SYSCALL(sys_select,sys_select,compat_sys_select_wrapper)
+SYSCALL(sys_flock,sys_flock,sys32_flock_wrapper)
+SYSCALL(sys_msync,sys_msync,sys32_msync_wrapper)
+SYSCALL(sys_readv,sys_readv,compat_sys_readv_wrapper)		/* 145 */
+SYSCALL(sys_writev,sys_writev,compat_sys_writev_wrapper)
+SYSCALL(sys_getsid,sys_getsid,sys32_getsid_wrapper)
+SYSCALL(sys_fdatasync,sys_fdatasync,sys32_fdatasync_wrapper)
+SYSCALL(sys_sysctl,sys_sysctl,sys32_sysctl_wrapper)
+SYSCALL(sys_mlock,sys_mlock,sys32_mlock_wrapper)		/* 150 */
+SYSCALL(sys_munlock,sys_munlock,sys32_munlock_wrapper)
+SYSCALL(sys_mlockall,sys_mlockall,sys32_mlockall_wrapper)
+SYSCALL(sys_munlockall,sys_munlockall,sys_munlockall)
+SYSCALL(sys_sched_setparam,sys_sched_setparam,sys32_sched_setparam_wrapper)
+SYSCALL(sys_sched_getparam,sys_sched_getparam,sys32_sched_getparam_wrapper)	/* 155 */
+SYSCALL(sys_sched_setscheduler,sys_sched_setscheduler,sys32_sched_setscheduler_wrapper)
+SYSCALL(sys_sched_getscheduler,sys_sched_getscheduler,sys32_sched_getscheduler_wrapper)
+SYSCALL(sys_sched_yield,sys_sched_yield,sys_sched_yield)
+SYSCALL(sys_sched_get_priority_max,sys_sched_get_priority_max,sys32_sched_get_priority_max_wrapper)
+SYSCALL(sys_sched_get_priority_min,sys_sched_get_priority_min,sys32_sched_get_priority_min_wrapper)	/* 160 */
+SYSCALL(sys_sched_rr_get_interval,sys_sched_rr_get_interval,sys32_sched_rr_get_interval_wrapper)
+SYSCALL(sys_nanosleep,sys_nanosleep,compat_sys_nanosleep_wrapper)
+SYSCALL(sys_mremap,sys_mremap,sys32_mremap_wrapper)
+SYSCALL(sys_setresuid16,sys_ni_syscall,sys32_setresuid16_wrapper)	/* old setresuid16 syscall */
+SYSCALL(sys_getresuid16,sys_ni_syscall,sys32_getresuid16_wrapper)	/* 165 old getresuid16 syscall */
+NI_SYSCALL							/* for vm86 */
+NI_SYSCALL							/* old sys_query_module */
+SYSCALL(sys_poll,sys_poll,sys32_poll_wrapper)
+SYSCALL(sys_nfsservctl,sys_nfsservctl,compat_sys_nfsservctl_wrapper)
+SYSCALL(sys_setresgid16,sys_ni_syscall,sys32_setresgid16_wrapper)	/* 170 old setresgid16 syscall */
+SYSCALL(sys_getresgid16,sys_ni_syscall,sys32_getresgid16_wrapper)	/* old getresgid16 syscall */
+SYSCALL(sys_prctl,sys_prctl,sys32_prctl_wrapper)
+SYSCALL(sys_rt_sigreturn_glue,sys_rt_sigreturn_glue,sys32_rt_sigreturn_glue)
+SYSCALL(sys_rt_sigaction,sys_rt_sigaction,sys32_rt_sigaction_wrapper)
+SYSCALL(sys_rt_sigprocmask,sys_rt_sigprocmask,sys32_rt_sigprocmask_wrapper)	/* 175 */
+SYSCALL(sys_rt_sigpending,sys_rt_sigpending,sys32_rt_sigpending_wrapper)
+SYSCALL(sys_rt_sigtimedwait,sys_rt_sigtimedwait,compat_sys_rt_sigtimedwait_wrapper)
+SYSCALL(sys_rt_sigqueueinfo,sys_rt_sigqueueinfo,sys32_rt_sigqueueinfo_wrapper)
+SYSCALL(sys_rt_sigsuspend_glue,sys_rt_sigsuspend_glue,sys32_rt_sigsuspend_glue)
+SYSCALL(sys_pread64,sys_pread64,sys32_pread64_wrapper)		/* 180 */
+SYSCALL(sys_pwrite64,sys_pwrite64,sys32_pwrite64_wrapper)
+SYSCALL(sys_chown16,sys_ni_syscall,sys32_chown16_wrapper)	/* old chown16 syscall */
+SYSCALL(sys_getcwd,sys_getcwd,sys32_getcwd_wrapper)
+SYSCALL(sys_capget,sys_capget,sys32_capget_wrapper)
+SYSCALL(sys_capset,sys_capset,sys32_capset_wrapper)		/* 185 */
+SYSCALL(sys_sigaltstack_glue,sys_sigaltstack_glue,sys32_sigaltstack_glue)
+SYSCALL(sys_sendfile,sys_sendfile64,sys32_sendfile_wrapper)
+NI_SYSCALL							/* streams1 */
+NI_SYSCALL							/* streams2 */
+SYSCALL(sys_vfork_glue,sys_vfork_glue,sys_vfork_glue)		/* 190 */
+SYSCALL(sys_getrlimit,sys_getrlimit,compat_sys_getrlimit_wrapper)
+SYSCALL(sys_mmap2,sys_mmap2,sys32_mmap2_wrapper)
+SYSCALL(sys_truncate64,sys_ni_syscall,sys32_truncate64_wrapper)
+SYSCALL(sys_ftruncate64,sys_ni_syscall,sys32_ftruncate64_wrapper)
+SYSCALL(sys_stat64,sys_ni_syscall,sys32_stat64_wrapper)		/* 195 */
+SYSCALL(sys_lstat64,sys_ni_syscall,sys32_lstat64_wrapper)
+SYSCALL(sys_fstat64,sys_ni_syscall,sys32_fstat64_wrapper)
+SYSCALL(sys_lchown,sys_lchown,sys32_lchown_wrapper)
+SYSCALL(sys_getuid,sys_getuid,sys_getuid)
+SYSCALL(sys_getgid,sys_getgid,sys_getgid)			/* 200 */
+SYSCALL(sys_geteuid,sys_geteuid,sys_geteuid)
+SYSCALL(sys_getegid,sys_getegid,sys_getegid)
+SYSCALL(sys_setreuid,sys_setreuid,sys32_setreuid_wrapper)
+SYSCALL(sys_setregid,sys_setregid,sys32_setregid_wrapper)
+SYSCALL(sys_getgroups,sys_getgroups,sys32_getgroups_wrapper)	/* 205 */
+SYSCALL(sys_setgroups,sys_setgroups,sys32_setgroups_wrapper)
+SYSCALL(sys_fchown,sys_fchown,sys32_fchown_wrapper)
+SYSCALL(sys_setresuid,sys_setresuid,sys32_setresuid_wrapper)
+SYSCALL(sys_getresuid,sys_getresuid,sys32_getresuid_wrapper)
+SYSCALL(sys_setresgid,sys_setresgid,sys32_setresgid_wrapper)	/* 210 */
+SYSCALL(sys_getresgid,sys_getresgid,sys32_getresgid_wrapper)
+SYSCALL(sys_chown,sys_chown,sys32_chown_wrapper)
+SYSCALL(sys_setuid,sys_setuid,sys32_setuid_wrapper)
+SYSCALL(sys_setgid,sys_setgid,sys32_setgid_wrapper)
+SYSCALL(sys_setfsuid,sys_setfsuid,sys32_setfsuid_wrapper)	/* 215 */
+SYSCALL(sys_setfsgid,sys_setfsgid,sys32_setfsgid_wrapper)
+SYSCALL(sys_pivot_root,sys_pivot_root,sys32_pivot_root_wrapper)
+SYSCALL(sys_mincore,sys_mincore,sys32_mincore_wrapper)
+SYSCALL(sys_madvise,sys_madvise,sys32_madvise_wrapper)
+SYSCALL(sys_getdents64,sys_getdents64,sys32_getdents64_wrapper)	/* 220 */
+SYSCALL(sys_fcntl64,sys_ni_syscall,compat_sys_fcntl64_wrapper)
+SYSCALL(sys_readahead,sys_readahead,sys32_readahead)
+SYSCALL(sys_sendfile64,sys_ni_syscall,sys32_sendfile64)
+SYSCALL(sys_setxattr,sys_setxattr,sys32_setxattr_wrapper)
+SYSCALL(sys_lsetxattr,sys_lsetxattr,sys32_lsetxattr_wrapper)	/* 225 */
+SYSCALL(sys_fsetxattr,sys_fsetxattr,sys32_fsetxattr_wrapper)
+SYSCALL(sys_getxattr,sys_getxattr,sys32_getxattr_wrapper)
+SYSCALL(sys_lgetxattr,sys_lgetxattr,sys32_lgetxattr_wrapper)
+SYSCALL(sys_fgetxattr,sys_fgetxattr,sys32_fgetxattr_wrapper)
+SYSCALL(sys_listxattr,sys_listxattr,sys32_listxattr_wrapper)	/* 230 */
+SYSCALL(sys_llistxattr,sys_llistxattr,sys32_llistxattr_wrapper)
+SYSCALL(sys_flistxattr,sys_flistxattr,sys32_flistxattr_wrapper)
+SYSCALL(sys_removexattr,sys_removexattr,sys32_removexattr_wrapper)
+SYSCALL(sys_lremovexattr,sys_lremovexattr,sys32_lremovexattr_wrapper)
+SYSCALL(sys_fremovexattr,sys_fremovexattr,sys32_fremovexattr_wrapper)	/* 235 */
+SYSCALL(sys_gettid,sys_gettid,sys_gettid)
+SYSCALL(sys_tkill,sys_tkill,sys_tkill)
+SYSCALL(sys_futex,sys_futex,compat_sys_futex_wrapper)
+SYSCALL(sys_sched_setaffinity,sys_sched_setaffinity,sys32_sched_setaffinity_wrapper)
+SYSCALL(sys_sched_getaffinity,sys_sched_getaffinity,sys32_sched_getaffinity_wrapper)	/* 240 */
+SYSCALL(sys_tgkill,sys_tgkill,sys_tgkill)
+NI_SYSCALL							/* reserved for TUX */
+SYSCALL(sys_io_setup,sys_io_setup,sys32_io_setup_wrapper)
+SYSCALL(sys_io_destroy,sys_io_destroy,sys32_io_destroy_wrapper)
+SYSCALL(sys_io_getevents,sys_io_getevents,sys32_io_getevents_wrapper)	/* 245 */
+SYSCALL(sys_io_submit,sys_io_submit,sys32_io_submit_wrapper)
+SYSCALL(sys_io_cancel,sys_io_cancel,sys32_io_cancel_wrapper)
+SYSCALL(sys_exit_group,sys_exit_group,sys32_exit_group_wrapper)
+SYSCALL(sys_epoll_create,sys_epoll_create,sys_epoll_create_wrapper)
+SYSCALL(sys_epoll_ctl,sys_epoll_ctl,sys_epoll_ctl_wrapper)	/* 250 */
+SYSCALL(sys_epoll_wait,sys_epoll_wait,sys_epoll_wait_wrapper)
+SYSCALL(sys_set_tid_address,sys_set_tid_address,sys32_set_tid_address_wrapper)
+SYSCALL(s390_fadvise64,sys_fadvise64_64,sys32_fadvise64_wrapper)
+SYSCALL(sys_timer_create,sys_timer_create,sys32_timer_create_wrapper)
+SYSCALL(sys_timer_settime,sys_timer_settime,sys32_timer_settime_wrapper)	/* 255 */
+SYSCALL(sys_timer_gettime,sys_timer_gettime,sys32_timer_gettime_wrapper)
+SYSCALL(sys_timer_getoverrun,sys_timer_getoverrun,sys32_timer_getoverrun_wrapper)
+SYSCALL(sys_timer_delete,sys_timer_delete,sys32_timer_delete_wrapper)
+SYSCALL(sys_clock_settime,sys_clock_settime,sys32_clock_settime_wrapper)
+SYSCALL(sys_clock_gettime,sys_clock_gettime,sys32_clock_gettime_wrapper)	/* 260 */
+SYSCALL(sys_clock_getres,sys_clock_getres,sys32_clock_getres_wrapper)
+SYSCALL(sys_clock_nanosleep,sys_clock_nanosleep,sys32_clock_nanosleep_wrapper)
+NI_SYSCALL							/* reserved for vserver */
+SYSCALL(s390_fadvise64_64,sys_ni_syscall,sys32_fadvise64_64_wrapper)
+SYSCALL(sys_statfs64,sys_statfs64,compat_sys_statfs64_wrapper)
+SYSCALL(sys_fstatfs64,sys_fstatfs64,compat_sys_fstatfs64_wrapper)
+SYSCALL(sys_remap_file_pages,sys_remap_file_pages,sys32_remap_file_pages_wrapper)
+NI_SYSCALL							/* 268 sys_mbind */
+NI_SYSCALL							/* 269 sys_get_mempolicy */
+NI_SYSCALL							/* 270 sys_set_mempolicy */
+SYSCALL(sys_mq_open,sys_mq_open,compat_sys_mq_open_wrapper)
+SYSCALL(sys_mq_unlink,sys_mq_unlink,sys32_mq_unlink_wrapper)
+SYSCALL(sys_mq_timedsend,sys_mq_timedsend,compat_sys_mq_timedsend_wrapper)
+SYSCALL(sys_mq_timedreceive,sys_mq_timedreceive,compat_sys_mq_timedreceive_wrapper)
+SYSCALL(sys_mq_notify,sys_mq_notify,compat_sys_mq_notify_wrapper) /* 275 */
+SYSCALL(sys_mq_getsetattr,sys_mq_getsetattr,compat_sys_mq_getsetattr_wrapper)
+NI_SYSCALL							/* reserved for kexec */
+SYSCALL(sys_add_key,sys_add_key,compat_sys_add_key_wrapper)
+SYSCALL(sys_request_key,sys_request_key,compat_sys_request_key_wrapper)
+SYSCALL(sys_keyctl,sys_keyctl,compat_sys_keyctl)		/* 280 */
+SYSCALL(sys_waitid,sys_waitid,compat_sys_waitid_wrapper)
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
new file mode 100644
index 0000000..061e811
--- /dev/null
+++ b/arch/s390/kernel/time.c
@@ -0,0 +1,382 @@
+/*
+ *  arch/s390/kernel/time.c
+ *    Time of day based timer functions.
+ *
+ *  S390 version
+ *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Hartmut Penner (hp@de.ibm.com),
+ *               Martin Schwidefsky (schwidefsky@de.ibm.com),
+ *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
+ *
+ *  Derived from "arch/i386/kernel/time.c"
+ *    Copyright (C) 1991, 1992, 1995  Linus Torvalds
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/types.h>
+#include <linux/profile.h>
+#include <linux/timex.h>
+#include <linux/notifier.h>
+
+#include <asm/uaccess.h>
+#include <asm/delay.h>
+#include <asm/s390_ext.h>
+#include <asm/div64.h>
+#include <asm/irq.h>
+#include <asm/timer.h>
+
+/* change this if you have some constant time drift */
+#define USECS_PER_JIFFY     ((unsigned long) 1000000/HZ)
+#define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12)
+
+/*
+ * Create a small time difference between the timer interrupts
+ * on the different cpus to avoid lock contention.
+ */
+#define CPU_DEVIATION       (smp_processor_id() << 12)
+
+#define TICK_SIZE tick
+
+u64 jiffies_64 = INITIAL_JIFFIES;
+
+EXPORT_SYMBOL(jiffies_64);
+
+static ext_int_info_t ext_int_info_cc;
+static u64 init_timer_cc;
+static u64 jiffies_timer_cc;
+static u64 xtime_cc;
+
+extern unsigned long wall_jiffies;
+
+/*
+ * Scheduler clock - returns current time in nanosec units.
+ * The TOD clock counts in units of 2**-12 microseconds; multiplying
+ * the delta since boot by 1000 and shifting right by 12 converts the
+ * TOD difference to nanoseconds.
+ */
+unsigned long long sched_clock(void)
+{
+	return ((get_clock() - jiffies_timer_cc) * 1000) >> 12;
+}
+
+/*
+ * Convert a TOD clock value (units of 2**-12 microseconds) into a
+ * struct timespec (seconds + nanoseconds) stored through 'xtime'.
+ */
+void tod_to_timeval(__u64 todval, struct timespec *xtime)
+{
+	unsigned long long sec;
+
+	sec = todval >> 12;	/* TOD units -> microseconds */
+	do_div(sec, 1000000);	/* microseconds -> whole seconds */
+	xtime->tv_sec = sec;
+	/* Subtract the whole seconds (back in TOD units), then convert
+	 * the sub-second remainder to nanoseconds. */
+	todval -= (sec * 1000000) << 12;
+	xtime->tv_nsec = ((todval * 1000) >> 12);
+}
+
+/*
+ * Microseconds elapsed since the last update of xtime, derived from
+ * the TOD clock and corrected by the jiffies not yet folded into
+ * wall time (wall_jiffies).
+ */
+static inline unsigned long do_gettimeoffset(void) 
+{
+	__u64 now;
+
+        now = (get_clock() - jiffies_timer_cc) >> 12;
+	/* We require the offset from the latest update of xtime */
+	now -= (__u64) wall_jiffies*USECS_PER_JIFFY;
+	return (unsigned long) now;
+}
+
+/*
+ * This version of gettimeofday has microsecond resolution.
+ * Reads xtime under the xtime seqlock and adds the TOD-derived
+ * offset since the last tick; normalizes usec overflow into sec.
+ */
+void do_gettimeofday(struct timeval *tv)
+{
+	unsigned long flags;
+	unsigned long seq;
+	unsigned long usec, sec;
+
+	do {
+		seq = read_seqbegin_irqsave(&xtime_lock, flags);
+
+		sec = xtime.tv_sec;
+		usec = xtime.tv_nsec / 1000 + do_gettimeoffset();
+	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
+
+	/* Carry full seconds out of the microsecond field. */
+	while (usec >= 1000000) {
+		usec -= 1000000;
+		sec++;
+	}
+
+	tv->tv_sec = sec;
+	tv->tv_usec = usec;
+}
+
+EXPORT_SYMBOL(do_gettimeofday);
+
+/*
+ * Set the wall-clock time, keeping wall_to_monotonic consistent so
+ * the monotonic clock is unaffected.  Returns 0 on success or
+ * -EINVAL if tv_nsec is out of range.
+ */
+int do_settimeofday(struct timespec *tv)
+{
+	time_t wtm_sec, sec = tv->tv_sec;
+	long wtm_nsec, nsec = tv->tv_nsec;
+
+	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
+		return -EINVAL;
+
+	write_seqlock_irq(&xtime_lock);
+	/* This is revolting. We need to set the xtime.tv_nsec
+	 * correctly. However, the value in this location is
+	 * the value at the last tick.
+	 * Discover what correction gettimeofday
+	 * would have done, and then undo it!
+	 */
+	nsec -= do_gettimeoffset() * 1000;
+
+	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
+	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
+
+	set_normalized_timespec(&xtime, sec, nsec);
+	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
+
+	time_adjust = 0;		/* stop active adjtime() */
+	time_status |= STA_UNSYNC;
+	time_maxerror = NTP_PHASE_LIMIT;
+	time_esterror = NTP_PHASE_LIMIT;
+	write_sequnlock_irq(&xtime_lock);
+	clock_was_set();
+	return 0;
+}
+
+EXPORT_SYMBOL(do_settimeofday);
+
+
+#ifdef CONFIG_PROFILING
+#define s390_do_profile(regs)	profile_tick(CPU_PROFILING, regs)
+#else
+#define s390_do_profile(regs)  do { ; } while(0)
+#endif /* CONFIG_PROFILING */
+
+
+/*
+ * timer_interrupt() needs to keep up the real-time clock,
+ * as well as call the "do_timer()" routine every clocktick.
+ * Computes how many tick periods elapsed since the per-cpu
+ * jiffy_timer, reprograms the clock comparator for the next tick,
+ * and accounts the elapsed ticks to the timekeeping core.
+ */
+void account_ticks(struct pt_regs *regs)
+{
+	__u64 tmp;
+	__u32 ticks, xticks;
+
+	/* Calculate how many ticks have passed. */
+	if (S390_lowcore.int_clock < S390_lowcore.jiffy_timer) {
+		/*
+		 * We have to program the clock comparator even if
+		 * no tick has passed. That happens if e.g. an i/o
+		 * interrupt wakes up an idle processor that has
+		 * switched off its hz timer.
+		 */
+		tmp = S390_lowcore.jiffy_timer + CPU_DEVIATION;
+		asm volatile ("SCKC %0" : : "m" (tmp));
+		return;
+	}
+	tmp = S390_lowcore.int_clock - S390_lowcore.jiffy_timer;
+	if (tmp >= 2*CLK_TICKS_PER_JIFFY) {  /* more than two ticks ? */
+		ticks = __div(tmp, CLK_TICKS_PER_JIFFY) + 1;
+		S390_lowcore.jiffy_timer +=
+			CLK_TICKS_PER_JIFFY * (__u64) ticks;
+	} else if (tmp >= CLK_TICKS_PER_JIFFY) {
+		/* Between one and two tick periods elapsed: account two
+		 * ticks and advance jiffy_timer by two periods. */
+		ticks = 2;
+		S390_lowcore.jiffy_timer += 2*CLK_TICKS_PER_JIFFY;
+	} else {
+		ticks = 1;
+		S390_lowcore.jiffy_timer += CLK_TICKS_PER_JIFFY;
+	}
+
+	/* set clock comparator for next tick */
+	tmp = S390_lowcore.jiffy_timer + CPU_DEVIATION;
+        asm volatile ("SCKC %0" : : "m" (tmp));
+
+#ifdef CONFIG_SMP
+	/*
+	 * Do not rely on the boot cpu to do the calls to do_timer.
+	 * Spread it over all cpus instead.
+	 */
+	write_seqlock(&xtime_lock);
+	if (S390_lowcore.jiffy_timer > xtime_cc) {
+		tmp = S390_lowcore.jiffy_timer - xtime_cc;
+		if (tmp >= 2*CLK_TICKS_PER_JIFFY) {
+			xticks = __div(tmp, CLK_TICKS_PER_JIFFY);
+			xtime_cc += (__u64) xticks * CLK_TICKS_PER_JIFFY;
+		} else {
+			xticks = 1;
+			xtime_cc += CLK_TICKS_PER_JIFFY;
+		}
+		while (xticks--)
+			do_timer(regs);
+	}
+	write_sequnlock(&xtime_lock);
+#else
+	for (xticks = ticks; xticks > 0; xticks--)
+		do_timer(regs);
+#endif
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	account_user_vtime(current);
+#else
+	while (ticks--)
+		update_process_times(user_mode(regs));
+#endif
+
+	s390_do_profile(regs);
+}
+
+#ifdef CONFIG_NO_IDLE_HZ
+
+#ifdef CONFIG_NO_IDLE_HZ_INIT
+int sysctl_hz_timer = 0;
+#else
+int sysctl_hz_timer = 1;
+#endif
+
+/*
+ * Stop the HZ tick on the current CPU.
+ * Only cpu_idle may call this function.
+ * Marks the cpu in nohz_cpu_mask and reprograms the clock
+ * comparator for the next pending timer instead of the next tick,
+ * unless rcu or softirq work is still pending.
+ */
+static inline void stop_hz_timer(void)
+{
+	__u64 timer;
+
+	/* sysctl_hz_timer != 0 means the HZ timer is forced on. */
+	if (sysctl_hz_timer != 0)
+		return;
+
+	cpu_set(smp_processor_id(), nohz_cpu_mask);
+
+	/*
+	 * Leave the clock comparator set up for the next timer
+	 * tick if either rcu or a softirq is pending.
+	 */
+	if (rcu_pending(smp_processor_id()) || local_softirq_pending()) {
+		cpu_clear(smp_processor_id(), nohz_cpu_mask);
+		return;
+	}
+
+	/*
+	 * This cpu is going really idle. Set up the clock comparator
+	 * for the next event.
+	 */
+	timer = (__u64) (next_timer_interrupt() - jiffies) + jiffies_64;
+	timer = jiffies_timer_cc + timer * CLK_TICKS_PER_JIFFY;
+	asm volatile ("SCKC %0" : : "m" (timer));
+}
+
+/*
+ * Start the HZ tick on the current CPU.
+ * Only cpu_idle may call this function.
+ * Catches up on missed ticks via account_ticks() and removes the
+ * cpu from nohz_cpu_mask.
+ */
+static inline void start_hz_timer(void)
+{
+	if (!cpu_isset(smp_processor_id(), nohz_cpu_mask))
+		return;
+	account_ticks(__KSTK_PTREGS(current));
+	cpu_clear(smp_processor_id(), nohz_cpu_mask);
+}
+
+/*
+ * Idle notifier callback: stop the tick when the cpu enters idle,
+ * restart it when the cpu leaves idle.  Always returns NOTIFY_OK.
+ */
+static int nohz_idle_notify(struct notifier_block *self,
+			    unsigned long action, void *hcpu)
+{
+	switch (action) {
+	case CPU_IDLE:
+		stop_hz_timer();
+		break;
+	case CPU_NOT_IDLE:
+		start_hz_timer();
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block nohz_idle_nb = {
+	.notifier_call = nohz_idle_notify,
+};
+
+/* Register the nohz idle notifier; failure is fatal at boot. */
+void __init nohz_init(void)
+{
+	if (register_idle_notifier(&nohz_idle_nb))
+		panic("Couldn't register idle notifier");
+}
+
+#endif
+
+/*
+ * Start the clock comparator on the current CPU.
+ * Programs the comparator for the next tick (plus the per-cpu
+ * deviation) and enables clock comparator interrupts in CR0.
+ */
+void init_cpu_timer(void)
+{
+	unsigned long cr0;
+	__u64 timer;
+
+	timer = jiffies_timer_cc + jiffies_64 * CLK_TICKS_PER_JIFFY;
+	S390_lowcore.jiffy_timer = timer + CLK_TICKS_PER_JIFFY;
+	timer += CLK_TICKS_PER_JIFFY + CPU_DEVIATION;
+	asm volatile ("SCKC %0" : : "m" (timer));
+        /* allow clock comparator timer interrupt */
+	__ctl_store(cr0, 0, 0);
+        cr0 |= 0x800;		/* clock comparator subclass mask bit */
+	__ctl_load(cr0, 0, 0);
+}
+
+extern void vtime_init(void);
+
+/*
+ * Initialize the TOD clock and the CPU timer of
+ * the boot cpu.
+ * Reads the TOD clock via STCK, derives jiffies_timer_cc and the
+ * initial xtime, registers the clock comparator external interrupt
+ * (code 0x1004) and starts the boot cpu's comparator.
+ */
+void __init time_init(void)
+{
+	__u64 set_time_cc;
+	int cc;
+
+        /* kick the TOD clock */
+        asm volatile ("STCK 0(%1)\n\t"
+                      "IPM  %0\n\t"
+                      "SRL  %0,28" : "=r" (cc) : "a" (&init_timer_cc) 
+				   : "memory", "cc");
+        switch (cc) {
+        case 0: /* clock in set state: all is fine */
+                break;
+        case 1: /* clock in non-set state: FIXME */
+                printk("time_init: TOD clock in non-set state\n");
+                break;
+        case 2: /* clock in error state: FIXME */
+                printk("time_init: TOD clock in error state\n");
+                break;
+        case 3: /* clock in stopped or not-operational state: FIXME */
+                printk("time_init: TOD clock stopped/non-operational\n");
+                break;
+        }
+	jiffies_timer_cc = init_timer_cc - jiffies_64 * CLK_TICKS_PER_JIFFY;
+
+	/* set xtime */
+	xtime_cc = init_timer_cc + CLK_TICKS_PER_JIFFY;
+	/* Convert the TOD value to Unix epoch time: the magic constants
+	 * are presumably the TOD value of the 1970 epoch and a leap
+	 * second style adjustment -- TODO confirm against the
+	 * z/Architecture TOD epoch definition. */
+	set_time_cc = init_timer_cc - 0x8126d60e46000000LL +
+		(0x3c26700LL*1000000*4096);
+        tod_to_timeval(set_time_cc, &xtime);
+        set_normalized_timespec(&wall_to_monotonic,
+                                -xtime.tv_sec, -xtime.tv_nsec);
+
+	/* request the clock comparator external interrupt */
+        if (register_early_external_interrupt(0x1004, 0,
+					      &ext_int_info_cc) != 0)
+                panic("Couldn't request external interrupt 0x1004");
+
+        init_cpu_timer();
+
+#ifdef CONFIG_NO_IDLE_HZ
+	nohz_init();
+#endif
+
+#ifdef CONFIG_VIRT_TIMER
+	vtime_init();
+#endif
+}
+
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
new file mode 100644
index 0000000..8b90e95
--- /dev/null
+++ b/arch/s390/kernel/traps.c
@@ -0,0 +1,738 @@
+/*
+ *  arch/s390/kernel/traps.c
+ *
+ *  S390 version
+ *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
+ *
+ *  Derived from "arch/i386/kernel/traps.c"
+ *    Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+/*
+ * 'Traps.c' handles hardware traps and faults after we have saved some
+ * state in 'asm.s'.
+ */
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/atomic.h>
+#include <asm/mathemu.h>
+#include <asm/cpcmd.h>
+#include <asm/s390_ext.h>
+#include <asm/lowcore.h>
+#include <asm/debug.h>
+
+/* Called from entry.S only */
+extern void handle_per_exception(struct pt_regs *regs);
+
+typedef void pgm_check_handler_t(struct pt_regs *, long);
+pgm_check_handler_t *pgm_check_table[128];
+
+#ifdef CONFIG_SYSCTL
+#ifdef CONFIG_PROCESS_DEBUG
+int sysctl_userprocess_debug = 1;
+#else
+int sysctl_userprocess_debug = 0;
+#endif
+#endif
+
+extern pgm_check_handler_t do_protection_exception;
+extern pgm_check_handler_t do_dat_exception;
+extern pgm_check_handler_t do_pseudo_page_fault;
+#ifdef CONFIG_PFAULT
+extern int pfault_init(void);
+extern void pfault_fini(void);
+extern void pfault_interrupt(struct pt_regs *regs, __u16 error_code);
+static ext_int_info_t ext_int_pfault;
+#endif
+extern pgm_check_handler_t do_monitor_call;
+
+#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
+
+#ifndef CONFIG_ARCH_S390X
+#define FOURLONG "%08lx %08lx %08lx %08lx\n"
+static int kstack_depth_to_print = 12;
+#else /* CONFIG_ARCH_S390X */
+#define FOURLONG "%016lx %016lx %016lx %016lx\n"
+static int kstack_depth_to_print = 20;
+#endif /* CONFIG_ARCH_S390X */
+
+/*
+ * For show_trace we have three different stacks to consider:
+ *   - the panic stack which is used if the kernel stack has overflown
+ *   - the asynchronous interrupt stack (cpu related)
+ *   - the synchronous kernel stack (process related)
+ * The stack trace can start at any of the three stacks and can
+ * potentially touch all of them. The order is: panic stack, async
+ * stack, sync stack.
+ *
+ * Walks the backchain within [low, high) printing the saved return
+ * address (gprs[8]) of each frame; a zero backchain indicates an
+ * interrupt frame, from which the walk continues on the interrupted
+ * stack. Returns the stack pointer where the walk left this range.
+ */
+static unsigned long
+__show_trace(unsigned long sp, unsigned long low, unsigned long high)
+{
+	struct stack_frame *sf;
+	struct pt_regs *regs;
+
+	while (1) {
+		sp = sp & PSW_ADDR_INSN;
+		if (sp < low || sp > high - sizeof(*sf))
+			return sp;
+		sf = (struct stack_frame *) sp;
+		printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
+		print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
+		/* Follow the backchain. */
+		while (1) {
+			low = sp;
+			sp = sf->back_chain & PSW_ADDR_INSN;
+			if (!sp)
+				break;
+			if (sp <= low || sp > high - sizeof(*sf))
+				return sp;
+			sf = (struct stack_frame *) sp;
+			printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
+			print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
+		}
+		/* Zero backchain detected, check for interrupt frame. */
+		sp = (unsigned long) (sf + 1);
+		if (sp <= low || sp > high - sizeof(*regs))
+			return sp;
+		regs = (struct pt_regs *) sp;
+		printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
+		print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
+		/* Continue on the stack the interrupt came from. */
+		low = sp;
+		sp = regs->gprs[15];
+	}
+}
+
+/*
+ * Print a call trace starting at 'stack' (or the given task's saved
+ * ksp, or the current r15 if both are NULL), walking panic stack,
+ * async stack and finally the task's kernel stack in that order.
+ */
+void show_trace(struct task_struct *task, unsigned long * stack)
+{
+	register unsigned long __r15 asm ("15");
+	unsigned long sp;
+
+	sp = (unsigned long) stack;
+	if (!sp)
+		sp = task ? task->thread.ksp : __r15;
+	printk("Call Trace:\n");
+#ifdef CONFIG_CHECK_STACK
+	sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
+			  S390_lowcore.panic_stack);
+#endif
+	sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
+			  S390_lowcore.async_stack);
+	if (task)
+		__show_trace(sp, (unsigned long) task->thread_info,
+			     (unsigned long) task->thread_info + THREAD_SIZE);
+	else
+		__show_trace(sp, S390_lowcore.thread_info,
+			     S390_lowcore.thread_info + THREAD_SIZE);
+	printk("\n");
+}
+
+/*
+ * Dump raw stack contents (up to kstack_depth_to_print words,
+ * stopping at the THREAD_SIZE boundary) followed by a call trace.
+ */
+void show_stack(struct task_struct *task, unsigned long *sp)
+{
+	register unsigned long * __r15 asm ("15");
+	unsigned long *stack;
+	int i;
+
+	// debugging aid: "show_stack(NULL);" prints the
+	// back trace for this cpu.
+
+	if (!sp)
+		sp = task ? (unsigned long *) task->thread.ksp : __r15;
+
+	stack = sp;
+	for (i = 0; i < kstack_depth_to_print; i++) {
+		if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
+			break;
+		if (i && ((i * sizeof (long) % 32) == 0))
+			printk("\n       ");
+		printk("%p ", (void *)*stack++);
+	}
+	printk("\n");
+	show_trace(task, sp);
+}
+
+/*
+ * The architecture-independent dump_stack generator.
+ * Prints the current cpu's backtrace starting from the current
+ * stack pointer.
+ */
+void dump_stack(void)
+{
+	/* Use NULL, not 0, for the pointer arguments. */
+	show_stack(NULL, NULL);
+}
+
+EXPORT_SYMBOL(dump_stack);
+
+/*
+ * Print the PSW, general purpose registers and the first 20 bytes
+ * of the instruction stream at the time of the fault.  The access
+ * is done under the faulting context's address space (USER_DS for
+ * problem state, KERNEL_DS otherwise).
+ */
+void show_registers(struct pt_regs *regs)
+{
+	mm_segment_t old_fs;
+	char *mode;
+	int i;
+
+	mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
+	printk("%s PSW : %p %p",
+	       mode, (void *) regs->psw.mask,
+	       (void *) regs->psw.addr);
+	print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
+	printk("%s GPRS: " FOURLONG, mode,
+	       regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
+	printk("           " FOURLONG,
+	       regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
+	printk("           " FOURLONG,
+	       regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
+	printk("           " FOURLONG,
+	       regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
+
+#if 0
+	/* FIXME: this isn't needed any more but it changes the ksymoops
+	 * input. To remove or not to remove ... */
+	save_access_regs(regs->acrs);
+	printk("%s ACRS: %08x %08x %08x %08x\n", mode,
+	       regs->acrs[0], regs->acrs[1], regs->acrs[2], regs->acrs[3]);
+	printk("           %08x %08x %08x %08x\n",
+	       regs->acrs[4], regs->acrs[5], regs->acrs[6], regs->acrs[7]);
+	printk("           %08x %08x %08x %08x\n",
+	       regs->acrs[8], regs->acrs[9], regs->acrs[10], regs->acrs[11]);
+	printk("           %08x %08x %08x %08x\n",
+	       regs->acrs[12], regs->acrs[13], regs->acrs[14], regs->acrs[15]);
+#endif
+
+	/*
+	 * Print the first 20 byte of the instruction stream at the
+	 * time of the fault.
+	 */
+	old_fs = get_fs();
+	if (regs->psw.mask & PSW_MASK_PSTATE)
+		set_fs(USER_DS);
+	else
+		set_fs(KERNEL_DS);
+	printk("%s Code: ", mode);
+	for (i = 0; i < 20; i++) {
+		unsigned char c;
+		if (__get_user(c, (char __user *)(regs->psw.addr + i))) {
+			printk(" Bad PSW.");
+			break;
+		}
+		printk("%02x ", c);
+	}
+	set_fs(old_fs);
+
+	printk("\n");
+}	
+
+/* This is called from fs/proc/array.c.
+ * Format a task's user-mode registers (PSW, GPRS from the saved
+ * pt_regs, ACRS from thread state) into 'buffer'; returns the
+ * advanced buffer pointer. */
+char *task_show_regs(struct task_struct *task, char *buffer)
+{
+	struct pt_regs *regs;
+
+	regs = __KSTK_PTREGS(task);
+	buffer += sprintf(buffer, "task: %p, ksp: %p\n",
+		       task, (void *)task->thread.ksp);
+	buffer += sprintf(buffer, "User PSW : %p %p\n",
+		       (void *) regs->psw.mask, (void *)regs->psw.addr);
+
+	buffer += sprintf(buffer, "User GPRS: " FOURLONG,
+			  regs->gprs[0], regs->gprs[1],
+			  regs->gprs[2], regs->gprs[3]);
+	buffer += sprintf(buffer, "           " FOURLONG,
+			  regs->gprs[4], regs->gprs[5],
+			  regs->gprs[6], regs->gprs[7]);
+	buffer += sprintf(buffer, "           " FOURLONG,
+			  regs->gprs[8], regs->gprs[9],
+			  regs->gprs[10], regs->gprs[11]);
+	buffer += sprintf(buffer, "           " FOURLONG,
+			  regs->gprs[12], regs->gprs[13],
+			  regs->gprs[14], regs->gprs[15]);
+	buffer += sprintf(buffer, "User ACRS: %08x %08x %08x %08x\n",
+			  task->thread.acrs[0], task->thread.acrs[1],
+			  task->thread.acrs[2], task->thread.acrs[3]);
+	buffer += sprintf(buffer, "           %08x %08x %08x %08x\n",
+			  task->thread.acrs[4], task->thread.acrs[5],
+			  task->thread.acrs[6], task->thread.acrs[7]);
+	buffer += sprintf(buffer, "           %08x %08x %08x %08x\n",
+			  task->thread.acrs[8], task->thread.acrs[9],
+			  task->thread.acrs[10], task->thread.acrs[11]);
+	buffer += sprintf(buffer, "           %08x %08x %08x %08x\n",
+			  task->thread.acrs[12], task->thread.acrs[13],
+			  task->thread.acrs[14], task->thread.acrs[15]);
+	return buffer;
+}
+
+/* Serializes concurrent oopses so their output does not interleave. */
+DEFINE_SPINLOCK(die_lock);
+
+/*
+ * Oops: print the reason and register state, then terminate.
+ * Panics when called from interrupt context or when panic_on_oops
+ * is set; otherwise kills the current task with SIGSEGV.
+ */
+void die(const char * str, struct pt_regs * regs, long err)
+{
+	static int die_counter;
+
+	debug_stop_all();
+	console_verbose();
+	spin_lock_irq(&die_lock);
+	bust_spinlocks(1);
+	printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
+        show_regs(regs);
+	bust_spinlocks(0);
+        spin_unlock_irq(&die_lock);
+	if (in_interrupt())
+		panic("Fatal exception in interrupt");
+	if (panic_on_oops)
+		panic("Fatal exception: panic_on_oops");
+        do_exit(SIGSEGV);
+}
+
+/*
+ * Log a user-space program check (interruption code + registers).
+ * With CONFIG_SYSCTL the report is gated by sysctl_userprocess_debug;
+ * without it the report is compiled in only for CONFIG_PROCESS_DEBUG.
+ * Note: "static inline void" is the conventional qualifier order.
+ */
+static inline void
+report_user_fault(long interruption_code, struct pt_regs *regs)
+{
+#if defined(CONFIG_SYSCTL)
+	if (!sysctl_userprocess_debug)
+		return;
+#endif
+#if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG)
+	printk("User process fault: interruption code 0x%lX\n",
+	       interruption_code);
+	show_regs(regs);
+#endif
+}
+
+/*
+ * Common trap dispatcher.  For user-mode faults: record the trap
+ * number, deliver the given signal and optionally report the fault.
+ * For kernel-mode faults: try an exception-table fixup, otherwise
+ * die().  'str' names the exception for the oops message.
+ */
+static void inline do_trap(long interruption_code, int signr, char *str,
+                           struct pt_regs *regs, siginfo_t *info)
+{
+	/*
+	 * We got all needed information from the lowcore and can
+	 * now safely switch on interrupts.
+	 */
+        if (regs->psw.mask & PSW_MASK_PSTATE)
+		local_irq_enable();
+
+        if (regs->psw.mask & PSW_MASK_PSTATE) {
+                struct task_struct *tsk = current;
+
+                tsk->thread.trap_no = interruption_code & 0xffff;
+		force_sig_info(signr, info, tsk);
+		report_user_fault(interruption_code, regs);
+        } else {
+                const struct exception_table_entry *fixup;
+                fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
+                if (fixup)
+                        regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
+                else
+                        die(str, regs, interruption_code);
+        }
+}
+
+/*
+ * Address of the instruction that caused the program check: the PSW
+ * points past the faulting instruction, so back up by the program
+ * check instruction length (pgm_ilc) from the lowcore.
+ */
+static inline void *get_check_address(struct pt_regs *regs)
+{
+	return (void *)((regs->psw.addr-S390_lowcore.pgm_ilc) & PSW_ADDR_INSN);
+}
+
+/* PER single-step event: notify a tracing parent via SIGTRAP. */
+void do_single_step(struct pt_regs *regs)
+{
+	if ((current->ptrace & PT_PTRACED) != 0)
+		force_sig(SIGTRAP, current);
+}
+
+/*
+ * Fallback handler for program checks without a dedicated handler.
+ * A user-mode fault is reported (subject to sysctl_userprocess_debug)
+ * and the task is terminated; a kernel-mode fault is fatal.
+ */
+asmlinkage void
+default_trap_handler(struct pt_regs * regs, long interruption_code)
+{
+        if (regs->psw.mask & PSW_MASK_PSTATE) {
+		local_irq_enable();
+		/* Report before do_exit(): do_exit() does not return,
+		 * so calling report_user_fault() after it (as before)
+		 * made the report unreachable. */
+		report_user_fault(interruption_code, regs);
+		do_exit(SIGSEGV);
+	} else
+		die("Unknown program exception", regs, interruption_code);
+}
+
+/*
+ * DO_ERROR_INFO - generate a trivial program check handler that fills
+ * in a siginfo_t and forwards to do_trap().  'siaddr' is expanded
+ * inside the generated function body, so get_check_address(regs)
+ * refers to the handler's own 'regs' argument.
+ */
+#define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \
+asmlinkage void name(struct pt_regs * regs, long interruption_code) \
+{ \
+        siginfo_t info; \
+        info.si_signo = signr; \
+        info.si_errno = 0; \
+        info.si_code = sicode; \
+        info.si_addr = (void *)siaddr; \
+        do_trap(interruption_code, signr, str, regs, &info); \
+}
+
+/* Simple handlers for the fixed program check codes installed in
+ * trap_init() below. */
+DO_ERROR_INFO(SIGILL, "addressing exception", addressing_exception,
+	      ILL_ILLADR, get_check_address(regs))
+DO_ERROR_INFO(SIGILL,  "execute exception", execute_exception,
+	      ILL_ILLOPN, get_check_address(regs))
+DO_ERROR_INFO(SIGFPE,  "fixpoint divide exception", divide_exception,
+	      FPE_INTDIV, get_check_address(regs))
+DO_ERROR_INFO(SIGFPE,  "fixpoint overflow exception", overflow_exception,
+	      FPE_INTOVF, get_check_address(regs))
+DO_ERROR_INFO(SIGFPE,  "HFP overflow exception", hfp_overflow_exception,
+	      FPE_FLTOVF, get_check_address(regs))
+DO_ERROR_INFO(SIGFPE,  "HFP underflow exception", hfp_underflow_exception,
+	      FPE_FLTUND, get_check_address(regs))
+DO_ERROR_INFO(SIGFPE,  "HFP significance exception", hfp_significance_exception,
+	      FPE_FLTRES, get_check_address(regs))
+DO_ERROR_INFO(SIGFPE,  "HFP divide exception", hfp_divide_exception,
+	      FPE_FLTDIV, get_check_address(regs))
+DO_ERROR_INFO(SIGFPE,  "HFP square root exception", hfp_sqrt_exception,
+	      FPE_FLTINV, get_check_address(regs))
+DO_ERROR_INFO(SIGILL,  "operand exception", operand_exception,
+	      ILL_ILLOPN, get_check_address(regs))
+DO_ERROR_INFO(SIGILL,  "privileged operation", privileged_op,
+	      ILL_PRVOPC, get_check_address(regs))
+DO_ERROR_INFO(SIGILL,  "special operation exception", special_op_exception,
+	      ILL_ILLOPN, get_check_address(regs))
+DO_ERROR_INFO(SIGILL,  "translation exception", translation_exception,
+	      ILL_ILLOPN, get_check_address(regs))
+
+/*
+ * Deliver SIGFPE for a floating point exception.  Decodes the IEEE
+ * exception flags from the Data Exception Code in the FPC register
+ * into an si_code, records the faulting instruction address in the
+ * thread, and forwards to do_trap().
+ */
+static inline void
+do_fp_trap(struct pt_regs *regs, void *location,
+           int fpc, long interruption_code)
+{
+	siginfo_t si;
+
+	si.si_signo = SIGFPE;
+	si.si_errno = 0;
+	si.si_addr = location;
+	si.si_code = 0;
+	/* FPC[2] is Data Exception Code */
+	if ((fpc & 0x00000300) == 0) {
+		/* bits 6 and 7 of DXC are 0 iff IEEE exception */
+		if (fpc & 0x8000) /* invalid fp operation */
+			si.si_code = FPE_FLTINV;
+		else if (fpc & 0x4000) /* div by 0 */
+			si.si_code = FPE_FLTDIV;
+		else if (fpc & 0x2000) /* overflow */
+			si.si_code = FPE_FLTOVF;
+		else if (fpc & 0x1000) /* underflow */
+			si.si_code = FPE_FLTUND;
+		else if (fpc & 0x0800) /* inexact */
+			si.si_code = FPE_FLTRES;
+	}
+	current->thread.ieee_instruction_pointer = (addr_t) location;
+	do_trap(interruption_code, SIGFPE,
+		"floating point exception", regs, &si);
+}
+
+/*
+ * Operation exception (illegal opcode) handler.  Recognizes the
+ * ptrace breakpoint opcode (SIGTRAP to a traced task, SIGILL
+ * otherwise) and, with CONFIG_MATHEMU, dispatches emulatable
+ * floating point opcodes to the math emulation code.  Anything else
+ * results in SIGILL via do_trap().
+ */
+asmlinkage void illegal_op(struct pt_regs * regs, long interruption_code)
+{
+	siginfo_t info;
+        __u8 opcode[6];
+	__u16 *location;
+	int signal = 0;
+
+	location = (__u16 *) get_check_address(regs);
+
+	/*
+	 * We got all needed information from the lowcore and can
+	 * now safely switch on interrupts.
+	 */
+	if (regs->psw.mask & PSW_MASK_PSTATE)
+		local_irq_enable();
+
+	if (regs->psw.mask & PSW_MASK_PSTATE) {
+		get_user(*((__u16 *) opcode), (__u16 __user *) location);
+		if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
+			/* ptrace breakpoint: trap if traced, else SIGILL.
+			 * signal stays 0 for the traced case, so no
+			 * additional signal is raised below. */
+			if (current->ptrace & PT_PTRACED)
+				force_sig(SIGTRAP, current);
+			else
+				signal = SIGILL;
+#ifdef CONFIG_MATHEMU
+		} else if (opcode[0] == 0xb3) {
+			get_user(*((__u16 *) (opcode+2)), location+1);
+			signal = math_emu_b3(opcode, regs);
+                } else if (opcode[0] == 0xed) {
+			get_user(*((__u32 *) (opcode+2)),
+				 (__u32 *)(location+1));
+			signal = math_emu_ed(opcode, regs);
+		} else if (*((__u16 *) opcode) == 0xb299) {
+			get_user(*((__u16 *) (opcode+2)), location+1);
+			signal = math_emu_srnm(opcode, regs);
+		} else if (*((__u16 *) opcode) == 0xb29c) {
+			get_user(*((__u16 *) (opcode+2)), location+1);
+			signal = math_emu_stfpc(opcode, regs);
+		} else if (*((__u16 *) opcode) == 0xb29d) {
+			get_user(*((__u16 *) (opcode+2)), location+1);
+			signal = math_emu_lfpc(opcode, regs);
+#endif
+		} else
+			signal = SIGILL;
+	} else
+		signal = SIGILL;
+
+#ifdef CONFIG_MATHEMU
+        if (signal == SIGFPE)
+		do_fp_trap(regs, location,
+                           current->thread.fp_regs.fpc, interruption_code);
+        else if (signal == SIGSEGV) {
+		info.si_signo = signal;
+		info.si_errno = 0;
+		info.si_code = SEGV_MAPERR;
+		info.si_addr = (void *) location;
+		do_trap(interruption_code, signal,
+			"user address fault", regs, &info);
+	} else
+#endif
+        if (signal) {
+		info.si_signo = signal;
+		info.si_errno = 0;
+		info.si_code = ILL_ILLOPC;
+		info.si_addr = (void *) location;
+		do_trap(interruption_code, signal,
+			"illegal operation", regs, &info);
+	}
+}
+
+
+#ifdef CONFIG_MATHEMU
+/*
+ * Specification exception handler with math emulation: user-mode
+ * floating point load/store opcodes are emulated; everything else
+ * (including all kernel-mode faults) gets SIGILL via do_trap().
+ */
+asmlinkage void 
+specification_exception(struct pt_regs * regs, long interruption_code)
+{
+        __u8 opcode[6];
+	__u16 *location = NULL;
+	int signal = 0;
+
+	location = (__u16 *) get_check_address(regs);
+
+	/*
+	 * We got all needed information from the lowcore and can
+	 * now safely switch on interrupts.
+	 */
+        if (regs->psw.mask & PSW_MASK_PSTATE)
+		local_irq_enable();
+
+        if (regs->psw.mask & PSW_MASK_PSTATE) {
+		get_user(*((__u16 *) opcode), location);
+		switch (opcode[0]) {
+		case 0x28: /* LDR Rx,Ry   */
+			signal = math_emu_ldr(opcode);
+			break;
+		case 0x38: /* LER Rx,Ry   */
+			signal = math_emu_ler(opcode);
+			break;
+		case 0x60: /* STD R,D(X,B) */
+			get_user(*((__u16 *) (opcode+2)), location+1);
+			signal = math_emu_std(opcode, regs);
+			break;
+		case 0x68: /* LD R,D(X,B) */
+			get_user(*((__u16 *) (opcode+2)), location+1);
+			signal = math_emu_ld(opcode, regs);
+			break;
+		case 0x70: /* STE R,D(X,B) */
+			get_user(*((__u16 *) (opcode+2)), location+1);
+			signal = math_emu_ste(opcode, regs);
+			break;
+		case 0x78: /* LE R,D(X,B) */
+			get_user(*((__u16 *) (opcode+2)), location+1);
+			signal = math_emu_le(opcode, regs);
+			break;
+		default:
+			signal = SIGILL;
+			break;
+                }
+        } else
+		signal = SIGILL;
+
+        if (signal == SIGFPE)
+		do_fp_trap(regs, location,
+                           current->thread.fp_regs.fpc, interruption_code);
+        else if (signal) {
+		siginfo_t info;
+		info.si_signo = signal;
+		info.si_errno = 0;
+		info.si_code = ILL_ILLOPN;
+		info.si_addr = location;
+		do_trap(interruption_code, signal, 
+			"specification exception", regs, &info);
+	}
+}
+#else
+/* Without math emulation a specification exception is plain SIGILL. */
+DO_ERROR_INFO(SIGILL, "specification exception", specification_exception,
+	      ILL_ILLOPN, get_check_address(regs));
+#endif
+
+/*
+ * Data exception handler.  On IEEE-capable machines the FPC register
+ * is stored so its Data Exception Code can be inspected; otherwise
+ * (CONFIG_MATHEMU, user mode) the faulting opcode is emulated.  The
+ * final signal is chosen from the FPC DXC bits: SIGFPE if any are
+ * set, SIGILL otherwise.
+ */
+asmlinkage void data_exception(struct pt_regs * regs, long interruption_code)
+{
+	__u16 *location;
+	int signal = 0;
+
+	location = (__u16 *) get_check_address(regs);
+
+	/*
+	 * We got all needed information from the lowcore and can
+	 * now safely switch on interrupts.
+	 */
+	if (regs->psw.mask & PSW_MASK_PSTATE)
+		local_irq_enable();
+
+	if (MACHINE_HAS_IEEE)
+		__asm__ volatile ("stfpc %0\n\t" 
+				  : "=m" (current->thread.fp_regs.fpc));
+
+#ifdef CONFIG_MATHEMU
+        else if (regs->psw.mask & PSW_MASK_PSTATE) {
+        	__u8 opcode[6];
+		get_user(*((__u16 *) opcode), location);
+		switch (opcode[0]) {
+		case 0x28: /* LDR Rx,Ry   */
+			signal = math_emu_ldr(opcode);
+			break;
+		case 0x38: /* LER Rx,Ry   */
+			signal = math_emu_ler(opcode);
+			break;
+		case 0x60: /* STD R,D(X,B) */
+			get_user(*((__u16 *) (opcode+2)), location+1);
+			signal = math_emu_std(opcode, regs);
+			break;
+		case 0x68: /* LD R,D(X,B) */
+			get_user(*((__u16 *) (opcode+2)), location+1);
+			signal = math_emu_ld(opcode, regs);
+			break;
+		case 0x70: /* STE R,D(X,B) */
+			get_user(*((__u16 *) (opcode+2)), location+1);
+			signal = math_emu_ste(opcode, regs);
+			break;
+		case 0x78: /* LE R,D(X,B) */
+			get_user(*((__u16 *) (opcode+2)), location+1);
+			signal = math_emu_le(opcode, regs);
+			break;
+		case 0xb3:
+			get_user(*((__u16 *) (opcode+2)), location+1);
+			signal = math_emu_b3(opcode, regs);
+			break;
+                case 0xed:
+			get_user(*((__u32 *) (opcode+2)),
+				 (__u32 *)(location+1));
+			signal = math_emu_ed(opcode, regs);
+			break;
+	        case 0xb2:
+			if (opcode[1] == 0x99) {
+				get_user(*((__u16 *) (opcode+2)), location+1);
+				signal = math_emu_srnm(opcode, regs);
+			} else if (opcode[1] == 0x9c) {
+				get_user(*((__u16 *) (opcode+2)), location+1);
+				signal = math_emu_stfpc(opcode, regs);
+			} else if (opcode[1] == 0x9d) {
+				get_user(*((__u16 *) (opcode+2)), location+1);
+				signal = math_emu_lfpc(opcode, regs);
+			} else
+				signal = SIGILL;
+			break;
+		default:
+			signal = SIGILL;
+			break;
+                }
+        }
+#endif 
+	/* NOTE(review): this unconditionally overwrites any signal
+	 * chosen by the emulation code above; presumably the emulation
+	 * routines record their outcome in fp_regs.fpc -- confirm. */
+	if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
+		signal = SIGFPE;
+	else
+		signal = SIGILL;
+        if (signal == SIGFPE)
+		do_fp_trap(regs, location,
+                           current->thread.fp_regs.fpc, interruption_code);
+        else if (signal) {
+		siginfo_t info;
+		info.si_signo = signal;
+		info.si_errno = 0;
+		info.si_code = ILL_ILLOPN;
+		info.si_addr = location;
+		do_trap(interruption_code, signal, 
+			"data exception", regs, &info);
+	}
+}
+
+/*
+ * Space switch event: force the user PSW back to home space mode
+ * and deliver SIGILL via do_trap().
+ */
+asmlinkage void space_switch_exception(struct pt_regs * regs, long int_code)
+{
+        siginfo_t info;
+
+	/* Set user psw back to home space mode. */
+	if (regs->psw.mask & PSW_MASK_PSTATE)
+		regs->psw.mask |= PSW_ASC_HOME;
+	/* Send SIGILL. */
+        info.si_signo = SIGILL;
+        info.si_errno = 0;
+        info.si_code = ILL_PRVOPC;
+        info.si_addr = get_check_address(regs);
+        do_trap(int_code, SIGILL, "space switch event", regs, &info);
+}
+
+/*
+ * Kernel stack overflow: oops and then panic unconditionally --
+ * the stack is corrupt, so returning is impossible.
+ */
+asmlinkage void kernel_stack_overflow(struct pt_regs * regs)
+{
+	die("Kernel stack overflow", regs, 0);
+	/* Reached only if die() returns (it normally does not). */
+	panic("Corrupt kernel stack, can't continue.");
+}
+
+
+/* init is done in lowcore.S and head.S */
+
+/*
+ * Fill the program check handler table: every code defaults to
+ * default_trap_handler, then the known program check codes get
+ * their specific handlers.  On VM, additionally set up pfault
+ * pseudo page faults (external interrupt 0x2603) or, failing that,
+ * PAGEX page faults on 31-bit.
+ */
+void __init trap_init(void)
+{
+        int i;
+
+        for (i = 0; i < 128; i++)
+          pgm_check_table[i] = &default_trap_handler;
+        pgm_check_table[1] = &illegal_op;
+        pgm_check_table[2] = &privileged_op;
+        pgm_check_table[3] = &execute_exception;
+        pgm_check_table[4] = &do_protection_exception;
+        pgm_check_table[5] = &addressing_exception;
+        pgm_check_table[6] = &specification_exception;
+        pgm_check_table[7] = &data_exception;
+        pgm_check_table[8] = &overflow_exception;
+        pgm_check_table[9] = &divide_exception;
+        pgm_check_table[0x0A] = &overflow_exception;
+        pgm_check_table[0x0B] = &divide_exception;
+        pgm_check_table[0x0C] = &hfp_overflow_exception;
+        pgm_check_table[0x0D] = &hfp_underflow_exception;
+        pgm_check_table[0x0E] = &hfp_significance_exception;
+        pgm_check_table[0x0F] = &hfp_divide_exception;
+        pgm_check_table[0x10] = &do_dat_exception;
+        pgm_check_table[0x11] = &do_dat_exception;
+        pgm_check_table[0x12] = &translation_exception;
+        pgm_check_table[0x13] = &special_op_exception;
+#ifndef CONFIG_ARCH_S390X
+ 	pgm_check_table[0x14] = &do_pseudo_page_fault;
+#else /* CONFIG_ARCH_S390X */
+        pgm_check_table[0x38] = &do_dat_exception;
+	pgm_check_table[0x39] = &do_dat_exception;
+	pgm_check_table[0x3A] = &do_dat_exception;
+        pgm_check_table[0x3B] = &do_dat_exception;
+#endif /* CONFIG_ARCH_S390X */
+        pgm_check_table[0x15] = &operand_exception;
+        pgm_check_table[0x1C] = &space_switch_exception;
+        pgm_check_table[0x1D] = &hfp_sqrt_exception;
+	pgm_check_table[0x40] = &do_monitor_call;
+
+	if (MACHINE_IS_VM) {
+		/*
+		 * First try to get pfault pseudo page faults going.
+		 * If this isn't available turn on pagex page faults.
+		 */
+#ifdef CONFIG_PFAULT
+		/* request the 0x2603 external interrupt */
+		if (register_early_external_interrupt(0x2603, pfault_interrupt,
+						      &ext_int_pfault) != 0)
+			panic("Couldn't request external interrupt 0x2603");
+
+		if (pfault_init() == 0) 
+			return;
+		
+		/* Tough luck, no pfault. */
+		unregister_early_external_interrupt(0x2603, pfault_interrupt,
+						    &ext_int_pfault);
+#endif
+#ifndef CONFIG_ARCH_S390X
+		cpcmd("SET PAGEX ON", NULL, 0);
+#endif
+	}
+}
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
new file mode 100644
index 0000000..89fdb38
--- /dev/null
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -0,0 +1,130 @@
+/* ld script to make s390 Linux kernel
+ * Written by Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+#include <asm-generic/vmlinux.lds.h>
+#include <linux/config.h>
+
+#ifndef CONFIG_ARCH_S390X
+OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
+OUTPUT_ARCH(s390)
+ENTRY(_start)
+/* s390 is big-endian: on 31 bit the low word of jiffies_64 is at offset 4 */
+jiffies = jiffies_64 + 4;
+#else
+OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
+OUTPUT_ARCH(s390:64-bit)
+ENTRY(_start)
+jiffies = jiffies_64;
+#endif
+
+SECTIONS
+{
+  . = 0x00000000;
+  _text = .;			/* Text and read-only data */
+  .text : {
+	*(.text)
+	SCHED_TEXT
+	LOCK_TEXT
+	*(.fixup)
+	*(.gnu.warning)
+	} = 0x0700		/* fill pattern 0x07,0x00: bcr 0,%r0 (a s390 nop) */
+
+  _etext = .;			/* End of text section */
+
+  . = ALIGN(16);		/* Exception table */
+  __start___ex_table = .;
+  __ex_table : { *(__ex_table) }
+  __stop___ex_table = .;
+
+  RODATA
+
+#ifdef CONFIG_SHARED_KERNEL
+  . = ALIGN(1048576);		/* VM shared segments are 1MB aligned */
+
+  _eshared = .;			/* End of shareable data */
+#endif
+
+  .data : {			/* Data */
+	*(.data)
+	CONSTRUCTORS
+	}
+
+  . = ALIGN(4096);
+  __nosave_begin = .;
+  .data_nosave : { *(.data.nosave) }
+  . = ALIGN(4096);
+  __nosave_end = .;
+
+  . = ALIGN(4096);
+  .data.page_aligned : { *(.data.idt) }
+
+  . = ALIGN(32);
+  .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+
+  _edata = .;			/* End of data section */
+
+  . = ALIGN(8192);		/* init_task */
+  .data.init_task : { *(.data.init_task) }
+
+  /* will be freed after init */
+  . = ALIGN(4096);		/* Init code and data */
+  __init_begin = .;
+  .init.text : { 
+	_sinittext = .;
+	*(.init.text)
+	_einittext = .;
+  }
+  .init.data : { *(.init.data) }
+  . = ALIGN(256);
+  __setup_start = .;
+  .init.setup : { *(.init.setup) }
+  __setup_end = .;
+  __initcall_start = .;
+  /* initcalls run at boot in ascending level order (1 first, 7 last) */
+  .initcall.init : {
+	*(.initcall1.init) 
+	*(.initcall2.init) 
+	*(.initcall3.init) 
+	*(.initcall4.init) 
+	*(.initcall5.init) 
+	*(.initcall6.init) 
+	*(.initcall7.init)
+  }
+  __initcall_end = .;
+  __con_initcall_start = .;
+  .con_initcall.init : { *(.con_initcall.init) }
+  __con_initcall_end = .;
+  SECURITY_INIT
+  . = ALIGN(256);
+  __initramfs_start = .;
+  .init.ramfs : { *(.init.initramfs) }
+  . = ALIGN(2);
+  __initramfs_end = .;
+  . = ALIGN(256);
+  __per_cpu_start = .;
+  .data.percpu  : { *(.data.percpu) }
+  __per_cpu_end = .;
+  . = ALIGN(4096);
+  __init_end = .;
+  /* freed after init ends here */
+
+  __bss_start = .;		/* BSS */
+  .bss : { *(.bss) }
+  . = ALIGN(2);
+  __bss_stop = .;
+
+  _end = . ;
+
+  /* Sections to be discarded */
+  /DISCARD/ : {
+	*(.exitcall.exit)
+	}
+
+  /* Stabs debugging sections.  */
+  .stab 0 : { *(.stab) }
+  .stabstr 0 : { *(.stabstr) }
+  .stab.excl 0 : { *(.stab.excl) }
+  .stab.exclstr 0 : { *(.stab.exclstr) }
+  .stab.index 0 : { *(.stab.index) }
+  .stab.indexstr 0 : { *(.stab.indexstr) }
+  .comment 0 : { *(.comment) }
+}
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
new file mode 100644
index 0000000..bb6cf02
--- /dev/null
+++ b/arch/s390/kernel/vtime.c
@@ -0,0 +1,565 @@
+/*
+ *  arch/s390/kernel/vtime.c
+ *    Virtual cpu timer based timer functions.
+ *
+ *  S390 version
+ *    Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Jan Glauber <jan.glauber@de.ibm.com>
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/types.h>
+#include <linux/timex.h>
+#include <linux/notifier.h>
+#include <linux/kernel_stat.h>
+#include <linux/rcupdate.h>
+#include <linux/posix-timers.h>
+
+#include <asm/s390_ext.h>
+#include <asm/timer.h>
+
+/* magic cookie stamped into each vtimer_list by init_virt_timer */
+#define VTIMER_MAGIC (TIMER_MAGIC + 1)
+/* registration record for the 0x1005 external interrupt (see vtime_init) */
+static ext_int_info_t ext_int_info_timer;
+/* per-cpu queue of pending virtual cpu timers */
+DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+/*
+ * Update process times based on virtual cpu times stored by entry.S
+ * to the lowcore fields user_timer, system_timer & steal_clock.
+ * Also drives the per-cpu tick work: local timers, RCU callbacks,
+ * scheduler_tick and posix cpu timers.
+ */
+void account_user_vtime(struct task_struct *tsk)
+{
+	cputime_t cputime;
+	__u64 timer, clock;
+	int rcu_user_flag;
+
+	timer = S390_lowcore.last_update_timer;
+	clock = S390_lowcore.last_update_clock;
+	asm volatile ("  STPT %0\n"    /* Store current cpu timer value */
+		      "  STCK %1"      /* Store current tod clock value */
+		      : "=m" (S390_lowcore.last_update_timer),
+		        "=m" (S390_lowcore.last_update_clock) );
+	/* the cpu timer counts down: elapsed time is old minus new value */
+	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
+	/* the TOD clock counts up: elapsed time is new minus old value */
+	S390_lowcore.steal_clock += S390_lowcore.last_update_clock - clock;
+
+	/* account whole cputime units (>> 12); the remainder stays in lowcore */
+	cputime = S390_lowcore.user_timer >> 12;
+	rcu_user_flag = cputime != 0;	/* any user time since the last update? */
+	S390_lowcore.user_timer -= cputime << 12;
+	S390_lowcore.steal_clock -= cputime << 12;
+	account_user_time(tsk, cputime);
+
+	cputime =  S390_lowcore.system_timer >> 12;
+	S390_lowcore.system_timer -= cputime << 12;
+	S390_lowcore.steal_clock -= cputime << 12;
+	account_system_time(tsk, HARDIRQ_OFFSET, cputime);
+
+	/* wall time not covered by user/system was stolen from the virtual
+	 * cpu; only account it while the balance is positive */
+	cputime = S390_lowcore.steal_clock;
+	if ((__s64) cputime > 0) {
+		cputime >>= 12;
+		S390_lowcore.steal_clock -= cputime << 12;
+		account_steal_time(tsk, cputime);
+	}
+
+	run_local_timers();
+	if (rcu_pending(smp_processor_id()))
+		rcu_check_callbacks(smp_processor_id(), rcu_user_flag);
+	scheduler_tick();
+ 	run_posix_cpu_timers(tsk);
+}
+
+/*
+ * Update the current task's system time from the virtual cpu timer.
+ * Like account_user_vtime, but only system time is accounted and the
+ * tick work is not run.
+ */
+void account_system_vtime(struct task_struct *tsk)
+{
+	cputime_t cputime;
+	__u64 timer;
+
+	timer = S390_lowcore.last_update_timer;
+	asm volatile ("  STPT %0"    /* Store current cpu timer value */
+		      : "=m" (S390_lowcore.last_update_timer) );
+	/* the cpu timer counts down: elapsed time is old minus new value */
+	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
+
+	/* account whole cputime units (>> 12); the remainder stays in lowcore */
+	cputime =  S390_lowcore.system_timer >> 12;
+	S390_lowcore.system_timer -= cputime << 12;
+	S390_lowcore.steal_clock -= cputime << 12;
+	account_system_time(tsk, 0, cputime);
+}
+
+/*
+ * Program the cpu timer with a new expiry value and account the time
+ * the previous slice had already consumed as system time.
+ */
+static inline void set_vtimer(__u64 expires)
+{
+	__u64 timer;
+
+	asm volatile ("  STPT %0\n"  /* Store current cpu timer value */
+		      "  SPT %1"     /* Set new value immediately afterwards */
+		      : "=m" (timer) : "m" (expires) );
+	S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
+	S390_lowcore.last_update_timer = expires;
+
+	/* store expire time for this CPU timer */
+	per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires;
+}
+#else
+/* Program the cpu timer; no cputime accounting in this configuration. */
+static inline void set_vtimer(__u64 expires)
+{
+	S390_lowcore.last_update_timer = expires;
+	asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));
+
+	/* store expire time for this CPU timer */
+	per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires;
+}
+#endif
+
+/*
+ * Called when the cpu leaves idle (see vtimer_idle_notify): re-arm the
+ * cpu timer with the expiry value that stop_cpu_timer() saved in ->idle.
+ */
+static void start_cpu_timer(void)
+{
+	set_vtimer(per_cpu(virt_cpu_timer, smp_processor_id()).idle);
+}
+
+/*
+ * Called when the cpu becomes idle (see vtimer_idle_notify): park the
+ * cpu timer, remembering in vt_list->idle how far the current slice
+ * got so start_cpu_timer() can resume it.
+ */
+static void stop_cpu_timer(void)
+{
+	__u64 done;
+	struct vtimer_queue *vt_list;
+
+	vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
+
+	/* nothing to do */
+	if (list_empty(&vt_list->list)) {
+		vt_list->idle = VTIMER_MAX_SLICE;
+		goto fire;
+	}
+
+	/* store progress */
+	asm volatile ("STPT %0" : "=m" (done));
+
+	/*
+	 * If done is negative we do not stop the CPU timer
+	 * because we will get instantly an interrupt that
+	 * will start the CPU timer again.
+	 */
+	if (done & 1LL<<63)	/* test the sign bit of the timer value */
+		return;
+	else
+		vt_list->offset += vt_list->to_expire - done;
+
+	/* save the actual expire value */
+	vt_list->idle = done;
+
+	/*
+	 * We cannot halt the CPU timer, we just write a value that
+	 * nearly never expires (only after 71 years) and re-write
+	 * the stored expire value if we continue the timer
+	 */
+ fire:
+	set_vtimer(VTIMER_MAX_SLICE);
+}
+
+/*
+ * Sorted add to a list. List is linear searched until first bigger
+ * element is found.
+ */
+static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
+{
+	struct vtimer_list *tmp;
+
+	/* insert in front of the first entry that expires later */
+	list_for_each_entry(tmp, head, entry) {
+		if (tmp->expires > timer->expires) {
+			list_add_tail(&timer->entry, &tmp->entry);
+			return;
+		}
+	}
+	/* nothing expires later: append at the tail */
+	list_add_tail(&timer->entry, head);
+}
+
+/*
+ * Do the callback functions of expired vtimer events.
+ * Called from within the interrupt handler.
+ */
+static void do_callbacks(struct list_head *cb_list, struct pt_regs *regs)
+{
+	struct vtimer_queue *vt_list;
+	struct vtimer_list *event, *tmp;
+	void (*fn)(unsigned long, struct pt_regs*);
+	unsigned long data;
+
+	if (list_empty(cb_list))
+		return;
+
+	vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
+
+	list_for_each_entry_safe(event, tmp, cb_list, entry) {
+		fn = event->function;
+		data = event->data;
+		fn(data, regs);
+
+		if (!event->interval)
+			/* delete one shot timer; the event sits on the
+			 * private cb_list, so no lock is needed here */
+			list_del_init(&event->entry);
+		else {
+			/* move interval timer back to list */
+			spin_lock(&vt_list->lock);
+			list_del_init(&event->entry);
+			list_add_sorted(event, &vt_list->list);
+			spin_unlock(&vt_list->lock);
+		}
+	}
+}
+
+/*
+ * Handler for the virtual CPU timer.
+ *
+ * vt_list->offset accumulates the time the queued timers have already
+ * waited; an entry counts as expired once its expires value is <= offset.
+ */
+static void do_cpu_timer_interrupt(struct pt_regs *regs, __u16 error_code)
+{
+	int cpu;
+	__u64 next, delta;
+	struct vtimer_queue *vt_list;
+	struct vtimer_list *event, *tmp;
+	struct list_head *ptr;
+	/* the callback queue */
+	struct list_head cb_list;
+
+	INIT_LIST_HEAD(&cb_list);
+	cpu = smp_processor_id();
+	vt_list = &per_cpu(virt_cpu_timer, cpu);
+
+	/* walk timer list, fire all expired events */
+	spin_lock(&vt_list->lock);
+
+	if (vt_list->to_expire < VTIMER_MAX_SLICE)
+		vt_list->offset += vt_list->to_expire;
+
+	list_for_each_entry_safe(event, tmp, &vt_list->list, entry) {
+		if (event->expires > vt_list->offset)
+			/* found first unexpired event, leave */
+			break;
+
+		/* re-charge interval timer, we have to add the offset */
+		if (event->interval)
+			event->expires = event->interval + vt_list->offset;
+
+		/* move expired timer to the callback queue */
+		list_move_tail(&event->entry, &cb_list);
+	}
+	spin_unlock(&vt_list->lock);
+	/* run the callbacks without holding the queue lock */
+	do_callbacks(&cb_list, regs);
+
+	/* next event is first in list */
+	spin_lock(&vt_list->lock);
+	if (!list_empty(&vt_list->list)) {
+		ptr = vt_list->list.next;
+		event = list_entry(ptr, struct vtimer_list, entry);
+		next = event->expires - vt_list->offset;
+
+		/* add the expired time from this interrupt handler
+		 * and the callback functions
+		 */
+		asm volatile ("STPT %0" : "=m" (delta));
+		/* the timer counted down past zero: negate (two's
+		 * complement) to get the time elapsed since expiry */
+		delta = 0xffffffffffffffffLL - delta + 1;
+		vt_list->offset += delta;
+		next -= delta;
+	} else {
+		vt_list->offset = 0;
+		next = VTIMER_MAX_SLICE;
+	}
+	spin_unlock(&vt_list->lock);
+	set_vtimer(next);
+}
+
+/*
+ * init_virt_timer - initialize a vtimer_list before first use.
+ *
+ * Stamps the magic cookie checked by check_vtimer and leaves the timer
+ * unlinked (not pending). The caller still has to set function, data
+ * and expires before adding the timer (see prepare_vtimer).
+ */
+void init_virt_timer(struct vtimer_list *timer)
+{
+	timer->magic = VTIMER_MAGIC;
+	timer->function = NULL;
+	INIT_LIST_HEAD(&timer->entry);
+	spin_lock_init(&timer->lock);
+}
+EXPORT_SYMBOL(init_virt_timer);
+
+/* Return 0 if @timer was set up by init_virt_timer(), -EINVAL otherwise. */
+static inline int check_vtimer(struct vtimer_list *timer)
+{
+	return (timer->magic == VTIMER_MAGIC) ? 0 : -EINVAL;
+}
+
+/* A timer counts as pending while it is linked into a per-cpu list. */
+static inline int vtimer_pending(struct vtimer_list *timer)
+{
+	return !list_empty(&timer->entry);
+}
+
+/*
+ * this function should only run on the specified CPU
+ */
+static void internal_add_vtimer(struct vtimer_list *timer)
+{
+	unsigned long flags;
+	__u64 done;
+	struct vtimer_list *event;
+	struct vtimer_queue *vt_list;
+
+	vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
+	spin_lock_irqsave(&vt_list->lock, flags);
+
+	if (timer->cpu != smp_processor_id())
+		printk("internal_add_vtimer: BUG, running on wrong CPU");
+
+	/* if list is empty we only have to set the timer */
+	if (list_empty(&vt_list->list)) {
+		/* reset the offset, this may happen if the last timer was
+		 * just deleted by mod_virt_timer and the interrupt
+		 * didn't happen until here
+		 */
+		vt_list->offset = 0;
+		goto fire;
+	}
+
+	/* save progress */
+	asm volatile ("STPT %0" : "=m" (done));
+
+	/* calculate completed work */
+	done = vt_list->to_expire - done + vt_list->offset;
+	vt_list->offset = 0;
+
+	/* rebase the queued timers so the new timer can be compared
+	 * against them with a fresh (zero) offset */
+	list_for_each_entry(event, &vt_list->list, entry)
+		event->expires -= done;
+
+ fire:
+	list_add_sorted(timer, &vt_list->list);
+
+	/* get first element, which is the next vtimer slice */
+	event = list_entry(vt_list->list.next, struct vtimer_list, entry);
+
+	set_vtimer(event->expires);
+	spin_unlock_irqrestore(&vt_list->lock, flags);
+	/* release CPU acquired in prepare_vtimer or mod_virt_timer() */
+	put_cpu();
+}
+
+/*
+ * Validate a timer before it is added and pin the current CPU for it.
+ * Returns 0 on success with the cpu reference held (released again in
+ * internal_add_vtimer), or a negative error code.
+ */
+static inline int prepare_vtimer(struct vtimer_list *timer)
+{
+	if (check_vtimer(timer) || !timer->function) {
+		printk("add_virt_timer: uninitialized timer\n");
+		return -EINVAL;
+	}
+
+	if (!timer->expires || timer->expires > VTIMER_MAX_SLICE) {
+		printk("add_virt_timer: invalid timer expire value!\n");
+		return -EINVAL;
+	}
+
+	if (vtimer_pending(timer)) {
+		printk("add_virt_timer: timer pending\n");
+		return -EBUSY;
+	}
+
+	timer->cpu = get_cpu();
+	return 0;
+}
+
+/*
+ * add_virt_timer - add an oneshot virtual CPU timer
+ */
+void add_virt_timer(void *new)
+{
+	struct vtimer_list *timer = (struct vtimer_list *) new;
+
+	if (prepare_vtimer(timer) < 0)
+		return;
+	/* a oneshot timer has no re-charge interval */
+	timer->interval = 0;
+	internal_add_vtimer(timer);
+}
+EXPORT_SYMBOL(add_virt_timer);
+
+/*
+ * add_virt_timer_int - add an interval virtual CPU timer
+ */
+void add_virt_timer_periodic(void *new)
+{
+	struct vtimer_list *timer = (struct vtimer_list *) new;
+
+	if (prepare_vtimer(timer) < 0)
+		return;
+	/* re-charge the timer with the same interval on every expiry */
+	timer->interval = timer->expires;
+	internal_add_vtimer(timer);
+}
+EXPORT_SYMBOL(add_virt_timer_periodic);
+
+/*
+ * If we change a pending timer the function must be called on the CPU
+ * where the timer is running on, e.g. by smp_call_function_on()
+ *
+ * The original mod_timer adds the timer if it is not pending. For compatibility
+ * we do the same. The timer will be added on the current CPU as a oneshot timer.
+ *
+ * returns whether it has modified a pending timer (1) or not (0)
+ */
+int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
+{
+	struct vtimer_queue *vt_list;
+	unsigned long flags;
+	int cpu;
+
+	if (check_vtimer(timer) || !timer->function) {
+		printk("mod_virt_timer: uninitialized timer\n");
+		return	-EINVAL;
+	}
+
+	if (!expires || expires > VTIMER_MAX_SLICE) {
+		printk("mod_virt_timer: invalid expire range\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * This is a common optimization triggered by the
+	 * networking code - if the timer is re-modified
+	 * to be the same thing then just return:
+	 */
+	if (timer->expires == expires && vtimer_pending(timer))
+		return 1;
+
+	cpu = get_cpu();
+	vt_list = &per_cpu(virt_cpu_timer, cpu);
+
+	/* disable interrupts before test if timer is pending */
+	spin_lock_irqsave(&vt_list->lock, flags);
+
+	/* if timer isn't pending add it on the current CPU */
+	if (!vtimer_pending(timer)) {
+		spin_unlock_irqrestore(&vt_list->lock, flags);
+		/* we do not activate an interval timer with mod_virt_timer */
+		timer->interval = 0;
+		timer->expires = expires;
+		timer->cpu = cpu;
+		/* internal_add_vtimer drops the cpu reference via put_cpu */
+		internal_add_vtimer(timer);
+		return 0;
+	}
+
+	/* check if we run on the right CPU */
+	if (timer->cpu != cpu) {
+		printk("mod_virt_timer: running on wrong CPU, check your code\n");
+		spin_unlock_irqrestore(&vt_list->lock, flags);
+		put_cpu();
+		return -EINVAL;
+	}
+
+	list_del_init(&timer->entry);
+	timer->expires = expires;
+
+	/* also change the interval if we have an interval timer */
+	if (timer->interval)
+		timer->interval = expires;
+
+	/* the timer can't expire anymore so we can release the lock */
+	spin_unlock_irqrestore(&vt_list->lock, flags);
+	internal_add_vtimer(timer);
+	return 1;
+}
+EXPORT_SYMBOL(mod_virt_timer);
+
+/*
+ * delete a virtual timer
+ *
+ * returns whether the deleted timer was pending (1) or not (0)
+ */
+int del_virt_timer(struct vtimer_list *timer)
+{
+	unsigned long flags;
+	struct vtimer_queue *vt_list;
+
+	if (check_vtimer(timer)) {
+		printk("del_virt_timer: timer not initialized\n");
+		return -EINVAL;
+	}
+
+	/* check if timer is pending */
+	if (!vtimer_pending(timer))
+		return 0;
+
+	vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
+	spin_lock_irqsave(&vt_list->lock, flags);
+
+	/* we don't interrupt a running timer, just let it expire! */
+	list_del_init(&timer->entry);
+
+	/* last timer removed */
+	if (list_empty(&vt_list->list)) {
+		vt_list->to_expire = 0;
+		vt_list->offset = 0;
+	}
+
+	spin_unlock_irqrestore(&vt_list->lock, flags);
+	return 1;
+}
+EXPORT_SYMBOL(del_virt_timer);
+
+/*
+ * Start the virtual CPU timer on the current CPU.
+ */
+void init_cpu_vtimer(void)
+{
+	struct vtimer_queue *vt_list;
+	unsigned long cr0;
+
+	/* kick the virtual timer */
+	S390_lowcore.exit_timer = VTIMER_MAX_SLICE;
+	S390_lowcore.last_update_timer = VTIMER_MAX_SLICE;
+	asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));
+	asm volatile ("STCK %0" : "=m" (S390_lowcore.last_update_clock));
+	/* enable the cpu-timer external interrupt subclass mask in CR0 */
+	__ctl_store(cr0, 0, 0);
+	cr0 |= 0x400;
+	__ctl_load(cr0, 0, 0);
+
+	vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
+	INIT_LIST_HEAD(&vt_list->list);
+	spin_lock_init(&vt_list->lock);
+	vt_list->to_expire = 0;
+	vt_list->offset = 0;
+	vt_list->idle = 0;
+
+}
+
+/*
+ * Idle notifier: park the cpu timer when the cpu enters idle and
+ * re-arm it when the cpu becomes busy again.
+ */
+static int vtimer_idle_notify(struct notifier_block *self,
+			      unsigned long action, void *hcpu)
+{
+	if (action == CPU_IDLE)
+		stop_cpu_timer();
+	else if (action == CPU_NOT_IDLE)
+		start_cpu_timer();
+	return NOTIFY_OK;
+}
+
+/* hook into the idle state transitions (see vtimer_idle_notify) */
+static struct notifier_block vtimer_idle_nb = {
+	.notifier_call = vtimer_idle_notify,
+};
+
+/*
+ * vtime_init - boot-time setup of the virtual cpu timer infrastructure.
+ * Registers the 0x1005 cpu-timer external interrupt, hooks the idle
+ * notifier and starts the timer on the boot cpu.
+ */
+void __init vtime_init(void)
+{
+	/* request the cpu timer external interrupt */
+	if (register_early_external_interrupt(0x1005, do_cpu_timer_interrupt,
+					      &ext_int_info_timer) != 0)
+		panic("Couldn't request external interrupt 0x1005");
+
+	if (register_idle_notifier(&vtimer_idle_nb))
+		panic("Couldn't register idle notifier");
+
+	init_cpu_vtimer();
+}
+
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
new file mode 100644
index 0000000..a8758b1
--- /dev/null
+++ b/arch/s390/lib/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for s390-specific library files.
+#
+
+EXTRA_AFLAGS := -traditional
+
+lib-y += delay.o string.o
+# exactly one uaccess flavour is built, selected by the kernel word size
+lib-$(CONFIG_ARCH_S390_31) += uaccess.o
+lib-$(CONFIG_ARCH_S390X) += uaccess64.o
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
new file mode 100644
index 0000000..e96c35b
--- /dev/null
+++ b/arch/s390/lib/delay.c
@@ -0,0 +1,51 @@
+/*
+ *  arch/s390/kernel/delay.c
+ *    Precise Delay Loops for S390
+ *
+ *  S390 version
+ *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ *
+ *  Derived from "arch/i386/lib/delay.c"
+ *    Copyright (C) 1993 Linus Torvalds
+ *    Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+ */
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+
+#ifdef CONFIG_SMP
+#include <asm/smp.h>
+#endif
+
+/* Busy-wait for @loops/2 iterations of a single branch-on-count loop. */
+void __delay(unsigned long loops)
+{
+        /*
+         * To end the bloody stupid and useless discussion about the
+         * BogoMips number I took the liberty to define the __delay
+         * function in a way that the resulting BogoMips number will
+         * yield the megahertz number of the cpu. The important function
+         * is udelay and that is done using the tod clock. -- martin.
+         */
+        __asm__ __volatile__(
+                "0: brct %0,0b"
+                : /* no outputs */ : "r" (loops/2) );
+}
+
+/*
+ * Waits for 'usecs' microseconds using the tod clock, giving up the time slice
+ * of the virtual PU in between to avoid congestion.
+ */
+void __udelay(unsigned long usecs)
+{
+        uint64_t start_cc, end_cc;
+
+        if (usecs == 0)
+                return;
+        asm volatile ("STCK %0" : "=m" (start_cc));
+        do {
+		cpu_relax();
+                asm volatile ("STCK %0" : "=m" (end_cc));
+        /* TOD clock bit 51 is one microsecond, i.e. 4096 clock units */
+        } while (((end_cc - start_cc)/4096) < usecs);
+}
diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c
new file mode 100644
index 0000000..8240cc7
--- /dev/null
+++ b/arch/s390/lib/string.c
@@ -0,0 +1,381 @@
+/*
+ *  arch/s390/lib/string.c
+ *    Optimized string functions
+ *
+ *  S390 version
+ *    Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+#define IN_ARCH_STRING_C 1
+
+#include <linux/types.h>
+#include <linux/module.h>
+
+/*
+ * Helper functions to find the end of a string
+ */
+/* Return the address of the terminating '\0' of @s. srst searches for
+ * the byte held in r0 (0 here); "jo" retries while cc==3, i.e. the
+ * CPU-determined amount of data was scanned without a result. */
+static inline char *__strend(const char *s)
+{
+	register unsigned long r0 asm("0") = 0;
+
+	asm volatile ("0: srst  %0,%1\n"
+		      "   jo    0b"
+		      : "+d" (r0), "+a" (s) :  : "cc" );
+	return (char *) r0;
+}
+
+/* Like __strend, but scan at most @n bytes; returns s+n when no '\0'
+ * occurs within the first @n bytes. */
+static inline char *__strnend(const char *s, size_t n)
+{
+	register unsigned long r0 asm("0") = 0;
+	const char *p = s + n;
+
+	asm volatile ("0: srst  %0,%1\n"
+		      "   jo    0b"
+		      : "+d" (p), "+a" (s) : "d" (r0) : "cc" );
+	return (char *) p;
+}
+
+/**
+ * strlen - Find the length of a string
+ * @s: The string to be sized
+ *
+ * returns the length of @s
+ */
+size_t strlen(const char *s)
+{
+	/* hardware-assisted scan via srst in __strend */
+	return __strend(s) - s;
+}
+EXPORT_SYMBOL(strlen);
+
+/**
+ * strnlen - Find the length of a length-limited string
+ * @s: The string to be sized
+ * @n: The maximum number of bytes to search
+ *
+ * returns the minimum of the length of @s and @n
+ */
+size_t strnlen(const char * s, size_t n)
+{
+	return __strnend(s, n) - s;
+}
+EXPORT_SYMBOL(strnlen);
+
+/**
+ * strcpy - Copy a %NUL terminated string
+ * @dest: Where to copy the string to
+ * @src: Where to copy the string from
+ *
+ * returns a pointer to @dest
+ */
+char *strcpy(char *dest, const char *src)
+{
+	register int r0 asm("0") = 0;
+	char *ret = dest;
+
+	/* mvst copies up to and including the byte in r0 (the '\0');
+	 * "jo" retries while cc==3 (copy not finished yet) */
+	asm volatile ("0: mvst  %0,%1\n"
+		      "   jo    0b"
+		      : "+&a" (dest), "+&a" (src) : "d" (r0)
+		      : "cc", "memory" );
+	return ret;
+}
+EXPORT_SYMBOL(strcpy);
+
+/**
+ * strlcpy - Copy a %NUL terminated string into a sized buffer
+ * @dest: Where to copy the string to
+ * @src: Where to copy the string from
+ * @size: size of destination buffer
+ *
+ * Compatible with *BSD: the result is always a valid
+ * NUL-terminated string that fits in the buffer (unless,
+ * of course, the buffer size is zero). It does not pad
+ * out the result like strncpy() does.
+ */
+size_t strlcpy(char *dest, const char *src, size_t size)
+{
+	/* return value is always strlen(src), as in the BSD original */
+	size_t ret = __strend(src) - src;
+
+	if (size) {
+		size_t len = (ret >= size) ? size-1 : ret;
+		/* terminator is stored first, then the characters */
+		dest[len] = '\0';
+		__builtin_memcpy(dest, src, len);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(strlcpy);
+
+/**
+ * strncpy - Copy a length-limited, %NUL-terminated string
+ * @dest: Where to copy the string to
+ * @src: Where to copy the string from
+ * @n: The maximum number of bytes to copy
+ *
+ * The result is not %NUL-terminated if the source exceeds
+ * @n bytes.
+ */
+char *strncpy(char *dest, const char *src, size_t n)
+{
+	size_t len = __strnend(src, n) - src;
+	/* zero-pad the tail first, then copy the string bytes */
+	__builtin_memset(dest + len, 0, n - len);
+	__builtin_memcpy(dest, src, len);
+	return dest;
+}
+EXPORT_SYMBOL(strncpy);
+
+/**
+ * strcat - Append one %NUL-terminated string to another
+ * @dest: The string to be appended to
+ * @src: The string to append to it
+ *
+ * returns a pointer to @dest
+ */
+char *strcat(char *dest, const char *src)
+{
+	register int r0 asm("0") = 0;
+	unsigned long dummy;
+	char *ret = dest;
+
+	/* srst finds the end of dest, then mvst appends src there */
+	asm volatile ("0: srst  %0,%1\n"
+		      "   jo    0b\n"
+		      "1: mvst  %0,%2\n"
+		      "   jo    1b"
+		      : "=&a" (dummy), "+a" (dest), "+a" (src)
+		      : "d" (r0), "0" (0UL) : "cc", "memory" );
+	return ret;
+}
+EXPORT_SYMBOL(strcat);
+
+/**
+ * strlcat - Append a length-limited, %NUL-terminated string to another
+ * @dest: The string to be appended to
+ * @src: The string to append to it
+ * @n: The size of the destination buffer.
+ *
+ * returns the total length the result would have had without truncation
+ */
+size_t strlcat(char *dest, const char *src, size_t n)
+{
+	size_t dsize = __strend(dest) - dest;
+	size_t len = __strend(src) - src;
+	size_t res = dsize + len;
+
+	/* only copy if dest is not already longer than the buffer */
+	if (dsize < n) {
+		dest += dsize;
+		n -= dsize;
+		if (len >= n)
+			len = n - 1;
+		dest[len] = '\0';
+		__builtin_memcpy(dest, src, len);
+	}
+	return res;
+}
+EXPORT_SYMBOL(strlcat);
+
+/**
+ * strncat - Append a length-limited, %NUL-terminated string to another
+ * @dest: The string to be appended to
+ * @src: The string to append to it
+ * @n: The maximum numbers of bytes to copy
+ *
+ * returns a pointer to @dest
+ *
+ * Note that in contrast to strncpy, strncat ensures the result is
+ * terminated.
+ */
+char *strncat(char *dest, const char *src, size_t n)
+{
+	size_t len = __strnend(src, n) - src;
+	char *p = __strend(dest);
+
+	p[len] = '\0';
+	__builtin_memcpy(p, src, len);
+	return dest;
+}
+EXPORT_SYMBOL(strncat);
+
+/**
+ * strcmp - Compare two strings
+ * @cs: One string
+ * @ct: Another string
+ *
+ * returns   0 if @cs and @ct are equal,
+ *         < 0 if @cs is less than @ct
+ *         > 0 if @cs is greater than @ct
+ */
+int strcmp(const char *cs, const char *ct)
+{
+	register int r0 asm("0") = 0;
+	int ret = 0;
+
+	/* clst compares until inequality or the terminator in r0 (0);
+	 * on inequality the differing bytes are loaded and subtracted */
+	asm volatile ("0: clst %2,%3\n"
+		      "   jo   0b\n"
+		      "   je   1f\n"
+		      "   ic   %0,0(%2)\n"
+		      "   ic   %1,0(%3)\n"
+		      "   sr   %0,%1\n"
+		      "1:"
+		      : "+d" (ret), "+d" (r0), "+a" (cs), "+a" (ct)
+		      : : "cc" );
+	return ret;
+}
+EXPORT_SYMBOL(strcmp);
+
+/**
+ * strrchr - Find the last occurrence of a character in a string
+ * @s: The string to be searched
+ * @c: The character to search for
+ *
+ * returns a pointer to the last occurrence of @c in @s (including the
+ * terminating '\0' when @c is '\0'), or %NULL if @c does not occur
+ */
+char * strrchr(const char * s, int c)
+{
+       /* signed index: the loop must be able to reach and test index 0;
+	* the previous "while (--len > 0)" loop stopped at index 1 and
+	* missed a match in s[0] */
+       ssize_t len = __strend(s) - s;
+
+       /* scan downwards from the terminator to the first byte inclusive */
+       do {
+	       if (s[len] == (char) c)
+		       return (char *) s + len;
+       } while (--len >= 0);
+       return 0;
+}
+EXPORT_SYMBOL(strrchr);
+
+/**
+ * strstr - Find the first substring in a %NUL terminated string
+ * @s1: The string to be searched
+ * @s2: The string to search for
+ */
+char * strstr(const char * s1,const char * s2)
+{
+	int l1, l2;
+
+	l2 = __strend(s2) - s2;
+	if (!l2)
+		return (char *) s1;
+	l1 = __strend(s1) - s1;
+	/* slide an l2-byte window over s1 and compare with clcle */
+	while (l1-- >= l2) {
+		register unsigned long r2 asm("2") = (unsigned long) s1;
+		register unsigned long r3 asm("3") = (unsigned long) l2;
+		register unsigned long r4 asm("4") = (unsigned long) s2;
+		register unsigned long r5 asm("5") = (unsigned long) l2;
+		int cc;
+
+		/* ipm/srl extracts the condition code; cc==0 means equal */
+		asm volatile ("0: clcle %1,%3,0\n"
+			      "   jo    0b\n"
+			      "   ipm   %0\n"
+			      "   srl   %0,28"
+			      : "=&d" (cc), "+a" (r2), "+a" (r3),
+			        "+a" (r4), "+a" (r5) : : "cc" );
+		if (!cc)
+			return (char *) s1;
+		s1++;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(strstr);
+
+/**
+ * memchr - Find a character in an area of memory.
+ * @s: The memory area
+ * @c: The byte to search for
+ * @n: The size of the area.
+ *
+ * returns the address of the first occurrence of @c, or %NULL
+ * if @c is not found
+ */
+void *memchr(const void *s, int c, size_t n)
+{
+	register int r0 asm("0") = (char) c;
+	const void *ret = s + n;
+
+	/* srst stops with cc==1 when the byte is found ("jl" taken);
+	 * otherwise the result is forced to NULL with "la %0,0" */
+	asm volatile ("0: srst  %0,%1\n"
+		      "   jo    0b\n"
+		      "   jl	1f\n"
+		      "   la    %0,0\n"
+		      "1:"
+		      : "+a" (ret), "+&a" (s) : "d" (r0) : "cc" );
+	return (void *) ret;
+}
+EXPORT_SYMBOL(memchr);
+
+/**
+ * memcmp - Compare two areas of memory
+ * @cs: One area of memory
+ * @ct: Another area of memory
+ * @count: The size of the area.
+ */
+int memcmp(const void *cs, const void *ct, size_t n)
+{
+	register unsigned long r2 asm("2") = (unsigned long) cs;
+	register unsigned long r3 asm("3") = (unsigned long) n;
+	register unsigned long r4 asm("4") = (unsigned long) ct;
+	register unsigned long r5 asm("5") = (unsigned long) n;
+	int ret;
+
+	/* clcle compares the two areas; ipm/srl extracts the cc */
+	asm volatile ("0: clcle %1,%3,0\n"
+		      "   jo    0b\n"
+		      "   ipm   %0\n"
+		      "   srl   %0,28"
+		      : "=&d" (ret), "+a" (r2), "+a" (r3), "+a" (r4), "+a" (r5)
+		      : : "cc" );
+	/* on inequality r2/r4 point at the differing bytes; plain char is
+	 * unsigned in the s390 ABI, matching memcmp's unsigned-byte order */
+	if (ret)
+		ret = *(char *) r2 - *(char *) r4;
+	return ret;
+}
+EXPORT_SYMBOL(memcmp);
+
+/**
+ * memscan - Find a character in an area of memory.
+ * @s: The memory area
+ * @c: The byte to search for
+ * @n: The size of the area.
+ *
+ * returns the address of the first occurrence of @c, or 1 byte past
+ * the area if @c is not found
+ */
+void *memscan(void *s, int c, size_t n)
+{
+	register int r0 asm("0") = (char) c;
+	const void *ret = s + n;
+
+	/* unlike memchr, a miss simply leaves ret pointing past the area */
+	asm volatile ("0: srst  %0,%1\n"
+		      "   jo    0b\n"
+		      : "+a" (ret), "+&a" (s) : "d" (r0) : "cc" );
+	return (void *) ret;
+}
+EXPORT_SYMBOL(memscan);
+
+/**
+ * memcpy - Copy one area of memory to another
+ * @dest: Where to copy to
+ * @src: Where to copy from
+ * @n: The size of the area.
+ *
+ * returns a pointer to @dest
+ */
+void *memcpy(void *dest, const void *src, size_t n)
+{
+	/* delegate to the compiler builtin (mvc for small constant sizes) */
+	return __builtin_memcpy(dest, src, n);
+}
+EXPORT_SYMBOL(memcpy);
+
+/**
+ * memset - Fill a region of memory with the given value
+ * @s: Pointer to the start of the area.
+ * @c: The byte to fill the area with
+ * @n: The size of the area.
+ *
+ * returns a pointer to @s
+ */
+void *memset(void *s, int c, size_t n)
+{
+	char *p;
+
+	/* the common zero-fill case is handed to the compiler builtin */
+	if (c == 0)
+		return __builtin_memset(s, 0, n);
+
+	/* fill byte by byte for non-zero values */
+	for (p = s; n > 0; n--)
+		*p++ = c;
+	return s;
+}
+EXPORT_SYMBOL(memset);
diff --git a/arch/s390/lib/uaccess.S b/arch/s390/lib/uaccess.S
new file mode 100644
index 0000000..e8029ef
--- /dev/null
+++ b/arch/s390/lib/uaccess.S
@@ -0,0 +1,210 @@
+/*
+ *  arch/s390/lib/uaccess.S
+ *    __copy_{from|to}_user functions.
+ *
+ *  s390
+ *    Copyright (C) 2000,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Authors(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ *  These functions have standard call interface
+ */
+
+#include <linux/errno.h>
+#include <asm/lowcore.h>
+#include <asm/offsets.h>
+
+        .text
+        .align 4
+        .globl __copy_from_user_asm
+	# %r2 = to, %r3 = n, %r4 = from
+	# returns in %r2 the number of bytes NOT copied (0 on success).
+	# mvcp moves up to 256 bytes per execution from the secondary
+	# (user) address space; cc != 0 means there is more to copy.
+__copy_from_user_asm:
+	slr	%r0,%r0
+0:	mvcp	0(%r3,%r2),0(%r4),%r0
+	jnz	1f
+	slr	%r2,%r2
+	br	%r14
+1:	la	%r2,256(%r2)
+	la	%r4,256(%r4)
+	ahi	%r3,-256
+2:	mvcp	0(%r3,%r2),0(%r4),%r0
+	jnz	1b
+3:	slr	%r2,%r2
+	br	%r14
+# fault handler: retry the copy up to the user page boundary, then
+# return the number of bytes that could not be copied.
+4:	lhi	%r0,-4096
+	lr	%r5,%r4
+	slr	%r5,%r0
+	nr	%r5,%r0		# %r5 = (%r4 + 4096) & -4096
+	slr	%r5,%r4		# %r5 = #bytes to next user page boundary
+	clr	%r3,%r5		# copy crosses next page boundary ?
+	jnh	6f		# no, the current page faulted
+	# move with the reduced length which is < 256
+5:	mvcp	0(%r5,%r2),0(%r4),%r0
+	slr	%r3,%r5
+6:	lr	%r2,%r3
+	br	%r14
+        .section __ex_table,"a"
+	.long	0b,4b
+	.long	2b,4b
+	.long	5b,6b
+        .previous
+
+        .align 4
+        .text
+        .globl __copy_to_user_asm
+	# %r2 = from, %r3 = n, %r4 = to
+	# returns in %r2 the number of bytes NOT copied (0 on success).
+	# mvcs moves up to 256 bytes per execution to the secondary
+	# (user) address space.
+__copy_to_user_asm:
+	slr	%r0,%r0
+0:	mvcs	0(%r3,%r4),0(%r2),%r0
+	jnz	1f
+	slr	%r2,%r2
+	br	%r14
+1:	la	%r2,256(%r2)
+	la	%r4,256(%r4)
+	ahi	%r3,-256
+2:	mvcs	0(%r3,%r4),0(%r2),%r0
+	jnz	1b
+3:	slr	%r2,%r2
+	br	%r14
+# fault handler: retry up to the user page boundary, then return the
+# number of bytes that could not be copied.
+4:	lhi	%r0,-4096
+	lr	%r5,%r4
+	slr	%r5,%r0
+	nr	%r5,%r0		# %r5 = (%r4 + 4096) & -4096
+	slr	%r5,%r4		# %r5 = #bytes to next user page boundary
+	clr	%r3,%r5		# copy crosses next page boundary ?
+	jnh	6f		# no, the current page faulted
+	# move with the reduced length which is < 256
+5:	mvcs	0(%r5,%r4),0(%r2),%r0
+	slr	%r3,%r5
+6:	lr	%r2,%r3
+	br	%r14
+        .section __ex_table,"a"
+	.long	0b,4b
+	.long	2b,4b
+	.long	5b,6b
+        .previous
+
+        .align 4
+        .text
+        .globl __copy_in_user_asm
+	# %r2 = from, %r3 = n, %r4 = to
+	# copy within the user address space (both operands accessed via
+	# the secondary space, enabled with "sacf 256"); returns in %r2
+	# the number of bytes NOT copied (0 on success).
+__copy_in_user_asm:
+	sacf	256
+	bras	1,1f		# %r1 = address of the mvc template for "ex"
+	mvc	0(1,%r4),0(%r2)
+0:	mvc	0(256,%r4),0(%r2)
+	la	%r2,256(%r2)
+	la	%r4,256(%r4)
+1:	ahi	%r3,-256
+	jnm	0b
+2:	ex	%r3,0(%r1)	# copy the remaining %r3+1 bytes
+	sacf	0
+	slr	%r2,%r2
+	br	%r14		# return via %r14 like all other exits
+# fault handler: fall back to byte-wise copy, then report the rest.
+3:	mvc	0(1,%r4),0(%r2)
+	la	%r2,1(%r2)
+	la	%r4,1(%r4)
+	ahi	%r3,-1
+	jnm	3b
+4:	lr	%r2,%r3
+	sacf	0
+	br	%r14
+        .section __ex_table,"a"
+	.long	0b,3b
+	.long	2b,3b
+	.long	3b,4b
+        .previous
+
+        .align 4
+        .text
+        .globl __clear_user_asm
+	# %r2 = to, %r3 = n
+	# zero n bytes in the user (secondary) address space by copying
+	# from empty_zero_page; returns in %r2 the number of bytes NOT
+	# cleared (0 on success).
+__clear_user_asm:
+	bras	%r5,0f
+	.long	empty_zero_page
+0:	l	%r5,0(%r5)	# %r5 = address of the zero page
+	slr	%r0,%r0
+1:	mvcs	0(%r3,%r2),0(%r5),%r0
+	jnz	2f
+	slr	%r2,%r2
+	br	%r14
+2:	la	%r2,256(%r2)
+	ahi	%r3,-256
+3:	mvcs	0(%r3,%r2),0(%r5),%r0
+	jnz	2b
+4:	slr	%r2,%r2
+	br	%r14
+# fault handler: retry up to the user page boundary, then return the
+# number of bytes that could not be cleared.
+5:	lhi	%r0,-4096
+	lr	%r4,%r2
+	slr	%r4,%r0
+	nr	%r4,%r0		# %r4 = (%r2 + 4096) & -4096
+	slr	%r4,%r2		# %r4 = #bytes to next user page boundary
+	clr	%r3,%r4		# clear crosses next page boundary ?
+	jnh	7f		# no, the current page faulted
+	# clear with the reduced length which is < 256
+6:	mvcs	0(%r4,%r2),0(%r5),%r0
+	slr	%r3,%r4
+7:	lr	%r2,%r3
+	br	%r14
+        .section __ex_table,"a"
+	.long	1b,5b
+	.long	3b,5b
+	.long	6b,7b
+        .previous
+
+        .align 4
+        .text
+        .globl __strncpy_from_user_asm
+	# %r2 = count, %r3 = dst, %r4 = src
+	# returns in %r2 the length of the copied string, or -EFAULT
+	# when the user-space scan faults.
+__strncpy_from_user_asm:
+	lhi	%r0,0
+	lr	%r1,%r4
+	la	%r4,0(%r4)	# clear high order bit from %r4
+	la	%r2,0(%r2,%r4)	# %r2 points to first byte after string
+	sacf	256		# srst scans the secondary (user) space
+0:	srst	%r2,%r1
+	jo	0b
+	sacf	0
+	lr	%r1,%r2
+	jh	1f		# \0 found in string ?
+	ahi	%r1,1		# include \0 in copy
+1:	slr	%r1,%r4		# %r1 = copy length (without \0)
+	slr	%r2,%r4		# %r2 = return length (including \0)
+2:	mvcp	0(%r1,%r3),0(%r4),%r0
+	jnz	3f
+	br	%r14
+3:	la	%r3,256(%r3)
+	la	%r4,256(%r4)
+	ahi	%r1,-256
+	mvcp	0(%r1,%r3),0(%r4),%r0
+	jnz	3b
+	br	%r14
+4:	sacf	0
+	lhi	%r2,-EFAULT
+	br	%r14
+	.section __ex_table,"a"
+	.long	0b,4b
+	.previous
+
+        .align 4
+        .text
+        .globl __strnlen_user_asm
+	# %r2 = count, %r3 = src
+	# returns in %r2 the string length including the \0 (capped at
+	# count), or -EFAULT when the user-space scan faults.
+__strnlen_user_asm:
+	lhi	%r0,0
+	lr	%r1,%r3
+	la	%r3,0(%r3)	# clear high order bit from %r3
+	la	%r2,0(%r2,%r3)	# %r2 points to first byte after string
+	sacf	256		# srst scans the secondary (user) space
+0:	srst	%r2,%r1
+	jo	0b
+	sacf	0
+	jh	1f		# \0 found in string ?
+	ahi	%r2,1		# strnlen_user result includes the \0
+1:	slr	%r2,%r3
+	br	%r14
+2:	sacf	0
+	lhi	%r2,-EFAULT
+	br	%r14
+	.section __ex_table,"a"
+	.long	0b,2b
+	.previous
diff --git a/arch/s390/lib/uaccess64.S b/arch/s390/lib/uaccess64.S
new file mode 100644
index 0000000..0ca5697
--- /dev/null
+++ b/arch/s390/lib/uaccess64.S
@@ -0,0 +1,206 @@
+/*
+ *  arch/s390x/lib/uaccess.S
+ *    __copy_{from|to}_user functions.
+ *
+ *  s390
+ *    Copyright (C) 2000,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Authors(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ *  These functions have standard call interface
+ */
+
+#include <linux/errno.h>
+#include <asm/lowcore.h>
+#include <asm/offsets.h>
+
+        .text
+        .align 4
+#
+# __copy_from_user_asm: copy %r3 bytes from user space (%r4) to
+# kernel space (%r2) with MVCP, up to 256 bytes per iteration.
+# Returns 0 in %r2 on success.  On a fault the copy is retried with
+# the length reduced to the next user page boundary, so everything
+# up to the faulting page is copied; %r2 then holds the number of
+# bytes that were not copied.
+#
+        .globl __copy_from_user_asm
+	# %r2 = to, %r3 = n, %r4 = from
+__copy_from_user_asm:
+	slgr	%r0,%r0		# %r0 = 0 (key operand for mvcp)
+0:	mvcp	0(%r3,%r2),0(%r4),%r0
+	jnz	1f		# cc != 0: more than 256 bytes left
+	slgr	%r2,%r2		# done, return 0
+	br	%r14
+1:	la	%r2,256(%r2)
+	la	%r4,256(%r4)
+	aghi	%r3,-256
+2:	mvcp	0(%r3,%r2),0(%r4),%r0
+	jnz	1b
+3:	slgr	%r2,%r2
+	br	%r14
+	# fixup for a fault at 0:/2:
+4:	lghi	%r0,-4096
+	lgr	%r5,%r4
+	slgr	%r5,%r0
+	ngr	%r5,%r0		# %r5 = (%r4 + 4096) & -4096
+	slgr	%r5,%r4		# %r5 = #bytes to next user page boundary
+	clgr	%r3,%r5		# copy crosses next page boundary ?
+	jnh	6f		# no, the current page faulted
+	# move with the reduced length which is < 256
+5:	mvcp	0(%r5,%r2),0(%r4),%r0
+	slgr	%r3,%r5
+6:	lgr	%r2,%r3		# return #bytes not copied
+	br	%r14
+        .section __ex_table,"a"
+	.quad	0b,4b
+	.quad	2b,4b
+	.quad	5b,6b
+        .previous
+
+        .align 4
+        .text
+#
+# __copy_to_user_asm: copy %r3 bytes from kernel space (%r2) to user
+# space (%r4) with MVCS, up to 256 bytes per iteration.  Mirror image
+# of __copy_from_user_asm: returns 0 in %r2 on success, otherwise the
+# number of bytes not copied after retrying up to the next user page
+# boundary.
+#
+        .globl __copy_to_user_asm
+	# %r2 = from, %r3 = n, %r4 = to
+__copy_to_user_asm:
+	slgr	%r0,%r0		# %r0 = 0 (key operand for mvcs)
+0:	mvcs	0(%r3,%r4),0(%r2),%r0
+	jnz	1f		# cc != 0: more than 256 bytes left
+	slgr	%r2,%r2		# done, return 0
+	br	%r14
+1:	la	%r2,256(%r2)
+	la	%r4,256(%r4)
+	aghi	%r3,-256
+2:	mvcs	0(%r3,%r4),0(%r2),%r0
+	jnz	1b
+3:	slgr	%r2,%r2
+	br	%r14
+	# fixup for a fault at 0:/2:
+4:	lghi	%r0,-4096
+	lgr	%r5,%r4
+	slgr	%r5,%r0
+	ngr	%r5,%r0		# %r5 = (%r4 + 4096) & -4096
+	slgr	%r5,%r4		# %r5 = #bytes to next user page boundary
+	clgr	%r3,%r5		# copy crosses next page boundary ?
+	jnh	6f		# no, the current page faulted
+	# move with the reduced length which is < 256
+5:	mvcs	0(%r5,%r4),0(%r2),%r0
+	slgr	%r3,%r5
+6:	lgr	%r2,%r3		# return #bytes not copied
+	br	%r14
+        .section __ex_table,"a"
+	.quad	0b,4b
+	.quad	2b,4b
+	.quad	5b,6b
+        .previous
+
+        .align 4
+        .text
+#
+# __copy_in_user_asm: copy %r3 bytes from user space (%r2) to user
+# space (%r4), running with sacf 256 so plain MVC reaches user
+# memory.  Copies in 256 byte chunks plus one EX'd MVC for the tail.
+# Returns 0 in %r2 on success; on a fault it falls back to a
+# byte-wise copy and returns the number of bytes left in %r3.
+#
+        .globl __copy_in_user_asm
+	# %r2 = from, %r3 = n, %r4 = to
+__copy_in_user_asm:
+	sacf	256
+	bras	%r1,1f		# %r1 -> mvc template used by the ex below
+	mvc	0(1,%r4),0(%r2)
+0:	mvc	0(256,%r4),0(%r2)
+	la	%r2,256(%r2)
+	la	%r4,256(%r4)
+1:	aghi	%r3,-256
+	jnm	0b
+2:	ex	%r3,0(%r1)	# copy the remainder via the template
+	sacf	0
+	slgr	%r2,%r2
+	br	%r14
+	# fixup: copy byte by byte until the fault recurs
+3:	mvc	0(1,%r4),0(%r2)
+	la	%r2,1(%r2)
+	la	%r4,1(%r4)
+	aghi	%r3,-1
+	jnm	3b
+4:	lgr	%r2,%r3
+	sacf	0
+	br	%r14
+        .section __ex_table,"a"
+	.quad	0b,3b
+	.quad	2b,3b
+	.quad	3b,4b
+        .previous
+
+        .align 4
+        .text
+#
+# __clear_user_asm: zero %r3 bytes of user memory at %r2 by moving
+# from empty_zero_page with MVCS, up to 256 bytes per iteration.
+# Returns 0 in %r2 on success, otherwise the number of bytes not
+# cleared after retrying up to the next user page boundary.
+#
+        .globl __clear_user_asm
+	# %r2 = to, %r3 = n
+__clear_user_asm:
+	slgr	%r0,%r0		# %r0 = 0 (key operand for mvcs)
+	larl	%r5,empty_zero_page
+1:	mvcs	0(%r3,%r2),0(%r5),%r0
+	jnz	2f		# cc != 0: more than 256 bytes left
+	slgr	%r2,%r2		# done, return 0
+	br	%r14
+2:	la	%r2,256(%r2)
+	aghi	%r3,-256
+3:	mvcs	0(%r3,%r2),0(%r5),%r0
+	jnz	2b
+4:	slgr	%r2,%r2
+	br	%r14
+	# fixup for a fault at 1:/3:
+5:	lghi	%r0,-4096
+	lgr	%r4,%r2
+	slgr	%r4,%r0
+	ngr	%r4,%r0		# %r4 = (%r2 + 4096) & -4096
+	slgr	%r4,%r2		# %r4 = #bytes to next user page boundary
+	clgr	%r3,%r4		# clear crosses next page boundary ?
+	jnh	7f		# no, the current page faulted
+	# clear with the reduced length which is < 256
+6:	mvcs	0(%r4,%r2),0(%r5),%r0
+	slgr	%r3,%r4
+7:	lgr	%r2,%r3		# return #bytes not cleared
+	br	%r14
+        .section __ex_table,"a"
+	.quad	1b,5b
+	.quad	3b,5b
+	.quad	6b,7b
+        .previous
+
+        .align 4
+        .text
+#
+# __strncpy_from_user_asm (64 bit): copy a NUL-terminated string of
+# at most %r2 bytes from user space to kernel space.  Same algorithm
+# as the 31 bit version, minus the high-order-bit masking which is
+# not needed in 64 bit addressing mode.
+# Result in %r2: string length including the \0 (or count, if no \0
+# was found), or -EFAULT if the SRST scan faults.
+#
+        .globl __strncpy_from_user_asm
+	# %r2 = count, %r3 = dst, %r4 = src
+__strncpy_from_user_asm:
+	lghi	%r0,0		# %r0 = search character: \0
+	lgr	%r1,%r4		# %r1 = start of scan range
+	la	%r2,0(%r2,%r4)	# %r2 points to first byte after string
+	sacf	256
+0:	srst	%r2,%r1		# cc 3: scan incomplete, resume
+	jo	0b
+	sacf	0
+	lgr	%r1,%r2
+	jh	1f		# \0 found in string ?
+	aghi	%r1,1		# include \0 in copy
+1:	slgr	%r1,%r4		# %r1 = copy length (without \0)
+	slgr	%r2,%r4		# %r2 = return length (including \0)
+2:	mvcp	0(%r1,%r3),0(%r4),%r0
+	jnz	3f		# cc != 0: more than 256 bytes left
+	br	%r14
+3:	la	%r3,256(%r3)
+	la	%r4,256(%r4)
+	aghi	%r1,-256
+	mvcp	0(%r1,%r3),0(%r4),%r0
+	jnz	3b
+	br	%r14
+	# fixup: fault while scanning the user string
+4:	sacf	0
+	lghi	%r2,-EFAULT
+	br	%r14
+	.section __ex_table,"a"
+	.quad	0b,4b
+	.previous
+
+        .align 4
+        .text
+#
+# __strnlen_user_asm (64 bit): length of a NUL-terminated user space
+# string, scanning at most %r2 bytes with SRST under sacf 256.
+# Result in %r2: length including the terminating \0 (or count, if
+# no \0 was found), or -EFAULT if the scan faults.
+#
+        .globl __strnlen_user_asm
+	# %r2 = count, %r3 = src
+__strnlen_user_asm:
+	lghi	%r0,0		# %r0 = search character: \0
+	lgr	%r1,%r3		# %r1 = start of scan range
+	la	%r2,0(%r2,%r3)	# %r2 points to first byte after string
+	sacf	256
+0:	srst	%r2,%r1		# cc 3: scan incomplete, resume
+	jo	0b
+	sacf	0
+	jh	1f		# \0 found in string ?
+	aghi	%r2,1		# strnlen_user result includes the \0
+1:	slgr	%r2,%r3
+	br	%r14
+	# fixup: fault while scanning the user string
+2:	sacf	0
+	lghi	%r2,-EFAULT
+	br	%r14
+	.section __ex_table,"a"
+	.quad	0b,2b
+	.previous
diff --git a/arch/s390/math-emu/Makefile b/arch/s390/math-emu/Makefile
new file mode 100644
index 0000000..c10df14
--- /dev/null
+++ b/arch/s390/math-emu/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the FPU instruction emulation.
+#
+
+obj-$(CONFIG_MATHEMU) := math.o qrnnd.o
+
+# Pick up the local sfp-util.h and the generic soft-fp headers.
+# NOTE(review): -w disables all warnings, presumably because the
+# shared math-emu headers are noisy with this compiler - confirm.
+EXTRA_CFLAGS := -I$(src) -Iinclude/math-emu -w
+EXTRA_AFLAGS := -traditional
diff --git a/arch/s390/math-emu/math.c b/arch/s390/math-emu/math.c
new file mode 100644
index 0000000..648df71
--- /dev/null
+++ b/arch/s390/math-emu/math.c
@@ -0,0 +1,2258 @@
+/*
+ *  arch/s390/math-emu/math.c
+ *
+ *  S390 version
+ *    Copyright (C) 1999-2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ *
+ * 'math.c' emulates IEEE instructions on a S390 processor
+ *          that does not have the IEEE fpu (all processors before G5).
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <asm/uaccess.h>
+#include <asm/lowcore.h>
+
+#include "sfp-util.h"
+#include <math-emu/soft-fp.h>
+#include <math-emu/single.h>
+#include <math-emu/double.h>
+#include <math-emu/quad.h>
+
+/*
+ * soft-fp lacks a macro that rounds a floating point number to the
+ * nearest integer while keeping the same floating point format, so
+ * one is defined here.  For a normal number with both integer and
+ * fraction bits the fraction is shifted out (with sticky rounding
+ * via _FP_FRAC_SRS and _FP_ROUND) and shifted back in; a number with
+ * no integer part is turned into zero with the inexact flag set;
+ * numbers that are already integral, NaN, Inf and zero pass through
+ * unchanged.
+ */
+#define _FP_TO_FPINT_ROUND(fs, wc, X)					\
+  do {									\
+    switch (X##_c)							\
+      {									\
+      case FP_CLS_NORMAL:						\
+        if (X##_e > _FP_FRACBITS_##fs + _FP_EXPBIAS_##fs)		\
+          { /* floating point number has no bits after the dot. */	\
+          }								\
+        else if (X##_e <= _FP_FRACBITS_##fs + _FP_EXPBIAS_##fs &&	\
+                 X##_e > _FP_EXPBIAS_##fs)				\
+	  { /* some bits before the dot, some after it. */		\
+            _FP_FRAC_SRS_##wc(X, _FP_WFRACBITS_##fs,			\
+                              X##_e - _FP_EXPBIAS_##fs			\
+                              + _FP_FRACBITS_##fs);			\
+	    _FP_ROUND(wc, X);						\
+	    _FP_FRAC_SLL_##wc(X, X##_e - _FP_EXPBIAS_##fs		\
+                              + _FP_FRACBITS_##fs);			\
+          }								\
+        else								\
+          { /* all bits after the dot. */				\
+	    FP_SET_EXCEPTION(FP_EX_INEXACT);				\
+            X##_c = FP_CLS_ZERO;					\
+	  }								\
+        break;								\
+      case FP_CLS_NAN:							\
+      case FP_CLS_INF:							\
+      case FP_CLS_ZERO:							\
+        break;								\
+      }									\
+  } while (0)
+
+/* Format specific wrappers: single (1 word), double (2), quad (4). */
+#define FP_TO_FPINT_ROUND_S(X)	_FP_TO_FPINT_ROUND(S,1,X)
+#define FP_TO_FPINT_ROUND_D(X)	_FP_TO_FPINT_ROUND(D,2,X)
+#define FP_TO_FPINT_ROUND_Q(X)	_FP_TO_FPINT_ROUND(Q,4,X)
+
+/*
+ * Overlay of a 16 byte long double with its two 8 byte halves.  Used
+ * to shuttle quad values between an fp register pair (fprs[i] /
+ * fprs[i+2]) and the soft-fp pack/unpack routines.
+ */
+typedef union {
+        long double ld;
+        struct {
+                __u64 high;
+                __u64 low;
+        } w;
+} mathemu_ldcv;
+
+#ifdef CONFIG_SYSCTL
+/* Non-zero: log a message when an unimplemented ieee insn is hit. */
+int sysctl_ieee_emulation_warnings=1;
+#endif
+
+/*
+ * User space accessors that return SIGSEGV from the enclosing
+ * emulation function if the access faults.  Only usable inside
+ * functions that return an int signal number.
+ */
+#define mathemu_put_user(x, p) \
+        do { \
+                if (put_user((x),(p))) \
+                        return SIGSEGV; \
+        } while (0)
+
+#define mathemu_get_user(x, p) \
+        do { \
+                if (get_user((x),(p))) \
+                        return SIGSEGV; \
+        } while (0)
+
+#define mathemu_copy_from_user(d, s, n)\
+        do { \
+                if (copy_from_user((d),(s),(n)) != 0) \
+                        return SIGSEGV; \
+        } while (0)
+
+#define mathemu_copy_to_user(d, s, n) \
+        do { \
+                if (copy_to_user((d),(s),(n)) != 0) \
+                        return SIGSEGV; \
+        } while (0)
+
+/*
+ * Log that an ieee fpu instruction is not emulated, naming the
+ * instruction and the current process.  With CONFIG_SYSCTL the
+ * message can be suppressed via sysctl_ieee_emulation_warnings;
+ * without it the braced block always runs.
+ */
+static void display_emulation_not_implemented(struct pt_regs *regs, char *instr)
+{
+        __u16 *location;
+        
+#ifdef CONFIG_SYSCTL
+        if(sysctl_ieee_emulation_warnings)
+#endif
+        {
+                /* psw.addr points past the insn; back up by its length */
+                location = (__u16 *)(regs->psw.addr-S390_lowcore.pgm_ilc);
+                printk("%s ieee fpu instruction not emulated "
+                       "process name: %s pid: %d \n",
+                       instr, current->comm, current->pid);
+                printk("%s's PSW:    %08lx %08lx\n", instr,
+                       (unsigned long) regs->psw.mask,
+                       (unsigned long) location);
+        }
+}
+
+/* Store cc into the condition code field (bits 0x3000) of the user
+ * PSW mask. */
+static inline void emu_set_CC (struct pt_regs *regs, int cc)
+{
+        regs->psw.mask = (regs->psw.mask & 0xFFFFCFFF) | ((cc&3) << 12);
+}
+
+/*
+ * Set the condition code in the user psw.
+ *  0 : Result is zero
+ *  1 : Result is less than zero
+ *  2 : Result is greater than zero
+ *  3 : Result is NaN or INF
+ */
+static inline void emu_set_CC_cs(struct pt_regs *regs, int class, int sign)
+{
+        switch (class) {
+        case FP_CLS_NORMAL:
+        case FP_CLS_INF:
+                emu_set_CC(regs, sign ? 1 : 2);
+                break;
+        case FP_CLS_ZERO:
+                emu_set_CC(regs, 0);
+                break;
+        case FP_CLS_NAN:
+                emu_set_CC(regs, 3);
+                break;
+        }
+}
+
+/* Add long double: fprs[rx/rx+2] += fprs[ry/ry+2] (AXBR), CC from
+ * the result.  Returns the accumulated soft-fp exception flags. */
+static int emu_axbr (struct pt_regs *regs, int rx, int ry) {
+        FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
+        FP_DECL_EX;
+	mathemu_ldcv cvt;
+        int mode;
+
+	/* NOTE(review): `mode` looks unused, but is presumably read by
+	   the soft-fp rounding macros via FP_ROUNDMODE (sfp-util.h) -
+	   verify before removing.  Same in the routines below. */
+	mode = current->thread.fp_regs.fpc & 3;
+        cvt.w.high = current->thread.fp_regs.fprs[rx].ui;
+        cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui;
+        FP_UNPACK_QP(QA, &cvt.ld);
+        cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
+        cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
+        FP_UNPACK_QP(QB, &cvt.ld);
+        FP_ADD_Q(QR, QA, QB);
+        FP_PACK_QP(&cvt.ld, QR);
+        current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
+        current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
+        emu_set_CC_cs(regs, QR_c, QR_s);
+        return _fex;
+}
+
+/* Add double: fprs[rx] += fprs[ry] (ADBR) */
+static int emu_adbr (struct pt_regs *regs, int rx, int ry) {
+        FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
+        FP_DECL_EX;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
+        FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d);
+        FP_ADD_D(DR, DA, DB);
+	FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
+        emu_set_CC_cs(regs, DR_c, DR_s);
+        return _fex;
+}
+
+/* Add double: fprs[rx] += *val (ADB, memory operand) */
+static int emu_adb (struct pt_regs *regs, int rx, double *val) {
+        FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
+        FP_DECL_EX;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
+        FP_UNPACK_DP(DB, val);
+        FP_ADD_D(DR, DA, DB);
+	FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
+        emu_set_CC_cs(regs, DR_c, DR_s);
+        return _fex;
+}
+
+/* Add float: fprs[rx] += fprs[ry] (AEBR) */
+static int emu_aebr (struct pt_regs *regs, int rx, int ry) {
+        FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
+        FP_DECL_EX;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
+        FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f);
+        FP_ADD_S(SR, SA, SB);
+	FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
+        emu_set_CC_cs(regs, SR_c, SR_s);
+        return _fex;
+}
+
+/* Add float: fprs[rx] += *val (AEB, memory operand) */
+static int emu_aeb (struct pt_regs *regs, int rx, float *val) {
+        FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
+        FP_DECL_EX;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
+        FP_UNPACK_SP(SB, val);
+        FP_ADD_S(SR, SA, SB);
+	FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
+        emu_set_CC_cs(regs, SR_c, SR_s);
+        return _fex;
+}
+
+/* Compare long double (CXBR): CC 0 equal, 1 low, 2 high, 3 unordered.
+ * Both operands are unpacked raw; FP_CMP works on the raw fields. */
+static int emu_cxbr (struct pt_regs *regs, int rx, int ry) {
+        FP_DECL_Q(QA); FP_DECL_Q(QB);
+	mathemu_ldcv cvt;
+        int IR;
+
+        cvt.w.high = current->thread.fp_regs.fprs[rx].ui;
+        cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui;
+        FP_UNPACK_RAW_QP(QA, &cvt.ld);
+        cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
+        cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
+        FP_UNPACK_RAW_QP(QB, &cvt.ld);
+        FP_CMP_Q(IR, QA, QB, 3);
+        /*
+         * IR == -1 if QA < QB, IR == 0 if QA == QB,
+         * IR == 1 if QA > QB and IR == 3 if unordered
+         */
+        emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
+        return 0;
+}
+
+/* Compare double (CDBR) */
+static int emu_cdbr (struct pt_regs *regs, int rx, int ry) {
+        FP_DECL_D(DA); FP_DECL_D(DB);
+        int IR;
+
+        FP_UNPACK_RAW_DP(DA, &current->thread.fp_regs.fprs[rx].d);
+        FP_UNPACK_RAW_DP(DB, &current->thread.fp_regs.fprs[ry].d);
+        FP_CMP_D(IR, DA, DB, 3);
+        /*
+         * IR == -1 if DA < DB, IR == 0 if DA == DB,
+         * IR == 1 if DA > DB and IR == 3 if unordered
+         */
+        emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
+        return 0;
+}
+
+/* Compare double (CDB, memory operand) */
+static int emu_cdb (struct pt_regs *regs, int rx, double *val) {
+        FP_DECL_D(DA); FP_DECL_D(DB);
+        int IR;
+
+        FP_UNPACK_RAW_DP(DA, &current->thread.fp_regs.fprs[rx].d);
+        FP_UNPACK_RAW_DP(DB, val);
+        FP_CMP_D(IR, DA, DB, 3);
+        /*
+         * IR == -1 if DA < DB, IR == 0 if DA == DB,
+         * IR == 1 if DA > DB and IR == 3 if unordered
+         */
+        emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
+        return 0;
+}
+
+/* Compare float (CEBR) */
+static int emu_cebr (struct pt_regs *regs, int rx, int ry) {
+        FP_DECL_S(SA); FP_DECL_S(SB);
+        int IR;
+
+        FP_UNPACK_RAW_SP(SA, &current->thread.fp_regs.fprs[rx].f);
+        FP_UNPACK_RAW_SP(SB, &current->thread.fp_regs.fprs[ry].f);
+        FP_CMP_S(IR, SA, SB, 3);
+        /*
+         * IR == -1 if SA < SB, IR == 0 if SA == SB,
+         * IR == 1 if SA > SB and IR == 3 if unordered
+         */
+        emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
+        return 0;
+}
+
+/* Compare float (CEB, memory operand) */
+static int emu_ceb (struct pt_regs *regs, int rx, float *val) {
+        FP_DECL_S(SA); FP_DECL_S(SB);
+        int IR;
+
+        FP_UNPACK_RAW_SP(SA, &current->thread.fp_regs.fprs[rx].f);
+        FP_UNPACK_RAW_SP(SB, val);
+        FP_CMP_S(IR, SA, SB, 3);
+        /*
+         * IR == -1 if SA < SB, IR == 0 if SA == SB,
+         * IR == 1 if SA > SB and IR == 3 if unordered
+         */
+        emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
+        return 0;
+}
+
+/* Compare and signal long double (KXBR): like CXBR but an unordered
+ * comparison also raises the invalid operation exception. */
+static int emu_kxbr (struct pt_regs *regs, int rx, int ry) {
+        FP_DECL_Q(QA); FP_DECL_Q(QB);
+        FP_DECL_EX;
+	mathemu_ldcv cvt;
+        int IR;
+
+        cvt.w.high = current->thread.fp_regs.fprs[rx].ui;
+        cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui;
+        FP_UNPACK_RAW_QP(QA, &cvt.ld);
+        cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
+        cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
+        /*
+         * QB must be unpacked raw like QA: FP_CMP_Q compares the raw
+         * fields, and a cooked unpack shifts the fraction, so mixing
+         * the two gives wrong results.  This matches emu_cxbr and
+         * the other compare-and-signal routines.
+         */
+        FP_UNPACK_RAW_QP(QB, &cvt.ld);
+        FP_CMP_Q(IR, QA, QB, 3);
+        /*
+         * IR == -1 if QA < QB, IR == 0 if QA == QB,
+         * IR == 1 if QA > QB and IR == 3 if unordered
+         */
+        emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
+        if (IR == 3)
+                FP_SET_EXCEPTION (FP_EX_INVALID);
+        return _fex;
+}
+
+/* Compare and signal double (KDBR): like CDBR but an unordered
+ * comparison also raises the invalid operation exception. */
+static int emu_kdbr (struct pt_regs *regs, int rx, int ry) {
+        FP_DECL_D(DA); FP_DECL_D(DB);
+        FP_DECL_EX;
+        int IR;
+
+        FP_UNPACK_RAW_DP(DA, &current->thread.fp_regs.fprs[rx].d);
+        FP_UNPACK_RAW_DP(DB, &current->thread.fp_regs.fprs[ry].d);
+        FP_CMP_D(IR, DA, DB, 3);
+        /*
+         * IR == -1 if DA < DB, IR == 0 if DA == DB,
+         * IR == 1 if DA > DB and IR == 3 if unordered
+         */
+        emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
+        if (IR == 3)
+                FP_SET_EXCEPTION (FP_EX_INVALID);
+        return _fex;
+}
+
+/* Compare and signal double (KDB, memory operand) */
+static int emu_kdb (struct pt_regs *regs, int rx, double *val) {
+        FP_DECL_D(DA); FP_DECL_D(DB);
+        FP_DECL_EX;
+        int IR;
+
+        FP_UNPACK_RAW_DP(DA, &current->thread.fp_regs.fprs[rx].d);
+        FP_UNPACK_RAW_DP(DB, val);
+        FP_CMP_D(IR, DA, DB, 3);
+        /*
+         * IR == -1 if DA < DB, IR == 0 if DA == DB,
+         * IR == 1 if DA > DB and IR == 3 if unordered
+         */
+        emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
+        if (IR == 3)
+                FP_SET_EXCEPTION (FP_EX_INVALID);
+        return _fex;
+}
+
+/* Compare and signal float (KEBR) */
+static int emu_kebr (struct pt_regs *regs, int rx, int ry) {
+        FP_DECL_S(SA); FP_DECL_S(SB);
+        FP_DECL_EX;
+        int IR;
+
+        FP_UNPACK_RAW_SP(SA, &current->thread.fp_regs.fprs[rx].f);
+        FP_UNPACK_RAW_SP(SB, &current->thread.fp_regs.fprs[ry].f);
+        FP_CMP_S(IR, SA, SB, 3);
+        /*
+         * IR == -1 if SA < SB, IR == 0 if SA == SB,
+         * IR == 1 if SA > SB and IR == 3 if unordered
+         */
+        emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
+        if (IR == 3)
+                FP_SET_EXCEPTION (FP_EX_INVALID);
+        return _fex;
+}
+
+/* Compare and signal float (KEB, memory operand) */
+static int emu_keb (struct pt_regs *regs, int rx, float *val) {
+        FP_DECL_S(SA); FP_DECL_S(SB);
+        FP_DECL_EX;
+        int IR;
+
+        FP_UNPACK_RAW_SP(SA, &current->thread.fp_regs.fprs[rx].f);
+        FP_UNPACK_RAW_SP(SB, val);
+        FP_CMP_S(IR, SA, SB, 3);
+        /*
+         * IR == -1 if SA < SB, IR == 0 if SA == SB,
+         * IR == 1 if SA > SB and IR == 3 if unordered
+         */
+        emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
+        if (IR == 3)
+                FP_SET_EXCEPTION (FP_EX_INVALID);
+        return _fex;
+}
+
+/* Convert from fixed to long double (CXFBR): fprs[rx/rx+2] =
+ * (long double) gprs[ry].  Conversion from a 32 bit int is exact. */
+static int emu_cxfbr (struct pt_regs *regs, int rx, int ry) {
+        FP_DECL_Q(QR);
+        FP_DECL_EX;
+	mathemu_ldcv cvt;
+        __s32 si;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        si = regs->gprs[ry];
+        FP_FROM_INT_Q(QR, si, 32, int);
+        FP_PACK_QP(&cvt.ld, QR);
+        current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
+        current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
+        return _fex;
+}
+
+/* Convert from fixed to double (CDFBR) */
+static int emu_cdfbr (struct pt_regs *regs, int rx, int ry) {
+        FP_DECL_D(DR);
+        FP_DECL_EX;
+        __s32 si;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        si = regs->gprs[ry];
+        FP_FROM_INT_D(DR, si, 32, int);
+        FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
+        return _fex;
+}
+
+/* Convert from fixed to float (CEFBR) */
+static int emu_cefbr (struct pt_regs *regs, int rx, int ry) {
+        FP_DECL_S(SR);
+        FP_DECL_EX;
+        __s32 si;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        si = regs->gprs[ry];
+        FP_FROM_INT_S(SR, si, 32, int);
+        FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
+        return _fex;
+}
+
+/* Convert long double to fixed (CFXBR): gprs[rx] = (int) fprs[ry],
+ * CC from the source value.  The mask selects the rounding mode:
+ * 0 = mode from the fpc, 1 = round to nearest, 4..7 = explicit mode
+ * (biased by 4; consumed by the soft-fp macros via `mode`). */
+static int emu_cfxbr (struct pt_regs *regs, int rx, int ry, int mask) {
+        FP_DECL_Q(QA);
+        FP_DECL_EX;
+	mathemu_ldcv cvt;
+        __s32 si;
+        int mode;
+
+	if (mask == 0)
+		mode = current->thread.fp_regs.fpc & 3;
+	else if (mask == 1)
+		mode = FP_RND_NEAREST;
+	else
+		mode = mask - 4;
+        cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
+        cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
+        FP_UNPACK_QP(QA, &cvt.ld);
+        FP_TO_INT_ROUND_Q(si, QA, 32, 1);
+        regs->gprs[rx] = si;
+        emu_set_CC_cs(regs, QA_c, QA_s);
+        return _fex;
+}
+
+/* Convert double to fixed (CFDBR), mask as for emu_cfxbr */
+static int emu_cfdbr (struct pt_regs *regs, int rx, int ry, int mask) {
+        FP_DECL_D(DA);
+        FP_DECL_EX;
+        __s32 si;
+        int mode;
+
+	if (mask == 0)
+		mode = current->thread.fp_regs.fpc & 3;
+	else if (mask == 1)
+		mode = FP_RND_NEAREST;
+	else
+		mode = mask - 4;
+        FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d);
+        FP_TO_INT_ROUND_D(si, DA, 32, 1);
+        regs->gprs[rx] = si;
+        emu_set_CC_cs(regs, DA_c, DA_s);
+        return _fex;
+}
+
+/* Convert float to fixed (CFEBR), mask as for emu_cfxbr */
+static int emu_cfebr (struct pt_regs *regs, int rx, int ry, int mask) {
+        FP_DECL_S(SA);
+        FP_DECL_EX;
+        __s32 si;
+        int mode;
+
+	if (mask == 0)
+		mode = current->thread.fp_regs.fpc & 3;
+	else if (mask == 1)
+		mode = FP_RND_NEAREST;
+	else
+		mode = mask - 4;
+        FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f);
+        FP_TO_INT_ROUND_S(si, SA, 32, 1);
+        regs->gprs[rx] = si;
+        emu_set_CC_cs(regs, SA_c, SA_s);
+        return _fex;
+}
+
+/* Divide long double: fprs[rx/rx+2] /= fprs[ry/ry+2] (DXBR).
+ * Divide does not set the condition code. */
+static int emu_dxbr (struct pt_regs *regs, int rx, int ry) {
+        FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
+        FP_DECL_EX;
+	mathemu_ldcv cvt;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        cvt.w.high = current->thread.fp_regs.fprs[rx].ui;
+        cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui;
+        FP_UNPACK_QP(QA, &cvt.ld);
+        cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
+        cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
+        FP_UNPACK_QP(QB, &cvt.ld);
+        FP_DIV_Q(QR, QA, QB);
+        FP_PACK_QP(&cvt.ld, QR);
+        current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
+        current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
+        return _fex;
+}
+
+/* Divide double: fprs[rx] /= fprs[ry] (DDBR) */
+static int emu_ddbr (struct pt_regs *regs, int rx, int ry) {
+        FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
+        FP_DECL_EX;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
+        FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d);
+        FP_DIV_D(DR, DA, DB);
+	FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
+        return _fex;
+}
+
+/* Divide double: fprs[rx] /= *val (DDB, memory operand) */
+static int emu_ddb (struct pt_regs *regs, int rx, double *val) {
+        FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
+        FP_DECL_EX;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
+        FP_UNPACK_DP(DB, val);
+        FP_DIV_D(DR, DA, DB);
+	FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
+        return _fex;
+}
+
+/* Divide float: fprs[rx] /= fprs[ry] (DEBR) */
+static int emu_debr (struct pt_regs *regs, int rx, int ry) {
+        FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
+        FP_DECL_EX;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
+        FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f);
+        FP_DIV_S(SR, SA, SB);
+	FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
+        return _fex;
+}
+
+/* Divide float: fprs[rx] /= *val (DEB, memory operand) */
+static int emu_deb (struct pt_regs *regs, int rx, float *val) {
+        FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
+        FP_DECL_EX;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
+        FP_UNPACK_SP(SB, val);
+        FP_DIV_S(SR, SA, SB);
+	FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
+        return _fex;
+}
+
+/* Divide to integer double (DIDBR) - not implemented, only logs */
+static int emu_didbr (struct pt_regs *regs, int rx, int ry, int mask) {
+        display_emulation_not_implemented(regs, "didbr");
+        return 0;
+}
+
+/* Divide to integer float (DIEBR) - not implemented, only logs */
+static int emu_diebr (struct pt_regs *regs, int rx, int ry, int mask) {
+        display_emulation_not_implemented(regs, "diebr");
+        return 0;
+}
+
+/* Extract fpc (EFPC): gprs[rx] = fpc */
+static int emu_efpc (struct pt_regs *regs, int rx, int ry) {
+        regs->gprs[rx] = current->thread.fp_regs.fpc;
+        return 0;
+}
+
+/* Load and test long double (LTXBR): fprs[rx/rx+2] = fprs[ry/ry+2],
+ * CC set from the value. */
+static int emu_ltxbr (struct pt_regs *regs, int rx, int ry) {
+        s390_fp_regs *fp_regs = &current->thread.fp_regs;
+	mathemu_ldcv cvt;
+        FP_DECL_Q(QA);
+        FP_DECL_EX;
+
+        cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
+        cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
+        FP_UNPACK_QP(QA, &cvt.ld);
+        fp_regs->fprs[rx].ui = fp_regs->fprs[ry].ui;
+        fp_regs->fprs[rx+2].ui = fp_regs->fprs[ry+2].ui;
+        emu_set_CC_cs(regs, QA_c, QA_s);
+        return _fex;
+}
+
+/* Load and test double (LTDBR) */
+static int emu_ltdbr (struct pt_regs *regs, int rx, int ry) {
+        s390_fp_regs *fp_regs = &current->thread.fp_regs;
+        FP_DECL_D(DA);
+        FP_DECL_EX;
+
+        FP_UNPACK_DP(DA, &fp_regs->fprs[ry].d);
+        fp_regs->fprs[rx].ui = fp_regs->fprs[ry].ui;
+        emu_set_CC_cs(regs, DA_c, DA_s);
+        return _fex;
+}
+
+/* Load and test float (LTEBR) */
+static int emu_ltebr (struct pt_regs *regs, int rx, int ry) {
+        s390_fp_regs *fp_regs = &current->thread.fp_regs;
+        FP_DECL_S(SA);
+        FP_DECL_EX;
+
+        FP_UNPACK_SP(SA, &fp_regs->fprs[ry].f);
+        fp_regs->fprs[rx].ui = fp_regs->fprs[ry].ui;
+        emu_set_CC_cs(regs, SA_c, SA_s);
+        return _fex;
+}
+
+/* Load complement long double (LCXBR): fprs[rx/rx+2] =
+ * -fprs[ry/ry+2], CC from the result. */
+static int emu_lcxbr (struct pt_regs *regs, int rx, int ry) {
+        FP_DECL_Q(QA); FP_DECL_Q(QR);
+        FP_DECL_EX;
+	mathemu_ldcv cvt;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
+        cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
+        FP_UNPACK_QP(QA, &cvt.ld);
+	FP_NEG_Q(QR, QA);
+        FP_PACK_QP(&cvt.ld, QR);
+        current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
+        current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
+        emu_set_CC_cs(regs, QR_c, QR_s);
+        return _fex;
+}
+
+/* Load complement double (LCDBR) */
+static int emu_lcdbr (struct pt_regs *regs, int rx, int ry) {
+        FP_DECL_D(DA); FP_DECL_D(DR);
+        FP_DECL_EX;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d);
+	FP_NEG_D(DR, DA);
+	FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
+        emu_set_CC_cs(regs, DR_c, DR_s);
+        return _fex;
+}
+
+/* Load complement float (LCEBR) */
+static int emu_lcebr (struct pt_regs *regs, int rx, int ry) {
+        FP_DECL_S(SA); FP_DECL_S(SR);
+        FP_DECL_EX;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f);
+	FP_NEG_S(SR, SA);
+	FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
+        emu_set_CC_cs(regs, SR_c, SR_s);
+        return _fex;
+}
+
+/* Load fp integer long double (FIXBR): round fprs[ry/ry+2] to an
+ * integral value in the same format.  mask selects the rounding
+ * mode: 0 = mode from the fpc, 1 = round to nearest, 4..7 =
+ * explicit mode (biased by 4); `mode` is consumed by the soft-fp
+ * rounding macros. */
+static int emu_fixbr (struct pt_regs *regs, int rx, int ry, int mask) {
+        s390_fp_regs *fp_regs = &current->thread.fp_regs;
+        FP_DECL_Q(QA);
+        FP_DECL_EX;
+	mathemu_ldcv cvt;
+        int mode;
+
+	if (mask == 0)
+		mode = fp_regs->fpc & 3;
+	else if (mask == 1)
+		mode = FP_RND_NEAREST;
+	else
+		mode = mask - 4;
+        cvt.w.high = fp_regs->fprs[ry].ui;
+        cvt.w.low = fp_regs->fprs[ry+2].ui;
+        FP_UNPACK_QP(QA, &cvt.ld);
+	FP_TO_FPINT_ROUND_Q(QA);
+	FP_PACK_QP(&cvt.ld, QA);
+	fp_regs->fprs[rx].ui = cvt.w.high;
+	fp_regs->fprs[rx+2].ui = cvt.w.low;
+        return _fex;
+}
+
+/* Load fp integer double (FIDBR), mask as for emu_fixbr */
+static int emu_fidbr (struct pt_regs *regs, int rx, int ry, int mask) {
+	/* FIXME: rounding mode !! */
+        s390_fp_regs *fp_regs = &current->thread.fp_regs;
+        FP_DECL_D(DA);
+        FP_DECL_EX;
+        int mode;
+
+	if (mask == 0)
+		mode = fp_regs->fpc & 3;
+	else if (mask == 1)
+		mode = FP_RND_NEAREST;
+	else
+		mode = mask - 4;
+        FP_UNPACK_DP(DA, &fp_regs->fprs[ry].d);
+	FP_TO_FPINT_ROUND_D(DA);
+	FP_PACK_DP(&fp_regs->fprs[rx].d, DA);
+        return _fex;
+}
+
+/* Load fp integer float (FIEBR), mask as for emu_fixbr */
+static int emu_fiebr (struct pt_regs *regs, int rx, int ry, int mask) {
+        s390_fp_regs *fp_regs = &current->thread.fp_regs;
+        FP_DECL_S(SA);
+        FP_DECL_EX;
+        int mode;
+
+	if (mask == 0)
+		mode = fp_regs->fpc & 3;
+	else if (mask == 1)
+		mode = FP_RND_NEAREST;
+	else
+		mode = mask - 4;
+        FP_UNPACK_SP(SA, &fp_regs->fprs[ry].f);
+	FP_TO_FPINT_ROUND_S(SA);
+	FP_PACK_SP(&fp_regs->fprs[rx].f, SA);
+        return _fex;
+}
+
+/* Load lengthened double to long double (LXDBR):
+ * fprs[rx/rx+2] = (long double) fprs[ry].  Widening is exact. */
+static int emu_lxdbr (struct pt_regs *regs, int rx, int ry) {
+        FP_DECL_D(DA); FP_DECL_Q(QR);
+	FP_DECL_EX;
+	mathemu_ldcv cvt;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d);
+	FP_CONV (Q, D, 4, 2, QR, DA);
+        FP_PACK_QP(&cvt.ld, QR);
+        current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
+        current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
+        return _fex;
+}
+
+/* Load lengthened double to long double (LXDB, memory operand) */
+static int emu_lxdb (struct pt_regs *regs, int rx, double *val) {
+        FP_DECL_D(DA); FP_DECL_Q(QR);
+	FP_DECL_EX;
+	mathemu_ldcv cvt;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_DP(DA, val);
+	FP_CONV (Q, D, 4, 2, QR, DA);
+        FP_PACK_QP(&cvt.ld, QR);
+        current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
+        current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
+        return _fex;
+}
+
+/* Load lengthened float to long double (LXEBR) */
+static int emu_lxebr (struct pt_regs *regs, int rx, int ry) {
+        FP_DECL_S(SA); FP_DECL_Q(QR);
+	FP_DECL_EX;
+	mathemu_ldcv cvt;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f);
+	FP_CONV (Q, S, 4, 1, QR, SA);
+        FP_PACK_QP(&cvt.ld, QR);
+        current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
+        current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
+        return _fex;
+}
+
+/* Load lengthened float to long double (LXEB, memory operand) */
+static int emu_lxeb (struct pt_regs *regs, int rx, float *val) {
+        FP_DECL_S(SA); FP_DECL_Q(QR);
+	FP_DECL_EX;
+	mathemu_ldcv cvt;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_SP(SA, val);
+	FP_CONV (Q, S, 4, 1, QR, SA);
+        FP_PACK_QP(&cvt.ld, QR);
+        current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
+        current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
+        return _fex;
+}
+
+/* Load lengthened float to double (LDEBR) */
+static int emu_ldebr (struct pt_regs *regs, int rx, int ry) {
+        FP_DECL_S(SA); FP_DECL_D(DR);
+	FP_DECL_EX;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f);
+	FP_CONV (D, S, 2, 1, DR, SA);
+	FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
+        return _fex;
+}
+
+/* Load lengthened float to double (LDEB, memory operand) */
+static int emu_ldeb (struct pt_regs *regs, int rx, float *val) {
+        FP_DECL_S(SA); FP_DECL_D(DR);
+	FP_DECL_EX;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_SP(SA, val);
+	FP_CONV (D, S, 2, 1, DR, SA);
+	FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
+        return _fex;
+}
+
+/* Load negative long double */
+static int emu_lnxbr (struct pt_regs *regs, int rx, int ry) {
+        /* LNXBR: load negative 128-bit BFP.  The value in register pair
+         * ry/ry+2 is forced negative and stored into rx/rx+2; the
+         * condition code is set from the result's class and sign.
+         * `mode` holds the FPC rounding bits (presumably consumed by
+         * the FP_* macros - confirm).  Returns the soft-fp exception
+         * word. */
+        FP_DECL_Q(QA); FP_DECL_Q(QR);
+	FP_DECL_EX;
+	mathemu_ldcv cvt;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
+        cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
+        FP_UNPACK_QP(QA, &cvt.ld);
+        if (QA_s == 0) {
+		FP_NEG_Q(QR, QA);
+		FP_PACK_QP(&cvt.ld, QR);
+		current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
+		current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
+	} else {
+		/* Operand is already negative: copy it through unchanged.
+		 * Propagate class and sign into QR so the condition code
+		 * below is not derived from uninitialized variables. */
+		QR_c = QA_c;
+		QR_s = QA_s;
+		current->thread.fp_regs.fprs[rx].ui =
+			current->thread.fp_regs.fprs[ry].ui;
+		current->thread.fp_regs.fprs[rx+2].ui =
+			current->thread.fp_regs.fprs[ry+2].ui;
+	}
+	emu_set_CC_cs(regs, QR_c, QR_s);
+        return _fex;
+}
+
+/* Load negative double */
+static int emu_lndbr (struct pt_regs *regs, int rx, int ry) {
+        /* LNDBR: load negative 64-bit BFP.  The value in FP register ry
+         * is forced negative and stored in rx; the condition code is set
+         * from the result's class and sign.  `mode` holds the FPC
+         * rounding bits (presumably consumed by the FP_* macros -
+         * confirm).  Returns the soft-fp exception word. */
+        FP_DECL_D(DA); FP_DECL_D(DR);
+	FP_DECL_EX;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d);
+        if (DA_s == 0) {
+		FP_NEG_D(DR, DA);
+		FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
+	} else {
+		/* Operand is already negative: copy it through unchanged.
+		 * Propagate class and sign into DR so the condition code
+		 * below is not derived from uninitialized variables. */
+		DR_c = DA_c;
+		DR_s = DA_s;
+		current->thread.fp_regs.fprs[rx].ui =
+			current->thread.fp_regs.fprs[ry].ui;
+	}
+	emu_set_CC_cs(regs, DR_c, DR_s);
+        return _fex;
+}
+
+/* Load negative float */
+static int emu_lnebr (struct pt_regs *regs, int rx, int ry) {
+        /* LNEBR: load negative 32-bit BFP.  The value in FP register ry
+         * is forced negative and stored in rx; the condition code is set
+         * from the result's class and sign.  `mode` holds the FPC
+         * rounding bits (presumably consumed by the FP_* macros -
+         * confirm).  Returns the soft-fp exception word. */
+        FP_DECL_S(SA); FP_DECL_S(SR);
+	FP_DECL_EX;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f);
+        if (SA_s == 0) {
+		FP_NEG_S(SR, SA);
+		FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
+	} else {
+		/* Operand is already negative: copy it through unchanged.
+		 * Propagate class and sign into SR so the condition code
+		 * below is not derived from uninitialized variables. */
+		SR_c = SA_c;
+		SR_s = SA_s;
+		current->thread.fp_regs.fprs[rx].ui =
+			current->thread.fp_regs.fprs[ry].ui;
+	}
+	emu_set_CC_cs(regs, SR_c, SR_s);
+        return _fex;
+}
+
+/* Load positive long double */
+static int emu_lpxbr (struct pt_regs *regs, int rx, int ry) {
+        /* LPXBR: load positive 128-bit BFP.  The value in register pair
+         * ry/ry+2 is forced positive and stored into rx/rx+2; the
+         * condition code is set from the result's class and sign.
+         * `mode` holds the FPC rounding bits (presumably consumed by
+         * the FP_* macros - confirm).  Returns the soft-fp exception
+         * word. */
+        FP_DECL_Q(QA); FP_DECL_Q(QR);
+	FP_DECL_EX;
+	mathemu_ldcv cvt;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
+        cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
+        FP_UNPACK_QP(QA, &cvt.ld);
+        if (QA_s != 0) {
+		FP_NEG_Q(QR, QA);
+		FP_PACK_QP(&cvt.ld, QR);
+		current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
+		current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
+	} else {
+		/* Operand is already positive: copy it through unchanged.
+		 * Propagate class and sign into QR so the condition code
+		 * below is not derived from uninitialized variables. */
+		QR_c = QA_c;
+		QR_s = QA_s;
+		current->thread.fp_regs.fprs[rx].ui =
+			current->thread.fp_regs.fprs[ry].ui;
+		current->thread.fp_regs.fprs[rx+2].ui =
+			current->thread.fp_regs.fprs[ry+2].ui;
+	}
+	emu_set_CC_cs(regs, QR_c, QR_s);
+        return _fex;
+}
+
+/* Load positive double */
+static int emu_lpdbr (struct pt_regs *regs, int rx, int ry) {
+        /* LPDBR: load positive 64-bit BFP.  The value in FP register ry
+         * is forced positive and stored in rx; the condition code is set
+         * from the result's class and sign.  `mode` holds the FPC
+         * rounding bits (presumably consumed by the FP_* macros -
+         * confirm).  Returns the soft-fp exception word. */
+        FP_DECL_D(DA); FP_DECL_D(DR);
+	FP_DECL_EX;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d);
+        if (DA_s != 0) {
+		FP_NEG_D(DR, DA);
+		FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
+	} else {
+		/* Operand is already positive: copy it through unchanged.
+		 * Propagate class and sign into DR so the condition code
+		 * below is not derived from uninitialized variables. */
+		DR_c = DA_c;
+		DR_s = DA_s;
+		current->thread.fp_regs.fprs[rx].ui =
+			current->thread.fp_regs.fprs[ry].ui;
+	}
+	emu_set_CC_cs(regs, DR_c, DR_s);
+        return _fex;
+}
+
+/* Load positive float */
+static int emu_lpebr (struct pt_regs *regs, int rx, int ry) {
+        /* LPEBR: load positive 32-bit BFP.  The value in FP register ry
+         * is forced positive and stored in rx; the condition code is set
+         * from the result's class and sign.  `mode` holds the FPC
+         * rounding bits (presumably consumed by the FP_* macros -
+         * confirm).  Returns the soft-fp exception word. */
+        FP_DECL_S(SA); FP_DECL_S(SR);
+	FP_DECL_EX;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f);
+        if (SA_s != 0) {
+		FP_NEG_S(SR, SA);
+		FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
+	} else {
+		/* Operand is already positive: copy it through unchanged.
+		 * Propagate class and sign into SR so the condition code
+		 * below is not derived from uninitialized variables. */
+		SR_c = SA_c;
+		SR_s = SA_s;
+		current->thread.fp_regs.fprs[rx].ui =
+			current->thread.fp_regs.fprs[ry].ui;
+	}
+	emu_set_CC_cs(regs, SR_c, SR_s);
+        return _fex;
+}
+
+/* Load rounded long double to double */
+static int emu_ldxbr (struct pt_regs *regs, int rx, int ry) {
+        /* LDXBR: round the 128-bit BFP value in register pair ry/ry+2
+         * to 64-bit BFP and store it in FP register rx.  `mode` holds
+         * the FPC rounding bits (presumably consumed by the FP_* macros
+         * - confirm).  Returns the soft-fp exception word.
+         * Fixed: pack the 64-bit result through the .d union member as
+         * all other double routines do; the original used .f, which
+         * only worked because the union members share an address. */
+        FP_DECL_Q(QA); FP_DECL_D(DR);
+	FP_DECL_EX;
+	mathemu_ldcv cvt;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
+        cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
+        FP_UNPACK_QP(QA, &cvt.ld);
+	FP_CONV (D, Q, 2, 4, DR, QA);
+	FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
+        return _fex;
+}
+
+/* Load rounded long double to float */
+static int emu_lexbr (struct pt_regs *regs, int rx, int ry) {
+        /* LEXBR: round the 128-bit BFP value in register pair ry/ry+2
+         * to 32-bit BFP and store it in FP register rx.  `mode` holds
+         * the FPC rounding bits (presumably consumed by the FP_* macros
+         * - confirm).  Returns the soft-fp exception word. */
+        FP_DECL_Q(QA); FP_DECL_S(SR);
+	FP_DECL_EX;
+	mathemu_ldcv cvt;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
+        cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
+        FP_UNPACK_QP(QA, &cvt.ld);
+	FP_CONV (S, Q, 1, 4, SR, QA);
+	FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
+        return _fex;
+}
+
+/* Load rounded double to float */
+static int emu_ledbr (struct pt_regs *regs, int rx, int ry) {
+        /* LEDBR: round the 64-bit BFP value in FP register ry to 32-bit
+         * BFP and store it in FP register rx.  `mode` holds the FPC
+         * rounding bits (presumably consumed by the FP_* macros -
+         * confirm).  Returns the soft-fp exception word. */
+        FP_DECL_D(DA); FP_DECL_S(SR);
+	FP_DECL_EX;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d);
+	FP_CONV (S, D, 1, 2, SR, DA);
+	FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
+        return _fex;
+}
+
+/* Multiply long double */
+static int emu_mxbr (struct pt_regs *regs, int rx, int ry) {
+        /* MXBR: multiply the 128-bit BFP values in register pairs
+         * rx/rx+2 and ry/ry+2; the product replaces rx/rx+2.  No
+         * condition code change.  `mode` holds the FPC rounding bits
+         * (presumably consumed by the FP_* macros - confirm).  Returns
+         * the soft-fp exception word. */
+        FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
+        FP_DECL_EX;
+	mathemu_ldcv cvt;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        cvt.w.high = current->thread.fp_regs.fprs[rx].ui;
+        cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui;
+        FP_UNPACK_QP(QA, &cvt.ld);
+        cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
+        cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
+        FP_UNPACK_QP(QB, &cvt.ld);
+        FP_MUL_Q(QR, QA, QB);
+        FP_PACK_QP(&cvt.ld, QR);
+        current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
+        current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
+        return _fex;
+}
+
+/* Multiply double */
+static int emu_mdbr (struct pt_regs *regs, int rx, int ry) {
+        /* MDBR: multiply the 64-bit BFP values in FP registers rx and
+         * ry; the product replaces rx.  `mode` holds the FPC rounding
+         * bits (presumably consumed by the FP_* macros - confirm).
+         * Returns the soft-fp exception word. */
+        FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
+        FP_DECL_EX;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
+        FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d);
+        FP_MUL_D(DR, DA, DB);
+	FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
+        return _fex;
+}
+
+/* Multiply double */
+static int emu_mdb (struct pt_regs *regs, int rx, double *val) {
+        /* MDB: multiply FP register rx by the 64-bit BFP operand at
+         * *val; the product replaces rx.  `mode` holds the FPC rounding
+         * bits (presumably consumed by the FP_* macros - confirm).
+         * Returns the soft-fp exception word. */
+        FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
+        FP_DECL_EX;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
+        FP_UNPACK_DP(DB, val);
+        FP_MUL_D(DR, DA, DB);
+	FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
+        return _fex;
+}
+
+/* Multiply double to long double */
+static int emu_mxdbr (struct pt_regs *regs, int rx, int ry) {
+        /* MXDBR: both 64-bit operands (FP registers rx and ry) are
+         * lengthened to 128-bit BFP and multiplied; the 128-bit product
+         * is stored in register pair rx/rx+2.  DA is deliberately reused
+         * for both unpack/convert steps.  `mode` holds the FPC rounding
+         * bits (presumably consumed by the FP_* macros - confirm).
+         * Returns the soft-fp exception word. */
+        FP_DECL_D(DA); FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
+	FP_DECL_EX;
+	mathemu_ldcv cvt;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
+	FP_CONV (Q, D, 4, 2, QA, DA);
+        FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d);
+	FP_CONV (Q, D, 4, 2, QB, DA);
+        FP_MUL_Q(QR, QA, QB);
+        FP_PACK_QP(&cvt.ld, QR);
+        current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
+        current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
+        return _fex;
+}
+
+/* Multiply double to long double */
+static int emu_mxdb (struct pt_regs *regs, int rx, long double *val) {
+        /* MXDB (memory form): multiply the 128-bit value in register
+         * pair rx/rx+2 by the 128-bit image at *val (already widened by
+         * the caller, judging by the long double type - confirm); the
+         * product is stored back in rx/rx+2.  `mode` holds the FPC
+         * rounding bits (presumably consumed by the FP_* macros -
+         * confirm).  Returns the soft-fp exception word. */
+        FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
+        FP_DECL_EX;
+	mathemu_ldcv cvt;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        cvt.w.high = current->thread.fp_regs.fprs[rx].ui;
+        cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui;
+        FP_UNPACK_QP(QA, &cvt.ld);
+        FP_UNPACK_QP(QB, val);
+        FP_MUL_Q(QR, QA, QB);
+        FP_PACK_QP(&cvt.ld, QR);
+        current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
+        current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
+        return _fex;
+}
+
+/* Multiply float */
+static int emu_meebr (struct pt_regs *regs, int rx, int ry) {
+        /* MEEBR: multiply the 32-bit BFP values in FP registers rx and
+         * ry; the product replaces rx.  `mode` holds the FPC rounding
+         * bits (presumably consumed by the FP_* macros - confirm).
+         * Returns the soft-fp exception word. */
+        FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
+        FP_DECL_EX;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
+        FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f);
+        FP_MUL_S(SR, SA, SB);
+	FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
+        return _fex;
+}
+
+/* Multiply float */
+static int emu_meeb (struct pt_regs *regs, int rx, float *val) {
+        /* MEEB: multiply FP register rx by the 32-bit BFP operand at
+         * *val; the product replaces rx.  `mode` holds the FPC rounding
+         * bits (presumably consumed by the FP_* macros - confirm).
+         * Returns the soft-fp exception word. */
+        FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
+        FP_DECL_EX;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
+        FP_UNPACK_SP(SB, val);
+        FP_MUL_S(SR, SA, SB);
+	FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
+        return _fex;
+}
+
+/* Multiply float to double */
+static int emu_mdebr (struct pt_regs *regs, int rx, int ry) {
+        /* MDEBR: both 32-bit operands (FP registers rx and ry) are
+         * lengthened to 64-bit BFP and multiplied; the 64-bit product
+         * is stored in FP register rx.  SA is deliberately reused for
+         * both unpack/convert steps.  `mode` holds the FPC rounding
+         * bits (presumably consumed by the FP_* macros - confirm).
+         * Returns the soft-fp exception word. */
+        FP_DECL_S(SA); FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
+	FP_DECL_EX;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
+	FP_CONV (D, S, 2, 1, DA, SA);
+        FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f);
+	FP_CONV (D, S, 2, 1, DB, SA);
+        FP_MUL_D(DR, DA, DB);
+	FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
+        return _fex;
+}
+
+/* Multiply float to double */
+static int emu_mdeb (struct pt_regs *regs, int rx, float *val) {
+        /* MDEB: FP register rx and the 32-bit BFP operand at *val are
+         * lengthened to 64-bit BFP and multiplied; the 64-bit product
+         * is stored in FP register rx.  SA is deliberately reused for
+         * both unpack/convert steps.  `mode` holds the FPC rounding
+         * bits (presumably consumed by the FP_* macros - confirm).
+         * Returns the soft-fp exception word. */
+        FP_DECL_S(SA); FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
+	FP_DECL_EX;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
+	FP_CONV (D, S, 2, 1, DA, SA);
+        FP_UNPACK_SP(SA, val);
+	FP_CONV (D, S, 2, 1, DB, SA);
+        FP_MUL_D(DR, DA, DB);
+	FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
+        return _fex;
+}
+
+/* Multiply and add double */
+static int emu_madbr (struct pt_regs *regs, int rx, int ry, int rz) {
+        /* MADBR: multiply-and-add, 64-bit BFP.  Computes rx * ry + rz
+         * and stores the result in FP register rz (the accumulator
+         * operand).  `mode` holds the FPC rounding bits (presumably
+         * consumed by the FP_* macros - confirm).  Returns the soft-fp
+         * exception word. */
+        FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(DR);
+        FP_DECL_EX;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
+        FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d);
+        FP_UNPACK_DP(DC, &current->thread.fp_regs.fprs[rz].d);
+        FP_MUL_D(DR, DA, DB);
+        FP_ADD_D(DR, DR, DC);
+	FP_PACK_DP(&current->thread.fp_regs.fprs[rz].d, DR);
+        return _fex;
+}
+
+/* Multiply and add double */
+static int emu_madb (struct pt_regs *regs, int rx, double *val, int rz) {
+        /* MADB: multiply-and-add, 64-bit BFP, memory operand.  Computes
+         * rx * (*val) + rz and stores the result in FP register rz.
+         * `mode` holds the FPC rounding bits (presumably consumed by
+         * the FP_* macros - confirm).  Returns the soft-fp exception
+         * word. */
+        FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(DR);
+        FP_DECL_EX;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
+        FP_UNPACK_DP(DB, val);
+        FP_UNPACK_DP(DC, &current->thread.fp_regs.fprs[rz].d);
+        FP_MUL_D(DR, DA, DB);
+        FP_ADD_D(DR, DR, DC);
+	FP_PACK_DP(&current->thread.fp_regs.fprs[rz].d, DR);
+        return _fex;
+}
+
+/* Multiply and add float */
+static int emu_maebr (struct pt_regs *regs, int rx, int ry, int rz) {
+        /* MAEBR: multiply-and-add, 32-bit BFP.  Computes rx * ry + rz
+         * and stores the result in FP register rz (the accumulator
+         * operand).  `mode` holds the FPC rounding bits (presumably
+         * consumed by the FP_* macros - confirm).  Returns the soft-fp
+         * exception word. */
+        FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(SR);
+        FP_DECL_EX;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
+        FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f);
+        FP_UNPACK_SP(SC, &current->thread.fp_regs.fprs[rz].f);
+        FP_MUL_S(SR, SA, SB);
+        FP_ADD_S(SR, SR, SC);
+	FP_PACK_SP(&current->thread.fp_regs.fprs[rz].f, SR);
+        return _fex;
+}
+
+/* Multiply and add float */
+static int emu_maeb (struct pt_regs *regs, int rx, float *val, int rz) {
+        /* MAEB: multiply-and-add, 32-bit BFP, memory operand.  Computes
+         * rx * (*val) + rz and stores the result in FP register rz.
+         * `mode` holds the FPC rounding bits (presumably consumed by
+         * the FP_* macros - confirm).  Returns the soft-fp exception
+         * word. */
+        FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(SR);
+        FP_DECL_EX;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
+        FP_UNPACK_SP(SB, val);
+        FP_UNPACK_SP(SC, &current->thread.fp_regs.fprs[rz].f);
+        FP_MUL_S(SR, SA, SB);
+        FP_ADD_S(SR, SR, SC);
+	FP_PACK_SP(&current->thread.fp_regs.fprs[rz].f, SR);
+        return _fex;
+}
+
+/* Multiply and subtract double */
+static int emu_msdbr (struct pt_regs *regs, int rx, int ry, int rz) {
+        /* MSDBR: multiply-and-subtract, 64-bit BFP.  Computes
+         * rx * ry - rz and stores the result in FP register rz (the
+         * accumulator operand).  `mode` holds the FPC rounding bits
+         * (presumably consumed by the FP_* macros - confirm).  Returns
+         * the soft-fp exception word. */
+        FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(DR);
+        FP_DECL_EX;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
+        FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d);
+        FP_UNPACK_DP(DC, &current->thread.fp_regs.fprs[rz].d);
+        FP_MUL_D(DR, DA, DB);
+        FP_SUB_D(DR, DR, DC);
+	FP_PACK_DP(&current->thread.fp_regs.fprs[rz].d, DR);
+        return _fex;
+}
+
+/* Multiply and subtract double */
+static int emu_msdb (struct pt_regs *regs, int rx, double *val, int rz) {
+        /* MSDB: multiply-and-subtract, 64-bit BFP, memory operand.
+         * Computes rx * (*val) - rz and stores the result in FP
+         * register rz.  `mode` holds the FPC rounding bits (presumably
+         * consumed by the FP_* macros - confirm).  Returns the soft-fp
+         * exception word. */
+        FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(DR);
+        FP_DECL_EX;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
+        FP_UNPACK_DP(DB, val);
+        FP_UNPACK_DP(DC, &current->thread.fp_regs.fprs[rz].d);
+        FP_MUL_D(DR, DA, DB);
+        FP_SUB_D(DR, DR, DC);
+	FP_PACK_DP(&current->thread.fp_regs.fprs[rz].d, DR);
+        return _fex;
+}
+
+/* Multiply and subtract float */
+static int emu_msebr (struct pt_regs *regs, int rx, int ry, int rz) {
+        /* MSEBR: multiply-and-subtract, 32-bit BFP.  Computes
+         * rx * ry - rz and stores the result in FP register rz (the
+         * accumulator operand).  `mode` holds the FPC rounding bits
+         * (presumably consumed by the FP_* macros - confirm).  Returns
+         * the soft-fp exception word. */
+        FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(SR);
+        FP_DECL_EX;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
+        FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f);
+        FP_UNPACK_SP(SC, &current->thread.fp_regs.fprs[rz].f);
+        FP_MUL_S(SR, SA, SB);
+        FP_SUB_S(SR, SR, SC);
+	FP_PACK_SP(&current->thread.fp_regs.fprs[rz].f, SR);
+        return _fex;
+}
+
+/* Multiply and subtract float */
+static int emu_mseb (struct pt_regs *regs, int rx, float *val, int rz) {
+        /* MSEB: multiply-and-subtract, 32-bit BFP, memory operand.
+         * Computes rx * (*val) - rz and stores the result in FP
+         * register rz.  `mode` holds the FPC rounding bits (presumably
+         * consumed by the FP_* macros - confirm).  Returns the soft-fp
+         * exception word. */
+        FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(SR);
+        FP_DECL_EX;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
+        FP_UNPACK_SP(SB, val);
+        FP_UNPACK_SP(SC, &current->thread.fp_regs.fprs[rz].f);
+        FP_MUL_S(SR, SA, SB);
+        FP_SUB_S(SR, SR, SC);
+	FP_PACK_SP(&current->thread.fp_regs.fprs[rz].f, SR);
+        return _fex;
+}
+
+/* Set floating point control word */
+static int emu_sfpc (struct pt_regs *regs, int rx, int ry) {
+        /* SFPC: set the floating point control word from general
+         * register rx.  Any bit outside FPC_VALID_MASK makes the
+         * instruction illegal (SIGILL); otherwise the software FPC is
+         * updated and 0 (no exception) is returned.  ry is unused --
+         * present only to match the common emulation prototype. */
+        __u32 temp;
+
+        temp = regs->gprs[rx];
+        if ((temp & ~FPC_VALID_MASK) != 0)
+		return SIGILL;
+	current->thread.fp_regs.fpc = temp;
+        return 0;
+}
+
+/* Square root long double */
+static int emu_sqxbr (struct pt_regs *regs, int rx, int ry) {
+        /* SQXBR: square root of the 128-bit BFP value in register pair
+         * ry/ry+2, stored into rx/rx+2.  `mode` holds the FPC rounding
+         * bits (presumably consumed by the FP_* macros - confirm).
+         * Returns the soft-fp exception word.
+         * NOTE(review): hardware SQXBR leaves the condition code
+         * unchanged; the emu_set_CC_cs() call below may be intentional
+         * emulator behaviour -- confirm against the architecture. */
+        FP_DECL_Q(QA); FP_DECL_Q(QR);
+        FP_DECL_EX;
+	mathemu_ldcv cvt;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
+        cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
+        FP_UNPACK_QP(QA, &cvt.ld);
+	FP_SQRT_Q(QR, QA);
+        FP_PACK_QP(&cvt.ld, QR);
+        current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
+        current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
+        emu_set_CC_cs(regs, QR_c, QR_s);
+        return _fex;
+}
+
+/* Square root double */
+static int emu_sqdbr (struct pt_regs *regs, int rx, int ry) {
+        /* SQDBR: square root of the 64-bit BFP value in FP register ry,
+         * stored in rx; condition code set from the result.  `mode`
+         * holds the FPC rounding bits (presumably consumed by the FP_*
+         * macros - confirm).  Returns the soft-fp exception word. */
+        FP_DECL_D(DA); FP_DECL_D(DR);
+        FP_DECL_EX;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d);
+	FP_SQRT_D(DR, DA);
+	FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
+        emu_set_CC_cs(regs, DR_c, DR_s);
+        return _fex;
+}
+
+/* Square root double */
+static int emu_sqdb (struct pt_regs *regs, int rx, double *val) {
+        /* SQDB: square root of the 64-bit BFP operand at *val, stored
+         * in FP register rx; condition code set from the result.
+         * `mode` holds the FPC rounding bits (presumably consumed by
+         * the FP_* macros - confirm).  Returns the soft-fp exception
+         * word. */
+        FP_DECL_D(DA); FP_DECL_D(DR);
+        FP_DECL_EX;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_DP(DA, val);
+	FP_SQRT_D(DR, DA);
+	FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
+        emu_set_CC_cs(regs, DR_c, DR_s);
+        return _fex;
+}
+
+/* Square root float */
+static int emu_sqebr (struct pt_regs *regs, int rx, int ry) {
+        /* SQEBR: square root of the 32-bit BFP value in FP register ry,
+         * stored in rx; condition code set from the result.  `mode`
+         * holds the FPC rounding bits (presumably consumed by the FP_*
+         * macros - confirm).  Returns the soft-fp exception word. */
+        FP_DECL_S(SA); FP_DECL_S(SR);
+        FP_DECL_EX;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f);
+	FP_SQRT_S(SR, SA);
+	FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
+        emu_set_CC_cs(regs, SR_c, SR_s);
+        return _fex;
+}
+
+/* Square root float */
+static int emu_sqeb (struct pt_regs *regs, int rx, float *val) {
+        /* SQEB: square root of the 32-bit BFP operand at *val, stored
+         * in FP register rx; condition code set from the result.
+         * `mode` holds the FPC rounding bits (presumably consumed by
+         * the FP_* macros - confirm).  Returns the soft-fp exception
+         * word. */
+        FP_DECL_S(SA); FP_DECL_S(SR);
+        FP_DECL_EX;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_SP(SA, val);
+	FP_SQRT_S(SR, SA);
+	FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
+        emu_set_CC_cs(regs, SR_c, SR_s);
+        return _fex;
+}
+
+/* Subtract long double */
+static int emu_sxbr (struct pt_regs *regs, int rx, int ry) {
+        /* SXBR: subtract the 128-bit BFP value in register pair ry/ry+2
+         * from the one in rx/rx+2; the difference replaces rx/rx+2 and
+         * the condition code is set from the result.  `mode` holds the
+         * FPC rounding bits (presumably consumed by the FP_* macros -
+         * confirm).  Returns the soft-fp exception word. */
+        FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
+        FP_DECL_EX;
+	mathemu_ldcv cvt;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        cvt.w.high = current->thread.fp_regs.fprs[rx].ui;
+        cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui;
+        FP_UNPACK_QP(QA, &cvt.ld);
+        cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
+        cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
+        FP_UNPACK_QP(QB, &cvt.ld);
+        FP_SUB_Q(QR, QA, QB);
+        FP_PACK_QP(&cvt.ld, QR);
+        current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
+        current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
+        emu_set_CC_cs(regs, QR_c, QR_s);
+        return _fex;
+}
+
+/* Subtract double */
+static int emu_sdbr (struct pt_regs *regs, int rx, int ry) {
+        /* SDBR: subtract FP register ry from rx (64-bit BFP); the
+         * difference replaces rx and the condition code is set from the
+         * result.  `mode` holds the FPC rounding bits (presumably
+         * consumed by the FP_* macros - confirm).  Returns the soft-fp
+         * exception word. */
+        FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
+        FP_DECL_EX;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
+        FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d);
+        FP_SUB_D(DR, DA, DB);
+	FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
+        emu_set_CC_cs(regs, DR_c, DR_s);
+        return _fex;
+}
+
+/* Subtract double */
+static int emu_sdb (struct pt_regs *regs, int rx, double *val) {
+        /* SDB: subtract the 64-bit BFP operand at *val from FP register
+         * rx; the difference replaces rx and the condition code is set
+         * from the result.  `mode` holds the FPC rounding bits
+         * (presumably consumed by the FP_* macros - confirm).  Returns
+         * the soft-fp exception word. */
+        FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
+        FP_DECL_EX;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
+        FP_UNPACK_DP(DB, val);
+        FP_SUB_D(DR, DA, DB);
+	FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
+        emu_set_CC_cs(regs, DR_c, DR_s);
+        return _fex;
+}
+
+/* Subtract float */
+static int emu_sebr (struct pt_regs *regs, int rx, int ry) {
+        /* SEBR: subtract FP register ry from rx (32-bit BFP); the
+         * difference replaces rx and the condition code is set from the
+         * result.  `mode` holds the FPC rounding bits (presumably
+         * consumed by the FP_* macros - confirm).  Returns the soft-fp
+         * exception word. */
+        FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
+        FP_DECL_EX;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
+        FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f);
+        FP_SUB_S(SR, SA, SB);
+	FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
+        emu_set_CC_cs(regs, SR_c, SR_s);
+        return _fex;
+}
+
+/* Subtract float */
+static int emu_seb (struct pt_regs *regs, int rx, float *val) {
+        /* SEB: subtract the 32-bit BFP operand at *val from FP register
+         * rx; the difference replaces rx and the condition code is set
+         * from the result.  `mode` holds the FPC rounding bits
+         * (presumably consumed by the FP_* macros - confirm).  Returns
+         * the soft-fp exception word. */
+        FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
+        FP_DECL_EX;
+        int mode;
+
+	mode = current->thread.fp_regs.fpc & 3;
+        FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
+        FP_UNPACK_SP(SB, val);
+        FP_SUB_S(SR, SA, SB);
+	FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
+        emu_set_CC_cs(regs, SR_c, SR_s);
+        return _fex;
+}
+
+/* Test data class long double */
+/* Test data class long double */
+static int emu_tcxb (struct pt_regs *regs, int rx, long val) {
+        /* TCXB: classify the 128-bit BFP value in register pair rx/rx+2
+         * without trapping.  `val` is the class-mask operand; `bit`
+         * indexes the mask bit for the operand's class (even bit) or
+         * its positive variant (bit+1 when the sign is clear).  The
+         * condition code is set to the selected mask bit.  Never raises
+         * an exception (returns 0). */
+        FP_DECL_Q(QA);
+	mathemu_ldcv cvt;
+	int bit;
+
+        cvt.w.high = current->thread.fp_regs.fprs[rx].ui;
+        cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui;
+        FP_UNPACK_RAW_QP(QA, &cvt.ld);
+	switch (QA_e) {
+	default:
+		bit = 8;		/* normalized number */
+		break;
+	case 0:
+		if (_FP_FRAC_ZEROP_4(QA))
+			bit = 10;	/* zero */
+		else
+			bit = 6;	/* denormalized number */
+		break;
+	case _FP_EXPMAX_Q:
+		if (_FP_FRAC_ZEROP_4(QA))
+			bit = 4;	/* infinity */
+		else if (_FP_FRAC_HIGH_RAW_Q(QA) & _FP_QNANBIT_Q)
+			bit = 2;	/* quiet NAN */
+		else
+			bit = 0;	/* signaling NAN */
+		break;
+	}
+	if (!QA_s)
+		bit++;		/* positive classes sit one bit above negative */
+	emu_set_CC(regs, ((__u32) val >> bit) & 1);
+        return 0;
+}
+
+/* Test data class double */
+/* Test data class double */
+static int emu_tcdb (struct pt_regs *regs, int rx, long val) {
+        /* TCDB: classify the 64-bit BFP value in FP register rx without
+         * trapping.  `val` is the class-mask operand; the condition code
+         * is set to the mask bit selected by the operand's class and
+         * sign.  Never raises an exception (returns 0). */
+        FP_DECL_D(DA);
+	int bit;
+
+        FP_UNPACK_RAW_DP(DA, &current->thread.fp_regs.fprs[rx].d);
+	switch (DA_e) {
+	default:
+		bit = 8;		/* normalized number */
+		break;
+	case 0:
+		if (_FP_FRAC_ZEROP_2(DA))
+			bit = 10;	/* zero */
+		else
+			bit = 6;	/* denormalized number */
+		break;
+	case _FP_EXPMAX_D:
+		if (_FP_FRAC_ZEROP_2(DA))
+			bit = 4;	/* infinity */
+		else if (_FP_FRAC_HIGH_RAW_D(DA) & _FP_QNANBIT_D)
+			bit = 2;	/* quiet NAN */
+		else
+			bit = 0;	/* signaling NAN */
+		break;
+	}
+	if (!DA_s)
+		bit++;		/* positive classes sit one bit above negative */
+	emu_set_CC(regs, ((__u32) val >> bit) & 1);
+        return 0;
+}
+
+/* Test data class float */
+/* Test data class float */
+static int emu_tceb (struct pt_regs *regs, int rx, long val) {
+        /* TCEB: classify the 32-bit BFP value in FP register rx without
+         * trapping.  `val` is the class-mask operand; the condition code
+         * is set to the mask bit selected by the operand's class and
+         * sign.  Never raises an exception (returns 0). */
+        FP_DECL_S(SA);
+	int bit;
+
+        FP_UNPACK_RAW_SP(SA, &current->thread.fp_regs.fprs[rx].f);
+	switch (SA_e) {
+	default:
+		bit = 8;		/* normalized number */
+		break;
+	case 0:
+		if (_FP_FRAC_ZEROP_1(SA))
+			bit = 10;	/* zero */
+		else
+			bit = 6;	/* denormalized number */
+		break;
+	case _FP_EXPMAX_S:
+		if (_FP_FRAC_ZEROP_1(SA))
+			bit = 4;	/* infinity */
+		else if (_FP_FRAC_HIGH_RAW_S(SA) & _FP_QNANBIT_S)
+			bit = 2;	/* quiet NAN */
+		else
+			bit = 0;	/* signaling NAN */
+		break;
+	}
+	if (!SA_s)
+		bit++;		/* positive classes sit one bit above negative */
+	emu_set_CC(regs, ((__u32) val >> bit) & 1);
+        return 0;
+}
+
+static inline void emu_load_regd(int reg) {
+        /* Reload 64 bits of hardware FP register <reg> from the software
+         * copy in current->thread.fp_regs.  reg&9 rejects everything but
+         * 0, 2, 4 and 6 -- presumably the only FP registers backed by
+         * hardware here (confirm).  The LD template has register 0 in
+         * its R1 field; EXECUTE (ex) ORs reg<<4 into that field so the
+         * correct register is loaded.  bras uses GPR 1 to address the
+         * template, hence the clobber. */
+        if ((reg&9) != 0)         /* test if reg in {0,2,4,6} */
+                return;
+        asm volatile (            /* load reg from fp_regs.fprs[reg] */
+                "     bras  1,0f\n"
+                "     ld    0,0(%1)\n"
+                "0:   ex    %0,0(1)"
+                : /* no output */
+                : "a" (reg<<4),"a" (&current->thread.fp_regs.fprs[reg].d)
+                : "1" );
+}
+
+static inline void emu_load_rege(int reg) {
+        /* Reload 32 bits of hardware FP register <reg> from the software
+         * copy in current->thread.fp_regs.  reg&9 rejects everything but
+         * 0, 2, 4 and 6 -- presumably the only FP registers backed by
+         * hardware here (confirm).  EXECUTE (ex) ORs reg<<4 into the R1
+         * field of the LE template; GPR 1 addresses the template. */
+        if ((reg&9) != 0)         /* test if reg in {0,2,4,6} */
+                return;
+        asm volatile (            /* load reg from fp_regs.fprs[reg] */
+                "     bras  1,0f\n"
+                "     le    0,0(%1)\n"
+                "0:   ex    %0,0(1)"
+                : /* no output */
+                : "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].f)
+                : "1" );
+}
+
+static inline void emu_store_regd(int reg) {
+        /* Save 64 bits of hardware FP register <reg> into the software
+         * copy in current->thread.fp_regs.  reg&9 rejects everything but
+         * 0, 2, 4 and 6 -- presumably the only FP registers backed by
+         * hardware here (confirm).  EXECUTE (ex) ORs reg<<4 into the R1
+         * field of the STD template; GPR 1 addresses the template. */
+        if ((reg&9) != 0)         /* test if reg in {0,2,4,6} */
+                return;
+        asm volatile (            /* store reg to fp_regs.fprs[reg] */
+                "     bras  1,0f\n"
+                "     std   0,0(%1)\n"
+                "0:   ex    %0,0(1)"
+                : /* no output */
+                : "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].d)
+                : "1" );
+}
+
+
+static inline void emu_store_rege(int reg) {
+        /* Save 32 bits of hardware FP register <reg> into the software
+         * copy in current->thread.fp_regs.  reg&9 rejects everything but
+         * 0, 2, 4 and 6 -- presumably the only FP registers backed by
+         * hardware here (confirm).  EXECUTE (ex) ORs reg<<4 into the R1
+         * field of the STE template; GPR 1 addresses the template. */
+        if ((reg&9) != 0)         /* test if reg in {0,2,4,6} */
+                return;
+        asm volatile (            /* store reg to fp_regs.fprs[reg] */
+                "     bras  1,0f\n"
+                "     ste   0,0(%1)\n"
+                "0:   ex    %0,0(1)"
+                : /* no output */
+                : "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].f)
+                : "1" );
+}
+
+int math_emu_b3(__u8 *opcode, struct pt_regs * regs) {
+        int _fex = 0;
+        static const __u8 format_table[256] = {
+                [0x00] = 0x03,[0x01] = 0x03,[0x02] = 0x03,[0x03] = 0x03,
+		[0x04] = 0x0f,[0x05] = 0x0d,[0x06] = 0x0e,[0x07] = 0x0d,
+		[0x08] = 0x03,[0x09] = 0x03,[0x0a] = 0x03,[0x0b] = 0x03,
+                [0x0c] = 0x0f,[0x0d] = 0x03,[0x0e] = 0x06,[0x0f] = 0x06,
+		[0x10] = 0x02,[0x11] = 0x02,[0x12] = 0x02,[0x13] = 0x02,
+		[0x14] = 0x03,[0x15] = 0x02,[0x16] = 0x01,[0x17] = 0x03,
+                [0x18] = 0x02,[0x19] = 0x02,[0x1a] = 0x02,[0x1b] = 0x02,
+		[0x1c] = 0x02,[0x1d] = 0x02,[0x1e] = 0x05,[0x1f] = 0x05,
+		[0x40] = 0x01,[0x41] = 0x01,[0x42] = 0x01,[0x43] = 0x01,
+                [0x44] = 0x12,[0x45] = 0x0d,[0x46] = 0x11,[0x47] = 0x04,
+		[0x48] = 0x01,[0x49] = 0x01,[0x4a] = 0x01,[0x4b] = 0x01,
+		[0x4c] = 0x01,[0x4d] = 0x01,[0x53] = 0x06,[0x57] = 0x06,
+                [0x5b] = 0x05,[0x5f] = 0x05,[0x84] = 0x13,[0x8c] = 0x13,
+		[0x94] = 0x09,[0x95] = 0x08,[0x96] = 0x07,[0x98] = 0x0c,
+		[0x99] = 0x0b,[0x9a] = 0x0a
+        };
+        static const void *jump_table[256]= {
+                [0x00] = emu_lpebr,[0x01] = emu_lnebr,[0x02] = emu_ltebr,
+                [0x03] = emu_lcebr,[0x04] = emu_ldebr,[0x05] = emu_lxdbr,
+                [0x06] = emu_lxebr,[0x07] = emu_mxdbr,[0x08] = emu_kebr,
+                [0x09] = emu_cebr, [0x0a] = emu_aebr, [0x0b] = emu_sebr,
+                [0x0c] = emu_mdebr,[0x0d] = emu_debr, [0x0e] = emu_maebr,
+                [0x0f] = emu_msebr,[0x10] = emu_lpdbr,[0x11] = emu_lndbr, 
+                [0x12] = emu_ltdbr,[0x13] = emu_lcdbr,[0x14] = emu_sqebr,
+                [0x15] = emu_sqdbr,[0x16] = emu_sqxbr,[0x17] = emu_meebr,
+                [0x18] = emu_kdbr, [0x19] = emu_cdbr, [0x1a] = emu_adbr,
+                [0x1b] = emu_sdbr, [0x1c] = emu_mdbr, [0x1d] = emu_ddbr,  
+                [0x1e] = emu_madbr,[0x1f] = emu_msdbr,[0x40] = emu_lpxbr,
+                [0x41] = emu_lnxbr,[0x42] = emu_ltxbr,[0x43] = emu_lcxbr,
+                [0x44] = emu_ledbr,[0x45] = emu_ldxbr,[0x46] = emu_lexbr,
+                [0x47] = emu_fixbr,[0x48] = emu_kxbr, [0x49] = emu_cxbr,  
+                [0x4a] = emu_axbr, [0x4b] = emu_sxbr, [0x4c] = emu_mxbr,
+                [0x4d] = emu_dxbr, [0x53] = emu_diebr,[0x57] = emu_fiebr,
+                [0x5b] = emu_didbr,[0x5f] = emu_fidbr,[0x84] = emu_sfpc,
+                [0x8c] = emu_efpc, [0x94] = emu_cefbr,[0x95] = emu_cdfbr, 
+                [0x96] = emu_cxfbr,[0x98] = emu_cfebr,[0x99] = emu_cfdbr,
+                [0x9a] = emu_cfxbr
+        };
+
+        switch (format_table[opcode[1]]) {
+        case 1: /* RRE format, long double operation */
+                if (opcode[3] & 0x22)
+			return SIGILL;
+                emu_store_regd((opcode[3] >> 4) & 15);
+                emu_store_regd(((opcode[3] >> 4) & 15) + 2);
+                emu_store_regd(opcode[3] & 15);
+                emu_store_regd((opcode[3] & 15) + 2);
+                /* call the emulation function */
+                _fex = ((int (*)(struct pt_regs *,int, int))
+			jump_table[opcode[1]])
+                        (regs, opcode[3] >> 4, opcode[3] & 15);
+                emu_load_regd((opcode[3] >> 4) & 15);
+                emu_load_regd(((opcode[3] >> 4) & 15) + 2);
+                emu_load_regd(opcode[3] & 15);
+                emu_load_regd((opcode[3] & 15) + 2);
+		break;
+        case 2: /* RRE format, double operation */
+                emu_store_regd((opcode[3] >> 4) & 15);
+                emu_store_regd(opcode[3] & 15);
+                /* call the emulation function */
+                _fex = ((int (*)(struct pt_regs *, int, int))
+			jump_table[opcode[1]])
+                        (regs, opcode[3] >> 4, opcode[3] & 15);
+                emu_load_regd((opcode[3] >> 4) & 15);
+                emu_load_regd(opcode[3] & 15);
+		break;
+        case 3: /* RRE format, float operation */
+                emu_store_rege((opcode[3] >> 4) & 15);
+                emu_store_rege(opcode[3] & 15);
+                /* call the emulation function */
+                _fex = ((int (*)(struct pt_regs *, int, int))
+			jump_table[opcode[1]])
+                        (regs, opcode[3] >> 4, opcode[3] & 15);
+                emu_load_rege((opcode[3] >> 4) & 15);
+                emu_load_rege(opcode[3] & 15);
+		break;
+        case 4: /* RRF format, long double operation */
+                if (opcode[3] & 0x22)
+			return SIGILL;
+                emu_store_regd((opcode[3] >> 4) & 15);
+                emu_store_regd(((opcode[3] >> 4) & 15) + 2);
+                emu_store_regd(opcode[3] & 15);
+                emu_store_regd((opcode[3] & 15) + 2);
+                /* call the emulation function */
+                _fex = ((int (*)(struct pt_regs *, int, int, int))
+			jump_table[opcode[1]])
+                        (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4);
+                emu_load_regd((opcode[3] >> 4) & 15);
+                emu_load_regd(((opcode[3] >> 4) & 15) + 2);
+                emu_load_regd(opcode[3] & 15);
+                emu_load_regd((opcode[3] & 15) + 2);
+		break;
+        case 5: /* RRF format, double operation */
+                emu_store_regd((opcode[2] >> 4) & 15);
+                emu_store_regd((opcode[3] >> 4) & 15);
+                emu_store_regd(opcode[3] & 15);
+                /* call the emulation function */
+                _fex = ((int (*)(struct pt_regs *, int, int, int))
+			jump_table[opcode[1]])
+                        (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4);
+                emu_load_regd((opcode[2] >> 4) & 15);
+                emu_load_regd((opcode[3] >> 4) & 15);
+                emu_load_regd(opcode[3] & 15);
+		break;
+        case 6: /* RRF format, float operation */
+                emu_store_rege((opcode[2] >> 4) & 15);
+                emu_store_rege((opcode[3] >> 4) & 15);
+                emu_store_rege(opcode[3] & 15);
+                /* call the emulation function */
+                _fex = ((int (*)(struct pt_regs *, int, int, int))
+			jump_table[opcode[1]])
+                        (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4);
+                emu_load_rege((opcode[2] >> 4) & 15);
+                emu_load_rege((opcode[3] >> 4) & 15);
+                emu_load_rege(opcode[3] & 15);
+		break;
+        case 7: /* RRE format, cxfbr instruction */
+                /* call the emulation function */
+                if (opcode[3] & 0x20)
+			return SIGILL;
+                _fex = ((int (*)(struct pt_regs *, int, int))
+			jump_table[opcode[1]])
+                        (regs, opcode[3] >> 4, opcode[3] & 15);
+                emu_load_regd((opcode[3] >> 4) & 15);
+                emu_load_regd(((opcode[3] >> 4) & 15) + 2);
+		break;
+        case 8: /* RRE format, cdfbr instruction */
+                /* call the emulation function */
+                _fex = ((int (*)(struct pt_regs *, int, int))
+			jump_table[opcode[1]])
+                        (regs, opcode[3] >> 4, opcode[3] & 15);
+                emu_load_regd((opcode[3] >> 4) & 15);
+		break;
+        case 9: /* RRE format, cefbr instruction */
+                /* call the emulation function */
+                _fex = ((int (*)(struct pt_regs *, int, int))
+			jump_table[opcode[1]])
+                        (regs, opcode[3] >> 4, opcode[3] & 15);
+                emu_load_rege((opcode[3] >> 4) & 15);
+		break;
+        case 10: /* RRF format, cfxbr instruction */
+                if ((opcode[2] & 128) == 128 || (opcode[2] & 96) == 32)
+			/* mask of { 2,3,8-15 } is invalid */
+			return SIGILL;
+                if (opcode[3] & 2)
+			return SIGILL;
+                emu_store_regd(opcode[3] & 15);
+                emu_store_regd((opcode[3] & 15) + 2);
+                /* call the emulation function */
+                _fex = ((int (*)(struct pt_regs *, int, int, int))
+			jump_table[opcode[1]])
+                        (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4);
+		break;
+        case 11: /* RRF format, cfdbr instruction */
+                if ((opcode[2] & 128) == 128 || (opcode[2] & 96) == 32)
+			/* mask of { 2,3,8-15 } is invalid */
+			return SIGILL;
+                emu_store_regd(opcode[3] & 15);
+                /* call the emulation function */
+                _fex = ((int (*)(struct pt_regs *, int, int, int))
+			jump_table[opcode[1]])
+                        (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4);
+		break;
+        case 12: /* RRF format, cfebr instruction */
+                if ((opcode[2] & 128) == 128 || (opcode[2] & 96) == 32)
+			/* mask of { 2,3,8-15 } is invalid */
+			return SIGILL;
+                emu_store_rege(opcode[3] & 15);
+                /* call the emulation function */
+                _fex = ((int (*)(struct pt_regs *, int, int, int))
+			jump_table[opcode[1]])
+                        (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4);
+		break;
+        case 13: /* RRE format, ldxbr & mdxbr instruction */
+                /* double store but long double load */
+                if (opcode[3] & 0x20)
+			return SIGILL;
+                emu_store_regd((opcode[3] >> 4) & 15);
+                emu_store_regd(opcode[3]  & 15);
+                /* call the emulation function */
+                _fex = ((int (*)(struct pt_regs *, int, int))
+			jump_table[opcode[1]])
+                        (regs, opcode[3] >> 4, opcode[3] & 15);
+                emu_load_regd((opcode[3] >> 4) & 15);
+                emu_load_regd(((opcode[3] >> 4) & 15) + 2);
+		break;
+        case 14: /* RRE format, ldxbr & mdxbr instruction */
+                /* float store but long double load */
+                if (opcode[3] & 0x20)
+			return SIGILL;
+                emu_store_rege((opcode[3] >> 4) & 15);
+                emu_store_rege(opcode[3]  & 15);
+                /* call the emulation function */
+                _fex = ((int (*)(struct pt_regs *, int, int))
+			jump_table[opcode[1]])
+                        (regs, opcode[3] >> 4, opcode[3] & 15);
+                emu_load_regd((opcode[3] >> 4) & 15);
+                emu_load_regd(((opcode[3] >> 4) & 15) + 2);
+		break;
+        case 15: /* RRE format, ldebr & mdebr instruction */
+                /* float store but double load */
+                emu_store_rege((opcode[3] >> 4) & 15);
+                emu_store_rege(opcode[3]  & 15);
+                /* call the emulation function */
+                _fex = ((int (*)(struct pt_regs *, int, int))
+			jump_table[opcode[1]])
+                        (regs, opcode[3] >> 4, opcode[3] & 15);
+                emu_load_regd((opcode[3] >> 4) & 15);
+		break;
+        case 16: /* RRE format, ldxbr instruction */
+                /* long double store but double load */
+                if (opcode[3] & 2)
+			return SIGILL;
+                emu_store_regd(opcode[3] & 15);
+                emu_store_regd((opcode[3] & 15) + 2);
+                /* call the emulation function */
+                _fex = ((int (*)(struct pt_regs *, int, int))
+			jump_table[opcode[1]])
+                        (regs, opcode[3] >> 4, opcode[3] & 15);
+                emu_load_regd((opcode[3] >> 4) & 15);
+                break;
+        case 17: /* RRE format, ldxbr instruction */
+                /* long double store but float load */
+                if (opcode[3] & 2)
+			return SIGILL;
+                emu_store_regd(opcode[3] & 15);
+                emu_store_regd((opcode[3] & 15) + 2);
+                /* call the emulation function */
+                _fex = ((int (*)(struct pt_regs *, int, int))
+			jump_table[opcode[1]])
+                        (regs, opcode[3] >> 4, opcode[3] & 15);
+                emu_load_rege((opcode[3] >> 4) & 15);
+                break;
+        case 18: /* RRE format, ledbr instruction */
+                /* double store but float load */
+                emu_store_regd(opcode[3] & 15);
+                /* call the emulation function */
+                _fex = ((int (*)(struct pt_regs *, int, int))
+			jump_table[opcode[1]])
+                        (regs, opcode[3] >> 4, opcode[3] & 15);
+                emu_load_rege((opcode[3] >> 4) & 15);
+                break;
+        case 19: /* RRE format, efpc & sfpc instruction */
+                /* call the emulation function */
+                _fex = ((int (*)(struct pt_regs *, int, int))
+			jump_table[opcode[1]])
+                        (regs, opcode[3] >> 4, opcode[3] & 15);
+                break;
+        default: /* invalid operation */
+                return SIGILL;
+        }
+	if (_fex != 0) {
+		current->thread.fp_regs.fpc |= _fex;
+		if (current->thread.fp_regs.fpc & (_fex << 8))
+			return SIGFPE;
+	}
+	return 0;
+}
+
+/* Compute the effective operand address of an RX-type instruction:
+ * 12-bit displacement plus index and base register contents (register
+ * number 0 reads as zero, as architected). */
+static void* calc_addr(struct pt_regs *regs, int rx, int rb, int disp)
+{
+        addr_t addr = disp & 0xfff;           /* 12-bit displacement */
+
+        rx &= 15;
+        rb &= 15;
+        if (rx != 0)
+                addr += regs->gprs[rx];       /* + index register */
+        if (rb != 0)
+                addr += regs->gprs[rb];       /* + base register */
+        return (void*) addr;
+}
+    
+/*
+ * Emulate instructions with the 0xed opcode (RXE and RXF format binary
+ * floating point instructions).  opcode[5] is the extended opcode byte;
+ * format_table maps it to an operand handling scheme and jump_table to
+ * the emulation routine.  Returns 0 on success, SIGILL for an invalid
+ * encoding and SIGFPE when the emulation raised an enabled IEEE
+ * exception.
+ */
+int math_emu_ed(__u8 *opcode, struct pt_regs * regs) {
+        int _fex = 0;
+
+        static const __u8 format_table[256] = {
+                [0x04] = 0x06,[0x05] = 0x05,[0x06] = 0x07,[0x07] = 0x05,
+		[0x08] = 0x02,[0x09] = 0x02,[0x0a] = 0x02,[0x0b] = 0x02,
+		[0x0c] = 0x06,[0x0d] = 0x02,[0x0e] = 0x04,[0x0f] = 0x04,
+                [0x10] = 0x08,[0x11] = 0x09,[0x12] = 0x0a,[0x14] = 0x02,
+		[0x15] = 0x01,[0x17] = 0x02,[0x18] = 0x01,[0x19] = 0x01,
+		[0x1a] = 0x01,[0x1b] = 0x01,[0x1c] = 0x01,[0x1d] = 0x01,
+                [0x1e] = 0x03,[0x1f] = 0x03,
+        };
+        static const void *jump_table[]= {
+                [0x04] = emu_ldeb,[0x05] = emu_lxdb,[0x06] = emu_lxeb,
+                [0x07] = emu_mxdb,[0x08] = emu_keb, [0x09] = emu_ceb,
+                [0x0a] = emu_aeb, [0x0b] = emu_seb, [0x0c] = emu_mdeb,
+                [0x0d] = emu_deb, [0x0e] = emu_maeb,[0x0f] = emu_mseb,
+                [0x10] = emu_tceb,[0x11] = emu_tcdb,[0x12] = emu_tcxb,
+                [0x14] = emu_sqeb,[0x15] = emu_sqdb,[0x17] = emu_meeb,
+                [0x18] = emu_kdb, [0x19] = emu_cdb, [0x1a] = emu_adb,
+                [0x1b] = emu_sdb, [0x1c] = emu_mdb, [0x1d] = emu_ddb,
+                [0x1e] = emu_madb,[0x1f] = emu_msdb
+        };
+
+        switch (format_table[opcode[5]]) {
+        case 1: /* RXE format, double constant */ {
+                __u64 *dxb, temp;
+                __u32 opc;
+
+                emu_store_regd((opcode[1] >> 4) & 15);
+                opc = *((__u32 *) opcode);
+                dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
+                mathemu_copy_from_user(&temp, dxb, 8);
+                /* call the emulation function */
+                _fex = ((int (*)(struct pt_regs *, int, double *))
+			jump_table[opcode[5]])
+                        (regs, opcode[1] >> 4, (double *) &temp);
+                emu_load_regd((opcode[1] >> 4) & 15);
+                break;
+        }
+        case 2: /* RXE format, float constant */ {
+                __u32 *dxb, temp;
+                __u32 opc;
+
+                emu_store_rege((opcode[1] >> 4) & 15);
+                opc = *((__u32 *) opcode);
+                dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
+                mathemu_get_user(temp, dxb);
+                /* call the emulation function */
+                _fex = ((int (*)(struct pt_regs *, int, float *))
+			jump_table[opcode[5]])
+                        (regs, opcode[1] >> 4, (float *) &temp);
+                emu_load_rege((opcode[1] >> 4) & 15);
+                break;
+        }
+        case 3: /* RXF format, double constant */ {
+                __u64 *dxb, temp;
+                __u32 opc;
+
+                emu_store_regd((opcode[1] >> 4) & 15);
+                emu_store_regd((opcode[4] >> 4) & 15);
+                opc = *((__u32 *) opcode);
+                dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
+                mathemu_copy_from_user(&temp, dxb, 8);
+                /* call the emulation function */
+                _fex = ((int (*)(struct pt_regs *, int, double *, int))
+			jump_table[opcode[5]])
+                        (regs, opcode[1] >> 4, (double *) &temp, opcode[4] >> 4);
+                /* NOTE(review): this reloads opcode[1]>>4 while case 4
+                 * below reloads opcode[4]>>4 - the two RXF cases look
+                 * inconsistent; verify which register the emulation
+                 * routines (emu_madb/emu_maeb) actually write. */
+                emu_load_regd((opcode[1] >> 4) & 15);
+                break;
+        }
+        case 4: /* RXF format, float constant */ {
+                __u32 *dxb, temp;
+                __u32 opc;
+
+                emu_store_rege((opcode[1] >> 4) & 15);
+                emu_store_rege((opcode[4] >> 4) & 15);
+                opc = *((__u32 *) opcode);
+                dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
+                mathemu_get_user(temp, dxb);
+                /* call the emulation function */
+                _fex = ((int (*)(struct pt_regs *, int, float *, int))
+			jump_table[opcode[5]])
+                        (regs, opcode[1] >> 4, (float *) &temp, opcode[4] >> 4);
+                emu_load_rege((opcode[4] >> 4) & 15);
+                break;
+        }
+        case 5: /* RXE format, double constant */
+                /* store double and load long double */ 
+        {
+                __u64 *dxb, temp;
+                __u32 opc;
+                /* The result is a long double: R1 must name a valid
+                 * floating point register pair {0,1,4,5,8,9,12,13}.
+                 * The old test against 0x20 was always false since the
+                 * R1 field is only 4 bits wide. */
+                if ((opcode[1] >> 4) & 2)
+			return SIGILL;
+                emu_store_regd((opcode[1] >> 4) & 15);
+                opc = *((__u32 *) opcode);
+                dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
+                mathemu_copy_from_user(&temp, dxb, 8);
+                /* call the emulation function */
+                _fex = ((int (*)(struct pt_regs *, int, double *))
+			jump_table[opcode[5]])
+                        (regs, opcode[1] >> 4, (double *) &temp);
+                emu_load_regd((opcode[1] >> 4) & 15);
+                emu_load_regd(((opcode[1] >> 4) & 15) + 2);
+                break;
+        }
+        case 6: /* RXE format, float constant */
+                /* store float and load double */ 
+        {
+                __u32 *dxb, temp;
+                __u32 opc;
+                emu_store_rege((opcode[1] >> 4) & 15);
+                opc = *((__u32 *) opcode);
+                dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
+                mathemu_get_user(temp, dxb);
+                /* call the emulation function */
+                _fex = ((int (*)(struct pt_regs *, int, float *))
+			jump_table[opcode[5]])
+                        (regs, opcode[1] >> 4, (float *) &temp);
+                emu_load_regd((opcode[1] >> 4) & 15);
+                break;
+        }
+        case 7: /* RXE format, float constant */
+                /* store float and load long double */ 
+        {
+                __u32 *dxb, temp;
+                __u32 opc;
+                /* Long double result: R1 must be a valid register pair
+                 * (see case 5; the old 0x20 test was always false). */
+                if ((opcode[1] >> 4) & 2)
+			return SIGILL;
+                emu_store_rege((opcode[1] >> 4) & 15);
+                opc = *((__u32 *) opcode);
+                dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
+                mathemu_get_user(temp, dxb);
+                /* call the emulation function */
+                _fex = ((int (*)(struct pt_regs *, int, float *))
+			jump_table[opcode[5]])
+                        (regs, opcode[1] >> 4, (float *) &temp);
+                emu_load_regd((opcode[1] >> 4) & 15);
+                emu_load_regd(((opcode[1] >> 4) & 15) + 2);
+                break;
+        }
+        case 8: /* RXE format, RX address used as int value */ {
+                __u64 dxb;
+                __u32 opc;
+
+                emu_store_rege((opcode[1] >> 4) & 15);
+                opc = *((__u32 *) opcode);
+                dxb = (__u64) calc_addr(regs, opc >> 16, opc >> 12, opc);
+                /* call the emulation function */
+                _fex = ((int (*)(struct pt_regs *, int, long))
+			jump_table[opcode[5]])
+                        (regs, opcode[1] >> 4, dxb);
+                break;
+        }
+        case 9: /* RXE format, RX address used as int value */ {
+                __u64 dxb;
+                __u32 opc;
+
+                emu_store_regd((opcode[1] >> 4) & 15);
+                opc = *((__u32 *) opcode);
+                dxb = (__u64) calc_addr(regs, opc >> 16, opc >> 12, opc);
+                /* call the emulation function */
+                _fex = ((int (*)(struct pt_regs *, int, long))
+			jump_table[opcode[5]])
+                        (regs, opcode[1] >> 4, dxb);
+                break;
+        }
+        case 10: /* RXE format, RX address used as int value */ {
+                __u64 dxb;
+                __u32 opc;
+
+                /* long double operand: R1 must be a valid pair */
+                if ((opcode[1] >> 4) & 2)
+			return SIGILL;
+                emu_store_regd((opcode[1] >> 4) & 15);
+                emu_store_regd(((opcode[1] >> 4) & 15) + 2);
+                opc = *((__u32 *) opcode);
+                dxb = (__u64) calc_addr(regs, opc >> 16, opc >> 12, opc);
+                /* call the emulation function */
+                _fex = ((int (*)(struct pt_regs *, int, long))
+			jump_table[opcode[5]])
+                        (regs, opcode[1] >> 4, dxb);
+                break;
+        }
+        default: /* invalid operation */
+                return SIGILL;
+        }
+	if (_fex != 0) {
+		/* Merge the new exception flags into the FPC; a flag that
+		 * is also enabled by its mask bit raises SIGFPE. */
+		current->thread.fp_regs.fpc |= _fex;
+		if (current->thread.fp_regs.fpc & (_fex << 8))
+			return SIGFPE;
+	}
+	return 0;
+}
+
+/*
+ * Emulate LDR Rx,Ry with Rx or Ry not in {0, 2, 4, 6}.
+ * Per the tests below, registers 0,2,4,6 are the ones present in
+ * hardware here; the others live only as software copies in
+ * current->thread.fp_regs.  Depending on which operand is real,
+ * move between hardware and the software copies by patching the
+ * register number into an ld/std template with EXECUTE.
+ */
+int math_emu_ldr(__u8 *opcode) {
+        s390_fp_regs *fp_regs = &current->thread.fp_regs;
+        __u16 opc = *((__u16 *) opcode);
+
+        if ((opc & 0x90) == 0) {           /* test if rx in {0,2,4,6} */
+                /* we got an exception therefore ry can't be in {0,2,4,6} */
+                /* bras leaves the address of the "ld" template in r1;
+                   ex OR's (opc & 0xf0) - the rx field - into the
+                   template's R1 nibble and executes it */
+                __asm__ __volatile (       /* load rx from fp_regs.fprs[ry] */
+                        "     bras  1,0f\n"
+                        "     ld    0,0(%1)\n"
+                        "0:   ex    %0,0(1)"
+                        : /* no output */
+                        : "a" (opc & 0xf0),
+                          "a" (&fp_regs->fprs[opc & 0xf].d)
+                        : "1" );
+        } else if ((opc & 0x9) == 0) {     /* test if ry in {0,2,4,6} */
+                /* ry is a hardware register: patch ry into an "std"
+                   template and store it into the software copy of rx */
+                __asm__ __volatile (       /* store ry to fp_regs.fprs[rx] */
+                        "     bras  1,0f\n"
+                        "     std   0,0(%1)\n"
+                        "0:   ex    %0,0(1)"
+                        : /* no output */
+                        : "a" ((opc & 0xf) << 4),
+                          "a" (&fp_regs->fprs[(opc & 0xf0)>>4].d)
+                        : "1" );
+        } else  /* move fp_regs.fprs[ry] to fp_regs.fprs[rx] */
+                fp_regs->fprs[(opc & 0xf0) >> 4] = fp_regs->fprs[opc & 0xf];
+	return 0;
+}
+
+/*
+ * Emulate LER Rx,Ry with Rx or Ry not in {0, 2, 4, 6}.
+ * Short (4-byte) counterpart of math_emu_ldr above: uses le/ste
+ * templates patched via EXECUTE to move between the hardware
+ * registers 0,2,4,6 and the software copies in fp_regs.
+ */
+int math_emu_ler(__u8 *opcode) {
+        s390_fp_regs *fp_regs = &current->thread.fp_regs;
+        __u16 opc = *((__u16 *) opcode);
+
+        if ((opc & 0x90) == 0) {           /* test if rx in {0,2,4,6} */
+                /* we got an exception therefore ry can't be in {0,2,4,6} */
+                /* ex OR's the rx field into the "le" template's R1
+                   nibble (template address left in r1 by bras) */
+                __asm__ __volatile (       /* load rx from fp_regs.fprs[ry] */
+                        "     bras  1,0f\n"
+                        "     le    0,0(%1)\n"
+                        "0:   ex    %0,0(1)"
+                        : /* no output */
+                        : "a" (opc & 0xf0),
+                          "a" (&fp_regs->fprs[opc & 0xf].f)
+                        : "1" );
+        } else if ((opc & 0x9) == 0) {     /* test if ry in {0,2,4,6} */
+                /* ry is a hardware register: patch ry into an "ste"
+                   template and store it into the software copy of rx */
+                __asm__ __volatile (       /* store ry to fp_regs.fprs[rx] */
+                        "     bras  1,0f\n"
+                        "     ste   0,0(%1)\n"
+                        "0:   ex    %0,0(1)"
+                        : /* no output */
+                        : "a" ((opc & 0xf) << 4),
+                          "a" (&fp_regs->fprs[(opc & 0xf0) >> 4].f)
+                        : "1" );
+        } else  /* move fp_regs.fprs[ry] to fp_regs.fprs[rx] */
+                fp_regs->fprs[(opc & 0xf0) >> 4] = fp_regs->fprs[opc & 0xf];
+	return 0;
+}
+
+/*
+ * Emulate LD R,D(X,B) with R not in {0, 2, 4, 6}:
+ * fetch the 8-byte storage operand into the software copy of
+ * floating point register R (bits 8-11 of the instruction).
+ */
+int math_emu_ld(__u8 *opcode, struct pt_regs * regs) {
+        __u32 opc = *((__u32 *) opcode);
+        __u64 *src = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
+
+        mathemu_copy_from_user(&current->thread.fp_regs.fprs[(opc >> 20) & 0xf].d,
+			       src, 8);
+	return 0;
+}
+
+/*
+ * Emulate LE R,D(X,B) with R not in {0, 2, 4, 6}:
+ * fetch the 4-byte storage operand into the short-format image of
+ * the software copy of floating point register R.
+ */
+int math_emu_le(__u8 *opcode, struct pt_regs * regs) {
+        s390_fp_regs *fp_regs = &current->thread.fp_regs;
+        __u32 opc = *((__u32 *) opcode);
+        __u32 *src = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
+        __u32 *dst = (__u32 *) (&fp_regs->fprs[(opc >> 20) & 0xf].f);
+
+        mathemu_get_user(dst[0], src);
+	return 0;
+}
+
+/*
+ * Emulate STD R,D(X,B) with R not in {0, 2, 4, 6}:
+ * store the 8-byte software copy of floating point register R to
+ * the computed effective address.
+ */
+int math_emu_std(__u8 *opcode, struct pt_regs * regs) {
+        __u32 opc = *((__u32 *) opcode);
+        __u64 *dst = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
+
+        mathemu_copy_to_user(dst, &current->thread.fp_regs.fprs[(opc >> 20) & 0xf].d,
+			     8);
+	return 0;
+}
+
+/*
+ * Emulate STE R,D(X,B) with R not in {0, 2, 4, 6}:
+ * store the short-format image of the software copy of floating
+ * point register R to the computed effective address.
+ */
+int math_emu_ste(__u8 *opcode, struct pt_regs * regs) {
+        s390_fp_regs *fp_regs = &current->thread.fp_regs;
+        __u32 opc = *((__u32 *) opcode);
+        __u32 *dst = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
+        __u32 *src = (__u32 *) (&fp_regs->fprs[(opc >> 20) & 0xf].f);
+
+        mathemu_put_user(src[0], dst);
+	return 0;
+}
+
+/*
+ * Emulate LFPC D(B): load the software FPC from storage.
+ * Returns SIGILL if any bit outside FPC_VALID_MASK is set.
+ */
+int math_emu_lfpc(__u8 *opcode, struct pt_regs *regs) {
+        __u32 opc = *((__u32 *) opcode);
+        __u32 *src, new_fpc;
+
+        /* LFPC has no index register: base and displacement only */
+        src = (__u32 *) calc_addr(regs, 0, opc >> 12, opc);
+        mathemu_get_user(new_fpc, src);
+        if ((new_fpc & ~FPC_VALID_MASK) != 0)
+		return SIGILL;
+	current->thread.fp_regs.fpc = new_fpc;
+        return 0;
+}
+
+/*
+ * Emulate STFPC D(B): store the software FPC to storage.
+ */
+int math_emu_stfpc(__u8 *opcode, struct pt_regs *regs) {
+        __u32 opc = *((__u32 *) opcode);
+        __u32 *dst;
+
+        /* STFPC has no index register: base and displacement only */
+        dst = (__u32 *) calc_addr(regs, 0, opc >> 12, opc);
+        mathemu_put_user(current->thread.fp_regs.fpc, dst);
+        return 0;
+}
+
+/*
+ * Emulate SRNM D(B): the low two bits of the second operand ADDRESS
+ * select the rounding mode in the software FPC; the addressed storage
+ * itself is not referenced.
+ */
+int math_emu_srnm(__u8 *opcode, struct pt_regs *regs) {
+        __u32 opc = *((__u32 *) opcode);
+        __u32 rnd_mode;
+
+        /* calc_addr returns a pointer, but SRNM uses the address as a
+         * value - convert explicitly instead of relying on an implicit
+         * (invalid) pointer-to-integer assignment. */
+        rnd_mode = (__u32)(unsigned long) calc_addr(regs, 0, opc >> 12, opc);
+	current->thread.fp_regs.fpc &= ~3;
+        current->thread.fp_regs.fpc |= (rnd_mode & 3);
+        return 0;
+}
+
+/* broken compiler: provide the 64 bit negate helper the compiler emits
+ * calls to (presumably a libcall gcc generates that is not otherwise
+ * available in the kernel - confirm).  Works on the two 32 bit halves;
+ * on this big-endian 31 bit target s[0] is the high word and s[1] the
+ * low word. */
+long long
+__negdi2 (long long u)
+{
+
+  union lll {
+    long long ll;
+    long s[2];
+  };
+
+  union lll w,uu;
+
+  uu.ll = u;
+
+  /* two's complement: negate the low half, then negate the high half
+     with an extra borrow whenever the negated low half is non-zero */
+  w.s[1] = -uu.s[1];
+  w.s[0] = -uu.s[0] - ((int) w.s[1] != 0);
+
+  return w.ll;
+}
diff --git a/arch/s390/math-emu/qrnnd.S b/arch/s390/math-emu/qrnnd.S
new file mode 100644
index 0000000..b01c2b6
--- /dev/null
+++ b/arch/s390/math-emu/qrnnd.S
@@ -0,0 +1,77 @@
+# S/390 __udiv_qrnnd
+
+# r2 : &__r
+# r3 : upper half of 64 bit word n
+# r4 : lower half of 64 bit word n
+# r5 : divisor d
+# the remainder r of the division is to be stored to &__r and
+# the quotient q is to be returned
+#
+# The machine only has a signed 64/32 divide (dr), so for a divisor
+# with the sign bit set compute (n/4) / (d/2) and afterwards correct
+# quotient and remainder for the shifted-out bits; for a small divisor
+# (n/2) / d is safe and only one correction step is needed.
+
+        .text
+        .globl __udiv_qrnnd
+__udiv_qrnnd:
+        st    %r2,24(%r15)        # store pointer to remainder for later
+        lr    %r0,%r3             # reload n
+	lr    %r1,%r4
+	ltr   %r2,%r5             # reload and test divisor
+        jp    5f
+        # divisor >= 0x80000000
+	srdl  %r0,2               # n/4
+        srl   %r2,1               # d/2
+        slr   %r1,%r2             # special case if last bit of d is set
+        brc   3,0f                #  (n/4) div (d/2) can overflow by 1
+        ahi   %r0,-1              #  trick: subtract d/2, then divide
+0:      dr    %r0,%r2             # signed division
+        ahi   %r1,1               #  trick part 2: add 1 to the quotient
+        # now (n >> 2) = (d >> 1) * %r1 + %r0
+	lhi   %r3,1
+        nr    %r3,%r1             # test last bit of q
+        jz    1f
+        alr   %r0,%r2             # add (d>>1) to r
+1:      srl   %r1,1               # q >>= 1
+        # now (n >> 2) = (d&-2) * %r1 + %r0
+        lhi   %r3,1
+        nr    %r3,%r5             # test last bit of d
+        jz    2f
+        slr   %r0,%r1             # r -= q
+	brc   3,2f                # borrow ?
+	alr   %r0,%r5             # r += d
+	ahi   %r1,-1
+2:      # now (n >> 2) = d * %r1 + %r0
+        alr   %r1,%r1             # q <<= 1
+        alr   %r0,%r0             # r <<= 1
+        brc   12,3f               # overflow on r ?
+        slr   %r0,%r5             # r -= d
+        ahi   %r1,1               # q += 1
+3:      lhi   %r3,2
+        nr    %r3,%r4             # test next to last bit of n
+        jz    4f
+        ahi   %r0,1               # r += 1
+4:      clr   %r0,%r5             # r >= d ?
+        jl    6f
+        slr   %r0,%r5             # r -= d
+        ahi   %r1,1               # q += 1
+        # now (n >> 1) = d * %r1 + %r0
+        j     6f
+5:      # divisor < 0x80000000
+	srdl  %r0,1
+        dr    %r0,%r2             # signed division
+        # now (n >> 1) = d * %r1 + %r0
+6:      alr   %r1,%r1             # q <<= 1
+        alr   %r0,%r0             # r <<= 1
+        brc   12,7f               # overflow on r ?
+        slr   %r0,%r5             # r -= d
+        ahi   %r1,1               # q += 1
+7:      lhi   %r3,1
+        nr    %r3,%r4             # isolate last bit of n
+        alr   %r0,%r3             # r += (n & 1)
+        clr   %r0,%r5             # r >= d ?
+        jl    8f
+        slr   %r0,%r5             # r -= d
+        ahi   %r1,1               # q += 1
+8:      # now n = d * %r1 + %r0
+	l     %r2,24(%r15)        # fetch the remainder pointer again
+        st    %r0,0(%r2)          # *__r = r
+        lr    %r2,%r1             # return q
+        br    %r14
+	.end	__udiv_qrnnd
diff --git a/arch/s390/math-emu/sfp-util.h b/arch/s390/math-emu/sfp-util.h
new file mode 100644
index 0000000..ab556b6
--- /dev/null
+++ b/arch/s390/math-emu/sfp-util.h
@@ -0,0 +1,63 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+
+/* 64 bit add: (sh:sl) = (ah:al) + (bh:bl).
+ * "alr" sets cc 2/3 when the low-word addition carries; "brc 12"
+ * (cc 0 or 1) skips the carry propagation into the high word. */
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) ({		\
+        unsigned int __sh = (ah);			\
+        unsigned int __sl = (al);			\
+        __asm__ ("   alr  %1,%3\n"			\
+                 "   brc  12,0f\n"			\
+                 "   ahi  %0,1\n"			\
+                 "0: alr  %0,%2"			\
+                 : "+&d" (__sh), "+d" (__sl)		\
+                 : "d" (bh), "d" (bl) : "cc" );		\
+        (sh) = __sh;					\
+        (sl) = __sl;					\
+})
+
+/* 64 bit subtract: (sh:sl) = (ah:al) - (bh:bl).
+ * "slr" indicates "no borrow" via cc 2/3, so "brc 3" skips the borrow
+ * propagation into the high word. */
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) ({		\
+       unsigned int __sh = (ah);			\
+       unsigned int __sl = (al);			\
+       __asm__ ("   slr  %1,%3\n"			\
+                "   brc  3,0f\n"			\
+                "   ahi  %0,-1\n"			\
+                "0: slr  %0,%2"				\
+                : "+&d" (__sh), "+d" (__sl)		\
+                : "d" (bh), "d" (bl) : "cc" );		\
+       (sh) = __sh;					\
+       (sl) = __sl;					\
+})
+
+/* 32x32 -> 64 bit unsigned multiply: (wh:wl) = u * v.
+ * "mr" multiplies SIGNED into the even/odd pair r0/r1.  When an
+ * operand has its sign bit set, the signed product differs from the
+ * unsigned one by the other operand times 2^32, so the "jnm"-guarded
+ * adds correct the high word:
+ *   u umul v = u smul v + (u >= 2^31 ? v << 32 : 0)
+ *                       + (v >= 2^31 ? u << 32 : 0)
+ */
+#define umul_ppmm(wh, wl, u, v) ({			\
+        unsigned int __wh = u;				\
+        unsigned int __wl = v;				\
+        __asm__ ("   ltr  1,%0\n"			\
+                 "   mr   0,%1\n"			\
+                 "   jnm  0f\n"				\
+                 "   alr  0,%1\n"			\
+                 "0: ltr  %1,%1\n"			\
+                 "   jnm  1f\n"				\
+                 "   alr  0,%0\n"			\
+                 "1: lr   %0,0\n"			\
+                 "   lr   %1,1\n"			\
+                 : "+d" (__wh), "+d" (__wl)		\
+                 : : "0", "1", "cc" );			\
+        wh = __wh;					\
+        wl = __wl;					\
+})
+
+/* Divide the 64 bit value (n1:n0) by d: quotient into q, remainder
+ * into r.  The actual division is done out of line in qrnnd.S. */
+#define udiv_qrnnd(q, r, n1, n0, d)			\
+  do { unsigned long __r;				\
+    (q) = __udiv_qrnnd (&__r, (n1), (n0), (d));		\
+    (r) = __r;						\
+  } while (0)
+extern unsigned long __udiv_qrnnd (unsigned long *, unsigned long,
+				   unsigned long , unsigned long);
+
+/* __udiv_qrnnd copes with any divisor, so the soft-fp core need not
+ * pre-normalize d */
+#define UDIV_NEEDS_NORMALIZATION 0
+
+/* the soft-fp macros expand abort() in should-not-happen paths */
+#define abort() return 0
+
+#define __BYTE_ORDER __BIG_ENDIAN
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
new file mode 100644
index 0000000..aa9a42b
--- /dev/null
+++ b/arch/s390/mm/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the linux s390-specific parts of the memory manager.
+#
+
+obj-y	 := init.o fault.o ioremap.o extmem.o mmap.o
+obj-$(CONFIG_CMM) += cmm.o
+
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
new file mode 100644
index 0000000..d30cdb4
--- /dev/null
+++ b/arch/s390/mm/cmm.c
@@ -0,0 +1,443 @@
+/*
+ *  arch/s390/mm/cmm.c
+ *
+ *  S390 version
+ *    Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ *  Collaborative memory management interface.
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/sysctl.h>
+#include <linux/ctype.h>
+
+#include <asm/pgalloc.h>
+#include <asm/uaccess.h>
+
+#include "../../../drivers/s390/net/smsgiucv.h"
+
+#define CMM_NR_PAGES ((PAGE_SIZE / sizeof(unsigned long)) - 2)
+
+struct cmm_page_array {
+	struct cmm_page_array *next;
+	unsigned long index;
+	unsigned long pages[CMM_NR_PAGES];
+};
+
+static long cmm_pages = 0;
+static long cmm_timed_pages = 0;
+static volatile long cmm_pages_target = 0;
+static volatile long cmm_timed_pages_target = 0;
+static long cmm_timeout_pages = 0;
+static long cmm_timeout_seconds = 0;
+
+static struct cmm_page_array *cmm_page_list = 0;
+static struct cmm_page_array *cmm_timed_page_list = 0;
+
+static unsigned long cmm_thread_active = 0;
+static struct work_struct cmm_thread_starter;
+static wait_queue_head_t cmm_thread_wait;
+static struct timer_list cmm_timer;
+
+static void cmm_timer_fn(unsigned long);
+static void cmm_set_timer(void);
+
+/* Parse an unsigned number written in C syntax: a leading "0" selects
+ * octal, "0x"/"0X" followed by a hex digit selects hexadecimal,
+ * anything else is decimal.  *endp is set past the last digit used. */
+static long
+cmm_strtoul(const char *cp, char **endp)
+{
+	unsigned int base;
+
+	if (*cp != '0') {
+		base = 10;
+	} else if ((cp[1] == 'x' || cp[1] == 'X') && isxdigit(cp[2])) {
+		base = 16;
+		cp += 2;
+	} else {
+		base = 8;
+		cp++;
+	}
+	return simple_strtoul(cp, endp, base);
+}
+
+/* Take "pages" pages away from the kernel: allocate each page, record
+ * it on the page-array list and hand it over via diag10.  Increments
+ * *counter per page taken; returns how many pages could NOT be
+ * allocated. */
+static long
+cmm_alloc_pages(long pages, long *counter, struct cmm_page_array **list)
+{
+	struct cmm_page_array *pa = *list;
+	unsigned long addr;
+
+	while (pages) {
+		addr = __get_free_page(GFP_NOIO);
+		if (!addr)
+			break;
+		if (pa == NULL || pa->index >= CMM_NR_PAGES) {
+			/* current array page missing or full - get a
+			 * fresh one and chain it at the list head */
+			pa = (struct cmm_page_array *)
+				__get_free_page(GFP_NOIO);
+			if (pa == NULL) {
+				free_page(addr);
+				break;
+			}
+			pa->next = *list;
+			pa->index = 0;
+			*list = pa;
+		}
+		diag10(addr);
+		pa->pages[pa->index++] = addr;
+		(*counter)++;
+		pages--;
+	}
+	return pages;
+}
+
+/* Give "pages" pages back to the kernel: pop page addresses off the
+ * page-array list and free them.  Stops early if the list runs dry.
+ * Decrements *counter per page returned. */
+static void
+cmm_free_pages(long pages, long *counter, struct cmm_page_array **list)
+{
+	struct cmm_page_array *pa;
+	unsigned long page;
+
+	pa = *list;	/* pa tracks the list head throughout the loop */
+	while (pages) {
+		if (!pa || pa->index <= 0)
+			break;
+		page = pa->pages[--pa->index];
+		if (pa->index == 0) {
+			/* head array is empty now: unlink and free the
+			 * array page itself, then free "page" below */
+			pa = pa->next;
+			free_page((unsigned long) *list);
+			*list = pa;
+		}
+		free_page(page);
+		(*counter)--;
+		pages--;
+	}
+}
+
+/* Kernel thread that does the actual page allocation/freeing.  Sleeps
+ * until one of the page counters no longer matches its target, then
+ * moves one page per loop iteration towards the target.  A kill signal
+ * (wait_event_interruptible returns -ERESTARTSYS) terminates it. */
+static int
+cmm_thread(void *dummy)
+{
+	int rc;
+
+	daemonize("cmmthread");
+	while (1) {
+		rc = wait_event_interruptible(cmm_thread_wait,
+			(cmm_pages != cmm_pages_target ||
+			 cmm_timed_pages != cmm_timed_pages_target));
+		if (rc == -ERESTARTSYS) {
+			/* Got kill signal. End thread. */
+			clear_bit(0, &cmm_thread_active);
+			/* pretend the targets are met so nobody keeps
+			 * waiting for this thread to make progress */
+			cmm_pages_target = cmm_pages;
+			cmm_timed_pages_target = cmm_timed_pages;
+			break;
+		}
+		if (cmm_pages_target > cmm_pages) {
+			/* on allocation failure give up on the target */
+			if (cmm_alloc_pages(1, &cmm_pages, &cmm_page_list))
+				cmm_pages_target = cmm_pages;
+		} else if (cmm_pages_target < cmm_pages) {
+			cmm_free_pages(1, &cmm_pages, &cmm_page_list);
+		}
+		if (cmm_timed_pages_target > cmm_timed_pages) {
+			if (cmm_alloc_pages(1, &cmm_timed_pages,
+					   &cmm_timed_page_list))
+				cmm_timed_pages_target = cmm_timed_pages;
+		} else if (cmm_timed_pages_target < cmm_timed_pages) {
+			cmm_free_pages(1, &cmm_timed_pages,
+			       	       &cmm_timed_page_list);
+		}
+		/* make sure timed pages eventually trickle back */
+		if (cmm_timed_pages > 0 && !timer_pending(&cmm_timer))
+			cmm_set_timer();
+	}
+	return 0;
+}
+
+/* Work-queue callback that launches the allocator thread. */
+static void
+cmm_start_thread(void)
+{
+	kernel_thread(cmm_thread, NULL, 0);
+}
+
+/* Make sure the allocator thread is running and wake it up.  The bit
+ * in cmm_thread_active guards against starting a second thread; the
+ * actual start is deferred to a work queue (this may be called from
+ * contexts that cannot create threads directly). */
+static void
+cmm_kick_thread(void)
+{
+	if (!test_and_set_bit(0, &cmm_thread_active))
+		schedule_work(&cmm_thread_starter);
+	wake_up(&cmm_thread_wait);
+}
+
+/* (Re-)arm the timed-page release timer, or cancel it when no timed
+ * pages are wanted or no timeout is configured. */
+static void
+cmm_set_timer(void)
+{
+	if (cmm_timed_pages_target <= 0 || cmm_timeout_seconds <= 0) {
+		if (timer_pending(&cmm_timer))
+			del_timer(&cmm_timer);
+		return;
+	}
+	/* if the timer is still pending just push its expiry out */
+	if (timer_pending(&cmm_timer) &&
+	    mod_timer(&cmm_timer, jiffies + cmm_timeout_seconds*HZ))
+		return;
+	cmm_timer.function = cmm_timer_fn;
+	cmm_timer.data = 0;
+	cmm_timer.expires = jiffies + cmm_timeout_seconds*HZ;
+	add_timer(&cmm_timer);
+}
+
+/* Timer expiry: lower the timed-page target by cmm_timeout_pages
+ * (clamped at zero), let the thread release the pages and rearm. */
+static void
+cmm_timer_fn(unsigned long ignored)
+{
+	long remaining = cmm_timed_pages_target - cmm_timeout_pages;
+
+	cmm_timed_pages_target = (remaining < 0) ? 0 : remaining;
+	cmm_kick_thread();
+	cmm_set_timer();
+}
+
+/* External interface: set the target number of permanently held pages. */
+void
+cmm_set_pages(long pages)
+{
+	cmm_pages_target = pages;
+	cmm_kick_thread();
+}
+
+/* External interface: number of permanently held pages right now. */
+long
+cmm_get_pages(void)
+{
+	return cmm_pages;
+}
+
+/* External interface: add pages to the timed pool; they trickle back
+ * at the rate configured with cmm_set_timeout(). */
+void
+cmm_add_timed_pages(long pages)
+{
+	cmm_timed_pages_target += pages;
+	cmm_kick_thread();
+}
+
+/* External interface: number of timed pages held right now. */
+long
+cmm_get_timed_pages(void)
+{
+	return cmm_timed_pages;
+}
+
+/* External interface: release "pages" timed pages every "seconds"
+ * seconds (see cmm_timer_fn). */
+void
+cmm_set_timeout(long pages, long seconds)
+{
+	cmm_timeout_pages = pages;
+	cmm_timeout_seconds = seconds;
+	cmm_set_timer();
+}
+
+/* Advance past spaces and tabs starting at cp; *endp receives the
+ * first non-blank position.  Returns non-zero iff at least one blank
+ * character was skipped. */
+static inline int
+cmm_skip_blanks(char *cp, char **endp)
+{
+	char *str = cp;
+
+	while (*str == ' ' || *str == '\t')
+		str++;
+	*endp = str;
+	return str != cp;
+}
+
+#ifdef CONFIG_CMM_PROC
+/* These will someday get removed. */
+#define VM_CMM_PAGES		1111
+#define VM_CMM_TIMED_PAGES	1112
+#define VM_CMM_TIMEOUT		1113
+
+static struct ctl_table cmm_table[];
+
+/*
+ * sysctl handler for /proc/sys/vm/cmm_pages and cmm_timed_pages.
+ * Writing sets the (timed) page target, reading returns the number of
+ * pages currently held.  The two entries are told apart by comparing
+ * ctl against &cmm_table[0].
+ */
+static int
+cmm_pages_handler(ctl_table *ctl, int write, struct file *filp,
+		  void *buffer, size_t *lenp, loff_t *ppos)
+{
+	char buf[16], *p;
+	long pages;
+	int len;
+
+	if (!*lenp || (*ppos && !write)) {
+		*lenp = 0;
+		return 0;
+	}
+
+	if (write) {
+		size_t n;
+
+		len = *lenp;
+		/* Copy at most sizeof(buf)-1 bytes and terminate at the
+		 * copied length; terminating only at buf[15] would let
+		 * the parser run into uninitialized stack bytes for
+		 * writes shorter than the buffer. */
+		n = (len > sizeof(buf) - 1) ? sizeof(buf) - 1 : len;
+		if (copy_from_user(buf, buffer, n))
+			return -EFAULT;
+		buf[n] = '\0';
+		cmm_skip_blanks(buf, &p);
+		pages = cmm_strtoul(p, &p);
+		if (ctl == &cmm_table[0])
+			cmm_set_pages(pages);
+		else
+			cmm_add_timed_pages(pages);
+	} else {
+		if (ctl == &cmm_table[0])
+			pages = cmm_get_pages();
+		else
+			pages = cmm_get_timed_pages();
+		len = sprintf(buf, "%ld\n", pages);
+		if (len > *lenp)
+			len = *lenp;
+		if (copy_to_user(buffer, buf, len))
+			return -EFAULT;
+	}
+	*lenp = len;
+	*ppos += len;
+	return 0;
+}
+
+/*
+ * sysctl handler for /proc/sys/vm/cmm_timeout; the value is a pair
+ * "<pages> <seconds>" passed on to cmm_set_timeout().
+ */
+static int
+cmm_timeout_handler(ctl_table *ctl, int write, struct file *filp,
+		    void *buffer, size_t *lenp, loff_t *ppos)
+{
+	char buf[64], *p;
+	long pages, seconds;
+	int len;
+
+	if (!*lenp || (*ppos && !write)) {
+		*lenp = 0;
+		return 0;
+	}
+
+	if (write) {
+		size_t n;
+
+		len = *lenp;
+		/* Terminate at the copied length; terminating only at
+		 * the end of buf would let the parser read uninitialized
+		 * stack bytes for short writes. */
+		n = (len > sizeof(buf) - 1) ? sizeof(buf) - 1 : len;
+		if (copy_from_user(buf, buffer, n))
+			return -EFAULT;
+		buf[n] = '\0';
+		cmm_skip_blanks(buf, &p);
+		pages = cmm_strtoul(p, &p);
+		cmm_skip_blanks(p, &p);
+		seconds = cmm_strtoul(p, &p);
+		cmm_set_timeout(pages, seconds);
+	} else {
+		len = sprintf(buf, "%ld %ld\n",
+			      cmm_timeout_pages, cmm_timeout_seconds);
+		if (len > *lenp)
+			len = *lenp;
+		if (copy_to_user(buffer, buf, len))
+			return -EFAULT;
+	}
+	*lenp = len;
+	*ppos += len;
+	return 0;
+}
+
+/* sysctl entries; cmm_pages_handler distinguishes the static and the
+ * timed entry by comparing its ctl argument against &cmm_table[0]. */
+static struct ctl_table cmm_table[] = {
+	{
+		.ctl_name	= VM_CMM_PAGES,
+		.procname	= "cmm_pages",
+		.mode		= 0600,
+		.proc_handler	= &cmm_pages_handler,
+	},
+	{
+		.ctl_name	= VM_CMM_TIMED_PAGES,
+		.procname	= "cmm_timed_pages",
+		.mode		= 0600,
+		.proc_handler	= &cmm_pages_handler,
+	},
+	{
+		.ctl_name	= VM_CMM_TIMEOUT,
+		.procname	= "cmm_timeout",
+		.mode		= 0600,
+		.proc_handler	= &cmm_timeout_handler,
+	},
+	{ .ctl_name = 0 }
+};
+
+/* Hook cmm_table under the "vm" sysctl directory. */
+static struct ctl_table cmm_dir_table[] = {
+	{
+		.ctl_name	= CTL_VM,
+		.procname	= "vm",
+		.maxlen		= 0,
+		.mode		= 0555,
+		.child		= cmm_table,
+	},
+	{ .ctl_name = 0 }
+};
+#endif
+
+#ifdef CONFIG_CMM_IUCV
+#define SMSG_PREFIX "CMM"
+/* Handler for "CMM" smsg messages from the host:
+ *   CMM SHRINK <pages>           - set the static page target
+ *   CMM RELEASE <pages>          - add pages to the timed pool
+ *   CMM REUSE <pages> <seconds>  - set the timed release rate
+ * Malformed messages (missing blanks or trailing junk) are ignored. */
+static void
+cmm_smsg_target(char *msg)
+{
+	long pages, seconds;
+
+	if (!cmm_skip_blanks(msg + strlen(SMSG_PREFIX), &msg))
+		return;
+	if (strncmp(msg, "SHRINK", 6) == 0) {
+		if (!cmm_skip_blanks(msg + 6, &msg))
+			return;
+		pages = cmm_strtoul(msg, &msg);
+		cmm_skip_blanks(msg, &msg);
+		if (*msg != '\0')
+			return;
+		cmm_set_pages(pages);
+	} else if (strncmp(msg, "RELEASE", 7) == 0) {
+		if (!cmm_skip_blanks(msg + 7, &msg))
+			return;
+		pages = cmm_strtoul(msg, &msg);
+		cmm_skip_blanks(msg, &msg);
+		if (*msg != '\0')
+			return;
+		cmm_add_timed_pages(pages);
+	} else if (strncmp(msg, "REUSE", 5) == 0) {
+		if (!cmm_skip_blanks(msg + 5, &msg))
+			return;
+		pages = cmm_strtoul(msg, &msg);
+		if (!cmm_skip_blanks(msg, &msg))
+			return;
+		seconds = cmm_strtoul(msg, &msg);
+		cmm_skip_blanks(msg, &msg);
+		if (*msg != '\0')
+			return;
+		cmm_set_timeout(pages, seconds);
+	}
+}
+#endif
+
+struct ctl_table_header *cmm_sysctl_header;
+
+/* Module load.  Initialize the thread starter, wait queue and timer
+ * BEFORE registering the sysctl and smsg interfaces: a sysctl write or
+ * smsg message arriving right after registration kicks the thread and
+ * must find these structures usable. */
+static int
+cmm_init (void)
+{
+	INIT_WORK(&cmm_thread_starter, (void *) cmm_start_thread, 0);
+	init_waitqueue_head(&cmm_thread_wait);
+	init_timer(&cmm_timer);
+#ifdef CONFIG_CMM_PROC
+	cmm_sysctl_header = register_sysctl_table(cmm_dir_table, 1);
+#endif
+#ifdef CONFIG_CMM_IUCV
+	smsg_register_callback(SMSG_PREFIX, cmm_smsg_target);
+#endif
+	return 0;
+}
+
+/* Module unload: return all held pages and unregister the interfaces.
+ * NOTE(review): the timer and the allocator thread are not explicitly
+ * stopped, and the pages are freed BEFORE the sysctl/smsg callbacks
+ * are unregistered - a late callback or timer tick could still touch
+ * the lists; verify the unload ordering. */
+static void
+cmm_exit(void)
+{
+	cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list);
+	cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list);
+#ifdef CONFIG_CMM_PROC
+	unregister_sysctl_table(cmm_sysctl_header);
+#endif
+#ifdef CONFIG_CMM_IUCV
+	smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
+#endif
+}
+
+module_init(cmm_init);
+module_exit(cmm_exit);
+
+EXPORT_SYMBOL(cmm_set_pages);
+EXPORT_SYMBOL(cmm_get_pages);
+EXPORT_SYMBOL(cmm_add_timed_pages);
+EXPORT_SYMBOL(cmm_get_timed_pages);
+EXPORT_SYMBOL(cmm_set_timeout);
+
+MODULE_LICENSE("GPL");
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
new file mode 100644
index 0000000..648deed
--- /dev/null
+++ b/arch/s390/mm/extmem.c
@@ -0,0 +1,588 @@
+/*
+ * File...........: arch/s390/mm/extmem.c
+ * Author(s)......: Carsten Otte <cotte@de.ibm.com>
+ * 		    Rob M van der Heij <rvdheij@nl.ibm.com>
+ * 		    Steven Shultz <shultzss@us.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * (C) IBM Corporation 2002-2004
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/bootmem.h>
+#include <asm/page.h>
+#include <asm/ebcdic.h>
+#include <asm/errno.h>
+#include <asm/extmem.h>
+#include <asm/cpcmd.h>
+#include <linux/ctype.h>
+
+#define DCSS_DEBUG	/* Debug messages on/off */
+
+#define DCSS_NAME "extmem"
+#ifdef DCSS_DEBUG
+#define PRINT_DEBUG(x...)	printk(KERN_DEBUG DCSS_NAME " debug:" x)
+#else
+#define PRINT_DEBUG(x...)   do {} while (0)
+#endif
+#define PRINT_INFO(x...)	printk(KERN_INFO DCSS_NAME " info:" x)
+#define PRINT_WARN(x...)	printk(KERN_WARNING DCSS_NAME " warning:" x)
+#define PRINT_ERR(x...)		printk(KERN_ERR DCSS_NAME " error:" x)
+
+
+/* Subfunction codes passed to dcss_diag() (DIAGNOSE X'64'). */
+#define DCSS_LOADSHR    0x00	/* load in shared mode */
+#define DCSS_LOADNSR    0x04	/* load in non-shared (exclusive) mode */
+#define DCSS_PURGESEG   0x08	/* purge segment from the address space */
+#define DCSS_FINDSEG    0x0c	/* find segment */
+#define DCSS_LOADNOLY   0x10	/* load without altering its contents */
+#define DCSS_SEGEXT     0x18	/* extended segment query */
+/* NOTE(review): same value as DCSS_FINDSEG; used as the qopcode inside
+ * the SEGEXT query block -- confirm against the CP DIAG X'64' docs. */
+#define DCSS_FINDSEGA   0x0c
+
+/* One address range of a (possibly multi-part) DCSS as reported by CP. */
+struct qrange {
+	unsigned int  start; // 3byte start address, 1 byte type
+	unsigned int  end;   // 3byte end address, 1 byte reserved
+};
+
+/* Output block filled by the DIAG X'64' SEGEXT segment query. */
+struct qout64 {
+	int segstart;
+	int segend;
+	int segcnt;
+	int segrcnt;
+	struct qrange range[6];
+};
+
+/* Input block for the DIAG X'64' SEGEXT segment query.  qoutptr holds
+ * a 31-bit address, which is why the blocks are allocated GFP_DMA. */
+struct qin64 {
+	char qopcode;
+	char rsrv1[3];
+	char qrcode;
+	char rsrv2[3];
+	char qname[8];
+	unsigned int qoutptr;
+	short int qoutlen;
+};
+
+/* Book-keeping for one loaded DCSS segment (member of dcss_list). */
+struct dcss_segment {
+	struct list_head list;
+	char dcss_name[8];		/* segment name, EBCDIC, blank padded */
+	unsigned long start_addr;	/* mapped range, from the load diag */
+	unsigned long end;
+	atomic_t ref_count;		/* number of concurrent loaders */
+	int do_nonshared;		/* 1 = loaded exclusively (LOADNSR) */
+	unsigned int vm_segtype;	/* SEG_TYPE_* of the whole segment */
+	struct qrange range[6];		/* query result, kept for segment_save */
+	int segcnt;
+};
+
+/* Protects dcss_list and the ref counts of its members. */
+static DEFINE_SPINLOCK(dcss_lock);
+static struct list_head dcss_list = LIST_HEAD_INIT(dcss_list);
+/* Printable names indexed by SEG_TYPE_* (last entry: mixed EW/EN). */
+static char *segtype_string[] = { "SW", "EW", "SR", "ER", "SN", "EN", "SC",
+					"EW/EN-MIXED" };
+
+/* Detected memory layout, defined in the s390 setup code. */
+extern struct {
+	unsigned long addr, size, type;
+} memory_chunk[MEMORY_CHUNKS];
+
+/*
+ * Build the 8-byte, blank-padded EBCDIC segment name that VM expects
+ * from a NUL-terminated ASCII name (upper-cased on the way).
+ */
+static void inline
+dcss_mkname(char *name, char *dcss_name)
+{
+	int i = 0;
+
+	/* copy and upper-case up to 8 characters */
+	while (i < 8 && name[i] != '\0') {
+		dcss_name[i] = toupper(name[i]);
+		i++;
+	}
+	/* pad the remainder with blanks */
+	while (i < 8)
+		dcss_name[i++] = ' ';
+	ASCEBC(dcss_name, 8);
+}
+
+
+/*
+ * Look up a segment by its ASCII name in dcss_list.
+ * Returns the matching entry or NULL.  Caller must hold dcss_lock.
+ */
+static struct dcss_segment *
+segment_by_name (char *name)
+{
+	char dcss_name[9];
+	struct dcss_segment *seg;
+
+	assert_spin_locked(&dcss_lock);
+	dcss_mkname (name, dcss_name);
+	list_for_each_entry (seg, &dcss_list, list)
+		if (memcmp(seg->dcss_name, dcss_name, 8) == 0)
+			return seg;
+	return NULL;
+}
+
+
+/*
+ * Perform a function on a dcss segment.
+ *
+ * Issues DIAGNOSE X'64' with subfunction 'func' and parameter
+ * 'parameter' (either an EBCDIC segment name or a qin64 block).
+ * The two register values CP returns are stored in *ret1/*ret2;
+ * the function result is the condition code of the diagnose.
+ */
+static inline int
+dcss_diag (__u8 func, void *parameter,
+           unsigned long *ret1, unsigned long *ret2)
+{
+	unsigned long rx, ry;
+	int rc;
+
+	rx = (unsigned long) parameter;
+	ry = (unsigned long) func;
+	__asm__ __volatile__(
+#ifdef CONFIG_ARCH_S390X
+		"   sam31\n" // switch to 31 bit, DIAG X'64' is a 31-bit service
+		"   diag    %0,%1,0x64\n"
+		"   sam64\n" // switch back to 64 bit
+#else
+		"   diag    %0,%1,0x64\n"
+#endif
+		"   ipm     %2\n"	/* extract the condition code */
+		"   srl     %2,28\n"
+		: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc" );
+	*ret1 = rx;
+	*ret2 = ry;
+	return rc;
+}
+
+/*
+ * Map a CP return code from DIAG X'64' to a Linux errno:
+ * 44 means the segment was not found, anything else is an I/O error.
+ */
+static inline int
+dcss_diag_translate_rc (int vm_rc) {
+	return (vm_rc == 44) ? -ENOENT : -EIO;
+}
+
+
+/* do a diag to get info about a segment.
+ * fills start_address, end and vm_segtype fields
+ *
+ * Returns 0 on success, or a negative errno: -ENOMEM if the query
+ * blocks cannot be allocated, -ENOTSUPP for unsupported multi-part
+ * layouts, or a code translated from the CP return value.
+ */
+static int
+query_segment_type (struct dcss_segment *seg)
+{
+	struct qin64  *qin = kmalloc (sizeof(struct qin64), GFP_DMA);
+	struct qout64 *qout = kmalloc (sizeof(struct qout64), GFP_DMA);
+
+	int diag_cc, rc, i;
+	unsigned long dummy, vmrc;
+
+	if ((qin == NULL) || (qout == NULL)) {
+		rc = -ENOMEM;
+		goto out_free;
+	}
+
+	/* initialize diag input parameters */
+	qin->qopcode = DCSS_FINDSEGA;
+	qin->qoutptr = (unsigned long) qout;
+	qin->qoutlen = sizeof(struct qout64);
+	memcpy (qin->qname, seg->dcss_name, 8);
+
+	diag_cc = dcss_diag (DCSS_SEGEXT, qin, &dummy, &vmrc);
+
+	if (diag_cc > 1) {
+		rc = dcss_diag_translate_rc (vmrc);
+		goto out_free;
+	}
+
+	/* the reply block only has room for 6 ranges */
+	if (qout->segcnt > 6) {
+		rc = -ENOTSUPP;
+		goto out_free;
+	}
+
+	if (qout->segcnt == 1) {
+		seg->vm_segtype = qout->range[0].start & 0xff;
+	} else {
+		/* multi-part segment. only one type supported here:
+		    - all parts are contiguous
+		    - all parts are either EW or EN type
+		    - maximum 6 parts allowed */
+		unsigned long start = qout->segstart >> PAGE_SHIFT;
+		for (i=0; i<qout->segcnt; i++) {
+			if (((qout->range[i].start & 0xff) != SEG_TYPE_EW) &&
+			    ((qout->range[i].start & 0xff) != SEG_TYPE_EN)) {
+				rc = -ENOTSUPP;
+				goto out_free;
+			}
+			/* each part must start right after its predecessor */
+			if (start != qout->range[i].start >> PAGE_SHIFT) {
+				rc = -ENOTSUPP;
+				goto out_free;
+			}
+			start = (qout->range[i].end >> PAGE_SHIFT) + 1;
+		}
+		seg->vm_segtype = SEG_TYPE_EWEN;
+	}
+
+	/* analyze diag output and update seg */
+	seg->start_addr = qout->segstart;
+	seg->end = qout->segend;
+
+	memcpy (seg->range, qout->range, 6*sizeof(struct qrange));
+	seg->segcnt = qout->segcnt;
+
+	rc = 0;
+
+ out_free:
+	/* kfree(NULL) is a no-op, so no need for the NULL guards */
+	kfree(qin);
+	kfree(qout);
+	return rc;
+}
+
+/*
+ * check if the given segment collides with guest storage.
+ * returns 1 if this is the case, 0 if no collision was found
+ *
+ * Only memory chunks of type 0 are considered; the comparison is
+ * done at 1MB granularity (addresses shifted right by 20).
+ */
+static int
+segment_overlaps_storage(struct dcss_segment *seg)
+{
+	int i;
+
+	/* memory_chunk[] is terminated by a zero-sized entry */
+	for (i=0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
+		if (memory_chunk[i].type != 0)
+			continue;
+		/* chunk entirely above the segment? */
+		if ((memory_chunk[i].addr >> 20) > (seg->end >> 20))
+			continue;
+		/* chunk entirely below the segment? */
+		if (((memory_chunk[i].addr + memory_chunk[i].size - 1) >> 20)
+				< (seg->start_addr >> 20))
+			continue;
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * check if segment collides with other segments that are currently loaded
+ * returns 1 if this is the case, 0 if no collision was found
+ * (comparison at 1MB granularity; caller must hold dcss_lock)
+ */
+static int
+segment_overlaps_others (struct dcss_segment *seg)
+{
+	struct list_head *pos;
+	struct dcss_segment *other;
+
+	assert_spin_locked(&dcss_lock);
+	list_for_each(pos, &dcss_list) {
+		other = list_entry(pos, struct dcss_segment, list);
+		/* never report a segment as overlapping itself */
+		if (other == seg)
+			continue;
+		if ((other->start_addr >> 20) > (seg->end >> 20))
+			continue;
+		if ((other->end >> 20) < (seg->start_addr >> 20))
+			continue;
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * check if segment exceeds the kernel mapping range (detected or set via mem=)
+ * returns 1 if this is the case, 0 if segment fits into the range
+ */
+static inline int
+segment_exceeds_range (struct dcss_segment *seg)
+{
+	int seg_last_pfn = (seg->end) >> PAGE_SHIFT;
+
+	return (seg_last_pfn > max_pfn) ? 1 : 0;
+}
+
+/*
+ * get info about a segment
+ * possible return values:
+ * -ENOSYS  : we are not running on VM
+ * -EIO     : could not perform query diagnose
+ * -ENOENT  : no such segment
+ * -ENOTSUPP: multi-part segment cannot be used with linux
+ * -ENOSPC  : segment cannot be used (overlaps with storage)
+ * -ENOMEM  : out of memory
+ * 0 .. 6   : type of segment as defined in include/asm-s390/extmem.h
+ */
+int
+segment_type (char* name)
+{
+	int rc;
+	/* purely local scratch segment; dcss_list is not touched, so
+	 * no dcss_lock is taken here */
+	struct dcss_segment seg;
+
+	if (!MACHINE_IS_VM)
+		return -ENOSYS;
+
+	dcss_mkname(name, seg.dcss_name);
+	rc = query_segment_type (&seg);
+	if (rc < 0)
+		return rc;
+	return seg.vm_segtype;
+}
+
+/*
+ * real segment loading function, called from segment_load
+ * (with dcss_lock held) once the segment is known not to be
+ * loaded yet.  On success the new dcss_segment is entered into
+ * dcss_list with a reference count of 1.
+ */
+static int
+__segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long *end)
+{
+	struct dcss_segment *seg = kmalloc(sizeof(struct dcss_segment),
+			GFP_DMA);
+	int dcss_command, rc, diag_cc;
+
+	if (seg == NULL) {
+		rc = -ENOMEM;
+		goto out;
+	}
+	dcss_mkname (name, seg->dcss_name);
+	rc = query_segment_type (seg);
+	if (rc < 0)
+		goto out_free;
+	/* sanity checks before actually loading the segment */
+	if (segment_exceeds_range(seg)) {
+		PRINT_WARN ("segment_load: not loading segment %s - exceeds"
+				" kernel mapping range\n",name);
+		rc = -ERANGE;
+		goto out_free;
+	}
+	if (segment_overlaps_storage(seg)) {
+		PRINT_WARN ("segment_load: not loading segment %s - overlaps"
+				" storage\n",name);
+		rc = -ENOSPC;
+		goto out_free;
+	}
+	if (segment_overlaps_others(seg)) {
+		PRINT_WARN ("segment_load: not loading segment %s - overlaps"
+				" other segments\n",name);
+		rc = -EBUSY;
+		goto out_free;
+	}
+	if (do_nonshared)
+		dcss_command = DCSS_LOADNSR;
+	else
+		dcss_command = DCSS_LOADNOLY;
+
+	diag_cc = dcss_diag(dcss_command, seg->dcss_name,
+			&seg->start_addr, &seg->end);
+	if (diag_cc > 1) {
+		/* on failure CP leaves the error code in the end register */
+		PRINT_WARN ("segment_load: could not load segment %s - "
+				"diag returned error (%ld)\n",name,seg->end);
+		rc = dcss_diag_translate_rc (seg->end);
+		dcss_diag(DCSS_PURGESEG, seg->dcss_name,
+				&seg->start_addr, &seg->end);
+		goto out_free;
+	}
+	seg->do_nonshared = do_nonshared;
+	atomic_set(&seg->ref_count, 1);
+	list_add(&seg->list, &dcss_list);
+	rc = seg->vm_segtype;
+	*addr = seg->start_addr;
+	*end  = seg->end;
+	if (do_nonshared)
+		PRINT_INFO ("segment_load: loaded segment %s range %p .. %p "
+				"type %s in non-shared mode\n", name,
+				(void*)seg->start_addr, (void*)seg->end,
+				segtype_string[seg->vm_segtype]);
+	else
+		PRINT_INFO ("segment_load: loaded segment %s range %p .. %p "
+				"type %s in shared mode\n", name,
+				(void*)seg->start_addr, (void*)seg->end,
+				segtype_string[seg->vm_segtype]);
+	goto out;
+ out_free:
+	kfree (seg);
+ out:
+	return rc;
+}
+
+/*
+ * this function loads a DCSS segment
+ * name         : name of the DCSS
+ * do_nonshared : 0 indicates that the dcss should be shared with other linux images
+ *                1 indicates that the dcss should be exclusive for this linux image
+ * addr         : will be filled with start address of the segment
+ * end          : will be filled with end address of the segment
+ * return values:
+ * -ENOSYS  : we are not running on VM
+ * -EIO     : could not perform query or load diagnose
+ * -ENOENT  : no such segment
+ * -ENOTSUPP: multi-part segment cannot be used with linux
+ * -ENOSPC  : segment cannot be used (overlaps with storage)
+ * -EBUSY   : segment can temporarily not be used (overlaps with dcss)
+ * -ERANGE  : segment cannot be used (exceeds kernel mapping range)
+ * -EPERM   : segment is currently loaded with incompatible permissions
+ * -ENOMEM  : out of memory
+ * 0 .. 6   : type of segment as defined in include/asm-s390/extmem.h
+ */
+int
+segment_load (char *name, int do_nonshared, unsigned long *addr,
+		unsigned long *end)
+{
+	struct dcss_segment *seg;
+	int rc;
+
+	if (!MACHINE_IS_VM)
+		return -ENOSYS;
+
+	spin_lock (&dcss_lock);
+	seg = segment_by_name (name);
+	if (seg == NULL)
+		/* not loaded yet -- do the real work */
+		rc = __segment_load (name, do_nonshared, addr, end);
+	else {
+		/* already loaded: just take another reference, but only
+		 * if the sharing mode matches the existing mapping */
+		if (do_nonshared == seg->do_nonshared) {
+			atomic_inc(&seg->ref_count);
+			*addr = seg->start_addr;
+			*end  = seg->end;
+			rc    = seg->vm_segtype;
+		} else {
+			*addr = *end = 0;
+			rc    = -EPERM;
+		}
+	}
+	spin_unlock (&dcss_lock);
+	return rc;
+}
+
+/*
+ * this function modifies the shared state of a DCSS segment that is
+ * currently loaded, by purging it and reloading it in the new mode.
+ * name         : name of the DCSS
+ * do_nonshared : 0 indicates that the dcss should be shared with other linux images
+ *                1 indicates that the dcss should be exclusive for this linux image
+ * return values:
+ * -EIO     : could not perform load diagnose (segment gone!)
+ * -ENOENT  : no such segment (segment gone!)
+ * -EAGAIN  : segment is in use by other exploiters, try later
+ * -EINVAL  : no segment with the given name is currently loaded - name invalid
+ * 0	    : operation succeeded
+ */
+int
+segment_modify_shared (char *name, int do_nonshared)
+{
+	struct dcss_segment *seg;
+	unsigned long dummy;
+	int dcss_command, rc, diag_cc;
+
+	spin_lock (&dcss_lock);
+	seg = segment_by_name (name);
+	if (seg == NULL) {
+		rc = -EINVAL;
+		goto out_unlock;
+	}
+	if (do_nonshared == seg->do_nonshared) {
+		PRINT_INFO ("segment_modify_shared: not reloading segment %s"
+				" - already in requested mode\n",name);
+		rc = 0;
+		goto out_unlock;
+	}
+	/* reloading is only safe while we hold the single reference */
+	if (atomic_read (&seg->ref_count) != 1) {
+		PRINT_WARN ("segment_modify_shared: not reloading segment %s - "
+				"segment is in use by other driver(s)\n",name);
+		rc = -EAGAIN;
+		goto out_unlock;
+	}
+	/* purge, then reload in the new mode; if the reload fails the
+	 * segment is gone and must be dropped from dcss_list (out_del) */
+	dcss_diag(DCSS_PURGESEG, seg->dcss_name,
+		  &dummy, &dummy);
+	if (do_nonshared)
+		dcss_command = DCSS_LOADNSR;
+	else
+		dcss_command = DCSS_LOADNOLY;	/* fixed mis-leading indentation */
+	diag_cc = dcss_diag(dcss_command, seg->dcss_name,
+			&seg->start_addr, &seg->end);
+	if (diag_cc > 1) {
+		PRINT_WARN ("segment_modify_shared: could not reload segment %s"
+				" - diag returned error (%ld)\n",name,seg->end);
+		rc = dcss_diag_translate_rc (seg->end);
+		goto out_del;
+	}
+	seg->do_nonshared = do_nonshared;
+	rc = 0;
+	goto out_unlock;
+ out_del:
+	list_del(&seg->list);
+	dcss_diag(DCSS_PURGESEG, seg->dcss_name,
+		  &dummy, &dummy);
+	kfree (seg);
+ out_unlock:
+	spin_unlock(&dcss_lock);
+	return rc;
+}
+
+/*
+ * Decrease the use count of a DCSS segment and remove
+ * it from the address space if nobody is using it
+ * any longer.
+ */
+void
+segment_unload(char *name)
+{
+	unsigned long dummy;
+	struct dcss_segment *seg;
+
+	if (!MACHINE_IS_VM)
+		return;
+
+	spin_lock(&dcss_lock);
+	seg = segment_by_name (name);
+	if (seg == NULL) {
+		PRINT_ERR ("could not find segment %s in segment_unload, "
+				"please report to linux390@de.ibm.com\n",name);
+		goto out_unlock;
+	}
+	/* last reference gone: purge the segment and free the entry */
+	if (atomic_dec_return(&seg->ref_count) == 0) {
+		list_del(&seg->list);
+		dcss_diag(DCSS_PURGESEG, seg->dcss_name,
+			  &dummy, &dummy);
+		kfree(seg);
+	}
+out_unlock:
+	spin_unlock(&dcss_lock);
+}
+
+/*
+ * save segment content permanently, by issuing the CP DEFSEG and
+ * SAVESEG commands for all ranges of the loaded segment.
+ * Does nothing when not running under VM or when the segment is
+ * not currently loaded.
+ */
+void
+segment_save(char *name)
+{
+	struct dcss_segment *seg;
+	char cmd1[160];
+	char cmd2[80];
+	int i;
+
+	if (!MACHINE_IS_VM)
+		return;
+
+	spin_lock(&dcss_lock);
+	seg = segment_by_name (name);
+
+	if (seg == NULL) {
+		PRINT_ERR ("could not find segment %s in segment_save, please report to linux390@de.ibm.com\n",name);
+		/* fixed: this path previously returned with dcss_lock held */
+		goto out_unlock;
+	}
+
+	/* rebuild the DEFSEG command from the saved range information */
+	sprintf(cmd1, "DEFSEG %s", name);
+	for (i=0; i<seg->segcnt; i++) {
+		sprintf(cmd1+strlen(cmd1), " %X-%X %s",
+			seg->range[i].start >> PAGE_SHIFT,
+			seg->range[i].end >> PAGE_SHIFT,
+			segtype_string[seg->range[i].start & 0xff]);
+	}
+	sprintf(cmd2, "SAVESEG %s", name);
+	cpcmd(cmd1, NULL, 0);
+	cpcmd(cmd2, NULL, 0);
+out_unlock:
+	spin_unlock(&dcss_lock);
+}
+
+/* DCSS interface exported for device drivers (e.g. block/char users). */
+EXPORT_SYMBOL(segment_load);
+EXPORT_SYMBOL(segment_unload);
+EXPORT_SYMBOL(segment_save);
+EXPORT_SYMBOL(segment_type);
+EXPORT_SYMBOL(segment_modify_shared);
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
new file mode 100644
index 0000000..80306bc
--- /dev/null
+++ b/arch/s390/mm/fault.c
@@ -0,0 +1,586 @@
+/*
+ *  arch/s390/mm/fault.c
+ *
+ *  S390 version
+ *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Hartmut Penner (hp@de.ibm.com)
+ *               Ulrich Weigand (uweigand@de.ibm.com)
+ *
+ *  Derived from "arch/i386/mm/fault.c"
+ *    Copyright (C) 1995  Linus Torvalds
+ */
+
+#include <linux/config.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/module.h>
+#include <linux/hardirq.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+
+/* Address-mode dependent masks for decoding the translation
+ * exception code and the PSW address. */
+#ifndef CONFIG_ARCH_S390X
+#define __FAIL_ADDR_MASK 0x7ffff000	/* page part of trans_exc_code, 31 bit */
+#define __FIXUP_MASK 0x7fffffff		/* strip the 31-bit addressing bit */
+#define __SUBCODE_MASK 0x0200
+#define __PF_RES_FIELD 0ULL
+#else /* CONFIG_ARCH_S390X */
+#define __FAIL_ADDR_MASK -4096L
+#define __FIXUP_MASK ~0L
+#define __SUBCODE_MASK 0x0600
+#define __PF_RES_FIELD 0x8000000000000000ULL
+#endif /* CONFIG_ARCH_S390X */
+
+#ifdef CONFIG_SYSCTL
+extern int sysctl_userprocess_debug;
+#endif
+
+extern void die(const char *,struct pt_regs *,long);
+
+/*
+ * Unlock any spinlocks which will prevent us from getting the
+ * message out (timerlist_lock is acquired through the
+ * console unblank code)
+ */
+void bust_spinlocks(int yes)
+{
+	if (yes) {
+		/* entering an oops: let printk bypass locks */
+		oops_in_progress = 1;
+	} else {
+		int loglevel_save = console_loglevel;
+		console_unblank();
+		oops_in_progress = 0;
+		/*
+		 * OK, the message is on the console.  Now we call printk()
+		 * without oops_in_progress set so that printk will give klogd
+		 * a poke.  Hold onto your hats...
+		 */
+		console_loglevel = 15;
+		printk(" ");
+		console_loglevel = loglevel_save;
+	}
+}
+
+/*
+ * Check which address space is addressed by the access
+ * register in S390_lowcore.exc_access_id.
+ * Returns 1 for user space and 0 for kernel space.
+ * Kills the task if the access register content is unusable.
+ */
+static int __check_access_register(struct pt_regs *regs, int error_code)
+{
+	int areg = S390_lowcore.exc_access_id;
+
+	if (areg == 0)
+		/* Access via access register 0 -> kernel address */
+		return 0;
+	/* make the task's current access register contents visible */
+	save_access_regs(current->thread.acrs);
+	if (regs && areg < NUM_ACRS && current->thread.acrs[areg] <= 1)
+		/*
+		 * access register contains 0 -> kernel address,
+		 * access register contains 1 -> user space address
+		 */
+		return current->thread.acrs[areg];
+
+	/* Something unhealthy was done with the access registers... */
+	die("page fault via unknown access register", regs, error_code);
+	do_exit(SIGKILL);
+	return 0;
+}
+
+/*
+ * Check which address space the address belongs to.
+ * Returns 1 for user space and 0 for kernel space.
+ */
+static inline int check_user_space(struct pt_regs *regs, int error_code)
+{
+	/*
+	 * The lowest two bits of S390_lowcore.trans_exc_code indicate
+	 * which paging table was used:
+	 *   0: Primary Segment Table Descriptor
+	 *   1: STD determined via access register
+	 *   2: Secondary Segment Table Descriptor
+	 *   3: Home Segment Table Descriptor
+	 */
+	int descriptor = S390_lowcore.trans_exc_code & 3;
+	if (unlikely(descriptor == 1))
+		return __check_access_register(regs, error_code);
+	if (descriptor == 2)
+		/* secondary space: depends on the current uaccess mode */
+		return current->thread.mm_segment.ar4;
+	/* primary (0) -> kernel, home (3) -> user */
+	return descriptor != 0;
+}
+
+/*
+ * Send SIGSEGV to task.  This is an external routine
+ * to keep the stack usage of do_page_fault small.
+ */
+static void do_sigsegv(struct pt_regs *regs, unsigned long error_code,
+		       int si_code, unsigned long address)
+{
+	struct siginfo si;
+
+#if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG)
+#if defined(CONFIG_SYSCTL)
+	if (sysctl_userprocess_debug)
+#endif
+	{
+		printk("User process fault: interruption code 0x%lX\n",
+		       error_code);
+		printk("failing address: %lX\n", address);
+		show_regs(regs);
+	}
+#endif
+	/* Clear the whole siginfo: the signal delivery code copies it
+	 * to user space, so any uninitialized field (si_errno, ...)
+	 * would leak kernel stack data. */
+	memset(&si, 0, sizeof(si));
+	si.si_signo = SIGSEGV;
+	si.si_code = si_code;
+	si.si_addr = (void *) address;
+	force_sig_info(SIGSEGV, &si, current);
+}
+
+/*
+ * This routine handles page faults.  It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+ * routines.
+ *
+ * error_code:
+ *   04       Protection           ->  Write-Protection  (suppression)
+ *   10       Segment translation  ->  Not present       (nullification)
+ *   11       Page translation     ->  Not present       (nullification)
+ *   3b       Region third trans.  ->  Not present       (nullification)
+ */
+extern inline void
+do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
+{
+        struct task_struct *tsk;
+        struct mm_struct *mm;
+        struct vm_area_struct * vma;
+        unsigned long address;
+	int user_address;
+	const struct exception_table_entry *fixup;
+	int si_code = SEGV_MAPERR;
+
+        tsk = current;
+        mm = tsk->mm;
+	
+	/* 
+         * Check for low-address protection.  This needs to be treated
+	 * as a special case because the translation exception code 
+	 * field is not guaranteed to contain valid data in this case.
+	 */
+	if (is_protection && !(S390_lowcore.trans_exc_code & 4)) {
+
+		/* Low-address protection hit in kernel mode means 
+		   NULL pointer write access in kernel mode.  */
+ 		if (!(regs->psw.mask & PSW_MASK_PSTATE)) {
+			address = 0;
+			user_address = 0;
+			goto no_context;
+		}
+
+		/* Low-address protection hit in user mode 'cannot happen'.  */
+		die ("Low-address protection", regs, error_code);
+        	do_exit(SIGKILL);
+	}
+
+        /* 
+         * get the failing address 
+         * more specific the segment and page table portion of 
+         * the address 
+         */
+        address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK;
+	user_address = check_user_space(regs, error_code);
+
+	/*
+	 * Verify that the fault happened in user space, that
+	 * we are not in an interrupt and that there is a 
+	 * user context.
+	 */
+        if (user_address == 0 || in_interrupt() || !mm)
+                goto no_context;
+
+	/*
+	 * When we get here, the fault happened in the current
+	 * task's user address space, so we can switch on the
+	 * interrupts again and then search the VMAs
+	 */
+	local_irq_enable();
+
+        down_read(&mm->mmap_sem);
+
+        vma = find_vma(mm, address);
+        if (!vma)
+                goto bad_area;
+        if (vma->vm_start <= address) 
+                goto good_area;
+        if (!(vma->vm_flags & VM_GROWSDOWN))
+                goto bad_area;
+        if (expand_stack(vma, address))
+                goto bad_area;
+/*
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it..
+ */
+good_area:
+	si_code = SEGV_ACCERR;
+	if (!is_protection) {
+		/* page not present, check vm flags */
+		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
+			goto bad_area;
+	} else {
+		/* write-protection fault: the vma must be writable */
+		if (!(vma->vm_flags & VM_WRITE))
+			goto bad_area;
+	}
+
+survive:
+	/*
+	 * If for any reason at all we couldn't handle the fault,
+	 * make sure we exit gracefully rather than endlessly redo
+	 * the fault.
+	 */
+	switch (handle_mm_fault(mm, vma, address, is_protection)) {
+	case VM_FAULT_MINOR:
+		tsk->min_flt++;
+		break;
+	case VM_FAULT_MAJOR:
+		tsk->maj_flt++;
+		break;
+	case VM_FAULT_SIGBUS:
+		goto do_sigbus;
+	case VM_FAULT_OOM:
+		goto out_of_memory;
+	default:
+		BUG();
+	}
+
+        up_read(&mm->mmap_sem);
+	/*
+	 * The instruction that caused the program check will
+	 * be repeated. Don't signal single step via SIGTRAP.
+	 */
+	clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
+        return;
+
+/*
+ * Something tried to access memory that isn't in our memory map..
+ * Fix it, but check if it's kernel or user first..
+ */
+bad_area:
+        up_read(&mm->mmap_sem);
+
+        /* User mode accesses just cause a SIGSEGV */
+        if (regs->psw.mask & PSW_MASK_PSTATE) {
+                tsk->thread.prot_addr = address;
+                tsk->thread.trap_no = error_code;
+		do_sigsegv(regs, error_code, si_code, address);
+                return;
+	}
+
+no_context:
+        /* Are we prepared to handle this kernel fault?  */
+	fixup = search_exception_tables(regs->psw.addr & __FIXUP_MASK);
+	if (fixup) {
+		regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
+                return;
+        }
+
+/*
+ * Oops. The kernel tried to access some bad page. We'll have to
+ * terminate things with extreme prejudice.
+ */
+        if (user_address == 0)
+                printk(KERN_ALERT "Unable to handle kernel pointer dereference"
+        	       " at virtual kernel address %p\n", (void *)address);
+        else
+                printk(KERN_ALERT "Unable to handle kernel paging request"
+		       " at virtual user address %p\n", (void *)address);
+
+        die("Oops", regs, error_code);
+        do_exit(SIGKILL);
+
+
+/*
+ * We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+*/
+out_of_memory:
+	up_read(&mm->mmap_sem);
+	/* never kill init (pid 1); give up the CPU and retry instead */
+	if (tsk->pid == 1) {
+		yield();
+		goto survive;
+	}
+	printk("VM: killing process %s\n", tsk->comm);
+	if (regs->psw.mask & PSW_MASK_PSTATE)
+		do_exit(SIGKILL);
+	goto no_context;
+
+do_sigbus:
+	up_read(&mm->mmap_sem);
+
+	/*
+	 * Send a sigbus, regardless of whether we were in kernel
+	 * or user mode.
+	 */
+        tsk->thread.prot_addr = address;
+        tsk->thread.trap_no = error_code;
+	force_sig(SIGBUS, tsk);
+
+	/* Kernel mode? Handle exceptions or die */
+	if (!(regs->psw.mask & PSW_MASK_PSTATE))
+		goto no_context;
+}
+
+void do_protection_exception(struct pt_regs *regs, unsigned long error_code)
+{
+	/* Protection exceptions are suppressing: rewind the PSW by the
+	 * instruction length (stored in the upper half of error_code)
+	 * so the faulting instruction is re-executed after repair. */
+	regs->psw.addr -= (error_code >> 16);
+	do_exception(regs, 4, 1);
+}
+
+/* Segment/page/region translation exceptions (not-present faults). */
+void do_dat_exception(struct pt_regs *regs, unsigned long error_code)
+{
+	do_exception(regs, error_code & 0xff, 0);
+}
+
+#ifndef CONFIG_ARCH_S390X
+
+/* One waiter for a page that VM swapped out (pagex protocol). */
+typedef struct _pseudo_wait_t {
+       struct _pseudo_wait_t *next;
+       wait_queue_head_t queue;
+       unsigned long address;		/* page-aligned faulting address */
+       int resolved;			/* set when the page came back */
+} pseudo_wait_t;
+
+static pseudo_wait_t *pseudo_lock_queue = NULL;
+/* Explicitly initialize the lock (consistent with dcss_lock in
+ * arch/s390/mm/extmem.c); it previously relied on implicit
+ * zero-initialization of the static spinlock_t. */
+static DEFINE_SPINLOCK(pseudo_wait_spinlock); /* protects pseudo_lock_queue */
+
+/*
+ * This routine handles 'pagex' pseudo page faults.
+ * An initial interrupt reports that a page is gone (the task is put
+ * to sleep on pseudo_lock_queue); a completion interrupt, flagged by
+ * the high bit of the address, reports that VM swapped it back in.
+ */
+asmlinkage void
+do_pseudo_page_fault(struct pt_regs *regs, unsigned long error_code)
+{
+        pseudo_wait_t wait_struct;
+        pseudo_wait_t *ptr, *last, *next;
+        unsigned long address;
+
+        /*
+         * get the failing address
+         * more specific the segment and page table portion of
+         * the address
+         */
+        address = S390_lowcore.trans_exc_code & 0xfffff000;
+
+        if (address & 0x80000000) {
+                /* high bit set -> a page has been swapped in by VM */
+                address &= 0x7fffffff;
+                spin_lock(&pseudo_wait_spinlock);
+                last = NULL;
+                ptr = pseudo_lock_queue;
+                while (ptr != NULL) {
+                        next = ptr->next;
+                        if (address == ptr->address) {
+				 /*
+                                 * This is one of the processes waiting
+                                 * for the page. Unchain from the queue.
+                                 * There can be more than one process
+                                 * waiting for the same page. VM presents
+                                 * an initial and a completion interrupt for
+                                 * every process that tries to access a 
+                                 * page swapped out by VM. 
+                                 */
+                                if (last == NULL)
+                                        pseudo_lock_queue = next;
+                                else
+                                        last->next = next;
+                                /* now wake up the process */
+                                ptr->resolved = 1;
+                                wake_up(&ptr->queue);
+                        } else
+                                last = ptr;
+                        ptr = next;
+                }
+                spin_unlock(&pseudo_wait_spinlock);
+        } else {
+                /* Pseudo page faults in kernel mode is a bad idea */
+                if (!(regs->psw.mask & PSW_MASK_PSTATE)) {
+                        /*
+			 * VM presents pseudo page faults if the interrupted
+			 * state was not disabled for interrupts. So we can
+			 * get pseudo page fault interrupts while running
+			 * in kernel mode. We simply access the page here
+			 * while we are running disabled. VM will then swap
+			 * in the page synchronously.
+                         */
+                         if (check_user_space(regs, error_code) == 0)
+                                 /* dereference a virtual kernel address */
+                                 __asm__ __volatile__ (
+                                         "  ic 0,0(%0)"
+                                         : : "a" (address) : "0");
+                         else
+                                 /* dereference a virtual user address */
+                                 __asm__ __volatile__ (
+                                         "  la   2,0(%0)\n"
+                                         "  sacf 512\n"
+                                         "  ic   2,0(2)\n"
+					 "0:sacf 0\n"
+					 ".section __ex_table,\"a\"\n"
+					 "  .align 4\n"
+					 "  .long  0b,0b\n"
+					 ".previous"
+                                         : : "a" (address) : "2" );
+
+                        return;
+                }
+		/* initialize and add element to pseudo_lock_queue */
+                init_waitqueue_head (&wait_struct.queue);
+                wait_struct.address = address;
+                wait_struct.resolved = 0;
+                spin_lock(&pseudo_wait_spinlock);
+                wait_struct.next = pseudo_lock_queue;
+                pseudo_lock_queue = &wait_struct;
+                spin_unlock(&pseudo_wait_spinlock);
+		/*
+		 * The instruction that caused the program check will
+		 * be repeated. Don't signal single step via SIGTRAP.
+		 */
+		clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
+                /* go to sleep */
+                wait_event(wait_struct.queue, wait_struct.resolved);
+        }
+}
+#endif /* ! CONFIG_ARCH_S390X */
+
+#ifdef CONFIG_PFAULT 
+/*
+ * 'pfault' pseudo page faults routines.
+ */
+/* set by the "nopfault" kernel parameter to disable the facility */
+static int pfault_disable = 0;
+
+static int __init nopfault(char *str)
+{
+	pfault_disable = 1;
+	return 1;
+}
+
+__setup("nopfault", nopfault);
+
+/* Parameter block ("refbk") for DIAG X'258', which enables/disables
+ * the pfault facility.  Field layout is dictated by the interface;
+ * see the initializers in pfault_init()/pfault_fini(). */
+typedef struct {
+	__u16 refdiagc;
+	__u16 reffcode;
+	__u16 refdwlen;
+	__u16 refversn;
+	__u64 refgaddr;
+	__u64 refselmk;
+	__u64 refcmpmk;
+	__u64 reserved;
+} __attribute__ ((packed)) pfault_refbk_t;
+
+/*
+ * Enable the pfault pseudo-page-fault facility via DIAG X'258'
+ * (token is the current task pointer via __LC_CURRENT) and turn on
+ * the corresponding external interrupt subclass (CR0 bit 9).
+ * Returns 0 on success; nonzero (8 via the fixup path, or the diag
+ * result) if the facility is unavailable or was disabled on the
+ * command line.
+ */
+int pfault_init(void)
+{
+	pfault_refbk_t refbk =
+		{ 0x258, 0, 5, 2, __LC_CURRENT, 1ULL << 48, 1ULL << 48,
+		  __PF_RES_FIELD };
+        int rc;
+
+	if (pfault_disable)
+		return -1;
+        __asm__ __volatile__(
+                "    diag  %1,%0,0x258\n"
+		"0:  j     2f\n"
+		"1:  la    %0,8\n"	/* fixup target: diag not available */
+		"2:\n"
+		".section __ex_table,\"a\"\n"
+		"   .align 4\n"
+#ifndef CONFIG_ARCH_S390X
+		"   .long  0b,1b\n"
+#else /* CONFIG_ARCH_S390X */
+		"   .quad  0b,1b\n"
+#endif /* CONFIG_ARCH_S390X */
+		".previous"
+                : "=d" (rc) : "a" (&refbk) : "cc" );
+        __ctl_set_bit(0, 9);
+        return rc;
+}
+
+/*
+ * Disable the pfault facility: clear the external interrupt subclass
+ * bit (CR0 bit 9) and issue DIAG X'258' with function code 1 (off).
+ * A program check from the diag is simply ignored via the fixup.
+ */
+void pfault_fini(void)
+{
+	pfault_refbk_t refbk =
+	{ 0x258, 1, 5, 2, 0ULL, 0ULL, 0ULL, 0ULL };
+
+	if (pfault_disable)
+		return;
+	__ctl_clear_bit(0,9);
+        __asm__ __volatile__(
+                "    diag  %0,0,0x258\n"
+		"0:\n"
+		".section __ex_table,\"a\"\n"
+		"   .align 4\n"
+#ifndef CONFIG_ARCH_S390X
+		"   .long  0b,0b\n"
+#else /* CONFIG_ARCH_S390X */
+		"   .quad  0b,0b\n"
+#endif /* CONFIG_ARCH_S390X */
+		".previous"
+		: : "a" (&refbk) : "cc" );
+}
+
+/*
+ * External-interrupt handler for the pfault facility.  An "initial"
+ * interrupt announces a missing page (the task goes to sleep), a
+ * "completion" interrupt announces the page is back (wake the task).
+ * The two can arrive in either order; thread.pfault_wait arbitrates.
+ */
+asmlinkage void
+pfault_interrupt(struct pt_regs *regs, __u16 error_code)
+{
+	struct task_struct *tsk;
+	__u16 subcode;
+
+	/*
+	 * Get the external interruption subcode & pfault
+	 * initial/completion signal bit. VM stores this 
+	 * in the 'cpu address' field associated with the
+         * external interrupt. 
+	 */
+	subcode = S390_lowcore.cpu_addr;
+	if ((subcode & 0xff00) != __SUBCODE_MASK)
+		return;
+
+	/*
+	 * Get the token (= address of the task structure of the affected task).
+	 */
+	tsk = *(struct task_struct **) __LC_PFAULT_INTPARM;
+
+	if (subcode & 0x0080) {
+		/* signal bit is set -> a page has been swapped in by VM */
+		if (xchg(&tsk->thread.pfault_wait, -1) != 0) {
+			/* Initial interrupt was faster than the completion
+			 * interrupt. pfault_wait is valid. Set pfault_wait
+			 * back to zero and wake up the process. This can
+			 * safely be done because the task is still sleeping
+			 * and can't produce new pfaults. */
+			tsk->thread.pfault_wait = 0;
+			wake_up_process(tsk);
+		}
+	} else {
+		/* signal bit not set -> a real page is missing. */
+		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+		if (xchg(&tsk->thread.pfault_wait, 1) != 0) {
+			/* Completion interrupt was faster than the initial
+			 * interrupt (swapped in a -1 for pfault_wait). Set
+			 * pfault_wait back to zero and exit. This can be
+			 * done safely because tsk is running in kernel 
+			 * mode and can't produce new pfaults. */
+			tsk->thread.pfault_wait = 0;
+			set_task_state(tsk, TASK_RUNNING);
+		} else
+			set_tsk_need_resched(tsk);
+	}
+}
+#endif
+
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
new file mode 100644
index 0000000..8e723bc
--- /dev/null
+++ b/arch/s390/mm/init.c
@@ -0,0 +1,310 @@
+/*
+ *  arch/s390/mm/init.c
+ *
+ *  S390 version
+ *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Hartmut Penner (hp@de.ibm.com)
+ *
+ *  Derived from "arch/i386/mm/init.c"
+ *    Copyright (C) 1995  Linus Torvalds
+ */
+
+#include <linux/config.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/pagemap.h>
+#include <linux/bootmem.h>
+
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/dma.h>
+#include <asm/lowcore.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
+char  empty_zero_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
+
+/*
+ * Issue diagnose 0x10 to tell the VM hypervisor that the page at
+ * 'addr' is unused, so its backing frame may be reclaimed.  Pages at
+ * or above 0x7ff00000 are never released.  On 64 bit the diagnose
+ * must be issued in 31 bit addressing mode (sam31/sam64 around it).
+ * The __ex_table entry (0b -> 0b) turns a faulting diagnose into a
+ * no-op instead of a program check.
+ */
+void diag10(unsigned long addr)
+{
+        if (addr >= 0x7ff00000)
+                return;
+#ifdef __s390x__
+        asm volatile (
+		"   sam31\n"
+		"   diag %0,%0,0x10\n"
+		"0: sam64\n"
+		".section __ex_table,\"a\"\n"
+		"   .align 8\n"
+		"   .quad 0b, 0b\n"
+		".previous\n"
+		: : "a" (addr));
+#else
+        asm volatile (
+		"   diag %0,%0,0x10\n"
+		"0:\n"
+		".section __ex_table,\"a\"\n"
+		"   .align 4\n"
+		"   .long 0b, 0b\n"
+		".previous\n"
+		: : "a" (addr));
+#endif
+}
+
+/*
+ * Dump a summary of memory usage (free areas, swap, page counts) to
+ * the kernel log.
+ */
+void show_mem(void)
+{
+        int npages = 0, nreserved = 0, nshared = 0, ncached = 0;
+        int i;
+
+        printk("Mem-info:\n");
+        show_free_areas();
+        printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
+        for (i = 0; i < max_mapnr; i++) {
+                struct page *page = mem_map + i;
+
+                npages++;
+                if (PageReserved(page))
+                        nreserved++;
+                else if (PageSwapCache(page))
+                        ncached++;
+                else if (page_count(page))
+                        nshared += page_count(page) - 1;
+        }
+        printk("%d pages of RAM\n",npages);
+        printk("%d reserved pages\n",nreserved);
+        printk("%d pages shared\n",nshared);
+        printk("%d pages swap cached\n",ncached);
+}
+
+/* References to section boundaries */
+
+extern unsigned long _text;
+extern unsigned long _etext;
+extern unsigned long _edata;
+extern unsigned long __bss_start;
+extern unsigned long _end;
+
+extern unsigned long __init_begin;
+extern unsigned long __init_end;
+
+/*
+ * paging_init() sets up the page tables
+ */
+
+#ifndef CONFIG_ARCH_S390X
+/*
+ * paging_init() for the 31 bit kernel: clear swapper_pg_dir, build a
+ * one-to-one (identity) mapping for all physical memory up to
+ * max_low_pfn, load the kernel address space control element into
+ * control registers 1, 7 and 13, switch on DAT and finally set up
+ * the zone sizes.
+ */
+void __init paging_init(void)
+{
+        pgd_t * pg_dir;
+        pte_t * pg_table;
+        pte_t   pte;
+	int     i;
+        unsigned long tmp;
+        unsigned long pfn = 0;
+        unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
+        static const int ssm_mask = 0x04000000L;	/* SSM operand used below to enable DAT */
+
+	/* unmap whole virtual address space */
+
+        pg_dir = swapper_pg_dir;
+
+	for (i=0;i<KERNEL_PGD_PTRS;i++) 
+	        pmd_clear((pmd_t*)pg_dir++);
+		
+	/*
+	 * map whole physical memory to virtual memory (identity mapping) 
+	 */
+
+        pg_dir = swapper_pg_dir;
+
+        while (pfn < max_low_pfn) {
+                /*
+                 * pg_table is physical at this point
+                 */
+		pg_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+
+                /* One 4K page holds four 1K page tables (offsets 0, 1024,
+                 * 2048, 3072); point four consecutive segment table
+                 * entries at them. */
+                pg_dir->pgd0 =  (_PAGE_TABLE | __pa(pg_table));
+                pg_dir->pgd1 =  (_PAGE_TABLE | (__pa(pg_table)+1024));
+                pg_dir->pgd2 =  (_PAGE_TABLE | (__pa(pg_table)+2048));
+                pg_dir->pgd3 =  (_PAGE_TABLE | (__pa(pg_table)+3072));
+                pg_dir++;
+
+                for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) {
+                        pte = pfn_pte(pfn, PAGE_KERNEL);
+                        /* Past the end of memory: store an invalid pte. */
+                        if (pfn >= max_low_pfn)
+                                pte_clear(&init_mm, 0, &pte);
+                        set_pte(pg_table, pte);
+                        pfn++;
+                }
+        }
+
+	S390_lowcore.kernel_asce = pgdir_k;
+
+        /* enable virtual mapping in kernel mode */
+        __asm__ __volatile__("    LCTL  1,1,%0\n"
+                             "    LCTL  7,7,%0\n"
+                             "    LCTL  13,13,%0\n"
+                             "    SSM   %1" 
+			     : : "m" (pgdir_k), "m" (ssm_mask));
+
+        local_flush_tlb();
+
+	{
+		unsigned long zones_size[MAX_NR_ZONES] = { 0, 0, 0};
+
+		/* All memory goes into ZONE_DMA on 31 bit. */
+		zones_size[ZONE_DMA] = max_low_pfn;
+		free_area_init(zones_size);
+	}
+        return;
+}
+
+#else /* CONFIG_ARCH_S390X */
+/*
+ * paging_init() for the 64 bit kernel: set up the DMA/normal zone
+ * split, build a three level identity mapping for all physical memory
+ * up to max_low_pfn, load the kernel address space control element
+ * into control registers 1, 7 and 13 and switch on DAT.
+ */
+void __init paging_init(void)
+{
+        pgd_t * pg_dir;
+	pmd_t * pm_dir;
+        pte_t * pt_dir;
+        pte_t   pte;
+	int     i,j,k;
+        unsigned long pfn = 0;
+        unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) |
+          _KERN_REGION_TABLE;
+	static const int ssm_mask = 0x04000000L;	/* SSM operand used below to enable DAT */
+
+	unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
+	unsigned long dma_pfn, high_pfn;
+
+	/* Memory below MAX_DMA_ADDRESS goes into ZONE_DMA, the rest
+	 * into ZONE_NORMAL. */
+	dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
+	high_pfn = max_low_pfn;
+
+	if (dma_pfn > high_pfn)
+		zones_size[ZONE_DMA] = high_pfn;
+	else {
+		zones_size[ZONE_DMA] = dma_pfn;
+		zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
+	}
+
+	/* Initialize mem_map[].  */
+	free_area_init(zones_size);
+
+
+	/*
+	 * map whole physical memory to virtual memory (identity mapping) 
+	 */
+
+        pg_dir = swapper_pg_dir;
+	
+        for (i = 0 ; i < PTRS_PER_PGD ; i++,pg_dir++) {
+	
+                /* Everything beyond max_low_pfn stays unmapped. */
+                if (pfn >= max_low_pfn) {
+                        pgd_clear(pg_dir);
+                        continue;
+                }          
+        
+	        pm_dir = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE*4);
+                pgd_populate(&init_mm, pg_dir, pm_dir);
+
+                for (j = 0 ; j < PTRS_PER_PMD ; j++,pm_dir++) {
+                        if (pfn >= max_low_pfn) {
+                                pmd_clear(pm_dir);
+                                continue; 
+                        }          
+                        
+                        pt_dir = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+                        pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
+	
+                        for (k = 0 ; k < PTRS_PER_PTE ; k++,pt_dir++) {
+                                pte = pfn_pte(pfn, PAGE_KERNEL);
+                                if (pfn >= max_low_pfn) {
+                                        pte_clear(&init_mm, 0, &pte); 
+                                        continue;
+                                }
+                                set_pte(pt_dir, pte);
+                                pfn++;
+                        }
+                }
+        }
+
+	S390_lowcore.kernel_asce = pgdir_k;
+
+        /* enable virtual mapping in kernel mode */
+        __asm__ __volatile__("lctlg 1,1,%0\n\t"
+                             "lctlg 7,7,%0\n\t"
+                             "lctlg 13,13,%0\n\t"
+                             "ssm   %1"
+			     : :"m" (pgdir_k), "m" (ssm_mask));
+
+        local_flush_tlb();
+
+        return;
+}
+#endif /* CONFIG_ARCH_S390X */
+
+/*
+ * mem_init() - final memory setup: record the number of physical
+ * pages, clear the shared empty_zero_page, hand all bootmem pages
+ * over to the page allocator and log a summary of the memory layout.
+ */
+void __init mem_init(void)
+{
+	unsigned long codesize, reservedpages, datasize, initsize;
+	unsigned long i;
+
+        max_mapnr = num_physpages = max_low_pfn;
+        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+
+        /* clear the zero-page */
+        memset(empty_zero_page, 0, PAGE_SIZE);
+
+	/* this will put all low memory onto the freelists */
+	totalram_pages += free_all_bootmem();
+
+	/*
+	 * Count the pages that remained reserved after the bootmem
+	 * hand-over; the old code left reservedpages at 0, so the boot
+	 * banner always reported "0k reserved".
+	 */
+	reservedpages = 0;
+	for (i = 0; i < max_mapnr; i++)
+		if (PageReserved(mem_map + i))
+			reservedpages++;
+
+	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
+	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
+	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
+        printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
+                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
+                max_mapnr << (PAGE_SHIFT-10),
+                codesize >> 10,
+                reservedpages << (PAGE_SHIFT-10),
+                datasize >>10,
+                initsize >> 10);
+}
+
+/*
+ * Release the memory occupied by __init code and data back to the
+ * page allocator once boot no longer needs it.
+ */
+void free_initmem(void)
+{
+        unsigned long begin = (unsigned long) &__init_begin;
+        unsigned long end = (unsigned long) &__init_end;
+        unsigned long addr = begin;
+
+        while (addr < end) {
+                struct page *page = virt_to_page(addr);
+
+                ClearPageReserved(page);
+                set_page_count(page, 1);
+                free_page(addr);
+                totalram_pages++;
+                addr += PAGE_SIZE;
+        }
+        printk ("Freeing unused kernel memory: %ldk freed\n",
+		(end - begin) >> 10);
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+/*
+ * Give the pages that held the initial ramdisk image back to the
+ * page allocator.
+ */
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+        unsigned long addr;
+
+        if (start < end)
+                printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
+        for (addr = start; addr < end; addr += PAGE_SIZE) {
+                struct page *page = virt_to_page(addr);
+
+                ClearPageReserved(page);
+                set_page_count(page, 1);
+                free_page(addr);
+                totalram_pages++;
+        }
+}
+#endif
diff --git a/arch/s390/mm/ioremap.c b/arch/s390/mm/ioremap.c
new file mode 100644
index 0000000..c6c39d8
--- /dev/null
+++ b/arch/s390/mm/ioremap.c
@@ -0,0 +1,138 @@
+/*
+ *  arch/s390/mm/ioremap.c
+ *
+ *  S390 version
+ *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Hartmut Penner (hp@de.ibm.com)
+ *
+ *  Derived from "arch/i386/mm/extable.c"
+ *    (C) Copyright 1995 1996 Linus Torvalds
+ *
+ * Re-map IO memory to kernel address space so that we can access it.
+ * This is needed for high PCI addresses that aren't mapped in the
+ * 640k-1MB IO memory area on PC's
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <asm/io.h>
+#include <asm/pgalloc.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+
+/*
+ * Fill one page table: map the physical range starting at 'phys_addr'
+ * with protection 'flags' into 'address'/'size', clipped to the span
+ * of the current pmd entry.  An already present pte in the range is
+ * a bug.
+ */
+static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
+        unsigned long phys_addr, unsigned long flags)
+{
+        unsigned long end;
+        unsigned long pfn;
+
+        address &= ~PMD_MASK;
+        end = address + size;
+        if (end > PMD_SIZE)
+                end = PMD_SIZE;
+	if (address >= end)
+		BUG();
+        pfn = phys_addr >> PAGE_SHIFT;
+        do {
+                if (!pte_none(*pte)) {
+                        printk("remap_area_pte: page already exists\n");
+			BUG();
+		}
+                set_pte(pte, pfn_pte(pfn, __pgprot(flags)));
+                address += PAGE_SIZE;
+                pfn++;
+                pte++;
+        } while (address && (address < end));
+}
+
+/*
+ * Allocate and fill the page tables below one pgd entry, clipped to
+ * the span of that entry.  Returns 0 on success or -ENOMEM if a page
+ * table could not be allocated.
+ */
+static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
+        unsigned long phys_addr, unsigned long flags)
+{
+	unsigned long end;
+
+	address &= ~PGDIR_MASK;
+	end = address + size;
+	if (end > PGDIR_SIZE)
+		end = PGDIR_SIZE;
+	/* keep phys_addr + address invariant while address advances */
+	phys_addr -= address;
+	if (address >= end)
+		BUG();
+	do {
+		pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
+		if (!pte)
+			return -ENOMEM;
+		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
+		address = (address + PMD_SIZE) & PMD_MASK;
+		pmd++;
+	} while (address && (address < end));
+	return 0;
+}
+
+/*
+ * Walk the kernel page tables and install a mapping of
+ * [phys_addr, phys_addr + size) at virtual [address, address + size)
+ * with protection 'flags'.  Returns 0 on success or -ENOMEM if a
+ * pmd/pte could not be allocated.
+ */
+static int remap_area_pages(unsigned long address, unsigned long phys_addr,
+				 unsigned long size, unsigned long flags)
+{
+	int error;
+	pgd_t * dir;
+	unsigned long end = address + size;
+
+	/* keep phys_addr + address invariant while address advances */
+	phys_addr -= address;
+	dir = pgd_offset(&init_mm, address);
+	flush_cache_all();
+	if (address >= end)
+		BUG();
+	spin_lock(&init_mm.page_table_lock);
+	do {
+		pmd_t *pmd;
+		pmd = pmd_alloc(&init_mm, dir, address);
+		error = -ENOMEM;
+		if (!pmd)
+			break;
+		if (remap_area_pmd(pmd, address, end - address,
+					 phys_addr + address, flags))
+			break;
+		error = 0;
+		address = (address + PGDIR_SIZE) & PGDIR_MASK;
+		dir++;
+	} while (address && (address < end));
+	spin_unlock(&init_mm.page_table_lock);
+	flush_tlb_all();
+	/*
+	 * Propagate -ENOMEM from a failed pmd/pte allocation.  The
+	 * previous code unconditionally returned 0 here, which made
+	 * __ioremap() hand out a partially mapped region on allocation
+	 * failure.
+	 */
+	return error;
+}
+
+/*
+ * Generic mapping function (not visible outside):
+ */
+
+/*
+ * Remap an arbitrary physical address space into the kernel virtual
+ * address space. Needed when the kernel wants to access high addresses
+ * directly.
+ */
+/*
+ * Map 'size' bytes of physical address space at 'phys_addr' into the
+ * kernel virtual address space.  Addresses below high_memory are
+ * already identity mapped and are returned directly via
+ * phys_to_virt().  Returns NULL if phys_addr is not page aligned,
+ * size is zero or the range overflows, or if no virtual area / page
+ * tables could be allocated.
+ */
+void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
+{
+	void * addr;
+	struct vm_struct * area;
+
+	if (phys_addr < virt_to_phys(high_memory))
+		return phys_to_virt(phys_addr);
+	if (phys_addr & ~PAGE_MASK)
+		return NULL;
+	size = PAGE_ALIGN(size);
+	/* reject zero size and end-of-range overflow */
+	if (!size || size > phys_addr + size)
+		return NULL;
+	area = get_vm_area(size, VM_IOREMAP);
+	if (!area)
+		return NULL;
+	addr = area->addr;
+	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
+		vfree(addr);
+		return NULL;
+	}
+	return addr;
+}
+
+/*
+ * Undo an __ioremap() mapping.  Addresses at or below high_memory
+ * came from phys_to_virt() and were never vmalloc'ed, so there is
+ * nothing to free for them.
+ */
+void iounmap(void *addr)
+{
+	if (addr <= high_memory)
+		return;
+	vfree(addr);
+}
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
new file mode 100644
index 0000000..fb187e5
--- /dev/null
+++ b/arch/s390/mm/mmap.c
@@ -0,0 +1,86 @@
+/*
+ *  linux/arch/s390/mm/mmap.c
+ *
+ *  flexible mmap layout support
+ *
+ * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ *
+ * Started by Ingo Molnar <mingo@elte.hu>
+ */
+
+#include <linux/personality.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+
+/*
+ * Top of mmap area (just below the process stack).
+ *
+ * Leave an at least ~128 MB hole.
+ */
+#define MIN_GAP (128*1024*1024)
+#define MAX_GAP (TASK_SIZE/6*5)
+
+/*
+ * Top of the mmap area: TASK_SIZE minus the stack gap, where the gap
+ * is the current stack rlimit clamped to [MIN_GAP, MAX_GAP] and
+ * rounded down to a page boundary.
+ */
+static inline unsigned long mmap_base(void)
+{
+	unsigned long stack_gap;
+
+	stack_gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
+	if (stack_gap < MIN_GAP)
+		stack_gap = MIN_GAP;
+	else if (stack_gap > MAX_GAP)
+		stack_gap = MAX_GAP;
+	return TASK_SIZE - (stack_gap & PAGE_MASK);
+}
+
+/*
+ * Decide whether this process should use the legacy bottom-up mmap
+ * layout: always for 64 bit (non TIF_31BIT) programs, and otherwise
+ * when the legacy_va_layout sysctl is set, the personality requests
+ * a compatible layout, or the stack may grow without limit.
+ */
+static inline int mmap_is_legacy(void)
+{
+#ifdef CONFIG_ARCH_S390X
+	/*
+	 * Force standard allocation for 64 bit programs.
+	 */
+	if (!test_thread_flag(TIF_31BIT))
+		return 1;
+#endif
+	return sysctl_legacy_va_layout ||
+	    (current->personality & ADDR_COMPAT_LAYOUT) ||
+	    current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY;
+}
+
+/*
+ * This function, called very early during the creation of a new
+ * process VM image, sets up which VM layout function to use:
+ */
+/*
+ * Called very early during the creation of a new process VM image to
+ * choose between the legacy (bottom-up) and flexible (top-down) mmap
+ * layouts.
+ */
+void arch_pick_mmap_layout(struct mm_struct *mm)
+{
+	if (!mmap_is_legacy()) {
+		/* Flexible layout: allocate top-down, starting just
+		 * below the stack gap. */
+		mm->mmap_base = mmap_base();
+		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+		mm->unmap_area = arch_unmap_area_topdown;
+	} else {
+		/* Legacy layout: allocate bottom-up, starting at
+		 * TASK_UNMAPPED_BASE. */
+		mm->mmap_base = TASK_UNMAPPED_BASE;
+		mm->get_unmapped_area = arch_get_unmapped_area;
+		mm->unmap_area = arch_unmap_area;
+	}
+}
+EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);
+
diff --git a/arch/s390/oprofile/Kconfig b/arch/s390/oprofile/Kconfig
new file mode 100644
index 0000000..208220a
--- /dev/null
+++ b/arch/s390/oprofile/Kconfig
@@ -0,0 +1,22 @@
+
+menu "Profiling support"
+
+config PROFILING
+	bool "Profiling support"
+	help
+	  Say Y here to enable profiling support mechanisms used by
+	  profilers such as readprofile or OProfile.
+
+
+config OPROFILE
+	tristate "OProfile system profiling"
+	depends on PROFILING
+	help
+	  OProfile is a profiling system capable of profiling the
+	  whole system, including the kernel, kernel modules, libraries,
+	  and applications.
+
+	  If unsure, say N.
+
+endmenu
+
diff --git a/arch/s390/oprofile/Makefile b/arch/s390/oprofile/Makefile
new file mode 100644
index 0000000..ec34927
--- /dev/null
+++ b/arch/s390/oprofile/Makefile
@@ -0,0 +1,9 @@
+obj-$(CONFIG_OPROFILE) += oprofile.o
+
+DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
+		oprof.o cpu_buffer.o buffer_sync.o \
+		event_buffer.o oprofile_files.o \
+		oprofilefs.o oprofile_stats.o  \
+		timer_int.o )
+
+oprofile-y				:= $(DRIVER_OBJS) init.o
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
new file mode 100644
index 0000000..a65ead0
--- /dev/null
+++ b/arch/s390/oprofile/init.c
@@ -0,0 +1,22 @@
+/**
+ * arch/s390/oprofile/init.c
+ *
+ * S390 Version
+ *   Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *   Author(s): Thomas Spatzier (tspat@de.ibm.com)
+ *
+ * @remark Copyright 2002 OProfile authors
+ */
+
+#include <linux/oprofile.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+
+/*
+ * No architecture specific profiling setup is implemented for s390;
+ * report -ENODEV so the generic oprofile code falls back to its
+ * timer based mode (timer_int.o is pulled in by the Makefile).
+ */
+int __init oprofile_arch_init(struct oprofile_operations* ops)
+{
+	return -ENODEV;
+}
+
+/* Nothing to tear down: oprofile_arch_init() set nothing up. */
+void oprofile_arch_exit(void)
+{
+}