Linux-2.6.12-rc2

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!
diff --git a/include/asm-sh64/a.out.h b/include/asm-sh64/a.out.h
new file mode 100644
index 0000000..e1995e8
--- /dev/null
+++ b/include/asm-sh64/a.out.h
@@ -0,0 +1,37 @@
+#ifndef __ASM_SH64_A_OUT_H
+#define __ASM_SH64_A_OUT_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/a.out.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ *
+ */
+
+struct exec
+{
+  unsigned long a_info;		/* Use macros N_MAGIC, etc for access */
+  unsigned a_text;		/* length of text, in bytes */
+  unsigned a_data;		/* length of data, in bytes */
+  unsigned a_bss;		/* length of uninitialized data area for file, in bytes */
+  unsigned a_syms;		/* length of symbol table data in file, in bytes */
+  unsigned a_entry;		/* start address */
+  unsigned a_trsize;		/* length of relocation info for text, in bytes */
+  unsigned a_drsize;		/* length of relocation info for data, in bytes */
+};
+
+#define N_TRSIZE(a)	((a).a_trsize)
+#define N_DRSIZE(a)	((a).a_drsize)
+#define N_SYMSIZE(a)	((a).a_syms)
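+
+/*
+ * Illustrative sketch (not part of this header): given the raw header of
+ * an a.out image, the accessor macros above pull the section sizes
+ * straight out of struct exec.  The read step here is hypothetical.
+ *
+ *	struct exec hdr;
+ *	// ... read sizeof(hdr) bytes from the start of the image into hdr ...
+ *	unsigned text_relocs = N_TRSIZE(hdr);	// relocation info for text
+ *	unsigned sym_bytes   = N_SYMSIZE(hdr);	// symbol table data
+ */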
+
+#ifdef __KERNEL__
+
+#define STACK_TOP	TASK_SIZE
+
+#endif
+
+#endif /* __ASM_SH64_A_OUT_H */
diff --git a/include/asm-sh64/atomic.h b/include/asm-sh64/atomic.h
new file mode 100644
index 0000000..8c3872d
--- /dev/null
+++ b/include/asm-sh64/atomic.h
@@ -0,0 +1,126 @@
+#ifndef __ASM_SH64_ATOMIC_H
+#define __ASM_SH64_ATOMIC_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/atomic.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ * Copyright (C) 2003  Paul Mundt
+ *
+ */
+
+/*
+ * Atomic operations that C can't guarantee us.  Useful for
+ * resource counting etc.
+ *
+ */
+
+typedef struct { volatile int counter; } atomic_t;
+
+#define ATOMIC_INIT(i)	( (atomic_t) { (i) } )
+
+#define atomic_read(v)		((v)->counter)
+#define atomic_set(v,i)		((v)->counter = (i))
+
+#include <asm/system.h>
+
+/*
+ * These primitives are implemented by masking interrupts around a
+ * plain read-modify-write of the counter.
+ */
+
+static __inline__ void atomic_add(int i, atomic_t * v)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	*(long *)v += i;
+	local_irq_restore(flags);
+}
+
+static __inline__ void atomic_sub(int i, atomic_t *v)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	*(long *)v -= i;
+	local_irq_restore(flags);
+}
+
+static __inline__ int atomic_add_return(int i, atomic_t * v)
+{
+	unsigned long temp, flags;
+
+	local_irq_save(flags);
+	temp = *(long *)v;
+	temp += i;
+	*(long *)v = temp;
+	local_irq_restore(flags);
+
+	return temp;
+}
+
+#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
+
+static __inline__ int atomic_sub_return(int i, atomic_t * v)
+{
+	unsigned long temp, flags;
+
+	local_irq_save(flags);
+	temp = *(long *)v;
+	temp -= i;
+	*(long *)v = temp;
+	local_irq_restore(flags);
+
+	return temp;
+}
+
+#define atomic_dec_return(v) atomic_sub_return(1,(v))
+#define atomic_inc_return(v) atomic_add_return(1,(v))
+
+/*
+ * atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
+
+#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
+#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
+
+#define atomic_inc(v) atomic_add(1,(v))
+#define atomic_dec(v) atomic_sub(1,(v))
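+
+/*
+ * Usage sketch (illustrative only): the classic reference-count idiom,
+ * built from the primitives above.  release_obj() is hypothetical.
+ *
+ *	static atomic_t refs = ATOMIC_INIT(1);
+ *
+ *	void get_obj(void) { atomic_inc(&refs); }
+ *	void put_obj(void) { if (atomic_dec_and_test(&refs)) release_obj(); }
+ */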
+
+static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	*(long *)v &= ~mask;
+	local_irq_restore(flags);
+}
+
+static __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	*(long *)v |= mask;
+	local_irq_restore(flags);
+}
+
+/* Atomic operations are already serializing on SH */
+#define smp_mb__before_atomic_dec()	barrier()
+#define smp_mb__after_atomic_dec()	barrier()
+#define smp_mb__before_atomic_inc()	barrier()
+#define smp_mb__after_atomic_inc()	barrier()
+
+#endif /* __ASM_SH64_ATOMIC_H */
diff --git a/include/asm-sh64/bitops.h b/include/asm-sh64/bitops.h
new file mode 100644
index 0000000..e1ff63e
--- /dev/null
+++ b/include/asm-sh64/bitops.h
@@ -0,0 +1,516 @@
+#ifndef __ASM_SH64_BITOPS_H
+#define __ASM_SH64_BITOPS_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/bitops.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ * Copyright (C) 2003  Paul Mundt
+ */
+
+#ifdef __KERNEL__
+#include <linux/compiler.h>
+#include <asm/system.h>
+/* For __swab32 */
+#include <asm/byteorder.h>
+
+static __inline__ void set_bit(int nr, volatile void * addr)
+{
+	int	mask;
+	volatile unsigned int *a = addr;
+	unsigned long flags;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+	local_irq_save(flags);
+	*a |= mask;
+	local_irq_restore(flags);
+}
+
+static inline void __set_bit(int nr, void *addr)
+{
+	int	mask;
+	unsigned int *a = addr;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+	*a |= mask;
+}
+
+/*
+ * clear_bit() doesn't provide any barrier for the compiler.
+ */
+#define smp_mb__before_clear_bit()	barrier()
+#define smp_mb__after_clear_bit()	barrier()
+static inline void clear_bit(int nr, volatile unsigned long *a)
+{
+	int	mask;
+	unsigned long flags;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+	local_irq_save(flags);
+	*a &= ~mask;
+	local_irq_restore(flags);
+}
+
+static inline void __clear_bit(int nr, volatile unsigned long *a)
+{
+	int	mask;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+	*a &= ~mask;
+}
+
+static __inline__ void change_bit(int nr, volatile void * addr)
+{
+	int	mask;
+	volatile unsigned int *a = addr;
+	unsigned long flags;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+	local_irq_save(flags);
+	*a ^= mask;
+	local_irq_restore(flags);
+}
+
+static __inline__ void __change_bit(int nr, volatile void * addr)
+{
+	int	mask;
+	volatile unsigned int *a = addr;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+	*a ^= mask;
+}
+
+static __inline__ int test_and_set_bit(int nr, volatile void * addr)
+{
+	int	mask, retval;
+	volatile unsigned int *a = addr;
+	unsigned long flags;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+	local_irq_save(flags);
+	retval = (mask & *a) != 0;
+	*a |= mask;
+	local_irq_restore(flags);
+
+	return retval;
+}
+
+static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
+{
+	int	mask, retval;
+	volatile unsigned int *a = addr;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+	retval = (mask & *a) != 0;
+	*a |= mask;
+
+	return retval;
+}
+
+static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
+{
+	int	mask, retval;
+	volatile unsigned int *a = addr;
+	unsigned long flags;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+	local_irq_save(flags);
+	retval = (mask & *a) != 0;
+	*a &= ~mask;
+	local_irq_restore(flags);
+
+	return retval;
+}
+
+static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
+{
+	int	mask, retval;
+	volatile unsigned int *a = addr;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+	retval = (mask & *a) != 0;
+	*a &= ~mask;
+
+	return retval;
+}
+
+static __inline__ int test_and_change_bit(int nr, volatile void * addr)
+{
+	int	mask, retval;
+	volatile unsigned int *a = addr;
+	unsigned long flags;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+	local_irq_save(flags);
+	retval = (mask & *a) != 0;
+	*a ^= mask;
+	local_irq_restore(flags);
+
+	return retval;
+}
+
+static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
+{
+	int	mask, retval;
+	volatile unsigned int *a = addr;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+	retval = (mask & *a) != 0;
+	*a ^= mask;
+
+	return retval;
+}
+
+static __inline__ int test_bit(int nr, const volatile void *addr)
+{
+	return 1UL & (((const volatile unsigned int *) addr)[nr >> 5] >> (nr & 31));
+}
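+
+/*
+ * Usage sketch (illustrative; 'pending' and handle_pending() are
+ * hypothetical): the bit number indexes a flat array of 32-bit words
+ * (nr >> 5 selects the word), so a multi-word bitmap is just an array.
+ *
+ *	static unsigned long pending[2];
+ *
+ *	set_bit(37, pending);			// word 1, bit 5
+ *	if (test_and_clear_bit(37, pending))
+ *		handle_pending();
+ */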
+
+static __inline__ unsigned long ffz(unsigned long word)
+{
+	unsigned long result, __d2, __d3;
+
+        __asm__("gettr  tr0, %2\n\t"
+                "pta    $+32, tr0\n\t"
+                "andi   %1, 1, %3\n\t"
+                "beq    %3, r63, tr0\n\t"
+                "pta    $+4, tr0\n"
+                "0:\n\t"
+                "shlri.l        %1, 1, %1\n\t"
+                "addi   %0, 1, %0\n\t"
+                "andi   %1, 1, %3\n\t"
+                "beqi   %3, 1, tr0\n"
+                "1:\n\t"
+                "ptabs  %2, tr0\n\t"
+                : "=r" (result), "=r" (word), "=r" (__d2), "=r" (__d3)
+                : "0" (0L), "1" (word));
+
+	return result;
+}
+
+/**
+ * __ffs - find first bit in word
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static inline unsigned long __ffs(unsigned long word)
+{
+	int r = 0;
+
+	if (!word)
+		return 0;
+	if (!(word & 0xffff)) {
+		word >>= 16;
+		r += 16;
+	}
+	if (!(word & 0xff)) {
+		word >>= 8;
+		r += 8;
+	}
+	if (!(word & 0xf)) {
+		word >>= 4;
+		r += 4;
+	}
+	if (!(word & 3)) {
+		word >>= 2;
+		r += 2;
+	}
+	if (!(word & 1)) {
+		word >>= 1;
+		r += 1;
+	}
+	return r;
+}
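+
+/*
+ * Worked example of the binary search above: __ffs(0x50).  The low 16 and
+ * low 8 bits are not all clear, but the low 4 are, so word >>= 4 and
+ * r = 4; the shifted value 0x5 has bit 0 set, so no further steps fire
+ * and __ffs returns 4 - the index of the lowest set bit of 0x50.
+ */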
+
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @size: The maximum size to search
+ * @offset: The bit number to start searching at
+ */
+static inline unsigned long find_next_bit(const unsigned long *addr,
+	unsigned long size, unsigned long offset)
+{
+	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
+	unsigned int result = offset & ~31UL;
+	unsigned int tmp;
+
+	if (offset >= size)
+		return size;
+	size -= result;
+	offset &= 31UL;
+	if (offset) {
+		tmp = *p++;
+		tmp &= ~0UL << offset;
+		if (size < 32)
+			goto found_first;
+		if (tmp)
+			goto found_middle;
+		size -= 32;
+		result += 32;
+	}
+	while (size >= 32) {
+		if ((tmp = *p++) != 0)
+			goto found_middle;
+		result += 32;
+		size -= 32;
+	}
+	if (!size)
+		return result;
+	tmp = *p;
+
+found_first:
+	tmp &= ~0UL >> (32 - size);
+	if (tmp == 0UL)        /* Are any bits set? */
+		return result + size; /* Nope. */
+found_middle:
+	return result + __ffs(tmp);
+}
+
+/**
+ * find_first_bit - find the first set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit-number of the first set bit, not the number of the byte
+ * containing a bit.
+ */
+#define find_first_bit(addr, size) \
+	find_next_bit((addr), (size), 0)
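+
+/*
+ * Typical iteration sketch (illustrative; 'bitmap', NBITS and handle()
+ * are hypothetical): walk every set bit with the pair above.
+ *
+ *	unsigned long bit;
+ *	for (bit = find_first_bit(bitmap, NBITS);
+ *	     bit < NBITS;
+ *	     bit = find_next_bit(bitmap, NBITS, bit + 1))
+ *		handle(bit);
+ */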
+
+
+static inline int find_next_zero_bit(void *addr, int size, int offset)
+{
+	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
+	unsigned long result = offset & ~31UL;
+	unsigned long tmp;
+
+	if (offset >= size)
+		return size;
+	size -= result;
+	offset &= 31UL;
+	if (offset) {
+		tmp = *(p++);
+		tmp |= ~0UL >> (32-offset);
+		if (size < 32)
+			goto found_first;
+		if (~tmp)
+			goto found_middle;
+		size -= 32;
+		result += 32;
+	}
+	while (size & ~31UL) {
+		if (~(tmp = *(p++)))
+			goto found_middle;
+		result += 32;
+		size -= 32;
+	}
+	if (!size)
+		return result;
+	tmp = *p;
+
+found_first:
+	tmp |= ~0UL << size;
+found_middle:
+	return result + ffz(tmp);
+}
+
+#define find_first_zero_bit(addr, size) \
+        find_next_zero_bit((addr), (size), 0)
+
+/*
+ * hweightN: returns the hamming weight (i.e. the number
+ * of bits set) of an N-bit word
+ */
+
+#define hweight32(x)	generic_hweight32(x)
+#define hweight16(x)	generic_hweight16(x)
+#define hweight8(x)	generic_hweight8(x)
+
+/*
+ * Every architecture must define this function. It's the fastest
+ * way of searching a 140-bit bitmap where the first 100 bits are
+ * unlikely to be set. It's guaranteed that at least one of the 140
+ * bits is cleared.
+ */
+
+static inline int sched_find_first_bit(unsigned long *b)
+{
+	if (unlikely(b[0]))
+		return __ffs(b[0]);
+	if (unlikely(b[1]))
+		return __ffs(b[1]) + 32;
+	if (unlikely(b[2]))
+		return __ffs(b[2]) + 64;
+	if (b[3])
+		return __ffs(b[3]) + 96;
+	return __ffs(b[4]) + 128;
+}
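+
+/*
+ * Worked numbers: 140 bits span five 32-bit words (5 * 32 = 160), so the
+ * unrolled tests above cover b[0]..b[3] and fall through to b[4].  If only
+ * bit 100 were set, b[0..2] would be zero and b[3] would have bit 4 set,
+ * giving __ffs(b[3]) + 96 = 4 + 96 = 100.
+ */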
+
+/*
+ * ffs: find first bit set. This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ */
+
+#define ffs(x) generic_ffs(x)
+
+#ifdef __LITTLE_ENDIAN__
+#define ext2_set_bit(nr, addr) test_and_set_bit((nr), (addr))
+#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr), (addr))
+#define ext2_test_bit(nr, addr) test_bit((nr), (addr))
+#define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr), (size))
+#define ext2_find_next_zero_bit(addr, size, offset) \
+                find_next_zero_bit((addr), (size), (offset))
+#else
+static __inline__ int ext2_set_bit(int nr, volatile void * addr)
+{
+	int		mask, retval;
+	unsigned long	flags;
+	volatile unsigned char	*ADDR = (unsigned char *) addr;
+
+	ADDR += nr >> 3;
+	mask = 1 << (nr & 0x07);
+	local_irq_save(flags);
+	retval = (mask & *ADDR) != 0;
+	*ADDR |= mask;
+	local_irq_restore(flags);
+	return retval;
+}
+
+static __inline__ int ext2_clear_bit(int nr, volatile void * addr)
+{
+	int		mask, retval;
+	unsigned long	flags;
+	volatile unsigned char	*ADDR = (unsigned char *) addr;
+
+	ADDR += nr >> 3;
+	mask = 1 << (nr & 0x07);
+	local_irq_save(flags);
+	retval = (mask & *ADDR) != 0;
+	*ADDR &= ~mask;
+	local_irq_restore(flags);
+	return retval;
+}
+
+static __inline__ int ext2_test_bit(int nr, const volatile void * addr)
+{
+	int			mask;
+	const volatile unsigned char	*ADDR = (const unsigned char *) addr;
+
+	ADDR += nr >> 3;
+	mask = 1 << (nr & 0x07);
+	return ((mask & *ADDR) != 0);
+}
+
+#define ext2_find_first_zero_bit(addr, size) \
+        ext2_find_next_zero_bit((addr), (size), 0)
+
+static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
+{
+	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
+	unsigned long result = offset & ~31UL;
+	unsigned long tmp;
+
+	if (offset >= size)
+		return size;
+	size -= result;
+	offset &= 31UL;
+	if(offset) {
+		/* We hold the little endian value in tmp, but then the
+		 * shift is illegal. So we could keep a big endian value
+		 * in tmp, like this:
+		 *
+		 * tmp = __swab32(*(p++));
+		 * tmp |= ~0UL >> (32-offset);
+		 *
+		 * but this would decrease performance, so we change the
+		 * shift:
+		 */
+		tmp = *(p++);
+		tmp |= __swab32(~0UL >> (32-offset));
+		if(size < 32)
+			goto found_first;
+		if(~tmp)
+			goto found_middle;
+		size -= 32;
+		result += 32;
+	}
+	while(size & ~31UL) {
+		if(~(tmp = *(p++)))
+			goto found_middle;
+		result += 32;
+		size -= 32;
+	}
+	if(!size)
+		return result;
+	tmp = *p;
+
+found_first:
+	/* tmp is little endian, so we would have to swab the shift,
+	 * see above. But then we have to swab tmp below for ffz, so
+	 * we might as well do this here.
+	 */
+	return result + ffz(__swab32(tmp) | (~0UL << size));
+found_middle:
+	return result + ffz(__swab32(tmp));
+}
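+
+/*
+ * Worked example of the shift trick above, for offset = 8: the plain mask
+ * ~0UL >> (32-8) = 0x000000ff covers bits 0..7, and __swab32(0x000000ff) =
+ * 0xff000000 moves it onto the byte where a big-endian CPU sees the first
+ * on-disk (little-endian) byte - so ext2 bits 0..7 are marked "already
+ * seen" without byte-swapping the bitmap word itself.
+ */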
+#endif
+
+#define ext2_set_bit_atomic(lock, nr, addr)		\
+	({						\
+		int ret;				\
+		spin_lock(lock);			\
+		ret = ext2_set_bit((nr), (addr));	\
+		spin_unlock(lock);			\
+		ret;					\
+	})
+
+#define ext2_clear_bit_atomic(lock, nr, addr)		\
+	({						\
+		int ret;				\
+		spin_lock(lock);			\
+		ret = ext2_clear_bit((nr), (addr));	\
+		spin_unlock(lock);			\
+		ret;					\
+	})
+
+/* Bitmap functions for the minix filesystem.  */
+#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
+#define minix_set_bit(nr,addr) set_bit(nr,addr)
+#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
+#define minix_test_bit(nr,addr) test_bit(nr,addr)
+#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
+
+#define fls(x)	generic_fls(x)
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_SH64_BITOPS_H */
diff --git a/include/asm-sh64/bug.h b/include/asm-sh64/bug.h
new file mode 100644
index 0000000..3acd54d
--- /dev/null
+++ b/include/asm-sh64/bug.h
@@ -0,0 +1,32 @@
+#ifndef __ASM_SH64_BUG_H
+#define __ASM_SH64_BUG_H
+
+#include <linux/config.h>
+
+/*
+ * Tell the user there is some problem, then force a segfault (in process
+ * context) or a panic (interrupt context).
+ */
+#define BUG() do { \
+	printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
+	*(volatile int *)0 = 0; \
+} while (0)
+
+#define BUG_ON(condition) do { \
+	if (unlikely((condition)!=0)) \
+		BUG(); \
+} while(0)
+
+#define PAGE_BUG(page) do { \
+	BUG(); \
+} while (0)
+
+#define WARN_ON(condition) do { \
+	if (unlikely((condition)!=0)) { \
+		printk("Badness in %s at %s:%d\n", __FUNCTION__, __FILE__, __LINE__); \
+		dump_stack(); \
+	} \
+} while (0)
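+
+/*
+ * Usage sketch (illustrative; 'ptr' and 'count' are hypothetical):
+ * BUG_ON() is for invariants that must never fail - it oopses via the
+ * NULL store above - while WARN_ON() logs and dumps a stack trace but
+ * lets execution continue.
+ *
+ *	BUG_ON(ptr == NULL);
+ *	WARN_ON(count < 0);
+ */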
+
+#endif /* __ASM_SH64_BUG_H */
+
diff --git a/include/asm-sh64/bugs.h b/include/asm-sh64/bugs.h
new file mode 100644
index 0000000..05554aa
--- /dev/null
+++ b/include/asm-sh64/bugs.h
@@ -0,0 +1,38 @@
+#ifndef __ASM_SH64_BUGS_H
+#define __ASM_SH64_BUGS_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/bugs.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ * Copyright (C) 2003  Paul Mundt
+ *
+ */
+
+/*
+ * This is included by init/main.c to check for architecture-dependent bugs.
+ *
+ * Needs:
+ *	void check_bugs(void);
+ */
+
+/*
+ * I don't know of any Super-H bugs yet.
+ */
+
+#include <asm/processor.h>
+
+static void __init check_bugs(void)
+{
+	extern char *get_cpu_subtype(void);
+	extern unsigned long loops_per_jiffy;
+
+	cpu_data->loops_per_jiffy = loops_per_jiffy;
+
+	printk("CPU: %s\n", get_cpu_subtype());
+}
+#endif /* __ASM_SH64_BUGS_H */
diff --git a/include/asm-sh64/byteorder.h b/include/asm-sh64/byteorder.h
new file mode 100644
index 0000000..f602ebe
--- /dev/null
+++ b/include/asm-sh64/byteorder.h
@@ -0,0 +1,49 @@
+#ifndef __ASM_SH64_BYTEORDER_H
+#define __ASM_SH64_BYTEORDER_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/byteorder.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ *
+ */
+
+#include <asm/types.h>
+
+static __inline__ __const__ __u32 ___arch__swab32(__u32 x)
+{
+	__asm__("byterev	%0, %0\n\t"
+		"shari		%0, 32, %0"
+		: "=r" (x)
+		: "0" (x));
+	return x;
+}
+
+static __inline__ __const__ __u16 ___arch__swab16(__u16 x)
+{
+	__asm__("byterev	%0, %0\n\t"
+		"shari		%0, 48, %0"
+		: "=r" (x)
+		: "0" (x));
+	return x;
+}
+
+#define __arch__swab32(x) ___arch__swab32(x)
+#define __arch__swab16(x) ___arch__swab16(x)
+
+#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
+#  define __BYTEORDER_HAS_U64__
+#  define __SWAB_64_THRU_32__
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#include <linux/byteorder/little_endian.h>
+#else
+#include <linux/byteorder/big_endian.h>
+#endif
+
+#endif /* __ASM_SH64_BYTEORDER_H */
diff --git a/include/asm-sh64/cache.h b/include/asm-sh64/cache.h
new file mode 100644
index 0000000..f54e85e
--- /dev/null
+++ b/include/asm-sh64/cache.h
@@ -0,0 +1,141 @@
+#ifndef __ASM_SH64_CACHE_H
+#define __ASM_SH64_CACHE_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/cache.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ * Copyright (C) 2003, 2004  Paul Mundt
+ *
+ */
+#include <asm/cacheflush.h>
+
+#define L1_CACHE_SHIFT		5
+/* bytes per L1 cache line */
+#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
+#define L1_CACHE_ALIGN_MASK	(~(L1_CACHE_BYTES - 1))
+#define L1_CACHE_ALIGN(x)	(((x)+(L1_CACHE_BYTES - 1)) & L1_CACHE_ALIGN_MASK)
+#define L1_CACHE_SIZE_BYTES	(L1_CACHE_BYTES << 10)
+/* Largest L1 which this arch supports */
+#define L1_CACHE_SHIFT_MAX	5
+
+#ifdef MODULE
+#define __cacheline_aligned __attribute__((__aligned__(L1_CACHE_BYTES)))
+#else
+#define __cacheline_aligned					\
+  __attribute__((__aligned__(L1_CACHE_BYTES),			\
+		 __section__(".data.cacheline_aligned")))
+#endif
+
+/*
+ * Control Registers.
+ */
+#define ICCR_BASE	0x01600000	/* Instruction Cache Control Register */
+#define ICCR_REG0	0		/* Register 0 offset */
+#define ICCR_REG1	1		/* Register 1 offset */
+#define ICCR0		(ICCR_BASE+ICCR_REG0)
+#define ICCR1		(ICCR_BASE+ICCR_REG1)
+
+#define ICCR0_OFF	0x0		/* Set ICACHE off */
+#define ICCR0_ON	0x1		/* Set ICACHE on */
+#define ICCR0_ICI	0x2		/* Invalidate all in IC */
+
+#define ICCR1_NOLOCK	0x0		/* Set No Locking */
+
+#define OCCR_BASE	0x01E00000	/* Operand Cache Control Register */
+#define OCCR_REG0	0		/* Register 0 offset */
+#define OCCR_REG1	1		/* Register 1 offset */
+#define OCCR0		(OCCR_BASE+OCCR_REG0)
+#define OCCR1		(OCCR_BASE+OCCR_REG1)
+
+#define OCCR0_OFF	0x0		/* Set OCACHE off */
+#define OCCR0_ON	0x1		/* Set OCACHE on */
+#define OCCR0_OCI	0x2		/* Invalidate all in OC */
+#define OCCR0_WT	0x4		/* Set OCACHE in WT Mode */
+#define OCCR0_WB	0x0		/* Set OCACHE in WB Mode */
+
+#define OCCR1_NOLOCK	0x0		/* Set No Locking */
+
+
+/*
+ * SH-5
+ * A bit of description here, for neff=32.
+ *
+ *                               |<--- tag  (19 bits) --->|
+ * +-----------------------------+-----------------+------+----------+------+
+ * |                             |                 | ways |set index |offset|
+ * +-----------------------------+-----------------+------+----------+------+
+ *                                ^                 2 bits   8 bits   5 bits
+ *                                +- Bit 31
+ *
+ * Cacheline size is based on offset: 5 bits = 32 bytes per line
+ * A cache line is identified by a tag + set but OCACHETAG/ICACHETAG
+ * have a broader space for registers. These are outlined by
+ * CACHE_?C_*_STEP below.
+ *
+ */
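+
+/*
+ * Worked numbers for the layout above: 8 set-index bits give 256 sets,
+ * 2 way bits give 4 ways, and a 5-bit offset gives 32-byte lines, so the
+ * cache holds 4 * 256 * 32 = 32KB.
+ */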
+
+/* Valid and Dirty bits */
+#define SH_CACHE_VALID		(1LL<<0)
+#define SH_CACHE_UPDATED	(1LL<<57)
+
+/* Cache flags */
+#define SH_CACHE_MODE_WT	(1LL<<0)
+#define SH_CACHE_MODE_WB	(1LL<<1)
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Cache information structure.
+ *
+ * Defined for both I and D cache, per-processor.
+ */
+struct cache_info {
+	unsigned int ways;
+	unsigned int sets;
+	unsigned int linesz;
+
+	unsigned int way_shift;
+	unsigned int entry_shift;
+	unsigned int set_shift;
+	unsigned int way_step_shift;
+	unsigned int asid_shift;
+
+	unsigned int way_ofs;
+
+	unsigned int asid_mask;
+	unsigned int idx_mask;
+	unsigned int epn_mask;
+
+	unsigned long flags;
+};
+
+#endif /* __ASSEMBLY__ */
+
+/* Instruction cache */
+#define CACHE_IC_ADDRESS_ARRAY 0x01000000
+
+/* Operand Cache */
+#define CACHE_OC_ADDRESS_ARRAY 0x01800000
+
+/* These declarations relate to cache 'synonyms' in the operand cache.  A
+   'synonym' occurs where effective address bits overlap between those used for
+   indexing the cache sets and those passed to the MMU for translation.  In the
+   case of SH5-101 & SH5-103, only bit 12 is affected for 4k pages. */
+
+#define CACHE_OC_N_SYNBITS  1               /* Number of synonym bits */
+#define CACHE_OC_SYN_SHIFT  12
+/* Mask to select synonym bit(s) */
+#define CACHE_OC_SYN_MASK   (((1UL<<CACHE_OC_N_SYNBITS)-1)<<CACHE_OC_SYN_SHIFT)
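+
+/*
+ * Worked value: with CACHE_OC_N_SYNBITS = 1 and CACHE_OC_SYN_SHIFT = 12,
+ * CACHE_OC_SYN_MASK = ((1 << 1) - 1) << 12 = 0x1000, i.e. exactly bit 12,
+ * the one effective-address bit that both indexes the cache and takes
+ * part in translation for 4k pages.
+ */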
+
+
+/*
+ * Instruction cache can't be invalidated based on physical addresses.
+ * No Instruction Cache defines required, then.
+ */
+
+#endif /* __ASM_SH64_CACHE_H */
diff --git a/include/asm-sh64/cacheflush.h b/include/asm-sh64/cacheflush.h
new file mode 100644
index 0000000..55f71aa
--- /dev/null
+++ b/include/asm-sh64/cacheflush.h
@@ -0,0 +1,48 @@
+#ifndef __ASM_SH64_CACHEFLUSH_H
+#define __ASM_SH64_CACHEFLUSH_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/page.h>
+
+struct vm_area_struct;
+struct page;
+struct mm_struct;
+
+extern void flush_cache_all(void);
+extern void flush_cache_mm(struct mm_struct *mm);
+extern void flush_cache_sigtramp(unsigned long start, unsigned long end);
+extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+			      unsigned long end);
+extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
+extern void flush_dcache_page(struct page *pg);
+extern void flush_icache_range(unsigned long start, unsigned long end);
+extern void flush_icache_user_range(struct vm_area_struct *vma,
+				    struct page *page, unsigned long addr,
+				    int len);
+
+#define flush_dcache_mmap_lock(mapping)		do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
+
+#define flush_cache_vmap(start, end)		flush_cache_all()
+#define flush_cache_vunmap(start, end)		flush_cache_all()
+
+#define flush_icache_page(vma, page)	do { } while (0)
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+	do {							\
+		flush_cache_page(vma, vaddr, page_to_pfn(page));\
+		memcpy(dst, src, len);				\
+		flush_icache_user_range(vma, page, vaddr, len);	\
+	} while (0)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+	do {							\
+		flush_cache_page(vma, vaddr, page_to_pfn(page));\
+		memcpy(dst, src, len);				\
+	} while (0)
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_SH64_CACHEFLUSH_H */
+
diff --git a/include/asm-sh64/cayman.h b/include/asm-sh64/cayman.h
new file mode 100644
index 0000000..7b6b9684
--- /dev/null
+++ b/include/asm-sh64/cayman.h
@@ -0,0 +1,20 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/cayman.h
+ *
+ * Cayman definitions
+ *
+ * Global definitions for the SH5 Cayman board
+ *
+ * Copyright (C) 2002 Stuart Menefy
+ */
+
+
+/* Setup for the SMSC FDC37C935 / LAN91C100FD */
+#define SMSC_IRQ         IRQ_IRL1
+
+/* Setup for PCI Bus 2, which transmits interrupts via the EPLD */
+#define PCI2_IRQ         IRQ_IRL3
diff --git a/include/asm-sh64/checksum.h b/include/asm-sh64/checksum.h
new file mode 100644
index 0000000..aa3911a
--- /dev/null
+++ b/include/asm-sh64/checksum.h
@@ -0,0 +1,95 @@
+#ifndef __ASM_SH64_CHECKSUM_H
+#define __ASM_SH64_CHECKSUM_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/checksum.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ *
+ */
+
+#include <asm/registers.h>
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+asmlinkage unsigned int csum_partial(const unsigned char *buff, int len,
+				     unsigned int sum);
+
+/*
+ *	Note: when you get a NULL pointer exception here, this means someone
+ *	passed in an incorrect kernel address to one of these functions.
+ *
+ *	If you use these functions directly please don't forget the
+ *	verify_area().
+ */
+
+
+unsigned int csum_partial_copy_nocheck(const char *src, char *dst, int len,
+				       unsigned int sum);
+
+unsigned int csum_partial_copy_from_user(const char *src, char *dst,
+					 int len, int sum, int *err_ptr);
+
+/*
+ * These are the old (and unsafe) way of doing checksums; a warning message
+ * will be printed if they are used and an exception occurs.
+ *
+ * these functions should go away after some time.
+ */
+
+#define csum_partial_copy_fromuser csum_partial_copy
+
+unsigned int csum_partial_copy(const char *src, char *dst, int len,
+			       unsigned int sum);
+
+static inline unsigned short csum_fold(unsigned int sum)
+{
+        sum = (sum & 0xffff) + (sum >> 16);
+        sum = (sum & 0xffff) + (sum >> 16);
+        return ~(sum);
+}
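+
+/*
+ * Worked example of the fold above, for sum = 0x1a2b3c4d:
+ * pass 1: 0x3c4d + 0x1a2b = 0x5678 (no carry into bit 16);
+ * pass 2 leaves 0x5678 unchanged, and the complemented low 16 bits,
+ * ~0x5678 = 0xa987, are returned as the checksum.
+ */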
+
+unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl);
+
+unsigned long csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
+				 unsigned short len, unsigned short proto,
+				 unsigned int sum);
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
+						   unsigned long daddr,
+						   unsigned short len,
+						   unsigned short proto,
+						   unsigned int sum)
+{
+	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
+}
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+static inline unsigned short ip_compute_csum(unsigned char * buff, int len)
+{
+	return csum_fold(csum_partial(buff, len, 0));
+}
+
+#endif /* __ASM_SH64_CHECKSUM_H */
+
diff --git a/include/asm-sh64/cpumask.h b/include/asm-sh64/cpumask.h
new file mode 100644
index 0000000..b7b105d
--- /dev/null
+++ b/include/asm-sh64/cpumask.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_SH64_CPUMASK_H
+#define __ASM_SH64_CPUMASK_H
+
+#include <asm-generic/cpumask.h>
+
+#endif /* __ASM_SH64_CPUMASK_H */
diff --git a/include/asm-sh64/cputime.h b/include/asm-sh64/cputime.h
new file mode 100644
index 0000000..0fd89da
--- /dev/null
+++ b/include/asm-sh64/cputime.h
@@ -0,0 +1,6 @@
+#ifndef __SH64_CPUTIME_H
+#define __SH64_CPUTIME_H
+
+#include <asm-generic/cputime.h>
+
+#endif /* __SH64_CPUTIME_H */
diff --git a/include/asm-sh64/current.h b/include/asm-sh64/current.h
new file mode 100644
index 0000000..26122433
--- /dev/null
+++ b/include/asm-sh64/current.h
@@ -0,0 +1,28 @@
+#ifndef __ASM_SH64_CURRENT_H
+#define __ASM_SH64_CURRENT_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/current.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ * Copyright (C) 2003  Paul Mundt
+ *
+ */
+
+#include <linux/thread_info.h>
+
+struct task_struct;
+
+static __inline__ struct task_struct * get_current(void)
+{
+	return current_thread_info()->task;
+}
+
+#define current get_current()
+
+#endif /* __ASM_SH64_CURRENT_H */
+
diff --git a/include/asm-sh64/delay.h b/include/asm-sh64/delay.h
new file mode 100644
index 0000000..6ae3130
--- /dev/null
+++ b/include/asm-sh64/delay.h
@@ -0,0 +1,11 @@
+#ifndef __ASM_SH64_DELAY_H
+#define __ASM_SH64_DELAY_H
+
+extern void __delay(int loops);
+extern void __udelay(unsigned long long usecs, unsigned long lpj);
+extern void __ndelay(unsigned long long nsecs, unsigned long lpj);
+extern void udelay(unsigned long usecs);
+extern void ndelay(unsigned long nsecs);
+
+#endif /* __ASM_SH64_DELAY_H */
+
diff --git a/include/asm-sh64/div64.h b/include/asm-sh64/div64.h
new file mode 100644
index 0000000..f758695
--- /dev/null
+++ b/include/asm-sh64/div64.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_SH64_DIV64_H
+#define __ASM_SH64_DIV64_H
+
+#include <asm-generic/div64.h>
+
+#endif /* __ASM_SH64_DIV64_H */
diff --git a/include/asm-sh64/dma-mapping.h b/include/asm-sh64/dma-mapping.h
new file mode 100644
index 0000000..b8d26fe
--- /dev/null
+++ b/include/asm-sh64/dma-mapping.h
@@ -0,0 +1,162 @@
+#ifndef __ASM_SH64_DMA_MAPPING_H
+#define __ASM_SH64_DMA_MAPPING_H
+
+#include <linux/config.h>
+#include <linux/mm.h>
+#include <asm/scatterlist.h>
+#include <asm/io.h>
+
+struct pci_dev;
+extern void *consistent_alloc(struct pci_dev *hwdev, size_t size,
+				    dma_addr_t *dma_handle);
+extern void consistent_free(struct pci_dev *hwdev, size_t size,
+				  void *vaddr, dma_addr_t dma_handle);
+
+#define dma_supported(dev, mask)	(1)
+
+static inline int dma_set_mask(struct device *dev, u64 mask)
+{
+	if (!dev->dma_mask || !dma_supported(dev, mask))
+		return -EIO;
+
+	*dev->dma_mask = mask;
+
+	return 0;
+}
+
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+			 dma_addr_t *dma_handle, int flag)
+{
+	return consistent_alloc(NULL, size, dma_handle);
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+		       void *vaddr, dma_addr_t dma_handle)
+{
+	consistent_free(NULL, size, vaddr, dma_handle);
+}
+
+static inline void dma_cache_sync(void *vaddr, size_t size,
+				  enum dma_data_direction dir)
+{
+	dma_cache_wback_inv((unsigned long)vaddr, size);
+}
+
+static inline dma_addr_t dma_map_single(struct device *dev,
+					void *ptr, size_t size,
+					enum dma_data_direction dir)
+{
+#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
+	if (dev->bus == &pci_bus_type)
+		return virt_to_bus(ptr);
+#endif
+	dma_cache_sync(ptr, size, dir);
+
+	return virt_to_bus(ptr);
+}
+
+#define dma_unmap_single(dev, addr, size, dir)	do { } while (0)
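+
+/*
+ * Usage sketch (illustrative; 'dev', SZ and the device programming step
+ * are hypothetical):
+ *
+ *	void *buf = kmalloc(SZ, GFP_KERNEL);
+ *	dma_addr_t handle = dma_map_single(dev, buf, SZ, DMA_TO_DEVICE);
+ *	// ... hand 'handle' to the device and start the transfer ...
+ *	dma_unmap_single(dev, handle, SZ, DMA_TO_DEVICE);
+ */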
+
+static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
+			     int nents, enum dma_data_direction dir)
+{
+	int i;
+
+	for (i = 0; i < nents; i++) {
+#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
+		dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
+			       sg[i].length, dir);
+#endif
+		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
+	}
+
+	return nents;
+}
+
+#define dma_unmap_sg(dev, sg, nents, dir)	do { } while (0)
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+				      unsigned long offset, size_t size,
+				      enum dma_data_direction dir)
+{
+	return dma_map_single(dev, page_address(page) + offset, size, dir);
+}
+
+static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
+				  size_t size, enum dma_data_direction dir)
+{
+	dma_unmap_single(dev, dma_address, size, dir);
+}
+
+static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
+				   size_t size, enum dma_data_direction dir)
+{
+#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
+	if (dev->bus == &pci_bus_type)
+		return;
+#endif
+	dma_cache_sync(bus_to_virt(dma_handle), size, dir);
+}
+
+static inline void dma_sync_single_range(struct device *dev,
+					 dma_addr_t dma_handle,
+					 unsigned long offset, size_t size,
+					 enum dma_data_direction dir)
+{
+#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
+	if (dev->bus == &pci_bus_type)
+		return;
+#endif
+	dma_cache_sync(bus_to_virt(dma_handle) + offset, size, dir);
+}
+
+static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
+			       int nelems, enum dma_data_direction dir)
+{
+	int i;
+
+	for (i = 0; i < nelems; i++) {
+#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
+		dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
+			       sg[i].length, dir);
+#endif
+		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
+	}
+}
+
+static inline void dma_sync_single_for_cpu(struct device *dev,
+					   dma_addr_t dma_handle, size_t size,
+					   enum dma_data_direction dir)
+	__attribute__ ((alias("dma_sync_single")));
+
+static inline void dma_sync_single_for_device(struct device *dev,
+					   dma_addr_t dma_handle, size_t size,
+					   enum dma_data_direction dir)
+	__attribute__ ((alias("dma_sync_single")));
+
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+				       struct scatterlist *sg, int nelems,
+				       enum dma_data_direction dir)
+	__attribute__ ((alias("dma_sync_sg")));
+
+static inline void dma_sync_sg_for_device(struct device *dev,
+				       struct scatterlist *sg, int nelems,
+				       enum dma_data_direction dir)
+	__attribute__ ((alias("dma_sync_sg")));
+
+static inline int dma_get_cache_alignment(void)
+{
+	/*
+	 * Each processor family will define its own L1_CACHE_SHIFT, and
+	 * L1_CACHE_BYTES is derived from it, so this is always safe.
+	 */
+	return L1_CACHE_BYTES;
+}
+
+static inline int dma_mapping_error(dma_addr_t dma_addr)
+{
+	return dma_addr == 0;
+}
+
+#endif /* __ASM_SH64_DMA_MAPPING_H */
+
diff --git a/include/asm-sh64/dma.h b/include/asm-sh64/dma.h
new file mode 100644
index 0000000..e701f39
--- /dev/null
+++ b/include/asm-sh64/dma.h
@@ -0,0 +1,41 @@
+#ifndef __ASM_SH64_DMA_H
+#define __ASM_SH64_DMA_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/dma.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ * Copyright (C) 2003  Paul Mundt
+ *
+ */
+
+#include <linux/mm.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+
+#define MAX_DMA_CHANNELS	4
+
+/*
+ * SH5 can DMA in any memory area.
+ *
+ * The static definition is dodgy because it should limit
+ * the highest DMA-able address based on the actual
+ * physical memory available. This is actually performed
+ * at run time, when defining the memory allowed to the DMA zone.
+ */
+#define MAX_DMA_ADDRESS		~(NPHYS_MASK)
+
+#define DMA_MODE_READ		0
+#define DMA_MODE_WRITE		1
+
+#ifdef CONFIG_PCI
+extern int isa_dma_bridge_buggy;
+#else
+#define isa_dma_bridge_buggy 	(0)
+#endif
+
+#endif /* __ASM_SH64_DMA_H */
diff --git a/include/asm-sh64/elf.h b/include/asm-sh64/elf.h
new file mode 100644
index 0000000..f994286
--- /dev/null
+++ b/include/asm-sh64/elf.h
@@ -0,0 +1,107 @@
+#ifndef __ASM_SH64_ELF_H
+#define __ASM_SH64_ELF_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/elf.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ *
+ */
+
+/*
+ * ELF register definitions..
+ */
+
+#include <asm/ptrace.h>
+#include <asm/user.h>
+#include <asm/byteorder.h>
+
+typedef unsigned long elf_greg_t;
+
+#define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t))
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+typedef struct user_fpu_struct elf_fpregset_t;
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) ( (x)->e_machine == EM_SH )
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS	ELFCLASS32
+#ifdef __LITTLE_ENDIAN__
+#define ELF_DATA	ELFDATA2LSB
+#else
+#define ELF_DATA	ELFDATA2MSB
+#endif
+#define ELF_ARCH	EM_SH
+
+#define USE_ELF_CORE_DUMP
+#define ELF_EXEC_PAGESIZE	4096
+
+/* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
+   use of this is to invoke "./ld.so someprog" to test out a new version of
+   the loader.  We need to make sure that it is out of the way of the program
+   that it will "exec", and that there is sufficient room for the brk.  */
+
+#define ELF_ET_DYN_BASE         (2 * TASK_SIZE / 3)
+
+#define	R_SH_DIR32		1
+#define	R_SH_REL32		2
+#define	R_SH_IMM_LOW16		246
+#define	R_SH_IMM_LOW16_PCREL	247
+#define	R_SH_IMM_MEDLOW16	248
+#define	R_SH_IMM_MEDLOW16_PCREL	249
+
+#define ELF_CORE_COPY_REGS(_dest,_regs)				\
+	memcpy((char *) &_dest, (char *) _regs,			\
+	       sizeof(struct pt_regs));
+
+/* This yields a mask that user programs can use to figure out what
+   instruction set this CPU supports.  This could be done in user space,
+   but it's not easy, and we've already done it here.  */
+
+#define ELF_HWCAP	(0)
+
+/* This yields a string that ld.so will use to load implementation
+   specific libraries for optimization.  This is more specific in
+   intent than poking at uname or /proc/cpuinfo.
+
+   For the moment, we have only optimizations for the Intel generations,
+   but that could change... */
+
+#define ELF_PLATFORM  (NULL)
+
+#define ELF_PLAT_INIT(_r, load_addr) \
+  do { _r->regs[0]=0; _r->regs[1]=0; _r->regs[2]=0; _r->regs[3]=0; \
+       _r->regs[4]=0; _r->regs[5]=0; _r->regs[6]=0; _r->regs[7]=0; \
+       _r->regs[8]=0; _r->regs[9]=0; _r->regs[10]=0; _r->regs[11]=0; \
+       _r->regs[12]=0; _r->regs[13]=0; _r->regs[14]=0; _r->regs[15]=0; \
+       _r->regs[16]=0; _r->regs[17]=0; _r->regs[18]=0; _r->regs[19]=0; \
+       _r->regs[20]=0; _r->regs[21]=0; _r->regs[22]=0; _r->regs[23]=0; \
+       _r->regs[24]=0; _r->regs[25]=0; _r->regs[26]=0; _r->regs[27]=0; \
+       _r->regs[28]=0; _r->regs[29]=0; _r->regs[30]=0; _r->regs[31]=0; \
+       _r->regs[32]=0; _r->regs[33]=0; _r->regs[34]=0; _r->regs[35]=0; \
+       _r->regs[36]=0; _r->regs[37]=0; _r->regs[38]=0; _r->regs[39]=0; \
+       _r->regs[40]=0; _r->regs[41]=0; _r->regs[42]=0; _r->regs[43]=0; \
+       _r->regs[44]=0; _r->regs[45]=0; _r->regs[46]=0; _r->regs[47]=0; \
+       _r->regs[48]=0; _r->regs[49]=0; _r->regs[50]=0; _r->regs[51]=0; \
+       _r->regs[52]=0; _r->regs[53]=0; _r->regs[54]=0; _r->regs[55]=0; \
+       _r->regs[56]=0; _r->regs[57]=0; _r->regs[58]=0; _r->regs[59]=0; \
+       _r->regs[60]=0; _r->regs[61]=0; _r->regs[62]=0; \
+       _r->tregs[0]=0; _r->tregs[1]=0; _r->tregs[2]=0; _r->tregs[3]=0; \
+       _r->tregs[4]=0; _r->tregs[5]=0; _r->tregs[6]=0; _r->tregs[7]=0; \
+       _r->sr = SR_FD | SR_MMU; } while (0)
+
+#ifdef __KERNEL__
+#define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX_32BIT)
+#endif
+
+#endif /* __ASM_SH64_ELF_H */
diff --git a/include/asm-sh64/errno.h b/include/asm-sh64/errno.h
new file mode 100644
index 0000000..57b46d4
--- /dev/null
+++ b/include/asm-sh64/errno.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_SH64_ERRNO_H
+#define __ASM_SH64_ERRNO_H
+
+#include <asm-generic/errno.h>
+
+#endif /* __ASM_SH64_ERRNO_H */
diff --git a/include/asm-sh64/fcntl.h b/include/asm-sh64/fcntl.h
new file mode 100644
index 0000000..ffcc36c
--- /dev/null
+++ b/include/asm-sh64/fcntl.h
@@ -0,0 +1,7 @@
+#ifndef __ASM_SH64_FCNTL_H
+#define __ASM_SH64_FCNTL_H
+
+#include <asm-sh/fcntl.h>
+
+#endif /* __ASM_SH64_FCNTL_H */
+
diff --git a/include/asm-sh64/hardirq.h b/include/asm-sh64/hardirq.h
new file mode 100644
index 0000000..ad2330e
--- /dev/null
+++ b/include/asm-sh64/hardirq.h
@@ -0,0 +1,19 @@
+#ifndef __ASM_SH64_HARDIRQ_H
+#define __ASM_SH64_HARDIRQ_H
+
+#include <linux/config.h>
+#include <linux/threads.h>
+#include <linux/irq.h>
+
+/* entry.S is sensitive to the offsets of these fields */
+typedef struct {
+	unsigned int __softirq_pending;
+} ____cacheline_aligned irq_cpustat_t;
+
+#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
+
+/* arch/sh64/kernel/irq.c */
+extern void ack_bad_irq(unsigned int irq);
+
+#endif /* __ASM_SH64_HARDIRQ_H */
+
diff --git a/include/asm-sh64/hardware.h b/include/asm-sh64/hardware.h
new file mode 100644
index 0000000..931c1ad
--- /dev/null
+++ b/include/asm-sh64/hardware.h
@@ -0,0 +1,22 @@
+#ifndef __ASM_SH64_HARDWARE_H
+#define __ASM_SH64_HARDWARE_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/hardware.h
+ *
+ * Copyright (C) 2002 Stuart Menefy
+ * Copyright (C) 2003 Paul Mundt
+ *
+ * Definitions of the locations of registers in the physical address space.
+ */
+
+#define	PHYS_PERIPHERAL_BLOCK	0x09000000
+#define PHYS_DMAC_BLOCK		0x0e000000
+#define PHYS_PCI_BLOCK		0x60000000
+#define PHYS_EMI_BLOCK		0xff000000
+
+#endif /* __ASM_SH64_HARDWARE_H */
diff --git a/include/asm-sh64/hdreg.h b/include/asm-sh64/hdreg.h
new file mode 100644
index 0000000..52d9836
--- /dev/null
+++ b/include/asm-sh64/hdreg.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_SH64_HDREG_H
+#define __ASM_SH64_HDREG_H
+
+#include <asm-generic/hdreg.h>
+
+#endif /* __ASM_SH64_HDREG_H */
diff --git a/include/asm-sh64/hw_irq.h b/include/asm-sh64/hw_irq.h
new file mode 100644
index 0000000..ae718d1
--- /dev/null
+++ b/include/asm-sh64/hw_irq.h
@@ -0,0 +1,16 @@
+#ifndef __ASM_SH64_HW_IRQ_H
+#define __ASM_SH64_HW_IRQ_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/hw_irq.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ *
+ */
+static __inline__ void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) { /* Nothing to do */ }
+
+#endif /* __ASM_SH64_HW_IRQ_H */
diff --git a/include/asm-sh64/ide.h b/include/asm-sh64/ide.h
new file mode 100644
index 0000000..6fd514da
--- /dev/null
+++ b/include/asm-sh64/ide.h
@@ -0,0 +1,35 @@
+/*
+ *  linux/include/asm-sh64/ide.h
+ *
+ *  Copyright (C) 1994-1996  Linus Torvalds & authors
+ *
+ *  sh64 version by Richard Curnow & Paul Mundt
+ */
+
+/*
+ *  This file contains the sh64 architecture specific IDE code.
+ */
+
+#ifndef __ASM_SH64_IDE_H
+#define __ASM_SH64_IDE_H
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+
+#ifndef MAX_HWIFS
+#define MAX_HWIFS	CONFIG_IDE_MAX_HWIFS
+#endif
+
+/* Without this, the initialisation of PCI IDE cards ends up calling
+ * ide_init_hwif_ports, which won't work. */
+#ifdef CONFIG_BLK_DEV_IDEPCI
+#define IDE_ARCH_OBSOLETE_INIT 1
+#define ide_default_io_ctl(base)	(0)
+#endif
+
+#include <asm-generic/ide_iops.h>
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_SH64_IDE_H */
diff --git a/include/asm-sh64/io.h b/include/asm-sh64/io.h
new file mode 100644
index 0000000..cfafaa7
--- /dev/null
+++ b/include/asm-sh64/io.h
@@ -0,0 +1,250 @@
+#ifndef __ASM_SH64_IO_H
+#define __ASM_SH64_IO_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/io.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ * Copyright (C) 2003  Paul Mundt
+ *
+ */
+
+/*
+ * Convention:
+ *    read{b,w,l}/write{b,w,l} are for PCI,
+ *    while in{b,w,l}/out{b,w,l} are for ISA
+ * These may (will) be platform-specific functions.
+ *
+ * In addition, we have
+ *   ctrl_in{b,w,l}/ctrl_out{b,w,l} for SuperH specific I/O.
+ * which are processor specific. Address should be the result of
+ * onchip_remap();
+ */
+
+#include <linux/compiler.h>
+#include <asm/cache.h>
+#include <asm/system.h>
+#include <asm/page.h>
+#include <asm-generic/iomap.h>
+
+#define virt_to_bus virt_to_phys
+#define bus_to_virt phys_to_virt
+#define page_to_bus page_to_phys
+
+/*
+ * Nothing overly special here; instead of doing the same thing
+ * over and over again, we just define a set of sh64_in/out functions
+ * with an implicit size. The traditional read{b,w,l}/write{b,w,l}
+ * mess is wrapped to this, as are the SH-specific ctrl_in/out routines.
+ */
+static inline unsigned char sh64_in8(const volatile void __iomem *addr)
+{
+	return *(volatile unsigned char __force *)addr;
+}
+
+static inline unsigned short sh64_in16(const volatile void __iomem *addr)
+{
+	return *(volatile unsigned short __force *)addr;
+}
+
+static inline unsigned int sh64_in32(const volatile void __iomem *addr)
+{
+	return *(volatile unsigned int __force *)addr;
+}
+
+static inline unsigned long long sh64_in64(const volatile void __iomem *addr)
+{
+	return *(volatile unsigned long long __force *)addr;
+}
+
+static inline void sh64_out8(unsigned char b, volatile void __iomem *addr)
+{
+	*(volatile unsigned char __force *)addr = b;
+	wmb();
+}
+
+static inline void sh64_out16(unsigned short b, volatile void __iomem *addr)
+{
+	*(volatile unsigned short __force *)addr = b;
+	wmb();
+}
+
+static inline void sh64_out32(unsigned int b, volatile void __iomem *addr)
+{
+	*(volatile unsigned int __force *)addr = b;
+	wmb();
+}
+
+static inline void sh64_out64(unsigned long long b, volatile void __iomem *addr)
+{
+	*(volatile unsigned long long __force *)addr = b;
+	wmb();
+}
+
+#define readb(addr)		sh64_in8(addr)
+#define readw(addr)		sh64_in16(addr)
+#define readl(addr)		sh64_in32(addr)
+#define readb_relaxed(addr)	sh64_in8(addr)
+#define readw_relaxed(addr)	sh64_in16(addr)
+#define readl_relaxed(addr)	sh64_in32(addr)
+
+#define writeb(b, addr)		sh64_out8(b, addr)
+#define writew(b, addr)		sh64_out16(b, addr)
+#define writel(b, addr)		sh64_out32(b, addr)
+
+#define ctrl_inb(addr)		sh64_in8(ioport_map(addr, 1))
+#define ctrl_inw(addr)		sh64_in16(ioport_map(addr, 2))
+#define ctrl_inl(addr)		sh64_in32(ioport_map(addr, 4))
+
+#define ctrl_outb(b, addr)	sh64_out8(b, ioport_map(addr, 1))
+#define ctrl_outw(b, addr)	sh64_out16(b, ioport_map(addr, 2))
+#define ctrl_outl(b, addr)	sh64_out32(b, ioport_map(addr, 4))
+
+#define ioread8(addr)		sh64_in8(addr)
+#define ioread16(addr)		sh64_in16(addr)
+#define ioread32(addr)		sh64_in32(addr)
+#define iowrite8(b, addr)	sh64_out8(b, addr)
+#define iowrite16(b, addr)	sh64_out16(b, addr)
+#define iowrite32(b, addr)	sh64_out32(b, addr)
+
+#define inb(addr)		ctrl_inb(addr)
+#define inw(addr)		ctrl_inw(addr)
+#define inl(addr)		ctrl_inl(addr)
+#define outb(b, addr)		ctrl_outb(b, addr)
+#define outw(b, addr)		ctrl_outw(b, addr)
+#define outl(b, addr)		ctrl_outl(b, addr)
+
+void outsw(unsigned long port, const void *addr, unsigned long count);
+void insw(unsigned long port, void *addr, unsigned long count);
+void outsl(unsigned long port, const void *addr, unsigned long count);
+void insl(unsigned long port, void *addr, unsigned long count);
+
+void memcpy_toio(void __iomem *to, const void *from, long count);
+void memcpy_fromio(void *to, void __iomem *from, long count);
+
+#define mmiowb()
+
+#ifdef __KERNEL__
+
+#ifdef CONFIG_SH_CAYMAN
+extern unsigned long smsc_superio_virt;
+#endif
+#ifdef CONFIG_PCI
+extern unsigned long pciio_virt;
+#endif
+
+#define IO_SPACE_LIMIT 0xffffffff
+
+/*
+ * Change virtual addresses to physical addresses and vice versa.
+ * These are trivial on the 1:1 Linux/SuperH mapping
+ */
+extern __inline__ unsigned long virt_to_phys(volatile void * address)
+{
+	return __pa(address);
+}
+
+extern __inline__ void * phys_to_virt(unsigned long address)
+{
+	return __va(address);
+}
+
+extern void * __ioremap(unsigned long phys_addr, unsigned long size,
+			unsigned long flags);
+
+extern __inline__ void * ioremap(unsigned long phys_addr, unsigned long size)
+{
+	return __ioremap(phys_addr, size, 1);
+}
+
+extern __inline__ void * ioremap_nocache (unsigned long phys_addr, unsigned long size)
+{
+	return __ioremap(phys_addr, size, 0);
+}
+
+extern void iounmap(void *addr);
+
+unsigned long onchip_remap(unsigned long addr, unsigned long size, const char* name);
+extern void onchip_unmap(unsigned long vaddr);
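+
+/*
+ * Usage sketch (illustrative; BASE and REG are hypothetical): map a
+ * device window, then use the accessors defined above on the cooked
+ * pointer.
+ *
+ *	void *regs = ioremap_nocache(BASE, 0x100);
+ *	unsigned int status = readl(regs + REG);
+ *	writel(status | 1, regs + REG);
+ *	iounmap(regs);
+ */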
+
+static __inline__ int check_signature(volatile void __iomem *io_addr,
+			const unsigned char *signature, int length)
+{
+	int retval = 0;
+	do {
+		if (readb(io_addr) != *signature)
+			goto out;
+		io_addr++;
+		signature++;
+		length--;
+	} while (length);
+	retval = 1;
+out:
+	return retval;
+}
+
+/*
+ * The caches on some architectures aren't dma-coherent and have need to
+ * handle this in software.  There are three types of operations that
+ * can be applied to dma buffers.
+ *
+ *  - dma_cache_wback_inv(start, size) makes caches and RAM coherent by
+ *    writing the content of the caches back to memory, if necessary.
+ *    The function also invalidates the affected part of the caches as
+ *    necessary before DMA transfers from outside to memory.
+ *  - dma_cache_inv(start, size) invalidates the affected parts of the
+ *    caches.  Dirty lines of the caches may be written back or simply
+ *    be discarded.  This operation is necessary before dma operations
+ *    to the memory.
+ *  - dma_cache_wback(start, size) writes back any dirty lines but does
+ *    not invalidate the cache.  This can be used before DMA reads from
+ *    memory.
+ */
+
+static __inline__ void dma_cache_wback_inv (unsigned long start, unsigned long size)
+{
+	unsigned long s = start & L1_CACHE_ALIGN_MASK;
+	unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;
+
+	for (; s <= e; s += L1_CACHE_BYTES)
+		asm volatile ("ocbp	%0, 0" : : "r" (s));
+}
+
+static __inline__ void dma_cache_inv (unsigned long start, unsigned long size)
+{
+	/* Note that the caller has to be careful with overzealous
+	 * invalidation, should there be partial cache lines at the
+	 * extremities of the specified range.
+	 */
+	unsigned long s = start & L1_CACHE_ALIGN_MASK;
+	unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;
+
+	for (; s <= e; s += L1_CACHE_BYTES)
+		asm volatile ("ocbi	%0, 0" : : "r" (s));
+}
+
+static __inline__ void dma_cache_wback (unsigned long start, unsigned long size)
+{
+	unsigned long s = start & L1_CACHE_ALIGN_MASK;
+	unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;
+
+	for (; s <= e; s += L1_CACHE_BYTES)
+		asm volatile ("ocbwb	%0, 0" : : "r" (s));
+}
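+
+/*
+ * Usage sketch for the three operations above (illustrative; 'buf' and
+ * LEN are hypothetical):
+ *
+ *	dma_cache_wback((unsigned long)buf, LEN);	// before the device reads buf
+ *	dma_cache_inv((unsigned long)buf, LEN);		// before the device writes buf
+ *	dma_cache_wback_inv((unsigned long)buf, LEN);	// bidirectional transfers
+ */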
+
+/*
+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
+ * access
+ */
+#define xlate_dev_mem_ptr(p)	__va(p)
+
+/*
+ * Convert a virtual cached pointer to an uncached pointer
+ */
+#define xlate_dev_kmem_ptr(p)	p
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_SH64_IO_H */
diff --git a/include/asm-sh64/ioctl.h b/include/asm-sh64/ioctl.h
new file mode 100644
index 0000000..c089a6f
--- /dev/null
+++ b/include/asm-sh64/ioctl.h
@@ -0,0 +1,83 @@
+#ifndef __ASM_SH64_IOCTL_H
+#define __ASM_SH64_IOCTL_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/ioctl.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ *
+ * linux/ioctl.h for Linux by H.H. Bergman.
+ *
+ */
+
+/* ioctl command encoding: 32 bits total, command in lower 16 bits,
+ * size of the parameter structure in the lower 14 bits of the
+ * upper 16 bits.
+ * Encoding the size of the parameter structure in the ioctl request
+ * is useful for catching programs compiled with old versions
+ * and to avoid overwriting user space outside the user buffer area.
+ * The highest 2 bits are reserved for indicating the ``access mode''.
+ * NOTE: This limits the max parameter size to 16kB -1 !
+ */
+
+/*
+ * The following is for compatibility across the various Linux
+ * platforms.  The i386 ioctl numbering scheme doesn't really enforce
+ * a type field.  De facto, however, the top 8 bits of the lower 16
+ * bits are indeed used as a type field, so we might just as well make
+ * this explicit here.  Please be sure to use the decoding macros
+ * below from now on.
+ */
+#define _IOC_NRBITS	8
+#define _IOC_TYPEBITS	8
+#define _IOC_SIZEBITS	14
+#define _IOC_DIRBITS	2
+
+#define _IOC_NRMASK	((1 << _IOC_NRBITS)-1)
+#define _IOC_TYPEMASK	((1 << _IOC_TYPEBITS)-1)
+#define _IOC_SIZEMASK	((1 << _IOC_SIZEBITS)-1)
+#define _IOC_DIRMASK	((1 << _IOC_DIRBITS)-1)
+
+#define _IOC_NRSHIFT	0
+#define _IOC_TYPESHIFT	(_IOC_NRSHIFT+_IOC_NRBITS)
+#define _IOC_SIZESHIFT	(_IOC_TYPESHIFT+_IOC_TYPEBITS)
+#define _IOC_DIRSHIFT	(_IOC_SIZESHIFT+_IOC_SIZEBITS)
+
+/*
+ * Direction bits.
+ */
+#define _IOC_NONE	0U
+#define _IOC_WRITE	1U
+#define _IOC_READ	2U
+
+#define _IOC(dir,type,nr,size) \
+	(((dir)  << _IOC_DIRSHIFT) | \
+	 ((type) << _IOC_TYPESHIFT) | \
+	 ((nr)   << _IOC_NRSHIFT) | \
+	 ((size) << _IOC_SIZESHIFT))
+
+/* used to create numbers */
+#define _IO(type,nr)		_IOC(_IOC_NONE,(type),(nr),0)
+#define _IOR(type,nr,size)	_IOC(_IOC_READ,(type),(nr),sizeof(size))
+#define _IOW(type,nr,size)	_IOC(_IOC_WRITE,(type),(nr),sizeof(size))
+#define _IOWR(type,nr,size)	_IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
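+
+/*
+ * Worked encoding, matching the pre-expanded constants used in ioctls.h:
+ * _IOW('f', 126, int) = (1 << 30) | (sizeof(int) << 16) | ('f' << 8) | 126
+ *                     = 0x40000000 | 0x00040000 | 0x6600 | 0x7e
+ *                     = 0x4004667e, which is FIONBIO.
+ */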
+
+/* used to decode ioctl numbers.. */
+#define _IOC_DIR(nr)		(((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
+#define _IOC_TYPE(nr)		(((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
+#define _IOC_NR(nr)		(((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
+#define _IOC_SIZE(nr)		(((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
+
+/* ...and for the drivers/sound files... */
+
+#define IOC_IN		(_IOC_WRITE << _IOC_DIRSHIFT)
+#define IOC_OUT		(_IOC_READ << _IOC_DIRSHIFT)
+#define IOC_INOUT	((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
+#define IOCSIZE_MASK	(_IOC_SIZEMASK << _IOC_SIZESHIFT)
+#define IOCSIZE_SHIFT	(_IOC_SIZESHIFT)
+
+#endif /* __ASM_SH64_IOCTL_H */
diff --git a/include/asm-sh64/ioctls.h b/include/asm-sh64/ioctls.h
new file mode 100644
index 0000000..08f3c1f
--- /dev/null
+++ b/include/asm-sh64/ioctls.h
@@ -0,0 +1,116 @@
+#ifndef __ASM_SH64_IOCTLS_H
+#define __ASM_SH64_IOCTLS_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/ioctls.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ * Copyright (C) 2004  Richard Curnow
+ *
+ */
+
+#include <asm/ioctl.h>
+
+#define FIOCLEX		0x6601		/* _IO('f', 1) */
+#define FIONCLEX	0x6602		/* _IO('f', 2) */
+#define FIOASYNC	0x4004667d	/* _IOW('f', 125, int) */
+#define FIONBIO		0x4004667e	/* _IOW('f', 126, int) */
+#define FIONREAD	0x8004667f	/* _IOR('f', 127, int) */
+#define TIOCINQ		FIONREAD
+#define FIOQSIZE	0x80086680	/* _IOR('f', 128, loff_t) */
+
+#define TCGETS		0x5401
+#define TCSETS		0x5402
+#define TCSETSW		0x5403
+#define TCSETSF		0x5404
+
+#define TCGETA		0x80127417	/* _IOR('t', 23, struct termio) */
+#define TCSETA		0x40127418	/* _IOW('t', 24, struct termio) */
+#define TCSETAW		0x40127419	/* _IOW('t', 25, struct termio) */
+#define TCSETAF		0x4012741c	/* _IOW('t', 28, struct termio) */
+
+#define TCSBRK		0x741d		/* _IO('t', 29) */
+#define TCXONC		0x741e		/* _IO('t', 30) */
+#define TCFLSH		0x741f		/* _IO('t', 31) */
+
+#define TIOCSWINSZ	0x40087467	/* _IOW('t', 103, struct winsize) */
+#define TIOCGWINSZ	0x80087468	/* _IOR('t', 104, struct winsize) */
+#define	TIOCSTART	0x746e		/* _IO('t', 110)  start output, like ^Q */
+#define	TIOCSTOP	0x746f		/* _IO('t', 111)  stop output, like ^S */
+#define TIOCOUTQ        0x80047473	/* _IOR('t', 115, int) output queue size */
+
+#define TIOCSPGRP	0x40047476	/* _IOW('t', 118, int) */
+#define TIOCGPGRP	0x80047477	/* _IOR('t', 119, int) */
+
+#define TIOCEXCL	0x540c		/* _IO('T', 12) */
+#define TIOCNXCL	0x540d		/* _IO('T', 13) */
+#define TIOCSCTTY	0x540e		/* _IO('T', 14) */
+
+#define TIOCSTI		0x40015412	/* _IOW('T', 18, char) 0x5412 */
+#define TIOCMGET	0x80045415	/* _IOR('T', 21, unsigned int) 0x5415 */
+#define TIOCMBIS	0x40045416	/* _IOW('T', 22, unsigned int) 0x5416 */
+#define TIOCMBIC	0x40045417	/* _IOW('T', 23, unsigned int) 0x5417 */
+#define TIOCMSET	0x40045418	/* _IOW('T', 24, unsigned int) 0x5418 */
+
+#define TIOCM_LE	0x001
+#define TIOCM_DTR	0x002
+#define TIOCM_RTS	0x004
+#define TIOCM_ST	0x008
+#define TIOCM_SR	0x010
+#define TIOCM_CTS	0x020
+#define TIOCM_CAR	0x040
+#define TIOCM_RNG	0x080
+#define TIOCM_DSR	0x100
+#define TIOCM_CD	TIOCM_CAR
+#define TIOCM_RI	TIOCM_RNG
+
+#define TIOCGSOFTCAR	0x80045419	/* _IOR('T', 25, unsigned int) 0x5419 */
+#define TIOCSSOFTCAR	0x4004541a	/* _IOW('T', 26, unsigned int) 0x541A */
+#define TIOCLINUX	0x4004541c	/* _IOW('T', 28, int) 0x541C */
+#define TIOCCONS	0x541d		/* _IO('T', 29) */
+#define TIOCGSERIAL	0x803c541e	/* _IOR('T', 30, struct serial_struct) 0x541E */
+#define TIOCSSERIAL	0x403c541f	/* _IOW('T', 31, struct serial_struct) 0x541F */
+#define TIOCPKT		0x40045420	/* _IOW('T', 32, int) 0x5420 */
+
+#define TIOCPKT_DATA		 0
+#define TIOCPKT_FLUSHREAD	 1
+#define TIOCPKT_FLUSHWRITE	 2
+#define TIOCPKT_STOP		 4
+#define TIOCPKT_START		 8
+#define TIOCPKT_NOSTOP		16
+#define TIOCPKT_DOSTOP		32
+
+
+#define TIOCNOTTY	0x5422		/* _IO('T', 34) */
+#define TIOCSETD	0x40045423	/* _IOW('T', 35, int) 0x5423 */
+#define TIOCGETD	0x80045424	/* _IOR('T', 36, int) 0x5424 */
+#define TCSBRKP		0x40045425	/* _IOW('T', 37, int) 0x5425 */	/* Needed for POSIX tcsendbreak() */
+#define TIOCTTYGSTRUCT	0x8c105426	/* _IOR('T', 38, struct tty_struct) 0x5426 */ /* For debugging only */
+#define TIOCSBRK	0x5427		/* _IO('T', 39) */ /* BSD compatibility */
+#define TIOCCBRK	0x5428		/* _IO('T', 40) */ /* BSD compatibility */
+#define TIOCGSID	0x80045429	/* _IOR('T', 41, pid_t) 0x5429 */ /* Return the session ID of FD */
+#define TIOCGPTN	0x80045430	/* _IOR('T',0x30, unsigned int) 0x5430 Get Pty Number (of pty-mux device) */
+#define TIOCSPTLCK	0x40045431	/* _IOW('T',0x31, int) Lock/unlock Pty */
+
+#define TIOCSERCONFIG	0x5453		/* _IO('T', 83) */
+#define TIOCSERGWILD	0x80045454	/* _IOR('T', 84,  int) 0x5454 */
+#define TIOCSERSWILD	0x40045455	/* _IOW('T', 85,  int) 0x5455 */
+#define TIOCGLCKTRMIOS	0x5456
+#define TIOCSLCKTRMIOS	0x5457
+#define TIOCSERGSTRUCT	0x80d85458	/* _IOR('T', 88, struct async_struct) 0x5458 */ /* For debugging only */
+#define TIOCSERGETLSR   0x80045459	/* _IOR('T', 89, unsigned int) 0x5459 */ /* Get line status register */
+
+/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
+#define TIOCSER_TEMT    0x01	/* Transmitter physically empty */
+
+#define TIOCSERGETMULTI 0x80a8545a	/* _IOR('T', 90, struct serial_multiport_struct) 0x545A */ /* Get multiport config  */
+#define TIOCSERSETMULTI 0x40a8545b	/* _IOW('T', 91, struct serial_multiport_struct) 0x545B */ /* Set multiport config */
+
+#define TIOCMIWAIT	0x545c		/* _IO('T', 92) wait for a change on serial input line(s) */
+#define TIOCGICOUNT	0x802c545d	/* _IOR('T', 93, struct async_icount) 0x545D */	/* read serial port inline interrupt counts */
+
+#endif /* __ASM_SH64_IOCTLS_H */
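
The pre-computed constants above can be cross-checked by decoding them with
the _IOC_* extractors from asm-sh64/ioctl.h. A small userspace sketch
(illustration only; the shifts and masks are copied from that header, and
the 18-byte size assumes the classic struct termio with NCC == 8):

#include <stdio.h>

/* Mirror of the _IOC_DIR/_IOC_TYPE/_IOC_NR/_IOC_SIZE decode macros. */
static void decode(unsigned int cmd)
{
	printf("cmd %#010x: dir=%u type='%c' nr=%u size=%u\n",
	       cmd,
	       (cmd >> 30) & 3,			/* _IOC_DIR  */
	       (char)((cmd >> 8) & 0xff),	/* _IOC_TYPE */
	       cmd & 0xff,			/* _IOC_NR   */
	       (cmd >> 16) & 0x3fff);		/* _IOC_SIZE */
}

int main(void)
{
	/* TCGETA: expect dir=2 (read), type='t', nr=23, size=18 */
	decode(0x80127417);
	return 0;
}
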
diff --git a/include/asm-sh64/ipc.h b/include/asm-sh64/ipc.h
new file mode 100644
index 0000000..a46e3d9
--- /dev/null
+++ b/include/asm-sh64/ipc.h
@@ -0,0 +1 @@
+#include <asm-generic/ipc.h>
diff --git a/include/asm-sh64/ipcbuf.h b/include/asm-sh64/ipcbuf.h
new file mode 100644
index 0000000..c441e35
--- /dev/null
+++ b/include/asm-sh64/ipcbuf.h
@@ -0,0 +1,40 @@
+#ifndef __ASM_SH64_IPCBUF_H__
+#define __ASM_SH64_IPCBUF_H__
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/ipcbuf.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ *
+ */
+
+/*
+ * The ipc64_perm structure for the SH-64 architecture (same layout as on i386).
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 32-bit mode_t and seq
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct ipc64_perm
+{
+	__kernel_key_t		key;
+	__kernel_uid32_t	uid;
+	__kernel_gid32_t	gid;
+	__kernel_uid32_t	cuid;
+	__kernel_gid32_t	cgid;
+	__kernel_mode_t		mode;
+	unsigned short		__pad1;
+	unsigned short		seq;
+	unsigned short		__pad2;
+	unsigned long		__unused1;
+	unsigned long		__unused2;
+};
+
+#endif /* __ASM_SH64_IPCBUF_H__ */
diff --git a/include/asm-sh64/irq.h b/include/asm-sh64/irq.h
new file mode 100644
index 0000000..f815b43
--- /dev/null
+++ b/include/asm-sh64/irq.h
@@ -0,0 +1,149 @@
+#ifndef __ASM_SH64_IRQ_H
+#define __ASM_SH64_IRQ_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/irq.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ *
+ */
+
+#include <linux/config.h>
+
+/*
+ * Encoded IRQs are not considered worth supporting.
+ * The main reason is that there is no per-encoded-interrupt
+ * enable/disable mechanism (as there was on SH3/4).
+ * An all-enabled/all-disabled scheme is only worthwhile if
+ * there is a cascaded IC to disable/enable/ack on. Until such
+ * an IC is available there is no such support.
+ *
+ * Presumably encoded IRQs may use extra IRQs beyond 64,
+ * below. Some logic must be added to handle IRQ_IRL? in an
+ * exclusive way.
+ *
+ * Priorities are set at platform level; encoding is allowed
+ * only when IRQ_IRL0-3 are set to 0, and is not allowed
+ * otherwise.
+ */
+
+/* Independent IRQs */
+#define IRQ_IRL0	0
+#define IRQ_IRL1	1
+#define IRQ_IRL2	2
+#define IRQ_IRL3	3
+
+#define IRQ_INTA	4
+#define IRQ_INTB	5
+#define IRQ_INTC	6
+#define IRQ_INTD	7
+
+#define IRQ_SERR	12
+#define IRQ_ERR		13
+#define IRQ_PWR3	14
+#define IRQ_PWR2	15
+#define IRQ_PWR1	16
+#define IRQ_PWR0	17
+
+#define IRQ_DMTE0	18
+#define IRQ_DMTE1	19
+#define IRQ_DMTE2	20
+#define IRQ_DMTE3	21
+#define IRQ_DAERR	22
+
+#define IRQ_TUNI0	32
+#define IRQ_TUNI1	33
+#define IRQ_TUNI2	34
+#define IRQ_TICPI2	35
+
+#define IRQ_ATI		36
+#define IRQ_PRI		37
+#define IRQ_CUI		38
+
+#define IRQ_ERI		39
+#define IRQ_RXI		40
+#define IRQ_BRI		41
+#define IRQ_TXI		42
+
+#define IRQ_ITI		63
+
+#define NR_INTC_IRQS	64
+
+#ifdef CONFIG_SH_CAYMAN
+#define NR_EXT_IRQS     32
+#define START_EXT_IRQS  64
+
+/* PCI bus 2 uses encoded external interrupts on the Cayman board */
+#define IRQ_P2INTA      (START_EXT_IRQS + (3*8) + 0)
+#define IRQ_P2INTB      (START_EXT_IRQS + (3*8) + 1)
+#define IRQ_P2INTC      (START_EXT_IRQS + (3*8) + 2)
+#define IRQ_P2INTD      (START_EXT_IRQS + (3*8) + 3)
+
+#define I8042_KBD_IRQ	(START_EXT_IRQS + 2)
+#define I8042_AUX_IRQ	(START_EXT_IRQS + 6)
+
+#define IRQ_CFCARD	(START_EXT_IRQS + 7)
+#define IRQ_PCMCIA	(0)
+
+#else
+#define NR_EXT_IRQS	0
+#endif
+
+#define NR_IRQS		(NR_INTC_IRQS+NR_EXT_IRQS)
+
+
+/* Default IRQs, fixed */
+#define TIMER_IRQ	IRQ_TUNI0
+#define RTC_IRQ		IRQ_CUI
+
+/* Default Priorities, Platform may choose differently */
+#define	NO_PRIORITY	0	/* Disabled */
+#define TIMER_PRIORITY	2
+#define RTC_PRIORITY	TIMER_PRIORITY
+#define SCIF_PRIORITY	3
+#define INTD_PRIORITY	3
+#define	IRL3_PRIORITY	4
+#define INTC_PRIORITY	6
+#define	IRL2_PRIORITY	7
+#define INTB_PRIORITY	9
+#define	IRL1_PRIORITY	10
+#define INTA_PRIORITY	12
+#define	IRL0_PRIORITY	13
+#define TOP_PRIORITY	15
+
+extern void disable_irq(unsigned int);
+extern void disable_irq_nosync(unsigned int);
+extern void enable_irq(unsigned int);
+
+extern int intc_evt_to_irq[(0xE20/0x20)+1];
+int intc_irq_describe(char* p, int irq);
+
+#define irq_canonicalize(irq)	(irq)
+
+#ifdef CONFIG_SH_CAYMAN
+int cayman_irq_demux(int evt);
+int cayman_irq_describe(char* p, int irq);
+#define irq_demux(x) cayman_irq_demux(x)
+#define irq_describe(p, x) cayman_irq_describe(p, x)
+#else
+#define irq_demux(x) (intc_evt_to_irq[x])
+#define irq_describe(p, x) intc_irq_describe(p, x)
+#endif
+
+/*
+ * Function for "on chip support modules".
+ */
+
+/*
+ * SH-5 supports Priority based interrupts only.
+ * Interrupt priorities are defined at platform level.
+ */
+#define set_ipr_data(a, b, c, d)
+#define make_ipr_irq(a)
+#define make_imask_irq(a)
+
+#endif /* __ASM_SH64_IRQ_H */
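
The Cayman numbers above are plain offsets: the 32 external vectors sit
directly after the 64 on-chip INTC vectors, and PCI bus 2 occupies the
fourth group of 8. A quick arithmetic check (hypothetical, illustration
only):

#include <assert.h>

#define NR_INTC_IRQS	64
#define START_EXT_IRQS	64
#define NR_EXT_IRQS	32

int main(void)
{
	assert(START_EXT_IRQS + (3 * 8) + 0 == 88);	/* IRQ_P2INTA */
	assert(START_EXT_IRQS + 2 == 66);		/* I8042_KBD_IRQ */
	assert(NR_INTC_IRQS + NR_EXT_IRQS == 96);	/* NR_IRQS on Cayman */
	return 0;
}
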
diff --git a/include/asm-sh64/keyboard.h b/include/asm-sh64/keyboard.h
new file mode 100644
index 0000000..733e2bb
--- /dev/null
+++ b/include/asm-sh64/keyboard.h
@@ -0,0 +1,74 @@
+/*
+ *  linux/include/asm-sh64/keyboard.h
+ *
+ * Copied from i386 version:
+ *    Created 3 Nov 1996 by Geert Uytterhoeven
+ */
+
+/*
+ *  This file contains the SH-64 architecture specific keyboard definitions
+ */
+
+#ifndef __ASM_SH64_KEYBOARD_H
+#define __ASM_SH64_KEYBOARD_H
+
+#ifdef __KERNEL__
+
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <asm/io.h>
+
+#ifdef CONFIG_SH_CAYMAN
+#define KEYBOARD_IRQ			(START_EXT_IRQS + 2) /* SMSC SuperIO IRQ 1 */
+#endif
+#define DISABLE_KBD_DURING_INTERRUPTS	0
+
+extern int pckbd_setkeycode(unsigned int scancode, unsigned int keycode);
+extern int pckbd_getkeycode(unsigned int scancode);
+extern int pckbd_translate(unsigned char scancode, unsigned char *keycode,
+			   char raw_mode);
+extern char pckbd_unexpected_up(unsigned char keycode);
+extern void pckbd_leds(unsigned char leds);
+extern void pckbd_init_hw(void);
+extern unsigned char pckbd_sysrq_xlate[128];
+
+#define kbd_setkeycode		pckbd_setkeycode
+#define kbd_getkeycode		pckbd_getkeycode
+#define kbd_translate		pckbd_translate
+#define kbd_unexpected_up	pckbd_unexpected_up
+#define kbd_leds		pckbd_leds
+#define kbd_init_hw		pckbd_init_hw
+#define kbd_sysrq_xlate		pckbd_sysrq_xlate
+
+#define SYSRQ_KEY 0x54
+
+/* resource allocation */
+#define kbd_request_region()
+#define kbd_request_irq(handler) request_irq(KEYBOARD_IRQ, handler, 0, \
+                                             "keyboard", NULL)
+
+/* How to access the keyboard macros on this platform.  */
+#define kbd_read_input() inb(KBD_DATA_REG)
+#define kbd_read_status() inb(KBD_STATUS_REG)
+#define kbd_write_output(val) outb(val, KBD_DATA_REG)
+#define kbd_write_command(val) outb(val, KBD_CNTL_REG)
+
+/* Some stoneage hardware needs delays after some operations.  */
+#define kbd_pause() do { } while(0)
+
+/*
+ * Machine specific bits for the PS/2 driver
+ */
+
+#ifdef CONFIG_SH_CAYMAN
+#define AUX_IRQ (START_EXT_IRQS + 6) /* SMSC SuperIO IRQ12 */
+#endif
+
+#define aux_request_irq(hand, dev_id)					\
+	request_irq(AUX_IRQ, hand, SA_SHIRQ, "PS2 Mouse", dev_id)
+
+#define aux_free_irq(dev_id) free_irq(AUX_IRQ, dev_id)
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_SH64_KEYBOARD_H */
+
diff --git a/include/asm-sh64/kmap_types.h b/include/asm-sh64/kmap_types.h
new file mode 100644
index 0000000..2ae7c75
--- /dev/null
+++ b/include/asm-sh64/kmap_types.h
@@ -0,0 +1,7 @@
+#ifndef __ASM_SH64_KMAP_TYPES_H
+#define __ASM_SH64_KMAP_TYPES_H
+
+#include <asm-sh/kmap_types.h>
+
+#endif /* __ASM_SH64_KMAP_TYPES_H */
+
diff --git a/include/asm-sh64/linkage.h b/include/asm-sh64/linkage.h
new file mode 100644
index 0000000..1dd0e84
--- /dev/null
+++ b/include/asm-sh64/linkage.h
@@ -0,0 +1,7 @@
+#ifndef __ASM_SH64_LINKAGE_H
+#define __ASM_SH64_LINKAGE_H
+
+#include <asm-sh/linkage.h>
+
+#endif /* __ASM_SH64_LINKAGE_H */
+
diff --git a/include/asm-sh64/local.h b/include/asm-sh64/local.h
new file mode 100644
index 0000000..d9bd95d
--- /dev/null
+++ b/include/asm-sh64/local.h
@@ -0,0 +1,7 @@
+#ifndef __ASM_SH64_LOCAL_H
+#define __ASM_SH64_LOCAL_H
+
+#include <asm-generic/local.h>
+
+#endif /* __ASM_SH64_LOCAL_H */
+
diff --git a/include/asm-sh64/mc146818rtc.h b/include/asm-sh64/mc146818rtc.h
new file mode 100644
index 0000000..6cd3aec
--- /dev/null
+++ b/include/asm-sh64/mc146818rtc.h
@@ -0,0 +1,7 @@
+/*
+ * linux/include/asm-sh64/mc146818rtc.h
+ *
+ */
+
+/* For now, an empty place-holder to get IDE to compile. */
+
diff --git a/include/asm-sh64/mman.h b/include/asm-sh64/mman.h
new file mode 100644
index 0000000..a9be6d8
--- /dev/null
+++ b/include/asm-sh64/mman.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_SH64_MMAN_H
+#define __ASM_SH64_MMAN_H
+
+#include <asm-sh/mman.h>
+
+#endif /* __ASM_SH64_MMAN_H */
diff --git a/include/asm-sh64/mmu.h b/include/asm-sh64/mmu.h
new file mode 100644
index 0000000..ccd36d2
--- /dev/null
+++ b/include/asm-sh64/mmu.h
@@ -0,0 +1,7 @@
+#ifndef __MMU_H
+#define __MMU_H
+
+/* Default "unsigned long" context */
+typedef unsigned long mm_context_t;
+
+#endif
diff --git a/include/asm-sh64/mmu_context.h b/include/asm-sh64/mmu_context.h
new file mode 100644
index 0000000..f062e15
--- /dev/null
+++ b/include/asm-sh64/mmu_context.h
@@ -0,0 +1,209 @@
+#ifndef __ASM_SH64_MMU_CONTEXT_H
+#define __ASM_SH64_MMU_CONTEXT_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/mmu_context.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ * Copyright (C) 2003  Paul Mundt
+ *
+ * ASID handling idea taken from MIPS implementation.
+ *
+ */
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Cache of MMU context last used.
+ *
+ * The MMU "context" consists of two things:
+ *   (a) TLB cache version (or cycle, top 24 bits of mmu_context_cache)
+ *   (b) ASID (Address Space IDentifier, bottom 8 bits of mmu_context_cache)
+ */
+extern unsigned long mmu_context_cache;
+
+#include <linux/config.h>
+#include <asm/page.h>
+
+
+/* Current mm's pgd */
+extern pgd_t *mmu_pdtp_cache;
+
+#define SR_ASID_MASK		0xffffffffff00ffffULL
+#define SR_ASID_SHIFT		16
+
+#define MMU_CONTEXT_ASID_MASK		0x000000ff
+#define MMU_CONTEXT_VERSION_MASK	0xffffff00
+#define MMU_CONTEXT_FIRST_VERSION	0x00000100
+#define NO_CONTEXT			0
+
+/* ASID is an 8-bit value, so it can't be 0x100 */
+#define MMU_NO_ASID			0x100
+
+
+/*
+ * Virtual Page Number mask
+ */
+#define MMU_VPN_MASK	0xfffff000
+
+extern __inline__ void
+get_new_mmu_context(struct mm_struct *mm)
+{
+	extern void flush_tlb_all(void);
+	extern void flush_cache_all(void);
+
+	unsigned long mc = ++mmu_context_cache;
+
+	if (!(mc & MMU_CONTEXT_ASID_MASK)) {
+		/* We have exhausted the ASIDs of this version.
+		   Flush the whole TLB and start a new cycle. */
+		flush_tlb_all();
+		/* We have to flush all the caches as well, since
+		   ASIDs are used in the caches. */
+		flush_cache_all();
+		/* Fix the version if needed.
+		   Note that we avoid version #0/ASID #0 to distinguish NO_CONTEXT. */
+		if (!mc)
+			mmu_context_cache = mc = MMU_CONTEXT_FIRST_VERSION;
+	}
+	mm->context = mc;
+}
+
+/*
+ * Get MMU context if needed.
+ */
+static __inline__ void
+get_mmu_context(struct mm_struct *mm)
+{
+	if (mm) {
+		unsigned long mc = mmu_context_cache;
+		/* Check whether this mm has an old version of the context.
+		   If so, allocate a new context with the current version. */
+		if ((mm->context ^ mc) & MMU_CONTEXT_VERSION_MASK)
+			get_new_mmu_context(mm);
+	}
+}
+
+/*
+ * Initialize the context related info for a new mm_struct
+ * instance.
+ */
+static inline int init_new_context(struct task_struct *tsk,
+					struct mm_struct *mm)
+{
+	mm->context = NO_CONTEXT;
+
+	return 0;
+}
+
+/*
+ * Destroy context related info for an mm_struct that is about
+ * to be put to rest.
+ */
+static inline void destroy_context(struct mm_struct *mm)
+{
+	extern void flush_tlb_mm(struct mm_struct *mm);
+
+	/* Well, at least free TLB entries */
+	flush_tlb_mm(mm);
+}
+
+#endif	/* __ASSEMBLY__ */
+
+/* Common defines */
+#define TLB_STEP	0x00000010
+#define TLB_PTEH	0x00000000
+#define TLB_PTEL	0x00000008
+
+/* PTEH defines */
+#define PTEH_ASID_SHIFT	2
+#define PTEH_VALID	0x0000000000000001
+#define PTEH_SHARED	0x0000000000000002
+#define PTEH_MATCH_ASID	0x00000000000003ff
+
+#ifndef __ASSEMBLY__
+/* This has to be a common function because the next location to fill
+ * information is shared. */
+extern void __do_tlb_refill(unsigned long address, unsigned long long is_text_not_data, pte_t *pte);
+
+/* Profiling counter. */
+#ifdef CONFIG_SH64_PROC_TLB
+extern unsigned long long calls_to_do_fast_page_fault;
+#endif
+
+static inline unsigned long get_asid(void)
+{
+	unsigned long long sr;
+
+	asm volatile ("getcon   " __SR ", %0\n\t"
+		      : "=r" (sr));
+
+	sr = (sr >> SR_ASID_SHIFT) & MMU_CONTEXT_ASID_MASK;
+	return (unsigned long) sr;
+}
+
+/* Set ASID into SR */
+static inline void set_asid(unsigned long asid)
+{
+	unsigned long long sr, pc;
+
+	asm volatile ("getcon	" __SR ", %0" : "=r" (sr));
+
+	sr = (sr & SR_ASID_MASK) | (asid << SR_ASID_SHIFT);
+
+	/*
+	 * This function may be inlined, so to avoid the assembler
+	 * reporting duplicate symbols we use the gas trick of numeric
+	 * local labels and forward references.
+	 */
+	asm volatile ("movi	1, %1\n\t"
+		      "shlli	%1, 28, %1\n\t"
+		      "or	%0, %1, %1\n\t"
+		      "putcon	%1, " __SR "\n\t"
+		      "putcon	%0, " __SSR "\n\t"
+		      "movi	1f, %1\n\t"
+		      "ori	%1, 1 , %1\n\t"
+		      "putcon	%1, " __SPC "\n\t"
+		      "rte\n"
+		      "1:\n\t"
+		      : "=r" (sr), "=r" (pc) : "0" (sr));
+}
+
+/*
+ * After we have set current->mm to a new value, this activates
+ * the context for the new mm so we see the new mappings.
+ */
+static __inline__ void activate_context(struct mm_struct *mm)
+{
+	get_mmu_context(mm);
+	set_asid(mm->context & MMU_CONTEXT_ASID_MASK);
+}
+
+
+static __inline__ void switch_mm(struct mm_struct *prev,
+				 struct mm_struct *next,
+				 struct task_struct *tsk)
+{
+	if (prev != next) {
+		mmu_pdtp_cache = next->pgd;
+		activate_context(next);
+	}
+}
+
+#define deactivate_mm(tsk,mm)	do { } while (0)
+
+#define activate_mm(prev, next) \
+	switch_mm((prev),(next),NULL)
+
+static inline void
+enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+}
+
+#endif	/* __ASSEMBLY__ */
+
+#endif /* __ASM_SH64_MMU_CONTEXT_H */
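
The version/ASID arithmetic in get_new_mmu_context() can be exercised on
its own. Below is a userspace sketch (illustration only; the flushes are
elided and the initial cache value is an assumption) showing an allocation
rolling over from the last ASID of one version into the next version:

#include <assert.h>

#define MMU_CONTEXT_ASID_MASK		0x000000ffUL
#define MMU_CONTEXT_FIRST_VERSION	0x00000100UL

static unsigned long mmu_context_cache = MMU_CONTEXT_FIRST_VERSION;

/* Same arithmetic as get_new_mmu_context(), minus the TLB/cache flushes. */
static unsigned long new_context(void)
{
	unsigned long mc = ++mmu_context_cache;

	if (!(mc & MMU_CONTEXT_ASID_MASK)) {
		/* ASIDs of this version exhausted; the kernel flushes
		 * the TLB and the caches at this point. */
		if (!mc)	/* full counter wrap: skip version #0 */
			mmu_context_cache = mc = MMU_CONTEXT_FIRST_VERSION;
	}
	return mc;
}

int main(void)
{
	unsigned long c = new_context();
	assert(c == 0x101);			/* version 1, ASID 1 */

	for (int i = 0; i < 254; i++)		/* burn ASIDs 2..255 */
		c = new_context();
	assert(c == 0x1ff);			/* version 1, ASID 255 */

	c = new_context();			/* rollover */
	assert(c == 0x200);			/* version 2, ASID 0 */
	return 0;
}
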
diff --git a/include/asm-sh64/module.h b/include/asm-sh64/module.h
new file mode 100644
index 0000000..c313650
--- /dev/null
+++ b/include/asm-sh64/module.h
@@ -0,0 +1,20 @@
+#ifndef __ASM_SH64_MODULE_H
+#define __ASM_SH64_MODULE_H
+/*
+ * This file contains the SH architecture specific module code.
+ */
+
+struct mod_arch_specific {
+	/* empty */
+};
+
+#define Elf_Shdr		Elf32_Shdr
+#define Elf_Sym			Elf32_Sym
+#define Elf_Ehdr		Elf32_Ehdr
+
+#define module_map(x)		vmalloc(x)
+#define module_unmap(x)		vfree(x)
+#define module_arch_init(x)	(0)
+#define arch_init_modules(x)	do { } while (0)
+
+#endif /* __ASM_SH64_MODULE_H */
diff --git a/include/asm-sh64/msgbuf.h b/include/asm-sh64/msgbuf.h
new file mode 100644
index 0000000..cf0494c
--- /dev/null
+++ b/include/asm-sh64/msgbuf.h
@@ -0,0 +1,42 @@
+#ifndef __ASM_SH64_MSGBUF_H
+#define __ASM_SH64_MSGBUF_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/msgbuf.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ *
+ */
+
+/*
+ * The msqid64_ds structure for the SH-64 architecture (same layout as on i386).
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct msqid64_ds {
+	struct ipc64_perm msg_perm;
+	__kernel_time_t msg_stime;	/* last msgsnd time */
+	unsigned long	__unused1;
+	__kernel_time_t msg_rtime;	/* last msgrcv time */
+	unsigned long	__unused2;
+	__kernel_time_t msg_ctime;	/* last change time */
+	unsigned long	__unused3;
+	unsigned long  msg_cbytes;	/* current number of bytes on queue */
+	unsigned long  msg_qnum;	/* number of messages in queue */
+	unsigned long  msg_qbytes;	/* max number of bytes on queue */
+	__kernel_pid_t msg_lspid;	/* pid of last msgsnd */
+	__kernel_pid_t msg_lrpid;	/* last receive pid */
+	unsigned long  __unused4;
+	unsigned long  __unused5;
+};
+
+#endif /* __ASM_SH64_MSGBUF_H */
diff --git a/include/asm-sh64/namei.h b/include/asm-sh64/namei.h
new file mode 100644
index 0000000..99d759a
--- /dev/null
+++ b/include/asm-sh64/namei.h
@@ -0,0 +1,24 @@
+#ifndef __ASM_SH64_NAMEI_H
+#define __ASM_SH64_NAMEI_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/namei.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ *
+ * Included from linux/fs/namei.c
+ *
+ */
+
+/* This dummy routine may be changed to something useful
+ * for /usr/gnemul/ emulation stuff.
+ * Look at asm-sparc/namei.h for details.
+ */
+
+#define __emul_prefix() NULL
+
+#endif /* __ASM_SH64_NAMEI_H */
diff --git a/include/asm-sh64/page.h b/include/asm-sh64/page.h
new file mode 100644
index 0000000..e1f7f5a
--- /dev/null
+++ b/include/asm-sh64/page.h
@@ -0,0 +1,137 @@
+#ifndef __ASM_SH64_PAGE_H
+#define __ASM_SH64_PAGE_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/page.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ * Copyright (C) 2003, 2004  Paul Mundt
+ *
+ * benedict.gaster@superh.com 19th, 24th July 2002.
+ *
+ * Modified to take account of D-cache support being enabled.
+ *
+ */
+
+#include <linux/config.h>
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT	12
+#ifdef __ASSEMBLY__
+#define PAGE_SIZE	4096
+#else
+#define PAGE_SIZE	(1UL << PAGE_SHIFT)
+#endif
+#define PAGE_MASK	(~(PAGE_SIZE-1))
+#define PTE_MASK	PAGE_MASK
+
+#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+#define HPAGE_SHIFT	16
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
+#define HPAGE_SHIFT	20
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
+#define HPAGE_SHIFT	29
+#endif
+
+#ifdef CONFIG_HUGETLB_PAGE
+#define HPAGE_SIZE		(1UL << HPAGE_SHIFT)
+#define HPAGE_MASK		(~(HPAGE_SIZE-1))
+#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT-PAGE_SHIFT)
+#endif
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+extern struct page *mem_map;
+extern void sh64_page_clear(void *page);
+extern void sh64_page_copy(void *from, void *to);
+
+#define clear_page(page)               sh64_page_clear(page)
+#define copy_page(to,from)             sh64_page_copy(from, to)
+
+#if defined(CONFIG_DCACHE_DISABLED)
+
+#define clear_user_page(page, vaddr, pg)	clear_page(page)
+#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
+
+#else
+
+extern void clear_user_page(void *to, unsigned long address, struct page *pg);
+extern void copy_user_page(void *to, void *from, unsigned long address, struct page *pg);
+
+#endif /* defined(CONFIG_DCACHE_DISABLED) */
+
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct { unsigned long long pte; } pte_t;
+typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pgd; } pgd_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+
+#define pte_val(x)	((x).pte)
+#define pmd_val(x)	((x).pmd)
+#define pgd_val(x)	((x).pgd)
+#define pgprot_val(x)	((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) } )
+#define __pmd(x) ((pmd_t) { (x) } )
+#define __pgd(x) ((pgd_t) { (x) } )
+#define __pgprot(x)	((pgprot_t) { (x) } )
+
+#endif /* !__ASSEMBLY__ */
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)
+
+/*
+ * Kconfig defined.
+ */
+#define __MEMORY_START		(CONFIG_MEMORY_START)
+#define PAGE_OFFSET		(CONFIG_CACHED_MEMORY_OFFSET)
+
+#define __pa(x)			((unsigned long)(x)-PAGE_OFFSET)
+#define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
+#define MAP_NR(addr)		((__pa(addr)-__MEMORY_START) >> PAGE_SHIFT)
+#define VALID_PAGE(page)	((page - mem_map) < max_mapnr)
+
+#define phys_to_page(phys)	(mem_map + (((phys) - __MEMORY_START) >> PAGE_SHIFT))
+#define page_to_phys(page)	(((page - mem_map) << PAGE_SHIFT) + __MEMORY_START)
+
+/* PFN start number, because of __MEMORY_START */
+#define PFN_START		(__MEMORY_START >> PAGE_SHIFT)
+
+#define pfn_to_page(pfn)	(mem_map + (pfn) - PFN_START)
+#define page_to_pfn(page)	((unsigned long)((page) - mem_map) + PFN_START)
+#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define pfn_valid(pfn)		(((pfn) - PFN_START) < max_mapnr)
+#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+
+#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
+				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#ifndef __ASSEMBLY__
+
+/* Pure 2^n version of get_order */
+extern __inline__ int get_order(unsigned long size)
+{
+	int order;
+
+	size = (size-1) >> (PAGE_SHIFT-1);
+	order = -1;
+	do {
+		size >>= 1;
+		order++;
+	} while (size);
+	return order;
+}
+
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_SH64_PAGE_H */
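
get_order() maps an allocation size to a power-of-two page count; shifting
by PAGE_SHIFT - 1 rather than PAGE_SHIFT doubles the page count first, so
the halving loop rounds up instead of down. A userspace copy for checking
(illustration only):

#include <assert.h>

#define PAGE_SHIFT 12

/* Verbatim copy of the sh64 get_order() above. */
static int get_order(unsigned long size)
{
	int order;

	size = (size - 1) >> (PAGE_SHIFT - 1);
	order = -1;
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}

int main(void)
{
	assert(get_order(1) == 0);	/* up to one page: order 0 */
	assert(get_order(4096) == 0);
	assert(get_order(4097) == 1);	/* just over a page: 2 pages */
	assert(get_order(8192) == 1);
	assert(get_order(16384) == 2);	/* 4 pages: order 2 */
	return 0;
}
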
diff --git a/include/asm-sh64/param.h b/include/asm-sh64/param.h
new file mode 100644
index 0000000..d18cc87
--- /dev/null
+++ b/include/asm-sh64/param.h
@@ -0,0 +1,43 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/param.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ * Copyright (C) 2003  Paul Mundt
+ *
+ */
+#ifndef __ASM_SH64_PARAM_H
+#define __ASM_SH64_PARAM_H
+
+#include <linux/config.h>
+
+#ifdef __KERNEL__
+# ifdef CONFIG_SH_WDT
+#  define HZ		1000		/* Needed for high-res WOVF */
+# else
+#  define HZ		100
+# endif
+# define USER_HZ	100		/* User interfaces are in "ticks" */
+# define CLOCKS_PER_SEC	(USER_HZ)	/* frequency at which times() counts */
+#endif
+
+#ifndef HZ
+#define HZ 100
+#endif
+
+#define EXEC_PAGESIZE	4096
+
+#ifndef NGROUPS
+#define NGROUPS		32
+#endif
+
+#ifndef NOGROUP
+#define NOGROUP		(-1)
+#endif
+
+#define MAXHOSTNAMELEN	64	/* max length of hostname */
+
+#endif /* __ASM_SH64_PARAM_H */
diff --git a/include/asm-sh64/pci.h b/include/asm-sh64/pci.h
new file mode 100644
index 0000000..8cc14e139
--- /dev/null
+++ b/include/asm-sh64/pci.h
@@ -0,0 +1,110 @@
+#ifndef __ASM_SH64_PCI_H
+#define __ASM_SH64_PCI_H
+
+#ifdef __KERNEL__
+
+#include <linux/dma-mapping.h>
+
+/* Can be used to override the logic in pci_scan_bus for skipping
+   already-configured bus numbers - to be used for buggy BIOSes
+   or architectures with incomplete PCI setup by the loader */
+
+#define pcibios_assign_all_busses()     1
+
+/*
+ * These are currently the correct values for the STM Overdrive board.
+ * We need some way of setting this in a board-specific way, as it
+ * will not be the same on other boards.
+ */
+#if defined(CONFIG_CPU_SUBTYPE_SH5_101) || defined(CONFIG_CPU_SUBTYPE_SH5_103)
+#define PCIBIOS_MIN_IO          0x2000
+#define PCIBIOS_MIN_MEM         0x40000000
+#endif
+
+extern void pcibios_set_master(struct pci_dev *dev);
+
+/*
+ * Set penalize isa irq function
+ */
+static inline void pcibios_penalize_isa_irq(int irq)
+{
+	/* We don't do dynamic PCI IRQ allocation */
+}
+
+/* Dynamic DMA mapping stuff.
+ * SuperH has everything mapped statically like x86.
+ */
+
+/* The PCI address space does equal the physical memory
+ * address space.  The networking and block device layers use
+ * this boolean for bounce buffer decisions.
+ */
+#define PCI_DMA_BUS_IS_PHYS	(1)
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <asm/scatterlist.h>
+#include <linux/string.h>
+#include <asm/io.h>
+
+/* pci_unmap_{single,page} being a nop depends upon the
+ * configuration.
+ */
+#ifdef CONFIG_SH_PCIDMA_NONCOHERENT
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
+	dma_addr_t ADDR_NAME;
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		\
+	__u32 LEN_NAME;
+#define pci_unmap_addr(PTR, ADDR_NAME)			\
+	((PTR)->ADDR_NAME)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)		\
+	(((PTR)->ADDR_NAME) = (VAL))
+#define pci_unmap_len(PTR, LEN_NAME)			\
+	((PTR)->LEN_NAME)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL)		\
+	(((PTR)->LEN_NAME) = (VAL))
+#else
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
+#define pci_unmap_addr(PTR, ADDR_NAME)		(0)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
+#define pci_unmap_len(PTR, LEN_NAME)		(0)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
+#endif
+
+/* Not supporting more than 32-bit PCI bus addresses now, but
+ * must satisfy references to this function.  Change if needed.
+ */
+#define pci_dac_dma_supported(pci_dev, mask) (0)
+
+/* These macros should be used after a pci_map_sg call has been done
+ * to get bus addresses of each of the SG entries and their lengths.
+ * You should only work with the number of sg entries pci_map_sg
+ * returns, or alternatively stop on the first sg_dma_len(sg) which
+ * is 0.
+ */
+#define sg_dma_address(sg)	((sg)->dma_address)
+#define sg_dma_len(sg)		((sg)->length)
+
+/* Board-specific fixup routines. */
+extern void pcibios_fixup(void);
+extern void pcibios_fixup_irqs(void);
+
+#ifdef CONFIG_PCI_AUTO
+extern int pciauto_assign_resources(int busno, struct pci_channel *hose);
+#endif
+
+static inline void pcibios_add_platform_entries(struct pci_dev *dev)
+{
+}
+
+#endif /* __KERNEL__ */
+
+/* generic pci stuff */
+#include <asm-generic/pci.h>
+
+/* generic DMA-mapping stuff */
+#include <asm-generic/pci-dma-compat.h>
+
+#endif /* __ASM_SH64_PCI_H */
+
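
The DECLARE_PCI_UNMAP_* helpers above are meant to be embedded in a
driver's own bookkeeping structure, so that both the fields and the code
that saves them vanish on configurations where pci_unmap_single() is a
no-op. A hypothetical driver fragment (the ringbuf names are illustrative,
not from the tree; pci_map_single/pci_unmap_single are the 2.6-era PCI DMA
API):

struct ringbuf {
	void *cpu_addr;
	DECLARE_PCI_UNMAP_ADDR(mapping)
	DECLARE_PCI_UNMAP_LEN(len)
};

static void ringbuf_map(struct pci_dev *pdev, struct ringbuf *rb, size_t size)
{
	dma_addr_t bus = pci_map_single(pdev, rb->cpu_addr, size,
					PCI_DMA_FROMDEVICE);

	/* Both lines compile away when the mapping is coherent. */
	pci_unmap_addr_set(rb, mapping, bus);
	pci_unmap_len_set(rb, len, size);
}

static void ringbuf_unmap(struct pci_dev *pdev, struct ringbuf *rb)
{
	pci_unmap_single(pdev, pci_unmap_addr(rb, mapping),
			 pci_unmap_len(rb, len), PCI_DMA_FROMDEVICE);
}
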
diff --git a/include/asm-sh64/percpu.h b/include/asm-sh64/percpu.h
new file mode 100644
index 0000000..a01d16c
--- /dev/null
+++ b/include/asm-sh64/percpu.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_SH64_PERCPU
+#define __ASM_SH64_PERCPU
+
+#include <asm-generic/percpu.h>
+
+#endif /* __ASM_SH64_PERCPU */
diff --git a/include/asm-sh64/pgalloc.h b/include/asm-sh64/pgalloc.h
new file mode 100644
index 0000000..b25f5df
--- /dev/null
+++ b/include/asm-sh64/pgalloc.h
@@ -0,0 +1,195 @@
+#ifndef __ASM_SH64_PGALLOC_H
+#define __ASM_SH64_PGALLOC_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/pgalloc.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ * Copyright (C) 2003, 2004  Paul Mundt
+ * Copyright (C) 2003, 2004  Richard Curnow
+ *
+ */
+
+#include <linux/threads.h>
+#include <linux/mm.h>
+
+#define pgd_quicklist (current_cpu_data.pgd_quick)
+#define pmd_quicklist (current_cpu_data.pmd_quick)
+#define pte_quicklist (current_cpu_data.pte_quick)
+#define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)
+
+static inline void pgd_init(unsigned long page)
+{
+	unsigned long *pgd = (unsigned long *)page;
+	extern pte_t empty_bad_pte_table[PTRS_PER_PTE];
+	int i;
+
+	for (i = 0; i < USER_PTRS_PER_PGD; i++)
+		pgd[i] = (unsigned long)empty_bad_pte_table;
+}
+
+/*
+ * Allocate and free page tables. The xxx_kernel() versions are
+ * used to allocate a kernel page table - this turns on ASID bits
+ * if any.
+ */
+
+extern __inline__ pgd_t *get_pgd_slow(void)
+{
+	unsigned int pgd_size = (USER_PTRS_PER_PGD * sizeof(pgd_t));
+	pgd_t *ret = (pgd_t *)kmalloc(pgd_size, GFP_KERNEL);
+	return ret;
+}
+
+extern __inline__ pgd_t *get_pgd_fast(void)
+{
+	unsigned long *ret;
+
+	if ((ret = pgd_quicklist) != NULL) {
+		pgd_quicklist = (unsigned long *)(*ret);
+		ret[0] = 0;
+		pgtable_cache_size--;
+	} else
+		ret = (unsigned long *)get_pgd_slow();
+
+	if (ret) {
+		memset(ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+	}
+	return (pgd_t *)ret;
+}
+
+extern __inline__ void free_pgd_fast(pgd_t *pgd)
+{
+	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
+	pgd_quicklist = (unsigned long *) pgd;
+	pgtable_cache_size++;
+}
+
+extern __inline__ void free_pgd_slow(pgd_t *pgd)
+{
+	kfree((void *)pgd);
+}
+
+extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);
+extern pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long address_preadjusted);
+
+extern __inline__ pte_t *get_pte_fast(void)
+{
+	unsigned long *ret;
+
+	if((ret = (unsigned long *)pte_quicklist) != NULL) {
+		pte_quicklist = (unsigned long *)(*ret);
+		ret[0] = ret[1];
+		pgtable_cache_size--;
+	}
+	return (pte_t *)ret;
+}
+
+extern __inline__ void free_pte_fast(pte_t *pte)
+{
+	*(unsigned long *)pte = (unsigned long) pte_quicklist;
+	pte_quicklist = (unsigned long *) pte;
+	pgtable_cache_size++;
+}
+
+static inline void pte_free_kernel(pte_t *pte)
+{
+	free_page((unsigned long)pte);
+}
+
+static inline void pte_free(struct page *pte)
+{
+	__free_page(pte);
+}
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+					   unsigned long address)
+{
+	pte_t *pte;
+
+	pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT|__GFP_ZERO);
+
+	return pte;
+}
+
+static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+	struct page *pte;
+
+	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+
+	return pte;
+}
+
+#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
+
+/*
+ * allocating and freeing a pmd is trivial: the 1-entry pmd is
+ * inside the pgd, so has no extra memory associated with it.
+ */
+
+#if defined(CONFIG_SH64_PGTABLE_2_LEVEL)
+
+#define pmd_alloc_one(mm, addr)		({ BUG(); ((pmd_t *)2); })
+#define pmd_free(x)			do { } while (0)
+#define pgd_populate(mm, pmd, pte)	BUG()
+#define __pte_free_tlb(tlb,pte)		tlb_remove_page((tlb),(pte))
+#define __pmd_free_tlb(tlb,pmd)		do { } while (0)
+
+#elif defined(CONFIG_SH64_PGTABLE_3_LEVEL)
+
+static __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+	pmd_t *pmd;
+	pmd = (pmd_t *) __get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+	return pmd;
+}
+
+static __inline__ void pmd_free(pmd_t *pmd)
+{
+	free_page((unsigned long) pmd);
+}
+
+#define pgd_populate(mm, pgd, pmd) pgd_set(pgd, pmd)
+#define __pmd_free_tlb(tlb,pmd)		pmd_free(pmd)
+
+#else
+#error "No defined page table size"
+#endif
+
+#define check_pgt_cache()		do { } while (0)
+#define pgd_free(pgd)		free_pgd_slow(pgd)
+#define pgd_alloc(mm)		get_pgd_fast()
+
+extern int do_check_pgt_cache(int, int);
+
+extern inline void set_pgdir(unsigned long address, pgd_t entry)
+{
+	struct task_struct * p;
+	pgd_t *pgd;
+
+	read_lock(&tasklist_lock);
+	for_each_process(p) {
+		if (!p->mm)
+			continue;
+		*pgd_offset(p->mm,address) = entry;
+	}
+	read_unlock(&tasklist_lock);
+	for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
+		pgd[address >> PGDIR_SHIFT] = entry;
+}
+
+#define pmd_populate_kernel(mm, pmd, pte) \
+	set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) (pte)))
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
+				struct page *pte)
+{
+	set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) page_address (pte)));
+}
+
+#endif /* __ASM_SH64_PGALLOC_H */
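
The pgd/pte "quicklists" above thread the free list through the freed
objects themselves: free_pgd_fast() stores the old list head in word 0 of
the page being freed, and get_pgd_fast() pops it and wipes that word before
reuse. A userspace model of the same trick (illustration only):

#include <assert.h>
#include <stdlib.h>

static unsigned long *quicklist;
static int cache_size;

static void free_fast(unsigned long *obj)
{
	*obj = (unsigned long)quicklist;	/* word 0 is the next link */
	quicklist = obj;
	cache_size++;
}

static unsigned long *get_fast(void)
{
	unsigned long *ret = quicklist;

	if (ret != NULL) {
		quicklist = (unsigned long *)(*ret);
		ret[0] = 0;			/* wipe the link before reuse */
		cache_size--;
	}
	return ret;
}

int main(void)
{
	unsigned long *a = calloc(512, sizeof(*a));
	unsigned long *b = calloc(512, sizeof(*b));

	free_fast(a);
	free_fast(b);
	assert(get_fast() == b);		/* LIFO reuse */
	assert(get_fast() == a);
	assert(get_fast() == NULL && cache_size == 0);
	free(a);
	free(b);
	return 0;
}
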
diff --git a/include/asm-sh64/pgtable.h b/include/asm-sh64/pgtable.h
new file mode 100644
index 0000000..45f70c0
--- /dev/null
+++ b/include/asm-sh64/pgtable.h
@@ -0,0 +1,508 @@
+#ifndef __ASM_SH64_PGTABLE_H
+#define __ASM_SH64_PGTABLE_H
+
+#include <asm-generic/4level-fixup.h>
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/pgtable.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ * Copyright (C) 2003, 2004  Paul Mundt
+ * Copyright (C) 2003, 2004  Richard Curnow
+ *
+ * This file contains the functions and defines necessary to modify and use
+ * the SuperH page table tree.
+ */
+
+#ifndef __ASSEMBLY__
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <linux/threads.h>
+#include <linux/config.h>
+
+extern void paging_init(void);
+
+/* We provide our own get_unmapped_area to avoid cache synonym issues */
+#define HAVE_ARCH_UNMAPPED_AREA
+
+/*
+ * Basically we have the same two-level (which is the logical three level
+ * Linux page table layout folded) page tables as the i386.
+ */
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern unsigned char empty_zero_page[PAGE_SIZE];
+#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * NEFF and NPHYS related defines.
+ * FIXME : These need to be model-dependent.  For now this is OK, SH5-101 and SH5-103
+ * implement 32 bits effective and 32 bits physical.  But future implementations may
+ * extend beyond this.
+ */
+#define NEFF		32
+#define	NEFF_SIGN	(1LL << (NEFF - 1))
+#define	NEFF_MASK	(-1LL << NEFF)
+
+#define NPHYS		32
+#define	NPHYS_SIGN	(1LL << (NPHYS - 1))
+#define	NPHYS_MASK	(-1LL << NPHYS)
+
+/* Typically 2-level is sufficient up to 32 bits of virtual address space, beyond
+   that 3-level would be appropriate. */
+#if defined(CONFIG_SH64_PGTABLE_2_LEVEL)
+/* For 4k pages, this contains 512 entries, i.e. 9 bits worth of address. */
+#define PTRS_PER_PTE	((1<<PAGE_SHIFT)/sizeof(unsigned long long))
+#define PTE_MAGNITUDE	3	      /* log2(sizeof(unsigned long long)) */
+#define PTE_SHIFT	PAGE_SHIFT
+#define PTE_BITS	(PAGE_SHIFT - PTE_MAGNITUDE)
+
+/* top level: PGD. */
+#define PGDIR_SHIFT	(PTE_SHIFT + PTE_BITS)
+#define PGD_BITS	(NEFF - PGDIR_SHIFT)
+#define PTRS_PER_PGD	(1<<PGD_BITS)
+
+/* middle level: PMD. This doesn't do anything for the 2-level case. */
+#define PTRS_PER_PMD	(1)
+
+#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
+#define PGDIR_MASK	(~(PGDIR_SIZE-1))
+#define PMD_SHIFT	PGDIR_SHIFT
+#define PMD_SIZE	PGDIR_SIZE
+#define PMD_MASK	PGDIR_MASK
+
+#elif defined(CONFIG_SH64_PGTABLE_3_LEVEL)
+/*
+ * three-level asymmetric paging structure: PGD is top level.
+ * The asymmetry comes from 32-bit pointers and 64-bit PTEs.
+ */
+/* bottom level: PTE. It's 9 bits = 512 pointers */
+#define PTRS_PER_PTE	((1<<PAGE_SHIFT)/sizeof(unsigned long long))
+#define PTE_MAGNITUDE	3	      /* log2(sizeof(unsigned long long)) */
+#define PTE_SHIFT	PAGE_SHIFT
+#define PTE_BITS	(PAGE_SHIFT - PTE_MAGNITUDE)
+
+/* middle level: PMD. It's 10 bits = 1024 pointers */
+#define PTRS_PER_PMD	((1<<PAGE_SHIFT)/sizeof(unsigned long long *))
+#define PMD_MAGNITUDE	2	      /* log2(sizeof(unsigned long long *)) */
+#define PMD_SHIFT	(PTE_SHIFT + PTE_BITS)
+#define PMD_BITS	(PAGE_SHIFT - PMD_MAGNITUDE)
+
+/* top level: PGD. It's 1 bit = 2 pointers */
+#define PGDIR_SHIFT	(PMD_SHIFT + PMD_BITS)
+#define PGD_BITS	(NEFF - PGDIR_SHIFT)
+#define PTRS_PER_PGD	(1<<PGD_BITS)
+
+#define PMD_SIZE	(1UL << PMD_SHIFT)
+#define PMD_MASK	(~(PMD_SIZE-1))
+#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
+#define PGDIR_MASK	(~(PGDIR_SIZE-1))
+
+#else
+#error "No defined number of page table levels"
+#endif
+
+/*
+ * Error outputs.
+ */
+#define pte_ERROR(e) \
+	printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pmd_ERROR(e) \
+	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
+#define pgd_ERROR(e) \
+	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+/*
+ * Table setting routines. Used within arch/mm only.
+ */
+#define set_pgd(pgdptr, pgdval) (*(pgdptr) = pgdval)
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
+
+static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
+{
+	unsigned long long x = ((unsigned long long) pteval.pte);
+	unsigned long long *xp = (unsigned long long *) pteptr;
+	/*
+	 * Sign-extend based on NPHYS.
+	 */
+	*(xp) = (x & NPHYS_SIGN) ? (x | NPHYS_MASK) : x;
+}
+#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
+
+static __inline__ void pmd_set(pmd_t *pmdp,pte_t *ptep)
+{
+	pmd_val(*pmdp) = (unsigned long) ptep;
+}
+
+/*
+ * PGD defines. Top level.
+ */
+
+/* To find an entry in a generic PGD. */
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+#define __pgd_offset(address) pgd_index(address)
+#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
+
+/* To find an entry in a kernel PGD. */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+/*
+ * PGD level access routines.
+ *
+ * Note 1:
+ * There's no need to use physical addresses since the tree walk is
+ * performed entirely in software, up until the PTE translation.
+ *
+ * Note 2:
+ * A PGD entry can be uninitialized (_PGD_UNUSED), generically bad,
+ * clear (_PGD_EMPTY), or present. When present, the lower 3 nibbles
+ * contain _KERNPG_TABLE, and since the entry is a kernel virtual
+ * pointer, bit 31 must also be 1. Assuming a clear value with bit 31
+ * set to 0 and the lower 3 nibbles set to 0xFFF (_PGD_EMPTY), any
+ * other value is a bad pgd that must be reported via printk().
+ *
+ */
+#define _PGD_EMPTY		0x0
+
+#if defined(CONFIG_SH64_PGTABLE_2_LEVEL)
+static inline int pgd_none(pgd_t pgd)		{ return 0; }
+static inline int pgd_bad(pgd_t pgd)		{ return 0; }
+#define pgd_present(pgd) ((pgd_val(pgd) & _PAGE_PRESENT) ? 1 : 0)
+#define pgd_clear(xx)				do { } while(0)
+
+#elif defined(CONFIG_SH64_PGTABLE_3_LEVEL)
+#define pgd_present(pgd_entry)	(1)
+#define pgd_none(pgd_entry)	(pgd_val((pgd_entry)) == _PGD_EMPTY)
+/* TODO: Think later about what a useful definition of 'bad' would be now. */
+#define pgd_bad(pgd_entry)	(0)
+#define pgd_clear(pgd_entry_p)	(set_pgd((pgd_entry_p), __pgd(_PGD_EMPTY)))
+
+#endif
+
+
+#define pgd_page(pgd_entry)	((unsigned long) (pgd_val(pgd_entry) & PAGE_MASK))
+
+/*
+ * PMD defines. Middle level.
+ */
+
+/* PGD to PMD dereferencing */
+#if defined(CONFIG_SH64_PGTABLE_2_LEVEL)
+static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
+{
+	return (pmd_t *) dir;
+}
+#elif defined(CONFIG_SH64_PGTABLE_3_LEVEL)
+#define __pmd_offset(address) \
+		(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+#define pmd_offset(dir, addr) \
+		((pmd_t *) ((pgd_val(*(dir))) & PAGE_MASK) + __pmd_offset((addr)))
+#endif
+
+/*
+ * PMD level access routines. Same notes as above.
+ */
+#define _PMD_EMPTY		0x0
+/* Either the PMD is empty or present, it's not paged out */
+#define pmd_present(pmd_entry)	(pmd_val(pmd_entry) & _PAGE_PRESENT)
+#define pmd_clear(pmd_entry_p)	(set_pmd((pmd_entry_p), __pmd(_PMD_EMPTY)))
+#define pmd_none(pmd_entry)	(pmd_val((pmd_entry)) == _PMD_EMPTY)
+#define pmd_bad(pmd_entry)	((pmd_val(pmd_entry) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
+
+#define pmd_page_kernel(pmd_entry) \
+	((unsigned long) __va(pmd_val(pmd_entry) & PAGE_MASK))
+
+#define pmd_page(pmd) \
+	(virt_to_page(pmd_val(pmd)))
+
+/* PMD to PTE dereferencing */
+#define pte_index(address) \
+		((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+
+#define pte_offset_kernel(dir, addr) \
+		((pte_t *) ((pmd_val(*(dir))) & PAGE_MASK) + pte_index((addr)))
+
+#define pte_offset_map(dir,addr)	pte_offset_kernel(dir, addr)
+#define pte_offset_map_nested(dir,addr)	pte_offset_kernel(dir, addr)
+#define pte_unmap(pte)		do { } while (0)
+#define pte_unmap_nested(pte)	do { } while (0)
+
+/* Round it up! */
+#define USER_PTRS_PER_PGD	((TASK_SIZE+PGDIR_SIZE-1)/PGDIR_SIZE)
+#define FIRST_USER_PGD_NR	0
+
+#ifndef __ASSEMBLY__
+#define VMALLOC_END	0xff000000
+#define VMALLOC_START	0xf0000000
+#define VMALLOC_VMADDR(x) ((unsigned long)(x))
+
+#define IOBASE_VADDR	0xff000000
+#define IOBASE_END	0xffffffff
+
+/*
+ * PTEL coherent flags.
+ * See Chapter 17 ST50 CPU Core Volume 1, Architecture.
+ */
+/* The bits that are required in the SH-5 TLB are placed in the h/w-defined
+   positions, to avoid expensive bit shuffling on every refill.  The remaining
+   bits are used for s/w purposes and masked out on each refill.
+
+   Note, the PTE slots are used to hold data of type swp_entry_t when a page is
+   swapped out.  Only the _PAGE_PRESENT flag is significant when the page is
+   swapped out, and it must be placed so that it doesn't overlap either the
+   type or offset fields of swp_entry_t.  For x86, offset is at [31:8] and type
+   at [6:1], with _PAGE_PRESENT at bit 0 for both pte_t and swp_entry_t.  This
+   scheme doesn't map to SH-5 because bit [0] controls cacheability.  So bit
+   [2] is used for _PAGE_PRESENT and the type field of swp_entry_t is split
+   into 2 pieces.  That is handled by SWP_ENTRY and SWP_TYPE below. */
+#define _PAGE_WT	0x001  /* CB0: if cacheable, 1->write-thru, 0->write-back */
+#define _PAGE_DEVICE	0x001  /* CB0: if uncacheable, 1->device (i.e. no write-combining or reordering at bus level) */
+#define _PAGE_CACHABLE	0x002  /* CB1: uncachable/cachable */
+#define _PAGE_PRESENT	0x004  /* software: page referenced */
+#define _PAGE_FILE	0x004  /* software: only when !present */
+#define _PAGE_SIZE0	0x008  /* SZ0-bit : size of page */
+#define _PAGE_SIZE1	0x010  /* SZ1-bit : size of page */
+#define _PAGE_SHARED	0x020  /* software: reflects PTEH's SH */
+#define _PAGE_READ	0x040  /* PR0-bit : read access allowed */
+#define _PAGE_EXECUTE	0x080  /* PR1-bit : execute access allowed */
+#define _PAGE_WRITE	0x100  /* PR2-bit : write access allowed */
+#define _PAGE_USER	0x200  /* PR3-bit : user space access allowed */
+#define _PAGE_DIRTY	0x400  /* software: page accessed in write */
+#define _PAGE_ACCESSED	0x800  /* software: page referenced */
+
+/* Mask which drops software flags */
+#define _PAGE_FLAGS_HARDWARE_MASK	0xfffffffffffff3dbLL
+
+/*
+ * HugeTLB support
+ */
+#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+#define _PAGE_SZHUGE	(_PAGE_SIZE0)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
+#define _PAGE_SZHUGE	(_PAGE_SIZE1)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
+#define _PAGE_SZHUGE	(_PAGE_SIZE0 | _PAGE_SIZE1)
+#endif
+
+/*
+ * Default flags for a Kernel page.
+ * This is fundamentally also SHARED because the main use of this define
+ * (other than for PGD/PMD entries) is for the VMALLOC pool which is
+ * contextless.
+ *
+ * _PAGE_EXECUTE is required for modules
+ *
+ */
+#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+			 _PAGE_EXECUTE | \
+			 _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_DIRTY | \
+			 _PAGE_SHARED)
+
+/* Default flags for a User page */
+#define _PAGE_TABLE	(_KERNPG_TABLE | _PAGE_USER)
+
+#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+
+#define PAGE_NONE	__pgprot(_PAGE_CACHABLE | _PAGE_ACCESSED)
+#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+				 _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_USER | \
+				 _PAGE_SHARED)
+/* We need to include _PAGE_EXECUTE in PAGE_COPY because it is the default
+ * protection mode for the stack. */
+#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_CACHABLE | \
+				 _PAGE_ACCESSED | _PAGE_USER | _PAGE_EXECUTE)
+#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_CACHABLE | \
+				 _PAGE_ACCESSED | _PAGE_USER)
+#define PAGE_KERNEL	__pgprot(_KERNPG_TABLE)
+
+
+/*
+ * On the ST50 we have full permissions (Read/Write/Execute/Shared).
+ * Just match 'em all. These are for mmap(), therefore all at least
+ * User/Cachable/Present/Accessed. No point in making them fault on write.
+ */
+#define __MMAP_COMMON	(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | _PAGE_ACCESSED)
+       /* sxwr */
+#define __P000	__pgprot(__MMAP_COMMON)
+#define __P001	__pgprot(__MMAP_COMMON | _PAGE_READ)
+#define __P010	__pgprot(__MMAP_COMMON)
+#define __P011	__pgprot(__MMAP_COMMON | _PAGE_READ)
+#define __P100	__pgprot(__MMAP_COMMON | _PAGE_EXECUTE)
+#define __P101	__pgprot(__MMAP_COMMON | _PAGE_EXECUTE | _PAGE_READ)
+#define __P110	__pgprot(__MMAP_COMMON | _PAGE_EXECUTE)
+#define __P111	__pgprot(__MMAP_COMMON | _PAGE_EXECUTE | _PAGE_READ)
+
+#define __S000	__pgprot(__MMAP_COMMON | _PAGE_SHARED)
+#define __S001	__pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_READ)
+#define __S010	__pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_WRITE)
+#define __S011	__pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_READ | _PAGE_WRITE)
+#define __S100	__pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_EXECUTE)
+#define __S101	__pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_EXECUTE | _PAGE_READ)
+#define __S110	__pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_EXECUTE | _PAGE_WRITE)
+#define __S111	__pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_EXECUTE | _PAGE_READ | _PAGE_WRITE)
+
+/* Make it a device mapping for maximum safety (e.g. for mapping device
+   registers into user-space via /dev/map).  */
+#define pgprot_noncached(x) __pgprot(((x).pgprot & ~(_PAGE_CACHABLE)) | _PAGE_DEVICE)
+#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE)
+
+/*
+ * Handling allocation failures during page table setup.
+ */
+extern void __handle_bad_pmd_kernel(pmd_t * pmd);
+#define __handle_bad_pmd(x)	__handle_bad_pmd_kernel(x)
+
+/*
+ * PTE level access routines.
+ *
+ * Note 1:
+ * It's the tree walk leaf. This is physical address to be stored.
+ *
+ * Note 2:
+ * Regarding the choice of _PTE_EMPTY:
+
+   We must choose a bit pattern that cannot be valid, whether or not the page
+   is present.  bit[2]==1 => present, bit[2]==0 => swapped out.  If swapped
+   out, bits [31:8], [6:3], [1:0] are under swapper control, so only bit[7] is
+   left for us to select.  If we force bit[7]==0 when swapped out, we could use
+   the combination bit[7,2]=2'b10 to indicate an empty PTE.  Alternatively, if
+   we force bit[7]==1 when swapped out, we can use all zeroes to indicate
+   empty.  This is convenient, because the page tables get cleared to zero
+   when they are allocated.
+
+ */
+#define _PTE_EMPTY	0x0
+#define pte_present(x)	(pte_val(x) & _PAGE_PRESENT)
+#define pte_clear(mm,addr,xp)	(set_pte_at(mm, addr, xp, __pte(_PTE_EMPTY)))
+#define pte_none(x)	(pte_val(x) == _PTE_EMPTY)
+
+/*
+ * Some definitions to translate between mem_map, PTEs, and page
+ * addresses:
+ */
+
+/*
+ * Given a PTE, return the index of the mem_map[] entry corresponding
+ * to the page frame the PTE refers to. Take the absolute physical
+ * address, make it relative, and translate it to an index.
+ */
+#define pte_pagenr(x)		(((unsigned long) (pte_val(x)) - \
+				 __MEMORY_START) >> PAGE_SHIFT)
+
+/*
+ * Given a PTE, return the "struct page *".
+ */
+#define pte_page(x)		(mem_map + pte_pagenr(x))
+
+/*
+ * Return number of (down rounded) MB corresponding to x pages.
+ */
+#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
+
+
+/*
+ * The following only have defined behavior if pte_present() is true.
+ */
+static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_READ; }
+static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXECUTE; }
+static inline int pte_dirty(pte_t pte){ return pte_val(pte) & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte){ return pte_val(pte) & _PAGE_ACCESSED; }
+static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
+static inline int pte_write(pte_t pte){ return pte_val(pte) & _PAGE_WRITE; }
+
+extern inline pte_t pte_rdprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_READ)); return pte; }
+extern inline pte_t pte_wrprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_WRITE)); return pte; }
+extern inline pte_t pte_exprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_EXECUTE)); return pte; }
+extern inline pte_t pte_mkclean(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
+extern inline pte_t pte_mkold(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
+
+extern inline pte_t pte_mkread(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_READ)); return pte; }
+extern inline pte_t pte_mkwrite(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_WRITE)); return pte; }
+extern inline pte_t pte_mkexec(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_EXECUTE)); return pte; }
+extern inline pte_t pte_mkdirty(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
+extern inline pte_t pte_mkyoung(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
+
+/*
+ * Conversion functions: convert a page and protection to a page entry.
+ *
+ * extern pte_t mk_pte(struct page *page, pgprot_t pgprot)
+ */
+#define mk_pte(page,pgprot)							\
+({										\
+	pte_t __pte;								\
+										\
+	set_pte(&__pte, __pte((((page)-mem_map) << PAGE_SHIFT) | 		\
+		__MEMORY_START | pgprot_val((pgprot))));			\
+	__pte;									\
+})
+
+/*
+ * This takes a (absolute) physical page address that is used
+ * by the remapping functions
+ */
+#define mk_pte_phys(physpage, pgprot) \
+({ pte_t __pte; set_pte(&__pte, __pte(physpage | pgprot_val(pgprot))); __pte; })
+
+extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{ set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); return pte; }
+
+#define page_pte_prot(page, prot) mk_pte(page, prot)
+#define page_pte(page) page_pte_prot(page, __pgprot(0))
+
+typedef pte_t *pte_addr_t;
+#define pgtable_cache_init()	do { } while (0)
+
+extern void update_mmu_cache(struct vm_area_struct * vma,
+			     unsigned long address, pte_t pte);
+
+/* Encode and decode a swap entry */
+#define __swp_type(x)			(((x).val & 3) + (((x).val >> 1) & 0x3c))
+#define __swp_offset(x)			((x).val >> 8)
+#define __swp_entry(type, offset)	((swp_entry_t) { ((offset << 8) + ((type & 0x3c) << 1) + (type & 3)) })
+#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
+
+/* Encode and decode a nonlinear file mapping entry */
+#define PTE_FILE_MAX_BITS		29
+#define pte_to_pgoff(pte)		(pte_val(pte))
+#define pgoff_to_pte(off)		((pte_t) { (off) | _PAGE_FILE })
+
+/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
+#define PageSkip(page)		(0)
+#define kern_addr_valid(addr)	(1)
+
+#define io_remap_page_range(vma, vaddr, paddr, size, prot)		\
+		remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
+
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
+		remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+#define MK_IOSPACE_PFN(space, pfn)	(pfn)
+#define GET_IOSPACE(pfn)		0
+#define GET_PFN(pfn)			(pfn)
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * No page table caches to initialise
+ */
+#define pgtable_cache_init()    do { } while (0)
+
+#define pte_pfn(x)		(((unsigned long)((x).pte)) >> PAGE_SHIFT)
+#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define pfn_pmd(pfn, prot)	__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+
+#include <asm-generic/pgtable.h>
+
+#endif /* __ASM_SH64_PGTABLE_H */
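
The swap-entry encoding above splits the type field around bit 2 so that
_PAGE_PRESENT (0x004) can never be set in a swapped-out PTE, as the
_PTE_EMPTY comment earlier in this file requires. A userspace round-trip
check (illustration only; the macros are local copies):

#include <assert.h>

typedef struct { unsigned long val; } swp_entry_t;

#define __swp_type(x)	(((x).val & 3) + (((x).val >> 1) & 0x3c))
#define __swp_offset(x)	((x).val >> 8)
#define __swp_entry(type, offset) \
	((swp_entry_t) { ((offset) << 8) + (((type) & 0x3c) << 1) + ((type) & 3) })

int main(void)
{
	swp_entry_t e = __swp_entry(0x1b, 0x123);

	assert(e.val == 0x12333);
	assert((e.val & 0x4) == 0);	/* bit 2 (_PAGE_PRESENT) stays clear */
	assert(__swp_type(e) == 0x1b);	/* type survives the split */
	assert(__swp_offset(e) == 0x123);
	return 0;
}
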
diff --git a/include/asm-sh64/platform.h b/include/asm-sh64/platform.h
new file mode 100644
index 0000000..7046a90
--- /dev/null
+++ b/include/asm-sh64/platform.h
@@ -0,0 +1,69 @@
+#ifndef __ASM_SH64_PLATFORM_H
+#define __ASM_SH64_PLATFORM_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/platform.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ *
+ * benedict.gaster@superh.com:	 3rd May 2002
+ *    Added support for ramdisk, removing statically linked romfs at the same time.
+ */
+
+#include <linux/ioport.h>
+#include <asm/irq.h>
+
+
+/*
+ * Platform definition structure.
+ */
+struct sh64_platform {
+	unsigned int readonly_rootfs;
+	unsigned int ramdisk_flags;
+	unsigned int initial_root_dev;
+	unsigned int loader_type;
+	unsigned int initrd_start;
+	unsigned int initrd_size;
+	unsigned int fpu_flags;
+	unsigned int io_res_count;
+	unsigned int kram_res_count;
+	unsigned int xram_res_count;
+	unsigned int rom_res_count;
+	struct resource *io_res_p;
+	struct resource *kram_res_p;
+	struct resource *xram_res_p;
+	struct resource *rom_res_p;
+};
+
+extern struct sh64_platform platform_parms;
+
+extern unsigned long long memory_start, memory_end;
+
+extern unsigned long long fpu_in_use;
+
+extern int platform_int_priority[NR_INTC_IRQS];
+
+#define FPU_FLAGS		(platform_parms.fpu_flags)
+#define STANDARD_IO_RESOURCES	(platform_parms.io_res_count)
+#define STANDARD_KRAM_RESOURCES	(platform_parms.kram_res_count)
+#define STANDARD_XRAM_RESOURCES	(platform_parms.xram_res_count)
+#define STANDARD_ROM_RESOURCES	(platform_parms.rom_res_count)
+
+/*
+ * Kernel Memory description, Respectively:
+ * code = last but one memory descriptor
+ * data = last memory descriptor
+ */
+#define code_resource (platform_parms.kram_res_p[STANDARD_KRAM_RESOURCES - 2])
+#define data_resource (platform_parms.kram_res_p[STANDARD_KRAM_RESOURCES - 1])
+
+/* Be prepared for 64-bit sign extensions */
+#define PFN_UP(x)       ((((x) + PAGE_SIZE-1) >> PAGE_SHIFT) & 0x000fffff)
+#define PFN_DOWN(x)     (((x) >> PAGE_SHIFT) & 0x000fffff)
+#define PFN_PHYS(x)     ((x) << PAGE_SHIFT)
+
+#endif	/* __ASM_SH64_PLATFORM_H */
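The PFN helpers above are plain shifts plus a 20-bit mask. A minimal sketch
of what they compute, assuming 4 kB pages (PAGE_SHIFT of 12 is an assumption
here, not taken from this header):

#include <assert.h>

#define PAGE_SHIFT	12			/* assumed 4 kB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PFN_UP(x)	((((x) + PAGE_SIZE-1) >> PAGE_SHIFT) & 0x000fffff)
#define PFN_DOWN(x)	(((x) >> PAGE_SHIFT) & 0x000fffff)
#define PFN_PHYS(x)	((x) << PAGE_SHIFT)

int main(void)
{
	assert(PFN_UP(0x1234UL)   == 0x2);	/* rounds up to the next frame */
	assert(PFN_DOWN(0x1234UL) == 0x1);	/* truncates down */
	assert(PFN_PHYS(0x2UL)    == 0x2000);	/* back to a byte address */
	/* the & 0x000fffff strips bits that a 64-bit sign extension of
	 * a 32-bit physical address could smear into the upper half */
	assert(PFN_DOWN(0xffffffffff001000ULL) == 0xff001);
	return 0;
}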
diff --git a/include/asm-sh64/poll.h b/include/asm-sh64/poll.h
new file mode 100644
index 0000000..a420d14
--- /dev/null
+++ b/include/asm-sh64/poll.h
@@ -0,0 +1,36 @@
+#ifndef __ASM_SH64_POLL_H
+#define __ASM_SH64_POLL_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/poll.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ *
+ */
+
+/* These are specified by iBCS2 */
+#define POLLIN		0x0001
+#define POLLPRI		0x0002
+#define POLLOUT		0x0004
+#define POLLERR		0x0008
+#define POLLHUP		0x0010
+#define POLLNVAL	0x0020
+
+/* The rest seem to be more-or-less nonstandard. Check them! */
+#define POLLRDNORM	0x0040
+#define POLLRDBAND	0x0080
+#define POLLWRNORM	0x0100
+#define POLLWRBAND	0x0200
+#define POLLMSG		0x0400
+
+struct pollfd {
+	int fd;
+	short events;
+	short revents;
+};
+
+#endif /* __ASM_SH64_POLL_H */
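For context, a hedged userspace sketch of how these bits are consumed through
poll(2); it pulls the same values in via the libc <poll.h>, and the function
name and timeout are illustrative:

#include <poll.h>
#include <unistd.h>

/* Wait up to one second for stdin to become readable. */
static int stdin_readable(void)
{
	struct pollfd pfd = { .fd = STDIN_FILENO, .events = POLLIN };

	if (poll(&pfd, 1, 1000) <= 0)
		return 0;			/* timeout or error */
	if (pfd.revents & (POLLERR | POLLHUP | POLLNVAL))
		return 0;			/* kernel-set error bits */
	return (pfd.revents & POLLIN) != 0;	/* requested bit came back */
}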
diff --git a/include/asm-sh64/posix_types.h b/include/asm-sh64/posix_types.h
new file mode 100644
index 0000000..0620317
--- /dev/null
+++ b/include/asm-sh64/posix_types.h
@@ -0,0 +1,131 @@
+#ifndef __ASM_SH64_POSIX_TYPES_H
+#define __ASM_SH64_POSIX_TYPES_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/posix_types.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ * Copyright (C) 2003  Paul Mundt
+ *
+ * This file is generally used by user-level software, so you need to
+ * be a little careful about namespace pollution etc.  Also, we cannot
+ * assume GCC is being used.
+ */
+
+typedef unsigned long	__kernel_ino_t;
+typedef unsigned short	__kernel_mode_t;
+typedef unsigned short	__kernel_nlink_t;
+typedef long		__kernel_off_t;
+typedef int		__kernel_pid_t;
+typedef unsigned short	__kernel_ipc_pid_t;
+typedef unsigned short	__kernel_uid_t;
+typedef unsigned short	__kernel_gid_t;
+typedef long unsigned int	__kernel_size_t;
+typedef int		__kernel_ssize_t;
+typedef int		__kernel_ptrdiff_t;
+typedef long		__kernel_time_t;
+typedef long		__kernel_suseconds_t;
+typedef long		__kernel_clock_t;
+typedef int		__kernel_timer_t;
+typedef int		__kernel_clockid_t;
+typedef int		__kernel_daddr_t;
+typedef char *		__kernel_caddr_t;
+typedef unsigned short	__kernel_uid16_t;
+typedef unsigned short	__kernel_gid16_t;
+typedef unsigned int	__kernel_uid32_t;
+typedef unsigned int	__kernel_gid32_t;
+
+typedef unsigned short	__kernel_old_uid_t;
+typedef unsigned short	__kernel_old_gid_t;
+typedef unsigned short	__kernel_old_dev_t;
+
+#ifdef __GNUC__
+typedef long long	__kernel_loff_t;
+#endif
+
+typedef struct {
+#if defined(__KERNEL__) || defined(__USE_ALL)
+	int	val[2];
+#else /* !defined(__KERNEL__) && !defined(__USE_ALL) */
+	int	__val[2];
+#endif /* !defined(__KERNEL__) && !defined(__USE_ALL) */
+} __kernel_fsid_t;
+
+#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)
+
+#undef	__FD_SET
+static __inline__ void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp)
+{
+	unsigned long __tmp = __fd / __NFDBITS;
+	unsigned long __rem = __fd % __NFDBITS;
+	__fdsetp->fds_bits[__tmp] |= (1UL<<__rem);
+}
+
+#undef	__FD_CLR
+static __inline__ void __FD_CLR(unsigned long __fd, __kernel_fd_set *__fdsetp)
+{
+	unsigned long __tmp = __fd / __NFDBITS;
+	unsigned long __rem = __fd % __NFDBITS;
+	__fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem);
+}
+
+
+#undef	__FD_ISSET
+static __inline__ int __FD_ISSET(unsigned long __fd, const __kernel_fd_set *__p)
+{
+	unsigned long __tmp = __fd / __NFDBITS;
+	unsigned long __rem = __fd % __NFDBITS;
+	return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0;
+}
+
+/*
+ * This will unroll the loop for the normal constant case (8 ints,
+ * for a 256-bit fd_set)
+ */
+#undef	__FD_ZERO
+static __inline__ void __FD_ZERO(__kernel_fd_set *__p)
+{
+	unsigned long *__tmp = __p->fds_bits;
+	int __i;
+
+	if (__builtin_constant_p(__FDSET_LONGS)) {
+		switch (__FDSET_LONGS) {
+		case 16:
+			__tmp[ 0] = 0; __tmp[ 1] = 0;
+			__tmp[ 2] = 0; __tmp[ 3] = 0;
+			__tmp[ 4] = 0; __tmp[ 5] = 0;
+			__tmp[ 6] = 0; __tmp[ 7] = 0;
+			__tmp[ 8] = 0; __tmp[ 9] = 0;
+			__tmp[10] = 0; __tmp[11] = 0;
+			__tmp[12] = 0; __tmp[13] = 0;
+			__tmp[14] = 0; __tmp[15] = 0;
+			return;
+
+		case 8:
+			__tmp[ 0] = 0; __tmp[ 1] = 0;
+			__tmp[ 2] = 0; __tmp[ 3] = 0;
+			__tmp[ 4] = 0; __tmp[ 5] = 0;
+			__tmp[ 6] = 0; __tmp[ 7] = 0;
+			return;
+
+		case 4:
+			__tmp[ 0] = 0; __tmp[ 1] = 0;
+			__tmp[ 2] = 0; __tmp[ 3] = 0;
+			return;
+		}
+	}
+	__i = __FDSET_LONGS;
+	while (__i) {
+		__i--;
+		*__tmp = 0;
+		__tmp++;
+	}
+}
+
+#endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */
+
+#endif /* __ASM_SH64_POSIX_TYPES_H */
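The word/bit split in the __FD_* helpers is the standard fd_set layout. A
standalone sketch of the arithmetic, assuming 32-bit longs (so __NFDBITS is
32, an assumption for this 32-bit ABI) and the 8-word, 256-bit set mentioned
in the comment:

#include <assert.h>

#define NFDBITS		32	/* assumed: bits per unsigned long */
#define FDSET_LONGS	8	/* 256-bit set, as in the comment above */

int main(void)
{
	unsigned long bits[FDSET_LONGS] = { 0 };
	unsigned int fd = 35;	/* word 35/32 == 1, bit 35%32 == 3 */

	bits[fd / NFDBITS] |= 1UL << (fd % NFDBITS);		/* __FD_SET */
	assert(bits[1] == (1UL << 3));
	assert(bits[fd / NFDBITS] & (1UL << (fd % NFDBITS)));	/* __FD_ISSET */
	bits[fd / NFDBITS] &= ~(1UL << (fd % NFDBITS));		/* __FD_CLR */
	assert(bits[1] == 0);
	return 0;
}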
diff --git a/include/asm-sh64/processor.h b/include/asm-sh64/processor.h
new file mode 100644
index 0000000..a51bd41
--- /dev/null
+++ b/include/asm-sh64/processor.h
@@ -0,0 +1,286 @@
+#ifndef __ASM_SH64_PROCESSOR_H
+#define __ASM_SH64_PROCESSOR_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/processor.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ * Copyright (C) 2003  Paul Mundt
+ * Copyright (C) 2004  Richard Curnow
+ *
+ */
+
+#include <asm/page.h>
+
+#ifndef __ASSEMBLY__
+
+#include <asm/types.h>
+#include <asm/cache.h>
+#include <asm/registers.h>
+#include <linux/threads.h>
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ \
+void *pc; \
+unsigned long long __dummy = 0; \
+__asm__("gettr	tr0, %1\n\t" \
+	"pta	4, tr0\n\t" \
+	"gettr	tr0, %0\n\t" \
+	"ptabs	%1, tr0\n\t"	\
+	:"=r" (pc), "=r" (__dummy) \
+	: "1" (__dummy)); \
+pc; })
+
+/*
+ *  CPU type and hardware bug flags. Kept separately for each CPU.
+ */
+enum cpu_type {
+	CPU_SH5_101,
+	CPU_SH5_103,
+	CPU_SH_NONE
+};
+
+/*
+ * TLB information structure
+ *
+ * Defined for both I and D tlb, per-processor.
+ */
+struct tlb_info {
+	unsigned long long next;
+	unsigned long long first;
+	unsigned long long last;
+
+	unsigned int entries;
+	unsigned int step;
+
+	unsigned long flags;
+};
+
+struct sh_cpuinfo {
+	enum cpu_type type;
+	unsigned long loops_per_jiffy;
+
+	char	hard_math;
+
+	unsigned long *pgd_quick;
+	unsigned long *pmd_quick;
+	unsigned long *pte_quick;
+	unsigned long pgtable_cache_sz;
+	unsigned int cpu_clock, master_clock, bus_clock, module_clock;
+
+	/* Cache info */
+	struct cache_info icache;
+	struct cache_info dcache;
+
+	/* TLB info */
+	struct tlb_info itlb;
+	struct tlb_info dtlb;
+};
+
+extern struct sh_cpuinfo boot_cpu_data;
+
+#define cpu_data (&boot_cpu_data)
+#define current_cpu_data boot_cpu_data
+
+#endif
+
+/*
+ * User space process size: 2GB - 4k.
+ */
+#define TASK_SIZE	0x7ffff000UL
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE	(TASK_SIZE / 3)
+
+/*
+ * Bits of the SR register
+ *
+ * FD-bit:
+ *     When set, the processor has no right to use the FPU and any
+ *     floating-point operation raises an exception.
+ *
+ * IMASK-bit:
+ *     Interrupt level mask
+ *
+ * STEP-bit:
+ *     Single step bit
+ *
+ */
+#define SR_FD    0x00008000
+
+#if defined(CONFIG_SH64_SR_WATCH)
+#define SR_MMU   0x84000000
+#else
+#define SR_MMU   0x80000000
+#endif
+
+#define SR_IMASK 0x000000f0
+#define SR_SSTEP 0x08000000
+
+#ifndef __ASSEMBLY__
+
+/*
+ * FPU structure and data: require 8-byte alignment, as we need to
+ * access it with fld.p, fst.p.
+ */
+
+struct sh_fpu_hard_struct {
+	unsigned long fp_regs[64];
+	unsigned int fpscr;
+	/* long status; * software status information */
+};
+
+#if 0
+/* Dummy fpu emulator  */
+struct sh_fpu_soft_struct {
+	unsigned long long fp_regs[32];
+	unsigned int fpscr;
+	unsigned char lookahead;
+	unsigned long entry_pc;
+};
+#endif
+
+union sh_fpu_union {
+	struct sh_fpu_hard_struct hard;
+	/* 'hard' itself only produces 32 bit alignment, yet we need
+	   to access it using 64 bit load/store as well. */
+	unsigned long long alignment_dummy;
+};
+
+struct thread_struct {
+	unsigned long sp;
+	unsigned long pc;
+	/* This stores the address of the pt_regs built during a context
+	   switch, or of the register save area built for a kernel mode
+	   exception.  It is used for backtracing the stack of a sleeping task
+	   or one that traps in kernel mode. */
+	struct pt_regs *kregs;
+	/* This stores the address of the pt_regs constructed on entry from
+	   user mode.  It is a fixed value over the lifetime of a process, or
+	   NULL for a kernel thread. */
+	struct pt_regs *uregs;
+
+	unsigned long trap_no, error_code;
+	unsigned long address;
+	/* Hardware debugging registers may come here */
+
+	/* floating point info */
+	union sh_fpu_union fpu;
+};
+
+#define INIT_MMAP \
+{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
+
+extern  struct pt_regs fake_swapper_regs;
+
+#define INIT_THREAD  {				\
+	.sp		= sizeof(init_stack) +	\
+			  (long) &init_stack,	\
+	.pc		= 0,			\
+	.kregs		= &fake_swapper_regs,	\
+	.uregs		= NULL,			\
+	.trap_no	= 0,			\
+	.error_code	= 0,			\
+	.address	= 0,			\
+	.fpu		= { { { 0, } }, }	\
+}
+
+/*
+ * Do necessary setup to start up a newly executed thread.
+ */
+#define SR_USER (SR_MMU | SR_FD)
+
+#define start_thread(regs, new_pc, new_sp)	do {		\
+	set_fs(USER_DS);					\
+	regs->sr = SR_USER;	/* User mode. */		\
+	regs->pc = new_pc - 4;	/* Compensate syscall exit */	\
+	regs->pc |= 1;		/* Set SHmedia ! */		\
+	regs->regs[18] = 0;					\
+	regs->regs[15] = new_sp;				\
+} while (0)
+
+/* Forward declaration, a strange C thing */
+struct task_struct;
+struct mm_struct;
+
+/* Free all resources held by a thread. */
+extern void release_thread(struct task_struct *);
+/*
+ * create a kernel thread without removing it from tasklists
+ */
+extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
+
+/* Copy and release all segment info associated with a VM */
+#define copy_segments(p, mm)	do { } while (0)
+#define release_segments(mm)	do { } while (0)
+#define forget_segments()	do { } while (0)
+#define prepare_to_copy(tsk)	do { } while (0)
+/*
+ * FPU lazy state save handling.
+ */
+
+extern __inline__ void release_fpu(void)
+{
+	unsigned long long __dummy;
+
+	/* Set FD flag in SR */
+	__asm__ __volatile__("getcon	" __SR ", %0\n\t"
+			     "or	%0, %1, %0\n\t"
+			     "putcon	%0, " __SR "\n\t"
+			     : "=&r" (__dummy)
+			     : "r" (SR_FD));
+}
+
+extern __inline__ void grab_fpu(void)
+{
+	unsigned long long __dummy;
+
+	/* Clear out FD flag in SR */
+	__asm__ __volatile__("getcon	" __SR ", %0\n\t"
+			     "and	%0, %1, %0\n\t"
+			     "putcon	%0, " __SR "\n\t"
+			     : "=&r" (__dummy)
+			     : "r" (~SR_FD));
+}
+
+/* Round to nearest, no exceptions on inexact, overflow, underflow,
+   zero-divide, invalid.  Configure option for whether to flush denorms to
+   zero, or except if a denorm is encountered.  */
+#if defined(CONFIG_SH64_FPU_DENORM_FLUSH)
+#define FPSCR_INIT  0x00040000
+#else
+#define FPSCR_INIT  0x00000000
+#endif
+
+/* Save the current FP regs */
+void fpsave(struct sh_fpu_hard_struct *fpregs);
+
+/* Initialise the FP state of a task */
+void fpinit(struct sh_fpu_hard_struct *fpregs);
+
+extern struct task_struct *last_task_used_math;
+
+/*
+ * Return saved PC of a blocked thread.
+ */
+#define thread_saved_pc(tsk)	(tsk->thread.pc)
+
+extern unsigned long get_wchan(struct task_struct *p);
+
+#define KSTK_EIP(tsk)  ((tsk)->thread.pc)
+#define KSTK_ESP(tsk)  ((tsk)->thread.sp)
+
+#define cpu_relax()	do { } while (0)
+
+#endif	/* __ASSEMBLY__ */
+#endif /* __ASM_SH64_PROCESSOR_H */
+
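grab_fpu()/release_fpu() exist to support lazy FPU context switching: SR.FD
stays set for a task until it actually executes a floating-point instruction,
and the resulting trap hands the FPU over. A sketch of that handoff; only
grab_fpu(), release_fpu(), fpsave(), fpinit() and last_task_used_math come
from this header, while the handler name and control flow are illustrative:

/* Hypothetical FPU-disable trap handler (a sketch, not the real one). */
static void fpu_disable_trap(struct task_struct *tsk)
{
	grab_fpu();			/* clear SR.FD: FP insns now work */
	if (last_task_used_math)	/* evict the previous owner */
		fpsave(&last_task_used_math->thread.fpu.hard);
	fpinit(&tsk->thread.fpu.hard);	/* or restore tsk's saved state */
	last_task_used_math = tsk;	/* tsk now owns the FPU */
}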
diff --git a/include/asm-sh64/ptrace.h b/include/asm-sh64/ptrace.h
new file mode 100644
index 0000000..56190f5
--- /dev/null
+++ b/include/asm-sh64/ptrace.h
@@ -0,0 +1,37 @@
+#ifndef __ASM_SH64_PTRACE_H
+#define __ASM_SH64_PTRACE_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/ptrace.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ *
+ */
+
+/*
+ * This struct defines the way the registers are stored on the
+ * kernel stack during a system call or other kernel entry.
+ */
+struct pt_regs {
+	unsigned long long pc;
+	unsigned long long sr;
+	unsigned long long syscall_nr;
+	unsigned long long regs[63];
+	unsigned long long tregs[8];
+	unsigned long long pad[2];
+};
+
+#ifdef __KERNEL__
+#define user_mode(regs) (((regs)->sr & 0x40000000)==0)
+#define instruction_pointer(regs) ((regs)->pc)
+#define profile_pc(regs) instruction_pointer(regs)
+extern void show_regs(struct pt_regs *);
+#endif
+
+#define PTRACE_O_TRACESYSGOOD     0x00000001
+
+#endif /* __ASM_SH64_PTRACE_H */
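user_mode() keys off SR bit 30, presumably the privilege bit in this
encoding: clear for user, set for kernel. A sketch of the usual pattern in a
fault path; the handler itself is hypothetical, and only user_mode() and
show_regs() are taken from this header:

/* Hypothetical fault-path fragment. */
static void report_fault(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		/* came from userspace: signal the task and return */
		return;
	}
	/* came from the kernel (SR bit 30 set): dump state */
	show_regs(regs);
}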
diff --git a/include/asm-sh64/registers.h b/include/asm-sh64/registers.h
new file mode 100644
index 0000000..7eec666
--- /dev/null
+++ b/include/asm-sh64/registers.h
@@ -0,0 +1,106 @@
+#ifndef __ASM_SH64_REGISTERS_H
+#define __ASM_SH64_REGISTERS_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/registers.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ * Copyright (C) 2004  Richard Curnow
+ */
+
+#ifdef __ASSEMBLY__
+/* =====================================================================
+**
+** Section 1: acts on assembly sources pre-processed by GPP (<source.S>).
+**	      Assigns symbolic names to control & target registers.
+*/
+
+/*
+ * Define some useful aliases for control registers.
+ */
+#define SR	cr0
+#define SSR	cr1
+#define PSSR	cr2
+			/* cr3 UNDEFINED */
+#define INTEVT	cr4
+#define EXPEVT	cr5
+#define PEXPEVT	cr6
+#define TRA	cr7
+#define SPC	cr8
+#define PSPC	cr9
+#define RESVEC	cr10
+#define VBR	cr11
+			/* cr12 UNDEFINED */
+#define TEA	cr13
+			/* cr14-cr15 UNDEFINED */
+#define DCR	cr16
+#define KCR0	cr17
+#define KCR1	cr18
+			/* cr19-cr31 UNDEFINED */
+			/* cr32-cr61 RESERVED */
+#define CTC	cr62
+#define USR	cr63
+
+/*
+ * ABI dependent registers (general purpose set)
+ */
+#define RET	r2
+#define ARG1	r2
+#define ARG2	r3
+#define ARG3	r4
+#define ARG4	r5
+#define ARG5	r6
+#define ARG6	r7
+#define SP	r15
+#define LINK	r18
+#define ZERO	r63
+
+/*
+ * Status register defines: used only by assembly sources (and
+ * 			    syntax independent)
+ */
+#define SR_RESET_VAL	0x0000000050008000
+#define SR_HARMLESS	0x00000000500080f0	/* Write ignores for most */
+#define SR_ENABLE_FPU	0xffffffffffff7fff	/* AND with this */
+
+#if defined (CONFIG_SH64_SR_WATCH)
+#define SR_ENABLE_MMU	0x0000000084000000	/* OR with this */
+#else
+#define SR_ENABLE_MMU	0x0000000080000000	/* OR with this */
+#endif
+
+#define SR_UNBLOCK_EXC	0xffffffffefffffff	/* AND with this */
+#define SR_BLOCK_EXC	0x0000000010000000	/* OR with this */
+
+#else	/* Not __ASSEMBLY__ syntax */
+
+/*
+** Stringify reg. name
+*/
+#define __str(x)  #x
+
+/* Stringify control register names for use in inline assembly */
+#define __SR __str(SR)
+#define __SSR __str(SSR)
+#define __PSSR __str(PSSR)
+#define __INTEVT __str(INTEVT)
+#define __EXPEVT __str(EXPEVT)
+#define __PEXPEVT __str(PEXPEVT)
+#define __TRA __str(TRA)
+#define __SPC __str(SPC)
+#define __PSPC __str(PSPC)
+#define __RESVEC __str(RESVEC)
+#define __VBR __str(VBR)
+#define __TEA __str(TEA)
+#define __DCR __str(DCR)
+#define __KCR0 __str(KCR0)
+#define __KCR1 __str(KCR1)
+#define __CTC __str(CTC)
+#define __USR __str(USR)
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_SH64_REGISTERS_H */
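On the C side the SR/cr0 aliases are not defined (they live only in the
__ASSEMBLY__ branch), so __str(SR) stringifies the name itself, and string
literal pasting builds the asm templates used in processor.h and system.h.
A standalone sketch of the mechanism:

#include <stdio.h>

#define __str(x)	#x
#define __SR		__str(SR)	/* "SR": SR is not a macro here */

int main(void)
{
	/* prints: getcon SR, %0  -- the same template that
	 * "getcon " __SR ", %0" yields inside __asm__ statements */
	printf("getcon " __SR ", %%0\n");
	return 0;
}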
diff --git a/include/asm-sh64/resource.h b/include/asm-sh64/resource.h
new file mode 100644
index 0000000..8ff9394
--- /dev/null
+++ b/include/asm-sh64/resource.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_SH64_RESOURCE_H
+#define __ASM_SH64_RESOURCE_H
+
+#include <asm-sh/resource.h>
+
+#endif /* __ASM_SH64_RESOURCE_H */
diff --git a/include/asm-sh64/scatterlist.h b/include/asm-sh64/scatterlist.h
new file mode 100644
index 0000000..5d8fa32
--- /dev/null
+++ b/include/asm-sh64/scatterlist.h
@@ -0,0 +1,23 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/scatterlist.h
+ *
+ * Copyright (C) 2003  Paul Mundt
+ *
+ */
+#ifndef __ASM_SH64_SCATTERLIST_H
+#define __ASM_SH64_SCATTERLIST_H
+
+struct scatterlist {
+    struct page * page; /* Location for highmem page, if any */
+    unsigned int offset;/* for highmem, page offset */
+    dma_addr_t dma_address;
+    unsigned int length;
+};
+
+#define ISA_DMA_THRESHOLD (0xffffffff)
+
+#endif /* !__ASM_SH64_SCATTERLIST_H */
diff --git a/include/asm-sh64/sections.h b/include/asm-sh64/sections.h
new file mode 100644
index 0000000..897f36b
--- /dev/null
+++ b/include/asm-sh64/sections.h
@@ -0,0 +1,7 @@
+#ifndef __ASM_SH64_SECTIONS_H
+#define __ASM_SH64_SECTIONS_H
+
+#include <asm-sh/sections.h>
+
+#endif /* __ASM_SH64_SECTIONS_H */
+
diff --git a/include/asm-sh64/segment.h b/include/asm-sh64/segment.h
new file mode 100644
index 0000000..92ac001
--- /dev/null
+++ b/include/asm-sh64/segment.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_SEGMENT_H
+#define _ASM_SEGMENT_H
+
+/* Only here because we have some old header files that expect it.. */
+
+#endif /* _ASM_SEGMENT_H */
diff --git a/include/asm-sh64/semaphore-helper.h b/include/asm-sh64/semaphore-helper.h
new file mode 100644
index 0000000..fcfafe2
--- /dev/null
+++ b/include/asm-sh64/semaphore-helper.h
@@ -0,0 +1,101 @@
+#ifndef __ASM_SH64_SEMAPHORE_HELPER_H
+#define __ASM_SH64_SEMAPHORE_HELPER_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/semaphore-helper.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ *
+ */
+#include <asm/errno.h>
+
+/*
+ * SMP- and interrupt-safe semaphore helper functions.
+ *
+ * (C) Copyright 1996 Linus Torvalds
+ * (C) Copyright 1999 Andrea Arcangeli
+ */
+
+/*
+ * These two _must_ execute atomically wrt each other.
+ *
+ * This is trivially done with load_locked/store_cond,
+ * which we have.  Let the rest of the losers suck eggs.
+ */
+static __inline__ void wake_one_more(struct semaphore * sem)
+{
+	atomic_inc((atomic_t *)&sem->sleepers);
+}
+
+static __inline__ int waking_non_zero(struct semaphore *sem)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&semaphore_wake_lock, flags);
+	if (sem->sleepers > 0) {
+		sem->sleepers--;
+		ret = 1;
+	}
+	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
+	return ret;
+}
+
+/*
+ * waking_non_zero_interruptible:
+ *	1	got the lock
+ *	0	go to sleep
+ *	-EINTR	interrupted
+ *
+ * We must undo the sem->count down_interruptible() increment while we are
+ * protected by the spinlock in order to make atomic this atomic_inc() with the
+ * atomic_read() in wake_one_more(), otherwise we can race. -arca
+ */
+static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
+						struct task_struct *tsk)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&semaphore_wake_lock, flags);
+	if (sem->sleepers > 0) {
+		sem->sleepers--;
+		ret = 1;
+	} else if (signal_pending(tsk)) {
+		atomic_inc(&sem->count);
+		ret = -EINTR;
+	}
+	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
+	return ret;
+}
+
+/*
+ * waking_non_zero_trylock:
+ *	1	failed to lock
+ *	0	got the lock
+ *
+ * We must undo the sem->count down_trylock() increment while we are
+ * protected by the spinlock in order to make atomic this atomic_inc() with the
+ * atomic_read() in wake_one_more(), otherwise we can race. -arca
+ */
+static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
+{
+	unsigned long flags;
+	int ret = 1;
+
+	spin_lock_irqsave(&semaphore_wake_lock, flags);
+	if (sem->sleepers <= 0)
+		atomic_inc(&sem->count);
+	else {
+		sem->sleepers--;
+		ret = 0;
+	}
+	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
+	return ret;
+}
+
+#endif /* __ASM_SH64_SEMAPHORE_HELPER_H */
diff --git a/include/asm-sh64/semaphore.h b/include/asm-sh64/semaphore.h
new file mode 100644
index 0000000..fce22bb
--- /dev/null
+++ b/include/asm-sh64/semaphore.h
@@ -0,0 +1,123 @@
+#ifndef __ASM_SH64_SEMAPHORE_H
+#define __ASM_SH64_SEMAPHORE_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/semaphore.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ *
+ * SMP- and interrupt-safe semaphores.
+ *
+ * (C) Copyright 1996 Linus Torvalds
+ *
+ * SuperH version by Niibe Yutaka
+ *  (Currently no asm implementation but generic C code...)
+ *
+ */
+
+#include <linux/linkage.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/rwsem.h>
+
+#include <asm/system.h>
+#include <asm/atomic.h>
+
+struct semaphore {
+	atomic_t count;
+	int sleepers;
+	wait_queue_head_t wait;
+};
+
+#define __SEMAPHORE_INITIALIZER(name, n)				\
+{									\
+	.count		= ATOMIC_INIT(n),				\
+	.sleepers	= 0,						\
+	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
+}
+
+#define __MUTEX_INITIALIZER(name) \
+	__SEMAPHORE_INITIALIZER(name,1)
+
+#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
+	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
+
+#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
+#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
+
+static inline void sema_init (struct semaphore *sem, int val)
+{
+/*
+ *	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
+ *
+ * I'd rather use the more flexible initialization above, but sadly
+ * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well.
+ */
+	atomic_set(&sem->count, val);
+	sem->sleepers = 0;
+	init_waitqueue_head(&sem->wait);
+}
+
+static inline void init_MUTEX (struct semaphore *sem)
+{
+	sema_init(sem, 1);
+}
+
+static inline void init_MUTEX_LOCKED (struct semaphore *sem)
+{
+	sema_init(sem, 0);
+}
+
+#if 0
+asmlinkage void __down_failed(void /* special register calling convention */);
+asmlinkage int  __down_failed_interruptible(void  /* params in registers */);
+asmlinkage int  __down_failed_trylock(void  /* params in registers */);
+asmlinkage void __up_wakeup(void /* special register calling convention */);
+#endif
+
+asmlinkage void __down(struct semaphore * sem);
+asmlinkage int  __down_interruptible(struct semaphore * sem);
+asmlinkage int  __down_trylock(struct semaphore * sem);
+asmlinkage void __up(struct semaphore * sem);
+
+extern spinlock_t semaphore_wake_lock;
+
+static inline void down(struct semaphore * sem)
+{
+	if (atomic_dec_return(&sem->count) < 0)
+		__down(sem);
+}
+
+static inline int down_interruptible(struct semaphore * sem)
+{
+	int ret = 0;
+
+	if (atomic_dec_return(&sem->count) < 0)
+		ret = __down_interruptible(sem);
+	return ret;
+}
+
+static inline int down_trylock(struct semaphore * sem)
+{
+	int ret = 0;
+
+	if (atomic_dec_return(&sem->count) < 0)
+		ret = __down_trylock(sem);
+	return ret;
+}
+
+/*
+ * Note! This is subtle. We jump to wake people up only if
+ * the semaphore was negative (== somebody was waiting on it).
+ */
+static inline void up(struct semaphore * sem)
+{
+	if (atomic_inc_return(&sem->count) <= 0)
+		__up(sem);
+}
+
+#endif /* __ASM_SH64_SEMAPHORE_H */
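For reference, the usual way this interface is consumed; the lock name and
critical section are placeholders, and only DECLARE_MUTEX(),
down_interruptible() and up() come from this header:

static DECLARE_MUTEX(my_lock);		/* count starts at 1 (unlocked) */

static int my_op(void)
{
	if (down_interruptible(&my_lock))
		return -EINTR;		/* a signal aborted the sleep */
	/* ... critical section: count dropped to 0 or below ... */
	up(&my_lock);			/* __up() wakes a sleeper if any */
	return 0;
}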
diff --git a/include/asm-sh64/sembuf.h b/include/asm-sh64/sembuf.h
new file mode 100644
index 0000000..ec4d9f1
--- /dev/null
+++ b/include/asm-sh64/sembuf.h
@@ -0,0 +1,36 @@
+#ifndef __ASM_SH64_SEMBUF_H
+#define __ASM_SH64_SEMBUF_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/sembuf.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ *
+ */
+
+/*
+ * The semid64_ds structure for the sh64 architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct semid64_ds {
+	struct ipc64_perm sem_perm;		/* permissions .. see ipc.h */
+	__kernel_time_t	sem_otime;		/* last semop time */
+	unsigned long	__unused1;
+	__kernel_time_t	sem_ctime;		/* last change time */
+	unsigned long	__unused2;
+	unsigned long	sem_nsems;		/* no. of semaphores in array */
+	unsigned long	__unused3;
+	unsigned long	__unused4;
+};
+
+#endif /* __ASM_SH64_SEMBUF_H */
diff --git a/include/asm-sh64/serial.h b/include/asm-sh64/serial.h
new file mode 100644
index 0000000..8e39b4e
--- /dev/null
+++ b/include/asm-sh64/serial.h
@@ -0,0 +1,33 @@
+/*
+ * include/asm-sh64/serial.h
+ *
+ * Configuration details for 8250, 16450, 16550, etc. serial ports
+ */
+
+#ifndef _ASM_SERIAL_H
+#define _ASM_SERIAL_H
+
+/*
+ * This assumes you have a 1.8432 MHz clock for your UART.
+ *
+ * It'd be nice if someone built a serial card with a 24.576 MHz
+ * clock, since the 16550A is capable of handling a top speed of 1.5
+ * megabits/second; but this requires the faster clock.
+ */
+#define BASE_BAUD ( 1843200 / 16 )
+
+#define RS_TABLE_SIZE  2
+
+#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
+
+#define STD_SERIAL_PORT_DEFNS			\
+	/* UART CLK   PORT IRQ     FLAGS        */			\
+	{ 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS },	/* ttyS0 */	\
+	{ 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }	/* ttyS1 */
+
+#define SERIAL_PORT_DFNS STD_SERIAL_PORT_DEFNS
+
+/* XXX: This should be moved into irq.h */
+#define irq_cannonicalize(x) (x)
+
+#endif /* _ASM_SERIAL_H */
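BASE_BAUD is 1843200/16 == 115200 because an 8250-class UART divides its
input clock by 16 before applying the programmable divisor, so a driver
programs divisor = BASE_BAUD / baud. A standalone check of that arithmetic
(illustrative only, not driver code):

#include <assert.h>

#define BASE_BAUD	(1843200 / 16)	/* as above: 115200 */

int main(void)
{
	assert(BASE_BAUD == 115200);
	assert(BASE_BAUD / 9600 == 12);		/* divisor for 9600 baud */
	assert(BASE_BAUD / 115200 == 1);	/* divisor 1 = top speed */
	return 0;
}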
diff --git a/include/asm-sh64/setup.h b/include/asm-sh64/setup.h
new file mode 100644
index 0000000..ebd42eb
--- /dev/null
+++ b/include/asm-sh64/setup.h
@@ -0,0 +1,16 @@
+#ifndef __ASM_SH64_SETUP_H
+#define __ASM_SH64_SETUP_H
+
+#define PARAM ((unsigned char *)empty_zero_page)
+#define MOUNT_ROOT_RDONLY (*(unsigned long *) (PARAM+0x000))
+#define RAMDISK_FLAGS (*(unsigned long *) (PARAM+0x004))
+#define ORIG_ROOT_DEV (*(unsigned long *) (PARAM+0x008))
+#define LOADER_TYPE (*(unsigned long *) (PARAM+0x00c))
+#define INITRD_START (*(unsigned long *) (PARAM+0x010))
+#define INITRD_SIZE (*(unsigned long *) (PARAM+0x014))
+
+#define COMMAND_LINE ((char *) (PARAM+256))
+#define COMMAND_LINE_SIZE 256
+
+#endif /* __ASM_SH64_SETUP_H */
+
diff --git a/include/asm-sh64/shmbuf.h b/include/asm-sh64/shmbuf.h
new file mode 100644
index 0000000..022f349
--- /dev/null
+++ b/include/asm-sh64/shmbuf.h
@@ -0,0 +1,53 @@
+#ifndef __ASM_SH64_SHMBUF_H
+#define __ASM_SH64_SHMBUF_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/shmbuf.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ *
+ */
+
+/*
+ * The shmid64_ds structure for the sh64 architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct shmid64_ds {
+	struct ipc64_perm	shm_perm;	/* operation perms */
+	size_t			shm_segsz;	/* size of segment (bytes) */
+	__kernel_time_t		shm_atime;	/* last attach time */
+	unsigned long		__unused1;
+	__kernel_time_t		shm_dtime;	/* last detach time */
+	unsigned long		__unused2;
+	__kernel_time_t		shm_ctime;	/* last change time */
+	unsigned long		__unused3;
+	__kernel_pid_t		shm_cpid;	/* pid of creator */
+	__kernel_pid_t		shm_lpid;	/* pid of last operator */
+	unsigned long		shm_nattch;	/* no. of current attaches */
+	unsigned long		__unused4;
+	unsigned long		__unused5;
+};
+
+struct shminfo64 {
+	unsigned long	shmmax;
+	unsigned long	shmmin;
+	unsigned long	shmmni;
+	unsigned long	shmseg;
+	unsigned long	shmall;
+	unsigned long	__unused1;
+	unsigned long	__unused2;
+	unsigned long	__unused3;
+	unsigned long	__unused4;
+};
+
+#endif /* __ASM_SH64_SHMBUF_H */
diff --git a/include/asm-sh64/shmparam.h b/include/asm-sh64/shmparam.h
new file mode 100644
index 0000000..d3a99a4
--- /dev/null
+++ b/include/asm-sh64/shmparam.h
@@ -0,0 +1,20 @@
+#ifndef __ASM_SH64_SHMPARAM_H
+#define __ASM_SH64_SHMPARAM_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/shmparam.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ *
+ */
+
+#include <asm/cache.h>
+
+/* attach addr must be a multiple of this */
+#define	SHMLBA	(cpu_data->dcache.sets * L1_CACHE_BYTES)
+
+#endif /* __ASM_SH64_SHMPARAM_H */
diff --git a/include/asm-sh64/sigcontext.h b/include/asm-sh64/sigcontext.h
new file mode 100644
index 0000000..6293509
--- /dev/null
+++ b/include/asm-sh64/sigcontext.h
@@ -0,0 +1,30 @@
+#ifndef __ASM_SH64_SIGCONTEXT_H
+#define __ASM_SH64_SIGCONTEXT_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/sigcontext.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ *
+ */
+
+struct sigcontext {
+	unsigned long	oldmask;
+
+	/* CPU registers */
+	unsigned long long sc_regs[63];
+	unsigned long long sc_tregs[8];
+	unsigned long long sc_pc;
+	unsigned long long sc_sr;
+
+	/* FPU registers */
+	unsigned long long sc_fpregs[32];
+	unsigned int sc_fpscr;
+	unsigned int sc_fpvalid;
+};
+
+#endif /* __ASM_SH64_SIGCONTEXT_H */
diff --git a/include/asm-sh64/siginfo.h b/include/asm-sh64/siginfo.h
new file mode 100644
index 0000000..56ef1da
--- /dev/null
+++ b/include/asm-sh64/siginfo.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_SH64_SIGINFO_H
+#define __ASM_SH64_SIGINFO_H
+
+#include <asm-generic/siginfo.h>
+
+#endif /* __ASM_SH64_SIGINFO_H */
diff --git a/include/asm-sh64/signal.h b/include/asm-sh64/signal.h
new file mode 100644
index 0000000..77957e9
--- /dev/null
+++ b/include/asm-sh64/signal.h
@@ -0,0 +1,185 @@
+#ifndef __ASM_SH64_SIGNAL_H
+#define __ASM_SH64_SIGNAL_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/signal.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ *
+ */
+
+#include <linux/types.h>
+#include <asm/processor.h>
+
+/* Avoid too many header ordering problems.  */
+struct siginfo;
+
+#define _NSIG		64
+#define _NSIG_BPW	32
+#define _NSIG_WORDS	(_NSIG / _NSIG_BPW)
+
+typedef unsigned long old_sigset_t;		/* at least 32 bits */
+
+typedef struct {
+	unsigned long sig[_NSIG_WORDS];
+} sigset_t;
+
+#define SIGHUP		 1
+#define SIGINT		 2
+#define SIGQUIT		 3
+#define SIGILL		 4
+#define SIGTRAP		 5
+#define SIGABRT		 6
+#define SIGIOT		 6
+#define SIGBUS		 7
+#define SIGFPE		 8
+#define SIGKILL		 9
+#define SIGUSR1		10
+#define SIGSEGV		11
+#define SIGUSR2		12
+#define SIGPIPE		13
+#define SIGALRM		14
+#define SIGTERM		15
+#define SIGSTKFLT	16
+#define SIGCHLD		17
+#define SIGCONT		18
+#define SIGSTOP		19
+#define SIGTSTP		20
+#define SIGTTIN		21
+#define SIGTTOU		22
+#define SIGURG		23
+#define SIGXCPU		24
+#define SIGXFSZ		25
+#define SIGVTALRM	26
+#define SIGPROF		27
+#define SIGWINCH	28
+#define SIGIO		29
+#define SIGPOLL		SIGIO
+/*
+#define SIGLOST		29
+*/
+#define SIGPWR		30
+#define SIGSYS		31
+#define	SIGUNUSED	31
+
+/* These should not be considered constants from userland.  */
+#define SIGRTMIN	32
+#define SIGRTMAX	(_NSIG-1)
+
+/*
+ * SA_FLAGS values:
+ *
+ * SA_ONSTACK indicates that a registered stack_t will be used.
+ * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the
+ * SA_RESTART flag to get restarting signals (which were the default long ago)
+ * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
+ * SA_RESETHAND clears the handler when the signal is delivered.
+ * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
+ * SA_NODEFER prevents the current signal from being masked in the handler.
+ *
+ * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
+ * Unix names RESETHAND and NODEFER respectively.
+ */
+#define SA_NOCLDSTOP	0x00000001
+#define SA_NOCLDWAIT	0x00000002 /* not supported yet */
+#define SA_SIGINFO	0x00000004
+#define SA_ONSTACK	0x08000000
+#define SA_RESTART	0x10000000
+#define SA_NODEFER	0x40000000
+#define SA_RESETHAND	0x80000000
+
+#define SA_NOMASK	SA_NODEFER
+#define SA_ONESHOT	SA_RESETHAND
+#define SA_INTERRUPT	0x20000000 /* dummy -- ignored */
+
+#define SA_RESTORER	0x04000000
+
+/*
+ * sigaltstack controls
+ */
+#define SS_ONSTACK	1
+#define SS_DISABLE	2
+
+#define MINSIGSTKSZ	2048
+#define SIGSTKSZ	THREAD_SIZE
+
+#ifdef __KERNEL__
+
+/*
+ * These values of sa_flags are used only by the kernel as part of the
+ * irq handling routines.
+ *
+ * SA_INTERRUPT is also used by the irq handling routines.
+ * SA_SHIRQ is for shared interrupt support on PCI and EISA.
+ */
+#define SA_PROBE		SA_ONESHOT
+#define SA_SAMPLE_RANDOM	SA_RESTART
+#define SA_SHIRQ		0x04000000
+#endif
+
+#define SIG_BLOCK          0	/* for blocking signals */
+#define SIG_UNBLOCK        1	/* for unblocking signals */
+#define SIG_SETMASK        2	/* for setting the signal mask */
+
+/* Type of a signal handler.  */
+typedef void (*__sighandler_t)(int);
+
+#define SIG_DFL	((__sighandler_t)0)	/* default signal handling */
+#define SIG_IGN	((__sighandler_t)1)	/* ignore signal */
+#define SIG_ERR	((__sighandler_t)-1)	/* error return from signal */
+
+#ifdef __KERNEL__
+struct old_sigaction {
+	__sighandler_t sa_handler;
+	old_sigset_t sa_mask;
+	unsigned long sa_flags;
+	void (*sa_restorer)(void);
+};
+
+struct sigaction {
+	__sighandler_t sa_handler;
+	unsigned long sa_flags;
+	void (*sa_restorer)(void);
+	sigset_t sa_mask;		/* mask last for extensibility */
+};
+
+struct k_sigaction {
+	struct sigaction sa;
+};
+#else
+/* Here we must cater to libcs that poke about in kernel headers.  */
+
+struct sigaction {
+	union {
+	  __sighandler_t _sa_handler;
+	  void (*_sa_sigaction)(int, struct siginfo *, void *);
+	} _u;
+	sigset_t sa_mask;
+	unsigned long sa_flags;
+	void (*sa_restorer)(void);
+};
+
+#define sa_handler	_u._sa_handler
+#define sa_sigaction	_u._sa_sigaction
+
+#endif /* __KERNEL__ */
+
+typedef struct sigaltstack {
+	void *ss_sp;
+	int ss_flags;
+	size_t ss_size;
+} stack_t;
+
+#ifdef __KERNEL__
+#include <asm/sigcontext.h>
+
+#define sigmask(sig)	(1UL << ((sig) - 1))
+#define ptrace_signal_deliver(regs, cookie) do { } while (0)
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_SH64_SIGNAL_H */
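sigmask() follows the kernel convention that signal n occupies bit n-1; with
the numbers above, blocking SIGINT and SIGQUIT builds mask 0x6. A standalone
sketch repeating the same definitions outside the header:

#include <assert.h>

#define SIGINT	2
#define SIGQUIT	3
#define sigmask(sig)	(1UL << ((sig) - 1))

int main(void)
{
	unsigned long blocked = sigmask(SIGINT) | sigmask(SIGQUIT);

	assert(sigmask(SIGINT) == 0x2UL);	/* bit 1 */
	assert(blocked == 0x6UL);		/* bits 1 and 2 */
	return 0;
}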
diff --git a/include/asm-sh64/smp.h b/include/asm-sh64/smp.h
new file mode 100644
index 0000000..4a4d0da
--- /dev/null
+++ b/include/asm-sh64/smp.h
@@ -0,0 +1,15 @@
+#ifndef __ASM_SH64_SMP_H
+#define __ASM_SH64_SMP_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/smp.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ *
+ */
+
+#endif /* __ASM_SH64_SMP_H */
diff --git a/include/asm-sh64/socket.h b/include/asm-sh64/socket.h
new file mode 100644
index 0000000..1853f72
--- /dev/null
+++ b/include/asm-sh64/socket.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_SH64_SOCKET_H
+#define __ASM_SH64_SOCKET_H
+
+#include <asm-sh/socket.h>
+
+#endif /* __ASM_SH64_SOCKET_H */
diff --git a/include/asm-sh64/sockios.h b/include/asm-sh64/sockios.h
new file mode 100644
index 0000000..1ae23ae
--- /dev/null
+++ b/include/asm-sh64/sockios.h
@@ -0,0 +1,24 @@
+#ifndef __ASM_SH64_SOCKIOS_H
+#define __ASM_SH64_SOCKIOS_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/sockios.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ *
+ */
+
+/* Socket-level I/O control calls. */
+#define FIOGETOWN	_IOR('f', 123, int)
+#define FIOSETOWN 	_IOW('f', 124, int)
+
+#define SIOCATMARK	_IOR('s', 7, int)
+#define SIOCSPGRP	_IOW('s', 8, pid_t)
+#define SIOCGPGRP	_IOR('s', 9, pid_t)
+
+#define SIOCGSTAMP	_IOR('s', 100, struct timeval) /* Get stamp - linux-specific */
+#endif /* __ASM_SH64_SOCKIOS_H */
diff --git a/include/asm-sh64/spinlock.h b/include/asm-sh64/spinlock.h
new file mode 100644
index 0000000..296b0c9
--- /dev/null
+++ b/include/asm-sh64/spinlock.h
@@ -0,0 +1,17 @@
+#ifndef __ASM_SH64_SPINLOCK_H
+#define __ASM_SH64_SPINLOCK_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/spinlock.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ *
+ */
+
+#error "No SMP on SH64"
+
+#endif /* __ASM_SH64_SPINLOCK_H */
diff --git a/include/asm-sh64/stat.h b/include/asm-sh64/stat.h
new file mode 100644
index 0000000..86f551b
--- /dev/null
+++ b/include/asm-sh64/stat.h
@@ -0,0 +1,88 @@
+#ifndef __ASM_SH64_STAT_H
+#define __ASM_SH64_STAT_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/stat.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ *
+ */
+
+struct __old_kernel_stat {
+	unsigned short st_dev;
+	unsigned short st_ino;
+	unsigned short st_mode;
+	unsigned short st_nlink;
+	unsigned short st_uid;
+	unsigned short st_gid;
+	unsigned short st_rdev;
+	unsigned long  st_size;
+	unsigned long  st_atime;
+	unsigned long  st_mtime;
+	unsigned long  st_ctime;
+};
+
+struct stat {
+	unsigned short st_dev;
+	unsigned short __pad1;
+	unsigned long st_ino;
+	unsigned short st_mode;
+	unsigned short st_nlink;
+	unsigned short st_uid;
+	unsigned short st_gid;
+	unsigned short st_rdev;
+	unsigned short __pad2;
+	unsigned long  st_size;
+	unsigned long  st_blksize;
+	unsigned long  st_blocks;
+	unsigned long  st_atime;
+	unsigned long  st_atime_nsec;
+	unsigned long  st_mtime;
+	unsigned long  st_mtime_nsec;
+	unsigned long  st_ctime;
+	unsigned long  st_ctime_nsec;
+	unsigned long  __unused4;
+	unsigned long  __unused5;
+};
+
+/* This matches struct stat64 in glibc2.1, hence the absolutely
+ * insane amounts of padding around dev_t's.
+ */
+struct stat64 {
+	unsigned short	st_dev;
+	unsigned char	__pad0[10];
+
+	unsigned long	st_ino;
+	unsigned int	st_mode;
+	unsigned int	st_nlink;
+
+	unsigned long	st_uid;
+	unsigned long	st_gid;
+
+	unsigned short	st_rdev;
+	unsigned char	__pad3[10];
+
+	long long	st_size;
+	unsigned long	st_blksize;
+
+	unsigned long	st_blocks;	/* Number of 512-byte blocks allocated. */
+	unsigned long	__pad4;		/* future possible st_blocks high bits */
+
+	unsigned long	st_atime;
+	unsigned long	st_atime_nsec;
+
+	unsigned long	st_mtime;
+	unsigned long	st_mtime_nsec;
+
+	unsigned long	st_ctime;
+	unsigned long	st_ctime_nsec;	/* will be high 32 bits of ctime someday */
+
+	unsigned long	__unused1;
+	unsigned long	__unused2;
+};
+
+#endif /* __ASM_SH64_STAT_H */
diff --git a/include/asm-sh64/statfs.h b/include/asm-sh64/statfs.h
new file mode 100644
index 0000000..083fd79
--- /dev/null
+++ b/include/asm-sh64/statfs.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_SH64_STATFS_H
+#define __ASM_SH64_STATFS_H
+
+#include <asm-generic/statfs.h>
+
+#endif /* __ASM_SH64_STATFS_H */
diff --git a/include/asm-sh64/string.h b/include/asm-sh64/string.h
new file mode 100644
index 0000000..8a73573
--- /dev/null
+++ b/include/asm-sh64/string.h
@@ -0,0 +1,21 @@
+#ifndef __ASM_SH64_STRING_H
+#define __ASM_SH64_STRING_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/string.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ *
+ * Nearly empty on purpose: the SH64 ASM string libs are out of the
+ * current project scope, so only memcpy is provided.
+ *
+ */
+
+#define __HAVE_ARCH_MEMCPY
+
+extern void *memcpy(void *dest, const void *src, size_t count);
+
+#endif
diff --git a/include/asm-sh64/system.h b/include/asm-sh64/system.h
new file mode 100644
index 0000000..42510e4
--- /dev/null
+++ b/include/asm-sh64/system.h
@@ -0,0 +1,195 @@
+#ifndef __ASM_SH64_SYSTEM_H
+#define __ASM_SH64_SYSTEM_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/system.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ * Copyright (C) 2003  Paul Mundt
+ * Copyright (C) 2004  Richard Curnow
+ *
+ */
+
+#include <linux/config.h>
+#include <asm/registers.h>
+#include <asm/processor.h>
+
+/*
+ *	switch_to() should switch tasks to task nr n, first
+ */
+
+typedef struct {
+	unsigned long seg;
+} mm_segment_t;
+
+extern struct task_struct *sh64_switch_to(struct task_struct *prev,
+					  struct thread_struct *prev_thread,
+					  struct task_struct *next,
+					  struct thread_struct *next_thread);
+
+#define switch_to(prev,next,last) \
+	do {\
+		if (last_task_used_math != next) {\
+			struct pt_regs *regs = next->thread.uregs;\
+			if (regs) regs->sr |= SR_FD;\
+		}\
+		last = sh64_switch_to(prev, &prev->thread, next, &next->thread);\
+	} while(0)
+
+#define nop() __asm__ __volatile__ ("nop")
+
+#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+
+#define tas(ptr) (xchg((ptr), 1))
+
+extern void __xchg_called_with_bad_pointer(void);
+
+#define mb()	__asm__ __volatile__ ("synco": : :"memory")
+#define rmb()	mb()
+#define wmb()	__asm__ __volatile__ ("synco": : :"memory")
+#define read_barrier_depends()	do { } while (0)
+
+#ifdef CONFIG_SMP
+#define smp_mb()	mb()
+#define smp_rmb()	rmb()
+#define smp_wmb()	wmb()
+#define smp_read_barrier_depends()	read_barrier_depends()
+#else
+#define smp_mb()	barrier()
+#define smp_rmb()	barrier()
+#define smp_wmb()	barrier()
+#define smp_read_barrier_depends()	do { } while (0)
+#endif /* CONFIG_SMP */
+
+#define set_rmb(var, value) do { xchg(&var, value); } while (0)
+#define set_mb(var, value) set_rmb(var, value)
+#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+
+/* Interrupt Control */
+#ifndef HARD_CLI
+#define SR_MASK_L 0x000000f0L
+#define SR_MASK_LL 0x00000000000000f0LL
+#else
+#define SR_MASK_L 0x10000000L
+#define SR_MASK_LL 0x0000000010000000LL
+#endif
+
+static __inline__ void local_irq_enable(void)
+{
+	/* cli/sti based on SR.BL */
+	unsigned long long __dummy0, __dummy1=~SR_MASK_LL;
+
+	__asm__ __volatile__("getcon	" __SR ", %0\n\t"
+			     "and	%0, %1, %0\n\t"
+			     "putcon	%0, " __SR "\n\t"
+			     : "=&r" (__dummy0)
+			     : "r" (__dummy1));
+}
+
+static __inline__ void local_irq_disable(void)
+{
+	/* cli/sti based on SR.BL */
+	unsigned long long __dummy0, __dummy1=SR_MASK_LL;
+	__asm__ __volatile__("getcon	" __SR ", %0\n\t"
+			     "or	%0, %1, %0\n\t"
+			     "putcon	%0, " __SR "\n\t"
+			     : "=&r" (__dummy0)
+			     : "r" (__dummy1));
+}
+
+#define local_save_flags(x) 						\
+(__extension__ ({	unsigned long long __dummy=SR_MASK_LL;		\
+	__asm__ __volatile__(						\
+		"getcon	" __SR ", %0\n\t"				\
+		"and	%0, %1, %0"					\
+		: "=&r" (x)						\
+		: "r" (__dummy));}))
+
+#define local_irq_save(x)						\
+(__extension__ ({	unsigned long long __d2=SR_MASK_LL, __d1;	\
+	__asm__ __volatile__(          	         			\
+		"getcon	" __SR ", %1\n\t" 				\
+		"or	%1, r63, %0\n\t"				\
+		"or	%1, %2, %1\n\t"					\
+		"putcon	%1, " __SR "\n\t"    				\
+		"and	%0, %2, %0"    					\
+		: "=&r" (x), "=&r" (__d1)				\
+		: "r" (__d2));}))
+
+#define local_irq_restore(x) do { 					\
+	if ( ((x) & SR_MASK_L) == 0 )		/* dropping to 0 ? */	\
+		local_irq_enable();		/* yes...re-enable */	\
+} while (0)
+
+#define irqs_disabled()			\
+({					\
+	unsigned long flags;		\
+	local_save_flags(flags);	\
+	(flags != 0);			\
+})
+
+extern __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
+{
+	unsigned long flags, retval;
+
+	local_irq_save(flags);
+	retval = *m;
+	*m = val;
+	local_irq_restore(flags);
+	return retval;
+}
+
+extern __inline__ unsigned long xchg_u8(volatile unsigned char * m, unsigned long val)
+{
+	unsigned long flags, retval;
+
+	local_irq_save(flags);
+	retval = *m;
+	*m = val & 0xff;
+	local_irq_restore(flags);
+	return retval;
+}
+
+static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+	switch (size) {
+	case 4:
+		return xchg_u32(ptr, x);
+	case 1:
+		return xchg_u8(ptr, x);
+	}
+	__xchg_called_with_bad_pointer();
+	return x;
+}
+
+/* XXX
+ * disable hlt during certain critical i/o operations
+ */
+#define HAVE_DISABLE_HLT
+void disable_hlt(void);
+void enable_hlt(void);
+
+#ifdef CONFIG_SH_ALPHANUMERIC
+/* This is only used for debugging. */
+extern void print_seg(char *file,int line);
+#define PLS() print_seg(__FILE__,__LINE__)
+#else	/* CONFIG_SH_ALPHANUMERIC */
+#define PLS()
+#endif	/* CONFIG_SH_ALPHANUMERIC */
+
+#define PL() printk("@ <%s,%s:%d>\n",__FILE__,__FUNCTION__,__LINE__)
+
+#define arch_align_stack(x) (x)
+
+#endif /* __ASM_SH64_SYSTEM_H */
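xchg()/tas() above give the classic test-and-set primitive (implemented here
by masking interrupts around a load/store rather than with an atomic
instruction, which suffices on a uniprocessor sh64). A sketch of the usual
construction; the lock variable and busy-wait are illustrative:

static volatile int simple_lock;	/* 0 = free, 1 = held */

static void simple_acquire(void)
{
	/* tas() returns the old value: 0 means we took the lock */
	while (tas(&simple_lock))
		;			/* busy-wait until released */
}

static void simple_release(void)
{
	simple_lock = 0;
}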
diff --git a/include/asm-sh64/termbits.h b/include/asm-sh64/termbits.h
new file mode 100644
index 0000000..86bde5e
--- /dev/null
+++ b/include/asm-sh64/termbits.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_SH64_TERMBITS_H
+#define __ASM_SH64_TERMBITS_H
+
+#include <asm-sh/termbits.h>
+
+#endif /* __ASM_SH64_TERMBITS_H */
diff --git a/include/asm-sh64/termios.h b/include/asm-sh64/termios.h
new file mode 100644
index 0000000..4a9c7fb
--- /dev/null
+++ b/include/asm-sh64/termios.h
@@ -0,0 +1,117 @@
+#ifndef __ASM_SH64_TERMIOS_H
+#define __ASM_SH64_TERMIOS_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/termios.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ *
+ */
+
+#include <asm/termbits.h>
+#include <asm/ioctls.h>
+
+struct winsize {
+	unsigned short ws_row;
+	unsigned short ws_col;
+	unsigned short ws_xpixel;
+	unsigned short ws_ypixel;
+};
+
+#define NCC 8
+struct termio {
+	unsigned short c_iflag;		/* input mode flags */
+	unsigned short c_oflag;		/* output mode flags */
+	unsigned short c_cflag;		/* control mode flags */
+	unsigned short c_lflag;		/* local mode flags */
+	unsigned char c_line;		/* line discipline */
+	unsigned char c_cc[NCC];	/* control characters */
+};
+
+/* modem lines */
+#define TIOCM_LE	0x001
+#define TIOCM_DTR	0x002
+#define TIOCM_RTS	0x004
+#define TIOCM_ST	0x008
+#define TIOCM_SR	0x010
+#define TIOCM_CTS	0x020
+#define TIOCM_CAR	0x040
+#define TIOCM_RNG	0x080
+#define TIOCM_DSR	0x100
+#define TIOCM_CD	TIOCM_CAR
+#define TIOCM_RI	TIOCM_RNG
+#define TIOCM_OUT1	0x2000
+#define TIOCM_OUT2	0x4000
+#define TIOCM_LOOP	0x8000
+
+/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
+
+/* line disciplines */
+#define N_TTY		0
+#define N_SLIP		1
+#define N_MOUSE		2
+#define N_PPP		3
+#define N_STRIP		4
+#define N_AX25		5
+#define N_X25		6	/* X.25 async */
+#define N_6PACK		7
+#define N_MASC		8	/* Reserved for Mobitex module <kaz@cafe.net> */
+#define N_R3964		9	/* Reserved for Simatic R3964 module */
+#define N_PROFIBUS_FDL	10	/* Reserved for Profibus <Dave@mvhi.com> */
+#define N_IRDA		11	/* Linux IR - http://www.cs.uit.no/~dagb/irda/irda.html */
+#define N_SMSBLOCK	12	/* SMS block mode - for talking to GSM data cards about SMS messages */
+#define N_HDLC		13	/* synchronous HDLC */
+#define N_SYNC_PPP	14
+#define N_HCI		15	/* Bluetooth HCI UART */
+
+#ifdef __KERNEL__
+
+/*	intr=^C		quit=^\		erase=del	kill=^U
+	eof=^D		vtime=\0	vmin=\1		sxtc=\0
+	start=^Q	stop=^S		susp=^Z		eol=\0
+	reprint=^R	discard=^U	werase=^W	lnext=^V
+	eol2=\0
+*/
+#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
+
+/*
+ * Translate a "termio" structure into a "termios". Ugh.
+ */
+#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
+	unsigned short __tmp; \
+	get_user(__tmp,&(termio)->x); \
+	*(unsigned short *) &(termios)->x = __tmp; \
+}
+
+#define user_termio_to_kernel_termios(termios, termio) \
+({ \
+	SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \
+	SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \
+	SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \
+	SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \
+	copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
+})
+
+/*
+ * Translate a "termios" structure into a "termio". Ugh.
+ */
+#define kernel_termios_to_user_termio(termio, termios) \
+({ \
+	put_user((termios)->c_iflag, &(termio)->c_iflag); \
+	put_user((termios)->c_oflag, &(termio)->c_oflag); \
+	put_user((termios)->c_cflag, &(termio)->c_cflag); \
+	put_user((termios)->c_lflag, &(termio)->c_lflag); \
+	put_user((termios)->c_line,  &(termio)->c_line); \
+	copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
+})
+
+#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
+#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
+
+#endif	/* __KERNEL__ */
+
+#endif	/* __ASM_SH64_TERMIOS_H */
diff --git a/include/asm-sh64/thread_info.h b/include/asm-sh64/thread_info.h
new file mode 100644
index 0000000..e65f394
--- /dev/null
+++ b/include/asm-sh64/thread_info.h
@@ -0,0 +1,87 @@
+#ifndef __ASM_SH64_THREAD_INFO_H
+#define __ASM_SH64_THREAD_INFO_H
+
+/*
+ * SuperH 5 version
+ * Copyright (C) 2003  Paul Mundt
+ */
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+#include <asm/registers.h>
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - this struct shares the supervisor stack pages
+ * - if the contents of this structure are changed, the assembly constants must also be changed
+ */
+struct thread_info {
+	struct task_struct	*task;		/* main task structure */
+	struct exec_domain	*exec_domain;	/* execution domain */
+	unsigned long		flags;		/* low level flags */
+	/* Put the 4 32-bit fields together to make asm offsetting easier. */
+	__s32			preempt_count; /* 0 => preemptable, <0 => BUG */
+	__u16			cpu;
+
+	mm_segment_t		addr_limit;
+	struct restart_block	restart_block;
+
+	__u8			supervisor_stack[0];
+};
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ */
+#define INIT_THREAD_INFO(tsk)			\
+{						\
+	.task		= &tsk,			\
+	.exec_domain	= &default_exec_domain,	\
+	.flags		= 0,			\
+	.cpu		= 0,			\
+	.preempt_count	= 1,			\
+	.addr_limit     = KERNEL_DS,            \
+	.restart_block	= {			\
+		.fn = do_no_restart_syscall,	\
+	},					\
+}
+
+#define init_thread_info	(init_thread_union.thread_info)
+#define init_stack		(init_thread_union.stack)
+
+/* how to get the thread information struct from C */
+static inline struct thread_info *current_thread_info(void)
+{
+	struct thread_info *ti;
+
+	__asm__ __volatile__ ("getcon " __KCR0 ", %0\n\t" : "=r" (ti));
+
+	return ti;
+}
+
+/* thread information allocation */
+
+#define alloc_thread_info(ti) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
+#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
+#define get_thread_info(ti) get_task_struct((ti)->task)
+#define put_thread_info(ti) put_task_struct((ti)->task)
+
+#endif /* __ASSEMBLY__ */
+
+#define THREAD_SIZE  8192
+
+#define PREEMPT_ACTIVE		0x4000000
+
+/* thread information flags */
+#define TIF_SYSCALL_TRACE	0	/* syscall trace active */
+#define TIF_SIGPENDING		2	/* signal pending */
+#define TIF_NEED_RESCHED	3	/* rescheduling necessary */
+#define TIF_MEMDIE		4
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_SH64_THREAD_INFO_H */
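THREAD_SIZE and alloc_thread_info() agree by construction:
__get_free_pages(GFP_KERNEL, 1) returns an order-1 block, i.e. two pages,
which is 8192 bytes assuming 4 kB pages. The struct sits at the bottom of
the block and the kernel stack grows down from the top; a sketch of the
layout arithmetic (the helper is illustrative, not part of the header):

/* Illustrative; assumes 4 kB pages so the order-1 block is 8192 bytes. */
static unsigned long stack_top_of(struct thread_info *ti)
{
	/* ti is the base of the block alloc_thread_info() returned;
	 * entry.S stashes it in KCR0, which current_thread_info()
	 * reads back with getcon */
	return (unsigned long)ti + THREAD_SIZE;
}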
diff --git a/include/asm-sh64/timex.h b/include/asm-sh64/timex.h
new file mode 100644
index 0000000..af0b792
--- /dev/null
+++ b/include/asm-sh64/timex.h
@@ -0,0 +1,34 @@
+#ifndef __ASM_SH64_TIMEX_H
+#define __ASM_SH64_TIMEX_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/timex.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ * Copyright (C) 2003  Paul Mundt
+ *
+ * sh-5 architecture timex specifications
+ *
+ */
+
+#define CLOCK_TICK_RATE	1193180 /* Underlying HZ */
+#define CLOCK_TICK_FACTOR	20	/* Factor of both 1000000 and CLOCK_TICK_RATE */
+#define FINETUNE ((((((long)LATCH * HZ - CLOCK_TICK_RATE) << SHIFT_HZ) * \
+	(1000000/CLOCK_TICK_FACTOR) / (CLOCK_TICK_RATE/CLOCK_TICK_FACTOR)) \
+		<< (SHIFT_SCALE-SHIFT_HZ)) / HZ)
+
+typedef unsigned long cycles_t;
+
+static __inline__ cycles_t get_cycles (void)
+{
+	return 0;
+}
+
+#define vxtime_lock()		do {} while (0)
+#define vxtime_unlock()		do {} while (0)
+
+#endif /* __ASM_SH64_TIMEX_H */
diff --git a/include/asm-sh64/tlb.h b/include/asm-sh64/tlb.h
new file mode 100644
index 0000000..4979408
--- /dev/null
+++ b/include/asm-sh64/tlb.h
@@ -0,0 +1,92 @@
+/*
+ * include/asm-sh64/tlb.h
+ *
+ * Copyright (C) 2003  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ */
+#ifndef __ASM_SH64_TLB_H
+#define __ASM_SH64_TLB_H
+
+/*
+ * Note! These are mostly unused; we just need xTLB_LAST_VAR_UNRESTRICTED
+ * for head.S. Once this limitation is gone, we can clean the rest of this up.
+ */
+
+/* ITLB defines */
+#define ITLB_FIXED	0x00000000	/* First fixed ITLB, see head.S */
+#define ITLB_LAST_VAR_UNRESTRICTED	0x000003F0	/* Last ITLB */
+
+/* DTLB defines */
+#define DTLB_FIXED	0x00800000	/* First fixed DTLB, see head.S */
+#define DTLB_LAST_VAR_UNRESTRICTED	0x008003F0	/* Last DTLB */
+
+#ifndef __ASSEMBLY__
+
+/**
+ * for_each_dtlb_entry
+ *
+ * @tlb:	TLB entry
+ *
+ * Iterate over free (non-wired) DTLB entries
+ */
+#define for_each_dtlb_entry(tlb)		\
+	for (tlb  = cpu_data->dtlb.first;	\
+	     tlb <= cpu_data->dtlb.last;	\
+	     tlb += cpu_data->dtlb.step)
+
+/**
+ * for_each_itlb_entry
+ *
+ * @tlb:	TLB entry
+ *
+ * Iterate over free (non-wired) ITLB entries
+ */
+#define for_each_itlb_entry(tlb)		\
+	for (tlb  = cpu_data->itlb.first;	\
+	     tlb <= cpu_data->itlb.last;	\
+	     tlb += cpu_data->itlb.step)
+
+/**
+ * __flush_tlb_slot
+ *
+ * @slot:	Address of TLB slot.
+ *
+ * Flushes TLB slot @slot.
+ */
+static inline void __flush_tlb_slot(unsigned long long slot)
+{
+	__asm__ __volatile__ ("putcfg %0, 0, r63\n" : : "r" (slot));
+}
+
+/* arch/sh64/mm/tlb.c */
+extern int sh64_tlb_init(void);
+extern unsigned long long sh64_next_free_dtlb_entry(void);
+extern unsigned long long sh64_get_wired_dtlb_entry(void);
+extern int sh64_put_wired_dtlb_entry(unsigned long long entry);
+
+extern void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr, unsigned long asid, unsigned long paddr);
+extern void sh64_teardown_tlb_slot(unsigned long long config_addr);
+
+#define tlb_start_vma(tlb, vma) \
+	flush_cache_range(vma, vma->vm_start, vma->vm_end)
+
+#define tlb_end_vma(tlb, vma)	\
+	flush_tlb_range(vma, vma->vm_start, vma->vm_end)
+
+#define __tlb_remove_tlb_entry(tlb, pte, address)	do { } while (0)
+
+/*
+ * Flush whole TLBs for MM
+ */
+#define tlb_flush(tlb)		flush_tlb_mm((tlb)->mm)
+
+#include <asm-generic/tlb.h>
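+
+/*
+ * Illustrative sketch (not built; example_teardown is hypothetical):
+ * the generic mmu_gather sequence from <asm-generic/tlb.h> that ends
+ * up invoking the tlb_start_vma/tlb_end_vma/tlb_flush hooks above.
+ * The actual page-table unmap step is elided.
+ */
+#if 0
+static inline void example_teardown(struct mm_struct *mm,
+				    unsigned long start, unsigned long end)
+{
+	struct mmu_gather *tlb = tlb_gather_mmu(mm, 0);
+
+	/* ... unmap the page tables covering [start, end) ... */
+	tlb_finish_mmu(tlb, start, end);	/* winds up in tlb_flush() */
+}
+#endif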
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_SH64_TLB_H */
+
diff --git a/include/asm-sh64/tlbflush.h b/include/asm-sh64/tlbflush.h
new file mode 100644
index 0000000..15c0719
--- /dev/null
+++ b/include/asm-sh64/tlbflush.h
@@ -0,0 +1,31 @@
+#ifndef __ASM_SH64_TLBFLUSH_H
+#define __ASM_SH64_TLBFLUSH_H
+
+#include <asm/pgalloc.h>
+
+/*
+ * TLB flushing:
+ *
+ *  - flush_tlb() flushes the current mm struct TLBs
+ *  - flush_tlb_all() flushes all processes' TLBs
+ *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
+ *  - flush_tlb_page(vma, vmaddr) flushes one page
+ *  - flush_tlb_range(vma, start, end) flushes a range of pages
+ *
+ */
+
+extern void flush_tlb(void);
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+			    unsigned long end);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+extern inline void flush_tlb_pgtables(struct mm_struct *mm,
+				      unsigned long start, unsigned long end)
+{
+}
+
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
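+
+/*
+ * Illustrative sketch (not built; example_update_pte is hypothetical):
+ * typical use of the interface above after a single user PTE has been
+ * modified.
+ */
+#if 0
+static inline void example_update_pte(struct vm_area_struct *vma,
+				      unsigned long address)
+{
+	/* ... install the new PTE mapping 'address' ... */
+	flush_tlb_page(vma, address);
+}
+#endif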
+
+#endif /* __ASM_SH64_TLBFLUSH_H */
+
diff --git a/include/asm-sh64/topology.h b/include/asm-sh64/topology.h
new file mode 100644
index 0000000..3421178
--- /dev/null
+++ b/include/asm-sh64/topology.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_SH64_TOPOLOGY_H
+#define __ASM_SH64_TOPOLOGY_H
+
+#include <asm-generic/topology.h>
+
+#endif /* __ASM_SH64_TOPOLOGY_H */
diff --git a/include/asm-sh64/types.h b/include/asm-sh64/types.h
new file mode 100644
index 0000000..41d4d2f
--- /dev/null
+++ b/include/asm-sh64/types.h
@@ -0,0 +1,76 @@
+#ifndef __ASM_SH64_TYPES_H
+#define __ASM_SH64_TYPES_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/types.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ *
+ */
+
+#ifndef __ASSEMBLY__
+
+typedef unsigned short umode_t;
+
+/*
+ * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
+ * header files exported to user space
+ */
+
+typedef __signed__ char __s8;
+typedef unsigned char __u8;
+
+typedef __signed__ short __s16;
+typedef unsigned short __u16;
+
+typedef __signed__ int __s32;
+typedef unsigned int __u32;
+
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+typedef __signed__ long long __s64;
+typedef unsigned long long __u64;
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * These aren't exported outside the kernel to avoid name space clashes
+ */
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+typedef __signed__ char s8;
+typedef unsigned char u8;
+
+typedef __signed__ short s16;
+typedef unsigned short u16;
+
+typedef __signed__ int s32;
+typedef unsigned int u32;
+
+typedef __signed__ long long s64;
+typedef unsigned long long u64;
+
+/* DMA addresses come in generic and 64-bit flavours.  */
+
+#ifdef CONFIG_HIGHMEM64G
+typedef u64 dma_addr_t;
+#else
+typedef u32 dma_addr_t;
+#endif
+typedef u64 dma64_addr_t;
+
+typedef unsigned int kmem_bufctl_t;
+
+#endif /* __ASSEMBLY__ */
+
+#define BITS_PER_LONG 32
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_SH64_TYPES_H */
diff --git a/include/asm-sh64/uaccess.h b/include/asm-sh64/uaccess.h
new file mode 100644
index 0000000..a33654d
--- /dev/null
+++ b/include/asm-sh64/uaccess.h
@@ -0,0 +1,327 @@
+#ifndef __ASM_SH64_UACCESS_H
+#define __ASM_SH64_UACCESS_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/uaccess.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ * Copyright (C) 2003, 2004  Paul Mundt
+ *
+ * User space memory access functions
+ *
+ * Copyright (C) 1999  Niibe Yutaka
+ *
+ *  Based on:
+ *     MIPS implementation version 1.15 by
+ *              Copyright (C) 1996, 1997, 1998 by Ralf Baechle
+ *     and i386 version.
+ *
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+
+#define VERIFY_READ    0
+#define VERIFY_WRITE   1
+
+/*
+ * The fs value determines whether argument validity checking should be
+ * performed or not.  If get_fs() == USER_DS, checking is performed;
+ * with get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons (Data Segment Register?), these macros are misnamed.
+ */
+
+#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
+
+#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
+#define USER_DS		MAKE_MM_SEG(0x80000000)
+
+#define get_ds()	(KERNEL_DS)
+#define get_fs()        (current_thread_info()->addr_limit)
+#define set_fs(x)       (current_thread_info()->addr_limit=(x))
+
+#define segment_eq(a,b)	((a).seg == (b).seg)
+
+#define __addr_ok(addr) ((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))
+
+/*
+ * Uhhuh, this needs 33-bit arithmetic. We have a carry..
+ *
+ * sum := addr + size;  carry? --> flag = true;
+ * if (sum >= addr_limit) flag = true;
+ */
+#define __range_ok(addr,size) (((unsigned long) (addr) + (size) < (current_thread_info()->addr_limit.seg)) ? 0 : 1)
+
+#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
+#define __access_ok(addr,size) (__range_ok(addr,size) == 0)
+
+/* this function will go away soon - use access_ok() instead */
+extern inline int __deprecated verify_area(int type, const void __user * addr, unsigned long size)
+{
+	return access_ok(type,addr,size) ? 0 : -EFAULT;
+}
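+
+/*
+ * Illustrative sketch (not built; example_kernel_access is
+ * hypothetical): the classic pattern for letting kernel-internal
+ * buffers pass the checks above - widen the limit with
+ * set_fs(KERNEL_DS), then always restore the saved value.
+ */
+#if 0
+static inline void example_kernel_access(void)
+{
+	mm_segment_t old_fs = get_fs();
+
+	set_fs(KERNEL_DS);
+	/* ... call something that performs access_ok() checks ... */
+	set_fs(old_fs);
+}
+#endif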
+
+/*
+ * Uh, these should become the main single-value transfer routines ...
+ * They automatically use the right size if we just have the right
+ * pointer type ...
+ *
+ * As sh64, like the MIPS code this was based on, uses the same address
+ * space for kernel and user data, we can just do these as direct
+ * assignments.
+ *
+ * Careful to not
+ * (a) re-use the arguments for side effects (sizeof is ok)
+ * (b) require any knowledge of processes at this stage
+ */
+#define put_user(x,ptr)	__put_user_check((x),(ptr),sizeof(*(ptr)))
+#define get_user(x,ptr) __get_user_check((x),(ptr),sizeof(*(ptr)))
+
+/*
+ * The "__xxx" versions do not do address space checking, useful when
+ * doing multiple accesses to the same area (the user has to do the
+ * checks by hand with "access_ok()")
+ */
+#define __put_user(x,ptr) __put_user_nocheck((x),(ptr),sizeof(*(ptr)))
+#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
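+
+/*
+ * Illustrative sketch (not built; example_fetch_pair is hypothetical):
+ * checked vs. unchecked single-value transfers.  __get_user() is only
+ * legitimate once access_ok() has covered the whole area.
+ */
+#if 0
+static inline int example_fetch_pair(int __user *uptr, int *a, int *b)
+{
+	if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(int)))
+		return -EFAULT;
+	if (__get_user(*a, uptr) || __get_user(*b, uptr + 1))
+		return -EFAULT;
+	return 0;
+}
+#endif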
+
+/*
+ * The "xxx_ret" versions return constant specified in third argument, if
+ * something bad happens. These macros can be optimized for the
+ * case of just returning from the function xxx_ret is used.
+ */
+
+#define put_user_ret(x,ptr,ret) ({ \
+if (put_user(x,ptr)) return ret; })
+
+#define get_user_ret(x,ptr,ret) ({ \
+if (get_user(x,ptr)) return ret; })
+
+#define __put_user_ret(x,ptr,ret) ({ \
+if (__put_user(x,ptr)) return ret; })
+
+#define __get_user_ret(x,ptr,ret) ({ \
+if (__get_user(x,ptr)) return ret; })
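+
+/*
+ * Illustrative sketch (not built; example_store_status is
+ * hypothetical): the _ret variants fold the failure bail-out into the
+ * macro, returning from the enclosing function.
+ */
+#if 0
+static inline int example_store_status(int __user *uptr, int status)
+{
+	put_user_ret(status, uptr, -EFAULT);
+	return 0;
+}
+#endif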
+
+struct __large_struct { unsigned long buf[100]; };
+#define __m(x) (*(struct __large_struct *)(x))
+
+#define __get_user_size(x,ptr,size,retval)			\
+do {								\
+	retval = 0;						\
+	switch (size) {						\
+	case 1:							\
+		retval = __get_user_asm_b(x, ptr);		\
+		break;						\
+	case 2:							\
+		retval = __get_user_asm_w(x, ptr);		\
+		break;						\
+	case 4:							\
+		retval = __get_user_asm_l(x, ptr);		\
+		break;						\
+	case 8:							\
+		retval = __get_user_asm_q(x, ptr);		\
+		break;						\
+	default:						\
+		__get_user_unknown();				\
+		break;						\
+	}							\
+} while (0)
+
+#define __get_user_nocheck(x,ptr,size)				\
+({								\
+	long __gu_addr = (long)(ptr);				\
+	long __gu_err;						\
+	__typeof(*(ptr)) __gu_val;				\
+	__asm__ ("":"=r" (__gu_val));				\
+	__asm__ ("":"=r" (__gu_err));				\
+	__get_user_size((void *)&__gu_val, __gu_addr, (size), __gu_err); \
+	(x) = (__typeof__(*(ptr))) __gu_val;			\
+	__gu_err;						\
+})
+
+#define __get_user_check(x,ptr,size)				\
+({								\
+	long __gu_addr = (long)(ptr);				\
+	long __gu_err = -EFAULT;				\
+	__typeof(*(ptr)) __gu_val;				\
+	__asm__ ("":"=r" (__gu_val));				\
+	__asm__ ("":"=r" (__gu_err));				\
+	if (__access_ok(__gu_addr, (size)))			\
+		__get_user_size((void *)&__gu_val, __gu_addr, (size), __gu_err); \
+	(x) = (__typeof__(*(ptr))) __gu_val;			\
+	__gu_err;						\
+})
+
+extern long __get_user_asm_b(void *, long);
+extern long __get_user_asm_w(void *, long);
+extern long __get_user_asm_l(void *, long);
+extern long __get_user_asm_q(void *, long);
+extern void __get_user_unknown(void);
+
+#define __put_user_size(x,ptr,size,retval)			\
+do {								\
+	retval = 0;						\
+	switch (size) {						\
+	case 1:							\
+		retval = __put_user_asm_b(x, ptr);		\
+		break;						\
+	case 2:							\
+		retval = __put_user_asm_w(x, ptr);		\
+		break;						\
+	case 4:							\
+		retval = __put_user_asm_l(x, ptr);		\
+		break;						\
+	case 8:							\
+		retval = __put_user_asm_q(x, ptr);		\
+		break;						\
+	default:						\
+		__put_user_unknown();				\
+	}							\
+} while (0)
+
+#define __put_user_nocheck(x,ptr,size)				\
+({								\
+	long __pu_err;						\
+	__typeof__(*(ptr)) __pu_val = (x);			\
+	__put_user_size((void *)&__pu_val, (long)(ptr), (size), __pu_err); \
+	__pu_err;						\
+})
+
+#define __put_user_check(x,ptr,size)				\
+({								\
+	long __pu_err = -EFAULT;				\
+	long __pu_addr = (long)(ptr);				\
+	__typeof__(*(ptr)) __pu_val = (x);			\
+								\
+	if (__access_ok(__pu_addr, (size)))			\
+		__put_user_size((void *)&__pu_val, __pu_addr, (size), __pu_err);\
+	__pu_err;						\
+})
+
+extern long __put_user_asm_b(void *, long);
+extern long __put_user_asm_w(void *, long);
+extern long __put_user_asm_l(void *, long);
+extern long __put_user_asm_q(void *, long);
+extern void __put_user_unknown(void);
+
+
+/* Generic arbitrary sized copy.  */
+/* Return the number of bytes NOT copied */
+/* XXX: should be such that: 4byte and the rest. */
+extern __kernel_size_t __copy_user(void *__to, const void *__from, __kernel_size_t __n);
+
+#define copy_to_user(to,from,n) ({ \
+	void *__copy_to = (void *) (to); \
+	__kernel_size_t __copy_size = (__kernel_size_t) (n); \
+	__kernel_size_t __copy_res; \
+	if (__copy_size && __access_ok((unsigned long)__copy_to, __copy_size)) \
+		__copy_res = __copy_user(__copy_to, (void *) (from), __copy_size); \
+	else \
+		__copy_res = __copy_size; \
+	__copy_res; })
+
+#define copy_to_user_ret(to,from,n,retval) ({ \
+if (copy_to_user(to,from,n)) \
+	return retval; \
+})
+
+#define __copy_to_user(to,from,n)		\
+	__copy_user((void *)(to),		\
+		    (void *)(from), n)
+
+#define __copy_to_user_ret(to,from,n,retval) ({ \
+if (__copy_to_user(to,from,n)) \
+	return retval; \
+})
+
+#define copy_from_user(to,from,n) ({ \
+	void *__copy_to = (void *) (to); \
+	void *__copy_from = (void *) (from); \
+	__kernel_size_t __copy_size = (__kernel_size_t) (n); \
+	__kernel_size_t __copy_res; \
+	if (__copy_size && __access_ok((unsigned long)__copy_from, __copy_size)) \
+		__copy_res = __copy_user(__copy_to, __copy_from, __copy_size); \
+	else \
+		__copy_res = __copy_size; \
+	__copy_res; })
+
+#define copy_from_user_ret(to,from,n,retval) ({ \
+if (copy_from_user(to,from,n)) \
+	return retval; \
+})
+
+#define __copy_from_user(to,from,n)		\
+	__copy_user((void *)(to),		\
+		    (void *)(from), n)
+
+#define __copy_from_user_ret(to,from,n,retval) ({ \
+if (__copy_from_user(to,from,n)) \
+	return retval; \
+})
+
+#define __copy_to_user_inatomic __copy_to_user
+#define __copy_from_user_inatomic __copy_from_user
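+
+/*
+ * Illustrative sketch (not built; struct example_args and
+ * example_ioctl are hypothetical): an ioctl-style round trip built on
+ * the copy helpers above.
+ */
+#if 0
+struct example_args { int in; int out; };
+
+static inline int example_ioctl(void __user *uarg)
+{
+	struct example_args args;
+
+	if (copy_from_user(&args, uarg, sizeof(args)))
+		return -EFAULT;
+	args.out = args.in + 1;
+	if (copy_to_user(uarg, &args, sizeof(args)))
+		return -EFAULT;
+	return 0;
+}
+#endif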
+
+/* XXX: Not sure it works well..
+   should be such that: 4byte clear and the rest. */
+extern __kernel_size_t __clear_user(void *addr, __kernel_size_t size);
+
+#define clear_user(addr,n) ({ \
+	void *__cl_addr = (addr); \
+	unsigned long __cl_size = (n); \
+	if (__cl_size && __access_ok(((unsigned long)(__cl_addr)), __cl_size)) \
+		__cl_size = __clear_user(__cl_addr, __cl_size); \
+	__cl_size; })
+
+extern int __strncpy_from_user(unsigned long __dest, unsigned long __src, int __count);
+
+#define strncpy_from_user(dest,src,count) ({ \
+	unsigned long __sfu_src = (unsigned long) (src); \
+	int __sfu_count = (int) (count); \
+	long __sfu_res = -EFAULT; \
+	if (__access_ok(__sfu_src, __sfu_count)) \
+		__sfu_res = __strncpy_from_user((unsigned long) (dest), __sfu_src, __sfu_count); \
+	__sfu_res; })
+
+#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
+
+/*
+ * Return the size of a string (including the ending 0!)
+ */
+extern long __strnlen_user(const char *__s, long __n);
+
+extern __inline__ long strnlen_user(const char *s, long n)
+{
+	if (!__addr_ok(s))
+		return 0;
+	else
+		return __strnlen_user(s, n);
+}
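+
+/*
+ * Illustrative sketch (not built; example_fetch_name is hypothetical):
+ * fetching a short NUL-terminated string from user space with the
+ * helpers above.  strnlen_user() returning 0 signals a bad address.
+ */
+#if 0
+static inline long example_fetch_name(char *dst, const char __user *src)
+{
+	long len = strnlen_user(src, 32);
+
+	if (!len || len > 32)
+		return -EFAULT;
+	return strncpy_from_user(dst, src, 32);
+}
+#endif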
+
+struct exception_table_entry
+{
+	unsigned long insn, fixup;
+};
+
+#define ARCH_HAS_SEARCH_EXTABLE
+
+/* If gcc inlines memset, it will use st.q instructions.  Therefore, we need
+   kmalloc allocations to be 8-byte aligned.  Without this, the alignment
+   becomes BYTES_PER_WORD i.e. only 4 (since sizeof(long)==sizeof(void*)==4 on
+   sh64 at the moment). */
+#define ARCH_KMALLOC_MINALIGN 8
+
+/*
+ * We want 8-byte alignment for the slab caches as well, otherwise we have
+ * the same BYTES_PER_WORD (sizeof(void *)) min align in kmem_cache_create().
+ */
+#define ARCH_SLAB_MINALIGN 8
+
+/* Returns 0 if exception not found and the fixup address otherwise.  */
+extern unsigned long search_exception_table(unsigned long addr);
+extern const struct exception_table_entry *search_exception_tables (unsigned long addr);
+
+#endif /* __ASM_SH64_UACCESS_H */
diff --git a/include/asm-sh64/ucontext.h b/include/asm-sh64/ucontext.h
new file mode 100644
index 0000000..cf77a08
--- /dev/null
+++ b/include/asm-sh64/ucontext.h
@@ -0,0 +1,23 @@
+#ifndef __ASM_SH64_UCONTEXT_H
+#define __ASM_SH64_UCONTEXT_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/ucontext.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ *
+ */
+
+struct ucontext {
+	unsigned long	  uc_flags;
+	struct ucontext  *uc_link;
+	stack_t		  uc_stack;
+	struct sigcontext uc_mcontext;
+	sigset_t	  uc_sigmask;	/* mask last for extensibility */
+};
+
+#endif /* __ASM_SH64_UCONTEXT_H */
diff --git a/include/asm-sh64/unaligned.h b/include/asm-sh64/unaligned.h
new file mode 100644
index 0000000..74481b1
--- /dev/null
+++ b/include/asm-sh64/unaligned.h
@@ -0,0 +1,17 @@
+#ifndef __ASM_SH64_UNALIGNED_H
+#define __ASM_SH64_UNALIGNED_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/unaligned.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ *
+ */
+
+#include <asm-generic/unaligned.h>
+
+#endif /* __ASM_SH64_UNALIGNED_H */
diff --git a/include/asm-sh64/unistd.h b/include/asm-sh64/unistd.h
new file mode 100644
index 0000000..95f0b13
--- /dev/null
+++ b/include/asm-sh64/unistd.h
@@ -0,0 +1,560 @@
+#ifndef __ASM_SH64_UNISTD_H
+#define __ASM_SH64_UNISTD_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/unistd.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ * Copyright (C) 2003  Paul Mundt
+ * Copyright (C) 2004  Sean McGoogan
+ *
+ * This file contains the system call numbers.
+ *
+ */
+
+#define __NR_setup		  0	/* used only by init, to get system going */
+#define __NR_exit		  1
+#define __NR_fork		  2
+#define __NR_read		  3
+#define __NR_write		  4
+#define __NR_open		  5
+#define __NR_close		  6
+#define __NR_waitpid		  7
+#define __NR_creat		  8
+#define __NR_link		  9
+#define __NR_unlink		 10
+#define __NR_execve		 11
+#define __NR_chdir		 12
+#define __NR_time		 13
+#define __NR_mknod		 14
+#define __NR_chmod		 15
+#define __NR_lchown		 16
+#define __NR_break		 17
+#define __NR_oldstat		 18
+#define __NR_lseek		 19
+#define __NR_getpid		 20
+#define __NR_mount		 21
+#define __NR_umount		 22
+#define __NR_setuid		 23
+#define __NR_getuid		 24
+#define __NR_stime		 25
+#define __NR_ptrace		 26
+#define __NR_alarm		 27
+#define __NR_oldfstat		 28
+#define __NR_pause		 29
+#define __NR_utime		 30
+#define __NR_stty		 31
+#define __NR_gtty		 32
+#define __NR_access		 33
+#define __NR_nice		 34
+#define __NR_ftime		 35
+#define __NR_sync		 36
+#define __NR_kill		 37
+#define __NR_rename		 38
+#define __NR_mkdir		 39
+#define __NR_rmdir		 40
+#define __NR_dup		 41
+#define __NR_pipe		 42
+#define __NR_times		 43
+#define __NR_prof		 44
+#define __NR_brk		 45
+#define __NR_setgid		 46
+#define __NR_getgid		 47
+#define __NR_signal		 48
+#define __NR_geteuid		 49
+#define __NR_getegid		 50
+#define __NR_acct		 51
+#define __NR_umount2		 52
+#define __NR_lock		 53
+#define __NR_ioctl		 54
+#define __NR_fcntl		 55
+#define __NR_mpx		 56
+#define __NR_setpgid		 57
+#define __NR_ulimit		 58
+#define __NR_oldolduname	 59
+#define __NR_umask		 60
+#define __NR_chroot		 61
+#define __NR_ustat		 62
+#define __NR_dup2		 63
+#define __NR_getppid		 64
+#define __NR_getpgrp		 65
+#define __NR_setsid		 66
+#define __NR_sigaction		 67
+#define __NR_sgetmask		 68
+#define __NR_ssetmask		 69
+#define __NR_setreuid		 70
+#define __NR_setregid		 71
+#define __NR_sigsuspend		 72
+#define __NR_sigpending		 73
+#define __NR_sethostname	 74
+#define __NR_setrlimit		 75
+#define __NR_getrlimit	 	 76	/* Back compatible 2Gig limited rlimit */
+#define __NR_getrusage		 77
+#define __NR_gettimeofday	 78
+#define __NR_settimeofday	 79
+#define __NR_getgroups		 80
+#define __NR_setgroups		 81
+#define __NR_select		 82
+#define __NR_symlink		 83
+#define __NR_oldlstat		 84
+#define __NR_readlink		 85
+#define __NR_uselib		 86
+#define __NR_swapon		 87
+#define __NR_reboot		 88
+#define __NR_readdir		 89
+#define __NR_mmap		 90
+#define __NR_munmap		 91
+#define __NR_truncate		 92
+#define __NR_ftruncate		 93
+#define __NR_fchmod		 94
+#define __NR_fchown		 95
+#define __NR_getpriority	 96
+#define __NR_setpriority	 97
+#define __NR_profil		 98
+#define __NR_statfs		 99
+#define __NR_fstatfs		100
+#define __NR_ioperm		101
+#define __NR_socketcall		102	/* old implementation of socket system call */
+#define __NR_syslog		103
+#define __NR_setitimer		104
+#define __NR_getitimer		105
+#define __NR_stat		106
+#define __NR_lstat		107
+#define __NR_fstat		108
+#define __NR_olduname		109
+#define __NR_iopl		110
+#define __NR_vhangup		111
+#define __NR_idle		112
+#define __NR_vm86old		113
+#define __NR_wait4		114
+#define __NR_swapoff		115
+#define __NR_sysinfo		116
+#define __NR_ipc		117
+#define __NR_fsync		118
+#define __NR_sigreturn		119
+#define __NR_clone		120
+#define __NR_setdomainname	121
+#define __NR_uname		122
+#define __NR_modify_ldt		123
+#define __NR_adjtimex		124
+#define __NR_mprotect		125
+#define __NR_sigprocmask	126
+#define __NR_create_module	127
+#define __NR_init_module	128
+#define __NR_delete_module	129
+#define __NR_get_kernel_syms	130
+#define __NR_quotactl		131
+#define __NR_getpgid		132
+#define __NR_fchdir		133
+#define __NR_bdflush		134
+#define __NR_sysfs		135
+#define __NR_personality	136
+#define __NR_afs_syscall	137 /* Syscall for Andrew File System */
+#define __NR_setfsuid		138
+#define __NR_setfsgid		139
+#define __NR__llseek		140
+#define __NR_getdents		141
+#define __NR__newselect		142
+#define __NR_flock		143
+#define __NR_msync		144
+#define __NR_readv		145
+#define __NR_writev		146
+#define __NR_getsid		147
+#define __NR_fdatasync		148
+#define __NR__sysctl		149
+#define __NR_mlock		150
+#define __NR_munlock		151
+#define __NR_mlockall		152
+#define __NR_munlockall		153
+#define __NR_sched_setparam		154
+#define __NR_sched_getparam		155
+#define __NR_sched_setscheduler		156
+#define __NR_sched_getscheduler		157
+#define __NR_sched_yield		158
+#define __NR_sched_get_priority_max	159
+#define __NR_sched_get_priority_min	160
+#define __NR_sched_rr_get_interval	161
+#define __NR_nanosleep		162
+#define __NR_mremap		163
+#define __NR_setresuid		164
+#define __NR_getresuid		165
+#define __NR_vm86		166
+#define __NR_query_module	167
+#define __NR_poll		168
+#define __NR_nfsservctl		169
+#define __NR_setresgid		170
+#define __NR_getresgid		171
+#define __NR_prctl              172
+#define __NR_rt_sigreturn	173
+#define __NR_rt_sigaction	174
+#define __NR_rt_sigprocmask	175
+#define __NR_rt_sigpending	176
+#define __NR_rt_sigtimedwait	177
+#define __NR_rt_sigqueueinfo	178
+#define __NR_rt_sigsuspend	179
+#define __NR_pread		180
+#define __NR_pwrite		181
+#define __NR_chown		182
+#define __NR_getcwd		183
+#define __NR_capget		184
+#define __NR_capset		185
+#define __NR_sigaltstack	186
+#define __NR_sendfile		187
+#define __NR_streams1		188	/* some people actually want it */
+#define __NR_streams2		189	/* some people actually want it */
+#define __NR_vfork		190
+#define __NR_ugetrlimit		191	/* SuS compliant getrlimit */
+#define __NR_mmap2		192
+#define __NR_truncate64		193
+#define __NR_ftruncate64	194
+#define __NR_stat64		195
+#define __NR_lstat64		196
+#define __NR_fstat64		197
+#define __NR_lchown32		198
+#define __NR_getuid32		199
+#define __NR_getgid32		200
+#define __NR_geteuid32		201
+#define __NR_getegid32		202
+#define __NR_setreuid32		203
+#define __NR_setregid32		204
+#define __NR_getgroups32	205
+#define __NR_setgroups32	206
+#define __NR_fchown32		207
+#define __NR_setresuid32	208
+#define __NR_getresuid32	209
+#define __NR_setresgid32	210
+#define __NR_getresgid32	211
+#define __NR_chown32		212
+#define __NR_setuid32		213
+#define __NR_setgid32		214
+#define __NR_setfsuid32		215
+#define __NR_setfsgid32		216
+#define __NR_pivot_root		217
+#define __NR_mincore		218
+#define __NR_madvise		219
+
+/* Non-multiplexed socket family */
+#define __NR_socket		220
+#define __NR_bind		221
+#define __NR_connect		222
+#define __NR_listen		223
+#define __NR_accept		224
+#define __NR_getsockname	225
+#define __NR_getpeername	226
+#define __NR_socketpair		227
+#define __NR_send		228
+#define __NR_sendto		229
+#define __NR_recv		230
+#define __NR_recvfrom		231
+#define __NR_shutdown		232
+#define __NR_setsockopt		233
+#define __NR_getsockopt		234
+#define __NR_sendmsg		235
+#define __NR_recvmsg		236
+
+/* Non-multiplexed IPC family */
+#define __NR_semop		237
+#define __NR_semget		238
+#define __NR_semctl		239
+#define __NR_msgsnd		240
+#define __NR_msgrcv		241
+#define __NR_msgget		242
+#define __NR_msgctl		243
+#if 0
+#define __NR_shmatcall		244
+#endif
+#define __NR_shmdt		245
+#define __NR_shmget		246
+#define __NR_shmctl		247
+
+#define __NR_getdents64		248
+#define __NR_fcntl64		249
+/* 250 and 251 are unused */
+#define __NR_gettid		252
+#define __NR_readahead		253
+#define __NR_setxattr		254
+#define __NR_lsetxattr		255
+#define __NR_fsetxattr		256
+#define __NR_getxattr		257
+#define __NR_lgetxattr		258
+#define __NR_fgetxattr		259
+#define __NR_listxattr		260
+#define __NR_llistxattr		261
+#define __NR_flistxattr		262
+#define __NR_removexattr	263
+#define __NR_lremovexattr	264
+#define __NR_fremovexattr	265
+#define __NR_tkill		266
+#define __NR_sendfile64		267
+#define __NR_futex		268
+#define __NR_sched_setaffinity	269
+#define __NR_sched_getaffinity	270
+#define __NR_set_thread_area	271
+#define __NR_get_thread_area	272
+#define __NR_io_setup		273
+#define __NR_io_destroy		274
+#define __NR_io_getevents	275
+#define __NR_io_submit		276
+#define __NR_io_cancel		277
+#define __NR_fadvise64		278
+#define __NR_exit_group		280
+
+#define __NR_lookup_dcookie	281
+#define __NR_epoll_create	282
+#define __NR_epoll_ctl		283
+#define __NR_epoll_wait		284
+#define __NR_remap_file_pages	285
+#define __NR_set_tid_address	286
+#define __NR_timer_create	287
+#define __NR_timer_settime	(__NR_timer_create+1)
+#define __NR_timer_gettime	(__NR_timer_create+2)
+#define __NR_timer_getoverrun	(__NR_timer_create+3)
+#define __NR_timer_delete	(__NR_timer_create+4)
+#define __NR_clock_settime	(__NR_timer_create+5)
+#define __NR_clock_gettime	(__NR_timer_create+6)
+#define __NR_clock_getres	(__NR_timer_create+7)
+#define __NR_clock_nanosleep	(__NR_timer_create+8)
+#define __NR_statfs64		296
+#define __NR_fstatfs64		297
+#define __NR_tgkill		298
+#define __NR_utimes		299
+#define __NR_fadvise64_64	300
+#define __NR_vserver		301
+#define __NR_mbind              302
+#define __NR_get_mempolicy      303
+#define __NR_set_mempolicy      304
+#define __NR_mq_open            305
+#define __NR_mq_unlink          (__NR_mq_open+1)
+#define __NR_mq_timedsend       (__NR_mq_open+2)
+#define __NR_mq_timedreceive    (__NR_mq_open+3)
+#define __NR_mq_notify          (__NR_mq_open+4)
+#define __NR_mq_getsetattr      (__NR_mq_open+5)
+#define __NR_sys_kexec_load	311
+#define __NR_waitid		312
+#define __NR_add_key		313
+#define __NR_request_key	314
+#define __NR_keyctl		315
+
+#define NR_syscalls 316
+
+/* user-visible error numbers are in the range -1 to -125: see <asm-sh64/errno.h> */
+
+#define __syscall_return(type, res) \
+do { \
+	/* Note: when returning from kernel the return value is in r9	    \
+	**       This prevents conflicts between return value and arg1      \
+	**       when dispatching signal handler, in other words makes	    \
+	**       life easier in the system call epilogue (see entry.S)      \
+	*/								    \
+        register unsigned long __sr2 __asm__ ("r2") = res;		    \
+	if ((unsigned long)(res) >= (unsigned long)(-125)) { \
+		errno = -(res);						    \
+		__sr2 = -1; 						    \
+	} \
+	return (type) (__sr2); 						    \
+} while (0)
+
+/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
+
+#define _syscall0(type,name) \
+type name(void) \
+{ \
+register unsigned long __sc0 __asm__ ("r9") = ((0x10 << 16) | __NR_##name); \
+__asm__ __volatile__ ("trapa	%1 !\t\t\t" #name "()"			    \
+	: "=r" (__sc0) 							    \
+	: "r" (__sc0) ); 						    \
+__syscall_return(type,__sc0); 						    \
+}
+
+	/*
+	 * The apparent spurious "dummy" assembler comment is *needed*,
+	 * as without it, the compiler treats the arg<n> variables
+	 * as no longer live just before the asm. The compiler can
+	 * then optimize the storage into any registers it wishes.
+	 * The additional dummy statement forces the compiler to put
+	 * the arguments into the correct registers before the TRAPA.
+	 */
+#define _syscall1(type,name,type1,arg1) \
+type name(type1 arg1) \
+{ \
+register unsigned long __sc0 __asm__ ("r9") = ((0x11 << 16) | __NR_##name); \
+register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1;	    \
+__asm__ __volatile__ ("trapa	%1 !\t\t\t" #name "(%2)"		    \
+	: "=r" (__sc0) 							    \
+	: "r" (__sc0), "r" (__sc2));					    \
+__asm__ __volatile__ ("!dummy	%0 %1"				   	    \
+	:								    \
+	: "r" (__sc0), "r" (__sc2));					    \
+__syscall_return(type,__sc0); 						    \
+}
+
+#define _syscall2(type,name,type1,arg1,type2,arg2) \
+type name(type1 arg1,type2 arg2) \
+{ \
+register unsigned long __sc0 __asm__ ("r9") = ((0x12 << 16) | __NR_##name); \
+register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1;	    \
+register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2;	    \
+__asm__ __volatile__ ("trapa	%1 !\t\t\t" #name "(%2,%3)"		    \
+	: "=r" (__sc0) 							    \
+	: "r" (__sc0), "r" (__sc2), "r" (__sc3) );			    \
+__asm__ __volatile__ ("!dummy	%0 %1 %2"			   	    \
+	:								    \
+	: "r" (__sc0), "r" (__sc2), "r" (__sc3) );			    \
+__syscall_return(type,__sc0); 						    \
+}
+
+#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
+type name(type1 arg1,type2 arg2,type3 arg3) \
+{ \
+register unsigned long __sc0 __asm__ ("r9") = ((0x13 << 16) | __NR_##name); \
+register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1;	    \
+register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2;	    \
+register unsigned long __sc4 __asm__ ("r4") = (unsigned long) arg3;	    \
+__asm__ __volatile__ ("trapa	%1 !\t\t\t" #name "(%2,%3,%4)"		    \
+	: "=r" (__sc0) 							    \
+	: "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4) );		    \
+__asm__ __volatile__ ("!dummy	%0 %1 %2 %3"			   	    \
+	:								    \
+	: "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4) );	   	    \
+__syscall_return(type,__sc0); 						    \
+}
+
+#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
+type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
+{ \
+register unsigned long __sc0 __asm__ ("r9") = ((0x14 << 16) | __NR_##name); \
+register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1;	    \
+register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2;	    \
+register unsigned long __sc4 __asm__ ("r4") = (unsigned long) arg3;	    \
+register unsigned long __sc5 __asm__ ("r5") = (unsigned long) arg4;	    \
+__asm__ __volatile__ ("trapa	%1 !\t\t\t" #name "(%2,%3,%4,%5)"	    \
+	: "=r" (__sc0) 							    \
+	: "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5) );\
+__asm__ __volatile__ ("!dummy	%0 %1 %2 %3 %4"			   	    \
+	:								    \
+	: "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5) );\
+__syscall_return(type,__sc0); 						    \
+}
+
+#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
+type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
+{ \
+register unsigned long __sc0 __asm__ ("r9") = ((0x15 << 16) | __NR_##name); \
+register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1;	    \
+register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2;	    \
+register unsigned long __sc4 __asm__ ("r4") = (unsigned long) arg3;	    \
+register unsigned long __sc5 __asm__ ("r5") = (unsigned long) arg4;	    \
+register unsigned long __sc6 __asm__ ("r6") = (unsigned long) arg5;	    \
+__asm__ __volatile__ ("trapa	%1 !\t\t\t" #name "(%2,%3,%4,%5,%6)"	    \
+	: "=r" (__sc0) 							    \
+	: "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5),  \
+	  "r" (__sc6));							    \
+__asm__ __volatile__ ("!dummy	%0 %1 %2 %3 %4 %5"		   	    \
+	:								    \
+	: "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5),  \
+	  "r" (__sc6));							    \
+__syscall_return(type,__sc0); 						    \
+}
+
+#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5, type6, arg6) \
+type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) \
+{ \
+register unsigned long __sc0 __asm__ ("r9") = ((0x16 << 16) | __NR_##name); \
+register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1;	    \
+register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2;	    \
+register unsigned long __sc4 __asm__ ("r4") = (unsigned long) arg3;	    \
+register unsigned long __sc5 __asm__ ("r5") = (unsigned long) arg4;	    \
+register unsigned long __sc6 __asm__ ("r6") = (unsigned long) arg5;	    \
+register unsigned long __sc7 __asm__ ("r7") = (unsigned long) arg6;	    \
+__asm__ __volatile__ ("trapa	%1 !\t\t\t" #name "(%2,%3,%4,%5,%6,%7)"	    \
+	: "=r" (__sc0) 							    \
+	: "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5),  \
+	  "r" (__sc6), "r" (__sc7));					    \
+__asm__ __volatile__ ("!dummy	%0 %1 %2 %3 %4 %5 %6"		   	    \
+	:								    \
+	: "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5),  \
+	  "r" (__sc6), "r" (__sc7));					    \
+__syscall_return(type,__sc0); 						    \
+}
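+
+/*
+ * Illustrative sketch (not built): instantiating a stub from the
+ * macros above.  This expands to a complete pid_t getpid(void) wrapper
+ * that issues the trapa and converts failures via __syscall_return().
+ */
+#if 0
+_syscall0(pid_t, getpid)
+#endif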
+
+#ifdef __KERNEL__
+#define __ARCH_WANT_IPC_PARSE_VERSION
+#define __ARCH_WANT_OLD_READDIR
+#define __ARCH_WANT_OLD_STAT
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SYS_ALARM
+#define __ARCH_WANT_SYS_GETHOSTNAME
+#define __ARCH_WANT_SYS_PAUSE
+#define __ARCH_WANT_SYS_SGETMASK
+#define __ARCH_WANT_SYS_SIGNAL
+#define __ARCH_WANT_SYS_TIME
+#define __ARCH_WANT_SYS_UTIME
+#define __ARCH_WANT_SYS_WAITPID
+#define __ARCH_WANT_SYS_SOCKETCALL
+#define __ARCH_WANT_SYS_FADVISE64
+#define __ARCH_WANT_SYS_GETPGRP
+#define __ARCH_WANT_SYS_LLSEEK
+#define __ARCH_WANT_SYS_NICE
+#define __ARCH_WANT_SYS_OLD_GETRLIMIT
+#define __ARCH_WANT_SYS_OLDUMOUNT
+#define __ARCH_WANT_SYS_SIGPENDING
+#define __ARCH_WANT_SYS_SIGPROCMASK
+#define __ARCH_WANT_SYS_RT_SIGACTION
+#endif
+
+#ifdef __KERNEL_SYSCALLS__
+
+/* Copy from sh */
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <asm/ptrace.h>
+
+/*
+ * we need this inline - forking from kernel space will result
+ * in NO COPY ON WRITE (!!!), until an execve is executed. This
+ * is no problem, but for the stack. This is handled by not letting
+ * main() use the stack at all after fork(). Thus, no function
+ * calls - which means inline code for fork too, as otherwise we
+ * would use the stack upon exit from 'fork()'.
+ *
+ * Actually only pause and fork are needed inline, so that there
+ * won't be any messing with the stack from main(), but we define
+ * some others too.
+ */
+#define __NR__exit __NR_exit
+static inline _syscall0(int,pause)
+static inline _syscall1(int,setup,int,magic)
+static inline _syscall0(int,sync)
+static inline _syscall0(pid_t,setsid)
+static inline _syscall3(int,write,int,fd,const char *,buf,off_t,count)
+static inline _syscall3(int,read,int,fd,char *,buf,off_t,count)
+static inline _syscall3(off_t,lseek,int,fd,off_t,offset,int,count)
+static inline _syscall1(int,dup,int,fd)
+static inline _syscall3(int,execve,const char *,file,char **,argv,char **,envp)
+static inline _syscall3(int,open,const char *,file,int,flag,int,mode)
+static inline _syscall1(int,close,int,fd)
+static inline _syscall1(int,_exit,int,exitcode)
+static inline _syscall3(pid_t,waitpid,pid_t,pid,int *,wait_stat,int,options)
+static inline _syscall1(int,delete_module,const char *,name)
+
+static inline pid_t wait(int * wait_stat)
+{
+	return waitpid(-1,wait_stat,0);
+}
+#endif
+
+/*
+ * "Conditional" syscalls
+ *
+ * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
+ * but it doesn't work on all toolchains, so we just do it by hand
+ */
+#ifndef cond_syscall
+#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
+#endif
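+
+/*
+ * Illustrative sketch (not built): marking an optional syscall so the
+ * syscall table still links when its implementation is configured out.
+ */
+#if 0
+cond_syscall(sys_nfsservctl);
+#endif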
+
+#endif /* __ASM_SH64_UNISTD_H */
diff --git a/include/asm-sh64/user.h b/include/asm-sh64/user.h
new file mode 100644
index 0000000..8f32f39
--- /dev/null
+++ b/include/asm-sh64/user.h
@@ -0,0 +1,71 @@
+#ifndef __ASM_SH64_USER_H
+#define __ASM_SH64_USER_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/user.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ *
+ */
+
+#include <linux/types.h>
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+#include <asm/page.h>
+
+/*
+ * Core file format: The core file is written in such a way that gdb
+ * can understand it and provide useful information to the user (under
+ * linux we use the `trad-core' bfd).  The file contents are as follows:
+ *
+ *  upage: 1 page consisting of a user struct that tells gdb
+ *	what is present in the file.  Directly after this is a
+ *	copy of the task_struct, which is currently not used by gdb,
+ *	but it may come in handy at some point.  All of the registers
+ *	are stored as part of the upage.  The upage should always be
+ *	only one page long.
+ *  data: The data segment follows next.  We use current->end_text to
+ *	current->brk to pick up all of the user variables, plus any memory
+ *	that may have been sbrk'ed.  No attempt is made to determine if a
+ *	page is demand-zero or if a page is totally unused, we just cover
+ *	the entire range.  All of the addresses are rounded in such a way
+ *	that an integral number of pages is written.
+ *  stack: We need the stack information in order to get a meaningful
+ *	backtrace.  We need to write the data from usp to
+ *	current->start_stack, so we round each of these in order to be able
+ *	to write an integer number of pages.
+ */
+
+struct user_fpu_struct {
+        unsigned long long fp_regs[32];
+	unsigned int fpscr;
+};
+
+struct user {
+	struct pt_regs	regs;			/* entire machine state */
+	struct user_fpu_struct fpu;	/* Math Co-processor registers  */
+	int u_fpvalid;		/* True if math co-processor being used */
+	size_t		u_tsize;		/* text size (pages) */
+	size_t		u_dsize;		/* data size (pages) */
+	size_t		u_ssize;		/* stack size (pages) */
+	unsigned long	start_code;		/* text starting address */
+	unsigned long	start_data;		/* data starting address */
+	unsigned long	start_stack;		/* stack starting address */
+	long int	signal;			/* signal causing core dump */
+	struct regs *	u_ar0;			/* help gdb find registers */
+	struct user_fpu_struct* u_fpstate;	/* Math Co-processor pointer */
+	unsigned long	magic;			/* identifies a core file */
+	char		u_comm[32];		/* user command name */
+};
+
+#define NBPG			PAGE_SIZE
+#define UPAGES			1
+#define HOST_TEXT_START_ADDR	(u.start_code)
+#define HOST_DATA_START_ADDR	(u.start_data)
+#define HOST_STACK_END_ADDR	(u.start_stack + u.u_ssize * NBPG)
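+
+/*
+ * Illustrative sketch (not built; example_core_bytes is hypothetical):
+ * how an a.out-style core dumper would size the dump from the macros
+ * and fields above.
+ */
+#if 0
+static inline unsigned long example_core_bytes(struct user *u)
+{
+	return UPAGES * NBPG		/* the upage itself */
+		+ u->u_dsize * NBPG	/* data segment */
+		+ u->u_ssize * NBPG;	/* stack */
+}
+#endif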
+
+#endif /* __ASM_SH64_USER_H */