Merge with git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index e91db542..dc5a933 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -77,6 +77,14 @@
 config ARCH_MTD_XIP
 	bool
 
+config VECTORS_BASE
+	hex
+	default 0xffff0000 if MMU
+	default DRAM_BASE if REMAP_VECTORS_TO_RAM
+	default 0x00000000
+	help
+	  The base address of exception vectors.
+
 source "init/Kconfig"
 
 menu "System Type"
diff --git a/arch/arm/Kconfig-nommu b/arch/arm/Kconfig-nommu
new file mode 100644
index 0000000..e1574be
--- /dev/null
+++ b/arch/arm/Kconfig-nommu
@@ -0,0 +1,44 @@
+#
+# Kconfig for uClinux (non-paged MM) dependent configurations
+# Hyok S. Choi <hyok.choi@samsung.com>
+# 
+
+config SET_MEM_PARAM
+	bool "Set flash/sdram size and base addr"
+	help
+	 Say Y to manually set the base addresses and sizes.
+	 Otherwise, the default values are assigned.
+
+config DRAM_BASE
+	hex '(S)DRAM Base Address' if SET_MEM_PARAM
+	default 0x00800000
+
+config DRAM_SIZE
+	hex '(S)DRAM SIZE' if SET_MEM_PARAM
+	default 0x00800000
+
+config FLASH_MEM_BASE
+	hex 'FLASH Base Address' if SET_MEM_PARAM
+	default 0x00400000
+
+config FLASH_SIZE
+	hex 'FLASH Size' if SET_MEM_PARAM
+	default 0x00400000
+
+config REMAP_VECTORS_TO_RAM
+	bool 'Install vectors to the beginning of RAM' if DRAM_BASE
+	depends on DRAM_BASE
+	help
+	  The kernel needs to change the hardware exception vectors.
+	  In nommu mode, the hardware exception vectors are normally
+	  placed at address 0x00000000. However, this region may be
+	  occupied by read-only memory depending on H/W design.
+
+	  If the region contains read-write memory, say 'n' here.
+
+	  If your CPU provides a remap facility which allows the exception
+	  vectors to be mapped to writable memory, say 'n' here.
+
+	  Otherwise, say 'y' here.  In this case, the kernel will require
+	  external support to redirect the hardware exception vectors to
+	  the writable versions located at DRAM_BASE.
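
For orientation, the VECTORS_BASE defaults added to arch/arm/Kconfig above
resolve in Kconfig order: the first "default" whose condition holds wins.
A minimal C sketch of that selection, using stand-in values for the symbols
(illustrative only, not part of the patch):

	/* Illustrative sketch of how the VECTORS_BASE defaults resolve;
	 * the first matching default wins, as in Kconfig.  The values
	 * below are stand-ins, not a real configuration. */
	#include <stdio.h>

	#define MMU			0		/* pretend CONFIG_MMU is not set */
	#define REMAP_VECTORS_TO_RAM	1		/* pretend the option is enabled */
	#define DRAM_BASE		0x00800000UL

	static unsigned long vectors_base(void)
	{
		if (MMU)
			return 0xffff0000UL;		/* high vectors page */
		if (REMAP_VECTORS_TO_RAM)
			return DRAM_BASE;		/* vectors redirected to RAM base */
		return 0x00000000UL;			/* low vectors */
	}

	int main(void)
	{
		printf("VECTORS_BASE = 0x%08lx\n", vectors_base());
		return 0;
	}
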
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index ce3e804..95a9627 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -20,6 +20,11 @@
 # Select a platform that is kept up-to-date
 KBUILD_DEFCONFIG := versatile_defconfig
 
+# defines filename extension depending on memory management type.
+ifeq ($(CONFIG_MMU),)
+MMUEXT		:= -nommu
+endif
+
 ifeq ($(CONFIG_FRAME_POINTER),y)
 CFLAGS		+=-fno-omit-frame-pointer -mapcs -mno-sched-prolog
 endif
@@ -73,7 +78,7 @@
 CHECKFLAGS	+= -D__arm__
 
 #Default value
-head-y		:= arch/arm/kernel/head.o arch/arm/kernel/init_task.o
+head-y		:= arch/arm/kernel/head$(MMUEXT).o arch/arm/kernel/init_task.o
 textofs-y	:= 0x00008000
 
  machine-$(CONFIG_ARCH_RPC)	   := rpc
@@ -133,7 +138,7 @@
 MACHINE  :=
 endif
   
-export	TEXT_OFFSET GZFLAGS
+export	TEXT_OFFSET GZFLAGS MMUEXT
 
 # Do we have FASTFPE?
 FASTFPE		:=arch/arm/fastfpe
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 491c7e4..b56f5e6 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -2,6 +2,7 @@
  *  linux/arch/arm/boot/compressed/head.S
  *
  *  Copyright (C) 1996-2002 Russell King
+ *  Copyright (C) 2004 Hyok S. Choi (MPU support)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -320,6 +321,62 @@
 cache_on:	mov	r3, #8			@ cache_on function
 		b	call_cache_fn
 
+/*
+ * Initialize the highest priority protection region, PR7,
+ * to cover the whole 32-bit address space, cacheable and bufferable.
+ */
+__armv4_mpu_cache_on:
+		mov	r0, #0x3f		@ 4G, the whole
+		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting
+		mcr 	p15, 0, r0, c6, c7, 1
+
+		mov	r0, #0x80		@ PR7
+		mcr	p15, 0, r0, c2, c0, 0	@ D-cache on
+		mcr	p15, 0, r0, c2, c0, 1	@ I-cache on
+		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on
+
+		mov	r0, #0xc000
+		mcr	p15, 0, r0, c5, c0, 1	@ I-access permission
+		mcr	p15, 0, r0, c5, c0, 0	@ D-access permission
+
+		mov	r0, #0
+		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
+		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
+		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
+		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
+						@ ...I .... ..D. WC.M
+		orr	r0, r0, #0x002d		@ .... .... ..1. 11.1
+		orr	r0, r0, #0x1000		@ ...1 .... .... ....
+
+		mcr	p15, 0, r0, c1, c0, 0	@ write control reg
+
+		mov	r0, #0
+		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
+		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
+		mov	pc, lr
+
+__armv3_mpu_cache_on:
+		mov	r0, #0x3f		@ 4G, the whole
+		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting
+
+		mov	r0, #0x80		@ PR7
+		mcr	p15, 0, r0, c2, c0, 0	@ cache on
+		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on
+
+		mov	r0, #0xc000
+		mcr	p15, 0, r0, c5, c0, 0	@ access permission
+
+		mov	r0, #0
+		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
+		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
+						@ .... .... .... WC.M
+		orr	r0, r0, #0x000d		@ .... .... .... 11.1
+		mov	r0, #0
+		mcr	p15, 0, r0, c1, c0, 0	@ write control reg
+
+		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
+		mov	pc, lr
+
 __setup_mmu:	sub	r3, r4, #16384		@ Page directory size
 		bic	r3, r3, #0xff		@ Align the pointer
 		bic	r3, r3, #0x3f00
@@ -496,6 +553,18 @@
 		b	__armv4_mmu_cache_off
 		mov	pc, lr
 
+		.word	0x41007400		@ ARM74x
+		.word	0xff00ff00
+		b	__armv3_mpu_cache_on
+		b	__armv3_mpu_cache_off
+		b	__armv3_mpu_cache_flush
+		
+		.word	0x41009400		@ ARM94x
+		.word	0xff00ff00
+		b	__armv4_mpu_cache_on
+		b	__armv4_mpu_cache_off
+		b	__armv4_mpu_cache_flush
+
 		.word	0x00007000		@ ARM7 IDs
 		.word	0x0000f000
 		mov	pc, lr
@@ -562,6 +631,24 @@
 cache_off:	mov	r3, #12			@ cache_off function
 		b	call_cache_fn
 
+__armv4_mpu_cache_off:
+		mrc	p15, 0, r0, c1, c0
+		bic	r0, r0, #0x000d
+		mcr	p15, 0, r0, c1, c0	@ turn MPU and cache off
+		mov	r0, #0
+		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
+		mcr	p15, 0, r0, c7, c6, 0	@ flush D-Cache
+		mcr	p15, 0, r0, c7, c5, 0	@ flush I-Cache
+		mov	pc, lr
+
+__armv3_mpu_cache_off:
+		mrc	p15, 0, r0, c1, c0
+		bic	r0, r0, #0x000d
+		mcr	p15, 0, r0, c1, c0, 0	@ turn MPU and cache off
+		mov	r0, #0
+		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
+		mov	pc, lr
+
 __armv4_mmu_cache_off:
 		mrc	p15, 0, r0, c1, c0
 		bic	r0, r0, #0x000d
@@ -601,6 +688,24 @@
 		mov	r3, #16
 		b	call_cache_fn
 
+__armv4_mpu_cache_flush:
+		mov	r2, #1
+		mov	r3, #0
+		mcr	p15, 0, ip, c7, c6, 0	@ invalidate D cache
+		mov	r1, #7 << 5		@ 8 segments
+1:		orr	r3, r1, #63 << 26	@ 64 entries
+2:		mcr	p15, 0, r3, c7, c14, 2	@ clean & invalidate D index
+		subs	r3, r3, #1 << 26
+		bcs	2b			@ entries 63 to 0
+		subs 	r1, r1, #1 << 5
+		bcs	1b			@ segments 7 to 0
+
+		teq	r2, #0
+		mcrne	p15, 0, ip, c7, c5, 0	@ invalidate I cache
+		mcr	p15, 0, ip, c7, c10, 4	@ drain WB
+		mov	pc, lr
+		
+
 __armv6_mmu_cache_flush:
 		mov	r1, #0
 		mcr	p15, 0, r1, c7, c14, 0	@ clean+invalidate D
@@ -638,6 +743,7 @@
 		mov	pc, lr
 
 __armv3_mmu_cache_flush:
+__armv3_mpu_cache_flush:
 		mov	r1, #0
 		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
 		mov	pc, lr
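
For reference, the __armv4_mpu_cache_flush routine added above walks the
D-cache by set/way: 8 segments encoded in bits 7:5 and 64 indices encoded in
bits 31:26, i.e. 512 clean+invalidate operations in total.  A rough C model
of that loop (mpu_clean_inv_dcache_index() is a hypothetical stand-in for
the "mcr p15, 0, r3, c7, c14, 2" instruction):

	/* Rough C model of the __armv4_mpu_cache_flush set/way loop.  The
	 * assembly counts down; the order does not matter for a full clean,
	 * so count up here for clarity. */
	#include <stdio.h>

	static void mpu_clean_inv_dcache_index(unsigned long val)
	{
		(void)val;	/* would be an mcr on real hardware */
	}

	int main(void)
	{
		unsigned long seg, idx, ops = 0;

		for (seg = 0; seg < 8; seg++)			/* segments, bits 7:5 */
			for (idx = 0; idx < 64; idx++) {	/* indices, bits 31:26 */
				mpu_clean_inv_dcache_index((seg << 5) | (idx << 26));
				ops++;
			}

		printf("clean+invalidate operations: %lu\n", ops);	/* 512 */
		return 0;
	}
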
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 355914f..ab8e600 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -666,7 +666,7 @@
  *
  * #define __kernel_dmb() \
  *         asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
- *	        : : : "lr","cc" )
+ *	        : : : "r0", "lr","cc" )
  */
 
 __kuser_memory_barrier:				@ 0xffff0fa0
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
new file mode 100644
index 0000000..a52da0d
--- /dev/null
+++ b/arch/arm/kernel/head-common.S
@@ -0,0 +1,217 @@
+/*
+ *  linux/arch/arm/kernel/head-common.S
+ *
+ *  Copyright (C) 1994-2002 Russell King
+ *  Copyright (c) 2003 ARM Limited
+ *  All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+	.type	__switch_data, %object
+__switch_data:
+	.long	__mmap_switched
+	.long	__data_loc			@ r4
+	.long	__data_start			@ r5
+	.long	__bss_start			@ r6
+	.long	_end				@ r7
+	.long	processor_id			@ r4
+	.long	__machine_arch_type		@ r5
+	.long	cr_alignment			@ r6
+	.long	init_thread_union + THREAD_START_SP @ sp
+
+/*
+ * The following fragment of code is executed with the MMU on in MMU mode,
+ * and uses absolute addresses; this is not position independent.
+ *
+ *  r0  = cp#15 control register
+ *  r1  = machine ID
+ *  r9  = processor ID
+ */
+	.type	__mmap_switched, %function
+__mmap_switched:
+	adr	r3, __switch_data + 4
+
+	ldmia	r3!, {r4, r5, r6, r7}
+	cmp	r4, r5				@ Copy data segment if needed
+1:	cmpne	r5, r6
+	ldrne	fp, [r4], #4
+	strne	fp, [r5], #4
+	bne	1b
+
+	mov	fp, #0				@ Clear BSS (and zero fp)
+1:	cmp	r6, r7
+	strcc	fp, [r6],#4
+	bcc	1b
+
+	ldmia	r3, {r4, r5, r6, sp}
+	str	r9, [r4]			@ Save processor ID
+	str	r1, [r5]			@ Save machine type
+	bic	r4, r0, #CR_A			@ Clear 'A' bit
+	stmia	r6, {r0, r4}			@ Save control register values
+	b	start_kernel
+
+/*
+ * Exception handling.  Something went wrong and we can't proceed.  We
+ * ought to tell the user, but since we don't have any guarantee that
+ * we're even running on the right architecture, we do virtually nothing.
+ *
+ * If CONFIG_DEBUG_LL is set we try to print out something about the error
+ * and hope for the best (useful if bootloader fails to pass a proper
+ * machine ID for example).
+ */
+
+	.type	__error_p, %function
+__error_p:
+#ifdef CONFIG_DEBUG_LL
+	adr	r0, str_p1
+	bl	printascii
+	b	__error
+str_p1:	.asciz	"\nError: unrecognized/unsupported processor variant.\n"
+	.align
+#endif
+
+	.type	__error_a, %function
+__error_a:
+#ifdef CONFIG_DEBUG_LL
+	mov	r4, r1				@ preserve machine ID
+	adr	r0, str_a1
+	bl	printascii
+	mov	r0, r4
+	bl	printhex8
+	adr	r0, str_a2
+	bl	printascii
+	adr	r3, 3f
+	ldmia	r3, {r4, r5, r6}		@ get machine desc list
+	sub	r4, r3, r4			@ get offset between virt&phys
+	add	r5, r5, r4			@ convert virt addresses to
+	add	r6, r6, r4			@ physical address space
+1:	ldr	r0, [r5, #MACHINFO_TYPE]	@ get machine type
+	bl	printhex8
+	mov	r0, #'\t'
+	bl	printch
+	ldr     r0, [r5, #MACHINFO_NAME]	@ get machine name
+	add	r0, r0, r4
+	bl	printascii
+	mov	r0, #'\n'
+	bl	printch
+	add	r5, r5, #SIZEOF_MACHINE_DESC	@ next machine_desc
+	cmp	r5, r6
+	blo	1b
+	adr	r0, str_a3
+	bl	printascii
+	b	__error
+str_a1:	.asciz	"\nError: unrecognized/unsupported machine ID (r1 = 0x"
+str_a2:	.asciz	").\n\nAvailable machine support:\n\nID (hex)\tNAME\n"
+str_a3:	.asciz	"\nPlease check your kernel config and/or bootloader.\n"
+	.align
+#endif
+
+	.type	__error, %function
+__error:
+#ifdef CONFIG_ARCH_RPC
+/*
+ * Turn the screen red on an error - RiscPC only.
+ */
+	mov	r0, #0x02000000
+	mov	r3, #0x11
+	orr	r3, r3, r3, lsl #8
+	orr	r3, r3, r3, lsl #16
+	str	r3, [r0], #4
+	str	r3, [r0], #4
+	str	r3, [r0], #4
+	str	r3, [r0], #4
+#endif
+1:	mov	r0, r0
+	b	1b
+
+
+/*
+ * Read processor ID register (CP#15, CR0), and look up in the linker-built
+ * supported processor list.  Note that we can't use the absolute addresses
+ * for the __proc_info lists since we aren't running with the MMU on
+ * (and therefore, we are not in the correct address space).  We have to
+ * calculate the offset.
+ *
+ *	r9 = cpuid
+ * Returns:
+ *	r3, r4, r6 corrupted
+ *	r5 = proc_info pointer in physical address space
+ *	r9 = cpuid (preserved)
+ */
+	.type	__lookup_processor_type, %function
+__lookup_processor_type:
+	adr	r3, 3f
+	ldmda	r3, {r5 - r7}
+	sub	r3, r3, r7			@ get offset between virt&phys
+	add	r5, r5, r3			@ convert virt addresses to
+	add	r6, r6, r3			@ physical address space
+1:	ldmia	r5, {r3, r4}			@ value, mask
+	and	r4, r4, r9			@ mask wanted bits
+	teq	r3, r4
+	beq	2f
+	add	r5, r5, #PROC_INFO_SZ		@ sizeof(proc_info_list)
+	cmp	r5, r6
+	blo	1b
+	mov	r5, #0				@ unknown processor
+2:	mov	pc, lr
+
+/*
+ * This provides a C-API version of the above function.
+ */
+ENTRY(lookup_processor_type)
+	stmfd	sp!, {r4 - r7, r9, lr}
+	mov	r9, r0
+	bl	__lookup_processor_type
+	mov	r0, r5
+	ldmfd	sp!, {r4 - r7, r9, pc}
+
+/*
+ * Look in include/asm-arm/procinfo.h and arch/arm/kernel/arch.[ch] for
+ * more information about the __proc_info and __arch_info structures.
+ */
+	.long	__proc_info_begin
+	.long	__proc_info_end
+3:	.long	.
+	.long	__arch_info_begin
+	.long	__arch_info_end
+
+/*
+ * Look up the machine architecture in the linker-built list of architectures.
+ * Note that we can't use the absolute addresses for the __arch_info
+ * lists since we aren't running with the MMU on (and therefore, we are
+ * not in the correct address space).  We have to calculate the offset.
+ *
+ *  r1 = machine architecture number
+ * Returns:
+ *  r3, r4, r6 corrupted
+ *  r5 = mach_info pointer in physical address space
+ */
+	.type	__lookup_machine_type, %function
+__lookup_machine_type:
+	adr	r3, 3b
+	ldmia	r3, {r4, r5, r6}
+	sub	r3, r3, r4			@ get offset between virt&phys
+	add	r5, r5, r3			@ convert virt addresses to
+	add	r6, r6, r3			@ physical address space
+1:	ldr	r3, [r5, #MACHINFO_TYPE]	@ get machine type
+	teq	r3, r1				@ matches loader number?
+	beq	2f				@ found
+	add	r5, r5, #SIZEOF_MACHINE_DESC	@ next machine_desc
+	cmp	r5, r6
+	blo	1b
+	mov	r5, #0				@ unknown machine
+2:	mov	pc, lr
+
+/*
+ * This provides a C-API version of the above function.
+ */
+ENTRY(lookup_machine_type)
+	stmfd	sp!, {r4 - r6, lr}
+	mov	r1, r0
+	bl	__lookup_machine_type
+	mov	r0, r5
+	ldmfd	sp!, {r4 - r6, pc}
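
The lookup routines above are easier to follow in C.  A minimal sketch of the
processor lookup, trimmed to the value/mask pair the assembly actually
compares (the helper name and the reduced structure are illustrative, not
the kernel's definitions):

	/* Minimal sketch of __lookup_processor_type: walk the linker-built
	 * proc_info list and return the first entry whose masked CPU ID
	 * matches; NULL means "unknown processor". */
	#include <stddef.h>

	struct proc_info_list {
		unsigned int	cpu_val;
		unsigned int	cpu_mask;
		/* ... initfunc, name, etc. in the real structure ... */
	};

	static struct proc_info_list *
	lookup_processor(struct proc_info_list *begin,
			 struct proc_info_list *end, unsigned int cpuid)
	{
		struct proc_info_list *p;

		for (p = begin; p < end; p++)
			if ((cpuid & p->cpu_mask) == p->cpu_val)
				return p;

		return NULL;	/* unknown processor */
	}

	int main(void)
	{
		/* IDs taken from the decompressor table earlier in this patch */
		struct proc_info_list table[] = {
			{ 0x41007400, 0xff00ff00 },	/* ARM74x */
			{ 0x41009400, 0xff00ff00 },	/* ARM94x */
		};

		return lookup_processor(table, table + 2, 0x41129400) ? 0 : 1;
	}
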
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
new file mode 100644
index 0000000..b093ab8
--- /dev/null
+++ b/arch/arm/kernel/head-nommu.S
@@ -0,0 +1,83 @@
+/*
+ *  linux/arch/arm/kernel/head-nommu.S
+ *
+ *  Copyright (C) 1994-2002 Russell King
+ *  Copyright (C) 2003-2006 Hyok S. Choi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  Common kernel startup code (non-paged MM)
+ *    for 32-bit CPUs which have a processor ID register (CP15).
+ *
+ */
+#include <linux/config.h>
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+#include <asm/assembler.h>
+#include <asm/mach-types.h>
+#include <asm/procinfo.h>
+#include <asm/ptrace.h>
+#include <asm/constants.h>
+#include <asm/system.h>
+
+#define PROCINFO_INITFUNC       12
+
+/*
+ * Kernel startup entry point.
+ * ---------------------------
+ *
+ * This is normally called from the decompressor code.  The requirements
+ * are: MMU = off, D-cache = off, I-cache = dont care, r0 = 0,
+ * r1 = machine nr.
+ *
+ * See linux/arch/arm/tools/mach-types for the complete list of machine
+ * numbers for r1.
+ *
+ */
+	__INIT
+	.type	stext, %function
+ENTRY(stext)
+	msr	cpsr_c, #PSR_F_BIT | PSR_I_BIT | MODE_SVC @ ensure svc mode
+						@ and irqs disabled
+	mrc	p15, 0, r9, c0, c0		@ get processor id
+	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
+	movs	r10, r5				@ invalid processor (r5=0)?
+	beq	__error_p				@ yes, error 'p'
+	bl	__lookup_machine_type		@ r5=machinfo
+	movs	r8, r5				@ invalid machine (r5=0)?
+	beq	__error_a			@ yes, error 'a'
+
+	ldr	r13, __switch_data		@ address to jump to after
+						@ the initialization is done
+	adr	lr, __after_proc_init		@ return (PIC) address
+	add	pc, r10, #PROCINFO_INITFUNC
+
+/*
+ * Set the Control Register and Read the process ID.
+ */
+	.type	__after_proc_init, %function
+__after_proc_init:
+	mrc	p15, 0, r0, c1, c0, 0		@ read control reg
+#ifdef CONFIG_ALIGNMENT_TRAP
+	orr	r0, r0, #CR_A
+#else
+	bic	r0, r0, #CR_A
+#endif
+#ifdef CONFIG_CPU_DCACHE_DISABLE
+	bic	r0, r0, #CR_C
+#endif
+#ifdef CONFIG_CPU_BPREDICT_DISABLE
+	bic	r0, r0, #CR_Z
+#endif
+#ifdef CONFIG_CPU_ICACHE_DISABLE
+	bic	r0, r0, #CR_I
+#endif
+	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
+
+	mov	pc, r13				@ clear the BSS and jump
+						@ to start_kernel
+
+#include "head-common.S"
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 53b6901..04b66a9 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -102,49 +102,6 @@
 	adr	lr, __enable_mmu		@ return (PIC) address
 	add	pc, r10, #PROCINFO_INITFUNC
 
-	.type	__switch_data, %object
-__switch_data:
-	.long	__mmap_switched
-	.long	__data_loc			@ r4
-	.long	__data_start			@ r5
-	.long	__bss_start			@ r6
-	.long	_end				@ r7
-	.long	processor_id			@ r4
-	.long	__machine_arch_type		@ r5
-	.long	cr_alignment			@ r6
-	.long	init_thread_union + THREAD_START_SP @ sp
-
-/*
- * The following fragment of code is executed with the MMU on, and uses
- * absolute addresses; this is not position independent.
- *
- *  r0  = cp#15 control register
- *  r1  = machine ID
- *  r9  = processor ID
- */
-	.type	__mmap_switched, %function
-__mmap_switched:
-	adr	r3, __switch_data + 4
-
-	ldmia	r3!, {r4, r5, r6, r7}
-	cmp	r4, r5				@ Copy data segment if needed
-1:	cmpne	r5, r6
-	ldrne	fp, [r4], #4
-	strne	fp, [r5], #4
-	bne	1b
-
-	mov	fp, #0				@ Clear BSS (and zero fp)
-1:	cmp	r6, r7
-	strcc	fp, [r6],#4
-	bcc	1b
-
-	ldmia	r3, {r4, r5, r6, sp}
-	str	r9, [r4]			@ Save processor ID
-	str	r1, [r5]			@ Save machine type
-	bic	r4, r0, #CR_A			@ Clear 'A' bit
-	stmia	r6, {r0, r4}			@ Save control register values
-	b	start_kernel
-
 #if defined(CONFIG_SMP)
 	.type   secondary_startup, #function
 ENTRY(secondary_startup)
@@ -367,166 +324,4 @@
 	mov	pc, lr
 	.ltorg
 
-
-
-/*
- * Exception handling.  Something went wrong and we can't proceed.  We
- * ought to tell the user, but since we don't have any guarantee that
- * we're even running on the right architecture, we do virtually nothing.
- *
- * If CONFIG_DEBUG_LL is set we try to print out something about the error
- * and hope for the best (useful if bootloader fails to pass a proper
- * machine ID for example).
- */
-
-	.type	__error_p, %function
-__error_p:
-#ifdef CONFIG_DEBUG_LL
-	adr	r0, str_p1
-	bl	printascii
-	b	__error
-str_p1:	.asciz	"\nError: unrecognized/unsupported processor variant.\n"
-	.align
-#endif
-
-	.type	__error_a, %function
-__error_a:
-#ifdef CONFIG_DEBUG_LL
-	mov	r4, r1				@ preserve machine ID
-	adr	r0, str_a1
-	bl	printascii
-	mov	r0, r4
-	bl	printhex8
-	adr	r0, str_a2
-	bl	printascii
-	adr	r3, 3f
-	ldmia	r3, {r4, r5, r6}		@ get machine desc list
-	sub	r4, r3, r4			@ get offset between virt&phys
-	add	r5, r5, r4			@ convert virt addresses to
-	add	r6, r6, r4			@ physical address space
-1:	ldr	r0, [r5, #MACHINFO_TYPE]	@ get machine type
-	bl	printhex8
-	mov	r0, #'\t'
-	bl	printch
-	ldr     r0, [r5, #MACHINFO_NAME]	@ get machine name
-	add	r0, r0, r4
-	bl	printascii
-	mov	r0, #'\n'
-	bl	printch
-	add	r5, r5, #SIZEOF_MACHINE_DESC	@ next machine_desc
-	cmp	r5, r6
-	blo	1b
-	adr	r0, str_a3
-	bl	printascii
-	b	__error
-str_a1:	.asciz	"\nError: unrecognized/unsupported machine ID (r1 = 0x"
-str_a2:	.asciz	").\n\nAvailable machine support:\n\nID (hex)\tNAME\n"
-str_a3:	.asciz	"\nPlease check your kernel config and/or bootloader.\n"
-	.align
-#endif
-
-	.type	__error, %function
-__error:
-#ifdef CONFIG_ARCH_RPC
-/*
- * Turn the screen red on a error - RiscPC only.
- */
-	mov	r0, #0x02000000
-	mov	r3, #0x11
-	orr	r3, r3, r3, lsl #8
-	orr	r3, r3, r3, lsl #16
-	str	r3, [r0], #4
-	str	r3, [r0], #4
-	str	r3, [r0], #4
-	str	r3, [r0], #4
-#endif
-1:	mov	r0, r0
-	b	1b
-
-
-/*
- * Read processor ID register (CP#15, CR0), and look up in the linker-built
- * supported processor list.  Note that we can't use the absolute addresses
- * for the __proc_info lists since we aren't running with the MMU on
- * (and therefore, we are not in the correct address space).  We have to
- * calculate the offset.
- *
- *	r9 = cpuid
- * Returns:
- *	r3, r4, r6 corrupted
- *	r5 = proc_info pointer in physical address space
- *	r9 = cpuid (preserved)
- */
-	.type	__lookup_processor_type, %function
-__lookup_processor_type:
-	adr	r3, 3f
-	ldmda	r3, {r5 - r7}
-	sub	r3, r3, r7			@ get offset between virt&phys
-	add	r5, r5, r3			@ convert virt addresses to
-	add	r6, r6, r3			@ physical address space
-1:	ldmia	r5, {r3, r4}			@ value, mask
-	and	r4, r4, r9			@ mask wanted bits
-	teq	r3, r4
-	beq	2f
-	add	r5, r5, #PROC_INFO_SZ		@ sizeof(proc_info_list)
-	cmp	r5, r6
-	blo	1b
-	mov	r5, #0				@ unknown processor
-2:	mov	pc, lr
-
-/*
- * This provides a C-API version of the above function.
- */
-ENTRY(lookup_processor_type)
-	stmfd	sp!, {r4 - r7, r9, lr}
-	mov	r9, r0
-	bl	__lookup_processor_type
-	mov	r0, r5
-	ldmfd	sp!, {r4 - r7, r9, pc}
-
-/*
- * Look in include/asm-arm/procinfo.h and arch/arm/kernel/arch.[ch] for
- * more information about the __proc_info and __arch_info structures.
- */
-	.long	__proc_info_begin
-	.long	__proc_info_end
-3:	.long	.
-	.long	__arch_info_begin
-	.long	__arch_info_end
-
-/*
- * Lookup machine architecture in the linker-build list of architectures.
- * Note that we can't use the absolute addresses for the __arch_info
- * lists since we aren't running with the MMU on (and therefore, we are
- * not in the correct address space).  We have to calculate the offset.
- *
- *  r1 = machine architecture number
- * Returns:
- *  r3, r4, r6 corrupted
- *  r5 = mach_info pointer in physical address space
- */
-	.type	__lookup_machine_type, %function
-__lookup_machine_type:
-	adr	r3, 3b
-	ldmia	r3, {r4, r5, r6}
-	sub	r3, r3, r4			@ get offset between virt&phys
-	add	r5, r5, r3			@ convert virt addresses to
-	add	r6, r6, r3			@ physical address space
-1:	ldr	r3, [r5, #MACHINFO_TYPE]	@ get machine type
-	teq	r3, r1				@ matches loader number?
-	beq	2f				@ found
-	add	r5, r5, #SIZEOF_MACHINE_DESC	@ next machine_desc
-	cmp	r5, r6
-	blo	1b
-	mov	r5, #0				@ unknown machine
-2:	mov	pc, lr
-
-/*
- * This provides a C-API version of the above function.
- */
-ENTRY(lookup_machine_type)
-	stmfd	sp!, {r4 - r6, lr}
-	mov	r1, r0
-	bl	__lookup_machine_type
-	mov	r0, r5
-	ldmfd	sp!, {r4 - r6, pc}
+#include "head-common.S"
diff --git a/arch/arm/kernel/signal.h b/arch/arm/kernel/signal.h
index 9991049..27beece 100644
--- a/arch/arm/kernel/signal.h
+++ b/arch/arm/kernel/signal.h
@@ -7,6 +7,6 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-#define KERN_SIGRETURN_CODE	0xffff0500
+#define KERN_SIGRETURN_CODE	(CONFIG_VECTORS_BASE + 0x00000500)
 
 extern const unsigned long sigreturn_codes[7];
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index d566d5f..35230a0 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -688,6 +688,7 @@
 
 void __init trap_init(void)
 {
+	unsigned long vectors = CONFIG_VECTORS_BASE;
 	extern char __stubs_start[], __stubs_end[];
 	extern char __vectors_start[], __vectors_end[];
 	extern char __kuser_helper_start[], __kuser_helper_end[];
@@ -698,9 +699,9 @@
 	 * into the vector page, mapped at 0xffff0000, and ensure these
 	 * are visible to the instruction stream.
 	 */
-	memcpy((void *)0xffff0000, __vectors_start, __vectors_end - __vectors_start);
-	memcpy((void *)0xffff0200, __stubs_start, __stubs_end - __stubs_start);
-	memcpy((void *)0xffff1000 - kuser_sz, __kuser_helper_start, kuser_sz);
+	memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
+	memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start);
+	memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
 
 	/*
 	 * Copy signal return handlers into the vector page, and
@@ -709,6 +710,6 @@
 	memcpy((void *)KERN_SIGRETURN_CODE, sigreturn_codes,
 	       sizeof(sigreturn_codes));
 
-	flush_icache_range(0xffff0000, 0xffff0000 + PAGE_SIZE);
+	flush_icache_range(vectors, vectors + PAGE_SIZE);
 	modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
 }
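
Taken together with the KERN_SIGRETURN_CODE change above, trap_init() now
lays out the vector page relative to CONFIG_VECTORS_BASE.  A small
illustrative sketch of the resulting layout (the base and the helper size
below are stand-in values, not taken from a real configuration):

	/* Illustrative layout of the vector page set up by trap_init().
	 * CONFIG_VECTORS_BASE and KUSER_SZ are stand-in values. */
	#include <stdio.h>

	#define CONFIG_VECTORS_BASE	0xffff0000UL	/* the MMU default */
	#define PAGE_SIZE		4096UL
	#define KUSER_SZ		0x60UL		/* hypothetical helper size */

	int main(void)
	{
		unsigned long vectors = CONFIG_VECTORS_BASE;

		printf("exception vectors @ 0x%08lx\n", vectors);
		printf("vector stubs      @ 0x%08lx\n", vectors + 0x200);
		printf("sigreturn code    @ 0x%08lx\n", vectors + 0x500);
		printf("kuser helpers     @ 0x%08lx\n", vectors + 0x1000 - KUSER_SZ);
		printf("icache flushed      0x%08lx..0x%08lx\n",
		       vectors, vectors + PAGE_SIZE);
		return 0;
	}
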
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index f90513e..b9dfce5 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -30,6 +30,7 @@
 #include <asm/procinfo.h>
 #include <asm/hardware.h>
 #include <asm/pgtable.h>
+#include <asm/pgtable-hwdef.h>
 #include <asm/page.h>
 #include <asm/ptrace.h>
 #include "proc-macros.S"
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 89faa60..6386f63 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -240,7 +240,7 @@
 			}
 			p += sprintf(p,
 				     "%s Cache level %lu:\n"
-				     "\tSize           : %lu bytes\n"
+				     "\tSize           : %u bytes\n"
 				     "\tAttributes     : ",
 				     cache_types[j+cci.pcci_unified], i+1,
 				     cci.pcci_cache_size);
@@ -648,9 +648,9 @@
 	if (ia64_pal_freq_ratios(&proc, &bus, &itc) != 0) return 0;
 
 	p += sprintf(p,
-		     "Processor/Clock ratio   : %ld/%ld\n"
-		     "Bus/Clock ratio         : %ld/%ld\n"
-		     "ITC/Clock ratio         : %ld/%ld\n",
+		     "Processor/Clock ratio   : %d/%d\n"
+		     "Bus/Clock ratio         : %d/%d\n"
+		     "ITC/Clock ratio         : %d/%d\n",
 		     proc.num, proc.den, bus.num, bus.den, itc.num, itc.den);
 
 	return p - page;
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index ac16743..4995890 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -188,7 +188,7 @@
 	itc_freq = (platform_base_freq*itc_ratio.num)/itc_ratio.den;
 
 	local_cpu_data->itm_delta = (itc_freq + HZ/2) / HZ;
-	printk(KERN_DEBUG "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%lu/%lu, "
+	printk(KERN_DEBUG "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%u/%u, "
 	       "ITC freq=%lu.%03luMHz", smp_processor_id(),
 	       platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000,
 	       itc_ratio.num, itc_ratio.den, itc_freq / 1000000, (itc_freq / 1000) % 1000);
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 3b6fd79..b47476d 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -9,6 +9,8 @@
  * 		2002/08/07 Erich Focht <efocht@ess.nec.de>
  * Populate cpu entries in sysfs for non-numa systems as well
  *  	Intel Corporation - Ashok Raj
+ * 02/27/2006 Zhang, Yanmin
+ *	Populate cpu cache entries in sysfs for cpu cache info
  */
 
 #include <linux/config.h>
@@ -19,6 +21,7 @@
 #include <linux/init.h>
 #include <linux/bootmem.h>
 #include <linux/nodemask.h>
+#include <linux/notifier.h>
 #include <asm/mmzone.h>
 #include <asm/numa.h>
 #include <asm/cpu.h>
@@ -101,3 +104,367 @@
 }
 
 subsys_initcall(topology_init);
+
+
+/*
+ * Export cpu cache information through sysfs
+ */
+
+/*
+ *  A bunch of string array to get pretty printing
+ */
+static const char *cache_types[] = {
+	"",			/* not used */
+	"Instruction",
+	"Data",
+	"Unified"	/* unified */
+};
+
+static const char *cache_mattrib[]={
+	"WriteThrough",
+	"WriteBack",
+	"",		/* reserved */
+	""		/* reserved */
+};
+
+struct cache_info {
+	pal_cache_config_info_t	cci;
+	cpumask_t shared_cpu_map;
+	int level;
+	int type;
+	struct kobject kobj;
+};
+
+struct cpu_cache_info {
+	struct cache_info *cache_leaves;
+	int	num_cache_leaves;
+	struct kobject kobj;
+};
+
+static struct cpu_cache_info	all_cpu_cache_info[NR_CPUS];
+#define LEAF_KOBJECT_PTR(x,y)    (&all_cpu_cache_info[x].cache_leaves[y])
+
+#ifdef CONFIG_SMP
+static void cache_shared_cpu_map_setup( unsigned int cpu,
+		struct cache_info * this_leaf)
+{
+	pal_cache_shared_info_t	csi;
+	int num_shared, i = 0;
+	unsigned int j;
+
+	if (cpu_data(cpu)->threads_per_core <= 1 &&
+		cpu_data(cpu)->cores_per_socket <= 1) {
+		cpu_set(cpu, this_leaf->shared_cpu_map);
+		return;
+	}
+
+	if (ia64_pal_cache_shared_info(this_leaf->level,
+					this_leaf->type,
+					0,
+					&csi) != PAL_STATUS_SUCCESS)
+		return;
+
+	num_shared = (int) csi.num_shared;
+	do {
+		for_each_cpu(j)
+			if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id
+				&& cpu_data(j)->core_id == csi.log1_cid
+				&& cpu_data(j)->thread_id == csi.log1_tid)
+				cpu_set(j, this_leaf->shared_cpu_map);
+
+		i++;
+	} while (i < num_shared &&
+		ia64_pal_cache_shared_info(this_leaf->level,
+				this_leaf->type,
+				i,
+				&csi) == PAL_STATUS_SUCCESS);
+}
+#else
+static void cache_shared_cpu_map_setup(unsigned int cpu,
+		struct cache_info * this_leaf)
+{
+	cpu_set(cpu, this_leaf->shared_cpu_map);
+	return;
+}
+#endif
+
+static ssize_t show_coherency_line_size(struct cache_info *this_leaf,
+					char *buf)
+{
+	return sprintf(buf, "%u\n", 1 << this_leaf->cci.pcci_line_size);
+}
+
+static ssize_t show_ways_of_associativity(struct cache_info *this_leaf,
+					char *buf)
+{
+	return sprintf(buf, "%u\n", this_leaf->cci.pcci_assoc);
+}
+
+static ssize_t show_attributes(struct cache_info *this_leaf, char *buf)
+{
+	return sprintf(buf,
+			"%s\n",
+			cache_mattrib[this_leaf->cci.pcci_cache_attr]);
+}
+
+static ssize_t show_size(struct cache_info *this_leaf, char *buf)
+{
+	return sprintf(buf, "%uK\n", this_leaf->cci.pcci_cache_size / 1024);
+}
+
+static ssize_t show_number_of_sets(struct cache_info *this_leaf, char *buf)
+{
+	unsigned number_of_sets = this_leaf->cci.pcci_cache_size;
+	number_of_sets /= this_leaf->cci.pcci_assoc;
+	number_of_sets /= 1 << this_leaf->cci.pcci_line_size;
+
+	return sprintf(buf, "%u\n", number_of_sets);
+}
+
+static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf)
+{
+	ssize_t	len;
+	cpumask_t shared_cpu_map;
+
+	cpus_and(shared_cpu_map, this_leaf->shared_cpu_map, cpu_online_map);
+	len = cpumask_scnprintf(buf, NR_CPUS+1, shared_cpu_map);
+	len += sprintf(buf+len, "\n");
+	return len;
+}
+
+static ssize_t show_type(struct cache_info *this_leaf, char *buf)
+{
+	int type = this_leaf->type + this_leaf->cci.pcci_unified;
+	return sprintf(buf, "%s\n", cache_types[type]);
+}
+
+static ssize_t show_level(struct cache_info *this_leaf, char *buf)
+{
+	return sprintf(buf, "%u\n", this_leaf->level);
+}
+
+struct cache_attr {
+	struct attribute attr;
+	ssize_t (*show)(struct cache_info *, char *);
+	ssize_t (*store)(struct cache_info *, const char *, size_t count);
+};
+
+#ifdef define_one_ro
+	#undef define_one_ro
+#endif
+#define define_one_ro(_name) \
+	static struct cache_attr _name = \
+__ATTR(_name, 0444, show_##_name, NULL)
+
+define_one_ro(level);
+define_one_ro(type);
+define_one_ro(coherency_line_size);
+define_one_ro(ways_of_associativity);
+define_one_ro(size);
+define_one_ro(number_of_sets);
+define_one_ro(shared_cpu_map);
+define_one_ro(attributes);
+
+static struct attribute * cache_default_attrs[] = {
+	&type.attr,
+	&level.attr,
+	&coherency_line_size.attr,
+	&ways_of_associativity.attr,
+	&attributes.attr,
+	&size.attr,
+	&number_of_sets.attr,
+	&shared_cpu_map.attr,
+	NULL
+};
+
+#define to_object(k) container_of(k, struct cache_info, kobj)
+#define to_attr(a) container_of(a, struct cache_attr, attr)
+
+static ssize_t cache_show(struct kobject * kobj, struct attribute * attr, char * buf)
+{
+	struct cache_attr *fattr = to_attr(attr);
+	struct cache_info *this_leaf = to_object(kobj);
+	ssize_t ret;
+
+	ret = fattr->show ? fattr->show(this_leaf, buf) : 0;
+	return ret;
+}
+
+static struct sysfs_ops cache_sysfs_ops = {
+	.show   = cache_show
+};
+
+static struct kobj_type cache_ktype = {
+	.sysfs_ops	= &cache_sysfs_ops,
+	.default_attrs	= cache_default_attrs,
+};
+
+static struct kobj_type cache_ktype_percpu_entry = {
+	.sysfs_ops	= &cache_sysfs_ops,
+};
+
+static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu)
+{
+	if (all_cpu_cache_info[cpu].cache_leaves) {
+		kfree(all_cpu_cache_info[cpu].cache_leaves);
+		all_cpu_cache_info[cpu].cache_leaves = NULL;
+	}
+	all_cpu_cache_info[cpu].num_cache_leaves = 0;
+	memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
+
+	return;
+}
+
+static int __cpuinit cpu_cache_sysfs_init(unsigned int cpu)
+{
+	u64 i, levels, unique_caches;
+	pal_cache_config_info_t cci;
+	int j;
+	s64 status;
+	struct cache_info *this_cache;
+	int num_cache_leaves = 0;
+
+	if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
+		printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
+		return -1;
+	}
+
+	this_cache=kzalloc(sizeof(struct cache_info)*unique_caches,
+			GFP_KERNEL);
+	if (this_cache == NULL)
+		return -ENOMEM;
+
+	for (i=0; i < levels; i++) {
+		for (j=2; j >0 ; j--) {
+			if ((status=ia64_pal_cache_config_info(i,j, &cci)) !=
+					PAL_STATUS_SUCCESS)
+				continue;
+
+			this_cache[num_cache_leaves].cci = cci;
+			this_cache[num_cache_leaves].level = i + 1;
+			this_cache[num_cache_leaves].type = j;
+
+			cache_shared_cpu_map_setup(cpu,
+					&this_cache[num_cache_leaves]);
+			num_cache_leaves ++;
+		}
+	}
+
+	all_cpu_cache_info[cpu].cache_leaves = this_cache;
+	all_cpu_cache_info[cpu].num_cache_leaves = num_cache_leaves;
+
+	memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
+
+	return 0;
+}
+
+/* Add cache interface for CPU device */
+static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
+{
+	unsigned int cpu = sys_dev->id;
+	unsigned long i, j;
+	struct cache_info *this_object;
+	int retval = 0;
+	cpumask_t oldmask;
+
+	if (all_cpu_cache_info[cpu].kobj.parent)
+		return 0;
+
+	oldmask = current->cpus_allowed;
+	retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	if (unlikely(retval))
+		return retval;
+
+	retval = cpu_cache_sysfs_init(cpu);
+	set_cpus_allowed(current, oldmask);
+	if (unlikely(retval < 0))
+		return retval;
+
+	all_cpu_cache_info[cpu].kobj.parent = &sys_dev->kobj;
+	kobject_set_name(&all_cpu_cache_info[cpu].kobj, "%s", "cache");
+	all_cpu_cache_info[cpu].kobj.ktype = &cache_ktype_percpu_entry;
+	retval = kobject_register(&all_cpu_cache_info[cpu].kobj);
+
+	for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) {
+		this_object = LEAF_KOBJECT_PTR(cpu,i);
+		this_object->kobj.parent = &all_cpu_cache_info[cpu].kobj;
+		kobject_set_name(&(this_object->kobj), "index%1lu", i);
+		this_object->kobj.ktype = &cache_ktype;
+		retval = kobject_register(&(this_object->kobj));
+		if (unlikely(retval)) {
+			for (j = 0; j < i; j++) {
+				kobject_unregister(
+					&(LEAF_KOBJECT_PTR(cpu,j)->kobj));
+			}
+			kobject_unregister(&all_cpu_cache_info[cpu].kobj);
+			cpu_cache_sysfs_exit(cpu);
+			break;
+		}
+	}
+	return retval;
+}
+
+/* Remove cache interface for CPU device */
+static int __cpuinit cache_remove_dev(struct sys_device * sys_dev)
+{
+	unsigned int cpu = sys_dev->id;
+	unsigned long i;
+
+	for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++)
+		kobject_unregister(&(LEAF_KOBJECT_PTR(cpu,i)->kobj));
+
+	if (all_cpu_cache_info[cpu].kobj.parent) {
+		kobject_unregister(&all_cpu_cache_info[cpu].kobj);
+		memset(&all_cpu_cache_info[cpu].kobj,
+			0,
+			sizeof(struct kobject));
+	}
+
+	cpu_cache_sysfs_exit(cpu);
+
+	return 0;
+}
+
+/*
+ * When a cpu is hot-plugged, do a check and initiate
+ * cache kobject if necessary
+ */
+static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
+		unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (unsigned long)hcpu;
+	struct sys_device *sys_dev;
+
+	sys_dev = get_cpu_sysdev(cpu);
+	switch (action) {
+	case CPU_ONLINE:
+		cache_add_dev(sys_dev);
+		break;
+	case CPU_DEAD:
+		cache_remove_dev(sys_dev);
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block cache_cpu_notifier =
+{
+	.notifier_call = cache_cpu_callback
+};
+
+static int __cpuinit cache_sysfs_init(void)
+{
+	int i;
+
+	for_each_online_cpu(i) {
+		cache_cpu_callback(&cache_cpu_notifier, CPU_ONLINE,
+				(void *)(long)i);
+	}
+
+	register_cpu_notifier(&cache_cpu_notifier);
+
+	return 0;
+}
+
+device_initcall(cache_sysfs_init);
+
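
A quick worked example of show_number_of_sets() above, with made-up cache
parameters (pcci_cache_size is in bytes, pcci_line_size holds log2 of the
line size in bytes):

	/* Worked example for show_number_of_sets():
	 * sets = size / associativity / line_size. */
	#include <stdio.h>

	int main(void)
	{
		unsigned cache_size     = 256 * 1024;	/* 256 KiB, made up */
		unsigned assoc          = 8;		/* 8-way */
		unsigned line_size_log2 = 6;		/* 64-byte lines */

		unsigned sets = cache_size / assoc / (1u << line_size_log2);

		printf("number_of_sets = %u\n", sets);	/* prints 512 */
		return 0;
	}
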
diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig
index 3f5d77f..7cc162e 100644
--- a/drivers/mmc/Kconfig
+++ b/drivers/mmc/Kconfig
@@ -60,6 +60,17 @@
 
 	  If unsure, say N.
 
+config MMC_OMAP
+	tristate "TI OMAP Multimedia Card Interface support"
+	depends on ARCH_OMAP && MMC
+	select TPS65010 if MACH_OMAP_H2
+	help
+	  This selects the TI OMAP Multimedia card Interface.
+	  If you have an OMAP board with a Multimedia Card slot,
+	  say Y or M here.
+
+	  If unsure, say N.
+
 config MMC_WBSD
 	tristate "Winbond W83L51xD SD/MMC Card Interface support"
 	depends on MMC && ISA_DMA_API
diff --git a/drivers/mmc/Makefile b/drivers/mmc/Makefile
index 769d545..c7c34aa 100644
--- a/drivers/mmc/Makefile
+++ b/drivers/mmc/Makefile
@@ -20,5 +20,10 @@
 obj-$(CONFIG_MMC_SDHCI)		+= sdhci.o
 obj-$(CONFIG_MMC_WBSD)		+= wbsd.o
 obj-$(CONFIG_MMC_AU1X)		+= au1xmmc.o
+obj-$(CONFIG_MMC_OMAP)		+= omap.o
 
 mmc_core-y := mmc.o mmc_queue.o mmc_sysfs.o
+
+ifeq ($(CONFIG_MMC_DEBUG),y)
+EXTRA_CFLAGS += -DDEBUG
+endif
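
The EXTRA_CFLAGS addition is what the per-driver conversions below rely on:
with CONFIG_MMC_DEBUG=y every object in the directory is built with DEBUG
defined, so dev_dbg()/pr_debug() and the drivers' local DBG() macros emit
real messages, and compile away otherwise.  A minimal userspace sketch of
the same pattern (the macro name and message are illustrative):

	/* Build with -DDEBUG (as the Makefile change arranges for
	 * CONFIG_MMC_DEBUG=y) and the message appears; without it the
	 * macro compiles away. */
	#include <stdio.h>

	#ifdef DEBUG
	#define DBG(fmt, args...)	printf("mmc: " fmt, ##args)
	#else
	#define DBG(fmt, args...)	do { } while (0)
	#endif

	int main(void)
	{
		DBG("request done, status %d\n", 0);
		return 0;
	}
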
diff --git a/drivers/mmc/au1xmmc.c b/drivers/mmc/au1xmmc.c
index 85e89c7..c0326bb 100644
--- a/drivers/mmc/au1xmmc.c
+++ b/drivers/mmc/au1xmmc.c
@@ -56,12 +56,11 @@
 #define DRIVER_NAME "au1xxx-mmc"
 
 /* Set this to enable special debugging macros */
-/* #define MMC_DEBUG */
 
-#ifdef MMC_DEBUG
-#define DEBUG(fmt, idx, args...) printk("au1xx(%d): DEBUG: " fmt, idx, ##args)
+#ifdef DEBUG
+#define DBG(fmt, idx, args...) printk("au1xx(%d): DEBUG: " fmt, idx, ##args)
 #else
-#define DEBUG(fmt, idx, args...)
+#define DBG(fmt, idx, args...)
 #endif
 
 const struct {
@@ -424,18 +423,18 @@
 			break;
 
 		if (status & SD_STATUS_RC) {
-			DEBUG("RX CRC Error [%d + %d].\n", host->id,
+			DBG("RX CRC Error [%d + %d].\n", host->id,
 					host->pio.len, count);
 			break;
 		}
 
 		if (status & SD_STATUS_RO) {
-			DEBUG("RX Overrun [%d + %d]\n", host->id,
+			DBG("RX Overrun [%d + %d]\n", host->id,
 					host->pio.len, count);
 			break;
 		}
 		else if (status & SD_STATUS_RU) {
-			DEBUG("RX Underrun [%d + %d]\n", host->id,
+			DBG("RX Underrun [%d + %d]\n", host->id,
 					host->pio.len,	count);
 			break;
 		}
@@ -721,7 +720,7 @@
 {
 	struct au1xmmc_host *host = mmc_priv(mmc);
 
-	DEBUG("set_ios (power=%u, clock=%uHz, vdd=%u, mode=%u)\n",
+	DBG("set_ios (power=%u, clock=%uHz, vdd=%u, mode=%u)\n",
 	      host->id, ios->power_mode, ios->clock, ios->vdd,
 	      ios->bus_mode);
 
@@ -810,7 +809,7 @@
 				au1xmmc_receive_pio(host);
 		}
 		else if (status & 0x203FBC70) {
-			DEBUG("Unhandled status %8.8x\n", host->id, status);
+			DBG("Unhandled status %8.8x\n", host->id, status);
 			handled = 0;
 		}
 
@@ -839,7 +838,7 @@
 
 	if (host->mrq != NULL) {
 		u32 status = au_readl(HOST_STATUS(host));
-		DEBUG("PENDING - %8.8x\n", host->id, status);
+		DBG("PENDING - %8.8x\n", host->id, status);
 	}
 
 	mod_timer(&host->timer, jiffies + AU1XMMC_DETECT_TIMEOUT);
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
index 1888060..da6ddd9 100644
--- a/drivers/mmc/mmc.c
+++ b/drivers/mmc/mmc.c
@@ -27,12 +27,6 @@
 
 #include "mmc.h"
 
-#ifdef CONFIG_MMC_DEBUG
-#define DBG(x...)	printk(KERN_DEBUG x)
-#else
-#define DBG(x...)	do { } while (0)
-#endif
-
 #define CMD_RETRIES	3
 
 /*
@@ -77,8 +71,9 @@
 {
 	struct mmc_command *cmd = mrq->cmd;
 	int err = mrq->cmd->error;
-	DBG("MMC: req done (%02x): %d: %08x %08x %08x %08x\n", cmd->opcode,
-	    err, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);
+	pr_debug("MMC: req done (%02x): %d: %08x %08x %08x %08x\n",
+		 cmd->opcode, err, cmd->resp[0], cmd->resp[1],
+		 cmd->resp[2], cmd->resp[3]);
 
 	if (err && cmd->retries) {
 		cmd->retries--;
@@ -102,8 +97,8 @@
 void
 mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
 {
-	DBG("MMC: starting cmd %02x arg %08x flags %08x\n",
-	    mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags);
+	pr_debug("MMC: starting cmd %02x arg %08x flags %08x\n",
+		 mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags);
 
 	WARN_ON(host->card_busy == NULL);
 
@@ -976,8 +971,8 @@
 		if (!mmc_card_dead(card) && max_dtr > card->csd.max_dtr)
 			max_dtr = card->csd.max_dtr;
 
-	DBG("MMC: selected %d.%03dMHz transfer rate\n",
-	    max_dtr / 1000000, (max_dtr / 1000) % 1000);
+	pr_debug("MMC: selected %d.%03dMHz transfer rate\n",
+		 max_dtr / 1000000, (max_dtr / 1000) % 1000);
 
 	return max_dtr;
 }
diff --git a/drivers/mmc/mmci.c b/drivers/mmc/mmci.c
index 9fef29d..df7e861e 100644
--- a/drivers/mmc/mmci.c
+++ b/drivers/mmc/mmci.c
@@ -33,12 +33,8 @@
 
 #define DRIVER_NAME "mmci-pl18x"
 
-#ifdef CONFIG_MMC_DEBUG
 #define DBG(host,fmt,args...)	\
 	pr_debug("%s: %s: " fmt, mmc_hostname(host->mmc), __func__ , args)
-#else
-#define DBG(host,fmt,args...)	do { } while (0)
-#endif
 
 static unsigned int fmax = 515633;
 
diff --git a/drivers/mmc/omap.c b/drivers/mmc/omap.c
new file mode 100644
index 0000000..becb3c6
--- /dev/null
+++ b/drivers/mmc/omap.c
@@ -0,0 +1,1226 @@
+/*
+ *  linux/drivers/mmc/omap.c
+ *
+ *  Copyright (C) 2004 Nokia Corporation
+ *  Written by Tuukka Tikkanen and Juha Yrjölä <juha.yrjola@nokia.com>
+ *  Misc hacks here and there by Tony Lindgren <tony@atomide.com>
+ *  Other hacks (DMA, SD, etc) by David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/protocol.h>
+#include <linux/mmc/card.h>
+#include <linux/clk.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/scatterlist.h>
+#include <asm/mach-types.h>
+
+#include <asm/arch/board.h>
+#include <asm/arch/gpio.h>
+#include <asm/arch/dma.h>
+#include <asm/arch/mux.h>
+#include <asm/arch/fpga.h>
+#include <asm/arch/tps65010.h>
+
+#include "omap.h"
+
+#define DRIVER_NAME "mmci-omap"
+#define RSP_TYPE(x)	((x) & ~(MMC_RSP_BUSY|MMC_RSP_OPCODE))
+
+/* Specifies how often in millisecs to poll for card status changes
+ * when the cover switch is open */
+#define OMAP_MMC_SWITCH_POLL_DELAY	500
+
+static int mmc_omap_enable_poll = 1;
+
+struct mmc_omap_host {
+	int			initialized;
+	int			suspended;
+	struct mmc_request *	mrq;
+	struct mmc_command *	cmd;
+	struct mmc_data *	data;
+	struct mmc_host *	mmc;
+	struct device *		dev;
+	unsigned char		id; /* 16xx chips have 2 MMC blocks */
+	struct clk *		iclk;
+	struct clk *		fclk;
+	void __iomem		*base;
+	int			irq;
+	unsigned char		bus_mode;
+	unsigned char		hw_bus_mode;
+
+	unsigned int		sg_len;
+	int			sg_idx;
+	u16 *			buffer;
+	u32			buffer_bytes_left;
+	u32			total_bytes_left;
+
+	unsigned		use_dma:1;
+	unsigned		brs_received:1, dma_done:1;
+	unsigned		dma_is_read:1;
+	unsigned		dma_in_use:1;
+	int			dma_ch;
+	spinlock_t		dma_lock;
+	struct timer_list	dma_timer;
+	unsigned		dma_len;
+
+	short			power_pin;
+	short			wp_pin;
+
+	int			switch_pin;
+	struct work_struct	switch_work;
+	struct timer_list	switch_timer;
+	int			switch_last_state;
+};
+
+static inline int
+mmc_omap_cover_is_open(struct mmc_omap_host *host)
+{
+	if (host->switch_pin < 0)
+		return 0;
+	return omap_get_gpio_datain(host->switch_pin);
+}
+
+static ssize_t
+mmc_omap_show_cover_switch(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct mmc_omap_host *host = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%s\n", mmc_omap_cover_is_open(host) ? "open" :
+			"closed");
+}
+
+static DEVICE_ATTR(cover_switch, S_IRUGO, mmc_omap_show_cover_switch, NULL);
+
+static ssize_t
+mmc_omap_show_enable_poll(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", mmc_omap_enable_poll);
+}
+
+static ssize_t
+mmc_omap_store_enable_poll(struct device *dev,
+	struct device_attribute *attr, const char *buf,
+	size_t size)
+{
+	int enable_poll;
+
+	if (sscanf(buf, "%10d", &enable_poll) != 1)
+		return -EINVAL;
+
+	if (enable_poll != mmc_omap_enable_poll) {
+		struct mmc_omap_host *host = dev_get_drvdata(dev);
+
+		mmc_omap_enable_poll = enable_poll;
+		if (enable_poll && host->switch_pin >= 0)
+			schedule_work(&host->switch_work);
+	}
+	return size;
+}
+
+static DEVICE_ATTR(enable_poll, 0664,
+		   mmc_omap_show_enable_poll, mmc_omap_store_enable_poll);
+
+static void
+mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd)
+{
+	u32 cmdreg;
+	u32 resptype;
+	u32 cmdtype;
+
+	host->cmd = cmd;
+
+	resptype = 0;
+	cmdtype = 0;
+
+	/* Our hardware needs to know exact type */
+	switch (RSP_TYPE(mmc_resp_type(cmd))) {
+	case RSP_TYPE(MMC_RSP_R1):
+		/* resp 1, resp 1b */
+		resptype = 1;
+		break;
+	case RSP_TYPE(MMC_RSP_R2):
+		resptype = 2;
+		break;
+	case RSP_TYPE(MMC_RSP_R3):
+		resptype = 3;
+		break;
+	default:
+		break;
+	}
+
+	if (mmc_cmd_type(cmd) == MMC_CMD_ADTC) {
+		cmdtype = OMAP_MMC_CMDTYPE_ADTC;
+	} else if (mmc_cmd_type(cmd) == MMC_CMD_BC) {
+		cmdtype = OMAP_MMC_CMDTYPE_BC;
+	} else if (mmc_cmd_type(cmd) == MMC_CMD_BCR) {
+		cmdtype = OMAP_MMC_CMDTYPE_BCR;
+	} else {
+		cmdtype = OMAP_MMC_CMDTYPE_AC;
+	}
+
+	cmdreg = cmd->opcode | (resptype << 8) | (cmdtype << 12);
+
+	if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
+		cmdreg |= 1 << 6;
+
+	if (cmd->flags & MMC_RSP_BUSY)
+		cmdreg |= 1 << 11;
+
+	if (host->data && !(host->data->flags & MMC_DATA_WRITE))
+		cmdreg |= 1 << 15;
+
+	clk_enable(host->fclk);
+
+	OMAP_MMC_WRITE(host->base, CTO, 200);
+	OMAP_MMC_WRITE(host->base, ARGL, cmd->arg & 0xffff);
+	OMAP_MMC_WRITE(host->base, ARGH, cmd->arg >> 16);
+	OMAP_MMC_WRITE(host->base, IE,
+		       OMAP_MMC_STAT_A_EMPTY    | OMAP_MMC_STAT_A_FULL    |
+		       OMAP_MMC_STAT_CMD_CRC    | OMAP_MMC_STAT_CMD_TOUT  |
+		       OMAP_MMC_STAT_DATA_CRC   | OMAP_MMC_STAT_DATA_TOUT |
+		       OMAP_MMC_STAT_END_OF_CMD | OMAP_MMC_STAT_CARD_ERR  |
+		       OMAP_MMC_STAT_END_OF_DATA);
+	OMAP_MMC_WRITE(host->base, CMD, cmdreg);
+}
+
+static void
+mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
+{
+	if (host->dma_in_use) {
+		enum dma_data_direction dma_data_dir;
+
+		BUG_ON(host->dma_ch < 0);
+		if (data->error != MMC_ERR_NONE)
+			omap_stop_dma(host->dma_ch);
+		/* Release DMA channel lazily */
+		mod_timer(&host->dma_timer, jiffies + HZ);
+		if (data->flags & MMC_DATA_WRITE)
+			dma_data_dir = DMA_TO_DEVICE;
+		else
+			dma_data_dir = DMA_FROM_DEVICE;
+		dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len,
+			     dma_data_dir);
+	}
+	host->data = NULL;
+	host->sg_len = 0;
+	clk_disable(host->fclk);
+
+	/* NOTE:  MMC layer will sometimes poll-wait CMD13 next, issuing
+	 * dozens of requests until the card finishes writing data.
+	 * It'd be cheaper to just wait till an EOFB interrupt arrives...
+	 */
+
+	if (!data->stop) {
+		host->mrq = NULL;
+		mmc_request_done(host->mmc, data->mrq);
+		return;
+	}
+
+	mmc_omap_start_command(host, data->stop);
+}
+
+static void
+mmc_omap_end_of_data(struct mmc_omap_host *host, struct mmc_data *data)
+{
+	unsigned long flags;
+	int done;
+
+	if (!host->dma_in_use) {
+		mmc_omap_xfer_done(host, data);
+		return;
+	}
+	done = 0;
+	spin_lock_irqsave(&host->dma_lock, flags);
+	if (host->dma_done)
+		done = 1;
+	else
+		host->brs_received = 1;
+	spin_unlock_irqrestore(&host->dma_lock, flags);
+	if (done)
+		mmc_omap_xfer_done(host, data);
+}
+
+static void
+mmc_omap_dma_timer(unsigned long data)
+{
+	struct mmc_omap_host *host = (struct mmc_omap_host *) data;
+
+	BUG_ON(host->dma_ch < 0);
+	omap_free_dma(host->dma_ch);
+	host->dma_ch = -1;
+}
+
+static void
+mmc_omap_dma_done(struct mmc_omap_host *host, struct mmc_data *data)
+{
+	unsigned long flags;
+	int done;
+
+	done = 0;
+	spin_lock_irqsave(&host->dma_lock, flags);
+	if (host->brs_received)
+		done = 1;
+	else
+		host->dma_done = 1;
+	spin_unlock_irqrestore(&host->dma_lock, flags);
+	if (done)
+		mmc_omap_xfer_done(host, data);
+}
+
+static void
+mmc_omap_cmd_done(struct mmc_omap_host *host, struct mmc_command *cmd)
+{
+	host->cmd = NULL;
+
+	if (cmd->flags & MMC_RSP_PRESENT) {
+		if (cmd->flags & MMC_RSP_136) {
+			/* response type 2 */
+			cmd->resp[3] =
+				OMAP_MMC_READ(host->base, RSP0) |
+				(OMAP_MMC_READ(host->base, RSP1) << 16);
+			cmd->resp[2] =
+				OMAP_MMC_READ(host->base, RSP2) |
+				(OMAP_MMC_READ(host->base, RSP3) << 16);
+			cmd->resp[1] =
+				OMAP_MMC_READ(host->base, RSP4) |
+				(OMAP_MMC_READ(host->base, RSP5) << 16);
+			cmd->resp[0] =
+				OMAP_MMC_READ(host->base, RSP6) |
+				(OMAP_MMC_READ(host->base, RSP7) << 16);
+		} else {
+			/* response types 1, 1b, 3, 4, 5, 6 */
+			cmd->resp[0] =
+				OMAP_MMC_READ(host->base, RSP6) |
+				(OMAP_MMC_READ(host->base, RSP7) << 16);
+		}
+	}
+
+	if (host->data == NULL || cmd->error != MMC_ERR_NONE) {
+		host->mrq = NULL;
+		clk_disable(host->fclk);
+		mmc_request_done(host->mmc, cmd->mrq);
+	}
+}
+
+/* PIO only */
+static void
+mmc_omap_sg_to_buf(struct mmc_omap_host *host)
+{
+	struct scatterlist *sg;
+
+	sg = host->data->sg + host->sg_idx;
+	host->buffer_bytes_left = sg->length;
+	host->buffer = page_address(sg->page) + sg->offset;
+	if (host->buffer_bytes_left > host->total_bytes_left)
+		host->buffer_bytes_left = host->total_bytes_left;
+}
+
+/* PIO only */
+static void
+mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
+{
+	int n;
+	void __iomem *reg;
+	u16 *p;
+
+	if (host->buffer_bytes_left == 0) {
+		host->sg_idx++;
+		BUG_ON(host->sg_idx == host->sg_len);
+		mmc_omap_sg_to_buf(host);
+	}
+	n = 64;
+	if (n > host->buffer_bytes_left)
+		n = host->buffer_bytes_left;
+	host->buffer_bytes_left -= n;
+	host->total_bytes_left -= n;
+	host->data->bytes_xfered += n;
+
+	if (write) {
+		__raw_writesw(host->base + OMAP_MMC_REG_DATA, host->buffer, n);
+	} else {
+		__raw_readsw(host->base + OMAP_MMC_REG_DATA, host->buffer, n);
+	}
+}
+
+static inline void mmc_omap_report_irq(u16 status)
+{
+	static const char *mmc_omap_status_bits[] = {
+		"EOC", "CD", "CB", "BRS", "EOFB", "DTO", "DCRC", "CTO",
+		"CCRC", "CRW", "AF", "AE", "OCRB", "CIRQ", "CERR"
+	};
+	int i, c = 0;
+
+	for (i = 0; i < ARRAY_SIZE(mmc_omap_status_bits); i++)
+		if (status & (1 << i)) {
+			if (c)
+				printk(" ");
+			printk("%s", mmc_omap_status_bits[i]);
+			c++;
+		}
+}
+
+static irqreturn_t mmc_omap_irq(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct mmc_omap_host * host = (struct mmc_omap_host *)dev_id;
+	u16 status;
+	int end_command;
+	int end_transfer;
+	int transfer_error;
+
+	if (host->cmd == NULL && host->data == NULL) {
+		status = OMAP_MMC_READ(host->base, STAT);
+		dev_info(mmc_dev(host->mmc),"spurious irq 0x%04x\n", status);
+		if (status != 0) {
+			OMAP_MMC_WRITE(host->base, STAT, status);
+			OMAP_MMC_WRITE(host->base, IE, 0);
+		}
+		return IRQ_HANDLED;
+	}
+
+	end_command = 0;
+	end_transfer = 0;
+	transfer_error = 0;
+
+	while ((status = OMAP_MMC_READ(host->base, STAT)) != 0) {
+		OMAP_MMC_WRITE(host->base, STAT, status);
+#ifdef CONFIG_MMC_DEBUG
+		dev_dbg(mmc_dev(host->mmc), "MMC IRQ %04x (CMD %d): ",
+			status, host->cmd != NULL ? host->cmd->opcode : -1);
+		mmc_omap_report_irq(status);
+		printk("\n");
+#endif
+		if (host->total_bytes_left) {
+			if ((status & OMAP_MMC_STAT_A_FULL) ||
+			    (status & OMAP_MMC_STAT_END_OF_DATA))
+				mmc_omap_xfer_data(host, 0);
+			if (status & OMAP_MMC_STAT_A_EMPTY)
+				mmc_omap_xfer_data(host, 1);
+		}
+
+		if (status & OMAP_MMC_STAT_END_OF_DATA) {
+			end_transfer = 1;
+		}
+
+		if (status & OMAP_MMC_STAT_DATA_TOUT) {
+			dev_dbg(mmc_dev(host->mmc), "data timeout\n");
+			if (host->data) {
+				host->data->error |= MMC_ERR_TIMEOUT;
+				transfer_error = 1;
+			}
+		}
+
+		if (status & OMAP_MMC_STAT_DATA_CRC) {
+			if (host->data) {
+				host->data->error |= MMC_ERR_BADCRC;
+				dev_dbg(mmc_dev(host->mmc),
+					 "data CRC error, bytes left %d\n",
+					host->total_bytes_left);
+				transfer_error = 1;
+			} else {
+				dev_dbg(mmc_dev(host->mmc), "data CRC error\n");
+			}
+		}
+
+		if (status & OMAP_MMC_STAT_CMD_TOUT) {
+			/* Timeouts are routine with some commands */
+			if (host->cmd) {
+				if (host->cmd->opcode != MMC_ALL_SEND_CID &&
+						host->cmd->opcode !=
+						MMC_SEND_OP_COND &&
+						host->cmd->opcode !=
+						MMC_APP_CMD &&
+						!mmc_omap_cover_is_open(host))
+					dev_err(mmc_dev(host->mmc),
+						"command timeout, CMD %d\n",
+						host->cmd->opcode);
+				host->cmd->error = MMC_ERR_TIMEOUT;
+				end_command = 1;
+			}
+		}
+
+		if (status & OMAP_MMC_STAT_CMD_CRC) {
+			if (host->cmd) {
+				dev_err(mmc_dev(host->mmc),
+					"command CRC error (CMD%d, arg 0x%08x)\n",
+					host->cmd->opcode, host->cmd->arg);
+				host->cmd->error = MMC_ERR_BADCRC;
+				end_command = 1;
+			} else
+				dev_err(mmc_dev(host->mmc),
+					"command CRC error without cmd?\n");
+		}
+
+		if (status & OMAP_MMC_STAT_CARD_ERR) {
+			if (host->cmd && host->cmd->opcode == MMC_STOP_TRANSMISSION) {
+				u32 response = OMAP_MMC_READ(host->base, RSP6)
+					| (OMAP_MMC_READ(host->base, RSP7) << 16);
+				/* STOP sometimes sets must-ignore bits */
+				if (!(response & (R1_CC_ERROR
+								| R1_ILLEGAL_COMMAND
+								| R1_COM_CRC_ERROR))) {
+					end_command = 1;
+					continue;
+				}
+			}
+
+			dev_dbg(mmc_dev(host->mmc), "card status error (CMD%d)\n",
+				host->cmd->opcode);
+			if (host->cmd) {
+				host->cmd->error = MMC_ERR_FAILED;
+				end_command = 1;
+			}
+			if (host->data) {
+				host->data->error = MMC_ERR_FAILED;
+				transfer_error = 1;
+			}
+		}
+
+		/*
+		 * NOTE: On 1610 the END_OF_CMD may come too early when
+		 * starting a write.
+		 */
+		if ((status & OMAP_MMC_STAT_END_OF_CMD) &&
+		    (!(status & OMAP_MMC_STAT_A_EMPTY))) {
+			end_command = 1;
+		}
+	}
+
+	if (end_command) {
+		mmc_omap_cmd_done(host, host->cmd);
+	}
+	if (transfer_error)
+		mmc_omap_xfer_done(host, host->data);
+	else if (end_transfer)
+		mmc_omap_end_of_data(host, host->data);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t mmc_omap_switch_irq(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct mmc_omap_host *host = (struct mmc_omap_host *) dev_id;
+
+	schedule_work(&host->switch_work);
+
+	return IRQ_HANDLED;
+}
+
+static void mmc_omap_switch_timer(unsigned long arg)
+{
+	struct mmc_omap_host *host = (struct mmc_omap_host *) arg;
+
+	schedule_work(&host->switch_work);
+}
+
+/* FIXME: Handle card insertion and removal properly. Maybe use a mask
+ * for MMC state? */
+static void mmc_omap_switch_callback(unsigned long data, u8 mmc_mask)
+{
+}
+
+static void mmc_omap_switch_handler(void *data)
+{
+	struct mmc_omap_host *host = (struct mmc_omap_host *) data;
+	struct mmc_card *card;
+	static int complained = 0;
+	int cards = 0, cover_open;
+
+	if (host->switch_pin == -1)
+		return;
+	cover_open = mmc_omap_cover_is_open(host);
+	if (cover_open != host->switch_last_state) {
+		kobject_uevent(&host->dev->kobj, KOBJ_CHANGE);
+		host->switch_last_state = cover_open;
+	}
+	mmc_detect_change(host->mmc, 0);
+	list_for_each_entry(card, &host->mmc->cards, node) {
+		if (mmc_card_present(card))
+			cards++;
+	}
+	if (mmc_omap_cover_is_open(host)) {
+		if (!complained) {
+			dev_info(mmc_dev(host->mmc), "cover is open");
+			complained = 1;
+		}
+		if (mmc_omap_enable_poll)
+			mod_timer(&host->switch_timer, jiffies +
+				msecs_to_jiffies(OMAP_MMC_SWITCH_POLL_DELAY));
+	} else {
+		complained = 0;
+	}
+}
+
+/* Prepare to transfer the next segment of a scatterlist */
+static void
+mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data)
+{
+	int dma_ch = host->dma_ch;
+	unsigned long data_addr;
+	u16 buf, frame;
+	u32 count;
+	struct scatterlist *sg = &data->sg[host->sg_idx];
+	int src_port = 0;
+	int dst_port = 0;
+	int sync_dev = 0;
+
+	data_addr = io_v2p((u32) host->base) + OMAP_MMC_REG_DATA;
+	frame = 1 << data->blksz_bits;
+	count = sg_dma_len(sg);
+
+	if ((data->blocks == 1) && (count > (1 << data->blksz_bits)))
+		count = frame;
+
+	host->dma_len = count;
+
+	/* FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx and 24xx.
+	 * Use 16 or 32 word frames when the blocksize is at least that large.
+	 * Blocksize is usually 512 bytes; but not for some SD reads.
+	 */
+	if (cpu_is_omap15xx() && frame > 32)
+		frame = 32;
+	else if (frame > 64)
+		frame = 64;
+	count /= frame;
+	frame >>= 1;
+
+	if (!(data->flags & MMC_DATA_WRITE)) {
+		buf = 0x800f | ((frame - 1) << 8);
+
+		if (cpu_class_is_omap1()) {
+			src_port = OMAP_DMA_PORT_TIPB;
+			dst_port = OMAP_DMA_PORT_EMIFF;
+		}
+		if (cpu_is_omap24xx())
+			sync_dev = OMAP24XX_DMA_MMC1_RX;
+
+		omap_set_dma_src_params(dma_ch, src_port,
+					OMAP_DMA_AMODE_CONSTANT,
+					data_addr, 0, 0);
+		omap_set_dma_dest_params(dma_ch, dst_port,
+					 OMAP_DMA_AMODE_POST_INC,
+					 sg_dma_address(sg), 0, 0);
+		omap_set_dma_dest_data_pack(dma_ch, 1);
+		omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
+	} else {
+		buf = 0x0f80 | ((frame - 1) << 0);
+
+		if (cpu_class_is_omap1()) {
+			src_port = OMAP_DMA_PORT_EMIFF;
+			dst_port = OMAP_DMA_PORT_TIPB;
+		}
+		if (cpu_is_omap24xx())
+			sync_dev = OMAP24XX_DMA_MMC1_TX;
+
+		omap_set_dma_dest_params(dma_ch, dst_port,
+					 OMAP_DMA_AMODE_CONSTANT,
+					 data_addr, 0, 0);
+		omap_set_dma_src_params(dma_ch, src_port,
+					OMAP_DMA_AMODE_POST_INC,
+					sg_dma_address(sg), 0, 0);
+		omap_set_dma_src_data_pack(dma_ch, 1);
+		omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
+	}
+
+	/* Max limit for DMA frame count is 0xffff */
+	if (unlikely(count > 0xffff))
+		BUG();
+
+	OMAP_MMC_WRITE(host->base, BUF, buf);
+	omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S16,
+				     frame, count, OMAP_DMA_SYNC_FRAME,
+				     sync_dev, 0);
+}
+
+/* A scatterlist segment completed */
+static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
+{
+	struct mmc_omap_host *host = (struct mmc_omap_host *) data;
+	struct mmc_data *mmcdat = host->data;
+
+	if (unlikely(host->dma_ch < 0)) {
+		dev_err(mmc_dev(host->mmc),
+			"DMA callback while DMA not enabled\n");
+		return;
+	}
+	/* FIXME: We really should do something to _handle_ the errors */
+	if (ch_status & OMAP_DMA_TOUT_IRQ) {
+		dev_err(mmc_dev(host->mmc),"DMA timeout\n");
+		return;
+	}
+	if (ch_status & OMAP_DMA_DROP_IRQ) {
+		dev_err(mmc_dev(host->mmc), "DMA sync error\n");
+		return;
+	}
+	if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
+		return;
+	}
+	mmcdat->bytes_xfered += host->dma_len;
+	host->sg_idx++;
+	if (host->sg_idx < host->sg_len) {
+		mmc_omap_prepare_dma(host, host->data);
+		omap_start_dma(host->dma_ch);
+	} else
+		mmc_omap_dma_done(host, host->data);
+}
+
+static int mmc_omap_get_dma_channel(struct mmc_omap_host *host, struct mmc_data *data)
+{
+	const char *dev_name;
+	int sync_dev, dma_ch, is_read, r;
+
+	is_read = !(data->flags & MMC_DATA_WRITE);
+	del_timer_sync(&host->dma_timer);
+	if (host->dma_ch >= 0) {
+		if (is_read == host->dma_is_read)
+			return 0;
+		omap_free_dma(host->dma_ch);
+		host->dma_ch = -1;
+	}
+
+	if (is_read) {
+		if (host->id == 1) {
+			sync_dev = OMAP_DMA_MMC_RX;
+			dev_name = "MMC1 read";
+		} else {
+			sync_dev = OMAP_DMA_MMC2_RX;
+			dev_name = "MMC2 read";
+		}
+	} else {
+		if (host->id == 1) {
+			sync_dev = OMAP_DMA_MMC_TX;
+			dev_name = "MMC1 write";
+		} else {
+			sync_dev = OMAP_DMA_MMC2_TX;
+			dev_name = "MMC2 write";
+		}
+	}
+	r = omap_request_dma(sync_dev, dev_name, mmc_omap_dma_cb,
+			     host, &dma_ch);
+	if (r != 0) {
+		dev_dbg(mmc_dev(host->mmc), "omap_request_dma() failed with %d\n", r);
+		return r;
+	}
+	host->dma_ch = dma_ch;
+	host->dma_is_read = is_read;
+
+	return 0;
+}
+
+static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_request *req)
+{
+	u16 reg;
+
+	reg = OMAP_MMC_READ(host->base, SDIO);
+	reg &= ~(1 << 5);
+	OMAP_MMC_WRITE(host->base, SDIO, reg);
+	/* Set maximum timeout */
+	OMAP_MMC_WRITE(host->base, CTO, 0xff);
+}
+
+static inline void set_data_timeout(struct mmc_omap_host *host, struct mmc_request *req)
+{
+	int timeout;
+	u16 reg;
+
+	/* Convert ns to clock cycles by assuming 20MHz frequency
+	 * 1 cycle at 20MHz = 500 ns
+	 */
+	timeout = req->data->timeout_clks + req->data->timeout_ns / 500;
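+	/* e.g. a 100 ms timeout_ns works out to roughly 200000 cycles at
+	 * the assumed 20MHz clock, which no longer fits in 16 bits and so
+	 * takes the multiplier path below.
+	 */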
+
+	/* Check if we need to use timeout multiplier register */
+	reg = OMAP_MMC_READ(host->base, SDIO);
+	if (timeout > 0xffff) {
+		reg |= (1 << 5);
+		timeout /= 1024;
+	} else
+		reg &= ~(1 << 5);
+	OMAP_MMC_WRITE(host->base, SDIO, reg);
+	OMAP_MMC_WRITE(host->base, DTO, timeout);
+}
+
+static void
+mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
+{
+	struct mmc_data *data = req->data;
+	int i, use_dma, block_size;
+	unsigned sg_len;
+
+	host->data = data;
+	if (data == NULL) {
+		OMAP_MMC_WRITE(host->base, BLEN, 0);
+		OMAP_MMC_WRITE(host->base, NBLK, 0);
+		OMAP_MMC_WRITE(host->base, BUF, 0);
+		host->dma_in_use = 0;
+		set_cmd_timeout(host, req);
+		return;
+	}
+
+	block_size = 1 << data->blksz_bits;
+
+	OMAP_MMC_WRITE(host->base, NBLK, data->blocks - 1);
+	OMAP_MMC_WRITE(host->base, BLEN, block_size - 1);
+	set_data_timeout(host, req);
+
+	/* cope with calling layer confusion; it issues "single
+	 * block" writes using multi-block scatterlists.
+	 */
+	sg_len = (data->blocks == 1) ? 1 : data->sg_len;
+
+	/* Only do DMA for entire blocks */
+	use_dma = host->use_dma;
+	if (use_dma) {
+		for (i = 0; i < sg_len; i++) {
+			if ((data->sg[i].length % block_size) != 0) {
+				use_dma = 0;
+				break;
+			}
+		}
+	}
+
+	host->sg_idx = 0;
+	if (use_dma) {
+		if (mmc_omap_get_dma_channel(host, data) == 0) {
+			enum dma_data_direction dma_data_dir;
+
+			if (data->flags & MMC_DATA_WRITE)
+				dma_data_dir = DMA_TO_DEVICE;
+			else
+				dma_data_dir = DMA_FROM_DEVICE;
+
+			host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
+						sg_len, dma_data_dir);
+			host->total_bytes_left = 0;
+			mmc_omap_prepare_dma(host, req->data);
+			host->brs_received = 0;
+			host->dma_done = 0;
+			host->dma_in_use = 1;
+		} else
+			use_dma = 0;
+	}
+
+	/* Revert to PIO? */
+	if (!use_dma) {
+		OMAP_MMC_WRITE(host->base, BUF, 0x1f1f);
+		host->total_bytes_left = data->blocks * block_size;
+		host->sg_len = sg_len;
+		mmc_omap_sg_to_buf(host);
+		host->dma_in_use = 0;
+	}
+}
+
+static void mmc_omap_request(struct mmc_host *mmc, struct mmc_request *req)
+{
+	struct mmc_omap_host *host = mmc_priv(mmc);
+
+	WARN_ON(host->mrq != NULL);
+
+	host->mrq = req;
+
+	/* only touch fifo AFTER the controller readies it */
+	mmc_omap_prepare_data(host, req);
+	mmc_omap_start_command(host, req->cmd);
+	if (host->dma_in_use)
+		omap_start_dma(host->dma_ch);
+}
+
+static void innovator_fpga_socket_power(int on)
+{
+#if defined(CONFIG_MACH_OMAP_INNOVATOR) && defined(CONFIG_ARCH_OMAP15XX)
+
+	if (on) {
+		fpga_write(fpga_read(OMAP1510_FPGA_POWER) | (1 << 3),
+		     OMAP1510_FPGA_POWER);
+	} else {
+		fpga_write(fpga_read(OMAP1510_FPGA_POWER) & ~(1 << 3),
+		     OMAP1510_FPGA_POWER);
+	}
+#endif
+}
+
+/*
+ * Turn the socket power on/off. Innovator uses FPGA, most boards
+ * probably use GPIO.
+ */
+static void mmc_omap_power(struct mmc_omap_host *host, int on)
+{
+	if (on) {
+		if (machine_is_omap_innovator())
+			innovator_fpga_socket_power(1);
+		else if (machine_is_omap_h2())
+			tps65010_set_gpio_out_value(GPIO3, HIGH);
+		else if (machine_is_omap_h3())
+			/* GPIO 4 of TPS65010 sends SD_EN signal */
+			tps65010_set_gpio_out_value(GPIO4, HIGH);
+		else if (cpu_is_omap24xx()) {
+			u16 reg = OMAP_MMC_READ(host->base, CON);
+			OMAP_MMC_WRITE(host->base, CON, reg | (1 << 11));
+		} else
+			if (host->power_pin >= 0)
+				omap_set_gpio_dataout(host->power_pin, 1);
+	} else {
+		if (machine_is_omap_innovator())
+			innovator_fpga_socket_power(0);
+		else if (machine_is_omap_h2())
+			tps65010_set_gpio_out_value(GPIO3, LOW);
+		else if (machine_is_omap_h3())
+			tps65010_set_gpio_out_value(GPIO4, LOW);
+		else if (cpu_is_omap24xx()) {
+			u16 reg = OMAP_MMC_READ(host->base, CON);
+			OMAP_MMC_WRITE(host->base, CON, reg & ~(1 << 11));
+		} else
+			if (host->power_pin >= 0)
+				omap_set_gpio_dataout(host->power_pin, 0);
+	}
+}
+
+static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+	struct mmc_omap_host *host = mmc_priv(mmc);
+	int dsor;
+	int realclock, i;
+
+	realclock = ios->clock;
+
+	if (ios->clock == 0)
+		dsor = 0;
+	else {
+		int func_clk_rate = clk_get_rate(host->fclk);
+
+		dsor = func_clk_rate / realclock;
+		if (dsor < 1)
+			dsor = 1;
+
+		if (func_clk_rate / dsor > realclock)
+			dsor++;
+
+		if (dsor > 250)
+			dsor = 250;
+		dsor++;
+
+		if (ios->bus_width == MMC_BUS_WIDTH_4)
+			dsor |= 1 << 15;
+	}
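+	/* dsor now holds the value for the CON register written below: the
+	 * clock divisor in the low bits, with bit 15 selecting 4-bit bus
+	 * width (the power-up/on case below additionally sets bit 11).
+	 */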
+
+	switch (ios->power_mode) {
+	case MMC_POWER_OFF:
+		mmc_omap_power(host, 0);
+		break;
+	case MMC_POWER_UP:
+	case MMC_POWER_ON:
+		mmc_omap_power(host, 1);
+		dsor |= 1<<11;
+		break;
+	}
+
+	host->bus_mode = ios->bus_mode;
+	host->hw_bus_mode = host->bus_mode;
+
+	clk_enable(host->fclk);
+
+	/* On insanely high arm_per frequencies something sometimes
+	 * goes somehow out of sync, and the POW bit is not being set,
+	 * which results in the while loop below getting stuck.
+	 * Writing to the CON register twice seems to do the trick. */
+	for (i = 0; i < 2; i++)
+		OMAP_MMC_WRITE(host->base, CON, dsor);
+	if (ios->power_mode == MMC_POWER_UP) {
+		/* Send clock cycles, poll completion */
+		OMAP_MMC_WRITE(host->base, IE, 0);
+		OMAP_MMC_WRITE(host->base, STAT, 0xffff);
+		OMAP_MMC_WRITE(host->base, CMD, 1<<7);
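+		/* busy-wait for STAT bit 0 (end of command) */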
+		while (0 == (OMAP_MMC_READ(host->base, STAT) & 1));
+		OMAP_MMC_WRITE(host->base, STAT, 1);
+	}
+	clk_disable(host->fclk);
+}
+
+static int mmc_omap_get_ro(struct mmc_host *mmc)
+{
+	struct mmc_omap_host *host = mmc_priv(mmc);
+
+	return host->wp_pin && omap_get_gpio_datain(host->wp_pin);
+}
+
+static struct mmc_host_ops mmc_omap_ops = {
+	.request	= mmc_omap_request,
+	.set_ios	= mmc_omap_set_ios,
+	.get_ro		= mmc_omap_get_ro,
+};
+
+static int __init mmc_omap_probe(struct platform_device *pdev)
+{
+	struct omap_mmc_conf *minfo = pdev->dev.platform_data;
+	struct mmc_host *mmc;
+	struct mmc_omap_host *host = NULL;
+	int ret = 0;
+	
+	if (!platform_get_resource(pdev, IORESOURCE_MEM, 0) ||
+			!platform_get_resource(pdev, IORESOURCE_IRQ, 0)) {
+		dev_err(&pdev->dev, "mmc_omap_probe: invalid resource type\n");
+		return -ENODEV;
+	}
+
+	if (!request_mem_region(pdev->resource[0].start,
+				pdev->resource[0].end - pdev->resource[0].start + 1,
+				pdev->name)) {
+		dev_dbg(&pdev->dev, "request_mem_region failed\n");
+		return -EBUSY;
+	}
+
+	mmc = mmc_alloc_host(sizeof(struct mmc_omap_host), &pdev->dev);
+	if (!mmc) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	host = mmc_priv(mmc);
+	host->mmc = mmc;
+
+	spin_lock_init(&host->dma_lock);
+	init_timer(&host->dma_timer);
+	host->dma_timer.function = mmc_omap_dma_timer;
+	host->dma_timer.data = (unsigned long) host;
+
+	host->id = pdev->id;
+
+	if (cpu_is_omap24xx()) {
+		host->iclk = clk_get(&pdev->dev, "mmc_ick");
+		if (IS_ERR(host->iclk)) {
+			ret = PTR_ERR(host->iclk);
+			goto out;
+		}
+		clk_enable(host->iclk);
+	}
+
+	if (!cpu_is_omap24xx())
+		host->fclk = clk_get(&pdev->dev, "mmc_ck");
+	else
+		host->fclk = clk_get(&pdev->dev, "mmc_fck");
+
+	if (IS_ERR(host->fclk)) {
+		ret = PTR_ERR(host->fclk);
+		goto out;
+	}
+
+	/* REVISIT:
+	 * Also, use minfo->cover to decide how to manage
+	 * the card detect sensing.
+	 */
+	host->power_pin = minfo->power_pin;
+	host->switch_pin = minfo->switch_pin;
+	host->wp_pin = minfo->wp_pin;
+	host->use_dma = 1;
+	host->dma_ch = -1;
+
+	host->irq = pdev->resource[1].start;
+	host->base = ioremap(pdev->resource[0].start, SZ_4K);
+	if (!host->base) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	if (minfo->wire4)
+		mmc->caps |= MMC_CAP_4_BIT_DATA;
+
+	mmc->ops = &mmc_omap_ops;
+	mmc->f_min = 400000;
+	mmc->f_max = 24000000;
+	mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34;
+
+	/* Use scatterlist DMA to reduce per-transfer costs.
+	 * NOTE max_seg_size assumption that small blocks aren't
+	 * normally used (except e.g. for reading SD registers).
+	 */
+	mmc->max_phys_segs = 32;
+	mmc->max_hw_segs = 32;
+	mmc->max_sectors = 256; /* NBLK max 11-bits, OMAP also limited by DMA */
+	mmc->max_seg_size = mmc->max_sectors * 512;
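+	/* i.e. at most 256 * 512 = 128 KiB per scatterlist segment */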
+
+	if (host->power_pin >= 0) {
+		if ((ret = omap_request_gpio(host->power_pin)) != 0) {
+			dev_err(mmc_dev(host->mmc),
+				"Unable to get GPIO pin for MMC power\n");
+			goto out;
+		}
+		omap_set_gpio_direction(host->power_pin, 0);
+	}
+
+	ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host);
+	if (ret)
+		goto out;
+
+	host->dev = &pdev->dev;
+	platform_set_drvdata(pdev, host);
+
+	mmc_add_host(mmc);
+
+	if (host->switch_pin >= 0) {
+		INIT_WORK(&host->switch_work, mmc_omap_switch_handler, host);
+		init_timer(&host->switch_timer);
+		host->switch_timer.function = mmc_omap_switch_timer;
+		host->switch_timer.data = (unsigned long) host;
+		if (omap_request_gpio(host->switch_pin) != 0) {
+			dev_warn(mmc_dev(host->mmc), "Unable to get GPIO pin for MMC cover switch\n");
+			host->switch_pin = -1;
+			goto no_switch;
+		}
+
+		omap_set_gpio_direction(host->switch_pin, 1);
+		ret = request_irq(OMAP_GPIO_IRQ(host->switch_pin),
+				  mmc_omap_switch_irq, SA_TRIGGER_RISING, DRIVER_NAME, host);
+		if (ret) {
+			dev_warn(mmc_dev(host->mmc), "Unable to get IRQ for MMC cover switch\n");
+			omap_free_gpio(host->switch_pin);
+			host->switch_pin = -1;
+			goto no_switch;
+		}
+		ret = device_create_file(&pdev->dev, &dev_attr_cover_switch);
+		if (ret == 0) {
+			ret = device_create_file(&pdev->dev, &dev_attr_enable_poll);
+			if (ret != 0)
+				device_remove_file(&pdev->dev, &dev_attr_cover_switch);
+		}
+		if (ret) {
+			dev_warn(mmc_dev(host->mmc), "Unable to create sysfs attributes\n");
+			free_irq(OMAP_GPIO_IRQ(host->switch_pin), host);
+			omap_free_gpio(host->switch_pin);
+			host->switch_pin = -1;
+			goto no_switch;
+		}
+		if (mmc_omap_enable_poll && mmc_omap_cover_is_open(host))
+			schedule_work(&host->switch_work);
+	}
+
+no_switch:
+	return 0;
+
+out:
+	/* FIXME: Free other resources too. */
+	if (host) {
+		if (host->iclk && !IS_ERR(host->iclk))
+			clk_put(host->iclk);
+		if (host->fclk && !IS_ERR(host->fclk))
+			clk_put(host->fclk);
+		mmc_free_host(host->mmc);
+	}
+	return ret;
+}
+
+static int mmc_omap_remove(struct platform_device *pdev)
+{
+	struct mmc_omap_host *host = platform_get_drvdata(pdev);
+
+	platform_set_drvdata(pdev, NULL);
+
+	if (host) {
+		mmc_remove_host(host->mmc);
+		free_irq(host->irq, host);
+
+		if (host->power_pin >= 0)
+			omap_free_gpio(host->power_pin);
+		if (host->switch_pin >= 0) {
+			device_remove_file(&pdev->dev, &dev_attr_enable_poll);
+			device_remove_file(&pdev->dev, &dev_attr_cover_switch);
+			free_irq(OMAP_GPIO_IRQ(host->switch_pin), host);
+			omap_free_gpio(host->switch_pin);
+			host->switch_pin = -1;
+			del_timer_sync(&host->switch_timer);
+			flush_scheduled_work();
+		}
+		if (host->iclk && !IS_ERR(host->iclk))
+			clk_put(host->iclk);
+		if (host->fclk && !IS_ERR(host->fclk))
+			clk_put(host->fclk);
+		mmc_free_host(host->mmc);
+	}
+
+	release_mem_region(pdev->resource[0].start,
+			pdev->resource[0].end - pdev->resource[0].start + 1);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int mmc_omap_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+	int ret = 0;
+	struct mmc_omap_host *host = platform_get_drvdata(pdev);
+
+	if (host && host->suspended)
+		return 0;
+
+	if (host) {
+		ret = mmc_suspend_host(host->mmc, mesg);
+		if (ret == 0)
+			host->suspended = 1;
+	}
+	return ret;
+}
+
+static int mmc_omap_resume(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct mmc_omap_host *host = platform_get_drvdata(pdev);
+
+	if (host && !host->suspended)
+		return 0;
+
+	if (host) {
+		ret = mmc_resume_host(host->mmc);
+		if (ret == 0)
+			host->suspended = 0;
+	}
+
+	return ret;
+}
+#else
+#define mmc_omap_suspend	NULL
+#define mmc_omap_resume		NULL
+#endif
+
+static struct platform_driver mmc_omap_driver = {
+	.probe		= mmc_omap_probe,
+	.remove		= mmc_omap_remove,
+	.suspend	= mmc_omap_suspend,
+	.resume		= mmc_omap_resume,
+	.driver		= {
+		.name	= DRIVER_NAME,
+	},
+};
+
+static int __init mmc_omap_init(void)
+{
+	return platform_driver_register(&mmc_omap_driver);
+}
+
+static void __exit mmc_omap_exit(void)
+{
+	platform_driver_unregister(&mmc_omap_driver);
+}
+
+module_init(mmc_omap_init);
+module_exit(mmc_omap_exit);
+
+MODULE_DESCRIPTION("OMAP Multimedia Card driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS(DRIVER_NAME);
+MODULE_AUTHOR("Juha Yrjölä");
diff --git a/drivers/mmc/omap.h b/drivers/mmc/omap.h
new file mode 100644
index 0000000..c954d35
--- /dev/null
+++ b/drivers/mmc/omap.h
@@ -0,0 +1,55 @@
+#ifndef	DRIVERS_MEDIA_MMC_OMAP_H
+#define	DRIVERS_MEDIA_MMC_OMAP_H
+
+#define	OMAP_MMC_REG_CMD	0x00
+#define	OMAP_MMC_REG_ARGL	0x04
+#define	OMAP_MMC_REG_ARGH	0x08
+#define	OMAP_MMC_REG_CON	0x0c
+#define	OMAP_MMC_REG_STAT	0x10
+#define	OMAP_MMC_REG_IE		0x14
+#define	OMAP_MMC_REG_CTO	0x18
+#define	OMAP_MMC_REG_DTO	0x1c
+#define	OMAP_MMC_REG_DATA	0x20
+#define	OMAP_MMC_REG_BLEN	0x24
+#define	OMAP_MMC_REG_NBLK	0x28
+#define	OMAP_MMC_REG_BUF	0x2c
+#define OMAP_MMC_REG_SDIO	0x34
+#define	OMAP_MMC_REG_REV	0x3c
+#define	OMAP_MMC_REG_RSP0	0x40
+#define	OMAP_MMC_REG_RSP1	0x44
+#define	OMAP_MMC_REG_RSP2	0x48
+#define	OMAP_MMC_REG_RSP3	0x4c
+#define	OMAP_MMC_REG_RSP4	0x50
+#define	OMAP_MMC_REG_RSP5	0x54
+#define	OMAP_MMC_REG_RSP6	0x58
+#define	OMAP_MMC_REG_RSP7	0x5c
+#define	OMAP_MMC_REG_IOSR	0x60
+#define	OMAP_MMC_REG_SYSC	0x64
+#define	OMAP_MMC_REG_SYSS	0x68
+
+#define	OMAP_MMC_STAT_CARD_ERR		(1 << 14)
+#define	OMAP_MMC_STAT_CARD_IRQ		(1 << 13)
+#define	OMAP_MMC_STAT_OCR_BUSY		(1 << 12)
+#define	OMAP_MMC_STAT_A_EMPTY		(1 << 11)
+#define	OMAP_MMC_STAT_A_FULL		(1 << 10)
+#define	OMAP_MMC_STAT_CMD_CRC		(1 <<  8)
+#define	OMAP_MMC_STAT_CMD_TOUT		(1 <<  7)
+#define	OMAP_MMC_STAT_DATA_CRC		(1 <<  6)
+#define	OMAP_MMC_STAT_DATA_TOUT		(1 <<  5)
+#define	OMAP_MMC_STAT_END_BUSY		(1 <<  4)
+#define	OMAP_MMC_STAT_END_OF_DATA	(1 <<  3)
+#define	OMAP_MMC_STAT_CARD_BUSY		(1 <<  2)
+#define	OMAP_MMC_STAT_END_OF_CMD	(1 <<  0)
+
+#define OMAP_MMC_READ(base, reg)	__raw_readw((base) + OMAP_MMC_REG_##reg)
+#define OMAP_MMC_WRITE(base, reg, val)	__raw_writew((val), (base) + OMAP_MMC_REG_##reg)
+
+/*
+ * Command types
+ */
+#define OMAP_MMC_CMDTYPE_BC	0
+#define OMAP_MMC_CMDTYPE_BCR	1
+#define OMAP_MMC_CMDTYPE_AC	2
+#define OMAP_MMC_CMDTYPE_ADTC	3
+
+#endif
diff --git a/drivers/mmc/pxamci.c b/drivers/mmc/pxamci.c
index c32fad1..eb9a882 100644
--- a/drivers/mmc/pxamci.c
+++ b/drivers/mmc/pxamci.c
@@ -37,12 +37,6 @@
 
 #include "pxamci.h"
 
-#ifdef CONFIG_MMC_DEBUG
-#define DBG(x...)	printk(KERN_DEBUG x)
-#else
-#define DBG(x...)	do { } while (0)
-#endif
-
 #define DRIVER_NAME	"pxa2xx-mci"
 
 #define NR_SG	1
@@ -206,7 +200,7 @@
 
 static void pxamci_finish_request(struct pxamci_host *host, struct mmc_request *mrq)
 {
-	DBG("PXAMCI: request done\n");
+	pr_debug("PXAMCI: request done\n");
 	host->mrq = NULL;
 	host->cmd = NULL;
 	host->data = NULL;
@@ -252,7 +246,7 @@
 			if ((cmd->resp[0] & 0x80000000) == 0)
 				cmd->error = MMC_ERR_BADCRC;
 		} else {
-			DBG("ignoring CRC from command %d - *risky*\n",cmd->opcode);
+			pr_debug("ignoring CRC from command %d - *risky*\n",cmd->opcode);
 		}
 #else
 		cmd->error = MMC_ERR_BADCRC;
@@ -317,12 +311,12 @@
 
 	ireg = readl(host->base + MMC_I_REG);
 
-	DBG("PXAMCI: irq %08x\n", ireg);
+	pr_debug("PXAMCI: irq %08x\n", ireg);
 
 	if (ireg) {
 		unsigned stat = readl(host->base + MMC_STAT);
 
-		DBG("PXAMCI: stat %08x\n", stat);
+		pr_debug("PXAMCI: stat %08x\n", stat);
 
 		if (ireg & END_CMD_RES)
 			handled |= pxamci_cmd_done(host, stat);
@@ -376,9 +370,9 @@
 {
 	struct pxamci_host *host = mmc_priv(mmc);
 
-	DBG("pxamci_set_ios: clock %u power %u vdd %u.%02u\n",
-	    ios->clock, ios->power_mode, ios->vdd / 100,
-	    ios->vdd % 100);
+	pr_debug("pxamci_set_ios: clock %u power %u vdd %u.%02u\n",
+		 ios->clock, ios->power_mode, ios->vdd / 100,
+		 ios->vdd % 100);
 
 	if (ios->clock) {
 		unsigned int clk = CLOCKRATE / ios->clock;
@@ -405,8 +399,8 @@
 			host->cmdat |= CMDAT_INIT;
 	}
 
-	DBG("pxamci_set_ios: clkrt = %x cmdat = %x\n",
-	    host->clkrt, host->cmdat);
+	pr_debug("pxamci_set_ios: clkrt = %x cmdat = %x\n",
+		 host->clkrt, host->cmdat);
 }
 
 static struct mmc_host_ops pxamci_ops = {
diff --git a/drivers/mmc/sdhci.c b/drivers/mmc/sdhci.c
index 8b811d9..bdbfca0 100644
--- a/drivers/mmc/sdhci.c
+++ b/drivers/mmc/sdhci.c
@@ -31,12 +31,8 @@
 
 #define BUGMAIL "<sdhci-devel@list.drzeus.cx>"
 
-#ifdef CONFIG_MMC_DEBUG
 #define DBG(f, x...) \
-	printk(KERN_DEBUG DRIVER_NAME " [%s()]: " f, __func__,## x)
-#else
-#define DBG(f, x...) do { } while (0)
-#endif
+	pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)
 
 static const struct pci_device_id pci_ids[] __devinitdata = {
 	/* handle any SD host controller */
diff --git a/drivers/mmc/wbsd.c b/drivers/mmc/wbsd.c
index 3be397d..511f7b0 100644
--- a/drivers/mmc/wbsd.c
+++ b/drivers/mmc/wbsd.c
@@ -44,15 +44,10 @@
 #define DRIVER_NAME "wbsd"
 #define DRIVER_VERSION "1.5"
 
-#ifdef CONFIG_MMC_DEBUG
 #define DBG(x...) \
-	printk(KERN_DEBUG DRIVER_NAME ": " x)
+	pr_debug(DRIVER_NAME ": " x)
 #define DBGF(f, x...) \
-	printk(KERN_DEBUG DRIVER_NAME " [%s()]: " f, __func__ , ##x)
-#else
-#define DBG(x...)	do { } while (0)
-#define DBGF(x...)	do { } while (0)
-#endif
+	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 
 /*
  * Device resources
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index fe0d8b8..7d22dc0 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -63,6 +63,33 @@
 
 	  If unsure, say N.
 
+config SERIAL_8250_GSC
+	tristate
+	depends on SERIAL_8250 && GSC
+	default SERIAL_8250
+
+config SERIAL_8250_PCI
+	tristate "8250/16550 PCI device support" if EMBEDDED
+	depends on SERIAL_8250 && PCI
+	default SERIAL_8250
+	help
+	  This builds standard PCI serial support. You may be able to
+	  disable this feature if you only need legacy serial support.
+	  Saves about 9K.
+
+config SERIAL_8250_PNP
+	tristate "8250/16550 PNP device support" if EMBEDDED
+	depends on SERIAL_8250 && PNP
+	default SERIAL_8250
+	help
+	  This builds standard PNP serial support. You may be able to
+	  disable this feature if you only need legacy serial support.
+
+config SERIAL_8250_HP300
+	tristate
+	depends on SERIAL_8250 && HP300
+	default SERIAL_8250
+
 config SERIAL_8250_CS
 	tristate "8250/16550 PCMCIA device support"
 	depends on PCMCIA && SERIAL_8250
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index d2b4c21..0a71bf6 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -4,15 +4,13 @@
 #  $Id: Makefile,v 1.8 2002/07/21 21:32:30 rmk Exp $
 #
 
-serial-8250-y :=
-serial-8250-$(CONFIG_PNP) += 8250_pnp.o
-serial-8250-$(CONFIG_GSC) += 8250_gsc.o
-serial-8250-$(CONFIG_PCI) += 8250_pci.o
-serial-8250-$(CONFIG_HP300) += 8250_hp300.o
-
 obj-$(CONFIG_SERIAL_CORE) += serial_core.o
 obj-$(CONFIG_SERIAL_21285) += 21285.o
-obj-$(CONFIG_SERIAL_8250) += 8250.o $(serial-8250-y)
+obj-$(CONFIG_SERIAL_8250) += 8250.o
+obj-$(CONFIG_SERIAL_8250_PNP) += 8250_pnp.o
+obj-$(CONFIG_SERIAL_8250_GSC) += 8250_gsc.o
+obj-$(CONFIG_SERIAL_8250_PCI) += 8250_pci.o
+obj-$(CONFIG_SERIAL_8250_HP300) += 8250_hp300.o
 obj-$(CONFIG_SERIAL_8250_CS) += serial_cs.o
 obj-$(CONFIG_SERIAL_8250_ACORN) += 8250_acorn.o
 obj-$(CONFIG_SERIAL_8250_CONSOLE) += 8250_early.o
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index cb68efb..8a2de03 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -1,3 +1,21 @@
+Version 1.42
+------------
+Fix slow oplock break when mounted to different servers at the same time,
+the tids match, and we try to find a matching fid on the wrong server.
+
+Version 1.41
+------------
+Fix NTLMv2 security (can be enabled in /proc/fs/cifs) so customers can
+configure stronger authentication.  Fix sfu symlinks so they can
+be followed (not just recognized).  Fix wraparound of bcc on
+read responses when the buffer size is over 64K, and also fix wrap of
+the max smb buffer size when CIFSMaxBufSize is over 64K.  Fix oops in
+cifs_user_read and cifs_readpages (when EAGAIN on send of smb
+on socket is returned over and over).  Add POSIX (advisory) byte range
+locking support (requires server with newest CIFS UNIX Extensions
+to the protocol implemented). Slow down negprot slightly in port 139
+RFC1001 case to give session_init time on buggy servers.
+
 Version 1.40
 ------------
 Use fsuid (fsgid) more consistently instead of uid (gid). Improve performance
diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile
index 7384947..58c7725 100644
--- a/fs/cifs/Makefile
+++ b/fs/cifs/Makefile
@@ -3,4 +3,4 @@
 #
 obj-$(CONFIG_CIFS) += cifs.o
 
-cifs-objs := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o link.o misc.o netmisc.o smbdes.o smbencrypt.o transport.o asn1.o md4.o md5.o cifs_unicode.o nterr.o xattr.o cifsencrypt.o fcntl.o readdir.o ioctl.o
+cifs-objs := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o link.o misc.o netmisc.o smbdes.o smbencrypt.o transport.o asn1.o md4.o md5.o cifs_unicode.o nterr.o xattr.o cifsencrypt.o fcntl.o readdir.o ioctl.o ntlmssp.o
diff --git a/fs/cifs/README b/fs/cifs/README
index b0070d1..b2b4d08 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -422,6 +422,13 @@
  nomapchars     Do not translate any of these seven characters (default).
  nocase         Request case insensitive path name matching (case
 		sensitive is the default if the server supports it).
+ posixpaths     If CIFS Unix extensions are supported, attempt to
+		negotiate posix path name support which allows certain
+		characters forbidden in typical CIFS filenames, without
+		requiring remapping. (default)
+ noposixpaths   If CIFS Unix extensions are supported, do not request
+		posix path name support (this may cause servers to
+		reject creating files with certain reserved characters).
  nobrl          Do not send byte range lock requests to the server.
 		This is necessary for certain applications that break
 		with cifs style mandatory byte range locks (and most
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index a2c2485..e7d6373 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -1,7 +1,7 @@
 /*
  *   fs/cifs/cifsencrypt.c
  *
- *   Copyright (C) International Business Machines  Corp., 2005
+ *   Copyright (C) International Business Machines  Corp., 2005,2006
  *   Author(s): Steve French (sfrench@us.ibm.com)
  *
  *   This library is free software; you can redistribute it and/or modify
@@ -36,7 +36,8 @@
 extern void mdfour(unsigned char *out, unsigned char *in, int n);
 extern void E_md4hash(const unsigned char *passwd, unsigned char *p16);
 	
-static int cifs_calculate_signature(const struct smb_hdr * cifs_pdu, const char * key, char * signature)
+static int cifs_calculate_signature(const struct smb_hdr * cifs_pdu, 
+				    const char * key, char * signature)
 {
 	struct	MD5Context context;
 
@@ -56,9 +57,6 @@
 	int rc = 0;
 	char smb_signature[20];
 
-	/* BB remember to initialize sequence number elsewhere and initialize mac_signing key elsewhere BB */
-	/* BB remember to add code to save expected sequence number in midQ entry BB */
-
 	if((cifs_pdu == NULL) || (server == NULL))
 		return -EINVAL;
 
@@ -85,20 +83,33 @@
 static int cifs_calc_signature2(const struct kvec * iov, int n_vec,
 				const char * key, char * signature)
 {
-        struct  MD5Context context;
+	struct  MD5Context context;
+	int i;
 
-        if((iov == NULL) || (signature == NULL))
-                return -EINVAL;
+	if((iov == NULL) || (signature == NULL))
+		return -EINVAL;
 
-        MD5Init(&context);
-        MD5Update(&context,key,CIFS_SESSION_KEY_SIZE+16);
+	MD5Init(&context);
+	MD5Update(&context,key,CIFS_SESSION_KEY_SIZE+16);
+	for(i=0;i<n_vec;i++) {
+		if(iov[i].iov_base == NULL) {
+			cERROR(1,("null iovec entry"));
+			return -EIO;
+		} else if(iov[i].iov_len == 0)
+			break; /* bail out if we are sent nothing to sign */
+		/* The first entry includes a length field (which does not get
+		   signed) that occupies the first 4 bytes before the header */
+		if(i==0) {
+			if (iov[0].iov_len <= 8 ) /* cmd field at offset 9 */
+				break; /* nothing to sign or corrupt header */
+			MD5Update(&context,iov[0].iov_base+4, iov[0].iov_len-4);
+		} else
+			MD5Update(&context,iov[i].iov_base, iov[i].iov_len);
+	}
 
-/*        MD5Update(&context,cifs_pdu->Protocol,cifs_pdu->smb_buf_length); */ /* BB FIXME BB */
+	MD5Final(signature,&context);
 
-        MD5Final(signature,&context);
-
-	return -EOPNOTSUPP;
-/*        return 0; */
+	return 0;
 }
 
 
@@ -259,4 +270,5 @@
 /*	hmac_md5_update(v2_session_response+16)client thing,8,&context); */ /* BB fix */
 
 	hmac_md5_final(v2_session_response,&context);
+	cifs_dump_mem("v2_sess_rsp: ", v2_session_response, 32); /* BB removeme BB */
 }
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 4bbc544..d4b713e 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -93,13 +93,10 @@
 	int rc = 0;
 
 	sb->s_flags |= MS_NODIRATIME; /* and probably even noatime */
-	sb->s_fs_info = kmalloc(sizeof(struct cifs_sb_info),GFP_KERNEL);
+	sb->s_fs_info = kzalloc(sizeof(struct cifs_sb_info),GFP_KERNEL);
 	cifs_sb = CIFS_SB(sb);
 	if(cifs_sb == NULL)
 		return -ENOMEM;
-	else
-		memset(cifs_sb,0,sizeof(struct cifs_sb_info));
-	
 
 	rc = cifs_mount(sb, cifs_sb, data, devname);
 
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 74f405a..4e829dc 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -99,5 +99,5 @@
 extern ssize_t	cifs_listxattr(struct dentry *, char *, size_t);
 extern int cifs_ioctl (struct inode * inode, struct file * filep,
 		       unsigned int command, unsigned long arg);
-#define CIFS_VERSION   "1.40"
+#define CIFS_VERSION   "1.42"
 #endif				/* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 7bed276..006eb33 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -1,7 +1,7 @@
 /*
  *   fs/cifs/cifsglob.h
  *
- *   Copyright (C) International Business Machines  Corp., 2002,2005
+ *   Copyright (C) International Business Machines  Corp., 2002,2006
  *   Author(s): Steve French (sfrench@us.ibm.com)
  *
  *   This library is free software; you can redistribute it and/or modify
@@ -430,6 +430,15 @@
 #define   CIFS_LARGE_BUFFER     2
 #define   CIFS_IOVEC            4    /* array of response buffers */
 
+/* Type of session setup needed */
+#define   CIFS_PLAINTEXT	0
+#define   CIFS_LANMAN		1
+#define   CIFS_NTLM		2
+#define   CIFS_NTLMSSP_NEG	3
+#define   CIFS_NTLMSSP_AUTH	4
+#define   CIFS_SPNEGO_INIT	5
+#define   CIFS_SPNEGO_TARG	6
+
 /*
  *****************************************************************
  * All constants go here
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index cc24710..b2233ac 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -859,7 +859,10 @@
 	LOCKING_ANDX_RANGE Locks[1];
 } __attribute__((packed)) LOCK_REQ;
 
-
+/* lock type */
+#define CIFS_RDLCK	0
+#define CIFS_WRLCK	1
+#define CIFS_UNLCK      2
 typedef struct cifs_posix_lock {
 	__le16  lock_type;  /* 0 = Read, 1 = Write, 2 = Unlock */
 	__le16  lock_flags; /* 1 = Wait (only valid for setlock) */
@@ -1786,7 +1789,13 @@
 #define CIFS_UNIX_POSIX_ACL_CAP         0x00000002 /* support getfacl/setfacl */
 #define CIFS_UNIX_XATTR_CAP             0x00000004 /* support new namespace   */
 #define CIFS_UNIX_EXTATTR_CAP           0x00000008 /* support chattr/chflag   */
-#define CIFS_UNIX_POSIX_PATHNAMES_CAP   0x00000010 /* Use POSIX pathnames on the wire. */
+#define CIFS_UNIX_POSIX_PATHNAMES_CAP   0x00000010 /* Allow POSIX path chars  */
+#ifdef CONFIG_CIFS_POSIX
+#define CIFS_UNIX_CAP_MASK              0x0000001b
+#else 
+#define CIFS_UNIX_CAP_MASK              0x00000013
+#endif /* CONFIG_CIFS_POSIX */
+
 
 #define CIFS_POSIX_EXTENSIONS           0x00000010 /* support for new QFSInfo */
 
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 7b25463..2879ba3 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -1,7 +1,7 @@
 /*
  *   fs/cifs/cifsproto.h
  *
- *   Copyright (c) International Business Machines  Corp., 2002,2005
+ *   Copyright (c) International Business Machines  Corp., 2002,2006
  *   Author(s): Steve French (sfrench@us.ibm.com)
  *
  *   This library is free software; you can redistribute it and/or modify
@@ -64,6 +64,14 @@
 extern void header_assemble(struct smb_hdr *, char /* command */ ,
 			    const struct cifsTconInfo *, int /* length of
 			    fixed section (word count) in two byte units */);
+#ifdef CONFIG_CIFS_EXPERIMENTAL
+extern int small_smb_init_no_tc(const int smb_cmd, const int wct,
+				struct cifsSesInfo *ses,
+				void ** request_buf);
+extern int CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses,
+			     const int stage, int * pNTLMv2_flg,
+			     const struct nls_table *nls_cp);
+#endif
 extern __u16 GetNextMid(struct TCP_Server_Info *server);
 extern struct oplock_q_entry * AllocOplockQEntry(struct inode *, u16, 
 						 struct cifsTconInfo *);
@@ -257,7 +265,10 @@
 			const __u64 offset, const __u32 numUnlock,
 			const __u32 numLock, const __u8 lockType,
 			const int waitFlag);
-
+extern int CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
+			const __u16 smb_file_id, const int get_flag,
+			const __u64 len, const __u64 offset, 
+			const __u16 lock_type, const int waitFlag);
 extern int CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon);
 extern int CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses);
 
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index a243fe2..d705500 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1,7 +1,7 @@
 /*
  *   fs/cifs/cifssmb.c
  *
- *   Copyright (C) International Business Machines  Corp., 2002,2005
+ *   Copyright (C) International Business Machines  Corp., 2002,2006
  *   Author(s): Steve French (sfrench@us.ibm.com)
  *
  *   Contains the routines for constructing the SMB PDUs themselves
@@ -186,7 +186,35 @@
                 cifs_stats_inc(&tcon->num_smbs_sent);
 
 	return rc;
-}  
+}
+
+#ifdef CONFIG_CIFS_EXPERIMENTAL  
+int
+small_smb_init_no_tc(const int smb_command, const int wct, 
+		     struct cifsSesInfo *ses, void **request_buf)
+{
+	int rc;
+	struct smb_hdr * buffer;
+
+	rc = small_smb_init(smb_command, wct, NULL, request_buf);
+	if(rc)
+		return rc;
+
+	buffer = (struct smb_hdr *)*request_buf;
+	buffer->Mid = GetNextMid(ses->server);
+	if (ses->capabilities & CAP_UNICODE)
+		buffer->Flags2 |= SMBFLG2_UNICODE;
+	if (ses->capabilities & CAP_STATUS32)
+		buffer->Flags2 |= SMBFLG2_ERR_STATUS;
+
+	/* uid, tid can stay at zero as set in header assemble */
+
+	/* BB add support for turning on the signing when 
+	this function is used after 1st of session setup requests */
+
+	return rc;
+}
+#endif  /* CONFIG_CIFS_EXPERIMENTAL */
 
 /* If the return code is zero, this function must fill in request_buf pointer */
 static int
@@ -1042,7 +1070,7 @@
 		}
 	}
 
-	cifs_small_buf_release(pSMB);
+/*	cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */
 	if(*buf) {
 		if(resp_buf_type == CIFS_SMALL_BUFFER)
 			cifs_small_buf_release(iov[0].iov_base);
@@ -1246,7 +1274,7 @@
 		*nbytes += le16_to_cpu(pSMBr->Count);
 	} 
 
-	cifs_small_buf_release(pSMB);
+/*	cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */
 	if(resp_buf_type == CIFS_SMALL_BUFFER)
 		cifs_small_buf_release(iov[0].iov_base);
 	else if(resp_buf_type == CIFS_LARGE_BUFFER)
@@ -1325,6 +1353,85 @@
 }
 
 int
+CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
+		const __u16 smb_file_id, const int get_flag, const __u64 len,
+		const __u64 lkoffset, const __u16 lock_type, const int waitFlag)
+{
+	struct smb_com_transaction2_sfi_req *pSMB  = NULL;
+	struct smb_com_transaction2_sfi_rsp *pSMBr = NULL;
+	char *data_offset;
+	struct cifs_posix_lock *parm_data;
+	int rc = 0;
+	int bytes_returned = 0;
+	__u16 params, param_offset, offset, byte_count, count;
+
+	cFYI(1, ("Posix Lock"));
+	rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB);
+
+	if (rc)
+		return rc;
+
+	pSMBr = (struct smb_com_transaction2_sfi_rsp *)pSMB;
+
+	params = 6; 
+	pSMB->MaxSetupCount = 0;
+	pSMB->Reserved = 0;
+	pSMB->Flags = 0;
+	pSMB->Timeout = 0;
+	pSMB->Reserved2 = 0;
+	param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
+	offset = param_offset + params;
+
+	data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
+
+	count = sizeof(struct cifs_posix_lock);
+	pSMB->MaxParameterCount = cpu_to_le16(2);
+	pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB PDU from sess */
+	pSMB->SetupCount = 1;
+	pSMB->Reserved3 = 0;
+	if(get_flag)
+		pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION);
+	else
+		pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
+	byte_count = 3 /* pad */  + params + count;
+	pSMB->DataCount = cpu_to_le16(count);
+	pSMB->ParameterCount = cpu_to_le16(params);
+	pSMB->TotalDataCount = pSMB->DataCount;
+	pSMB->TotalParameterCount = pSMB->ParameterCount;
+	pSMB->ParameterOffset = cpu_to_le16(param_offset);
+	parm_data = (struct cifs_posix_lock *) 
+			(((char *) &pSMB->hdr.Protocol) + offset);
+
+	parm_data->lock_type = cpu_to_le16(lock_type);
+	if(waitFlag)
+		parm_data->lock_flags = 1;
+	parm_data->pid = cpu_to_le32(current->tgid);
+	parm_data->start = lkoffset;
+	parm_data->length = len;  /* normalize negative numbers */
+
+	pSMB->DataOffset = cpu_to_le16(offset);
+	pSMB->Fid = smb_file_id;
+	pSMB->InformationLevel = cpu_to_le16(SMB_SET_POSIX_LOCK);
+	pSMB->Reserved4 = 0;
+	pSMB->hdr.smb_buf_length += byte_count;
+	pSMB->ByteCount = cpu_to_le16(byte_count);
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+			(struct smb_hdr *) pSMBr, &bytes_returned, 0);
+	if (rc) {
+		cFYI(1, ("Send error in Posix Lock = %d", rc));
+	}
+
+	if (pSMB)
+		cifs_small_buf_release(pSMB);
+
+	/* Note: On -EAGAIN error only caller can retry on handle based calls
+	   since file handle passed in no longer valid */
+
+	return rc;
+}
+
+
+int
 CIFSSMBClose(const int xid, struct cifsTconInfo *tcon, int smb_file_id)
 {
 	int rc = 0;
@@ -2578,7 +2685,7 @@
 		cifs_small_buf_release(iov[0].iov_base);
 	else if(buf_type == CIFS_LARGE_BUFFER)
 		cifs_buf_release(iov[0].iov_base);
-	cifs_small_buf_release(pSMB);
+/*	cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */
 	return rc;
 }
 
@@ -2954,7 +3061,8 @@
 	pSMB->TotalParameterCount = cpu_to_le16(params);
 	pSMB->ParameterCount = pSMB->TotalParameterCount;
 	pSMB->ParameterOffset = cpu_to_le16(
-	  offsetof(struct smb_com_transaction2_ffirst_req, SearchAttributes) - 4);
+	      offsetof(struct smb_com_transaction2_ffirst_req, SearchAttributes)
+		- 4);
 	pSMB->DataCount = 0;
 	pSMB->DataOffset = 0;
 	pSMB->SetupCount = 1;	/* one byte, no need to make endian neutral */
@@ -2977,12 +3085,12 @@
 			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
 	cifs_stats_inc(&tcon->num_ffirst);
 
-	if (rc) {/* BB add logic to retry regular search if Unix search rejected unexpectedly by server */
+	if (rc) {/* BB add logic to retry regular search if Unix search
+			rejected unexpectedly by server */
 		/* BB Add code to handle unsupported level rc */
 		cFYI(1, ("Error in FindFirst = %d", rc));
 
-		if (pSMB)
-			cifs_buf_release(pSMB);
+		cifs_buf_release(pSMB);
 
 		/* BB eventually could optimize out free and realloc of buf */
 		/*    for this case */
@@ -2998,6 +3106,7 @@
 				psrch_inf->unicode = FALSE;
 
 			psrch_inf->ntwrk_buf_start = (char *)pSMBr;
+			psrch_inf->smallBuf = 0;
 			psrch_inf->srch_entries_start = 
 				(char *) &pSMBr->hdr.Protocol + 
 					le16_to_cpu(pSMBr->t2.DataOffset);
@@ -3118,9 +3227,14 @@
 			parms = (T2_FNEXT_RSP_PARMS *)response_data;
 			response_data = (char *)&pSMBr->hdr.Protocol +
 				le16_to_cpu(pSMBr->t2.DataOffset);
-			cifs_buf_release(psrch_inf->ntwrk_buf_start);
+			if(psrch_inf->smallBuf)
+				cifs_small_buf_release(
+					psrch_inf->ntwrk_buf_start);
+			else
+				cifs_buf_release(psrch_inf->ntwrk_buf_start);
 			psrch_inf->srch_entries_start = response_data;
 			psrch_inf->ntwrk_buf_start = (char *)pSMB;
+			psrch_inf->smallBuf = 0;
 			if(parms->EndofSearch)
 				psrch_inf->endOfSearch = TRUE;
 			else
@@ -3834,6 +3948,7 @@
 
 	cFYI(1, ("In SETFSUnixInfo"));
 SETFSUnixRetry:
+	/* BB switch to small buf init to save memory */
 	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
 		      (void **) &pSMBr);
 	if (rc)
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 2a0c1f4..0b86d5c 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1,7 +1,7 @@
 /*
  *   fs/cifs/connect.c
  *
- *   Copyright (C) International Business Machines  Corp., 2002,2005
+ *   Copyright (C) International Business Machines  Corp., 2002,2006
  *   Author(s): Steve French (sfrench@us.ibm.com)
  *
  *   This library is free software; you can redistribute it and/or modify
@@ -564,7 +564,7 @@
 	
 
 		dump_smb(smb_buffer, length);
-		if (checkSMB (smb_buffer, smb_buffer->Mid, total_read+4)) {
+		if (checkSMB(smb_buffer, smb_buffer->Mid, total_read+4)) {
 			cifs_dump_mem("Bad SMB: ", smb_buffer, 48);
 			continue;
 		}
@@ -1476,6 +1476,14 @@
 			rc = smb_send(*csocket, smb_buf, 0x44,
 				(struct sockaddr *)psin_server);
 			kfree(ses_init_buf);
+			msleep(1); /* RFC1001 layer in at least one server 
+				      requires very short break before negprot
+				      presumably because not expecting negprot
+				      to follow so fast.  This is a simple
+				      solution that works without 
+				      complicating the code and causes no
+				      significant slowing down on mount
+				      for everyone else */
 		}
 		/* else the negprot may still work without this 
 		even though malloc failed */
@@ -1920,27 +1928,34 @@
 		cifs_sb->tcon = tcon;
 		tcon->ses = pSesInfo;
 
-		/* do not care if following two calls succeed - informational only */
+		/* do not care if following two calls succeed - informational */
 		CIFSSMBQFSDeviceInfo(xid, tcon);
 		CIFSSMBQFSAttributeInfo(xid, tcon);
+
 		if (tcon->ses->capabilities & CAP_UNIX) {
 			if(!CIFSSMBQFSUnixInfo(xid, tcon)) {
-				if(!volume_info.no_psx_acl) {
-					if(CIFS_UNIX_POSIX_ACL_CAP & 
-					   le64_to_cpu(tcon->fsUnixInfo.Capability))
-						cFYI(1,("server negotiated posix acl support"));
-						sb->s_flags |= MS_POSIXACL;
+				__u64 cap = 
+				       le64_to_cpu(tcon->fsUnixInfo.Capability);
+				cap &= CIFS_UNIX_CAP_MASK;
+				if(volume_info.no_psx_acl)
+					cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
+				else if(CIFS_UNIX_POSIX_ACL_CAP & cap) {
+					cFYI(1,("negotiated posix acl support"));
+					sb->s_flags |= MS_POSIXACL;
 				}
 
-				/* Try and negotiate POSIX pathnames if we can. */
-				if (volume_info.posix_paths && (CIFS_UNIX_POSIX_PATHNAMES_CAP &
-				    le64_to_cpu(tcon->fsUnixInfo.Capability))) {
-					if (!CIFSSMBSetFSUnixInfo(xid, tcon, CIFS_UNIX_POSIX_PATHNAMES_CAP))  {
-						cFYI(1,("negotiated posix pathnames support"));
-						cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS;
-					} else {
-						cFYI(1,("posix pathnames support requested but not supported"));
-					}
+				if(volume_info.posix_paths == 0)
+					cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
+				else if(cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) {
+					cFYI(1,("negotiate posix pathnames"));
+					cifs_sb->mnt_cifs_flags |= 
+						CIFS_MOUNT_POSIX_PATHS;
+				}
+					
+				cFYI(1,("Negotiate caps 0x%x",(int)cap));
+
+				if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) {
+					cFYI(1,("setting capabilities failed"));
 				}
 			}
 		}
@@ -2278,6 +2293,8 @@
 	smb_buffer->Mid = GetNextMid(ses->server);
 	pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
 	pSMB->req.AndXCommand = 0xFF;
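+	/* cap the advertised size below 64K so the 16-bit MaxBufferSize
+	   field sent below cannot wrap */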
+	if(ses->server->maxBuf > 64*1024)
+		ses->server->maxBuf = (64*1023);
 	pSMB->req.MaxBufferSize = cpu_to_le16(ses->server->maxBuf);
 	pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq);
 
@@ -2525,7 +2542,7 @@
 	__u32 negotiate_flags, capabilities;
 	__u16 count;
 
-	cFYI(1, ("In NTLMSSP sesssetup (negotiate) "));
+	cFYI(1, ("In NTLMSSP sesssetup (negotiate)"));
 	if(ses == NULL)
 		return -EINVAL;
 	domain = ses->domainName;
@@ -2575,7 +2592,8 @@
 	SecurityBlob->MessageType = NtLmNegotiate;
 	negotiate_flags =
 	    NTLMSSP_NEGOTIATE_UNICODE | NTLMSSP_NEGOTIATE_OEM |
-	    NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_NTLM | 0x80000000 |
+	    NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_NTLM |
+	    NTLMSSP_NEGOTIATE_56 |
 	    /* NTLMSSP_NEGOTIATE_ALWAYS_SIGN | */ NTLMSSP_NEGOTIATE_128;
 	if(sign_CIFS_PDUs)
 		negotiate_flags |= NTLMSSP_NEGOTIATE_SIGN;
@@ -2588,26 +2606,11 @@
 	SecurityBlob->WorkstationName.Length = 0;
 	SecurityBlob->WorkstationName.MaximumLength = 0;
 
-	if (domain == NULL) {
-		SecurityBlob->DomainName.Buffer = 0;
-		SecurityBlob->DomainName.Length = 0;
-		SecurityBlob->DomainName.MaximumLength = 0;
-	} else {
-		__u16 len;
-		negotiate_flags |= NTLMSSP_NEGOTIATE_DOMAIN_SUPPLIED;
-		strncpy(bcc_ptr, domain, 63);
-		len = strnlen(domain, 64);
-		SecurityBlob->DomainName.MaximumLength =
-		    cpu_to_le16(len);
-		SecurityBlob->DomainName.Buffer =
-		    cpu_to_le32((long) &SecurityBlob->
-				DomainString -
-				(long) &SecurityBlob->Signature);
-		bcc_ptr += len;
-		SecurityBlobLength += len;
-		SecurityBlob->DomainName.Length =
-		    cpu_to_le16(len);
-	}
+	/* Domain not sent on first Sesssetup in NTLMSSP, instead it is sent
+	along with username on auth request (ie the response to challenge) */
+	SecurityBlob->DomainName.Buffer = 0;
+	SecurityBlob->DomainName.Length = 0;
+	SecurityBlob->DomainName.MaximumLength = 0;
 	if (ses->capabilities & CAP_UNICODE) {
 		if ((long) bcc_ptr % 2) {
 			*bcc_ptr = 0;
@@ -2677,7 +2680,7 @@
 			      SecurityBlob2->MessageType));
 		} else if (ses) {
 			ses->Suid = smb_buffer_response->Uid; /* UID left in le format */ 
-			cFYI(1, ("UID = %d ", ses->Suid));
+			cFYI(1, ("UID = %d", ses->Suid));
 			if ((pSMBr->resp.hdr.WordCount == 3)
 			    || ((pSMBr->resp.hdr.WordCount == 4)
 				&& (blob_len <
@@ -2685,17 +2688,17 @@
 
 				if (pSMBr->resp.hdr.WordCount == 4) {
 					bcc_ptr += blob_len;
-					cFYI(1,
-					     ("Security Blob Length %d ",
+					cFYI(1, ("Security Blob Length %d",
 					      blob_len));
 				}
 
-				cFYI(1, ("NTLMSSP Challenge rcvd "));
+				cFYI(1, ("NTLMSSP Challenge rcvd"));
 
 				memcpy(ses->server->cryptKey,
 				       SecurityBlob2->Challenge,
 				       CIFS_CRYPTO_KEY_SIZE);
-				if(SecurityBlob2->NegotiateFlags & cpu_to_le32(NTLMSSP_NEGOTIATE_NTLMV2))
+				if(SecurityBlob2->NegotiateFlags & 
+					cpu_to_le32(NTLMSSP_NEGOTIATE_NTLMV2))
 					*pNTLMv2_flag = TRUE;
 
 				if((SecurityBlob2->NegotiateFlags & 
@@ -2818,7 +2821,7 @@
 						bcc_ptr++;
 					} else
 						cFYI(1,
-						     ("Variable field of length %d extends beyond end of smb ",
+						     ("Variable field of length %d extends beyond end of smb",
 						      len));
 				}
 			} else {
@@ -2830,7 +2833,7 @@
 		}
 	} else {
 		cERROR(1,
-		       (" Invalid Word count %d: ",
+		       (" Invalid Word count %d:",
 			smb_buffer_response->WordCount));
 		rc = -EIO;
 	}
@@ -3447,7 +3450,7 @@
 		if (extended_security
 				&& (pSesInfo->capabilities & CAP_EXTENDED_SECURITY)
 				&& (pSesInfo->server->secType == NTLMSSP)) {
-			cFYI(1, ("New style sesssetup "));
+			cFYI(1, ("New style sesssetup"));
 			rc = CIFSSpnegoSessSetup(xid, pSesInfo,
 				NULL /* security blob */, 
 				0 /* blob length */,
@@ -3455,7 +3458,7 @@
 		} else if (extended_security
 			   && (pSesInfo->capabilities & CAP_EXTENDED_SECURITY)
 			   && (pSesInfo->server->secType == RawNTLMSSP)) {
-			cFYI(1, ("NTLMSSP sesssetup "));
+			cFYI(1, ("NTLMSSP sesssetup"));
 			rc = CIFSNTLMSSPNegotiateSessSetup(xid,
 						pSesInfo,
 						&ntlmv2_flag,
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 632561d..1d0ca3e 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -48,13 +48,14 @@
 	struct dentry *temp;
 	int namelen = 0;
 	char *full_path;
-	char dirsep = CIFS_DIR_SEP(CIFS_SB(direntry->d_sb));
+	char dirsep;
 
 	if(direntry == NULL)
 		return NULL;  /* not much we can do if dentry is freed and
 		we need to reopen the file after it was closed implicitly
 		when the server crashed */
 
+	dirsep = CIFS_DIR_SEP(CIFS_SB(direntry->d_sb));
 cifs_bp_rename_retry:
 	for (temp = direntry; !IS_ROOT(temp);) {
 		namelen += (1 + temp->d_name.len);
@@ -255,12 +256,10 @@
 			CIFSSMBClose(xid, pTcon, fileHandle);
 		} else if(newinode) {
 			pCifsFile =
-			   kmalloc(sizeof (struct cifsFileInfo), GFP_KERNEL);
+			   kzalloc(sizeof (struct cifsFileInfo), GFP_KERNEL);
 			
 			if(pCifsFile == NULL)
 				goto cifs_create_out;
-			memset((char *)pCifsFile, 0,
-			       sizeof (struct cifsFileInfo));
 			pCifsFile->netfid = fileHandle;
 			pCifsFile->pid = current->tgid;
 			pCifsFile->pInode = newinode;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index fb49aef..5c497c5 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -555,7 +555,10 @@
 		if (ptmp) {
 			cFYI(1, ("closedir free smb buf in srch struct"));
 			pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
-			cifs_buf_release(ptmp);
+			if(pCFileStruct->srch_inf.smallBuf)
+				cifs_small_buf_release(ptmp);
+			else
+				cifs_buf_release(ptmp);
 		}
 		ptmp = pCFileStruct->search_resume_name;
 		if (ptmp) {
@@ -574,13 +577,14 @@
 int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
 {
 	int rc, xid;
-	__u32 lockType = LOCKING_ANDX_LARGE_FILES;
 	__u32 numLock = 0;
 	__u32 numUnlock = 0;
 	__u64 length;
 	int wait_flag = FALSE;
 	struct cifs_sb_info *cifs_sb;
 	struct cifsTconInfo *pTcon;
+	__u16 netfid;
+	__u8 lockType = LOCKING_ANDX_LARGE_FILES;
 
 	length = 1 + pfLock->fl_end - pfLock->fl_start;
 	rc = -EACCES;
@@ -592,11 +596,11 @@
 	        pfLock->fl_end));
 
 	if (pfLock->fl_flags & FL_POSIX)
-		cFYI(1, ("Posix "));
+		cFYI(1, ("Posix"));
 	if (pfLock->fl_flags & FL_FLOCK)
-		cFYI(1, ("Flock "));
+		cFYI(1, ("Flock"));
 	if (pfLock->fl_flags & FL_SLEEP) {
-		cFYI(1, ("Blocking lock "));
+		cFYI(1, ("Blocking lock"));
 		wait_flag = TRUE;
 	}
 	if (pfLock->fl_flags & FL_ACCESS)
@@ -612,21 +616,23 @@
 		cFYI(1, ("F_WRLCK "));
 		numLock = 1;
 	} else if (pfLock->fl_type == F_UNLCK) {
-		cFYI(1, ("F_UNLCK "));
+		cFYI(1, ("F_UNLCK"));
 		numUnlock = 1;
+		/* Check if unlock includes more than
+		one lock range */
 	} else if (pfLock->fl_type == F_RDLCK) {
-		cFYI(1, ("F_RDLCK "));
+		cFYI(1, ("F_RDLCK"));
 		lockType |= LOCKING_ANDX_SHARED_LOCK;
 		numLock = 1;
 	} else if (pfLock->fl_type == F_EXLCK) {
-		cFYI(1, ("F_EXLCK "));
+		cFYI(1, ("F_EXLCK"));
 		numLock = 1;
 	} else if (pfLock->fl_type == F_SHLCK) {
-		cFYI(1, ("F_SHLCK "));
+		cFYI(1, ("F_SHLCK"));
 		lockType |= LOCKING_ANDX_SHARED_LOCK;
 		numLock = 1;
 	} else
-		cFYI(1, ("Unknown type of lock "));
+		cFYI(1, ("Unknown type of lock"));
 
 	cifs_sb = CIFS_SB(file->f_dentry->d_sb);
 	pTcon = cifs_sb->tcon;
@@ -635,27 +641,41 @@
 		FreeXid(xid);
 		return -EBADF;
 	}
+	netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
 
+
+	/* BB add code here to normalize offset and length to
+	account for negative length which we can not accept over the
+	wire */
 	if (IS_GETLK(cmd)) {
-		rc = CIFSSMBLock(xid, pTcon,
-				 ((struct cifsFileInfo *)file->
-				  private_data)->netfid,
-				 length,
-				 pfLock->fl_start, 0, 1, lockType,
-				 0 /* wait flag */ );
+		if(experimEnabled && 
+		   (cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
+		   (CIFS_UNIX_FCNTL_CAP & 
+			le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability))) {
+			int posix_lock_type;
+			if(lockType & LOCKING_ANDX_SHARED_LOCK)
+				posix_lock_type = CIFS_RDLCK;
+			else
+				posix_lock_type = CIFS_WRLCK;
+			rc = CIFSSMBPosixLock(xid, pTcon, netfid, 1 /* get */,
+					length,	pfLock->fl_start,
+					posix_lock_type, wait_flag);
+			FreeXid(xid);
+			return rc;
+		}
+
+		/* BB we could chain these into one lock request BB */
+		rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
+				 0, 1, lockType, 0 /* wait flag */ );
 		if (rc == 0) {
-			rc = CIFSSMBLock(xid, pTcon,
-					 ((struct cifsFileInfo *) file->
-					  private_data)->netfid,
-					 length,
+			rc = CIFSSMBLock(xid, pTcon, netfid, length, 
 					 pfLock->fl_start, 1 /* numUnlock */ ,
 					 0 /* numLock */ , lockType,
 					 0 /* wait flag */ );
 			pfLock->fl_type = F_UNLCK;
 			if (rc != 0)
 				cERROR(1, ("Error unlocking previously locked "
-					   "range %d during test of lock ",
-					   rc));
+					   "range %d during test of lock", rc));
 			rc = 0;
 
 		} else {
@@ -667,12 +687,30 @@
 		FreeXid(xid);
 		return rc;
 	}
-
-	rc = CIFSSMBLock(xid, pTcon,
-			 ((struct cifsFileInfo *) file->private_data)->
-			 netfid, length,
-			 pfLock->fl_start, numUnlock, numLock, lockType,
-			 wait_flag);
+	if (experimEnabled &&
+		(cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
+		(CIFS_UNIX_FCNTL_CAP &
+			 le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability))) {
+		int posix_lock_type;
+		if(lockType & LOCKING_ANDX_SHARED_LOCK)
+			posix_lock_type = CIFS_RDLCK;
+		else
+			posix_lock_type = CIFS_WRLCK;
+		
+		if(numUnlock == 1)
+			posix_lock_type = CIFS_UNLCK;
+		else if(numLock == 0) {
+			/* if no lock or unlock then nothing
+			to do since we do not know what it is */
+			FreeXid(xid);
+			return -EOPNOTSUPP;
+		}
+		rc = CIFSSMBPosixLock(xid, pTcon, netfid, 0 /* set */,
+				      length, pfLock->fl_start,
+				      posix_lock_type, wait_flag);
+	} else
+		rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
+				numUnlock, numLock, lockType, wait_flag);
 	if (pfLock->fl_flags & FL_POSIX)
 		posix_lock_file_wait(file, pfLock);
 	FreeXid(xid);
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 598eec9..957ddd1 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -565,11 +565,14 @@
 	struct cifsInodeInfo *cifsInode;
 	FILE_BASIC_INFO *pinfo_buf;
 
-	cFYI(1, ("cifs_unlink, inode = 0x%p with ", inode));
+	cFYI(1, ("cifs_unlink, inode = 0x%p", inode));
 
 	xid = GetXid();
 
-	cifs_sb = CIFS_SB(inode->i_sb);
+	if(inode)
+		cifs_sb = CIFS_SB(inode->i_sb);
+	else
+		cifs_sb = CIFS_SB(direntry->d_sb);
 	pTcon = cifs_sb->tcon;
 
 	/* Unlink can be called from rename so we can not grab the sem here
@@ -609,9 +612,8 @@
 		}
 	} else if (rc == -EACCES) {
 		/* try only if r/o attribute set in local lookup data? */
-		pinfo_buf = kmalloc(sizeof(FILE_BASIC_INFO), GFP_KERNEL);
+		pinfo_buf = kzalloc(sizeof(FILE_BASIC_INFO), GFP_KERNEL);
 		if (pinfo_buf) {
-			memset(pinfo_buf, 0, sizeof(FILE_BASIC_INFO));
 			/* ATTRS set to normal clears r/o bit */
 			pinfo_buf->Attributes = cpu_to_le32(ATTR_NORMAL);
 			if (!(pTcon->ses->flags & CIFS_SES_NT4))
@@ -693,9 +695,11 @@
 					   when needed */
 		direntry->d_inode->i_ctime = current_fs_time(inode->i_sb);
 	}
-	inode->i_ctime = inode->i_mtime = current_fs_time(inode->i_sb);
-	cifsInode = CIFS_I(inode);
-	cifsInode->time = 0;	/* force revalidate of dir as well */
+	if(inode) {
+		inode->i_ctime = inode->i_mtime = current_fs_time(inode->i_sb);
+		cifsInode = CIFS_I(inode);
+		cifsInode->time = 0;	/* force revalidate of dir as well */
+	}
 
 	kfree(full_path);
 	FreeXid(xid);
@@ -1167,7 +1171,7 @@
 						nfid, npid, FALSE);
 			atomic_dec(&open_file->wrtPending);
 			cFYI(1,("SetFSize for attrs rc = %d", rc));
-			if(rc == -EINVAL) {
+			if((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
 				int bytes_written;
 				rc = CIFSSMBWrite(xid, pTcon,
 						  nfid, 0, attrs->ia_size,
@@ -1189,7 +1193,7 @@
 					   cifs_sb->mnt_cifs_flags &
 						CIFS_MOUNT_MAP_SPECIAL_CHR);
 			cFYI(1, ("SetEOF by path (setattrs) rc = %d", rc));
-			if(rc == -EINVAL) {
+			if((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
 				__u16 netfid;
 				int oplock = FALSE;
 
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index 8d0da7c..9562f5b 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -67,7 +67,7 @@
 					cifs_sb_target->local_nls, 
 					cifs_sb_target->mnt_cifs_flags &
 						CIFS_MOUNT_MAP_SPECIAL_CHR);
-		if(rc == -EIO)
+		if((rc == -EIO) || (rc == -EINVAL))
 			rc = -EOPNOTSUPP;  
 	}
 
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 432ba15..fafd056 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -72,10 +72,9 @@
 	struct cifsSesInfo *ret_buf;
 
 	ret_buf =
-	    (struct cifsSesInfo *) kmalloc(sizeof (struct cifsSesInfo),
+	    (struct cifsSesInfo *) kzalloc(sizeof (struct cifsSesInfo),
 					   GFP_KERNEL);
 	if (ret_buf) {
-		memset(ret_buf, 0, sizeof (struct cifsSesInfo));
 		write_lock(&GlobalSMBSeslock);
 		atomic_inc(&sesInfoAllocCount);
 		ret_buf->status = CifsNew;
@@ -110,10 +109,9 @@
 {
 	struct cifsTconInfo *ret_buf;
 	ret_buf =
-	    (struct cifsTconInfo *) kmalloc(sizeof (struct cifsTconInfo),
+	    (struct cifsTconInfo *) kzalloc(sizeof (struct cifsTconInfo),
 					    GFP_KERNEL);
 	if (ret_buf) {
-		memset(ret_buf, 0, sizeof (struct cifsTconInfo));
 		write_lock(&GlobalSMBSeslock);
 		atomic_inc(&tconInfoAllocCount);
 		list_add(&ret_buf->cifsConnectionList,
@@ -423,9 +421,7 @@
 {
 	__u32 len = smb->smb_buf_length;
 	__u32 clc_len;  /* calculated length */
-	cFYI(0,
-	     ("Entering checkSMB with Length: %x, smb_buf_length: %x",
-	      length, len));
+	cFYI(0, ("checkSMB Length: 0x%x, smb_buf_length: 0x%x", length, len));
 	if (((unsigned int)length < 2 + sizeof (struct smb_hdr)) ||
 	    (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4)) {
 		if ((unsigned int)length < 2 + sizeof (struct smb_hdr)) {
@@ -433,29 +429,36 @@
 				sizeof (struct smb_hdr) - 1)
 			    && (smb->Status.CifsError != 0)) {
 				smb->WordCount = 0;
-				return 0;	/* some error cases do not return wct and bcc */
+				/* some error cases do not return wct and bcc */
+				return 0;
 			} else {
 				cERROR(1, ("Length less than smb header size"));
 			}
-
 		}
 		if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4)
-			cERROR(1,
-			       ("smb_buf_length greater than MaxBufSize"));
-		cERROR(1,
-		       ("bad smb detected. Illegal length. mid=%d",
-			smb->Mid));
+			cERROR(1, ("smb length greater than MaxBufSize, mid=%d",
+				   smb->Mid));
 		return 1;
 	}
 
 	if (checkSMBhdr(smb, mid))
 		return 1;
 	clc_len = smbCalcSize_LE(smb);
-	if ((4 + len != clc_len)
-	    || (4 + len != (unsigned int)length)) {
-		cERROR(1, ("Calculated size 0x%x vs actual length 0x%x",
-				clc_len, 4 + len));
-		cERROR(1, ("bad smb size detected for Mid=%d", smb->Mid));
+
+	if(4 + len != (unsigned int)length) {
+		cERROR(1, ("Length read does not match RFC1001 length %d",len));
+		return 1;
+	}
+
+	if (4 + len != clc_len) {
+		/* check if bcc wrapped around for large read responses */
+		if((len > 64 * 1024) && (len > clc_len)) {
+			/* check if lengths match mod 64K */
+			if(((4 + len) & 0xFFFF) == (clc_len & 0xFFFF))
+				return 0; /* bcc wrapped */			
+		}
+		cFYI(1, ("Calculated size %d vs length %d mismatch for mid %d",
+				clc_len, 4 + len, smb->Mid));
 		/* Windows XP can return a few bytes too much, presumably
 		an illegal pad, at the end of byte range lock responses 
 		so we allow for that three byte pad, as long as actual
@@ -469,8 +472,11 @@
 		wct and bcc to minimum size and drop the t2 parms and data */
 		if((4+len > clc_len) && (len <= clc_len + 512))
 			return 0;
-		else
+		else {
+			cERROR(1, ("RFC1001 size %d bigger than SMB for Mid=%d",
+					len, smb->Mid));
 			return 1;
+		}
 	}
 	return 0;
 }
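
The rewritten checkSMB() above splits the validation into two steps: the RFC1001 length must match the number of bytes actually read, and a mismatch against the calculated SMB size is tolerated when it can be explained by the 16-bit byte count field wrapping at 64K in a large read response. A small stand-alone sketch of that wrap test in plain user-space C; the lengths in main() are made up:

#include <stdio.h>

/* Return 1 if a length mismatch can be explained by the 16-bit byte
 * count (bcc) wrapping around in a large (>64K) read response. */
static int bcc_wrapped(unsigned int rfc1001_len, unsigned int calc_len)
{
	if (rfc1001_len > 64 * 1024 && rfc1001_len > calc_len)
		return ((4 + rfc1001_len) & 0xFFFF) == (calc_len & 0xFFFF);
	return 0;
}

int main(void)
{
	/* Example: a 70000-byte response whose calculated size lost 64K. */
	unsigned int len = 70000;
	unsigned int clc = (4 + len) - 65536;

	printf("wrapped: %d\n", bcc_wrapped(len, clc));
	return 0;
}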
diff --git a/fs/cifs/ntlmssp.c b/fs/cifs/ntlmssp.c
new file mode 100644
index 0000000..78866f9
--- /dev/null
+++ b/fs/cifs/ntlmssp.c
@@ -0,0 +1,129 @@
+/*
+ *   fs/cifs/ntlmssp.c
+ *
+ *   Copyright (c) International Business Machines  Corp., 2006
+ *   Author(s): Steve French (sfrench@us.ibm.com)
+ *
+ *   This library is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU Lesser General Public License as published
+ *   by the Free Software Foundation; either version 2.1 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This library is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU Lesser General Public License for more details.
+ *
+ *   You should have received a copy of the GNU Lesser General Public License
+ *   along with this library; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "cifspdu.h"
+#include "cifsglob.h"
+#include "cifsproto.h"
+#include "cifs_unicode.h"
+#include "cifs_debug.h"
+#include "ntlmssp.h"
+#include "nterr.h"
+
+#ifdef CONFIG_CIFS_EXPERIMENTAL
+static __u32 cifs_ssetup_hdr(struct cifsSesInfo *ses, SESSION_SETUP_ANDX *pSMB)
+{
+	__u32 capabilities = 0;
+
+	/* init fields common to all four types of SessSetup */
+	/* note that header is initialized to zero in header_assemble */
+	pSMB->req.AndXCommand = 0xFF;
+	pSMB->req.MaxBufferSize = cpu_to_le16(ses->server->maxBuf);
+	pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq);
+
+	/* Now no need to set SMBFLG_CASELESS or obsolete CANONICAL PATH */
+
+	/* BB verify whether signing required on neg or just on auth frame 
+	   (and NTLM case) */
+
+	capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS |
+			CAP_LARGE_WRITE_X | CAP_LARGE_READ_X;
+
+	if(ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+		pSMB->req.hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
+
+	if (ses->capabilities & CAP_UNICODE) {
+		pSMB->req.hdr.Flags2 |= SMBFLG2_UNICODE;
+		capabilities |= CAP_UNICODE;
+	}
+	if (ses->capabilities & CAP_STATUS32) {
+		pSMB->req.hdr.Flags2 |= SMBFLG2_ERR_STATUS;
+		capabilities |= CAP_STATUS32;
+	}
+	if (ses->capabilities & CAP_DFS) {
+		pSMB->req.hdr.Flags2 |= SMBFLG2_DFS;
+		capabilities |= CAP_DFS;
+	}
+
+	/* BB check whether to init vcnum BB */
+	return capabilities;
+}
+int 
+CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, const int type,
+		  int * pNTLMv2_flg, const struct nls_table *nls_cp)
+{
+	int rc = 0;
+	int wct;
+	struct smb_hdr *smb_buffer;
+	char *bcc_ptr;
+	SESSION_SETUP_ANDX *pSMB;
+	__u32 capabilities;
+
+	if(ses == NULL)
+		return -EINVAL;
+
+	cFYI(1,("SStp type: %d",type));
+	if(type < CIFS_NTLM) {
+#ifndef CONFIG_CIFS_WEAK_PW_HASH
+		/* LANMAN and plaintext are less secure and off by default,
+		so they must be enabled explicitly in kconfig (at build
+		time) and turned on at runtime (changing the default) via
+		/proc/fs/cifs or a mount parm.  Unfortunately this is
+		needed for old Win (e.g. Win95), some obscure NAS and OS/2 */
+		return -EOPNOTSUPP;
+#endif
+		wct = 10; /* lanman 2 style sessionsetup */
+	} else if(type < CIFS_NTLMSSP_NEG)
+		wct = 13; /* old style NTLM sessionsetup */
+	else /* same size for negotiate or auth, NTLMSSP or extended security */
+		wct = 12;
+
+	rc = small_smb_init_no_tc(SMB_COM_SESSION_SETUP_ANDX, wct, ses,
+			    (void **)&smb_buffer);
+	if(rc)
+		return rc;
+
+	pSMB = (SESSION_SETUP_ANDX *)smb_buffer;
+
+	capabilities = cifs_ssetup_hdr(ses, pSMB);
+	bcc_ptr = pByteArea(smb_buffer);
+	if(type > CIFS_NTLM) {
+		pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
+		capabilities |= CAP_EXTENDED_SECURITY;
+		pSMB->req.Capabilities = cpu_to_le32(capabilities);
+		/* BB set password lengths */
+	} else if(type < CIFS_NTLM) /* lanman */ {
+		/* no capabilities flags in old lanman negotiation */
+		/* pSMB->old_req.PasswordLength = */ /* BB fixme BB */
+	} else /* type CIFS_NTLM */ {
+		pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities);
+		pSMB->req_no_secext.CaseInsensitivePasswordLength =
+			cpu_to_le16(CIFS_SESSION_KEY_SIZE);
+		pSMB->req_no_secext.CaseSensitivePasswordLength =
+			cpu_to_le16(CIFS_SESSION_KEY_SIZE);
+	}
+
+
+/*	rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buf_type, 0); */
+	/* SMB request buf freed in SendReceive2 */
+
+	return rc;
+}
+#endif /* CONFIG_CIFS_EXPERIMENTAL */
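
The new CIFS_SessSetup() in ntlmssp.c (still stubbed out behind CONFIG_CIFS_EXPERIMENTAL, with the SendReceive2() call commented out) chooses the SessionSetupAndX word count from the authentication flavor: 10 words for a LANMAN 2 style setup, 13 for old-style NTLM, and 12 for NTLMSSP negotiate/auth or other extended security. A stand-alone sketch of just that selection, using hypothetical enum values in place of the real CIFS_NTLM/CIFS_NTLMSSP_NEG constants:

#include <stdio.h>

/* Hypothetical stand-ins for the CIFS session setup type constants;
 * the real values live in the CIFS headers. */
enum setup_type { LANMAN = 0, NTLM = 1, NTLMSSP_NEG = 2, NTLMSSP_AUTH = 3 };

/* Word count of the SessionSetupAndX request for each setup flavor. */
static int setup_wct(enum setup_type type)
{
	if (type < NTLM)
		return 10;	/* LANMAN 2 style session setup */
	if (type < NTLMSSP_NEG)
		return 13;	/* old style NTLM session setup */
	return 12;		/* NTLMSSP or other extended security */
}

int main(void)
{
	printf("lanman=%d ntlm=%d ntlmssp=%d\n",
	       setup_wct(LANMAN), setup_wct(NTLM), setup_wct(NTLMSSP_AUTH));
	return 0;
}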
diff --git a/fs/cifs/ntlmssp.h b/fs/cifs/ntlmssp.h
index 803389b..d39b712 100644
--- a/fs/cifs/ntlmssp.h
+++ b/fs/cifs/ntlmssp.h
@@ -1,7 +1,7 @@
 /*
  *   fs/cifs/ntlmssp.h
  *
- *   Copyright (c) International Business Machines  Corp., 2002
+ *   Copyright (c) International Business Machines  Corp., 2002,2006
  *   Author(s): Steve French (sfrench@us.ibm.com)
  *
  *   This library is free software; you can redistribute it and/or modify
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 488bd0d8..2f6e282 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -604,7 +604,12 @@
 		cifsFile->search_resume_name = NULL;
 		if(cifsFile->srch_inf.ntwrk_buf_start) {
 			cFYI(1,("freeing SMB ff cache buf on search rewind"));
-			cifs_buf_release(cifsFile->srch_inf.ntwrk_buf_start);
+			if(cifsFile->srch_inf.smallBuf)
+				cifs_small_buf_release(cifsFile->srch_inf.
+						ntwrk_buf_start);
+			else
+				cifs_buf_release(cifsFile->srch_inf.
+						ntwrk_buf_start);
 		}
 		rc = initiate_cifs_search(xid,file);
 		if(rc) {
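
The readdir fix above releases the cached search response through the allocator that produced it: buffers flagged as small go back via cifs_small_buf_release(), everything else via cifs_buf_release(), since the two come from different pools. A minimal stand-alone sketch of the "free with the matching pool" pattern; the release helpers here are hypothetical, not the CIFS ones:

#include <stdlib.h>

struct search_cache {
	void *buf;
	int   small;	/* nonzero if buf came from the small-buffer pool */
};

/* Hypothetical stand-ins for cifs_small_buf_release()/cifs_buf_release(). */
static void small_pool_release(void *p) { free(p); }
static void big_pool_release(void *p)   { free(p); }

/* Release the cached response with the allocator that produced it;
 * mixing up the two pools would corrupt the allocator state. */
static void release_search_buf(struct search_cache *c)
{
	if (!c->buf)
		return;
	if (c->small)
		small_pool_release(c->buf);
	else
		big_pool_release(c->buf);
	c->buf = NULL;
}

int main(void)
{
	struct search_cache c = { malloc(64), 1 };

	release_search_buf(&c);
	return 0;
}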
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index b12cb8a..3da8040 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -309,17 +309,16 @@
 	
 	*pRespBufType = CIFS_NO_BUFFER;  /* no response buf yet */
 
-	if (ses == NULL) {
-		cERROR(1,("Null smb session"));
-		return -EIO;
-	}
-	if(ses->server == NULL) {
-		cERROR(1,("Null tcp session"));
+	if ((ses == NULL) || (ses->server == NULL)) {
+		cifs_small_buf_release(in_buf);
+		cERROR(1,("Null session"));
 		return -EIO;
 	}
 
-	if(ses->server->tcpStatus == CifsExiting)
+	if(ses->server->tcpStatus == CifsExiting) {
+		cifs_small_buf_release(in_buf);
 		return -ENOENT;
+	}
 
 	/* Ensure that we do not send more than 50 overlapping requests 
 	   to the same server. We may make this configurable later or
@@ -346,6 +345,7 @@
 			} else {
 				if(ses->server->tcpStatus == CifsExiting) {
 					spin_unlock(&GlobalMid_Lock);
+					cifs_small_buf_release(in_buf);
 					return -ENOENT;
 				}
 
@@ -385,6 +385,7 @@
 	midQ = AllocMidQEntry(in_buf, ses);
 	if (midQ == NULL) {
 		up(&ses->server->tcpSem);
+		cifs_small_buf_release(in_buf);
 		/* If not lock req, update # of requests on wire to server */
 		if(long_op < 3) {
 			atomic_dec(&ses->server->inFlight); 
@@ -408,14 +409,18 @@
 	if(rc < 0) {
 		DeleteMidQEntry(midQ);
 		up(&ses->server->tcpSem);
+		cifs_small_buf_release(in_buf);
 		/* If not lock req, update # of requests on wire to server */
 		if(long_op < 3) {
 			atomic_dec(&ses->server->inFlight); 
 			wake_up(&ses->server->request_q);
 		}
 		return rc;
-	} else
+	} else {
 		up(&ses->server->tcpSem);
+		cifs_small_buf_release(in_buf);
+	}
+
 	if (long_op == -1)
 		goto cifs_no_response_exit2;
 	else if (long_op == 2) /* writes past end of file can take loong time */
@@ -543,6 +548,7 @@
 
 out_unlock2:
 	up(&ses->server->tcpSem);
+	cifs_small_buf_release(in_buf);
 	/* If not lock req, update # of requests on wire to server */
 	if(long_op < 3) {
 		atomic_dec(&ses->server->inFlight); 
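
The transport changes above plug a leak: SendReceive2() now releases the small request buffer on every exit path, including the early returns for a missing session, a server that is tearing down, and mid-queue allocation or send failures. In effect the routine always consumes in_buf, so callers must not free it themselves after the call. A rough stand-alone sketch of that ownership convention; the helper names are made up:

#include <stdlib.h>
#include <errno.h>

/* Hypothetical request-buffer allocator and release, standing in for
 * the CIFS small-buffer pool helpers. */
static void *req_buf_get(void)        { return malloc(128); }
static void  req_buf_release(void *p) { free(p); }

/* The callee owns req on every path: success or failure, it releases it. */
static int send_request(void *req, int simulate_error)
{
	int rc = 0;

	if (simulate_error)
		rc = -EIO;
	/* ... build and transmit the request here ... */

	req_buf_release(req);	/* released on all exit paths */
	return rc;
}

int main(void)
{
	void *req = req_buf_get();

	/* The caller hands the buffer off and never frees it afterwards. */
	return send_request(req, 1) == -EIO ? 0 : 1;
}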
diff --git a/include/asm-arm/arch-ixp23xx/uncompress.h b/include/asm-arm/arch-ixp23xx/uncompress.h
index 62623fa..013575e 100644
--- a/include/asm-arm/arch-ixp23xx/uncompress.h
+++ b/include/asm-arm/arch-ixp23xx/uncompress.h
@@ -16,26 +16,21 @@
 
 #define UART_BASE	((volatile u32 *)IXP23XX_UART1_PHYS)
 
-static __inline__ void putc(char c)
+static inline void putc(char c)
 {
 	int j;
 
 	for (j = 0; j < 0x1000; j++) {
 		if (UART_BASE[UART_LSR] & UART_LSR_THRE)
 			break;
+		barrier();
 	}
 
 	UART_BASE[UART_TX] = c;
 }
 
-static void putstr(const char *s)
+static inline void flush(void)
 {
-	while (*s) {
-		putc(*s);
-		if (*s == '\n')
-			putc('\r');
-		s++;
-	}
 }
 
 #define arch_decomp_setup()
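
The decompressor hunk above switches this machine type to the newer putc()/flush() interface, so that the shared decompressor code can provide putstr() on top of them, and adds barrier() to the bounded polling loop: a compiler barrier that keeps the wait iterations from being merged or moved while the code spins on the transmitter-ready bit. A stand-alone sketch of the same bounded-poll pattern; the status variable and bit below are made up for illustration:

/* Stand-alone sketch; in the kernel, barrier() comes from <linux/compiler.h>. */
#define barrier() __asm__ __volatile__("" : : : "memory")

#define LSR_THRE 0x20	/* made-up "transmitter empty" bit for illustration */

static volatile unsigned int fake_lsr = LSR_THRE;	/* pretend UART status */

static void wait_for_tx_ready(void)
{
	int j;

	/* Bounded poll: give up after 0x1000 iterations so a dead UART
	 * cannot hang the caller.  barrier() stops the compiler from
	 * moving or merging the iterations of the wait loop. */
	for (j = 0; j < 0x1000; j++) {
		if (fake_lsr & LSR_THRE)
			break;
		barrier();
	}
}

int main(void)
{
	wait_for_tx_ready();
	return 0;
}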
diff --git a/include/asm-arm/arch-pxa/pxa-regs.h b/include/asm-arm/arch-pxa/pxa-regs.h
index 1409c5b..c8f53a7 100644
--- a/include/asm-arm/arch-pxa/pxa-regs.h
+++ b/include/asm-arm/arch-pxa/pxa-regs.h
@@ -485,7 +485,7 @@
 #define SACR1_ENLBF	(1 << 5)	/* Enable Loopback */
 #define SACR1_DRPL	(1 << 4) 	/* Disable Replaying Function */
 #define SACR1_DREC	(1 << 3)	/* Disable Recording Function */
-#define SACR1_AMSL	(1 << 1)	/* Specify Alternate Mode */
+#define SACR1_AMSL	(1 << 0)	/* Specify Alternate Mode */
 
 #define SASR0_I2SOFF	(1 << 7)	/* Controller Status */
 #define SASR0_ROR	(1 << 6)	/* Rx FIFO Overrun */
diff --git a/include/asm-arm/unistd.h b/include/asm-arm/unistd.h
index 8f331bb..65ac305 100644
--- a/include/asm-arm/unistd.h
+++ b/include/asm-arm/unistd.h
@@ -308,8 +308,6 @@
 #define __NR_mq_notify			(__NR_SYSCALL_BASE+278)
 #define __NR_mq_getsetattr		(__NR_SYSCALL_BASE+279)
 #define __NR_waitid			(__NR_SYSCALL_BASE+280)
-
-#if defined(__ARM_EABI__)  /* reserve these for un-muxing socketcall */
 #define __NR_socket			(__NR_SYSCALL_BASE+281)
 #define __NR_bind			(__NR_SYSCALL_BASE+282)
 #define __NR_connect			(__NR_SYSCALL_BASE+283)
@@ -327,9 +325,6 @@
 #define __NR_getsockopt			(__NR_SYSCALL_BASE+295)
 #define __NR_sendmsg			(__NR_SYSCALL_BASE+296)
 #define __NR_recvmsg			(__NR_SYSCALL_BASE+297)
-#endif
-
-#if defined(__ARM_EABI__)  /* reserve these for un-muxing ipc */
 #define __NR_semop			(__NR_SYSCALL_BASE+298)
 #define __NR_semget			(__NR_SYSCALL_BASE+299)
 #define __NR_semctl			(__NR_SYSCALL_BASE+300)
@@ -341,16 +336,10 @@
 #define __NR_shmdt			(__NR_SYSCALL_BASE+306)
 #define __NR_shmget			(__NR_SYSCALL_BASE+307)
 #define __NR_shmctl			(__NR_SYSCALL_BASE+308)
-#endif
-
 #define __NR_add_key			(__NR_SYSCALL_BASE+309)
 #define __NR_request_key		(__NR_SYSCALL_BASE+310)
 #define __NR_keyctl			(__NR_SYSCALL_BASE+311)
-
-#if defined(__ARM_EABI__)  /* reserved for un-muxing ipc */
 #define __NR_semtimedop			(__NR_SYSCALL_BASE+312)
-#endif
-
 #define __NR_vserver			(__NR_SYSCALL_BASE+313)
 #define __NR_ioprio_set			(__NR_SYSCALL_BASE+314)
 #define __NR_ioprio_get			(__NR_SYSCALL_BASE+315)
diff --git a/include/asm-ia64/pal.h b/include/asm-ia64/pal.h
index 4e7e6f2..37e52a2 100644
--- a/include/asm-ia64/pal.h
+++ b/include/asm-ia64/pal.h
@@ -68,6 +68,7 @@
 #define PAL_SHUTDOWN		40	/* enter processor shutdown state */
 #define PAL_PREFETCH_VISIBILITY	41	/* Make Processor Prefetches Visible */
 #define PAL_LOGICAL_TO_PHYSICAL 42	/* returns information on logical to physical processor mapping */
+#define PAL_CACHE_SHARED_INFO	43	/* returns information on caches shared by a logical processor */
 
 #define PAL_COPY_PAL		256	/* relocate PAL procedures and PAL PMI */
 #define PAL_HALT_INFO		257	/* return the low power capabilities of processor */
@@ -130,7 +131,7 @@
 #define PAL_CACHE_LINE_STATE_MODIFIED	3	/* Modified */
 
 typedef struct pal_freq_ratio {
-	u64 den : 32, num : 32;	/* numerator & denominator */
+	u32 den, num;		/* numerator & denominator */
 } itc_ratio, proc_ratio;
 
 typedef	union  pal_cache_config_info_1_s {
@@ -151,10 +152,10 @@
 
 typedef	union  pal_cache_config_info_2_s {
 	struct {
-		u64		cache_size	: 32,	/*cache size in bytes*/
+		u32		cache_size;		/*cache size in bytes*/
 
 
-				alias_boundary	: 8,	/* 39-32 aliased addr
+		u32		alias_boundary	: 8,	/* 39-32 aliased addr
 							 * separation for max
 							 * performance.
 							 */
@@ -1647,6 +1648,33 @@
 
 	return iprv.status;
 }
+
+typedef struct pal_cache_shared_info_s
+{
+	u64 num_shared;
+	pal_proc_n_log_info1_t ppli1;
+	pal_proc_n_log_info2_t ppli2;
+} pal_cache_shared_info_t;
+
+/* Get information on the caches shared by a logical processor. */
+static inline s64
+ia64_pal_cache_shared_info(u64 level,
+		u64 type,
+		u64 proc_number,
+		pal_cache_shared_info_t *info)
+{
+	struct ia64_pal_retval iprv;
+
+	PAL_CALL(iprv, PAL_CACHE_SHARED_INFO, level, type, proc_number);
+
+	if (iprv.status == PAL_STATUS_SUCCESS) {
+		info->num_shared = iprv.v0;
+		info->ppli1.ppli1_data = iprv.v1;
+		info->ppli2.ppli2_data = iprv.v2;
+	}
+
+	return iprv.status;
+}
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_IA64_PAL_H */
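
The added ia64_pal_cache_shared_info() wrapper follows the usual PAL convention: make the firmware call, then copy the returned registers into the caller's structure only when the status is PAL_STATUS_SUCCESS. A hedged sketch of how a caller might use it (kernel context; the level, type and processor-number values a real caller passes come from the PAL cache documentation, not from this example):

#include <asm/pal.h>
#include <linux/kernel.h>

/* Illustrative only: query sharing information for one cache and report
 * how many logical processors share it. */
static void report_shared_cache(u64 level, u64 type, u64 proc)
{
	pal_cache_shared_info_t info;
	s64 status = ia64_pal_cache_shared_info(level, type, proc, &info);

	if (status == PAL_STATUS_SUCCESS)
		printk(KERN_INFO "cache level %llu shared by %llu threads\n",
		       (unsigned long long)level,
		       (unsigned long long)info.num_shared);
	else
		printk(KERN_INFO "PAL_CACHE_SHARED_INFO failed: %lld\n",
		       (long long)status);
}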