Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2 | * (C) Copyright 2002 Linus Torvalds |
Ingo Molnar | e6e5494 | 2006-06-27 02:53:50 -0700 | [diff] [blame] | 3 | * Portions based on the vdso-randomization code from exec-shield: |
| 4 | * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5 | * |
| 6 | * This file contains the needed initializations to support sysenter. |
| 7 | */ |
| 8 | |
| 9 | #include <linux/init.h> |
| 10 | #include <linux/smp.h> |
| 11 | #include <linux/thread_info.h> |
| 12 | #include <linux/sched.h> |
| 13 | #include <linux/gfp.h> |
| 14 | #include <linux/string.h> |
| 15 | #include <linux/elf.h> |
Ingo Molnar | e6e5494 | 2006-06-27 02:53:50 -0700 | [diff] [blame] | 16 | #include <linux/mm.h> |
Alexey Dobriyan | 4e950f6 | 2007-07-30 02:36:13 +0400 | [diff] [blame] | 17 | #include <linux/err.h> |
Ingo Molnar | e6e5494 | 2006-06-27 02:53:50 -0700 | [diff] [blame] | 18 | #include <linux/module.h> |
Stefani Seibold | 4e40112 | 2014-03-17 23:22:13 +0100 | [diff] [blame^] | 19 | #include <linux/slab.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 20 | |
| 21 | #include <asm/cpufeature.h> |
| 22 | #include <asm/msr.h> |
| 23 | #include <asm/pgtable.h> |
| 24 | #include <asm/unistd.h> |
Jeremy Fitzhardinge | d4f7a2c | 2007-05-02 19:27:12 +0200 | [diff] [blame] | 25 | #include <asm/elf.h> |
Jeremy Fitzhardinge | 1dbf527c | 2007-05-02 19:27:12 +0200 | [diff] [blame] | 26 | #include <asm/tlbflush.h> |
Roland McGrath | 6c3652e | 2008-01-30 13:30:42 +0100 | [diff] [blame] | 27 | #include <asm/vdso.h> |
Roland McGrath | af65d64 | 2008-01-30 13:30:43 +0100 | [diff] [blame] | 28 | #include <asm/proto.h> |
Stefani Seibold | 7a59ed4 | 2014-03-17 23:22:09 +0100 | [diff] [blame] | 29 | #include <asm/fixmap.h> |
| 30 | #include <asm/hpet.h> |
| 31 | #include <asm/vvar.h> |
Jeremy Fitzhardinge | 1dbf527c | 2007-05-02 19:27:12 +0200 | [diff] [blame] | 32 | |
/*
 * With CONFIG_COMPAT_VDSO the fixed-address compat vDSO layout is in
 * effect, so default the 32-bit vDSO to disabled; otherwise enable it.
 */
#ifdef CONFIG_COMPAT_VDSO
#define VDSO_DEFAULT 0
#else
#define VDSO_DEFAULT 1
#endif

#ifdef CONFIG_X86_64
/*
 * On 64-bit kernels this file builds the 32-bit compat vDSO; the knob and
 * the entry point are spelled differently there, so alias them.
 */
#define vdso_enabled sysctl_vsyscall32
#define arch_setup_additional_pages syscall32_setup_pages
#endif

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = VDSO_DEFAULT;
Ingo Molnar | e6e5494 | 2006-06-27 02:53:50 -0700 | [diff] [blame] | 49 | |
Ingo Molnar | e6e5494 | 2006-06-27 02:53:50 -0700 | [diff] [blame] | 50 | static int __init vdso_setup(char *s) |
| 51 | { |
| 52 | vdso_enabled = simple_strtoul(s, NULL, 0); |
| 53 | |
Andy Lutomirski | b0b49f2 | 2014-03-13 16:01:26 -0700 | [diff] [blame] | 54 | if (vdso_enabled > 1) |
| 55 | pr_warn("vdso32 values other than 0 and 1 are no longer allowed; vdso disabled\n"); |
| 56 | |
Ingo Molnar | e6e5494 | 2006-06-27 02:53:50 -0700 | [diff] [blame] | 57 | return 1; |
| 58 | } |
| 59 | |
/*
 * For consistency, the argument vdso32=[012] affects the 32-bit vDSO
 * behavior on both 64-bit and 32-bit kernels.
 * On 32-bit kernels, vdso=[012] means the same thing.
 */
__setup("vdso32=", vdso_setup);

#ifdef CONFIG_X86_32
/* Legacy spelling: plain "vdso=" is only accepted on native 32-bit kernels. */
__setup_param("vdso=", vdso32_setup, vdso_setup, 0);

EXPORT_SYMBOL_GPL(vdso_enabled);
#endif
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 72 | |
/* Page array backing the chosen 32-bit vDSO image; filled by sysenter_setup(). */
static struct page **vdso32_pages;
/* Number of entries in vdso32_pages (image length rounded up to pages). */
static unsigned int vdso32_size;
Roland McGrath | af65d64 | 2008-01-30 13:30:43 +0100 | [diff] [blame] | 75 | |
#ifdef CONFIG_X86_64

/* Which compat-mode fast-syscall mechanisms does this CPU support? */
#define vdso32_sysenter() (boot_cpu_has(X86_FEATURE_SYSENTER32))
#define vdso32_syscall() (boot_cpu_has(X86_FEATURE_SYSCALL32))

/* May not be __init: called during resume */
void syscall32_cpu_init(void)
{
	/* Load these always in case some future AMD CPU supports
	   SYSENTER from compat mode too. */
	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);

	/* Compat SYSCALL entry point; wrmsrl (unguarded) since SYSCALL
	   support is implied on 64-bit CPUs. */
	wrmsrl(MSR_CSTAR, ia32_cstar_target);
}
| 92 | |
#else /* CONFIG_X86_32 */

/* On native 32-bit, only SYSENTER (SEP) is usable; SYSCALL is not. */
#define vdso32_sysenter() (boot_cpu_has(X86_FEATURE_SEP))
#define vdso32_syscall() (0)

/*
 * Program this CPU's SYSENTER MSRs so the sysenter instruction lands in
 * ia32_sysenter_target with a valid kernel stack. Runs with preemption
 * disabled (get_cpu) because it touches per-CPU state.
 */
void enable_sep_cpu(void)
{
	int cpu = get_cpu();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);

	if (!boot_cpu_has(X86_FEATURE_SEP)) {
		put_cpu();
		return;
	}

	/* Kernel stack for sysenter: top of this CPU's TSS structure. */
	tss->x86_tss.ss1 = __KERNEL_CS;
	tss->x86_tss.sp1 = sizeof(struct tss_struct) + (unsigned long) tss;
	wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
	wrmsr(MSR_IA32_SYSENTER_ESP, tss->x86_tss.sp1, 0);
	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) ia32_sysenter_target, 0);
	put_cpu();
}

#endif /* CONFIG_X86_64 */
| 117 | |
Jeremy Fitzhardinge | a6c4e07 | 2007-05-02 19:27:12 +0200 | [diff] [blame] | 118 | int __init sysenter_setup(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 119 | { |
Stefani Seibold | 4e40112 | 2014-03-17 23:22:13 +0100 | [diff] [blame^] | 120 | void *vdso_pages; |
Andy Lutomirski | b4b541a | 2014-03-17 23:22:08 +0100 | [diff] [blame] | 121 | const void *vdso; |
| 122 | size_t vdso_len; |
Stefani Seibold | 4e40112 | 2014-03-17 23:22:13 +0100 | [diff] [blame^] | 123 | unsigned int i; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 124 | |
Jeremy Fitzhardinge | 6a52e4b | 2008-07-12 02:22:00 -0700 | [diff] [blame] | 125 | if (vdso32_syscall()) { |
Andy Lutomirski | b4b541a | 2014-03-17 23:22:08 +0100 | [diff] [blame] | 126 | vdso = &vdso32_syscall_start; |
| 127 | vdso_len = &vdso32_syscall_end - &vdso32_syscall_start; |
Jeremy Fitzhardinge | 6a52e4b | 2008-07-12 02:22:00 -0700 | [diff] [blame] | 128 | } else if (vdso32_sysenter()){ |
Andy Lutomirski | b4b541a | 2014-03-17 23:22:08 +0100 | [diff] [blame] | 129 | vdso = &vdso32_sysenter_start; |
| 130 | vdso_len = &vdso32_sysenter_end - &vdso32_sysenter_start; |
Jeremy Fitzhardinge | 6a52e4b | 2008-07-12 02:22:00 -0700 | [diff] [blame] | 131 | } else { |
Andy Lutomirski | b4b541a | 2014-03-17 23:22:08 +0100 | [diff] [blame] | 132 | vdso = &vdso32_int80_start; |
| 133 | vdso_len = &vdso32_int80_end - &vdso32_int80_start; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 134 | } |
| 135 | |
Stefani Seibold | 4e40112 | 2014-03-17 23:22:13 +0100 | [diff] [blame^] | 136 | vdso32_size = (vdso_len + PAGE_SIZE - 1) / PAGE_SIZE; |
| 137 | vdso32_pages = kmalloc(sizeof(*vdso32_pages) * vdso32_size, GFP_ATOMIC); |
| 138 | vdso_pages = kmalloc(VDSO_OFFSET(vdso32_size), GFP_ATOMIC); |
| 139 | |
| 140 | for(i = 0; i != vdso32_size; ++i) |
| 141 | vdso32_pages[i] = virt_to_page(vdso_pages + VDSO_OFFSET(i)); |
| 142 | |
| 143 | memcpy(vdso_pages, vdso, vdso_len); |
| 144 | patch_vdso32(vdso_pages, vdso_len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 145 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 146 | return 0; |
| 147 | } |
Ingo Molnar | e6e5494 | 2006-06-27 02:53:50 -0700 | [diff] [blame] | 148 | |
/*
 * Setup a VMA at program startup for the vsyscall page.
 *
 * Maps, under mmap_sem held for write:
 *   - the vDSO code pages (vdso32_pages) at a kernel-chosen address;
 *   - a read-only "prev pages" region just below it, into which the
 *     vvar page and (if present) the HPET registers are remapped.
 * On any failure the cached context.vdso pointer is cleared so the
 * process is left with no (partial) vDSO.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret = 0;
	struct vm_area_struct *vma;

#ifdef CONFIG_X86_X32_ABI
	/* x32 processes get their own (64-bit style) vDSO instead. */
	if (test_thread_flag(TIF_X32))
		return x32_setup_additional_pages(bprm, uses_interp);
#endif

	if (vdso_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	down_write(&mm->mmap_sem);

	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	/* Publish the chosen base so arch_vma_name()/signal code can find it. */
	current->mm->context.vdso = (void *)addr;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	ret = install_special_mapping(mm,
			addr,
			VDSO_OFFSET(vdso32_size),
			VM_READ|VM_EXEC|
			VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
			vdso32_pages);

	if (ret)
		goto up_fail;

	/* Read-only data region directly below the code pages. */
	vma = _install_special_mapping(mm,
			addr - VDSO_OFFSET(VDSO_PREV_PAGES),
			VDSO_OFFSET(VDSO_PREV_PAGES),
			VM_READ,
			NULL);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	/* Map the shared kernel variable page (vvar) read-only. */
	ret = remap_pfn_range(vma,
			addr - VDSO_OFFSET(VDSO_VVAR_PAGE),
			__pa_symbol(&__vvar_page) >> PAGE_SHIFT,
			PAGE_SIZE,
			PAGE_READONLY);

	if (ret)
		goto up_fail;

#ifdef CONFIG_HPET_TIMER
	/* Expose the HPET MMIO page (uncached) for userspace clock reads. */
	if (hpet_address) {
		ret = io_remap_pfn_range(vma,
			addr - VDSO_OFFSET(VDSO_HPET_PAGE),
			hpet_address >> PAGE_SHIFT,
			PAGE_SIZE,
			pgprot_noncached(PAGE_READONLY));

		if (ret)
			goto up_fail;
	}
#endif

	/* Where the SYSENTER fast path returns to in the new mapping. */
	current_thread_info()->sysenter_return =
		VDSO32_SYMBOL(addr, SYSENTER_RETURN);

up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);

	return ret;
}
| 232 | |
#ifdef CONFIG_X86_64

/* On 64-bit kernels, build the compat vDSO image during boot. */
subsys_initcall(sysenter_setup);
Roland McGrath | af65d64 | 2008-01-30 13:30:43 +0100 | [diff] [blame] | 236 | |
#ifdef CONFIG_SYSCTL
/* Register vsyscall32 into the ABI table */
#include <linux/sysctl.h>

/* /proc/sys/abi/vsyscall32 — runtime toggle aliased to vdso_enabled above. */
static struct ctl_table abi_table2[] = {
	{
		.procname = "vsyscall32",
		.data = &sysctl_vsyscall32,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec
	},
	{}
};

/* Parent directory entry: /proc/sys/abi. */
static struct ctl_table abi_root_table2[] = {
	{
		.procname = "abi",
		.mode = 0555,
		.child = abi_table2
	},
	{}
};
| 260 | |
/* Hook the abi sysctl table into /proc/sys at boot. */
static __init int ia32_binfmt_init(void)
{
	register_sysctl_table(abi_root_table2);
	return 0;
}
__initcall(ia32_binfmt_init);
#endif
| 268 | |
Roland McGrath | af65d64 | 2008-01-30 13:30:43 +0100 | [diff] [blame] | 269 | #else /* CONFIG_X86_32 */ |
| 270 | |
Ingo Molnar | e6e5494 | 2006-06-27 02:53:50 -0700 | [diff] [blame] | 271 | const char *arch_vma_name(struct vm_area_struct *vma) |
| 272 | { |
| 273 | if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) |
| 274 | return "[vdso]"; |
| 275 | return NULL; |
| 276 | } |
| 277 | |
/* No gate (vsyscall) VMA on 32-bit kernels; the vDSO is an ordinary mapping. */
struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return NULL;
}
| 282 | |
/* No gate area exists on 32-bit, so no address is ever inside it. */
int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return 0;
}
| 287 | |
/* mm-less variant of the above: likewise always false on 32-bit. */
int in_gate_area_no_mm(unsigned long addr)
{
	return 0;
}

#endif /* CONFIG_X86_64 */