/*
 * linux/arch/ppc64/kernel/vdso.c
 *
 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
 *                    <benh@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/bootmem.h>

#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/vdso.h>

#undef DEBUG

#ifdef DEBUG
#define DBG(fmt...) printk(fmt)
#else
#define DBG(fmt...)
#endif


/*
 * The vDSOs themselves are here
 */
extern char vdso64_start, vdso64_end;
extern char vdso32_start, vdso32_end;

static void *vdso64_kbase = &vdso64_start;
static void *vdso32_kbase = &vdso32_start;

unsigned int vdso64_pages;
unsigned int vdso32_pages;

/* Signal trampoline user addresses */

unsigned long vdso64_rt_sigtramp;
unsigned long vdso32_sigtramp;
unsigned long vdso32_rt_sigtramp;

/* Format of the patch table */
struct vdso_patch_def
{
	u32		pvr_mask, pvr_value;
	const char	*gen_name;
	const char	*fix_name;
};

/* Table of functions to patch based on the CPU type/revision
 *
 * TODO: Improve by adding whole lists for each entry
 */
static struct vdso_patch_def vdso_patches[] = {
	{
		0xffff0000, 0x003a0000,		/* POWER5 */
		"__kernel_sync_dicache", "__kernel_sync_dicache_p5"
	},
	{
		0xffff0000, 0x003b0000,		/* POWER5 */
		"__kernel_sync_dicache", "__kernel_sync_dicache_p5"
	},
};

/*
 * Some info carried around for each of the vDSO images during parsing
 * at boot time.
 */
struct lib32_elfinfo
{
	Elf32_Ehdr	*hdr;		/* ptr to ELF */
	Elf32_Sym	*dynsym;	/* ptr to .dynsym section */
	unsigned long	dynsymsize;	/* size of .dynsym section */
	char		*dynstr;	/* ptr to .dynstr section */
	unsigned long	text;		/* offset of .text section in .so */
};

struct lib64_elfinfo
{
	Elf64_Ehdr	*hdr;
	Elf64_Sym	*dynsym;
	unsigned long	dynsymsize;
	char		*dynstr;
	unsigned long	text;
};


#ifdef __DEBUG
static void dump_one_vdso_page(struct page *pg, struct page *upg)
{
	printk("kpg: %p (c:%d,f:%08lx)", __va(page_to_pfn(pg) << PAGE_SHIFT),
	       page_count(pg),
	       pg->flags);
	if (upg/* && pg != upg*/) {
		printk(" upg: %p (c:%d,f:%08lx)", __va(page_to_pfn(upg) << PAGE_SHIFT),
		       page_count(upg),
		       upg->flags);
	}
	printk("\n");
}

static void dump_vdso_pages(struct vm_area_struct * vma)
{
	int i;

	if (!vma || test_thread_flag(TIF_32BIT)) {
		printk("vDSO32 @ %016lx:\n", (unsigned long)vdso32_kbase);
		for (i = 0; i < vdso32_pages; i++) {
			struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
			struct page *upg = (vma && vma->vm_mm) ?
				follow_page(vma->vm_mm, vma->vm_start + i*PAGE_SIZE, 0)
				: NULL;
			dump_one_vdso_page(pg, upg);
		}
	}
	if (!vma || !test_thread_flag(TIF_32BIT)) {
		printk("vDSO64 @ %016lx:\n", (unsigned long)vdso64_kbase);
		for (i = 0; i < vdso64_pages; i++) {
			struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
			struct page *upg = (vma && vma->vm_mm) ?
				follow_page(vma->vm_mm, vma->vm_start + i*PAGE_SIZE, 0)
				: NULL;
			dump_one_vdso_page(pg, upg);
		}
	}
}
#endif /* __DEBUG */


/*
 * Keep a dummy vma_close for now; it will prevent VMA merging.
 */
static void vdso_vma_close(struct vm_area_struct * vma)
{
}

/*
 * Our nopage() handler maps in the actual vDSO kernel pages. They will
 * be mapped read-only by do_no_page() and eventually COW'ed, either
 * right away for an initial write access, or later by do_wp_page().
 */
static struct page * vdso_vma_nopage(struct vm_area_struct * vma,
				     unsigned long address, int *type)
{
	unsigned long offset = address - vma->vm_start;
	struct page *pg;
	void *vbase = test_thread_flag(TIF_32BIT) ? vdso32_kbase : vdso64_kbase;

	DBG("vdso_vma_nopage(current: %s, address: %016lx, off: %lx)\n",
	    current->comm, address, offset);

	if (address < vma->vm_start || address > vma->vm_end)
		return NOPAGE_SIGBUS;

	/*
	 * Last page is systemcfg.
	 */
	if ((vma->vm_end - address) <= PAGE_SIZE)
		pg = virt_to_page(systemcfg);
	else
		pg = virt_to_page(vbase + offset);

	get_page(pg);
	DBG(" ->page count: %d\n", page_count(pg));

	return pg;
}

static struct vm_operations_struct vdso_vmops = {
	.close	= vdso_vma_close,
	.nopage	= vdso_vma_nopage,
};

/*
 * This is called from binfmt_elf; we create the special VMA for the
 * vDSO and insert it into the mm struct tree.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int executable_stack)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long vdso_pages;
	unsigned long vdso_base;

	if (test_thread_flag(TIF_32BIT)) {
		vdso_pages = vdso32_pages;
		vdso_base = VDSO32_MBASE;
	} else {
		vdso_pages = vdso64_pages;
		vdso_base = VDSO64_MBASE;
	}

	current->thread.vdso_base = 0;

	/* The vDSO has a problem and was disabled; just don't "enable"
	 * it for this process.
	 */
	if (vdso_pages == 0)
		return 0;

	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (vma == NULL)
		return -ENOMEM;

	memset(vma, 0, sizeof(*vma));

	/*
	 * pick a base address for the vDSO in process space. We try to put it
	 * at vdso_base which is the "natural" base for it, but we might fail
	 * and end up putting it elsewhere.
	 */
	vdso_base = get_unmapped_area(NULL, vdso_base,
				      vdso_pages << PAGE_SHIFT, 0, 0);
	if (vdso_base & ~PAGE_MASK) {
		kmem_cache_free(vm_area_cachep, vma);
		return (int)vdso_base;
	}

	current->thread.vdso_base = vdso_base;

	vma->vm_mm = mm;
	vma->vm_start = current->thread.vdso_base;

	/*
	 * The VMA size is one page more than the vDSO, since systemcfg
	 * is mapped into the last page.
	 */
	vma->vm_end = vma->vm_start + ((vdso_pages + 1) << PAGE_SHIFT);

	/*
	 * Our VMA flags don't include VM_WRITE, so by default the process
	 * isn't allowed to write to these pages.
	 * gdb can break that via the ptrace interface and thus trigger COW
	 * on these pages, but it is then your responsibility never to do
	 * that on the "data" page of the vDSO, or you'll stop getting kernel
	 * updates and your nice userland gettimeofday will be totally dead.
	 * It's fine to use ptrace to set breakpoints in the vDSO code pages,
	 * though.
	 */
	vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC | VM_RESERVED;
	vma->vm_flags |= mm->def_flags;
	vma->vm_page_prot = protection_map[vma->vm_flags & 0x7];
	vma->vm_ops = &vdso_vmops;

	down_write(&mm->mmap_sem);
	if (insert_vm_struct(mm, vma)) {
		up_write(&mm->mmap_sem);
		kmem_cache_free(vm_area_cachep, vma);
		return -ENOMEM;
	}
	mm->total_vm += (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	up_write(&mm->mmap_sem);

	return 0;
}

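/*
 * Locate a named ELF section header in the in-kernel vDSO image and
 * return a pointer to its contents (and its size via *size when the
 * caller passes a non-NULL size pointer). 32-bit and 64-bit variants
 * follow.
 */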
static void * __init find_section32(Elf32_Ehdr *ehdr, const char *secname,
				    unsigned long *size)
{
	Elf32_Shdr *sechdrs;
	unsigned int i;
	char *secnames;

	/* Grab section headers and strings so we can tell who is who */
	sechdrs = (void *)ehdr + ehdr->e_shoff;
	secnames = (void *)ehdr + sechdrs[ehdr->e_shstrndx].sh_offset;

	/* Find the section they want */
	for (i = 1; i < ehdr->e_shnum; i++) {
		if (strcmp(secnames+sechdrs[i].sh_name, secname) == 0) {
			if (size)
				*size = sechdrs[i].sh_size;
			return (void *)ehdr + sechdrs[i].sh_offset;
		}
	}
	if (size)
		*size = 0;
	return NULL;
}

static void * __init find_section64(Elf64_Ehdr *ehdr, const char *secname,
				    unsigned long *size)
{
	Elf64_Shdr *sechdrs;
	unsigned int i;
	char *secnames;

	/* Grab section headers and strings so we can tell who is who */
	sechdrs = (void *)ehdr + ehdr->e_shoff;
	secnames = (void *)ehdr + sechdrs[ehdr->e_shstrndx].sh_offset;

	/* Find the section they want */
	for (i = 1; i < ehdr->e_shnum; i++) {
		if (strcmp(secnames+sechdrs[i].sh_name, secname) == 0) {
			if (size)
				*size = sechdrs[i].sh_size;
			return (void *)ehdr + sechdrs[i].sh_offset;
		}
	}
	if (size)
		*size = 0;
	return NULL;
}

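/*
 * Look up a symbol by name in a vDSO image's .dynsym table. Any
 * version suffix (everything after '@') is stripped before comparing,
 * so both versioned and unversioned names match.
 */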
static Elf32_Sym * __init find_symbol32(struct lib32_elfinfo *lib, const char *symname)
{
	unsigned int i;
	char name[32], *c;

	for (i = 0; i < (lib->dynsymsize / sizeof(Elf32_Sym)); i++) {
		if (lib->dynsym[i].st_name == 0)
			continue;
		strlcpy(name, lib->dynstr + lib->dynsym[i].st_name, 32);
		c = strchr(name, '@');
		if (c)
			*c = 0;
		if (strcmp(symname, name) == 0)
			return &lib->dynsym[i];
	}
	return NULL;
}

static Elf64_Sym * __init find_symbol64(struct lib64_elfinfo *lib, const char *symname)
{
	unsigned int i;
	char name[32], *c;

	for (i = 0; i < (lib->dynsymsize / sizeof(Elf64_Sym)); i++) {
		if (lib->dynsym[i].st_name == 0)
			continue;
		strlcpy(name, lib->dynstr + lib->dynsym[i].st_name, 32);
		c = strchr(name, '@');
		if (c)
			*c = 0;
		if (strcmp(symname, name) == 0)
			return &lib->dynsym[i];
	}
	return NULL;
}

/* Note that we assume the section is .text and the symbol is relative to
 * the library base
 */
static unsigned long __init find_function32(struct lib32_elfinfo *lib, const char *symname)
{
	Elf32_Sym *sym = find_symbol32(lib, symname);

	if (sym == NULL) {
		printk(KERN_WARNING "vDSO32: function %s not found !\n", symname);
		return 0;
	}
	return sym->st_value - VDSO32_LBASE;
}

/* Note that we assume the section is .text and the symbol is relative to
 * the library base
 */
static unsigned long __init find_function64(struct lib64_elfinfo *lib, const char *symname)
{
	Elf64_Sym *sym = find_symbol64(lib, symname);

	if (sym == NULL) {
		printk(KERN_WARNING "vDSO64: function %s not found !\n", symname);
		return 0;
	}
#ifdef VDS64_HAS_DESCRIPTORS
	return *((u64 *)(vdso64_kbase + sym->st_value - VDSO64_LBASE)) - VDSO64_LBASE;
#else
	return sym->st_value - VDSO64_LBASE;
#endif
}


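/*
 * Resolve the .dynsym, .dynstr and .text sections of both vDSO images.
 * Returns -1 if any of them is missing, in which case the vDSO is left
 * disabled.
 */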
static __init int vdso_do_find_sections(struct lib32_elfinfo *v32,
					struct lib64_elfinfo *v64)
{
	void *sect;

	/*
	 * Locate symbol tables & text section
	 */

	v32->dynsym = find_section32(v32->hdr, ".dynsym", &v32->dynsymsize);
	v32->dynstr = find_section32(v32->hdr, ".dynstr", NULL);
	if (v32->dynsym == NULL || v32->dynstr == NULL) {
		printk(KERN_ERR "vDSO32: a required symbol section was not found\n");
		return -1;
	}
	sect = find_section32(v32->hdr, ".text", NULL);
	if (sect == NULL) {
		printk(KERN_ERR "vDSO32: the .text section was not found\n");
		return -1;
	}
	v32->text = sect - vdso32_kbase;

	v64->dynsym = find_section64(v64->hdr, ".dynsym", &v64->dynsymsize);
	v64->dynstr = find_section64(v64->hdr, ".dynstr", NULL);
	if (v64->dynsym == NULL || v64->dynstr == NULL) {
		printk(KERN_ERR "vDSO64: a required symbol section was not found\n");
		return -1;
	}
	sect = find_section64(v64->hdr, ".text", NULL);
	if (sect == NULL) {
		printk(KERN_ERR "vDSO64: the .text section was not found\n");
		return -1;
	}
	v64->text = sect - vdso64_kbase;

	return 0;
}

static __init void vdso_setup_trampolines(struct lib32_elfinfo *v32,
					  struct lib64_elfinfo *v64)
{
	/*
	 * Find signal trampolines
	 */

	vdso64_rt_sigtramp = find_function64(v64, "__kernel_sigtramp_rt64");
	vdso32_sigtramp	   = find_function32(v32, "__kernel_sigtramp32");
	vdso32_rt_sigtramp = find_function32(v32, "__kernel_sigtramp_rt32");
}

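/*
 * Patch the __kernel_datapage_offset word in each image so user space
 * can find the shared data page: the value stored is the offset from
 * that word to the data page mapped just past the vDSO text.
 */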
static __init int vdso_fixup_datapage(struct lib32_elfinfo *v32,
				      struct lib64_elfinfo *v64)
{
	Elf32_Sym *sym32;
	Elf64_Sym *sym64;

	sym32 = find_symbol32(v32, "__kernel_datapage_offset");
	if (sym32 == NULL) {
		printk(KERN_ERR "vDSO32: Can't find symbol __kernel_datapage_offset !\n");
		return -1;
	}
	*((int *)(vdso32_kbase + (sym32->st_value - VDSO32_LBASE))) =
		(vdso32_pages << PAGE_SHIFT) - (sym32->st_value - VDSO32_LBASE);

	sym64 = find_symbol64(v64, "__kernel_datapage_offset");
	if (sym64 == NULL) {
		printk(KERN_ERR "vDSO64: Can't find symbol __kernel_datapage_offset !\n");
		return -1;
	}
	*((int *)(vdso64_kbase + sym64->st_value - VDSO64_LBASE)) =
		(vdso64_pages << PAGE_SHIFT) - (sym64->st_value - VDSO64_LBASE);

	return 0;
}

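/*
 * Redirect a generic vDSO entry point to a CPU-specific one by copying
 * the "fixed" symbol's table entry over the generic symbol, so that
 * user space resolves the optimized version when it looks the name up.
 */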
static int vdso_do_func_patch32(struct lib32_elfinfo *v32,
				struct lib64_elfinfo *v64,
				const char *orig, const char *fix)
{
	Elf32_Sym *sym32_gen, *sym32_fix;

	sym32_gen = find_symbol32(v32, orig);
	if (sym32_gen == NULL) {
		printk(KERN_ERR "vDSO32: Can't find symbol %s !\n", orig);
		return -1;
	}
	sym32_fix = find_symbol32(v32, fix);
	if (sym32_fix == NULL) {
		printk(KERN_ERR "vDSO32: Can't find symbol %s !\n", fix);
		return -1;
	}
	sym32_gen->st_value = sym32_fix->st_value;
	sym32_gen->st_size = sym32_fix->st_size;
	sym32_gen->st_info = sym32_fix->st_info;
	sym32_gen->st_other = sym32_fix->st_other;
	sym32_gen->st_shndx = sym32_fix->st_shndx;

	return 0;
}

static int vdso_do_func_patch64(struct lib32_elfinfo *v32,
				struct lib64_elfinfo *v64,
				const char *orig, const char *fix)
{
	Elf64_Sym *sym64_gen, *sym64_fix;

	sym64_gen = find_symbol64(v64, orig);
	if (sym64_gen == NULL) {
		printk(KERN_ERR "vDSO64: Can't find symbol %s !\n", orig);
		return -1;
	}
	sym64_fix = find_symbol64(v64, fix);
	if (sym64_fix == NULL) {
		printk(KERN_ERR "vDSO64: Can't find symbol %s !\n", fix);
		return -1;
	}
	sym64_gen->st_value = sym64_fix->st_value;
	sym64_gen->st_size = sym64_fix->st_size;
	sym64_gen->st_info = sym64_fix->st_info;
	sym64_gen->st_other = sym64_fix->st_other;
	sym64_gen->st_shndx = sym64_fix->st_shndx;

	return 0;
}

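/*
 * Apply the CPU-specific patch table: for every entry whose PVR mask
 * and value match the running CPU, replace the generic symbol with its
 * fixed counterpart in both the 32-bit and 64-bit images.
 */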
static __init int vdso_fixup_alt_funcs(struct lib32_elfinfo *v32,
				       struct lib64_elfinfo *v64)
{
	u32 pvr;
	int i;

	pvr = mfspr(SPRN_PVR);
	for (i = 0; i < ARRAY_SIZE(vdso_patches); i++) {
		struct vdso_patch_def *patch = &vdso_patches[i];
		int match = (pvr & patch->pvr_mask) == patch->pvr_value;

		DBG("patch %d (mask: %x, pvr: %x) : %s\n",
		    i, patch->pvr_mask, patch->pvr_value, match ? "match" : "skip");

		if (!match)
			continue;

		DBG("replacing %s with %s...\n", patch->gen_name, patch->fix_name);

		/*
		 * Patch the 32-bit and 64-bit symbols. Note that we do not
		 * patch the "." symbol on 64-bit. It would be easy to do, but
		 * doesn't seem to be necessary; patching the OPD symbol is
		 * enough.
		 */
		vdso_do_func_patch32(v32, v64, patch->gen_name, patch->fix_name);
		vdso_do_func_patch64(v32, v64, patch->gen_name, patch->fix_name);
	}

	return 0;
}


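/*
 * Called once at boot: locate the ELF sections of both images, fix up
 * the data page offset, apply the CPU-specific patches and record the
 * signal trampoline addresses.
 */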
static __init int vdso_setup(void)
{
	struct lib32_elfinfo	v32;
	struct lib64_elfinfo	v64;

	v32.hdr = vdso32_kbase;
	v64.hdr = vdso64_kbase;

	if (vdso_do_find_sections(&v32, &v64))
		return -1;

	if (vdso_fixup_datapage(&v32, &v64))
		return -1;

	if (vdso_fixup_alt_funcs(&v32, &v64))
		return -1;

	vdso_setup_trampolines(&v32, &v64);

	return 0;
}

void __init vdso_init(void)
{
	int i;

	vdso64_pages = (&vdso64_end - &vdso64_start) >> PAGE_SHIFT;
	vdso32_pages = (&vdso32_end - &vdso32_start) >> PAGE_SHIFT;

	DBG("vdso64_kbase: %p, 0x%x pages, vdso32_kbase: %p, 0x%x pages\n",
	    vdso64_kbase, vdso64_pages, vdso32_kbase, vdso32_pages);

	/*
	 * Initialize the vDSO images in memory, that is, do the necessary
	 * fixups of vDSO symbols, locate trampolines, etc.
	 */
	if (vdso_setup()) {
		printk(KERN_ERR "vDSO setup failure, not enabled !\n");
		/* XXX should free pages here ? */
		vdso64_pages = vdso32_pages = 0;
		return;
	}

	/* Make sure pages are in the correct state */
	for (i = 0; i < vdso64_pages; i++) {
		struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
		ClearPageReserved(pg);
		get_page(pg);
	}
	for (i = 0; i < vdso32_pages; i++) {
		struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
		ClearPageReserved(pg);
		get_page(pg);
	}

	get_page(virt_to_page(systemcfg));
}

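/*
 * The vDSO is mapped through a regular VMA rather than the generic
 * "gate area" mechanism, so the gate area helpers are simple stubs.
 */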
int in_gate_area_no_task(unsigned long addr)
{
	return 0;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
	return 0;
}

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
	return NULL;
}
