Greg Kroah-Hartman | b244131 | 2017-11-01 15:07:57 +0100 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
Vivek Goyal | 60e64d4 | 2005-06-25 14:58:19 -0700 | [diff] [blame] | 2 | /* |
Dave Jones | 835c34a | 2007-10-12 21:10:53 -0400 | [diff] [blame] | 3 | * Memory preserving reboot related code. |
Vivek Goyal | 60e64d4 | 2005-06-25 14:58:19 -0700 | [diff] [blame] | 4 | * |
| 5 | * Created by: Hariprasad Nellitheertha (hari@in.ibm.com) |
| 6 | * Copyright (C) IBM Corporation, 2004. All rights reserved |
| 7 | */ |
| 8 | |
Vivek Goyal | 60e64d4 | 2005-06-25 14:58:19 -0700 | [diff] [blame] | 9 | #include <linux/errno.h> |
Vivek Goyal | 60e64d4 | 2005-06-25 14:58:19 -0700 | [diff] [blame] | 10 | #include <linux/crash_dump.h> |
Gustavo F. Padovan | 08aadf0 | 2008-07-29 02:48:53 -0300 | [diff] [blame] | 11 | #include <linux/uaccess.h> |
| 12 | #include <linux/io.h> |
Tom Lendacky | 4d96f91 | 2021-09-08 17:58:37 -0500 | [diff] [blame] | 13 | #include <linux/cc_platform.h> |
Vivek Goyal | 60e64d4 | 2005-06-25 14:58:19 -0700 | [diff] [blame] | 14 | |
Lianbo Jiang | 992b649 | 2018-09-30 16:37:41 +0800 | [diff] [blame] | 15 | static ssize_t __copy_oldmem_page(unsigned long pfn, char *buf, size_t csize, |
| 16 | unsigned long offset, int userbuf, |
| 17 | bool encrypted) |
| 18 | { |
| 19 | void *vaddr; |
| 20 | |
| 21 | if (!csize) |
| 22 | return 0; |
| 23 | |
| 24 | if (encrypted) |
| 25 | vaddr = (__force void *)ioremap_encrypted(pfn << PAGE_SHIFT, PAGE_SIZE); |
| 26 | else |
| 27 | vaddr = (__force void *)ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE); |
| 28 | |
| 29 | if (!vaddr) |
| 30 | return -ENOMEM; |
| 31 | |
| 32 | if (userbuf) { |
| 33 | if (copy_to_user((void __user *)buf, vaddr + offset, csize)) { |
| 34 | iounmap((void __iomem *)vaddr); |
| 35 | return -EFAULT; |
| 36 | } |
| 37 | } else |
| 38 | memcpy(buf, vaddr + offset, csize); |
| 39 | |
| 40 | set_iounmap_nonlazy(); |
| 41 | iounmap((void __iomem *)vaddr); |
| 42 | return csize; |
| 43 | } |
| 44 | |
/**
 * copy_oldmem_page - copy one page of memory
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *	space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *	otherwise @buf is in kernel address space, use memcpy().
 *
 * Copy a page from the old kernel's memory. For this page, there is no pte
 * mapped in the current kernel. We stitch up a pte, similar to kmap_atomic.
 *
 * Return: number of bytes copied on success, 0 if @csize is zero,
 * -ENOMEM if the page cannot be mapped, or -EFAULT if the copy to a
 * user buffer fails.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
			 unsigned long offset, int userbuf)
{
	/* Unencrypted mapping of the old kernel's page. */
	return __copy_oldmem_page(pfn, buf, csize, offset, userbuf, false);
}
Vivek Goyal | 60e64d4 | 2005-06-25 14:58:19 -0700 | [diff] [blame] | 63 | |
/**
 * copy_oldmem_page_encrypted - same as copy_oldmem_page() above but ioremap the
 * memory with the encryption mask set to accommodate kdump on SME-enabled
 * machines.
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *	space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *	otherwise @buf is in kernel address space, use memcpy().
 *
 * Return: number of bytes copied on success, 0 if @csize is zero,
 * -ENOMEM if the page cannot be mapped, or -EFAULT if the copy to a
 * user buffer fails.
 */
ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
				   unsigned long offset, int userbuf)
{
	/* Encrypted mapping of the old kernel's page. */
	return __copy_oldmem_page(pfn, buf, csize, offset, userbuf, true);
}
Thiago Jung Bauermann | ae7eb82 | 2019-08-06 01:49:18 -0300 | [diff] [blame] | 74 | |
/*
 * elfcorehdr_read - read from the old kernel's ELF core header area
 * @buf: kernel-space destination buffer
 * @count: number of bytes to read
 * @ppos: position in old memory to read from; read_from_oldmem() is
 *	expected to advance it (NOTE(review): confirm against its contract)
 *
 * Delegates to read_from_oldmem(), requesting a decryption-capable mapping
 * when the platform reports active guest memory encryption
 * (CC_ATTR_GUEST_MEM_ENCRYPT), since the old kernel's ELF core header
 * then resides in encrypted memory. The 0 argument presumably marks @buf
 * as a kernel (not user) buffer — verify against read_from_oldmem().
 *
 * Return: propagated from read_from_oldmem().
 */
ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0,
				cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT));
}