// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/uio.h>

#include <linux/uaccess.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#define DEVPORT_MINOR	4

static inline unsigned long size_inside_page(unsigned long start,
					     unsigned long size)
{
	unsigned long sz;

	sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

	return min(sz, size);
}
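
/*
 * Worked example for size_inside_page() above (illustrative, assuming 4 KiB
 * pages): for start = 0x12345ffc and size = 64, start & (PAGE_SIZE - 1) is
 * 0xffc, so sz = 0x1000 - 0xffc = 4 and the helper returns min(4, 64) = 4,
 * i.e. the number of bytes left in the page that contains 'start'.
 */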

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
	return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif

#ifdef CONFIG_STRICT_DEVMEM
static inline int page_is_allowed(unsigned long pfn)
{
	return devmem_is_allowed(pfn);
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn))
			return 0;
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#else
static inline int page_is_allowed(unsigned long pfn)
{
	return 1;
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#endif

#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif

/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t read, sz;
	void *ptr;

	if (p != *ppos)
		return 0;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	while (count > 0) {
		unsigned long remaining;
		int allowed;

		sz = size_inside_page(p, count);

		allowed = page_is_allowed(p >> PAGE_SHIFT);
		if (!allowed)
			return -EPERM;
		if (allowed == 2) {
			/* Show zeros for restricted memory. */
			remaining = clear_user(buf, sz);
		} else {
			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur.
			 */
			ptr = xlate_dev_mem_ptr(p);
			if (!ptr)
				return -EFAULT;

			remaining = copy_to_user(buf, ptr, sz);

			unxlate_dev_mem_ptr(p, ptr);
		}

		if (remaining)
			return -EFAULT;

		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
	}

	*ppos += read;
	return read;
}

static ssize_t write_mem(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (p != *ppos)
		return -EFBIG;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		int allowed;

		sz = size_inside_page(p, count);

		allowed = page_is_allowed(p >> PAGE_SHIFT);
		if (!allowed)
			return -EPERM;

		/* Skip actual writing when a page is marked as restricted. */
		if (allowed == 1) {
			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur.
			 */
			ptr = xlate_dev_mem_ptr(p);
			if (!ptr) {
				if (written)
					break;
				return -EFAULT;
			}

			copied = copy_from_user(ptr, buf, sz);
			unxlate_dev_mem_ptr(p, ptr);
			if (copied) {
				written += sz - copied;
				if (written)
					break;
				return -EFAULT;
			}
		}

		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

int __weak phys_mem_access_prot_allowed(struct file *file,
	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
	return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_DSYNC because we cannot tolerate memory
	 * attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
	{
		extern int __uncached_access(struct file *file,
					     unsigned long addr);

		return __uncached_access(file, addr);
	}
#else
	/*
	 * Accessing memory above the top of memory the kernel knows about,
	 * or through a file pointer that was marked O_DSYNC, will be done
	 * non-cached.
	 */
	if (file->f_flags & O_DSYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}
#endif

static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	phys_addr_t offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif
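
/*
 * Usage note (an addition, not from the original source): userspace code that
 * wants an uncached mapping of MMIO through /dev/mem commonly opens the
 * device with O_SYNC, which on Linux includes the O_DSYNC bit, so that
 * uncached_access() above returns 1 on the non-ia64/non-MIPS path, e.g.
 *
 *	int fd = open("/dev/mem", O_RDWR | O_SYNC);
 *
 * Whether uncached access is actually required depends on the architecture
 * and on the region being mapped.
 */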

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if (!valid_mmap_phys_addr_range(pgoff, len))
		return (unsigned long) -EINVAL;
	return pgoff << PAGE_SHIFT;
}

/* permit direct mmap, for read, write or exec */
static unsigned memory_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_DIRECT |
		NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}

static unsigned zero_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_COPY;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_MAYSHARE;
}
#else

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys
#endif
};

static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;

	/* It's illegal to wrap around the end of the physical address space. */
	if (offset + (phys_addr_t)size - 1 < offset)
		return -EINVAL;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	if (!private_mapping_ok(vma))
		return -ENOSYS;

	if (!range_is_allowed(vma->vm_pgoff, size))
		return -EPERM;

	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
					  &vma->vm_page_prot))
		return -EINVAL;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size,
						 vma->vm_page_prot);

	vma->vm_ops = &mmap_mem_ops;

	/* Remap-pfn-range will mark the range VM_IO */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot)) {
		return -EAGAIN;
	}
	return 0;
}
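
/*
 * Illustrative userspace use of the mmap path above (a sketch, not from the
 * original source; the physical address 0x80000000 and the O_SYNC flag are
 * example choices):
 *
 *	int fd = open("/dev/mem", O_RDWR | O_SYNC);
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       fd, 0x80000000);	// file offset is the physical address
 *
 * With CONFIG_STRICT_DEVMEM enabled, range_is_allowed() can reject such a
 * mapping with -EPERM for ordinary RAM pages.
 */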

static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn;

	/* Turn a kernel-virtual address into a physical page frame */
	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

	/*
	 * RED-PEN: on some architectures there is more mapped memory than
	 * available in mem_map which pfn_valid checks for. Perhaps should add a
	 * new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(pfn))
		return -EIO;

	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}

/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
	int err = 0;

	read = 0;
	if (p < (unsigned long) high_memory) {
		low_count = count;
		if (count > (unsigned long)high_memory - p)
			low_count = (unsigned long)high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			sz = size_inside_page(p, low_count);
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
#endif
		while (low_count > 0) {
			sz = size_inside_page(p, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((void *)p);
			if (!virt_addr_valid(kbuf))
				return -ENXIO;

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			sz = size_inside_page(p, count);
			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			sz = vread(kbuf, (char *)p, sz);
			if (!sz)
				break;
			if (copy_to_user(buf, kbuf, sz)) {
				err = -EFAULT;
				break;
			}
			count -= sz;
			buf += sz;
			read += sz;
			p += sz;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return read ? read : err;
}


static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		void *ptr;

		sz = size_inside_page(p, count);

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_kmem_ptr((void *)p);
		if (!virt_addr_valid(ptr))
			return -ENXIO;

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
	int err = 0;

	if (p < (unsigned long) high_memory) {
		unsigned long to_write = min_t(unsigned long, count,
					       (unsigned long)high_memory - p);
		wrote = do_write_kmem(p, buf, to_write, ppos);
		if (wrote != to_write)
			return wrote;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			unsigned long sz = size_inside_page(p, count);
			unsigned long n;

			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			n = copy_from_user(kbuf, buf, sz);
			if (n) {
				err = -EFAULT;
				break;
			}
			vwrite(kbuf, (char *)p, sz);
			count -= sz;
			buf += sz;
			virtr += sz;
			p += sz;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	return virtr + wrote ? : err;
}

static ssize_t read_port(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i), tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user *tmp = buf;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;

		if (__get_user(c, tmp)) {
			if (tmp > buf)
				break;
			return -EFAULT;
		}
		outb(c, i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}
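
/*
 * Illustrative /dev/port usage (a sketch, not from the original source): the
 * file offset selects the I/O port, so on a PC the CMOS/RTC seconds register
 * could be read with
 *
 *	int fd = open("/dev/port", O_RDWR);
 *	unsigned char idx = 0x00, val;
 *	pwrite(fd, &idx, 1, 0x70);	// select CMOS register 0x00
 *	pread(fd, &val, 1, 0x71);	// read its value
 *
 * Ports 0x70/0x71 are PC-specific example choices; open_port() below also
 * requires CAP_SYS_RAWIO.
 */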

static ssize_t read_null(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
{
	return 0;
}

static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
{
	size_t count = iov_iter_count(from);

	iov_iter_advance(from, count);
	return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
	size_t written = 0;

	while (iov_iter_count(iter)) {
		size_t chunk = iov_iter_count(iter), n;

		if (chunk > PAGE_SIZE)
			chunk = PAGE_SIZE;	/* Just for latency reasons */
		n = iov_iter_zero(chunk, iter);
		if (!n && iov_iter_count(iter))
			return written ? written : -EFAULT;
		written += n;
		if (signal_pending(current))
			return written ? written : -ERESTARTSYS;
		cond_resched();
	}
	return written;
}

static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
	return -ENOSYS;
#endif
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	return 0;
}

static unsigned long get_unmapped_area_zero(struct file *file,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags)
{
#ifdef CONFIG_MMU
	if (flags & MAP_SHARED) {
		/*
		 * mmap_zero() will call shmem_zero_setup() to create a file,
		 * so use shmem's get_unmapped_area in case it can be huge;
		 * and pass NULL for file as in mmap.c's get_unmapped_area(),
		 * so as not to confuse shmem with our handle on "/dev/zero".
		 */
		return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
	}

	/* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
#else
	return -ENOSYS;
#endif
}

static ssize_t write_full(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t ret;

	inode_lock(file_inode(file));
	switch (orig) {
	case SEEK_CUR:
		offset += file->f_pos;
	case SEEK_SET:
		/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
		if ((unsigned long long)offset >= -MAX_ERRNO) {
			ret = -EOVERFLOW;
			break;
		}
		file->f_pos = offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	default:
		ret = -EINVAL;
	}
	inode_unlock(file_inode(file));
	return ret;
}

static int open_port(struct inode *inode, struct file *filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define write_iter_zero	write_iter_null
#define open_mem	open_port
#define open_kmem	open_mem

static const struct file_operations __maybe_unused mem_fops = {
	.llseek		= memory_lseek,
	.read		= read_mem,
	.write		= write_mem,
	.mmap		= mmap_mem,
	.open		= open_mem,
#ifndef CONFIG_MMU
	.get_unmapped_area = get_unmapped_area_mem,
	.mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations __maybe_unused kmem_fops = {
	.llseek		= memory_lseek,
	.read		= read_kmem,
	.write		= write_kmem,
	.mmap		= mmap_kmem,
	.open		= open_kmem,
#ifndef CONFIG_MMU
	.get_unmapped_area = get_unmapped_area_mem,
	.mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations null_fops = {
	.llseek		= null_lseek,
	.read		= read_null,
	.write		= write_null,
	.read_iter	= read_iter_null,
	.write_iter	= write_iter_null,
	.splice_write	= splice_write_null,
};

static const struct file_operations __maybe_unused port_fops = {
	.llseek		= memory_lseek,
	.read		= read_port,
	.write		= write_port,
	.open		= open_port,
};

static const struct file_operations zero_fops = {
	.llseek		= zero_lseek,
	.write		= write_zero,
	.read_iter	= read_iter_zero,
	.write_iter	= write_iter_zero,
	.mmap		= mmap_zero,
	.get_unmapped_area = get_unmapped_area_zero,
#ifndef CONFIG_MMU
	.mmap_capabilities = zero_mmap_capabilities,
#endif
};

static const struct file_operations full_fops = {
	.llseek		= full_lseek,
	.read_iter	= read_iter_zero,
	.write		= write_full,
};

static const struct memdev {
	const char *name;
	umode_t mode;
	const struct file_operations *fops;
	fmode_t fmode;
} devlist[] = {
#ifdef CONFIG_DEVMEM
	 [1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
#endif
#ifdef CONFIG_DEVKMEM
	 [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
#endif
	 [3] = { "null", 0666, &null_fops, 0 },
#ifdef CONFIG_DEVPORT
	 [4] = { "port", 0, &port_fops, 0 },
#endif
	 [5] = { "zero", 0666, &zero_fops, 0 },
	 [7] = { "full", 0666, &full_fops, 0 },
	 [8] = { "random", 0666, &random_fops, 0 },
	 [9] = { "urandom", 0666, &urandom_fops, 0 },
#ifdef CONFIG_PRINTK
	[11] = { "kmsg", 0644, &kmsg_fops, 0 },
#endif
};

static int memory_open(struct inode *inode, struct file *filp)
{
	int minor;
	const struct memdev *dev;

	minor = iminor(inode);
	if (minor >= ARRAY_SIZE(devlist))
		return -ENXIO;

	dev = &devlist[minor];
	if (!dev->fops)
		return -ENXIO;

	filp->f_op = dev->fops;
	filp->f_mode |= dev->fmode;

	if (dev->fops->open)
		return dev->fops->open(inode, filp);

	return 0;
}

static const struct file_operations memory_fops = {
	.open = memory_open,
	.llseek = noop_llseek,
};

static char *mem_devnode(struct device *dev, umode_t *mode)
{
	if (mode && devlist[MINOR(dev->devt)].mode)
		*mode = devlist[MINOR(dev->devt)].mode;
	return NULL;
}

static struct class *mem_class;

static int __init chr_dev_init(void)
{
	int minor;

	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
		printk("unable to get major %d for memory devs\n", MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	if (IS_ERR(mem_class))
		return PTR_ERR(mem_class);

	mem_class->devnode = mem_devnode;
	for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
		if (!devlist[minor].name)
			continue;

		/*
		 * Create /dev/port?
		 */
		if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
			continue;

		device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
			      NULL, devlist[minor].name);
	}

	return tty_init();
}

fs_initcall(chr_dev_init);