// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/security.h>
#include <linux/pseudo_fs.h>
#include <uapi/linux/magic.h>
#include <linux/mount.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#define DEVMEM_MINOR	1
#define DEVPORT_MINOR	4

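/*
 * Helper: how many of the requested @size bytes fall within the page that
 * contains @start. The read/write loops below use this to process at most
 * one page per iteration.
 */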
static inline unsigned long size_inside_page(unsigned long start,
					     unsigned long size)
{
	unsigned long sz;

	sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

	return min(sz, size);
}

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
	return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif

#ifdef CONFIG_STRICT_DEVMEM
static inline int page_is_allowed(unsigned long pfn)
{
	return devmem_is_allowed(pfn);
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn))
			return 0;
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#else
static inline int page_is_allowed(unsigned long pfn)
{
	return 1;
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#endif

#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif

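/*
 * Give the scheduler a chance to run between pages and bail out of a long
 * read/write loop if the calling task has received a fatal signal.
 */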
static inline bool should_stop_iteration(void)
{
	if (need_resched())
		cond_resched();
	return fatal_signal_pending(current);
}

/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t read, sz;
	void *ptr;
	char *bounce;
	int err;

	if (p != *ppos)
		return 0;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

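	/*
	 * Each page is read into a kernel bounce buffer first:
	 * copy_from_kernel_nofault() catches faults on memory that cannot be
	 * read safely, and only data that was copied successfully is then
	 * handed to user space.
	 */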
	bounce = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bounce)
		return -ENOMEM;

	while (count > 0) {
		unsigned long remaining;
		int allowed, probe;

		sz = size_inside_page(p, count);

		err = -EPERM;
		allowed = page_is_allowed(p >> PAGE_SHIFT);
		if (!allowed)
			goto failed;

		err = -EFAULT;
		if (allowed == 2) {
			/* Show zeros for restricted memory. */
			remaining = clear_user(buf, sz);
		} else {
			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur.
			 */
			ptr = xlate_dev_mem_ptr(p);
			if (!ptr)
				goto failed;

			probe = copy_from_kernel_nofault(bounce, ptr, sz);
			unxlate_dev_mem_ptr(p, ptr);
			if (probe)
				goto failed;

			remaining = copy_to_user(buf, bounce, sz);
		}

		if (remaining)
			goto failed;

		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
		if (should_stop_iteration())
			break;
	}
	kfree(bounce);

	*ppos += read;
	return read;

failed:
	kfree(bounce);
	return err;
}

static ssize_t write_mem(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (p != *ppos)
		return -EFBIG;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		int allowed;

		sz = size_inside_page(p, count);

		allowed = page_is_allowed(p >> PAGE_SHIFT);
		if (!allowed)
			return -EPERM;

		/* Skip actual writing when a page is marked as restricted. */
		if (allowed == 1) {
			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur.
			 */
			ptr = xlate_dev_mem_ptr(p);
			if (!ptr) {
				if (written)
					break;
				return -EFAULT;
			}

			copied = copy_from_user(ptr, buf, sz);
			unxlate_dev_mem_ptr(p, ptr);
			if (copied) {
				written += sz - copied;
				if (written)
					break;
				return -EFAULT;
			}
		}

		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
		if (should_stop_iteration())
			break;
	}

	*ppos += written;
	return written;
}

int __weak phys_mem_access_prot_allowed(struct file *file,
	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
	return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_DSYNC because we cannot tolerate memory
	 * attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
	{
		extern int __uncached_access(struct file *file,
					     unsigned long addr);

		return __uncached_access(file, addr);
	}
#else
	/*
	 * Accessing memory above the top the kernel knows about, or through
	 * a file pointer that was marked O_DSYNC, will be done non-cached.
	 */
	if (file->f_flags & O_DSYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}
#endif

static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	phys_addr_t offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if (!valid_mmap_phys_addr_range(pgoff, len))
		return (unsigned long) -EINVAL;
	return pgoff << PAGE_SHIFT;
}

/* permit direct mmap, for read, write or exec */
static unsigned memory_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_DIRECT |
		NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}

static unsigned zero_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_COPY;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_MAYSHARE;
}
#else

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys
#endif
};

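/*
 * Map physical memory into the caller's address space. The offset is taken
 * from vma->vm_pgoff; the range is rejected if it overflows phys_addr_t,
 * wraps around the end of the physical address space, or fails the
 * architecture and STRICT_DEVMEM policy checks below.
 */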
static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;

	/* Does it even fit in phys_addr_t? */
	if (offset >> PAGE_SHIFT != vma->vm_pgoff)
		return -EINVAL;

	/* It's illegal to wrap around the end of the physical address space. */
	if (offset + (phys_addr_t)size - 1 < offset)
		return -EINVAL;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	if (!private_mapping_ok(vma))
		return -ENOSYS;

	if (!range_is_allowed(vma->vm_pgoff, size))
		return -EPERM;

	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
					  &vma->vm_page_prot))
		return -EINVAL;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size,
						 vma->vm_page_prot);

	vma->vm_ops = &mmap_mem_ops;

	/* Remap-pfn-range will mark the range VM_IO */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot)) {
		return -EAGAIN;
	}
	return 0;
}

static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn;

	/* Turn a kernel-virtual address into a physical page frame */
	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

	/*
	 * RED-PEN: on some architectures there is more mapped memory than
	 * available in mem_map which pfn_valid checks for. Perhaps should add a
	 * new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(pfn))
		return -EIO;

	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}

/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
	int err = 0;

	read = 0;
	if (p < (unsigned long) high_memory) {
		low_count = count;
		if (count > (unsigned long)high_memory - p)
			low_count = (unsigned long)high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			sz = size_inside_page(p, low_count);
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
#endif
		while (low_count > 0) {
			sz = size_inside_page(p, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((void *)p);
			if (!virt_addr_valid(kbuf))
				return -ENXIO;

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
			if (should_stop_iteration()) {
				count = 0;
				break;
			}
		}
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			sz = size_inside_page(p, count);
			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			sz = vread(kbuf, (char *)p, sz);
			if (!sz)
				break;
			if (copy_to_user(buf, kbuf, sz)) {
				err = -EFAULT;
				break;
			}
			count -= sz;
			buf += sz;
			read += sz;
			p += sz;
			if (should_stop_iteration())
				break;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return read ? read : err;
}

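/*
 * Write helper for the lowmem portion of /dev/kmem: @p is a kernel virtual
 * address below high_memory, accessed through xlate_dev_kmem_ptr() one page
 * at a time.
 */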
static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		void *ptr;

		sz = size_inside_page(p, count);

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_kmem_ptr((void *)p);
		if (!virt_addr_valid(ptr))
			return -ENXIO;

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
		if (should_stop_iteration())
			break;
	}

	*ppos += written;
	return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
	int err = 0;

	if (p < (unsigned long) high_memory) {
		unsigned long to_write = min_t(unsigned long, count,
					       (unsigned long)high_memory - p);
		wrote = do_write_kmem(p, buf, to_write, ppos);
		if (wrote != to_write)
			return wrote;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			unsigned long sz = size_inside_page(p, count);
			unsigned long n;

			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			n = copy_from_user(kbuf, buf, sz);
			if (n) {
				err = -EFAULT;
				break;
			}
			vwrite(kbuf, (char *)p, sz);
			count -= sz;
			buf += sz;
			virtr += sz;
			p += sz;
			if (should_stop_iteration())
				break;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	return virtr + wrote ? : err;
}

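/*
 * /dev/port: byte-wise access to x86-style I/O port space. The file offset
 * selects the port number (0..65535); reads use inb() and writes use outb(),
 * one byte per port.
 */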
static ssize_t read_port(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i), tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user *tmp = buf;

	if (!access_ok(buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;

		if (__get_user(c, tmp)) {
			if (tmp > buf)
				break;
			return -EFAULT;
		}
		outb(c, i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}

static ssize_t read_null(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
{
	return 0;
}

static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
{
	size_t count = iov_iter_count(from);

	iov_iter_advance(from, count);
	return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

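/*
 * Fill the destination iov_iter with zeros, at most a page per iteration so
 * that pending signals and the need to reschedule are noticed promptly.
 */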
static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
	size_t written = 0;

	while (iov_iter_count(iter)) {
		size_t chunk = iov_iter_count(iter), n;

		if (chunk > PAGE_SIZE)
			chunk = PAGE_SIZE;	/* Just for latency reasons */
		n = iov_iter_zero(chunk, iter);
		if (!n && iov_iter_count(iter))
			return written ? written : -EFAULT;
		written += n;
		if (signal_pending(current))
			return written ? written : -ERESTARTSYS;
		cond_resched();
	}
	return written;
}

static ssize_t read_zero(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	size_t cleared = 0;

	while (count) {
		size_t chunk = min_t(size_t, count, PAGE_SIZE);
		size_t left;

		left = clear_user(buf + cleared, chunk);
		if (unlikely(left)) {
			cleared += (chunk - left);
			if (!cleared)
				return -EFAULT;
			break;
		}
		cleared += chunk;
		count -= chunk;

		if (signal_pending(current))
			break;
		cond_resched();
	}

	return cleared;
}

static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
	return -ENOSYS;
#endif
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	vma_set_anonymous(vma);
	return 0;
}

static unsigned long get_unmapped_area_zero(struct file *file,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags)
{
#ifdef CONFIG_MMU
	if (flags & MAP_SHARED) {
		/*
		 * mmap_zero() will call shmem_zero_setup() to create a file,
		 * so use shmem's get_unmapped_area in case it can be huge;
		 * and pass NULL for file as in mmap.c's get_unmapped_area(),
		 * so as not to confuse shmem with our handle on "/dev/zero".
		 */
		return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
	}

	/* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
#else
	return -ENOSYS;
#endif
}

static ssize_t write_full(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero. Most notably, you
 * can fopen() both devices with "a" now. This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t ret;

	inode_lock(file_inode(file));
	switch (orig) {
	case SEEK_CUR:
		offset += file->f_pos;
		fallthrough;
	case SEEK_SET:
		/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
		if ((unsigned long long)offset >= -MAX_ERRNO) {
			ret = -EOVERFLOW;
			break;
		}
		file->f_pos = offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	default:
		ret = -EINVAL;
	}
	inode_unlock(file_inode(file));
	return ret;
}

static struct inode *devmem_inode;

#ifdef CONFIG_IO_STRICT_DEVMEM
void revoke_devmem(struct resource *res)
{
	/* pairs with smp_store_release() in devmem_init_inode() */
	struct inode *inode = smp_load_acquire(&devmem_inode);

	/*
	 * Check that the initialization has completed. Losing the race
	 * is ok because it means drivers are claiming resources before
	 * the fs_initcall level of init, which prevents /dev/mem from
	 * establishing mappings.
	 */
	if (!inode)
		return;

	/*
	 * The expectation is that the driver has successfully marked
	 * the resource busy by this point, so devmem_is_allowed()
	 * should start returning false, however for performance this
	 * does not iterate the entire resource range.
	 */
	if (devmem_is_allowed(PHYS_PFN(res->start)) &&
	    devmem_is_allowed(PHYS_PFN(res->end))) {
		/*
		 * *cringe* iomem=relaxed says "go ahead, what's the
		 * worst that can happen?"
		 */
		return;
	}

	unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1);
}
#endif

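/*
 * Open handler shared by /dev/mem and /dev/port: requires CAP_SYS_RAWIO and
 * an unlocked-down kernel. For /dev/mem the inode is additionally attached
 * to the shared devmem mapping so mappings can be revoked later.
 */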
static int open_port(struct inode *inode, struct file *filp)
{
	int rc;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	rc = security_locked_down(LOCKDOWN_DEV_MEM);
	if (rc)
		return rc;

	if (iminor(inode) != DEVMEM_MINOR)
		return 0;

	/*
	 * Use a unified address space to have a single point to manage
	 * revocations when drivers want to take over a /dev/mem mapped
	 * range.
	 */
	inode->i_mapping = devmem_inode->i_mapping;
	filp->f_mapping = inode->i_mapping;

	return 0;
}

#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define write_iter_zero	write_iter_null
#define open_mem	open_port
#define open_kmem	open_mem

static const struct file_operations __maybe_unused mem_fops = {
	.llseek		= memory_lseek,
	.read		= read_mem,
	.write		= write_mem,
	.mmap		= mmap_mem,
	.open		= open_mem,
#ifndef CONFIG_MMU
	.get_unmapped_area = get_unmapped_area_mem,
	.mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations __maybe_unused kmem_fops = {
	.llseek		= memory_lseek,
	.read		= read_kmem,
	.write		= write_kmem,
	.mmap		= mmap_kmem,
	.open		= open_kmem,
#ifndef CONFIG_MMU
	.get_unmapped_area = get_unmapped_area_mem,
	.mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations null_fops = {
	.llseek		= null_lseek,
	.read		= read_null,
	.write		= write_null,
	.read_iter	= read_iter_null,
	.write_iter	= write_iter_null,
	.splice_write	= splice_write_null,
};

static const struct file_operations __maybe_unused port_fops = {
	.llseek		= memory_lseek,
	.read		= read_port,
	.write		= write_port,
	.open		= open_port,
};

static const struct file_operations zero_fops = {
	.llseek		= zero_lseek,
	.write		= write_zero,
	.read_iter	= read_iter_zero,
	.read		= read_zero,
	.write_iter	= write_iter_zero,
	.mmap		= mmap_zero,
	.get_unmapped_area = get_unmapped_area_zero,
#ifndef CONFIG_MMU
	.mmap_capabilities = zero_mmap_capabilities,
#endif
};

static const struct file_operations full_fops = {
	.llseek		= full_lseek,
	.read_iter	= read_iter_zero,
	.write		= write_full,
};

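/*
 * Table of memory-class character devices, indexed by minor number under
 * major MEM_MAJOR. Entries that are compiled out (e.g. /dev/kmem without
 * CONFIG_DEVKMEM) simply leave a hole in the table.
 */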
static const struct memdev {
	const char *name;
	umode_t mode;
	const struct file_operations *fops;
	fmode_t fmode;
} devlist[] = {
#ifdef CONFIG_DEVMEM
	[DEVMEM_MINOR] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
#endif
#ifdef CONFIG_DEVKMEM
	[2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
#endif
	[3] = { "null", 0666, &null_fops, 0 },
#ifdef CONFIG_DEVPORT
	[4] = { "port", 0, &port_fops, 0 },
#endif
	[5] = { "zero", 0666, &zero_fops, 0 },
	[7] = { "full", 0666, &full_fops, 0 },
	[8] = { "random", 0666, &random_fops, 0 },
	[9] = { "urandom", 0666, &urandom_fops, 0 },
#ifdef CONFIG_PRINTK
	[11] = { "kmsg", 0644, &kmsg_fops, 0 },
#endif
};

static int memory_open(struct inode *inode, struct file *filp)
{
	int minor;
	const struct memdev *dev;

	minor = iminor(inode);
	if (minor >= ARRAY_SIZE(devlist))
		return -ENXIO;

	dev = &devlist[minor];
	if (!dev->fops)
		return -ENXIO;

	filp->f_op = dev->fops;
	filp->f_mode |= dev->fmode;

	if (dev->fops->open)
		return dev->fops->open(inode, filp);

	return 0;
}

static const struct file_operations memory_fops = {
	.open = memory_open,
	.llseek = noop_llseek,
};

static char *mem_devnode(struct device *dev, umode_t *mode)
{
	if (mode && devlist[MINOR(dev->devt)].mode)
		*mode = devlist[MINOR(dev->devt)].mode;
	return NULL;
}

static struct class *mem_class;

static int devmem_fs_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, DEVMEM_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type devmem_fs_type = {
	.name		= "devmem",
	.owner		= THIS_MODULE,
	.init_fs_context = devmem_fs_init_fs_context,
	.kill_sb	= kill_anon_super,
};

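/*
 * Pin a small "devmem" pseudo filesystem and allocate the single anonymous
 * inode whose address_space backs every /dev/mem mapping, so that
 * revoke_devmem() has one mapping to unmap when a driver claims a range.
 */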
static int devmem_init_inode(void)
{
	static struct vfsmount *devmem_vfs_mount;
	static int devmem_fs_cnt;
	struct inode *inode;
	int rc;

	rc = simple_pin_fs(&devmem_fs_type, &devmem_vfs_mount, &devmem_fs_cnt);
	if (rc < 0) {
		pr_err("Cannot mount /dev/mem pseudo filesystem: %d\n", rc);
		return rc;
	}

	inode = alloc_anon_inode(devmem_vfs_mount->mnt_sb);
	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		pr_err("Cannot allocate inode for /dev/mem: %d\n", rc);
		simple_release_fs(&devmem_vfs_mount, &devmem_fs_cnt);
		return rc;
	}

	/*
	 * Publish /dev/mem initialized.
	 * Pairs with smp_load_acquire() in revoke_devmem().
	 */
	smp_store_release(&devmem_inode, inode);

	return 0;
}

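/*
 * Register the MEM_MAJOR character device, create the "mem" class and a
 * device node for every populated devlist entry; /dev/port and /dev/mem are
 * skipped when the architecture or devmem inode setup does not allow them.
 */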
static int __init chr_dev_init(void)
{
	int minor;

	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
		printk("unable to get major %d for memory devs\n", MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	if (IS_ERR(mem_class))
		return PTR_ERR(mem_class);

	mem_class->devnode = mem_devnode;
	for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
		if (!devlist[minor].name)
			continue;

		/*
		 * Create /dev/port?
		 */
		if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
			continue;
		if ((minor == DEVMEM_MINOR) && devmem_init_inode() != 0)
			continue;

		device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
			      NULL, devlist[minor].name);
	}

	return tty_init();
}

fs_initcall(chr_dev_init);