// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016-2018 Intel Corporation. All rights reserved. */
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include "dax-private.h"
#include "bus.h"

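/*
 * Validate that a VMA is suitable for mapping device-dax capacity:
 * the mapping must be shared (no private/COW mappings), aligned to
 * the region alignment, marked DAX-capable, and, for page-less
 * (PFN_DEV-only) regions, protected from fork via MADV_DONTFORK.
 */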
static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
		const char *func)
{
	struct dax_region *dax_region = dev_dax->region;
	struct device *dev = &dev_dax->dev;
	unsigned long mask;

	if (!dax_alive(dev_dax->dax_dev))
		return -ENXIO;

	/* prevent private mappings from being established */
	if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, attempted private mapping\n",
				current->comm, func);
		return -EINVAL;
	}

	mask = dax_region->align - 1;
	if (vma->vm_start & mask || vma->vm_end & mask) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
				current->comm, func, vma->vm_start, vma->vm_end,
				mask);
		return -EINVAL;
	}

	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV
			&& (vma->vm_flags & VM_DONTCOPY) == 0) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, dax range requires MADV_DONTFORK\n",
				current->comm, func);
		return -EINVAL;
	}

	if (!vma_is_dax(vma)) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, vma is not DAX capable\n",
				current->comm, func);
		return -EINVAL;
	}

	return 0;
}

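/*
 * Device-dax translation is a simple linear offset: the page offset
 * into the mapping maps 1:1 to an offset into the region's physical
 * resource range. Returns -1 if the requested span falls outside the
 * resource.
 */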
/* see "strong" declaration in tools/testing/nvdimm/dax-dev.c */
__weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
		unsigned long size)
{
	struct resource *res = &dev_dax->region->res;
	phys_addr_t phys;

	phys = pgoff * PAGE_SIZE + res->start;
	if (phys >= res->start && phys <= res->end) {
		if (phys + size - 1 <= res->end)
			return phys;
	}

	return -1;
}

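/*
 * Handle a PTE-sized (PAGE_SIZE) fault: only legal when the region
 * alignment is PAGE_SIZE, since device-dax refuses to map at a
 * granularity smaller than the region alignment.
 */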
static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf, pfn_t *pfn)
{
	struct device *dev = &dev_dax->dev;
	struct dax_region *dax_region;
	phys_addr_t phys;
	unsigned int fault_size = PAGE_SIZE;

	if (check_vma(dev_dax, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dev_dax->region;
	if (dax_region->align > PAGE_SIZE) {
		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
				dax_region->align, fault_size);
		return VM_FAULT_SIGBUS;
	}

	if (fault_size != dax_region->align)
		return VM_FAULT_SIGBUS;

	phys = dax_pgoff_to_phys(dev_dax, vmf->pgoff, PAGE_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", vmf->pgoff);
		return VM_FAULT_SIGBUS;
	}

	*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

	return vmf_insert_mixed(vmf->vma, vmf->address, *pfn);
}

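/*
 * Handle a PMD-sized fault: requires a devmap-capable (PFN_DEV|PFN_MAP)
 * region and a fault address whose PMD-aligned span lies fully within
 * the VMA. A fault smaller than the region alignment is refused; one
 * larger than the alignment falls back to smaller mappings.
 */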
static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf, pfn_t *pfn)
{
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct device *dev = &dev_dax->dev;
	struct dax_region *dax_region;
	phys_addr_t phys;
	pgoff_t pgoff;
	unsigned int fault_size = PMD_SIZE;

	if (check_vma(dev_dax, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dev_dax->region;
	if (dax_region->align > PMD_SIZE) {
		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
				dax_region->align, fault_size);
		return VM_FAULT_SIGBUS;
	}

	/* dax pmd mappings require pfn_t_devmap() */
	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
		dev_dbg(dev, "region lacks devmap flags\n");
		return VM_FAULT_SIGBUS;
	}

	if (fault_size < dax_region->align)
		return VM_FAULT_SIGBUS;
	else if (fault_size > dax_region->align)
		return VM_FAULT_FALLBACK;

	/* if we are outside of the VMA */
	if (pmd_addr < vmf->vma->vm_start ||
			(pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
		return VM_FAULT_SIGBUS;

	pgoff = linear_page_index(vmf->vma, pmd_addr);
	phys = dax_pgoff_to_phys(dev_dax, pgoff, PMD_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff);
		return VM_FAULT_SIGBUS;
	}

	*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

	return vmf_insert_pfn_pmd(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
}

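/*
 * The PUD-sized variant mirrors the PMD path, but is only available
 * when the architecture supports PUD-level transparent huge pages;
 * otherwise it unconditionally falls back.
 */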
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf, pfn_t *pfn)
{
	unsigned long pud_addr = vmf->address & PUD_MASK;
	struct device *dev = &dev_dax->dev;
	struct dax_region *dax_region;
	phys_addr_t phys;
	pgoff_t pgoff;
	unsigned int fault_size = PUD_SIZE;

	if (check_vma(dev_dax, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dev_dax->region;
	if (dax_region->align > PUD_SIZE) {
		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
				dax_region->align, fault_size);
		return VM_FAULT_SIGBUS;
	}

	/* dax pud mappings require pfn_t_devmap() */
	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
		dev_dbg(dev, "region lacks devmap flags\n");
		return VM_FAULT_SIGBUS;
	}

	if (fault_size < dax_region->align)
		return VM_FAULT_SIGBUS;
	else if (fault_size > dax_region->align)
		return VM_FAULT_FALLBACK;

	/* if we are outside of the VMA */
	if (pud_addr < vmf->vma->vm_start ||
			(pud_addr + PUD_SIZE) > vmf->vma->vm_end)
		return VM_FAULT_SIGBUS;

	pgoff = linear_page_index(vmf->vma, pud_addr);
	phys = dax_pgoff_to_phys(dev_dax, pgoff, PUD_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff);
		return VM_FAULT_SIGBUS;
	}

	*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

	return vmf_insert_pfn_pud(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
}
#else
static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf, pfn_t *pfn)
{
	return VM_FAULT_FALLBACK;
}
#endif /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

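/*
 * Common fault entry point: dispatch on the faulting entry size and,
 * on success, associate the backing pages with the file's
 * address_space (page->mapping / page->index) so the kernel can later
 * find the mapping for these pages (e.g. for memory-failure handling).
 */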
static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	struct file *filp = vmf->vma->vm_file;
	unsigned long fault_size;
	vm_fault_t rc = VM_FAULT_SIGBUS;
	int id;
	pfn_t pfn;
	struct dev_dax *dev_dax = filp->private_data;

	dev_dbg(&dev_dax->dev, "%s: %s (%#lx - %#lx) size = %d\n", current->comm,
			(vmf->flags & FAULT_FLAG_WRITE) ? "write" : "read",
			vmf->vma->vm_start, vmf->vma->vm_end, pe_size);

	id = dax_read_lock();
	switch (pe_size) {
	case PE_SIZE_PTE:
		fault_size = PAGE_SIZE;
		rc = __dev_dax_pte_fault(dev_dax, vmf, &pfn);
		break;
	case PE_SIZE_PMD:
		fault_size = PMD_SIZE;
		rc = __dev_dax_pmd_fault(dev_dax, vmf, &pfn);
		break;
	case PE_SIZE_PUD:
		fault_size = PUD_SIZE;
		rc = __dev_dax_pud_fault(dev_dax, vmf, &pfn);
		break;
	default:
		rc = VM_FAULT_SIGBUS;
	}

	if (rc == VM_FAULT_NOPAGE) {
		unsigned long i;
		pgoff_t pgoff;

		/*
		 * In the device-dax case the only possibility for a
		 * VM_FAULT_NOPAGE result is when device-dax capacity is
		 * mapped. No need to consider the zero page, or racing
		 * conflicting mappings.
		 */
		pgoff = linear_page_index(vmf->vma, vmf->address
				& ~(fault_size - 1));
		for (i = 0; i < fault_size / PAGE_SIZE; i++) {
			struct page *page;

			page = pfn_to_page(pfn_t_to_pfn(pfn) + i);
			if (page->mapping)
				continue;
			page->mapping = filp->f_mapping;
			page->index = pgoff + i;
		}
	}
	dax_read_unlock(id);

	return rc;
}

static vm_fault_t dev_dax_fault(struct vm_fault *vmf)
{
	return dev_dax_huge_fault(vmf, PE_SIZE_PTE);
}

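/*
 * Refuse to split a VMA at an address that is not aligned to the
 * region alignment, since that would leave a fragment that can no
 * longer be mapped at the required granularity.
 */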
static int dev_dax_split(struct vm_area_struct *vma, unsigned long addr)
{
	struct file *filp = vma->vm_file;
	struct dev_dax *dev_dax = filp->private_data;
	struct dax_region *dax_region = dev_dax->region;

	if (!IS_ALIGNED(addr, dax_region->align))
		return -EINVAL;
	return 0;
}

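/* report the effective page size of this mapping: the region alignment */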
static unsigned long dev_dax_pagesize(struct vm_area_struct *vma)
{
	struct file *filp = vma->vm_file;
	struct dev_dax *dev_dax = filp->private_data;
	struct dax_region *dax_region = dev_dax->region;

	return dax_region->align;
}

static const struct vm_operations_struct dax_vm_ops = {
	.fault = dev_dax_fault,
	.huge_fault = dev_dax_huge_fault,
	.split = dev_dax_split,
	.pagesize = dev_dax_pagesize,
};

static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct dev_dax *dev_dax = filp->private_data;
	int rc, id;

	dev_dbg(&dev_dax->dev, "trace\n");

	/*
	 * We lock to check dax_dev liveness and will re-check at
	 * fault time.
	 */
	id = dax_read_lock();
	rc = check_vma(dev_dax, vma, __func__);
	dax_read_unlock(id);
	if (rc)
		return rc;

	vma->vm_ops = &dax_vm_ops;
	vma->vm_flags |= VM_HUGEPAGE;
	return 0;
}

/* return an unmapped area aligned to the dax region specified alignment */
static unsigned long dax_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags)
{
	unsigned long off, off_end, off_align, len_align, addr_align, align;
	struct dev_dax *dev_dax = filp ? filp->private_data : NULL;
	struct dax_region *dax_region;

	if (!dev_dax || addr)
		goto out;

	dax_region = dev_dax->region;
	align = dax_region->align;
	off = pgoff << PAGE_SHIFT;
	off_end = off + len;
	off_align = round_up(off, align);

	if ((off_end <= off_align) || ((off_end - off_align) < align))
		goto out;

	len_align = len + align;
	if ((off + len_align) < off)
		goto out;

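	/*
	 * Ask for an extra alignment unit of slack, then shift the
	 * result so the virtual address is congruent with the file
	 * offset modulo the region alignment; this preserves the
	 * ability to fault in huge mappings.
	 */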
	addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
			pgoff, flags);
	if (!IS_ERR_VALUE(addr_align)) {
		addr_align += (off - addr_align) & (align - 1);
		return addr_align;
	}
 out:
	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}

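/*
 * Device-dax pages have no pagecache writeback state to manage, so
 * dirty and invalidate operations are no-ops.
 */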
static const struct address_space_operations dev_dax_aops = {
	.set_page_dirty = noop_set_page_dirty,
	.invalidatepage = noop_invalidatepage,
};

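/*
 * Redirect the opened inode's mapping to the dax_device inode's
 * address_space so all openers share a single mapping, and mark the
 * inode as DAX.
 */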
static int dax_open(struct inode *inode, struct file *filp)
{
	struct dax_device *dax_dev = inode_dax(inode);
	struct inode *__dax_inode = dax_inode(dax_dev);
	struct dev_dax *dev_dax = dax_get_private(dax_dev);

	dev_dbg(&dev_dax->dev, "trace\n");
	inode->i_mapping = __dax_inode->i_mapping;
	inode->i_mapping->host = __dax_inode;
	inode->i_mapping->a_ops = &dev_dax_aops;
	filp->f_mapping = inode->i_mapping;
	filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
	filp->private_data = dev_dax;
	inode->i_flags = S_DAX;

	return 0;
}

static int dax_release(struct inode *inode, struct file *filp)
{
	struct dev_dax *dev_dax = filp->private_data;

	dev_dbg(&dev_dax->dev, "trace\n");
	return 0;
}

static const struct file_operations dax_fops = {
	.llseek = noop_llseek,
	.owner = THIS_MODULE,
	.open = dax_open,
	.release = dax_release,
	.get_unmapped_area = dax_get_unmapped_area,
	.mmap = dax_mmap,
	.mmap_supported_flags = MAP_SYNC,
};

static void dev_dax_cdev_del(void *cdev)
{
	cdev_del(cdev);
}

static void dev_dax_kill(void *dev_dax)
{
	kill_dev_dax(dev_dax);
}

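/*
 * Bind a device-dax instance: claim the region's physical range,
 * create struct pages for it via devm_memremap_pages(), and publish
 * the character device. All setup is devm-managed, so remove() has
 * nothing to unwind by hand.
 */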
int dev_dax_probe(struct device *dev)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_device *dax_dev = dev_dax->dax_dev;
	struct resource *res = &dev_dax->region->res;
	struct inode *inode;
	struct cdev *cdev;
	void *addr;
	int rc;

	/* 1:1 map region resource range to device-dax instance range */
	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

	dev_dax->pgmap.type = MEMORY_DEVICE_DEVDAX;
	addr = devm_memremap_pages(dev, &dev_dax->pgmap);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	inode = dax_inode(dax_dev);
	cdev = inode->i_cdev;
	cdev_init(cdev, &dax_fops);
	if (dev->class) {
		/* for the CONFIG_DEV_DAX_PMEM_COMPAT case */
		cdev->owner = dev->parent->driver->owner;
	} else
		cdev->owner = dev->driver->owner;
	cdev_set_parent(cdev, &dev->kobj);
	rc = cdev_add(cdev, dev->devt, 1);
	if (rc)
		return rc;

	rc = devm_add_action_or_reset(dev, dev_dax_cdev_del, cdev);
	if (rc)
		return rc;

	run_dax(dax_dev);
	return devm_add_action_or_reset(dev, dev_dax_kill, dev_dax);
}
EXPORT_SYMBOL_GPL(dev_dax_probe);

static int dev_dax_remove(struct device *dev)
{
	/* all probe actions are unwound by devm */
	return 0;
}

static struct dax_device_driver device_dax_driver = {
	.drv = {
		.probe = dev_dax_probe,
		.remove = dev_dax_remove,
	},
	.match_always = 1,
};

static int __init dax_init(void)
{
	return dax_driver_register(&device_dax_driver);
}

static void __exit dax_exit(void)
{
	dax_driver_unregister(&device_dax_driver);
}

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
module_init(dax_init);
module_exit(dax_exit);
MODULE_ALIAS_DAX_DEVICE(0);