// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016-2018 Intel Corporation. All rights reserved. */
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include "dax-private.h"
#include "bus.h"

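/*
 * Validate that a VMA is suitable for device-DAX: the backing dax_device
 * must still be alive, the mapping must be shared (no MAP_PRIVATE), the
 * VMA bounds must honor the device alignment, and the VMA must be DAX
 * capable.
 */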
static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
		const char *func)
{
	struct device *dev = &dev_dax->dev;
	unsigned long mask;

	if (!dax_alive(dev_dax->dax_dev))
		return -ENXIO;

	/* prevent private mappings from being established */
	if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, attempted private mapping\n",
				current->comm, func);
		return -EINVAL;
	}

	mask = dev_dax->align - 1;
	if (vma->vm_start & mask || vma->vm_end & mask) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
				current->comm, func, vma->vm_start, vma->vm_end,
				mask);
		return -EINVAL;
	}

	if (!vma_is_dax(vma)) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, vma is not DAX capable\n",
				current->comm, func);
		return -EINVAL;
	}

	return 0;
}

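/*
 * Translate a device-relative page offset to a physical address by
 * walking the device's ranges. Returns -1 if the offset falls outside
 * every range, or if @size would extend past the containing range.
 */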
/* see "strong" declaration in tools/testing/nvdimm/dax-dev.c */
__weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
		unsigned long size)
{
	int i;

	for (i = 0; i < dev_dax->nr_range; i++) {
		struct dev_dax_range *dax_range = &dev_dax->ranges[i];
		struct range *range = &dax_range->range;
		unsigned long long pgoff_end;
		phys_addr_t phys;

		pgoff_end = dax_range->pgoff + PHYS_PFN(range_len(range)) - 1;
		if (pgoff < dax_range->pgoff || pgoff > pgoff_end)
			continue;
		phys = PFN_PHYS(pgoff - dax_range->pgoff) + range->start;
		if (phys + size - 1 <= range->end)
			return phys;
		break;
	}
	return -1;
}

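/*
 * Record the device file's address_space and index on the pages backing
 * a fault so they can be looked up later (e.g. by memory-failure
 * handling). With a compound pagemap (vmemmap_shift set), only the head
 * page is tagged.
 */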
static void dax_set_mapping(struct vm_fault *vmf, pfn_t pfn,
			      unsigned long fault_size)
{
	unsigned long i, nr_pages = fault_size / PAGE_SIZE;
	struct file *filp = vmf->vma->vm_file;
	struct dev_dax *dev_dax = filp->private_data;
	pgoff_t pgoff;

	/* mapping is only set on the head */
	if (dev_dax->pgmap->vmemmap_shift)
		nr_pages = 1;

	pgoff = linear_page_index(vmf->vma,
			ALIGN(vmf->address, fault_size));

	for (i = 0; i < nr_pages; i++) {
		struct page *page = pfn_to_page(pfn_t_to_pfn(pfn) + i);

		page = compound_head(page);
		if (page->mapping)
			continue;

		page->mapping = filp->f_mapping;
		page->index = pgoff + i;
	}
}

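/*
 * Handle a PAGE_SIZE fault: only valid when the device alignment is
 * exactly PAGE_SIZE, otherwise the fault is refused with SIGBUS.
 */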
static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf)
{
	struct device *dev = &dev_dax->dev;
	phys_addr_t phys;
	pfn_t pfn;
	unsigned int fault_size = PAGE_SIZE;

	if (check_vma(dev_dax, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	if (dev_dax->align > PAGE_SIZE) {
		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
			dev_dax->align, fault_size);
		return VM_FAULT_SIGBUS;
	}

	if (fault_size != dev_dax->align)
		return VM_FAULT_SIGBUS;

	phys = dax_pgoff_to_phys(dev_dax, vmf->pgoff, PAGE_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", vmf->pgoff);
		return VM_FAULT_SIGBUS;
	}

	pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);

	dax_set_mapping(vmf, pfn, fault_size);

	return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
}

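/*
 * Handle a PMD-sized fault: SIGBUS if the device alignment exceeds
 * PMD_SIZE or the region falls outside the VMA, fall back to smaller
 * faults if the alignment is below PMD_SIZE, otherwise map a PMD-sized
 * device pfn.
 */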
static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf)
{
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct device *dev = &dev_dax->dev;
	phys_addr_t phys;
	pgoff_t pgoff;
	pfn_t pfn;
	unsigned int fault_size = PMD_SIZE;

	if (check_vma(dev_dax, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	if (dev_dax->align > PMD_SIZE) {
		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
			dev_dax->align, fault_size);
		return VM_FAULT_SIGBUS;
	}

	if (fault_size < dev_dax->align)
		return VM_FAULT_SIGBUS;
	else if (fault_size > dev_dax->align)
		return VM_FAULT_FALLBACK;

	/* if we are outside of the VMA */
	if (pmd_addr < vmf->vma->vm_start ||
			(pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
		return VM_FAULT_SIGBUS;

	pgoff = linear_page_index(vmf->vma, pmd_addr);
	phys = dax_pgoff_to_phys(dev_dax, pgoff, PMD_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff);
		return VM_FAULT_SIGBUS;
	}

	pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);

	dax_set_mapping(vmf, pfn, fault_size);

	return vmf_insert_pfn_pmd(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE);
}

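/*
 * PUD-sized fault handler, mirroring the PMD case. When the architecture
 * lacks PUD transparent-huge-page support this degrades to
 * VM_FAULT_FALLBACK.
 */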
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf)
{
	unsigned long pud_addr = vmf->address & PUD_MASK;
	struct device *dev = &dev_dax->dev;
	phys_addr_t phys;
	pgoff_t pgoff;
	pfn_t pfn;
	unsigned int fault_size = PUD_SIZE;

	if (check_vma(dev_dax, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	if (dev_dax->align > PUD_SIZE) {
		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
			dev_dax->align, fault_size);
		return VM_FAULT_SIGBUS;
	}

	if (fault_size < dev_dax->align)
		return VM_FAULT_SIGBUS;
	else if (fault_size > dev_dax->align)
		return VM_FAULT_FALLBACK;

	/* if we are outside of the VMA */
	if (pud_addr < vmf->vma->vm_start ||
			(pud_addr + PUD_SIZE) > vmf->vma->vm_end)
		return VM_FAULT_SIGBUS;

	pgoff = linear_page_index(vmf->vma, pud_addr);
	phys = dax_pgoff_to_phys(dev_dax, pgoff, PUD_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff);
		return VM_FAULT_SIGBUS;
	}

	pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);

	dax_set_mapping(vmf, pfn, fault_size);

	return vmf_insert_pfn_pud(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE);
}
#else
static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf)
{
	return VM_FAULT_FALLBACK;
}
#endif /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

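/*
 * Top-level fault handler: dispatch to the PTE, PMD, or PUD handler for
 * the requested page entry size while holding the dax read lock, so the
 * device cannot be unregistered mid-fault.
 */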
static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	struct file *filp = vmf->vma->vm_file;
	vm_fault_t rc = VM_FAULT_SIGBUS;
	int id;
	struct dev_dax *dev_dax = filp->private_data;

	dev_dbg(&dev_dax->dev, "%s: %s (%#lx - %#lx) size = %d\n", current->comm,
			(vmf->flags & FAULT_FLAG_WRITE) ? "write" : "read",
			vmf->vma->vm_start, vmf->vma->vm_end, pe_size);

	id = dax_read_lock();
	switch (pe_size) {
	case PE_SIZE_PTE:
		rc = __dev_dax_pte_fault(dev_dax, vmf);
		break;
	case PE_SIZE_PMD:
		rc = __dev_dax_pmd_fault(dev_dax, vmf);
		break;
	case PE_SIZE_PUD:
		rc = __dev_dax_pud_fault(dev_dax, vmf);
		break;
	default:
		rc = VM_FAULT_SIGBUS;
	}

	dax_read_unlock(id);

	return rc;
}

static vm_fault_t dev_dax_fault(struct vm_fault *vmf)
{
	return dev_dax_huge_fault(vmf, PE_SIZE_PTE);
}

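/* Only permit VMA splits on boundaries aligned to the device alignment. */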
static int dev_dax_may_split(struct vm_area_struct *vma, unsigned long addr)
{
	struct file *filp = vma->vm_file;
	struct dev_dax *dev_dax = filp->private_data;

	if (!IS_ALIGNED(addr, dev_dax->align))
		return -EINVAL;
	return 0;
}

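/* Report the device alignment as the effective page size of the mapping. */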
static unsigned long dev_dax_pagesize(struct vm_area_struct *vma)
{
	struct file *filp = vma->vm_file;
	struct dev_dax *dev_dax = filp->private_data;

	return dev_dax->align;
}

static const struct vm_operations_struct dax_vm_ops = {
	.fault = dev_dax_fault,
	.huge_fault = dev_dax_huge_fault,
	.may_split = dev_dax_may_split,
	.pagesize = dev_dax_pagesize,
};

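/*
 * Validate the VMA against device constraints at mmap() time (re-checked
 * at fault time) and install the device-DAX vm_operations.
 */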
static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct dev_dax *dev_dax = filp->private_data;
	int rc, id;

	dev_dbg(&dev_dax->dev, "trace\n");

	/*
	 * We lock to check dax_dev liveness and will re-check at
	 * fault time.
	 */
	id = dax_read_lock();
	rc = check_vma(dev_dax, vma, __func__);
	dax_read_unlock(id);
	if (rc)
		return rc;

	vma->vm_ops = &dax_vm_ops;
	vma->vm_flags |= VM_HUGEPAGE;
	return 0;
}

/* return an unmapped area aligned to the dax region specified alignment */
static unsigned long dax_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags)
{
	unsigned long off, off_end, off_align, len_align, addr_align, align;
	struct dev_dax *dev_dax = filp ? filp->private_data : NULL;

	if (!dev_dax || addr)
		goto out;

	align = dev_dax->align;
	off = pgoff << PAGE_SHIFT;
	off_end = off + len;
	off_align = round_up(off, align);

	if ((off_end <= off_align) || ((off_end - off_align) < align))
		goto out;

	len_align = len + align;
	if ((off + len_align) < off)
		goto out;

	addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
			pgoff, flags);
	if (!IS_ERR_VALUE(addr_align)) {
		addr_align += (off - addr_align) & (align - 1);
		return addr_align;
	}
 out:
	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}

static const struct address_space_operations dev_dax_aops = {
	.set_page_dirty		= __set_page_dirty_no_writeback,
	.invalidatepage		= noop_invalidatepage,
};

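/*
 * Point the chardev inode's mapping at the dax_device inode so that all
 * openers share a single address_space, and stash the dev_dax for the
 * fault handlers.
 */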
static int dax_open(struct inode *inode, struct file *filp)
{
	struct dax_device *dax_dev = inode_dax(inode);
	struct inode *__dax_inode = dax_inode(dax_dev);
	struct dev_dax *dev_dax = dax_get_private(dax_dev);

	dev_dbg(&dev_dax->dev, "trace\n");
	inode->i_mapping = __dax_inode->i_mapping;
	inode->i_mapping->host = __dax_inode;
	inode->i_mapping->a_ops = &dev_dax_aops;
	filp->f_mapping = inode->i_mapping;
	filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
	filp->f_sb_err = file_sample_sb_err(filp);
	filp->private_data = dev_dax;
	inode->i_flags = S_DAX;

	return 0;
}

static int dax_release(struct inode *inode, struct file *filp)
{
	struct dev_dax *dev_dax = filp->private_data;

	dev_dbg(&dev_dax->dev, "trace\n");
	return 0;
}

static const struct file_operations dax_fops = {
	.llseek = noop_llseek,
	.owner = THIS_MODULE,
	.open = dax_open,
	.release = dax_release,
	.get_unmapped_area = dax_get_unmapped_area,
	.mmap = dax_mmap,
	.mmap_supported_flags = MAP_SYNC,
};

static void dev_dax_cdev_del(void *cdev)
{
	cdev_del(cdev);
}

static void dev_dax_kill(void *dev_dax)
{
	kill_dev_dax(dev_dax);
}

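/*
 * Device probe: set up (or validate) the dev_pagemap covering the device
 * ranges, reserve and map them via devm_memremap_pages(), then register
 * the character device that exposes the DAX mapping interface.
 */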
int dev_dax_probe(struct dev_dax *dev_dax)
{
	struct dax_device *dax_dev = dev_dax->dax_dev;
	struct device *dev = &dev_dax->dev;
	struct dev_pagemap *pgmap;
	struct inode *inode;
	struct cdev *cdev;
	void *addr;
	int rc, i;

	if (static_dev_dax(dev_dax)) {
		if (dev_dax->nr_range > 1) {
			dev_warn(dev,
				"static pgmap / multi-range device conflict\n");
			return -EINVAL;
		}

		pgmap = dev_dax->pgmap;
	} else {
		if (dev_dax->pgmap) {
			dev_warn(dev,
				"dynamic-dax with pre-populated page map\n");
			return -EINVAL;
		}

		pgmap = devm_kzalloc(dev,
			struct_size(pgmap, ranges, dev_dax->nr_range - 1),
			GFP_KERNEL);
		if (!pgmap)
			return -ENOMEM;

		pgmap->nr_range = dev_dax->nr_range;
		dev_dax->pgmap = pgmap;

		for (i = 0; i < dev_dax->nr_range; i++) {
			struct range *range = &dev_dax->ranges[i].range;
			pgmap->ranges[i] = *range;
		}
	}

	for (i = 0; i < dev_dax->nr_range; i++) {
		struct range *range = &dev_dax->ranges[i].range;

		if (!devm_request_mem_region(dev, range->start,
					range_len(range), dev_name(dev))) {
			dev_warn(dev, "mapping%d: %#llx-%#llx could not reserve range\n",
					i, range->start, range->end);
			return -EBUSY;
		}
	}

	pgmap->type = MEMORY_DEVICE_GENERIC;
	if (dev_dax->align > PAGE_SIZE)
		pgmap->vmemmap_shift =
			order_base_2(dev_dax->align >> PAGE_SHIFT);
	addr = devm_memremap_pages(dev, pgmap);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	inode = dax_inode(dax_dev);
	cdev = inode->i_cdev;
	cdev_init(cdev, &dax_fops);
	cdev->owner = dev->driver->owner;
	cdev_set_parent(cdev, &dev->kobj);
	rc = cdev_add(cdev, dev->devt, 1);
	if (rc)
		return rc;

	rc = devm_add_action_or_reset(dev, dev_dax_cdev_del, cdev);
	if (rc)
		return rc;

	run_dax(dax_dev);
	return devm_add_action_or_reset(dev, dev_dax_kill, dev_dax);
}
EXPORT_SYMBOL_GPL(dev_dax_probe);

static struct dax_device_driver device_dax_driver = {
	.probe = dev_dax_probe,
	/* all probe actions are unwound by devm, so .remove isn't necessary */
	.match_always = 1,
};

static int __init dax_init(void)
{
	return dax_driver_register(&device_dax_driver);
}

static void __exit dax_exit(void)
{
	dax_driver_unregister(&device_dax_driver);
}

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
module_init(dax_init);
module_exit(dax_exit);
MODULE_ALIAS_DAX_DEVICE(0);