// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 */
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include <linux/genhd.h>
#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include "dax-private.h"

/**
 * struct dax_device - anchor object for dax services
 * @list: entry in the global dax_host_list, hashed by @host
 * @inode: core vfs inode
 * @cdev: optional character interface for "device dax"
 * @host: optional name for lookups where the device path is not available
 * @private: dax driver private data
 * @flags: state and boolean properties
 * @ops: operations backing dax_direct_access() and friends
 */
struct dax_device {
	struct hlist_node list;
	struct inode inode;
	struct cdev cdev;
	const char *host;
	void *private;
	unsigned long flags;
	const struct dax_operations *ops;
};

static dev_t dax_devt;
DEFINE_STATIC_SRCU(dax_srcu);
static struct vfsmount *dax_mnt;
static DEFINE_IDA(dax_minor_ida);
static struct kmem_cache *dax_cache __read_mostly;
static struct super_block *dax_superblock __read_mostly;

#define DAX_HASH_SIZE (PAGE_SIZE / sizeof(struct hlist_head))
static struct hlist_head dax_host_list[DAX_HASH_SIZE];
static DEFINE_SPINLOCK(dax_host_lock);

int dax_read_lock(void)
{
	return srcu_read_lock(&dax_srcu);
}
EXPORT_SYMBOL_GPL(dax_read_lock);

void dax_read_unlock(int id)
{
	srcu_read_unlock(&dax_srcu, id);
}
EXPORT_SYMBOL_GPL(dax_read_unlock);

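/*
 * Example (illustrative sketch, not additional API): every dax operation
 * is bracketed by dax_read_lock()/dax_read_unlock() so that kill_dax()
 * can use synchronize_srcu() to wait out in-flight callers. The same
 * pattern appears in dax_get_by_host() below:
 *
 *	int id = dax_read_lock();
 *
 *	if (dax_alive(dax_dev))
 *		...use dax_direct_access(), dax_copy_from_iter(), etc...
 *	dax_read_unlock(id);
 */
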
static int dax_host_hash(const char *host)
{
	return hashlen_hash(hashlen_string("DAX", host)) % DAX_HASH_SIZE;
}

/**
 * dax_get_by_host() - temporary lookup mechanism for filesystem-dax
 * @host: alternate name for the device registered by a dax driver
 */
static struct dax_device *dax_get_by_host(const char *host)
{
	struct dax_device *dax_dev, *found = NULL;
	int hash, id;

	if (!host)
		return NULL;

	hash = dax_host_hash(host);

	id = dax_read_lock();
	spin_lock(&dax_host_lock);
	hlist_for_each_entry(dax_dev, &dax_host_list[hash], list) {
		if (!dax_alive(dax_dev) ||
		    strcmp(host, dax_dev->host) != 0)
			continue;

		if (igrab(&dax_dev->inode))
			found = dax_dev;
		break;
	}
	spin_unlock(&dax_host_lock);
	dax_read_unlock(id);

	return found;
}

#ifdef CONFIG_BLOCK
#include <linux/blkdev.h>

int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size,
		pgoff_t *pgoff)
{
	sector_t start_sect = bdev ? get_start_sect(bdev) : 0;
	phys_addr_t phys_off = (start_sect + sector) * 512;

	if (pgoff)
		*pgoff = PHYS_PFN(phys_off);
	if (phys_off % PAGE_SIZE || size % PAGE_SIZE)
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL(bdev_dax_pgoff);

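/*
 * Worked example (for illustration only): with 4K pages and 512-byte
 * sectors, a partition starting at sector 2048 and a relative sector of
 * 16 gives phys_off = (2048 + 16) * 512 = 1056768, hence *pgoff =
 * PHYS_PFN(1056768) = 258, and the alignment check passes because
 * 1056768 is a multiple of PAGE_SIZE.
 */
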
#if IS_ENABLED(CONFIG_FS_DAX)
struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
	if (!blk_queue_dax(bdev->bd_disk->queue))
		return NULL;
	return dax_get_by_host(bdev->bd_disk->disk_name);
}
EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);

bool generic_fsdax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t sectors)
{
	bool dax_enabled = false;
	pgoff_t pgoff, pgoff_end;
	void *kaddr, *end_kaddr;
	pfn_t pfn, end_pfn;
	sector_t last_page;
	long len, len2;
	int err, id;

	if (blocksize != PAGE_SIZE) {
		pr_info("%pg: error: unsupported blocksize for dax\n", bdev);
		return false;
	}

	if (!dax_dev) {
		pr_debug("%pg: error: dax unsupported by block device\n", bdev);
		return false;
	}

	err = bdev_dax_pgoff(bdev, start, PAGE_SIZE, &pgoff);
	if (err) {
		pr_info("%pg: error: unaligned partition for dax\n", bdev);
		return false;
	}

	last_page = PFN_DOWN((start + sectors - 1) * 512) * PAGE_SIZE / 512;
	err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end);
	if (err) {
		pr_info("%pg: error: unaligned partition for dax\n", bdev);
		return false;
	}

	id = dax_read_lock();
	len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
	len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);

	if (len < 1 || len2 < 1) {
		pr_info("%pg: error: dax access failed (%ld)\n",
				bdev, len < 1 ? len : len2);
		dax_read_unlock(id);
		return false;
	}

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED) && pfn_t_special(pfn)) {
		/*
		 * An arch that has enabled the pmem api should also
		 * have its drivers support pfn_t_devmap()
		 *
		 * This is a developer warning and should not trigger in
		 * production. dax_flush() will crash since it depends
		 * on being able to do (page_address(pfn_to_page())).
		 */
		WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API));
		dax_enabled = true;
	} else if (pfn_t_devmap(pfn) && pfn_t_devmap(end_pfn)) {
		struct dev_pagemap *pgmap, *end_pgmap;

		pgmap = get_dev_pagemap(pfn_t_to_pfn(pfn), NULL);
		end_pgmap = get_dev_pagemap(pfn_t_to_pfn(end_pfn), NULL);
		if (pgmap && pgmap == end_pgmap &&
		    pgmap->type == MEMORY_DEVICE_FS_DAX &&
		    pfn_t_to_page(pfn)->pgmap == pgmap &&
		    pfn_t_to_page(end_pfn)->pgmap == pgmap &&
		    pfn_t_to_pfn(pfn) == PHYS_PFN(__pa(kaddr)) &&
		    pfn_t_to_pfn(end_pfn) == PHYS_PFN(__pa(end_kaddr)))
			dax_enabled = true;
		put_dev_pagemap(pgmap);
		put_dev_pagemap(end_pgmap);
	}
	dax_read_unlock(id);

	if (!dax_enabled) {
		pr_info("%pg: error: dax support not enabled\n", bdev);
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(generic_fsdax_supported);

bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
		int blocksize, sector_t start, sector_t len)
{
	bool ret = false;
	int id;

	if (!dax_dev)
		return false;

	id = dax_read_lock();
	if (dax_alive(dax_dev) && dax_dev->ops->dax_supported)
		ret = dax_dev->ops->dax_supported(dax_dev, bdev, blocksize,
				start, len);
	dax_read_unlock(id);
	return ret;
}
EXPORT_SYMBOL_GPL(dax_supported);
#endif /* CONFIG_FS_DAX */
#endif /* CONFIG_BLOCK */

enum dax_device_flags {
	/* !alive + rcu grace period == no new operations / mappings */
	DAXDEV_ALIVE,
	/* gate whether dax_flush() calls the low level flush routine */
	DAXDEV_WRITE_CACHE,
	/* flag to check if device supports synchronous flush */
	DAXDEV_SYNC,
};

static ssize_t write_cache_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));
	ssize_t rc;

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return -ENXIO;

	rc = sprintf(buf, "%d\n", !!dax_write_cache_enabled(dax_dev));
	put_dax(dax_dev);
	return rc;
}

static ssize_t write_cache_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool write_cache;
	int rc = strtobool(buf, &write_cache);
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return -ENXIO;

	if (rc)
		len = rc;
	else
		dax_write_cache(dax_dev, write_cache);

	put_dax(dax_dev);
	return len;
}
static DEVICE_ATTR_RW(write_cache);

static umode_t dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return 0;

#ifndef CONFIG_ARCH_HAS_PMEM_API
	if (a == &dev_attr_write_cache.attr)
		return 0;
#endif
	return a->mode;
}

static struct attribute *dax_attributes[] = {
	&dev_attr_write_cache.attr,
	NULL,
};

struct attribute_group dax_attribute_group = {
	.name = "dax",
	.attrs = dax_attributes,
	.is_visible = dax_visible,
};
EXPORT_SYMBOL_GPL(dax_attribute_group);

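/*
 * Example (illustrative; the exact sysfs path depends on the consumer):
 * a driver that attaches dax_attribute_group to its disk device, as the
 * pmem driver typically does, exposes the flag as e.g.
 * /sys/block/pmem0/dax/write_cache, toggled from userspace with:
 *
 *	echo 1 > /sys/block/pmem0/dax/write_cache
 */
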
/**
 * dax_direct_access() - translate a device pgoff to an absolute pfn
 * @dax_dev: a dax_device instance representing the logical memory range
 * @pgoff: offset in pages from the start of the device to translate
 * @nr_pages: number of consecutive pages caller can handle relative to @pfn
 * @kaddr: output parameter that returns a virtual address mapping of pfn
 * @pfn: output parameter that returns an absolute pfn translation of @pgoff
 *
 * Return: negative errno if an error occurs, otherwise the number of
 * pages accessible at the device relative to @pgoff.
 */
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
		void **kaddr, pfn_t *pfn)
{
	long avail;

	if (!dax_dev)
		return -EOPNOTSUPP;

	if (!dax_alive(dax_dev))
		return -ENXIO;

	if (nr_pages < 0)
		return -EINVAL;

	avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages,
			kaddr, pfn);
	if (!avail)
		return -ERANGE;
	return min(avail, nr_pages);
}
EXPORT_SYMBOL_GPL(dax_direct_access);

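/*
 * Example (illustrative sketch only; names and sizes are hypothetical):
 * a caller resolves one page worth of dax memory to a kernel virtual
 * address under the SRCU read lock before touching it:
 *
 *	void *kaddr;
 *	pfn_t pfn;
 *	long nr;
 *	int id;
 *
 *	id = dax_read_lock();
 *	nr = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
 *	if (nr < 0) {
 *		dax_read_unlock(id);
 *		return nr;
 *	}
 *	memcpy(buf, kaddr, PAGE_SIZE);	// load directly from the device
 *	dax_read_unlock(id);
 */
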
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i)
{
	if (!dax_alive(dax_dev))
		return 0;

	return dax_dev->ops->copy_from_iter(dax_dev, pgoff, addr, bytes, i);
}
EXPORT_SYMBOL_GPL(dax_copy_from_iter);

size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i)
{
	if (!dax_alive(dax_dev))
		return 0;

	return dax_dev->ops->copy_to_iter(dax_dev, pgoff, addr, bytes, i);
}
EXPORT_SYMBOL_GPL(dax_copy_to_iter);

int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
		size_t nr_pages)
{
	if (!dax_alive(dax_dev))
		return -ENXIO;
	/*
	 * There are no callers that want to zero more than one page as of now.
	 * Once users are there, this check can be removed after the
	 * device mapper code has been updated to split ranges across targets.
	 */
	if (nr_pages != 1)
		return -EIO;

	return dax_dev->ops->zero_page_range(dax_dev, pgoff, nr_pages);
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);

#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
	if (unlikely(!dax_write_cache_enabled(dax_dev)))
		return;

	arch_wb_cache_pmem(addr, size);
}
#else
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
}
#endif
EXPORT_SYMBOL_GPL(dax_flush);

void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
	if (wc)
		set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
	else
		clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache);

bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
	return test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache_enabled);

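/*
 * Example (illustrative sketch; whether this is needed depends on the
 * platform): a driver for media fronted by a volatile CPU cache enables
 * write-cache maintenance at setup time, and writeback paths then flush
 * after dirtying a dax mapping:
 *
 *	dax_write_cache(dax_dev, true);
 *	...
 *	dax_flush(dax_dev, kaddr, size);	// no-op if cache disabled
 */
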
bool __dax_synchronous(struct dax_device *dax_dev)
{
	return test_bit(DAXDEV_SYNC, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(__dax_synchronous);

void __set_dax_synchronous(struct dax_device *dax_dev)
{
	set_bit(DAXDEV_SYNC, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(__set_dax_synchronous);

bool dax_alive(struct dax_device *dax_dev)
{
	lockdep_assert_held(&dax_srcu);
	return test_bit(DAXDEV_ALIVE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_alive);

/*
 * Note, RCU is not protecting the liveness of dax_dev; RCU is ensuring
 * that any fault handlers or operations that might have seen
 * dax_alive() have completed. Any operations that start after
 * synchronize_srcu() has run will abort upon seeing !dax_alive().
 */
void kill_dax(struct dax_device *dax_dev)
{
	if (!dax_dev)
		return;

	clear_bit(DAXDEV_ALIVE, &dax_dev->flags);

	synchronize_srcu(&dax_srcu);

	spin_lock(&dax_host_lock);
	hlist_del_init(&dax_dev->list);
	spin_unlock(&dax_host_lock);
}
EXPORT_SYMBOL_GPL(kill_dax);

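/*
 * Example (illustrative sketch): a driver tears down its dax_device by
 * killing it first and only then dropping the final reference;
 * dax_destroy_inode() below warns if that order is violated:
 *
 *	kill_dax(dax_dev);
 *	put_dax(dax_dev);
 */
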
void run_dax(struct dax_device *dax_dev)
{
	set_bit(DAXDEV_ALIVE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(run_dax);

static struct inode *dax_alloc_inode(struct super_block *sb)
{
	struct dax_device *dax_dev;
	struct inode *inode;

	dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL);
	if (!dax_dev)
		return NULL;

	inode = &dax_dev->inode;
	inode->i_rdev = 0;
	return inode;
}

static struct dax_device *to_dax_dev(struct inode *inode)
{
	return container_of(inode, struct dax_device, inode);
}

static void dax_free_inode(struct inode *inode)
{
	struct dax_device *dax_dev = to_dax_dev(inode);

	kfree(dax_dev->host);
	dax_dev->host = NULL;
	if (inode->i_rdev)
		ida_simple_remove(&dax_minor_ida, iminor(inode));
	kmem_cache_free(dax_cache, dax_dev);
}

static void dax_destroy_inode(struct inode *inode)
{
	struct dax_device *dax_dev = to_dax_dev(inode);

	WARN_ONCE(test_bit(DAXDEV_ALIVE, &dax_dev->flags),
			"kill_dax() must be called before final iput()\n");
}

static const struct super_operations dax_sops = {
	.statfs = simple_statfs,
	.alloc_inode = dax_alloc_inode,
	.destroy_inode = dax_destroy_inode,
	.free_inode = dax_free_inode,
	.drop_inode = generic_delete_inode,
};

static int dax_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, DAXFS_MAGIC);

	if (!ctx)
		return -ENOMEM;
	ctx->ops = &dax_sops;
	return 0;
}

static struct file_system_type dax_fs_type = {
	.name = "dax",
	.init_fs_context = dax_init_fs_context,
	.kill_sb = kill_anon_super,
};

static int dax_test(struct inode *inode, void *data)
{
	dev_t devt = *(dev_t *) data;

	return inode->i_rdev == devt;
}

static int dax_set(struct inode *inode, void *data)
{
	dev_t devt = *(dev_t *) data;

	inode->i_rdev = devt;
	return 0;
}

static struct dax_device *dax_dev_get(dev_t devt)
{
	struct dax_device *dax_dev;
	struct inode *inode;

	inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
			dax_test, dax_set, &devt);

	if (!inode)
		return NULL;

	dax_dev = to_dax_dev(inode);
	if (inode->i_state & I_NEW) {
		set_bit(DAXDEV_ALIVE, &dax_dev->flags);
		inode->i_cdev = &dax_dev->cdev;
		inode->i_mode = S_IFCHR;
		inode->i_flags = S_DAX;
		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
		unlock_new_inode(inode);
	}

	return dax_dev;
}

static void dax_add_host(struct dax_device *dax_dev, const char *host)
{
	int hash;

	/*
	 * Unconditionally init dax_dev since it's coming from a
	 * non-zeroed slab cache
	 */
	INIT_HLIST_NODE(&dax_dev->list);
	dax_dev->host = host;
	if (!host)
		return;

	hash = dax_host_hash(host);
	spin_lock(&dax_host_lock);
	hlist_add_head(&dax_dev->list, &dax_host_list[hash]);
	spin_unlock(&dax_host_lock);
}

struct dax_device *alloc_dax(void *private, const char *__host,
		const struct dax_operations *ops, unsigned long flags)
{
	struct dax_device *dax_dev;
	const char *host;
	dev_t devt;
	int minor;

	if (ops && !ops->zero_page_range) {
		pr_debug("%s: error: device does not provide dax operation zero_page_range()\n",
				__host ? __host : "Unknown");
		return ERR_PTR(-EINVAL);
	}

	host = kstrdup(__host, GFP_KERNEL);
	if (__host && !host)
		return ERR_PTR(-ENOMEM);

	minor = ida_simple_get(&dax_minor_ida, 0, MINORMASK+1, GFP_KERNEL);
	if (minor < 0)
		goto err_minor;

	devt = MKDEV(MAJOR(dax_devt), minor);
	dax_dev = dax_dev_get(devt);
	if (!dax_dev)
		goto err_dev;

	dax_add_host(dax_dev, host);
	dax_dev->ops = ops;
	dax_dev->private = private;
	if (flags & DAXDEV_F_SYNC)
		set_dax_synchronous(dax_dev);

	return dax_dev;

 err_dev:
	ida_simple_remove(&dax_minor_ida, minor);
 err_minor:
	kfree(host);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(alloc_dax);

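/*
 * Example (illustrative sketch): a provider driver fills in
 * dax_operations and registers a dax_device. All "my_*" names are
 * hypothetical; see drivers/nvdimm/pmem.c for a real user:
 *
 *	static const struct dax_operations my_dax_ops = {
 *		.direct_access = my_direct_access,
 *		.copy_from_iter = my_copy_from_iter,
 *		.copy_to_iter = my_copy_to_iter,
 *		.zero_page_range = my_zero_page_range,
 *	};
 *
 *	dax_dev = alloc_dax(my_priv, disk->disk_name, &my_dax_ops,
 *			is_sync_media ? DAXDEV_F_SYNC : 0);
 *	if (IS_ERR(dax_dev))
 *		return PTR_ERR(dax_dev);
 */
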
void put_dax(struct dax_device *dax_dev)
{
	if (!dax_dev)
		return;
	iput(&dax_dev->inode);
}
EXPORT_SYMBOL_GPL(put_dax);

/**
 * inode_dax: convert a public inode into its dax_dev
 * @inode: An inode with i_cdev pointing to a dax_dev
 *
 * Note this is not equivalent to to_dax_dev() which is for private
 * internal use where we know the inode filesystem type == dax_fs_type.
 */
struct dax_device *inode_dax(struct inode *inode)
{
	struct cdev *cdev = inode->i_cdev;

	return container_of(cdev, struct dax_device, cdev);
}
EXPORT_SYMBOL_GPL(inode_dax);

struct inode *dax_inode(struct dax_device *dax_dev)
{
	return &dax_dev->inode;
}
EXPORT_SYMBOL_GPL(dax_inode);

void *dax_get_private(struct dax_device *dax_dev)
{
	if (!test_bit(DAXDEV_ALIVE, &dax_dev->flags))
		return NULL;
	return dax_dev->private;
}
EXPORT_SYMBOL_GPL(dax_get_private);

static void init_once(void *_dax_dev)
{
	struct dax_device *dax_dev = _dax_dev;
	struct inode *inode = &dax_dev->inode;

	memset(dax_dev, 0, sizeof(*dax_dev));
	inode_init_once(inode);
}

static int dax_fs_init(void)
{
	int rc;

	dax_cache = kmem_cache_create("dax_cache", sizeof(struct dax_device), 0,
			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
			 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
			init_once);
	if (!dax_cache)
		return -ENOMEM;

	dax_mnt = kern_mount(&dax_fs_type);
	if (IS_ERR(dax_mnt)) {
		rc = PTR_ERR(dax_mnt);
		goto err_mount;
	}
	dax_superblock = dax_mnt->mnt_sb;

	return 0;

 err_mount:
	kmem_cache_destroy(dax_cache);

	return rc;
}

static void dax_fs_exit(void)
{
	kern_unmount(dax_mnt);
	kmem_cache_destroy(dax_cache);
}

static int __init dax_core_init(void)
{
	int rc;

	rc = dax_fs_init();
	if (rc)
		return rc;

	rc = alloc_chrdev_region(&dax_devt, 0, MINORMASK+1, "dax");
	if (rc)
		goto err_chrdev;

	rc = dax_bus_init();
	if (rc)
		goto err_bus;
	return 0;

err_bus:
	unregister_chrdev_region(dax_devt, MINORMASK+1);
err_chrdev:
	dax_fs_exit();
	return rc;
}

static void __exit dax_core_exit(void)
{
	dax_bus_exit();
	unregister_chrdev_region(dax_devt, MINORMASK+1);
	ida_destroy(&dax_minor_ida);
	dax_fs_exit();
}

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
subsys_initcall(dax_core_init);
module_exit(dax_core_exit);