// SPDX-License-Identifier: GPL-2.0-only
/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 */

#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/set_memory.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/blk-mq.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/nd.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include "pmem.h"
#include "btt.h"
#include "pfn.h"
#include "nd.h"

static struct device *to_dev(struct pmem_device *pmem)
{
	/*
	 * nvdimm bus services need a 'dev' parameter, and we record the device
	 * at init in bb.dev.
	 */
	return pmem->bb.dev;
}

static struct nd_region *to_region(struct pmem_device *pmem)
{
	return to_nd_region(to_dev(pmem)->parent);
}

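/*
 * Clear the memory-failure ("HWPoison") state for pages backing a just
 * repaired physical range, so the kernel stops treating them as poisoned.
 */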
static void hwpoison_clear(struct pmem_device *pmem,
		phys_addr_t phys, unsigned int len)
{
	unsigned long pfn_start, pfn_end, pfn;

	/* only pmem in the linear map supports HWPoison */
	if (is_vmalloc_addr(pmem->virt_addr))
		return;

	pfn_start = PHYS_PFN(phys);
	pfn_end = pfn_start + PHYS_PFN(len);
	for (pfn = pfn_start; pfn < pfn_end; pfn++) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * Note, no need to hold a get_dev_pagemap() reference
		 * here since we're in the driver I/O path and
		 * outstanding I/O requests pin the dev_pagemap.
		 */
		if (test_and_clear_pmem_poison(page))
			clear_mce_nospec(pfn);
	}
}

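/*
 * Ask the nvdimm bus to clear poison in the given range, then bring the
 * in-kernel state back in sync: drop HWPoison page state, trim the
 * bad-block list, notify sysfs 'badblocks' watchers, and invalidate any
 * stale cachelines covering the range.
 */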
static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
		phys_addr_t offset, unsigned int len)
{
	struct device *dev = to_dev(pmem);
	sector_t sector;
	long cleared;
	blk_status_t rc = BLK_STS_OK;

	sector = (offset - pmem->data_offset) / 512;

	cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);
	if (cleared < len)
		rc = BLK_STS_IOERR;
	if (cleared > 0 && cleared / 512) {
		hwpoison_clear(pmem, pmem->phys_addr + offset, cleared);
		cleared /= 512;
		dev_dbg(dev, "%#llx clear %ld sector%s\n",
				(unsigned long long) sector, cleared,
				cleared > 1 ? "s" : "");
		badblocks_clear(&pmem->bb, sector, cleared);
		if (pmem->bb_state)
			sysfs_notify_dirent(pmem->bb_state);
	}

	arch_invalidate_pmem(pmem->virt_addr + offset, len);

	return rc;
}

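/*
 * Copy from a (possibly highmem) page range into pmem, one page-sized
 * chunk at a time, using cache-flushing copies so the data is pushed
 * toward the persistence domain.
 */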
static void write_pmem(void *pmem_addr, struct page *page,
		unsigned int off, unsigned int len)
{
	unsigned int chunk;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		chunk = min_t(unsigned int, len, PAGE_SIZE - off);
		memcpy_flushcache(pmem_addr, mem + off, chunk);
		kunmap_atomic(mem);
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += chunk;
	}
}

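/*
 * Copy from pmem into a (possibly highmem) page range using the
 * machine-check-safe copy helper, so consumed poison is reported as an
 * I/O error rather than crashing the kernel.
 */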
static blk_status_t read_pmem(struct page *page, unsigned int off,
		void *pmem_addr, unsigned int len)
{
	unsigned int chunk;
	unsigned long rem;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		chunk = min_t(unsigned int, len, PAGE_SIZE - off);
		rem = copy_mc_to_kernel(mem + off, pmem_addr, chunk);
		kunmap_atomic(mem);
		if (rem)
			return BLK_STS_IOERR;
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += chunk;
	}
	return BLK_STS_OK;
}

static blk_status_t pmem_do_read(struct pmem_device *pmem,
			struct page *page, unsigned int page_off,
			sector_t sector, unsigned int len)
{
	blk_status_t rc;
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		return BLK_STS_IOERR;

	rc = read_pmem(page, page_off, pmem_addr, len);
	flush_dcache_page(page);
	return rc;
}

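/*
 * Write one segment to pmem, clearing any known poison in the target
 * range as a side effect.
 */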
static blk_status_t pmem_do_write(struct pmem_device *pmem,
			struct page *page, unsigned int page_off,
			sector_t sector, unsigned int len)
{
	blk_status_t rc = BLK_STS_OK;
	bool bad_pmem = false;
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		bad_pmem = true;

	/*
	 * Note that we write the data both before and after
	 * clearing poison.  The write before clear poison
	 * handles situations where the latest written data is
	 * preserved and the clear poison operation simply marks
	 * the address range as valid without changing the data.
	 * In this case application software can assume that an
	 * interrupted write will either return the new good
	 * data or an error.
	 *
	 * However, if pmem_clear_poison() leaves the data in an
	 * indeterminate state we need to perform the write
	 * after clear poison.
	 */
	flush_dcache_page(page);
	write_pmem(pmem_addr, page, page_off, len);
	if (unlikely(bad_pmem)) {
		rc = pmem_clear_poison(pmem, pmem_off, len);
		write_pmem(pmem_addr, page, page_off, len);
	}

	return rc;
}

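/*
 * bio submission entry point: honor REQ_PREFLUSH before touching media,
 * copy each segment to/from pmem, honor REQ_FUA with a trailing flush,
 * and complete the bio with the first error seen.
 */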
static void pmem_submit_bio(struct bio *bio)
{
	int ret = 0;
	blk_status_t rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct pmem_device *pmem = bio->bi_bdev->bd_disk->private_data;
	struct nd_region *nd_region = to_region(pmem);

	if (bio->bi_opf & REQ_PREFLUSH)
		ret = nvdimm_flush(nd_region, bio);

	do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
	if (do_acct)
		start = bio_start_io_acct(bio);
	bio_for_each_segment(bvec, bio, iter) {
		if (op_is_write(bio_op(bio)))
			rc = pmem_do_write(pmem, bvec.bv_page, bvec.bv_offset,
				iter.bi_sector, bvec.bv_len);
		else
			rc = pmem_do_read(pmem, bvec.bv_page, bvec.bv_offset,
				iter.bi_sector, bvec.bv_len);
		if (rc) {
			bio->bi_status = rc;
			break;
		}
	}
	if (do_acct)
		bio_end_io_acct(bio, start);

	if (bio->bi_opf & REQ_FUA)
		ret = nvdimm_flush(nd_region, bio);

	if (ret)
		bio->bi_status = errno_to_blk_status(ret);

	bio_endio(bio);
}

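/* synchronous single-page read/write for the block layer's ->rw_page path */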
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, unsigned int op)
{
	struct pmem_device *pmem = bdev->bd_disk->private_data;
	blk_status_t rc;

	if (op_is_write(op))
		rc = pmem_do_write(pmem, page, 0, sector, thp_size(page));
	else
		rc = pmem_do_read(pmem, page, 0, sector, thp_size(page));
	/*
	 * The ->rw_page interface is subtle and tricky.  The core
	 * retries on any error, so we can only invoke page_endio() in
	 * the successful completion case.  Otherwise, we'll see crashes
	 * caused by double completion.
	 */
	if (rc == 0)
		page_endio(page, op_is_write(op), 0);

	return blk_status_to_errno(rc);
}

/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
__weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;

	if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
					PFN_PHYS(nr_pages))))
		return -EIO;

	if (kaddr)
		*kaddr = pmem->virt_addr + offset;
	if (pfn)
		*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	/*
	 * If badblocks are present, limit known good range to the
	 * requested range.
	 */
	if (unlikely(pmem->bb.count))
		return nr_pages;
	return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}

static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.submit_bio =		pmem_submit_bio,
	.rw_page =		pmem_rw_page,
};

static int pmem_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
				    size_t nr_pages)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);

	return blk_status_to_errno(pmem_do_write(pmem, ZERO_PAGE(0), 0,
				   PFN_PHYS(pgoff) >> SECTOR_SHIFT,
				   PAGE_SIZE));
}

static long pmem_dax_direct_access(struct dax_device *dax_dev,
		pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);

	return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
}

/*
 * Use the 'no check' versions of copy_from_iter_flushcache() and
 * copy_mc_to_iter() to bypass HARDENED_USERCOPY overhead. Bounds
 * checking, both file offset and device offset, is handled by
 * dax_iomap_actor()
 */
static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	return _copy_from_iter_flushcache(addr, bytes, i);
}

static size_t pmem_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	return _copy_mc_to_iter(addr, bytes, i);
}

static const struct dax_operations pmem_dax_ops = {
	.direct_access = pmem_dax_direct_access,
	.dax_supported = generic_fsdax_supported,
	.copy_from_iter = pmem_copy_from_iter,
	.copy_to_iter = pmem_copy_to_iter,
	.zero_page_range = pmem_dax_zero_page_range,
};

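/*
 * 'dax/write_cache' sysfs attribute: report and control whether the
 * dax_device uses cache-flushing writes.
 */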
static ssize_t write_cache_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct pmem_device *pmem = dev_to_disk(dev)->private_data;

	return sprintf(buf, "%d\n", !!dax_write_cache_enabled(pmem->dax_dev));
}

static ssize_t write_cache_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct pmem_device *pmem = dev_to_disk(dev)->private_data;
	bool write_cache;
	int rc;

	rc = strtobool(buf, &write_cache);
	if (rc)
		return rc;
	dax_write_cache(pmem->dax_dev, write_cache);
	return len;
}
static DEVICE_ATTR_RW(write_cache);

static umode_t dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
#ifndef CONFIG_ARCH_HAS_PMEM_API
	if (a == &dev_attr_write_cache.attr)
		return 0;
#endif
	return a->mode;
}

static struct attribute *dax_attributes[] = {
	&dev_attr_write_cache.attr,
	NULL,
};

static const struct attribute_group dax_attribute_group = {
	.name = "dax",
	.attrs = dax_attributes,
	.is_visible = dax_visible,
};

static const struct attribute_group *pmem_attribute_groups[] = {
	&dax_attribute_group,
	NULL,
};

static void pmem_release_disk(void *__pmem)
{
	struct pmem_device *pmem = __pmem;

	kill_dax(pmem->dax_dev);
	put_dax(pmem->dax_dev);
	del_gendisk(pmem->disk);

	blk_cleanup_disk(pmem->disk);
}

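/*
 * Main setup path: map the namespace (via devm_memremap_pages() when
 * page structs / DAX are wanted, plain devm_memremap() otherwise), size
 * and register the gendisk, allocate the dax_device, and populate the
 * bad-block list for the backing region.
 */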
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);
	int nid = dev_to_node(dev), fua;
	struct resource *res = &nsio->res;
	struct range bb_range;
	struct nd_pfn *nd_pfn = NULL;
	struct dax_device *dax_dev;
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	struct request_queue *q;
	struct gendisk *disk;
	void *addr;
	int rc;
	unsigned long flags = 0UL;

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return -ENOMEM;

	rc = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
	if (rc)
		return rc;

	/* while nsio_rw_bytes is active, parse a pfn info block if present */
	if (is_nd_pfn(dev)) {
		nd_pfn = to_nd_pfn(dev);
		rc = nvdimm_setup_pfn(nd_pfn, &pmem->pgmap);
		if (rc)
			return rc;
	}

	/* we're attaching a block device, disable raw namespace access */
	devm_namespace_disable(dev, ndns);

	dev_set_drvdata(dev, pmem);
	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	fua = nvdimm_has_flush(nd_region);
	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) || fua < 0) {
		dev_warn(dev, "unable to guarantee persistence of writes\n");
		fua = 0;
	}

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(&ndns->dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

	disk = blk_alloc_disk(nid);
	if (!disk)
		return -ENOMEM;
	q = disk->queue;

	pmem->disk = disk;
	pmem->pgmap.owner = pmem;
	pmem->pfn_flags = PFN_DEV;
	if (is_nd_pfn(dev)) {
		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pfn_sb = nd_pfn->pfn_sb;
		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
		pmem->pfn_pad = resource_size(res) -
			range_len(&pmem->pgmap.range);
		pmem->pfn_flags |= PFN_MAP;
		bb_range = pmem->pgmap.range;
		bb_range.start += pmem->data_offset;
	} else if (pmem_should_map_pages(dev)) {
		pmem->pgmap.range.start = res->start;
		pmem->pgmap.range.end = res->end;
		pmem->pgmap.nr_range = 1;
		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pmem->pfn_flags |= PFN_MAP;
		bb_range = pmem->pgmap.range;
	} else {
		addr = devm_memremap(dev, pmem->phys_addr,
				pmem->size, ARCH_MEMREMAP_PMEM);
		bb_range.start = res->start;
		bb_range.end = res->end;
	}

	if (IS_ERR(addr)) {
		rc = PTR_ERR(addr);
		goto out;
	}
	pmem->virt_addr = addr;

	blk_queue_write_cache(q, true, fua);
	blk_queue_physical_block_size(q, PAGE_SIZE);
	blk_queue_logical_block_size(q, pmem_sector_size(ndns));
	blk_queue_max_hw_sectors(q, UINT_MAX);
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	if (pmem->pfn_flags & PFN_MAP)
		blk_queue_flag_set(QUEUE_FLAG_DAX, q);

	disk->fops = &pmem_fops;
	disk->private_data = pmem;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ 512);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_range);
	disk->bb = &pmem->bb;

	if (is_nvdimm_sync(nd_region))
		flags = DAXDEV_F_SYNC;
	dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops, flags);
	if (IS_ERR(dax_dev)) {
		rc = PTR_ERR(dax_dev);
		goto out;
	}
	dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
	pmem->dax_dev = dax_dev;

	rc = device_add_disk(dev, disk, pmem_attribute_groups);
	if (rc)
		goto out_cleanup_dax;
	if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
		return -ENOMEM;

	nvdimm_check_and_set_ro(disk);

	pmem->bb_state = sysfs_get_dirent(disk_to_dev(disk)->kobj.sd,
					  "badblocks");
	if (!pmem->bb_state)
		dev_warn(dev, "'badblocks' notification disabled\n");
	return 0;

out_cleanup_dax:
	kill_dax(pmem->dax_dev);
	put_dax(pmem->dax_dev);
out:
	blk_cleanup_disk(pmem->disk);
	return rc;
}

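/*
 * Driver probe: hand off to any btt/pfn/dax personality claimed on top of
 * this namespace, otherwise attach a raw pmem disk.
 */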
static int nd_pmem_probe(struct device *dev)
{
	int ret;
	struct nd_namespace_common *ndns;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	if (is_nd_btt(dev))
		return nvdimm_namespace_attach_btt(ndns);

	if (is_nd_pfn(dev))
		return pmem_attach_disk(dev, ndns);

	ret = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
	if (ret)
		return ret;

	ret = nd_btt_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;

	/*
	 * We have two failure conditions here: either there is no
	 * info reserve block, or we found a valid info reserve block
	 * but failed to initialize the pfn superblock.
	 *
	 * For the first case consider the namespace as a raw pmem
	 * namespace and attach a disk.
	 *
	 * For the latter, consider this a success and advance the namespace
	 * seed.
	 */
	ret = nd_pfn_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;
	else if (ret == -EOPNOTSUPP)
		return ret;

	ret = nd_dax_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;
	else if (ret == -EOPNOTSUPP)
		return ret;

	/* probe complete, attach handles namespace enabling */
	devm_namespace_disable(dev, ndns);

	return pmem_attach_disk(dev, ndns);
}

static void nd_pmem_remove(struct device *dev)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);

	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(to_nd_btt(dev));
	else {
		/*
		 * Note, this assumes nd_device_lock() context to not
		 * race nd_pmem_notify()
		 */
		sysfs_put(pmem->bb_state);
		pmem->bb_state = NULL;
	}
	nvdimm_flush(to_nd_region(dev->parent), NULL);
}

static void nd_pmem_shutdown(struct device *dev)
{
	nvdimm_flush(to_nd_region(dev->parent), NULL);
}

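/*
 * Re-scan the backing range for newly reported poison and refresh the
 * badblocks list exposed to the block layer and sysfs.
 */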
static void pmem_revalidate_poison(struct device *dev)
{
	struct nd_region *nd_region;
	resource_size_t offset = 0, end_trunc = 0;
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct badblocks *bb;
	struct range range;
	struct kernfs_node *bb_state;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		ndns = nd_btt->ndns;
		nd_region = to_nd_region(ndns->dev.parent);
		nsio = to_nd_namespace_io(&ndns->dev);
		bb = &nsio->bb;
		bb_state = NULL;
	} else {
		struct pmem_device *pmem = dev_get_drvdata(dev);

		nd_region = to_region(pmem);
		bb = &pmem->bb;
		bb_state = pmem->bb_state;

		if (is_nd_pfn(dev)) {
			struct nd_pfn *nd_pfn = to_nd_pfn(dev);
			struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

			ndns = nd_pfn->ndns;
			offset = pmem->data_offset +
					__le32_to_cpu(pfn_sb->start_pad);
			end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
		} else {
			ndns = to_ndns(dev);
		}

		nsio = to_nd_namespace_io(&ndns->dev);
	}

	range.start = nsio->res.start + offset;
	range.end = nsio->res.end - end_trunc;
	nvdimm_badblocks_populate(nd_region, bb, &range);
	if (bb_state)
		sysfs_notify_dirent(bb_state);
}

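/* sync the gendisk read-only state with its parent region (btt or raw pmem) */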
static void pmem_revalidate_region(struct device *dev)
{
	struct pmem_device *pmem;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);
		struct btt *btt = nd_btt->btt;

		nvdimm_check_and_set_ro(btt->btt_disk);
		return;
	}

	pmem = dev_get_drvdata(dev);
	nvdimm_check_and_set_ro(pmem->disk);
}

static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	switch (event) {
	case NVDIMM_REVALIDATE_POISON:
		pmem_revalidate_poison(dev);
		break;
	case NVDIMM_REVALIDATE_REGION:
		pmem_revalidate_region(dev);
		break;
	default:
		dev_WARN_ONCE(dev, 1, "notify: unknown event: %d\n", event);
		break;
	}
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.notify = nd_pmem_notify,
	.shutdown = nd_pmem_shutdown,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

module_nd_driver(nd_pmem_driver);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");