/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/blk-mq.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/nd.h>
#include <linux/backing-dev.h>
#include "pmem.h"
#include "pfn.h"
#include "nd.h"

static struct device *to_dev(struct pmem_device *pmem)
{
	/*
	 * nvdimm bus services need a 'dev' parameter, and we record the device
	 * at init in bb.dev.
	 */
	return pmem->bb.dev;
}

static struct nd_region *to_region(struct pmem_device *pmem)
{
	return to_nd_region(to_dev(pmem)->parent);
}

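/*
 * Try to clear poison (media errors) in the given range via the nvdimm
 * bus.  A short or failed clear is reported back as an I/O error, and
 * any sectors that were successfully cleared are dropped from the
 * badblocks list, with a sysfs notification so userspace can re-read
 * the 'badblocks' attribute.
 */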
static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
		phys_addr_t offset, unsigned int len)
{
	struct device *dev = to_dev(pmem);
	sector_t sector;
	long cleared;
	blk_status_t rc = BLK_STS_OK;

	sector = (offset - pmem->data_offset) / 512;

	cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);
	if (cleared < len)
		rc = BLK_STS_IOERR;
	if (cleared > 0 && cleared / 512) {
		cleared /= 512;
		dev_dbg(dev, "%s: %#llx clear %ld sector%s\n", __func__,
				(unsigned long long) sector, cleared,
				cleared > 1 ? "s" : "");
		badblocks_clear(&pmem->bb, sector, cleared);
		if (pmem->bb_state)
			sysfs_notify_dirent(pmem->bb_state);
	}

	arch_invalidate_pmem(pmem->virt_addr + offset, len);

	return rc;
}

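/*
 * Copy from a (potentially highmem) page into pmem one page at a time.
 * memcpy_flushcache() ensures no dirty cachelines are left behind for
 * the written range; ordering with respect to the platform write
 * buffers is handled separately via nvdimm_flush() in the bio path.
 */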
static void write_pmem(void *pmem_addr, struct page *page,
		unsigned int off, unsigned int len)
{
	unsigned int chunk;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		chunk = min_t(unsigned int, len, PAGE_SIZE);
		memcpy_flushcache(pmem_addr, mem + off, chunk);
		kunmap_atomic(mem);
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += PAGE_SIZE;
	}
}

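/*
 * Copy from pmem into a page with a machine-check-safe memcpy so that
 * consuming poisoned media turns into a recoverable BLK_STS_IOERR
 * instead of a fatal machine check.
 */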
static blk_status_t read_pmem(struct page *page, unsigned int off,
		void *pmem_addr, unsigned int len)
{
	unsigned int chunk;
	int rc;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		chunk = min_t(unsigned int, len, PAGE_SIZE);
		rc = memcpy_mcsafe(mem + off, pmem_addr, chunk);
		kunmap_atomic(mem);
		if (rc)
			return BLK_STS_IOERR;
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += PAGE_SIZE;
	}
	return BLK_STS_OK;
}

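/*
 * Core of the read/write path: translate a sector to an offset within
 * the pmem mapping and do the copy, failing reads of known-bad ranges
 * up front and clearing poison on writes that overlap bad blocks.
 */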
static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page,
			unsigned int len, unsigned int off, bool is_write,
			sector_t sector)
{
	blk_status_t rc = BLK_STS_OK;
	bool bad_pmem = false;
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		bad_pmem = true;

	if (!is_write) {
		if (unlikely(bad_pmem))
			rc = BLK_STS_IOERR;
		else {
			rc = read_pmem(page, off, pmem_addr, len);
			flush_dcache_page(page);
		}
	} else {
		/*
		 * Note that we write the data both before and after
		 * clearing poison.  The write before clear poison
		 * handles situations where the latest written data is
		 * preserved and the clear poison operation simply marks
		 * the address range as valid without changing the data.
		 * In this case application software can assume that an
		 * interrupted write will either return the new good
		 * data or an error.
		 *
		 * However, if pmem_clear_poison() leaves the data in an
		 * indeterminate state we need to perform the write
		 * after clear poison.
		 */
		flush_dcache_page(page);
		write_pmem(pmem_addr, page, off, len);
		if (unlikely(bad_pmem)) {
			rc = pmem_clear_poison(pmem, pmem_off, len);
			write_pmem(pmem_addr, page, off, len);
		}
	}

	return rc;
}

/* account for REQ_FLUSH rename, replace with REQ_PREFLUSH after v4.8-rc1 */
#ifndef REQ_FLUSH
#define REQ_FLUSH REQ_PREFLUSH
#endif

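/*
 * Writes to pmem are synchronous memcpy()s, so REQ_FLUSH/REQ_PREFLUSH
 * and REQ_FUA reduce to flushing the NVDIMM write queues via
 * nvdimm_flush() before and/or after walking the bio's segments.
 */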
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
	blk_status_t rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct pmem_device *pmem = q->queuedata;
	struct nd_region *nd_region = to_region(pmem);

	if (bio->bi_opf & REQ_FLUSH)
		nvdimm_flush(nd_region);

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
				bvec.bv_offset, op_is_write(bio_op(bio)),
				iter.bi_sector);
		if (rc) {
			bio->bi_status = rc;
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

	if (bio->bi_opf & REQ_FUA)
		nvdimm_flush(nd_region);

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

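/* Synchronous single-page (or compound-page) I/O path for ->rw_page() */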
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, bool is_write)
{
	struct pmem_device *pmem = bdev->bd_queue->queuedata;
	blk_status_t rc;

	rc = pmem_do_bvec(pmem, page, hpage_nr_pages(page) * PAGE_SIZE,
			  0, is_write, sector);

	/*
	 * The ->rw_page interface is subtle and tricky.  The core
	 * retries on any error, so we can only invoke page_endio() in
	 * the successful completion case.  Otherwise, we'll see crashes
	 * caused by double completion.
	 */
	if (rc == 0)
		page_endio(page, is_write, 0);

	return blk_status_to_errno(rc);
}

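/*
 * DAX: translate a page offset into the namespace to a kernel virtual
 * address and pfn, refusing ranges with known poison and trimming the
 * reply to the requested range whenever any badblocks are present.
 */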
/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
__weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;

	if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
					PFN_PHYS(nr_pages))))
		return -EIO;
	*kaddr = pmem->virt_addr + offset;
	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	/*
	 * If badblocks are present, limit known good range to the
	 * requested range.
	 */
	if (unlikely(pmem->bb.count))
		return nr_pages;
	return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}

static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		pmem_rw_page,
	.revalidate_disk =	nvdimm_revalidate_disk,
};

static long pmem_dax_direct_access(struct dax_device *dax_dev,
		pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);

	return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
}

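/*
 * dax_operations ->copy_from_iter(): delegate to
 * copy_from_iter_flushcache() so writes through the DAX path get the
 * same cacheline-flushing behavior as write_pmem() in the block path.
 */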
static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	return copy_from_iter_flushcache(addr, bytes, i);
}

static const struct dax_operations pmem_dax_ops = {
	.direct_access = pmem_dax_direct_access,
	.copy_from_iter = pmem_copy_from_iter,
};

static const struct attribute_group *pmem_attribute_groups[] = {
	&dax_attribute_group,
	NULL,
};

static void pmem_release_queue(void *q)
{
	blk_cleanup_queue(q);
}

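/*
 * Start a queue freeze at release time so no new requests can enter
 * pmem_make_request() while the devm_memremap_pages() mappings are
 * being torn down (see the ordering comment in pmem_attach_disk()).
 */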
static void pmem_freeze_queue(void *q)
{
	blk_freeze_queue_start(q);
}

static void pmem_release_disk(void *__pmem)
{
	struct pmem_device *pmem = __pmem;

	kill_dax(pmem->dax_dev);
	put_dax(pmem->dax_dev);
	del_gendisk(pmem->disk);
	put_disk(pmem->disk);
}

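/*
 * Set up the block device and DAX instance for a namespace.  The
 * device memory is mapped one of three ways: via an on-media pfn info
 * block (typically allocating struct page from pmem itself through the
 * altmap), via devm_memremap_pages() with struct page in regular
 * memory, or as a raw memremap() without struct page.  Teardown is
 * sequenced with devm actions, which run in reverse order of setup.
 */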
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct vmem_altmap __altmap, *altmap = NULL;
	int nid = dev_to_node(dev), fua, wbc;
	struct resource *res = &nsio->res;
	struct nd_pfn *nd_pfn = NULL;
	struct dax_device *dax_dev;
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	struct resource pfn_res;
	struct request_queue *q;
	struct device *gendev;
	struct gendisk *disk;
	void *addr;

	/* while nsio_rw_bytes is active, parse a pfn info block if present */
	if (is_nd_pfn(dev)) {
		nd_pfn = to_nd_pfn(dev);
		altmap = nvdimm_setup_pfn(nd_pfn, &pfn_res, &__altmap);
		if (IS_ERR(altmap))
			return PTR_ERR(altmap);
	}

	/* we're attaching a block device, disable raw namespace access */
	devm_nsio_disable(dev, nsio);

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return -ENOMEM;

	dev_set_drvdata(dev, pmem);
	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	fua = nvdimm_has_flush(nd_region);
	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) || fua < 0) {
		dev_warn(dev, "unable to guarantee persistence of writes\n");
		fua = 0;
	}
	wbc = nvdimm_has_cache(nd_region);

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(&ndns->dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
	if (!q)
		return -ENOMEM;

	if (devm_add_action_or_reset(dev, pmem_release_queue, q))
		return -ENOMEM;

	pmem->pfn_flags = PFN_DEV;
	if (is_nd_pfn(dev)) {
		addr = devm_memremap_pages(dev, &pfn_res, &q->q_usage_counter,
				altmap);
		pfn_sb = nd_pfn->pfn_sb;
		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
		pmem->pfn_pad = resource_size(res) - resource_size(&pfn_res);
		pmem->pfn_flags |= PFN_MAP;
		res = &pfn_res; /* for badblocks populate */
		res->start += pmem->data_offset;
	} else if (pmem_should_map_pages(dev)) {
		addr = devm_memremap_pages(dev, &nsio->res,
				&q->q_usage_counter, NULL);
		pmem->pfn_flags |= PFN_MAP;
	} else
		addr = devm_memremap(dev, pmem->phys_addr,
				pmem->size, ARCH_MEMREMAP_PMEM);

	/*
	 * At release time the queue must be frozen before
	 * devm_memremap_pages is unwound
	 */
	if (devm_add_action_or_reset(dev, pmem_freeze_queue, q))
		return -ENOMEM;

	if (IS_ERR(addr))
		return PTR_ERR(addr);
	pmem->virt_addr = addr;

	blk_queue_write_cache(q, wbc, fua);
	blk_queue_make_request(q, pmem_make_request);
	blk_queue_physical_block_size(q, PAGE_SIZE);
	blk_queue_logical_block_size(q, pmem_sector_size(ndns));
	blk_queue_max_hw_sectors(q, UINT_MAX);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
	queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
	q->queuedata = pmem;

	disk = alloc_disk_node(0, nid);
	if (!disk)
		return -ENOMEM;
	pmem->disk = disk;

	disk->fops = &pmem_fops;
	disk->queue = q;
	disk->flags = GENHD_FL_EXT_DEVT;
	disk->queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ 512);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, res);
	disk->bb = &pmem->bb;

	dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops);
	if (!dax_dev) {
		put_disk(disk);
		return -ENOMEM;
	}
	dax_write_cache(dax_dev, wbc);
	pmem->dax_dev = dax_dev;

	gendev = disk_to_dev(disk);
	gendev->groups = pmem_attribute_groups;

	device_add_disk(dev, disk);
	if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
		return -ENOMEM;

	revalidate_disk(disk);

	pmem->bb_state = sysfs_get_dirent(disk_to_dev(disk)->kobj.sd,
					  "badblocks");
	if (!pmem->bb_state)
		dev_warn(dev, "'badblocks' notification disabled\n");

	return 0;
}

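/*
 * Personality dispatch: a namespace may be claimed by a btt, pfn, or
 * dax info block; probing one of those re-registers the device under
 * that personality, otherwise attach as a raw pmem disk.
 */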
static int nd_pmem_probe(struct device *dev)
{
	struct nd_namespace_common *ndns;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	if (devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev)))
		return -ENXIO;

	if (is_nd_btt(dev))
		return nvdimm_namespace_attach_btt(ndns);

	if (is_nd_pfn(dev))
		return pmem_attach_disk(dev, ndns);

	/* if we find a valid info-block we'll come back as that personality */
	if (nd_btt_probe(dev, ndns) == 0 || nd_pfn_probe(dev, ndns) == 0
			|| nd_dax_probe(dev, ndns) == 0)
		return -ENXIO;

	/* ...otherwise we're just a raw pmem device */
	return pmem_attach_disk(dev, ndns);
}

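/*
 * Device remove: detach a btt personality, or drop the 'badblocks'
 * sysfs handle, then flush any posted writes out to media.
 */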
static int nd_pmem_remove(struct device *dev)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);

	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(to_nd_btt(dev));
	else {
		/*
		 * Note, this assumes device_lock() context to not race
		 * nd_pmem_notify()
		 */
		sysfs_put(pmem->bb_state);
		pmem->bb_state = NULL;
	}
	nvdimm_flush(to_nd_region(dev->parent));

	return 0;
}

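/* Flush posted writes out to media on shutdown/reboot */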
static void nd_pmem_shutdown(struct device *dev)
{
	nvdimm_flush(to_nd_region(dev->parent));
}

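/*
 * NVDIMM_REVALIDATE_POISON: pick the badblocks instance that matches
 * the device personality (the backing namespace's for btt), adjust the
 * range for any pfn metadata padding, repopulate it from the bus, and
 * notify the 'badblocks' sysfs dirent when one is registered.
 */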
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	struct nd_region *nd_region;
	resource_size_t offset = 0, end_trunc = 0;
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct resource res;
	struct badblocks *bb;
	struct kernfs_node *bb_state;

	if (event != NVDIMM_REVALIDATE_POISON)
		return;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		ndns = nd_btt->ndns;
		nd_region = to_nd_region(ndns->dev.parent);
		nsio = to_nd_namespace_io(&ndns->dev);
		bb = &nsio->bb;
		bb_state = NULL;
	} else {
		struct pmem_device *pmem = dev_get_drvdata(dev);

		nd_region = to_region(pmem);
		bb = &pmem->bb;
		bb_state = pmem->bb_state;

		if (is_nd_pfn(dev)) {
			struct nd_pfn *nd_pfn = to_nd_pfn(dev);
			struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

			ndns = nd_pfn->ndns;
			offset = pmem->data_offset +
					__le32_to_cpu(pfn_sb->start_pad);
			end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
		} else {
			ndns = to_ndns(dev);
		}

		nsio = to_nd_namespace_io(&ndns->dev);
	}

	res.start = nsio->res.start + offset;
	res.end = nsio->res.end - end_trunc;
	nvdimm_badblocks_populate(nd_region, bb, &res);
	if (bb_state)
		sysfs_notify_dirent(bb_state);
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.notify = nd_pmem_notify,
	.shutdown = nd_pmem_shutdown,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

static int __init pmem_init(void)
{
	return nd_driver_register(&nd_pmem_driver);
}
module_init(pmem_init);

static void pmem_exit(void)
{
	driver_unregister(&nd_pmem_driver.drv);
}
module_exit(pmem_exit);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");