/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/pmem.h>
#include <linux/dax.h>
#include <linux/nd.h>
#include "pmem.h"
#include "pfn.h"
#include "nd.h"

static struct device *to_dev(struct pmem_device *pmem)
{
	/*
	 * nvdimm bus services need a 'dev' parameter, and we record the device
	 * at init in bb.dev.
	 */
	return pmem->bb.dev;
}

static struct nd_region *to_region(struct pmem_device *pmem)
{
	return to_nd_region(to_dev(pmem)->parent);
}

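/*
 * Ask the bus provider to clear poison in the given physical range, then
 * reflect the result in the badblocks list and invalidate any stale
 * cachelines over the cleared region.
 */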
static int pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
		unsigned int len)
{
	struct device *dev = to_dev(pmem);
	sector_t sector;
	long cleared;
	int rc = 0;

	sector = (offset - pmem->data_offset) / 512;

	cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);
	if (cleared < len)
		rc = -EIO;
	if (cleared > 0 && cleared / 512) {
		cleared /= 512;
		dev_dbg(dev, "%s: %#llx clear %ld sector%s\n", __func__,
				(unsigned long long) sector, cleared,
				cleared > 1 ? "s" : "");
		badblocks_clear(&pmem->bb, sector, cleared);
	}

	invalidate_pmem(pmem->virt_addr + offset, len);

	return rc;
}

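/*
 * write_pmem() copies through a temporary kernel mapping of the page;
 * read_pmem() does the same but with a machine-check-safe copy so that
 * consuming poison surfaces as -EIO rather than a fatal machine check.
 */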
static void write_pmem(void *pmem_addr, struct page *page,
		unsigned int off, unsigned int len)
{
	void *mem = kmap_atomic(page);

	memcpy_to_pmem(pmem_addr, mem + off, len);
	kunmap_atomic(mem);
}

static int read_pmem(struct page *page, unsigned int off,
		void *pmem_addr, unsigned int len)
{
	int rc;
	void *mem = kmap_atomic(page);

	rc = memcpy_mcsafe(mem + off, pmem_addr, len);
	kunmap_atomic(mem);
	if (rc)
		return -EIO;
	return 0;
}

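/*
 * Perform a single segment of I/O, consulting the badblocks list so that
 * reads of known-poisoned ranges fail fast and writes get a chance to
 * clear the poison they overwrite.
 */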
static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
			unsigned int len, unsigned int off, bool is_write,
			sector_t sector)
{
	int rc = 0;
	bool bad_pmem = false;
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		bad_pmem = true;

	if (!is_write) {
		if (unlikely(bad_pmem))
			rc = -EIO;
		else {
			rc = read_pmem(page, off, pmem_addr, len);
			flush_dcache_page(page);
		}
	} else {
		/*
		 * Note that we write the data both before and after
		 * clearing poison.  The write before clear poison
		 * handles situations where the latest written data is
		 * preserved and the clear poison operation simply marks
		 * the address range as valid without changing the data.
		 * In this case application software can assume that an
		 * interrupted write will either return the new good
		 * data or an error.
		 *
		 * However, if pmem_clear_poison() leaves the data in an
		 * indeterminate state we need to perform the write
		 * after clear poison.
		 */
		flush_dcache_page(page);
		write_pmem(pmem_addr, page, off, len);
		if (unlikely(bad_pmem)) {
			rc = pmem_clear_poison(pmem, pmem_off, len);
			write_pmem(pmem_addr, page, off, len);
		}
	}

	return rc;
}

/* account for REQ_FLUSH rename, replace with REQ_PREFLUSH after v4.8-rc1 */
#ifndef REQ_FLUSH
#define REQ_FLUSH REQ_PREFLUSH
#endif

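/*
 * bio-based I/O entry point: honor any preflush, copy each segment
 * synchronously, and flush again for REQ_FUA before completing the bio.
 */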
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
	int rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct pmem_device *pmem = q->queuedata;
	struct nd_region *nd_region = to_region(pmem);

	if (bio->bi_opf & REQ_FLUSH)
		nvdimm_flush(nd_region);

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
				bvec.bv_offset, op_is_write(bio_op(bio)),
				iter.bi_sector);
		if (rc) {
			bio->bi_error = rc;
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

	if (bio->bi_opf & REQ_FUA)
		nvdimm_flush(nd_region);

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, bool is_write)
{
	struct pmem_device *pmem = bdev->bd_queue->queuedata;
	int rc;

	rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, is_write, sector);

	/*
	 * The ->rw_page interface is subtle and tricky.  The core
	 * retries on any error, so we can only invoke page_endio() in
	 * the successful completion case.  Otherwise, we'll see crashes
	 * caused by double completion.
	 */
	if (rc == 0)
		page_endio(page, is_write, 0);

	return rc;
}

/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
__weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;

	if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
					PFN_PHYS(nr_pages))))
		return -EIO;
	*kaddr = pmem->virt_addr + offset;
	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	/*
	 * If badblocks are present, limit known good range to the
	 * requested range.
	 */
	if (unlikely(pmem->bb.count))
		return nr_pages;
	return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}

static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		pmem_rw_page,
	.revalidate_disk =	nvdimm_revalidate_disk,
};

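/*
 * The dax_device holds the pmem_device as its private data, so the
 * dax_operations entry point is a thin wrapper around the __weak helper
 * above.
 */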
static long pmem_dax_direct_access(struct dax_device *dax_dev,
		pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);

	return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
}

static const struct dax_operations pmem_dax_ops = {
	.direct_access = pmem_dax_direct_access,
};

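/*
 * devm release actions run in reverse order of registration, so these
 * callbacks sequence queue and disk teardown against the unwinding of
 * devm_memremap_pages() in pmem_attach_disk().
 */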
static void pmem_release_queue(void *q)
{
	blk_cleanup_queue(q);
}

static void pmem_release_disk(void *__pmem)
{
	struct pmem_device *pmem = __pmem;

	kill_dax(pmem->dax_dev);
	put_dax(pmem->dax_dev);
	del_gendisk(pmem->disk);
	put_disk(pmem->disk);
}

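/*
 * Map the namespace (optionally reserving part of it for a struct page
 * array described by a pfn info block), then set up the request queue,
 * gendisk, dax device, and badblocks list, and register the block device.
 */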
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct vmem_altmap __altmap, *altmap = NULL;
	struct resource *res = &nsio->res;
	struct nd_pfn *nd_pfn = NULL;
	struct dax_device *dax_dev;
	int nid = dev_to_node(dev);
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	struct resource pfn_res;
	struct request_queue *q;
	struct gendisk *disk;
	void *addr;

	/* while nsio_rw_bytes is active, parse a pfn info block if present */
	if (is_nd_pfn(dev)) {
		nd_pfn = to_nd_pfn(dev);
		altmap = nvdimm_setup_pfn(nd_pfn, &pfn_res, &__altmap);
		if (IS_ERR(altmap))
			return PTR_ERR(altmap);
	}

	/* we're attaching a block device, disable raw namespace access */
	devm_nsio_disable(dev, nsio);

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return -ENOMEM;

	dev_set_drvdata(dev, pmem);
	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	if (nvdimm_has_flush(nd_region) < 0)
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(&ndns->dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
	if (!q)
		return -ENOMEM;

	pmem->pfn_flags = PFN_DEV;
	if (is_nd_pfn(dev)) {
		addr = devm_memremap_pages(dev, &pfn_res, &q->q_usage_counter,
				altmap);
		pfn_sb = nd_pfn->pfn_sb;
		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
		pmem->pfn_pad = resource_size(res) - resource_size(&pfn_res);
		pmem->pfn_flags |= PFN_MAP;
		res = &pfn_res; /* for badblocks populate */
		res->start += pmem->data_offset;
	} else if (pmem_should_map_pages(dev)) {
		addr = devm_memremap_pages(dev, &nsio->res,
				&q->q_usage_counter, NULL);
		pmem->pfn_flags |= PFN_MAP;
	} else
		addr = devm_memremap(dev, pmem->phys_addr,
				pmem->size, ARCH_MEMREMAP_PMEM);

	/*
	 * At release time the queue must be dead before
	 * devm_memremap_pages is unwound
	 */
	if (devm_add_action_or_reset(dev, pmem_release_queue, q))
		return -ENOMEM;

	if (IS_ERR(addr))
		return PTR_ERR(addr);
	pmem->virt_addr = addr;

	blk_queue_write_cache(q, true, true);
	blk_queue_make_request(q, pmem_make_request);
	blk_queue_physical_block_size(q, PAGE_SIZE);
	blk_queue_max_hw_sectors(q, UINT_MAX);
	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
	queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
	q->queuedata = pmem;

	disk = alloc_disk_node(0, nid);
	if (!disk)
		return -ENOMEM;
	pmem->disk = disk;

	disk->fops = &pmem_fops;
	disk->queue = q;
	disk->flags = GENHD_FL_EXT_DEVT;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ 512);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, res);
	disk->bb = &pmem->bb;

	dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops);
	if (!dax_dev) {
		put_disk(disk);
		return -ENOMEM;
	}
	pmem->dax_dev = dax_dev;

	device_add_disk(dev, disk);
	if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
		return -ENOMEM;

	revalidate_disk(disk);

	return 0;
}

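/*
 * Probe dispatch: a namespace may carry a btt, pfn, or dax info block
 * that promotes it to a different personality; otherwise attach it as a
 * raw pmem block device.
 */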
static int nd_pmem_probe(struct device *dev)
{
	struct nd_namespace_common *ndns;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	if (devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev)))
		return -ENXIO;

	if (is_nd_btt(dev))
		return nvdimm_namespace_attach_btt(ndns);

	if (is_nd_pfn(dev))
		return pmem_attach_disk(dev, ndns);

	/* if we find a valid info-block we'll come back as that personality */
	if (nd_btt_probe(dev, ndns) == 0 || nd_pfn_probe(dev, ndns) == 0
			|| nd_dax_probe(dev, ndns) == 0)
		return -ENXIO;

	/* ...otherwise we're just a raw pmem device */
	return pmem_attach_disk(dev, ndns);
}

static int nd_pmem_remove(struct device *dev)
{
	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(to_nd_btt(dev));
	nvdimm_flush(to_nd_region(dev->parent));

	return 0;
}

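/* push any region-pending writes out to media before power is removed */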
static void nd_pmem_shutdown(struct device *dev)
{
	nvdimm_flush(to_nd_region(dev->parent));
}

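/*
 * On an NVDIMM_REVALIDATE_POISON notification, re-derive the data
 * resource for whichever personality (btt, pfn, or raw) owns the
 * namespace and repopulate the badblocks list from the bus.
 */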
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);
	struct nd_region *nd_region = to_region(pmem);
	resource_size_t offset = 0, end_trunc = 0;
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct resource res;

	if (event != NVDIMM_REVALIDATE_POISON)
		return;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		ndns = nd_btt->ndns;
	} else if (is_nd_pfn(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);
		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

		ndns = nd_pfn->ndns;
		offset = pmem->data_offset + __le32_to_cpu(pfn_sb->start_pad);
		end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
	} else
		ndns = to_ndns(dev);

	nsio = to_nd_namespace_io(&ndns->dev);
	res.start = nsio->res.start + offset;
	res.end = nsio->res.end - end_trunc;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, &res);
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.notify = nd_pmem_notify,
	.shutdown = nd_pmem_shutdown,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

static int __init pmem_init(void)
{
	return nd_driver_register(&nd_pmem_driver);
}
module_init(pmem_init);

static void pmem_exit(void)
{
	driver_unregister(&nd_pmem_driver.drv);
}
module_exit(pmem_exit);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");