// SPDX-License-Identifier: GPL-2.0-only
/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 */

#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/set_memory.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/blk-mq.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/nd.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include "pmem.h"
#include "btt.h"
#include "pfn.h"
#include "nd.h"

static struct device *to_dev(struct pmem_device *pmem)
{
	/*
	 * nvdimm bus services need a 'dev' parameter, and we record the device
	 * at init in bb.dev.
	 */
	return pmem->bb.dev;
}

static struct nd_region *to_region(struct pmem_device *pmem)
{
	return to_nd_region(to_dev(pmem)->parent);
}

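/*
 * Drop the kernel's memory-failure (HWPoison) tracking for pages whose
 * media errors have just been cleared, so the pfns are usable again.
 */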
static void hwpoison_clear(struct pmem_device *pmem,
		phys_addr_t phys, unsigned int len)
{
	unsigned long pfn_start, pfn_end, pfn;

	/* only pmem in the linear map supports HWPoison */
	if (is_vmalloc_addr(pmem->virt_addr))
		return;

	pfn_start = PHYS_PFN(phys);
	pfn_end = pfn_start + PHYS_PFN(len);
	for (pfn = pfn_start; pfn < pfn_end; pfn++) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * Note, no need to hold a get_dev_pagemap() reference
		 * here since we're in the driver I/O path and
		 * outstanding I/O requests pin the dev_pagemap.
		 */
		if (test_and_clear_pmem_poison(page))
			clear_mce_nospec(pfn);
	}
}

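/*
 * Ask the nvdimm bus to clear media errors over @len bytes at @offset,
 * then drop the corresponding badblocks entries and invalidate stale
 * cachelines over the repaired range. Returns BLK_STS_IOERR when less
 * than the full length could be cleared.
 */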
static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
		phys_addr_t offset, unsigned int len)
{
	struct device *dev = to_dev(pmem);
	sector_t sector;
	long cleared;
	blk_status_t rc = BLK_STS_OK;

	sector = (offset - pmem->data_offset) / 512;

	cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);
	if (cleared < len)
		rc = BLK_STS_IOERR;
	if (cleared > 0 && cleared / 512) {
		hwpoison_clear(pmem, pmem->phys_addr + offset, cleared);
		cleared /= 512;
		dev_dbg(dev, "%#llx clear %ld sector%s\n",
				(unsigned long long) sector, cleared,
				cleared > 1 ? "s" : "");
		badblocks_clear(&pmem->bb, sector, cleared);
		if (pmem->bb_state)
			sysfs_notify_dirent(pmem->bb_state);
	}

	arch_invalidate_pmem(pmem->virt_addr + offset, len);

	return rc;
}

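/*
 * Copy a page payload into pmem using cache-flushing stores. The copy
 * proceeds in at-most-PAGE_SIZE chunks since the source may span
 * multiple pages and kmap_atomic() maps only one page at a time.
 */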
static void write_pmem(void *pmem_addr, struct page *page,
		unsigned int off, unsigned int len)
{
	unsigned int chunk;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		chunk = min_t(unsigned int, len, PAGE_SIZE - off);
		memcpy_flushcache(pmem_addr, mem + off, chunk);
		kunmap_atomic(mem);
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += chunk;
	}
}

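/*
 * Copy from pmem into a page using a machine-check-safe copy so that
 * consuming poison surfaces as a BLK_STS_IOERR rather than a crash.
 */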
static blk_status_t read_pmem(struct page *page, unsigned int off,
		void *pmem_addr, unsigned int len)
{
	unsigned int chunk;
	unsigned long rem;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		chunk = min_t(unsigned int, len, PAGE_SIZE - off);
		rem = copy_mc_to_kernel(mem + off, pmem_addr, chunk);
		kunmap_atomic(mem);
		if (rem)
			return BLK_STS_IOERR;
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += chunk;
	}
	return BLK_STS_OK;
}

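/*
 * Reads that intersect a known bad range fail up front rather than
 * risk consuming poison; is_bad_pmem() consults the badblocks list.
 */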
static blk_status_t pmem_do_read(struct pmem_device *pmem,
			struct page *page, unsigned int page_off,
			sector_t sector, unsigned int len)
{
	blk_status_t rc;
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		return BLK_STS_IOERR;

	rc = read_pmem(page, page_off, pmem_addr, len);
	flush_dcache_page(page);
	return rc;
}

static blk_status_t pmem_do_write(struct pmem_device *pmem,
			struct page *page, unsigned int page_off,
			sector_t sector, unsigned int len)
{
	blk_status_t rc = BLK_STS_OK;
	bool bad_pmem = false;
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		bad_pmem = true;

	/*
	 * Note that we write the data both before and after
	 * clearing poison.  The write before clearing poison
	 * handles situations where the latest written data is
	 * preserved and the clear poison operation simply marks
	 * the address range as valid without changing the data.
	 * In this case application software can assume that an
	 * interrupted write will either return the new good
	 * data or an error.
	 *
	 * However, if pmem_clear_poison() leaves the data in an
	 * indeterminate state we need to perform the write
	 * after clearing the poison.
	 */
	flush_dcache_page(page);
	write_pmem(pmem_addr, page, page_off, len);
	if (unlikely(bad_pmem)) {
		rc = pmem_clear_poison(pmem, pmem_off, len);
		write_pmem(pmem_addr, page, page_off, len);
	}

	return rc;
}

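/*
 * bio-based I/O path: honor REQ_PREFLUSH with an up-front region
 * flush, service each segment synchronously through
 * pmem_do_{read,write}(), and finish REQ_FUA with a trailing flush.
 */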
static void pmem_submit_bio(struct bio *bio)
{
	int ret = 0;
	blk_status_t rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct pmem_device *pmem = bio->bi_bdev->bd_disk->private_data;
	struct nd_region *nd_region = to_region(pmem);

	if (bio->bi_opf & REQ_PREFLUSH)
		ret = nvdimm_flush(nd_region, bio);

	do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
	if (do_acct)
		start = bio_start_io_acct(bio);
	bio_for_each_segment(bvec, bio, iter) {
		if (op_is_write(bio_op(bio)))
			rc = pmem_do_write(pmem, bvec.bv_page, bvec.bv_offset,
				iter.bi_sector, bvec.bv_len);
		else
			rc = pmem_do_read(pmem, bvec.bv_page, bvec.bv_offset,
				iter.bi_sector, bvec.bv_len);
		if (rc) {
			bio->bi_status = rc;
			break;
		}
	}
	if (do_acct)
		bio_end_io_acct(bio, start);

	if (bio->bi_opf & REQ_FUA)
		ret = nvdimm_flush(nd_region, bio);

	if (ret)
		bio->bi_status = errno_to_blk_status(ret);

	bio_endio(bio);
}

static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, unsigned int op)
{
	struct pmem_device *pmem = bdev->bd_disk->private_data;
	blk_status_t rc;

	if (op_is_write(op))
		rc = pmem_do_write(pmem, page, 0, sector, thp_size(page));
	else
		rc = pmem_do_read(pmem, page, 0, sector, thp_size(page));
	/*
	 * The ->rw_page interface is subtle and tricky.  The core
	 * retries on any error, so we can only invoke page_endio() in
	 * the successful completion case.  Otherwise, we'll see crashes
	 * caused by double completion.
	 */
	if (rc == 0)
		page_endio(page, op_is_write(op), 0);

	return blk_status_to_errno(rc);
}

/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
__weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;

	if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
					PFN_PHYS(nr_pages))))
		return -EIO;

	if (kaddr)
		*kaddr = pmem->virt_addr + offset;
	if (pfn)
		*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	/*
	 * If badblocks are present, limit known good range to the
	 * requested range.
	 */
	if (unlikely(pmem->bb.count))
		return nr_pages;
	return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}

static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.submit_bio =		pmem_submit_bio,
	.rw_page =		pmem_rw_page,
};

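/*
 * Zero a range by pushing ZERO_PAGE(0) through the regular write path,
 * which also clears any known poison in the range as a side effect.
 */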
static int pmem_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
				    size_t nr_pages)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);

	return blk_status_to_errno(pmem_do_write(pmem, ZERO_PAGE(0), 0,
				   PFN_PHYS(pgoff) >> SECTOR_SHIFT,
				   PAGE_SIZE));
}

static long pmem_dax_direct_access(struct dax_device *dax_dev,
		pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);

	return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
}

/*
 * Use the 'no check' versions of copy_from_iter_flushcache() and
 * copy_mc_to_iter() to bypass HARDENED_USERCOPY overhead. Bounds
 * checking, both file offset and device offset, is handled by
 * dax_iomap_actor().
 */
static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	return _copy_from_iter_flushcache(addr, bytes, i);
}

static size_t pmem_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	return _copy_mc_to_iter(addr, bytes, i);
}

static const struct dax_operations pmem_dax_ops = {
	.direct_access = pmem_dax_direct_access,
	.dax_supported = generic_fsdax_supported,
	.copy_from_iter = pmem_copy_from_iter,
	.copy_to_iter = pmem_copy_to_iter,
	.zero_page_range = pmem_dax_zero_page_range,
};

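/*
 * 'dax/write_cache' sysfs attribute: report or toggle the write-cache
 * flag on the namespace's dax_device.
 */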
static ssize_t write_cache_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct pmem_device *pmem = dev_to_disk(dev)->private_data;

	return sprintf(buf, "%d\n", !!dax_write_cache_enabled(pmem->dax_dev));
}

static ssize_t write_cache_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct pmem_device *pmem = dev_to_disk(dev)->private_data;
	bool write_cache;
	int rc;

	rc = strtobool(buf, &write_cache);
	if (rc)
		return rc;
	dax_write_cache(pmem->dax_dev, write_cache);
	return len;
}
static DEVICE_ATTR_RW(write_cache);

static umode_t dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
#ifndef CONFIG_ARCH_HAS_PMEM_API
	if (a == &dev_attr_write_cache.attr)
		return 0;
#endif
	return a->mode;
}

static struct attribute *dax_attributes[] = {
	&dev_attr_write_cache.attr,
	NULL,
};

static const struct attribute_group dax_attribute_group = {
	.name = "dax",
	.attrs = dax_attributes,
	.is_visible = dax_visible,
};

static const struct attribute_group *pmem_attribute_groups[] = {
	&dax_attribute_group,
	NULL,
};

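/*
 * devm teardown for pmem_attach_disk(): kill and drop the dax_device
 * first, then remove and free the gendisk.
 */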
static void pmem_release_disk(void *__pmem)
{
	struct pmem_device *pmem = __pmem;

	kill_dax(pmem->dax_dev);
	put_dax(pmem->dax_dev);
	del_gendisk(pmem->disk);

	blk_cleanup_disk(pmem->disk);
}

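/*
 * Set up the gendisk and dax_device for a pmem namespace. The backing
 * memory is mapped one of three ways:
 *  - pfn mode: devm_memremap_pages() with the info block's data offset
 *    and padding applied,
 *  - page-mapped raw mode: devm_memremap_pages() over the whole
 *    resource,
 *  - otherwise plain devm_memremap(), with no struct pages and hence
 *    no DAX support.
 */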
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);
	int nid = dev_to_node(dev), fua;
	struct resource *res = &nsio->res;
	struct range bb_range;
	struct nd_pfn *nd_pfn = NULL;
	struct dax_device *dax_dev;
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	struct request_queue *q;
	struct gendisk *disk;
	void *addr;
	int rc;
	unsigned long flags = 0UL;

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return -ENOMEM;

	rc = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
	if (rc)
		return rc;

	/* while nsio_rw_bytes is active, parse a pfn info block if present */
	if (is_nd_pfn(dev)) {
		nd_pfn = to_nd_pfn(dev);
		rc = nvdimm_setup_pfn(nd_pfn, &pmem->pgmap);
		if (rc)
			return rc;
	}

	/* we're attaching a block device, disable raw namespace access */
	devm_namespace_disable(dev, ndns);

	dev_set_drvdata(dev, pmem);
	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	fua = nvdimm_has_flush(nd_region);
	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) || fua < 0) {
		dev_warn(dev, "unable to guarantee persistence of writes\n");
		fua = 0;
	}

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(&ndns->dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

	disk = blk_alloc_disk(nid);
	if (!disk)
		return -ENOMEM;
	q = disk->queue;

	pmem->disk = disk;
	pmem->pgmap.owner = pmem;
	pmem->pfn_flags = PFN_DEV;
	if (is_nd_pfn(dev)) {
		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pfn_sb = nd_pfn->pfn_sb;
		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
		pmem->pfn_pad = resource_size(res) -
			range_len(&pmem->pgmap.range);
		pmem->pfn_flags |= PFN_MAP;
		bb_range = pmem->pgmap.range;
		bb_range.start += pmem->data_offset;
	} else if (pmem_should_map_pages(dev)) {
		pmem->pgmap.range.start = res->start;
		pmem->pgmap.range.end = res->end;
		pmem->pgmap.nr_range = 1;
		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pmem->pfn_flags |= PFN_MAP;
		bb_range = pmem->pgmap.range;
	} else {
		addr = devm_memremap(dev, pmem->phys_addr,
				pmem->size, ARCH_MEMREMAP_PMEM);
		bb_range.start = res->start;
		bb_range.end = res->end;
	}

	if (IS_ERR(addr)) {
		rc = PTR_ERR(addr);
		goto out;
	}
	pmem->virt_addr = addr;

	blk_queue_write_cache(q, true, fua);
	blk_queue_physical_block_size(q, PAGE_SIZE);
	blk_queue_logical_block_size(q, pmem_sector_size(ndns));
	blk_queue_max_hw_sectors(q, UINT_MAX);
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	if (pmem->pfn_flags & PFN_MAP)
		blk_queue_flag_set(QUEUE_FLAG_DAX, q);

	disk->fops = &pmem_fops;
	disk->private_data = pmem;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ 512);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_range);
	disk->bb = &pmem->bb;

	if (is_nvdimm_sync(nd_region))
		flags = DAXDEV_F_SYNC;
	dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops, flags);
	if (IS_ERR(dax_dev)) {
		rc = PTR_ERR(dax_dev);
		goto out;
	}
	dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
	pmem->dax_dev = dax_dev;

	rc = device_add_disk(dev, disk, pmem_attribute_groups);
	if (rc)
		goto out_cleanup_dax;
	if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
		return -ENOMEM;

	nvdimm_check_and_set_ro(disk);

	pmem->bb_state = sysfs_get_dirent(disk_to_dev(disk)->kobj.sd,
					  "badblocks");
	if (!pmem->bb_state)
		dev_warn(dev, "'badblocks' notification disabled\n");
	return 0;

out_cleanup_dax:
	kill_dax(pmem->dax_dev);
	put_dax(pmem->dax_dev);
out:
	blk_cleanup_disk(pmem->disk);
	return rc;
}

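/*
 * Probe order: claimed btt and pfn devices attach directly; a raw
 * namespace is personality-matched as btt, then pfn, then dax, before
 * falling back to a plain pmem disk.
 */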
static int nd_pmem_probe(struct device *dev)
{
	int ret;
	struct nd_namespace_common *ndns;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	if (is_nd_btt(dev))
		return nvdimm_namespace_attach_btt(ndns);

	if (is_nd_pfn(dev))
		return pmem_attach_disk(dev, ndns);

	ret = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
	if (ret)
		return ret;

	ret = nd_btt_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;

	/*
	 * We have two failure conditions here: either there is no info
	 * reserve block, or we found a valid info reserve block but
	 * failed to initialize the pfn superblock.
	 *
	 * For the first case, consider the namespace as a raw pmem
	 * namespace and attach a disk.
	 *
	 * For the latter, consider this a success and advance the
	 * namespace seed.
	 */
	ret = nd_pfn_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;
	else if (ret == -EOPNOTSUPP)
		return ret;

	ret = nd_dax_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;
	else if (ret == -EOPNOTSUPP)
		return ret;

	/* probe complete, attach handles namespace enabling */
	devm_namespace_disable(dev, ndns);

	return pmem_attach_disk(dev, ndns);
}

static void nd_pmem_remove(struct device *dev)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);

	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(to_nd_btt(dev));
	else {
		/*
		 * Note, this assumes nd_device_lock() context to not
		 * race nd_pmem_notify()
		 */
		sysfs_put(pmem->bb_state);
		pmem->bb_state = NULL;
	}
	nvdimm_flush(to_nd_region(dev->parent), NULL);
}

static void nd_pmem_shutdown(struct device *dev)
{
	nvdimm_flush(to_nd_region(dev->parent), NULL);
}

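/*
 * NVDIMM_REVALIDATE_POISON handler: re-populate the badblocks list
 * from the region's poison records, adjusting for btt/pfn layout
 * offsets, and notify the sysfs 'badblocks' file when one is attached.
 */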
static void pmem_revalidate_poison(struct device *dev)
{
	struct nd_region *nd_region;
	resource_size_t offset = 0, end_trunc = 0;
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct badblocks *bb;
	struct range range;
	struct kernfs_node *bb_state;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		ndns = nd_btt->ndns;
		nd_region = to_nd_region(ndns->dev.parent);
		nsio = to_nd_namespace_io(&ndns->dev);
		bb = &nsio->bb;
		bb_state = NULL;
	} else {
		struct pmem_device *pmem = dev_get_drvdata(dev);

		nd_region = to_region(pmem);
		bb = &pmem->bb;
		bb_state = pmem->bb_state;

		if (is_nd_pfn(dev)) {
			struct nd_pfn *nd_pfn = to_nd_pfn(dev);
			struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

			ndns = nd_pfn->ndns;
			offset = pmem->data_offset +
					__le32_to_cpu(pfn_sb->start_pad);
			end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
		} else {
			ndns = to_ndns(dev);
		}

		nsio = to_nd_namespace_io(&ndns->dev);
	}

	range.start = nsio->res.start + offset;
	range.end = nsio->res.end - end_trunc;
	nvdimm_badblocks_populate(nd_region, bb, &range);
	if (bb_state)
		sysfs_notify_dirent(bb_state);
}

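/*
 * NVDIMM_REVALIDATE_REGION handler: propagate a region read-only
 * change to the attached btt or pmem disk.
 */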
static void pmem_revalidate_region(struct device *dev)
{
	struct pmem_device *pmem;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);
		struct btt *btt = nd_btt->btt;

		nvdimm_check_and_set_ro(btt->btt_disk);
		return;
	}

	pmem = dev_get_drvdata(dev);
	nvdimm_check_and_set_ro(pmem->disk);
}

static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	switch (event) {
	case NVDIMM_REVALIDATE_POISON:
		pmem_revalidate_poison(dev);
		break;
	case NVDIMM_REVALIDATE_REGION:
		pmem_revalidate_region(dev);
		break;
	default:
		dev_WARN_ONCE(dev, 1, "notify: unknown event: %d\n", event);
		break;
	}
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.notify = nd_pmem_notify,
	.shutdown = nd_pmem_shutdown,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

module_nd_driver(nd_pmem_driver);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");