// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Interface to Linux block layer for MTD 'translation layers'.
 *
 * Copyright © 2003-2010 David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>

#include "mtdcore.h"

static LIST_HEAD(blktrans_majors);

static void blktrans_dev_release(struct kref *kref)
{
	struct mtd_blktrans_dev *dev =
		container_of(kref, struct mtd_blktrans_dev, ref);

	blk_cleanup_disk(dev->disk);
	blk_mq_free_tag_set(dev->tag_set);
	kfree(dev->tag_set);
	list_del(&dev->list);
	kfree(dev);
}

static void blktrans_dev_put(struct mtd_blktrans_dev *dev)
{
	kref_put(&dev->ref, blktrans_dev_release);
}

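/*
 * Handle one request segment: map the sector range onto tr->blkshift-sized
 * blocks and dispatch to the translation layer's flush/discard/readsect/
 * writesect hooks. Called with dev->lock held.
 */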
static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr,
			       struct mtd_blktrans_dev *dev,
			       struct request *req)
{
	unsigned long block, nsect;
	char *buf;

	block = blk_rq_pos(req) << 9 >> tr->blkshift;
	nsect = blk_rq_cur_bytes(req) >> tr->blkshift;

	if (req_op(req) == REQ_OP_FLUSH) {
		if (tr->flush(dev))
			return BLK_STS_IOERR;
		return BLK_STS_OK;
	}

	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
	    get_capacity(req->rq_disk))
		return BLK_STS_IOERR;

	switch (req_op(req)) {
	case REQ_OP_DISCARD:
		if (tr->discard(dev, block, nsect))
			return BLK_STS_IOERR;
		return BLK_STS_OK;
	case REQ_OP_READ:
		buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
		for (; nsect > 0; nsect--, block++, buf += tr->blksize) {
			if (tr->readsect(dev, block, buf)) {
				kunmap(bio_page(req->bio));
				return BLK_STS_IOERR;
			}
		}
		kunmap(bio_page(req->bio));
		rq_flush_dcache_pages(req);
		return BLK_STS_OK;
	case REQ_OP_WRITE:
		if (!tr->writesect)
			return BLK_STS_IOERR;

		rq_flush_dcache_pages(req);
		buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
		for (; nsect > 0; nsect--, block++, buf += tr->blksize) {
			if (tr->writesect(dev, block, buf)) {
				kunmap(bio_page(req->bio));
				return BLK_STS_IOERR;
			}
		}
		kunmap(bio_page(req->bio));
		return BLK_STS_OK;
	default:
		return BLK_STS_IOERR;
	}
}

int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev)
{
	return dev->bg_stop;
}
EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);

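/*
 * Pop the next pending request off dev->rq_list and mark it started.
 * Called with dev->queue_lock held.
 */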
static struct request *mtd_next_request(struct mtd_blktrans_dev *dev)
{
	struct request *rq;

	rq = list_first_entry_or_null(&dev->rq_list, struct request, queuelist);
	if (rq) {
		list_del_init(&rq->queuelist);
		blk_mq_start_request(rq);
		return rq;
	}

	return NULL;
}

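/*
 * Drain dev->rq_list, dropping queue_lock around the calls into the MTD
 * layer. When the list runs dry, give the translation layer one shot at
 * its background work per idle period before returning.
 */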
static void mtd_blktrans_work(struct mtd_blktrans_dev *dev)
	__releases(&dev->queue_lock)
	__acquires(&dev->queue_lock)
{
	struct mtd_blktrans_ops *tr = dev->tr;
	struct request *req = NULL;
	int background_done = 0;

	while (1) {
		blk_status_t res;

		dev->bg_stop = false;
		if (!req && !(req = mtd_next_request(dev))) {
			if (tr->background && !background_done) {
				spin_unlock_irq(&dev->queue_lock);
				mutex_lock(&dev->lock);
				tr->background(dev);
				mutex_unlock(&dev->lock);
				spin_lock_irq(&dev->queue_lock);
				/*
				 * Do background processing just once per idle
				 * period.
				 */
				background_done = !dev->bg_stop;
				continue;
			}
			break;
		}

		spin_unlock_irq(&dev->queue_lock);

		mutex_lock(&dev->lock);
		res = do_blktrans_request(dev->tr, dev, req);
		mutex_unlock(&dev->lock);

		if (!blk_update_request(req, res, blk_rq_cur_bytes(req))) {
			__blk_mq_end_request(req, res);
			req = NULL;
		}

		background_done = 0;
		spin_lock_irq(&dev->queue_lock);
	}
}

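/*
 * blk-mq ->queue_rq handler: queue the request on dev->rq_list and drain
 * the list synchronously. A NULL queuedata means the device is going away
 * (see del_mtd_blktrans_dev), so fail the request immediately.
 */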
static blk_status_t mtd_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct mtd_blktrans_dev *dev;

	dev = hctx->queue->queuedata;
	if (!dev) {
		blk_mq_start_request(bd->rq);
		return BLK_STS_IOERR;
	}

	spin_lock_irq(&dev->queue_lock);
	list_add_tail(&bd->rq->queuelist, &dev->rq_list);
	mtd_blktrans_work(dev);
	spin_unlock_irq(&dev->queue_lock);

	return BLK_STS_OK;
}

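/*
 * On first open, take a reference on the translation driver module and
 * the underlying MTD device; subsequent opens just bump dev->open.
 */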
static int blktrans_open(struct block_device *bdev, fmode_t mode)
{
	struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
	int ret = 0;

	kref_get(&dev->ref);

	mutex_lock(&dev->lock);

	if (dev->open)
		goto unlock;

	__module_get(dev->tr->owner);

	if (!dev->mtd)
		goto unlock;

	if (dev->tr->open) {
		ret = dev->tr->open(dev);
		if (ret)
			goto error_put;
	}

	ret = __get_mtd_device(dev->mtd);
	if (ret)
		goto error_release;
	dev->file_mode = mode;

unlock:
	dev->open++;
	mutex_unlock(&dev->lock);
	return ret;

error_release:
	if (dev->tr->release)
		dev->tr->release(dev);
error_put:
	module_put(dev->tr->owner);
	mutex_unlock(&dev->lock);
	blktrans_dev_put(dev);
	return ret;
}

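/*
 * Drop the references taken in blktrans_open() once the last opener
 * closes the device.
 */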
static void blktrans_release(struct gendisk *disk, fmode_t mode)
{
	struct mtd_blktrans_dev *dev = disk->private_data;

	mutex_lock(&dev->lock);

	if (--dev->open)
		goto unlock;

	module_put(dev->tr->owner);

	if (dev->mtd) {
		if (dev->tr->release)
			dev->tr->release(dev);
		__put_mtd_device(dev->mtd);
	}
unlock:
	mutex_unlock(&dev->lock);
	blktrans_dev_put(dev);
}

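/*
 * Report geometry via the translation layer's optional getgeo hook:
 * -ENXIO if the MTD device is already gone, -ENOTTY if unimplemented.
 */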
static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
	int ret = -ENXIO;

	mutex_lock(&dev->lock);

	if (!dev->mtd)
		goto unlock;

	ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : -ENOTTY;
unlock:
	mutex_unlock(&dev->lock);
	return ret;
}

static const struct block_device_operations mtd_block_ops = {
	.owner		= THIS_MODULE,
	.open		= blktrans_open,
	.release	= blktrans_release,
	.getgeo		= blktrans_getgeo,
};

static const struct blk_mq_ops mtd_mq_ops = {
	.queue_rq	= mtd_queue_rq,
};

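/*
 * Register a new translation-layer device: pick a free devnum (or honour
 * the one requested), set up a single-queue blk-mq tag set, allocate the
 * gendisk and request queue, and publish the disk. The caller must hold
 * mtd_table_mutex.
 */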
int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
	struct mtd_blktrans_ops *tr = new->tr;
	struct mtd_blktrans_dev *d;
	int last_devnum = -1;
	struct gendisk *gd;
	int ret;

	lockdep_assert_held(&mtd_table_mutex);

	list_for_each_entry(d, &tr->devs, list) {
		if (new->devnum == -1) {
			/* Use first free number */
			if (d->devnum != last_devnum+1) {
				/* Found a free devnum. Plug it in here */
				new->devnum = last_devnum+1;
				list_add_tail(&new->list, &d->list);
				goto added;
			}
		} else if (d->devnum == new->devnum) {
			/* Required number taken */
			return -EBUSY;
		} else if (d->devnum > new->devnum) {
			/* Required number was free */
			list_add_tail(&new->list, &d->list);
			goto added;
		}
		last_devnum = d->devnum;
	}

	ret = -EBUSY;
	if (new->devnum == -1)
		new->devnum = last_devnum+1;

	/* Check that the device and any partitions will get valid
	 * minor numbers and that the disk naming code below can cope
	 * with this number. */
	if (new->devnum > (MINORMASK >> tr->part_bits) ||
	    (tr->part_bits && new->devnum >= 27 * 26))
		return ret;

	list_add_tail(&new->list, &tr->devs);
added:

	mutex_init(&new->lock);
	kref_init(&new->ref);
	if (!tr->writesect)
		new->readonly = 1;

	ret = -ENOMEM;
	new->tag_set = kzalloc(sizeof(*new->tag_set), GFP_KERNEL);
	if (!new->tag_set)
		goto out_list_del;

	ret = blk_mq_alloc_sq_tag_set(new->tag_set, &mtd_mq_ops, 2,
			BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
	if (ret)
		goto out_kfree_tag_set;

	/* Create gendisk */
	gd = blk_mq_alloc_disk(new->tag_set, new);
	if (IS_ERR(gd)) {
		ret = PTR_ERR(gd);
		goto out_free_tag_set;
	}

	new->disk = gd;
	new->rq = new->disk->queue;
	gd->private_data = new;
	gd->major = tr->major;
	gd->first_minor = (new->devnum) << tr->part_bits;
	gd->minors = 1 << tr->part_bits;
	gd->fops = &mtd_block_ops;

	if (tr->part_bits)
		if (new->devnum < 26)
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c", tr->name, 'a' + new->devnum);
		else
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c%c", tr->name,
				 'a' - 1 + new->devnum / 26,
				 'a' + new->devnum % 26);
	else
		snprintf(gd->disk_name, sizeof(gd->disk_name),
			 "%s%d", tr->name, new->devnum);

	set_capacity(gd, ((u64)new->size * tr->blksize) >> 9);

	/* Create the request queue */
	spin_lock_init(&new->queue_lock);
	INIT_LIST_HEAD(&new->rq_list);

	if (tr->flush)
		blk_queue_write_cache(new->rq, true, false);

	blk_queue_logical_block_size(new->rq, tr->blksize);

	blk_queue_flag_set(QUEUE_FLAG_NONROT, new->rq);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, new->rq);

	if (tr->discard) {
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, new->rq);
		blk_queue_max_discard_sectors(new->rq, UINT_MAX);
		new->rq->limits.discard_granularity = tr->blksize;
	}

	gd->queue = new->rq;

	if (new->readonly)
		set_disk_ro(gd, 1);

	device_add_disk(&new->mtd->dev, gd, NULL);

	if (new->disk_attributes) {
		ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
					new->disk_attributes);
		WARN_ON(ret);
	}
	return 0;

out_free_tag_set:
	blk_mq_free_tag_set(new->tag_set);
out_kfree_tag_set:
	kfree(new->tag_set);
out_list_del:
	list_del(&new->list);
	return ret;
}

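/*
 * Tear down a translation-layer device: unpublish the disk, cut off and
 * flush in-flight requests, then release the MTD device if it is still
 * open. The caller must hold mtd_table_mutex.
 */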
int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
	unsigned long flags;

	lockdep_assert_held(&mtd_table_mutex);

	if (old->disk_attributes)
		sysfs_remove_group(&disk_to_dev(old->disk)->kobj,
						old->disk_attributes);

	/* Stop new requests from arriving */
	del_gendisk(old->disk);

	/* Kill current requests */
	spin_lock_irqsave(&old->queue_lock, flags);
	old->rq->queuedata = NULL;
	spin_unlock_irqrestore(&old->queue_lock, flags);

	/* freeze+quiesce queue to ensure all requests are flushed */
	blk_mq_freeze_queue(old->rq);
	blk_mq_quiesce_queue(old->rq);
	blk_mq_unquiesce_queue(old->rq);
	blk_mq_unfreeze_queue(old->rq);

	/* If the device is currently open, tell the trans driver to close it,
	   then put the mtd device, and don't touch it again */
	mutex_lock(&old->lock);
	if (old->open) {
		if (old->tr->release)
			old->tr->release(old);
		__put_mtd_device(old->mtd);
	}

	old->mtd = NULL;

	mutex_unlock(&old->lock);
	blktrans_dev_put(old);
	return 0;
}

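/*
 * MTD notifier callbacks: propagate MTD device arrival and removal to
 * every registered translation layer.
 */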
static void blktrans_notify_remove(struct mtd_info *mtd)
{
	struct mtd_blktrans_ops *tr;
	struct mtd_blktrans_dev *dev, *next;

	list_for_each_entry(tr, &blktrans_majors, list)
		list_for_each_entry_safe(dev, next, &tr->devs, list)
			if (dev->mtd == mtd)
				tr->remove_dev(dev);
}

static void blktrans_notify_add(struct mtd_info *mtd)
{
	struct mtd_blktrans_ops *tr;

	if (mtd->type == MTD_ABSENT)
		return;

	list_for_each_entry(tr, &blktrans_majors, list)
		tr->add_mtd(tr, mtd);
}

static struct mtd_notifier blktrans_notifier = {
	.add = blktrans_notify_add,
	.remove = blktrans_notify_remove,
};

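/*
 * Register a translation layer and attach it to all existing MTD devices.
 * Roughly, a minimal caller looks like the sketch below (the foo_* names
 * are hypothetical; the field set follows struct mtd_blktrans_ops in
 * <linux/mtd/blktrans.h>):
 *
 *	static struct mtd_blktrans_ops foo_tr = {
 *		.name		= "foo",
 *		.major		= 0,		// 0 = dynamic major
 *		.part_bits	= 0,
 *		.blksize	= 512,		// must be a power of two
 *		.readsect	= foo_readsect,
 *		.add_mtd	= foo_add_mtd,	// calls add_mtd_blktrans_dev()
 *		.remove_dev	= foo_remove_dev,
 *		.owner		= THIS_MODULE,
 *	};
 *	...
 *	ret = register_mtd_blktrans(&foo_tr);
 */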
int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct mtd_info *mtd;
	int ret;

	/* Register the notifier if/when the first device type is
	   registered, to prevent the link/init ordering from biting
	   us. */
	if (!blktrans_notifier.list.next)
		register_mtd_user(&blktrans_notifier);

	ret = register_blkdev(tr->major, tr->name);
	if (ret < 0) {
		printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
		       tr->name, tr->major, ret);
		return ret;
	}

	if (ret)
		tr->major = ret;

	tr->blkshift = ffs(tr->blksize) - 1;

	INIT_LIST_HEAD(&tr->devs);

	mutex_lock(&mtd_table_mutex);
	list_add(&tr->list, &blktrans_majors);
	mtd_for_each_device(mtd)
		if (mtd->type != MTD_ABSENT)
			tr->add_mtd(tr, mtd);
	mutex_unlock(&mtd_table_mutex);
	return 0;
}

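/*
 * Undo register_mtd_blktrans(): remove every device the layer created,
 * then drop the block major.
 */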
int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct mtd_blktrans_dev *dev, *next;

	mutex_lock(&mtd_table_mutex);

	/* Remove it from the list of active majors */
	list_del(&tr->list);

	list_for_each_entry_safe(dev, next, &tr->devs, list)
		tr->remove_dev(dev);

	mutex_unlock(&mtd_table_mutex);
	unregister_blkdev(tr->major, tr->name);

	BUG_ON(!list_empty(&tr->devs));
	return 0;
}

static void __exit mtd_blktrans_exit(void)
{
	/* No race here -- if someone's currently in register_mtd_blktrans
	   we're screwed anyway. */
	if (blktrans_notifier.list.next)
		unregister_mtd_user(&blktrans_notifier);
}

module_exit(mtd_blktrans_exit);

EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");