Thomas Gleixner | 1a59d1b8 | 2019-05-27 08:55:05 +0200 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
josh.h.morris@us.ibm.com | 8722ff8 | 2013-02-05 14:15:02 +0100 | [diff] [blame] | 2 | /* |
| 3 | * Filename: dev.c |
| 4 | * |
josh.h.morris@us.ibm.com | 8722ff8 | 2013-02-05 14:15:02 +0100 | [diff] [blame] | 5 | * Authors: Joshua Morris <josh.h.morris@us.ibm.com> |
| 6 | * Philip Kelleher <pjk1939@linux.vnet.ibm.com> |
| 7 | * |
| 8 | * (C) Copyright 2013 IBM Corporation |
josh.h.morris@us.ibm.com | 8722ff8 | 2013-02-05 14:15:02 +0100 | [diff] [blame] | 9 | */ |
| 10 | |
| 11 | #include <linux/kernel.h> |
| 12 | #include <linux/interrupt.h> |
| 13 | #include <linux/module.h> |
| 14 | #include <linux/pci.h> |
| 15 | #include <linux/slab.h> |
| 16 | |
| 17 | #include <linux/hdreg.h> |
| 18 | #include <linux/genhd.h> |
| 19 | #include <linux/blkdev.h> |
| 20 | #include <linux/bio.h> |
| 21 | |
| 22 | #include <linux/fs.h> |
| 23 | |
| 24 | #include "rsxx_priv.h" |
| 25 | |
| 26 | static unsigned int blkdev_minors = 64; |
| 27 | module_param(blkdev_minors, uint, 0444); |
| 28 | MODULE_PARM_DESC(blkdev_minors, "Number of minors(partitions)"); |
| 29 | |
| 30 | /* |
| 31 | * For now I'm making this tweakable in case any applications hit this limit. |
| 32 | * If you see a "bio too big" error in the log you will need to raise this |
| 33 | * value. |
| 34 | */ |
| 35 | static unsigned int blkdev_max_hw_sectors = 1024; |
| 36 | module_param(blkdev_max_hw_sectors, uint, 0444); |
| 37 | MODULE_PARM_DESC(blkdev_max_hw_sectors, "Max hw sectors for a single BIO"); |
| 38 | |
| 39 | static unsigned int enable_blkdev = 1; |
| 40 | module_param(enable_blkdev , uint, 0444); |
| 41 | MODULE_PARM_DESC(enable_blkdev, "Enable block device interfaces"); |
| 42 | |
| 43 | |
/*
 * Per-bio bookkeeping: one rsxx_bio_meta tracks a single bio that has been
 * split into one or more hardware DMA transfers.  The bio is completed (and
 * the meta freed) when the last pending DMA finishes.
 */
struct rsxx_bio_meta {
	struct bio *bio;	/* original bio to complete */
	atomic_t pending_dmas;	/* DMAs still outstanding for this bio */
	atomic_t error;		/* set to 1 if any constituent DMA failed */
	unsigned long start_time; /* value from bio_start_io_acct() */
};

/* Slab cache backing all rsxx_bio_meta allocations (see rsxx_dev_init()). */
static struct kmem_cache *bio_meta_pool;
| 52 | |
| 53 | /*----------------- Block Device Operations -----------------*/ |
| 54 | static int rsxx_blkdev_ioctl(struct block_device *bdev, |
| 55 | fmode_t mode, |
| 56 | unsigned int cmd, |
| 57 | unsigned long arg) |
| 58 | { |
| 59 | struct rsxx_cardinfo *card = bdev->bd_disk->private_data; |
| 60 | |
| 61 | switch (cmd) { |
| 62 | case RSXX_GETREG: |
| 63 | return rsxx_reg_access(card, (void __user *)arg, 1); |
| 64 | case RSXX_SETREG: |
| 65 | return rsxx_reg_access(card, (void __user *)arg, 0); |
| 66 | } |
| 67 | |
| 68 | return -ENOTTY; |
| 69 | } |
| 70 | |
| 71 | static int rsxx_getgeo(struct block_device *bdev, struct hd_geometry *geo) |
| 72 | { |
| 73 | struct rsxx_cardinfo *card = bdev->bd_disk->private_data; |
| 74 | u64 blocks = card->size8 >> 9; |
| 75 | |
| 76 | /* |
| 77 | * get geometry: Fake it. I haven't found any drivers that set |
| 78 | * geo->start, so we won't either. |
| 79 | */ |
| 80 | if (card->size8) { |
| 81 | geo->heads = 64; |
| 82 | geo->sectors = 16; |
| 83 | do_div(blocks, (geo->heads * geo->sectors)); |
| 84 | geo->cylinders = blocks; |
| 85 | } else { |
| 86 | geo->heads = 0; |
| 87 | geo->sectors = 0; |
| 88 | geo->cylinders = 0; |
| 89 | } |
| 90 | return 0; |
| 91 | } |
| 92 | |
| 93 | static const struct block_device_operations rsxx_fops = { |
| 94 | .owner = THIS_MODULE, |
| 95 | .getgeo = rsxx_getgeo, |
| 96 | .ioctl = rsxx_blkdev_ioctl, |
| 97 | }; |
| 98 | |
josh.h.morris@us.ibm.com | 8722ff8 | 2013-02-05 14:15:02 +0100 | [diff] [blame] | 99 | static void bio_dma_done_cb(struct rsxx_cardinfo *card, |
| 100 | void *cb_data, |
| 101 | unsigned int error) |
| 102 | { |
Philip J Kelleher | c206c70 | 2013-02-18 21:35:59 +0100 | [diff] [blame] | 103 | struct rsxx_bio_meta *meta = cb_data; |
josh.h.morris@us.ibm.com | 8722ff8 | 2013-02-05 14:15:02 +0100 | [diff] [blame] | 104 | |
| 105 | if (error) |
| 106 | atomic_set(&meta->error, 1); |
| 107 | |
| 108 | if (atomic_dec_and_test(&meta->pending_dmas)) { |
Philip J Kelleher | 0ab4743 | 2013-06-18 14:36:26 -0500 | [diff] [blame] | 109 | if (!card->eeh_state && card->gendisk) |
Christoph Hellwig | 421716b | 2020-05-27 07:24:06 +0200 | [diff] [blame^] | 110 | bio_end_io_acct(meta->bio, meta->start_time); |
josh.h.morris@us.ibm.com | 8722ff8 | 2013-02-05 14:15:02 +0100 | [diff] [blame] | 111 | |
Christoph Hellwig | 4246a0b | 2015-07-20 15:29:37 +0200 | [diff] [blame] | 112 | if (atomic_read(&meta->error)) |
| 113 | bio_io_error(meta->bio); |
| 114 | else |
| 115 | bio_endio(meta->bio); |
josh.h.morris@us.ibm.com | 8722ff8 | 2013-02-05 14:15:02 +0100 | [diff] [blame] | 116 | kmem_cache_free(bio_meta_pool, meta); |
| 117 | } |
| 118 | } |
| 119 | |
/*
 * make_request entry point: validates the incoming bio against card state,
 * allocates per-bio bookkeeping, starts I/O accounting, and hands the bio
 * to the DMA engine.  Completion happens asynchronously in
 * bio_dma_done_cb(); all failure paths here end the bio immediately with
 * the appropriate blk_status_t.
 */
static blk_qc_t rsxx_make_request(struct request_queue *q, struct bio *bio)
{
	struct rsxx_cardinfo *card = q->queuedata;
	struct rsxx_bio_meta *bio_meta;
	blk_status_t st = BLK_STS_IOERR;	/* default error for req_err path */

	/* Split bios that exceed the queue limits before any validation. */
	blk_queue_split(q, &bio);

	might_sleep();

	/* queuedata is cleared on teardown; bail if the card is gone. */
	if (!card)
		goto req_err;

	if (bio_end_sector(bio) > get_capacity(card->gendisk))
		goto req_err;

	if (unlikely(card->halt))
		goto req_err;

	if (unlikely(card->dma_fault))
		goto req_err;

	if (bio->bi_iter.bi_size == 0) {
		dev_err(CARD_TO_DEV(card), "size zero BIO!\n");
		goto req_err;
	}

	bio_meta = kmem_cache_alloc(bio_meta_pool, GFP_KERNEL);
	if (!bio_meta) {
		st = BLK_STS_RESOURCE;
		goto req_err;
	}

	bio_meta->bio = bio;
	atomic_set(&bio_meta->error, 0);
	atomic_set(&bio_meta->pending_dmas, 0);

	/*
	 * NOTE(review): halt was already checked above, so this re-check only
	 * matters if halt can flip concurrently; if it does, start_time is
	 * left uninitialized while the completion side gates accounting on
	 * eeh_state/gendisk instead — asymmetry to confirm with maintainers.
	 */
	if (!unlikely(card->halt))
		bio_meta->start_time = bio_start_io_acct(bio);

	dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n",
		 bio_data_dir(bio) ? 'W' : 'R', bio_meta,
		 (u64)bio->bi_iter.bi_sector << 9, bio->bi_iter.bi_size);

	st = rsxx_dma_queue_bio(card, bio, &bio_meta->pending_dmas,
				    bio_dma_done_cb, bio_meta);
	if (st)
		goto queue_err;

	return BLK_QC_T_NONE;

queue_err:
	kmem_cache_free(bio_meta_pool, bio_meta);
req_err:
	if (st)
		bio->bi_status = st;
	bio_endio(bio);
	return BLK_QC_T_NONE;
}
| 179 | |
| 180 | /*----------------- Device Setup -------------------*/ |
| 181 | static bool rsxx_discard_supported(struct rsxx_cardinfo *card) |
| 182 | { |
| 183 | unsigned char pci_rev; |
| 184 | |
| 185 | pci_read_config_byte(card->dev, PCI_REVISION_ID, &pci_rev); |
| 186 | |
| 187 | return (pci_rev >= RSXX_DISCARD_SUPPORT); |
| 188 | } |
| 189 | |
josh.h.morris@us.ibm.com | 8722ff8 | 2013-02-05 14:15:02 +0100 | [diff] [blame] | 190 | int rsxx_attach_dev(struct rsxx_cardinfo *card) |
| 191 | { |
| 192 | mutex_lock(&card->dev_lock); |
| 193 | |
| 194 | /* The block device requires the stripe size from the config. */ |
| 195 | if (enable_blkdev) { |
| 196 | if (card->config_valid) |
| 197 | set_capacity(card->gendisk, card->size8 >> 9); |
| 198 | else |
| 199 | set_capacity(card->gendisk, 0); |
Hannes Reinecke | fef912b | 2018-09-28 08:17:19 +0200 | [diff] [blame] | 200 | device_add_disk(CARD_TO_DEV(card), card->gendisk, NULL); |
josh.h.morris@us.ibm.com | 8722ff8 | 2013-02-05 14:15:02 +0100 | [diff] [blame] | 201 | card->bdev_attached = 1; |
| 202 | } |
| 203 | |
| 204 | mutex_unlock(&card->dev_lock); |
| 205 | |
| 206 | return 0; |
| 207 | } |
| 208 | |
| 209 | void rsxx_detach_dev(struct rsxx_cardinfo *card) |
| 210 | { |
| 211 | mutex_lock(&card->dev_lock); |
| 212 | |
| 213 | if (card->bdev_attached) { |
| 214 | del_gendisk(card->gendisk); |
| 215 | card->bdev_attached = 0; |
| 216 | } |
| 217 | |
| 218 | mutex_unlock(&card->dev_lock); |
| 219 | } |
| 220 | |
/*
 * Allocate and configure the block device pieces for a card: major number,
 * request queue, gendisk, and queue limits/flags.  The disk is populated
 * here but not added to the system until rsxx_attach_dev().  Each failure
 * unwinds everything acquired before it.
 */
int rsxx_setup_dev(struct rsxx_cardinfo *card)
{
	unsigned short blk_size;

	mutex_init(&card->dev_lock);

	if (!enable_blkdev)
		return 0;

	/* Dynamic major allocation (first argument 0). */
	card->major = register_blkdev(0, DRIVER_NAME);
	if (card->major < 0) {
		dev_err(CARD_TO_DEV(card), "Failed to get major number\n");
		return -ENOMEM;
	}

	card->queue = blk_alloc_queue(rsxx_make_request, NUMA_NO_NODE);
	if (!card->queue) {
		dev_err(CARD_TO_DEV(card), "Failed queue alloc\n");
		unregister_blkdev(card->major, DRIVER_NAME);
		return -ENOMEM;
	}

	card->gendisk = alloc_disk(blkdev_minors);
	if (!card->gendisk) {
		dev_err(CARD_TO_DEV(card), "Failed disk alloc\n");
		blk_cleanup_queue(card->queue);
		unregister_blkdev(card->major, DRIVER_NAME);
		return -ENOMEM;
	}

	/* Logical block size comes from the card config when available. */
	if (card->config_valid) {
		blk_size = card->config.data.block_size;
		blk_queue_dma_alignment(card->queue, blk_size - 1);
		blk_queue_logical_block_size(card->queue, blk_size);
	}

	blk_queue_max_hw_sectors(card->queue, blkdev_max_hw_sectors);
	blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE);

	/* Flash-backed: non-rotational, and not an entropy source. */
	blk_queue_flag_set(QUEUE_FLAG_NONROT, card->queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, card->queue);
	if (rsxx_discard_supported(card)) {
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, card->queue);
		blk_queue_max_discard_sectors(card->queue,
						RSXX_HW_BLK_SIZE >> 9);
		card->queue->limits.discard_granularity = RSXX_HW_BLK_SIZE;
		card->queue->limits.discard_alignment   = RSXX_HW_BLK_SIZE;
	}

	/* Lets rsxx_make_request() recover the card from the queue. */
	card->queue->queuedata = card;

	snprintf(card->gendisk->disk_name, sizeof(card->gendisk->disk_name),
		 "rsxx%d", card->disk_id);
	card->gendisk->major = card->major;
	card->gendisk->first_minor = 0;
	card->gendisk->fops = &rsxx_fops;
	card->gendisk->private_data = card;
	card->gendisk->queue = card->queue;

	return 0;
}
| 282 | |
| 283 | void rsxx_destroy_dev(struct rsxx_cardinfo *card) |
| 284 | { |
| 285 | if (!enable_blkdev) |
| 286 | return; |
| 287 | |
| 288 | put_disk(card->gendisk); |
| 289 | card->gendisk = NULL; |
| 290 | |
| 291 | blk_cleanup_queue(card->queue); |
Philip J Kelleher | 66bc600 | 2013-06-18 14:46:04 -0500 | [diff] [blame] | 292 | card->queue->queuedata = NULL; |
josh.h.morris@us.ibm.com | 8722ff8 | 2013-02-05 14:15:02 +0100 | [diff] [blame] | 293 | unregister_blkdev(card->major, DRIVER_NAME); |
| 294 | } |
| 295 | |
| 296 | int rsxx_dev_init(void) |
| 297 | { |
| 298 | bio_meta_pool = KMEM_CACHE(rsxx_bio_meta, SLAB_HWCACHE_ALIGN); |
| 299 | if (!bio_meta_pool) |
| 300 | return -ENOMEM; |
| 301 | |
| 302 | return 0; |
| 303 | } |
| 304 | |
| 305 | void rsxx_dev_cleanup(void) |
| 306 | { |
| 307 | kmem_cache_destroy(bio_meta_pool); |
| 308 | } |
| 309 | |
| 310 | |