blob: dd33f1bdf3b8368aa1fb704ab87839907a7fc646 [file] [log] [blame]
Thomas Gleixner1a59d1b82019-05-27 08:55:05 +02001// SPDX-License-Identifier: GPL-2.0-or-later
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +01002/*
3* Filename: dev.c
4*
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +01005* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
6* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
7*
8* (C) Copyright 2013 IBM Corporation
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +01009*/
10
11#include <linux/kernel.h>
12#include <linux/interrupt.h>
13#include <linux/module.h>
14#include <linux/pci.h>
15#include <linux/slab.h>
16
17#include <linux/hdreg.h>
18#include <linux/genhd.h>
19#include <linux/blkdev.h>
20#include <linux/bio.h>
21
22#include <linux/fs.h>
23
24#include "rsxx_priv.h"
25
/* Number of minor numbers (and thus partitions) each card's disk gets. */
static unsigned int blkdev_minors = 64;
module_param(blkdev_minors, uint, 0444);
MODULE_PARM_DESC(blkdev_minors, "Number of minors(partitions)");

/*
 * For now I'm making this tweakable in case any applications hit this limit.
 * If you see a "bio too big" error in the log you will need to raise this
 * value.
 */
static unsigned int blkdev_max_hw_sectors = 1024;
module_param(blkdev_max_hw_sectors, uint, 0444);
MODULE_PARM_DESC(blkdev_max_hw_sectors, "Max hw sectors for a single BIO");

/* Set to 0 to run the card without exposing any block device interface. */
static unsigned int enable_blkdev = 1;
module_param(enable_blkdev , uint, 0444);
MODULE_PARM_DESC(enable_blkdev, "Enable block device interfaces");


/*
 * Per-bio bookkeeping shared by every DMA issued for one bio.
 * Allocated from bio_meta_pool in rsxx_submit_bio() and freed in
 * bio_dma_done_cb() once pending_dmas drops to zero.
 */
struct rsxx_bio_meta {
	struct bio *bio;
	atomic_t pending_dmas;		/* DMAs still outstanding for this bio */
	atomic_t error;			/* set to 1 if any DMA completed with error */
	unsigned long start_time;	/* from bio_start_io_acct() */
};

/* Slab cache backing struct rsxx_bio_meta allocations. */
static struct kmem_cache *bio_meta_pool;

static void rsxx_submit_bio(struct bio *bio);
Christoph Hellwigc62b37d2020-07-01 10:59:43 +020054
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +010055/*----------------- Block Device Operations -----------------*/
56static int rsxx_blkdev_ioctl(struct block_device *bdev,
57 fmode_t mode,
58 unsigned int cmd,
59 unsigned long arg)
60{
61 struct rsxx_cardinfo *card = bdev->bd_disk->private_data;
62
63 switch (cmd) {
64 case RSXX_GETREG:
65 return rsxx_reg_access(card, (void __user *)arg, 1);
66 case RSXX_SETREG:
67 return rsxx_reg_access(card, (void __user *)arg, 0);
68 }
69
70 return -ENOTTY;
71}
72
73static int rsxx_getgeo(struct block_device *bdev, struct hd_geometry *geo)
74{
75 struct rsxx_cardinfo *card = bdev->bd_disk->private_data;
76 u64 blocks = card->size8 >> 9;
77
78 /*
79 * get geometry: Fake it. I haven't found any drivers that set
80 * geo->start, so we won't either.
81 */
82 if (card->size8) {
83 geo->heads = 64;
84 geo->sectors = 16;
85 do_div(blocks, (geo->heads * geo->sectors));
86 geo->cylinders = blocks;
87 } else {
88 geo->heads = 0;
89 geo->sectors = 0;
90 geo->cylinders = 0;
91 }
92 return 0;
93}
94
/* Bio-based driver: I/O enters through .submit_bio, not a request queue. */
static const struct block_device_operations rsxx_fops = {
	.owner		= THIS_MODULE,
	.submit_bio	= rsxx_submit_bio,
	.getgeo		= rsxx_getgeo,
	.ioctl		= rsxx_blkdev_ioctl,
};
101
/*
 * Completion callback, invoked once per DMA issued for a bio.
 *
 * Any single DMA error marks the whole bio as failed.  When the last
 * outstanding DMA completes (pending_dmas reaches zero) this ends I/O
 * accounting, completes the bio with the accumulated status and frees
 * the per-bio metadata.
 */
static void bio_dma_done_cb(struct rsxx_cardinfo *card,
			    void *cb_data,
			    unsigned int error)
{
	struct rsxx_bio_meta *meta = cb_data;

	if (error)
		atomic_set(&meta->error, 1);

	if (atomic_dec_and_test(&meta->pending_dmas)) {
		/*
		 * NOTE(review): accounting start in rsxx_submit_bio() is
		 * gated on !card->halt, while the end here is gated on
		 * !card->eeh_state && card->gendisk -- the two conditions
		 * can disagree; confirm the asymmetry is intentional.
		 */
		if (!card->eeh_state && card->gendisk)
			bio_end_io_acct(meta->bio, meta->start_time);

		if (atomic_read(&meta->error))
			bio_io_error(meta->bio);
		else
			bio_endio(meta->bio);
		kmem_cache_free(bio_meta_pool, meta);
	}
}
122
Christoph Hellwig3e087732021-10-12 13:12:24 +0200123static void rsxx_submit_bio(struct bio *bio)
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +0100124{
Christoph Hellwig309dca302021-01-24 11:02:34 +0100125 struct rsxx_cardinfo *card = bio->bi_bdev->bd_disk->private_data;
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +0100126 struct rsxx_bio_meta *bio_meta;
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +0200127 blk_status_t st = BLK_STS_IOERR;
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +0100128
Christoph Hellwigf695ca32020-07-01 10:59:39 +0200129 blk_queue_split(&bio);
Kent Overstreet54efd502015-04-23 22:37:18 -0700130
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +0100131 might_sleep();
132
Philip J Kelleher66bc6002013-06-18 14:46:04 -0500133 if (!card)
134 goto req_err;
135
Kent Overstreet4f024f32013-10-11 15:44:27 -0700136 if (bio_end_sector(bio) > get_capacity(card->gendisk))
Philip J Kelleher3eb8dca2013-06-18 14:48:38 -0500137 goto req_err;
138
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +0200139 if (unlikely(card->halt))
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +0100140 goto req_err;
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +0100141
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +0200142 if (unlikely(card->dma_fault))
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +0100143 goto req_err;
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +0100144
Kent Overstreet4f024f32013-10-11 15:44:27 -0700145 if (bio->bi_iter.bi_size == 0) {
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +0100146 dev_err(CARD_TO_DEV(card), "size zero BIO!\n");
147 goto req_err;
148 }
149
150 bio_meta = kmem_cache_alloc(bio_meta_pool, GFP_KERNEL);
151 if (!bio_meta) {
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +0200152 st = BLK_STS_RESOURCE;
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +0100153 goto req_err;
154 }
155
156 bio_meta->bio = bio;
157 atomic_set(&bio_meta->error, 0);
158 atomic_set(&bio_meta->pending_dmas, 0);
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +0100159
Philip J Kelleher0ab47432013-06-18 14:36:26 -0500160 if (!unlikely(card->halt))
Christoph Hellwig421716b2020-05-27 07:24:06 +0200161 bio_meta->start_time = bio_start_io_acct(bio);
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +0100162
163 dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n",
164 bio_data_dir(bio) ? 'W' : 'R', bio_meta,
Kent Overstreet4f024f32013-10-11 15:44:27 -0700165 (u64)bio->bi_iter.bi_sector << 9, bio->bi_iter.bi_size);
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +0100166
167 st = rsxx_dma_queue_bio(card, bio, &bio_meta->pending_dmas,
168 bio_dma_done_cb, bio_meta);
169 if (st)
170 goto queue_err;
171
Christoph Hellwig3e087732021-10-12 13:12:24 +0200172 return;
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +0100173
174queue_err:
175 kmem_cache_free(bio_meta_pool, bio_meta);
176req_err:
Christoph Hellwig4246a0b2015-07-20 15:29:37 +0200177 if (st)
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +0200178 bio->bi_status = st;
Christoph Hellwig4246a0b2015-07-20 15:29:37 +0200179 bio_endio(bio);
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +0100180}
181
182/*----------------- Device Setup -------------------*/
183static bool rsxx_discard_supported(struct rsxx_cardinfo *card)
184{
185 unsigned char pci_rev;
186
187 pci_read_config_byte(card->dev, PCI_REVISION_ID, &pci_rev);
188
189 return (pci_rev >= RSXX_DISCARD_SUPPORT);
190}
191
/*
 * Make the card's gendisk visible to the system.
 *
 * Sets the disk capacity from the card configuration (0 when the config
 * is not valid) and registers the disk.  On success bdev_attached is
 * set so a later rsxx_detach_dev() knows to del_gendisk(); on failure
 * the disk is torn down with blk_cleanup_disk().
 *
 * Returns 0 on success or the negative errno from device_add_disk().
 */
int rsxx_attach_dev(struct rsxx_cardinfo *card)
{
	int err = 0;

	mutex_lock(&card->dev_lock);

	/* The block device requires the stripe size from the config. */
	if (enable_blkdev) {
		if (card->config_valid)
			set_capacity(card->gendisk, card->size8 >> 9);
		else
			set_capacity(card->gendisk, 0);
		err = device_add_disk(CARD_TO_DEV(card), card->gendisk, NULL);
		if (err == 0)
			card->bdev_attached = 1;
	}

	mutex_unlock(&card->dev_lock);

	/*
	 * NOTE(review): card->gendisk is not cleared after this cleanup;
	 * confirm rsxx_destroy_dev() cannot run afterwards and clean up
	 * the same disk twice.
	 */
	if (err)
		blk_cleanup_disk(card->gendisk);

	return err;
}
216
217void rsxx_detach_dev(struct rsxx_cardinfo *card)
218{
219 mutex_lock(&card->dev_lock);
220
221 if (card->bdev_attached) {
222 del_gendisk(card->gendisk);
223 card->bdev_attached = 0;
224 }
225
226 mutex_unlock(&card->dev_lock);
227}
228
/*
 * Allocate and configure the gendisk and its queue for one card.
 *
 * Registers a dynamic block major, allocates a bio-based disk, applies
 * the block-size limits from the card config (only when it was read
 * successfully), and enables discard on hardware revisions that support
 * it.  The disk is not made visible here; rsxx_attach_dev() adds it.
 *
 * Returns 0 on success (including the enable_blkdev=0 no-op case) or
 * -ENOMEM on failure.
 */
int rsxx_setup_dev(struct rsxx_cardinfo *card)
{
	unsigned short blk_size;

	mutex_init(&card->dev_lock);

	if (!enable_blkdev)
		return 0;

	card->major = register_blkdev(0, DRIVER_NAME);
	if (card->major < 0) {
		dev_err(CARD_TO_DEV(card), "Failed to get major number\n");
		return -ENOMEM;
	}

	card->gendisk = blk_alloc_disk(blkdev_minors);
	if (!card->gendisk) {
		dev_err(CARD_TO_DEV(card), "Failed disk alloc\n");
		unregister_blkdev(card->major, DRIVER_NAME);
		return -ENOMEM;
	}

	/* Block size comes from the card config; skip if it isn't valid. */
	if (card->config_valid) {
		blk_size = card->config.data.block_size;
		blk_queue_dma_alignment(card->gendisk->queue, blk_size - 1);
		blk_queue_logical_block_size(card->gendisk->queue, blk_size);
	}

	blk_queue_max_hw_sectors(card->gendisk->queue, blkdev_max_hw_sectors);
	blk_queue_physical_block_size(card->gendisk->queue, RSXX_HW_BLK_SIZE);

	/* Solid-state device: not rotational, no entropy contribution. */
	blk_queue_flag_set(QUEUE_FLAG_NONROT, card->gendisk->queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, card->gendisk->queue);
	if (rsxx_discard_supported(card)) {
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, card->gendisk->queue);
		blk_queue_max_discard_sectors(card->gendisk->queue,
						RSXX_HW_BLK_SIZE >> 9);
		card->gendisk->queue->limits.discard_granularity =
			RSXX_HW_BLK_SIZE;
		card->gendisk->queue->limits.discard_alignment =
			RSXX_HW_BLK_SIZE;
	}

	snprintf(card->gendisk->disk_name, sizeof(card->gendisk->disk_name),
		 "rsxx%d", card->disk_id);
	card->gendisk->major = card->major;
	card->gendisk->minors = blkdev_minors;
	card->gendisk->fops = &rsxx_fops;
	card->gendisk->private_data = card;

	return 0;
}
281
282void rsxx_destroy_dev(struct rsxx_cardinfo *card)
283{
284 if (!enable_blkdev)
285 return;
286
Christoph Hellwig0be79662021-05-21 07:50:59 +0200287 blk_cleanup_disk(card->gendisk);
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +0100288 card->gendisk = NULL;
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +0100289 unregister_blkdev(card->major, DRIVER_NAME);
290}
291
292int rsxx_dev_init(void)
293{
294 bio_meta_pool = KMEM_CACHE(rsxx_bio_meta, SLAB_HWCACHE_ALIGN);
295 if (!bio_meta_pool)
296 return -ENOMEM;
297
298 return 0;
299}
300
/* Module exit: destroy the bio meta slab cache (NULL-safe). */
void rsxx_dev_cleanup(void)
{
	kmem_cache_destroy(bio_meta_pool);
}
305
306