/*
 * Block driver for s390 storage class memory.
 *
 * Copyright IBM Corp. 2012
 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
 */

#define KMSG_COMPONENT "scm_block"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/eadm.h>
#include "scm_blk.h"

debug_info_t *scm_debug;
static int scm_major;
static mempool_t *aidaw_pool;
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(inactive_requests);
static unsigned int nr_requests = 64;
static unsigned int nr_requests_per_io = 8;
static atomic_t nr_devices = ATOMIC_INIT(0);
module_param(nr_requests, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests, "Number of parallel requests.");

module_param(nr_requests_per_io, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests_per_io, "Number of requests per IO.");

MODULE_DESCRIPTION("Block driver for s390 storage class memory.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("scm:scmdev*");

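/* Free one preallocated request: the AOB page, the per-IO request array, and the surrounding header. */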
static void __scm_free_rq(struct scm_request *scmrq)
{
	struct aob_rq_header *aobrq = to_aobrq(scmrq);

	free_page((unsigned long) scmrq->aob);
	kfree(scmrq->request);
	kfree(aobrq);
}

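/* Tear down the inactive request list and the aidaw mempool (module exit and init error path). */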
static void scm_free_rqs(void)
{
	struct list_head *iter, *safe;
	struct scm_request *scmrq;

	spin_lock_irq(&list_lock);
	list_for_each_safe(iter, safe, &inactive_requests) {
		scmrq = list_entry(iter, struct scm_request, list);
		list_del(&scmrq->list);
		__scm_free_rq(scmrq);
	}
	spin_unlock_irq(&list_lock);

	mempool_destroy(aidaw_pool);
}

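/*
 * Allocate one request: an aob_rq_header with an embedded scm_request,
 * a zeroed DMA page for the AOB, and an array of nr_requests_per_io
 * request pointers. The new request is parked on the inactive list.
 */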
static int __scm_alloc_rq(void)
{
	struct aob_rq_header *aobrq;
	struct scm_request *scmrq;

	aobrq = kzalloc(sizeof(*aobrq) + sizeof(*scmrq), GFP_KERNEL);
	if (!aobrq)
		return -ENOMEM;

	scmrq = (void *) aobrq->data;
	scmrq->aob = (void *) get_zeroed_page(GFP_DMA);
	if (!scmrq->aob)
		goto free;

	scmrq->request = kcalloc(nr_requests_per_io, sizeof(scmrq->request[0]),
				 GFP_KERNEL);
	if (!scmrq->request)
		goto free;

	INIT_LIST_HEAD(&scmrq->list);
	spin_lock_irq(&list_lock);
	list_add(&scmrq->list, &inactive_requests);
	spin_unlock_irq(&list_lock);

	return 0;
free:
	__scm_free_rq(scmrq);
	return -ENOMEM;
}

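/* Preallocate nrqs requests plus a page pool (one page per eight requests) for extra aidaw lists. */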
static int scm_alloc_rqs(unsigned int nrqs)
{
	int ret = 0;

	aidaw_pool = mempool_create_page_pool(max(nrqs/8, 1U), 0);
	if (!aidaw_pool)
		return -ENOMEM;

	while (nrqs-- && !ret)
		ret = __scm_alloc_rq();

	return ret;
}

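/* Take a preallocated request off the inactive list, or return NULL if none is free. */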
static struct scm_request *scm_request_fetch(void)
{
	struct scm_request *scmrq = NULL;

	spin_lock_irq(&list_lock);
	if (list_empty(&inactive_requests))
		goto out;
	scmrq = list_first_entry(&inactive_requests, struct scm_request, list);
	list_del(&scmrq->list);
out:
	spin_unlock_irq(&list_lock);
	return scmrq;
}

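/*
 * Return a request to the inactive list. Aidaw pages that came from the
 * mempool are page aligned (unlike the aidaws placed inside the AOB page)
 * and are given back to the pool here.
 */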
static void scm_request_done(struct scm_request *scmrq)
{
	unsigned long flags;
	struct msb *msb;
	u64 aidaw;
	int i;

	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
		msb = &scmrq->aob->msb[i];
		aidaw = msb->data_addr;

		if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
		    IS_ALIGNED(aidaw, PAGE_SIZE))
			mempool_free(virt_to_page(aidaw), aidaw_pool);
	}

	spin_lock_irqsave(&list_lock, flags);
	list_add(&scmrq->list, &inactive_requests);
	spin_unlock_irqrestore(&list_lock, flags);
}

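/* Refuse writes while the device is in the SCM_WR_PROHIBIT state. */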
static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
{
	return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
}

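/* Allocate one page from the aidaw mempool; may be called from atomic context. */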
static inline struct aidaw *scm_aidaw_alloc(void)
{
	struct page *page = mempool_alloc(aidaw_pool, GFP_ATOMIC);

	return page ? page_address(page) : NULL;
}

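/*
 * Number of data bytes addressable by the aidaw slots remaining between
 * *aidaw and the end of its page; each aidaw describes one 4K block.
 */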
static inline unsigned long scm_aidaw_bytes(struct aidaw *aidaw)
{
	unsigned long _aidaw = (unsigned long) aidaw;
	unsigned long bytes = ALIGN(_aidaw, PAGE_SIZE) - _aidaw;

	return (bytes / sizeof(*aidaw)) * PAGE_SIZE;
}

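/*
 * Reuse the aidaws left in the current page if they can cover the request;
 * otherwise fetch a fresh, zeroed page from the pool.
 */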
struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes)
{
	struct aidaw *aidaw;

	if (scm_aidaw_bytes(scmrq->next_aidaw) >= bytes)
		return scmrq->next_aidaw;

	aidaw = scm_aidaw_alloc();
	if (aidaw)
		memset(aidaw, 0, PAGE_SIZE);
	return aidaw;
}

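/*
 * Fill the next msb in the AOB with one block layer request: set up the
 * scm address and operation code and build an aidaw list with one entry
 * per bio_vec segment. Returns -ENOMEM if no aidaw space was available.
 */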
static int scm_request_prepare(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	struct scm_device *scmdev = bdev->gendisk->private_data;
	int pos = scmrq->aob->request.msb_count;
	struct msb *msb = &scmrq->aob->msb[pos];
	struct request *req = scmrq->request[pos];
	struct req_iterator iter;
	struct aidaw *aidaw;
	struct bio_vec bv;

	aidaw = scm_aidaw_fetch(scmrq, blk_rq_bytes(req));
	if (!aidaw)
		return -ENOMEM;

	msb->bs = MSB_BS_4K;
	scmrq->aob->request.msb_count++;
	msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
	msb->oc = (rq_data_dir(req) == READ) ? MSB_OC_READ : MSB_OC_WRITE;
	msb->flags |= MSB_FLAG_IDA;
	msb->data_addr = (u64) aidaw;

	rq_for_each_segment(bv, req, iter) {
		WARN_ON(bv.bv_offset);
		msb->blk_count += bv.bv_len >> 12;
		aidaw->data_addr = (u64) page_address(bv.bv_page);
		aidaw++;
	}

	scmrq->next_aidaw = aidaw;
	return 0;
}

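/* Slot the block layer request into the next free msb position. */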
static inline void scm_request_set(struct scm_request *scmrq,
				   struct request *req)
{
	scmrq->request[scmrq->aob->request.msb_count] = req;
}

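/*
 * Prepare a fetched request for reuse: clear the request array and the AOB,
 * set the command code, and reset the retry budget and error state.
 */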
static inline void scm_request_init(struct scm_blk_dev *bdev,
				    struct scm_request *scmrq)
{
	struct aob_rq_header *aobrq = to_aobrq(scmrq);
	struct aob *aob = scmrq->aob;

	memset(scmrq->request, 0,
	       nr_requests_per_io * sizeof(scmrq->request[0]));
	memset(aob, 0, sizeof(*aob));
	aobrq->scmdev = bdev->scmdev;
	aob->request.cmd_code = ARQB_CMD_MOVE;
	aob->request.data = (u64) aobrq;
	scmrq->bdev = bdev;
	scmrq->retries = 4;
	scmrq->error = BLK_STS_OK;
	/* We don't use all msbs - place aidaws at the end of the aob page. */
	scmrq->next_aidaw = (void *) &aob->msb[nr_requests_per_io];
}

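/* Push every request bundled in this scm_request back onto the blk-mq requeue list. */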
static void scm_request_requeue(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	int i;

	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
		blk_mq_requeue_request(scmrq->request[i], false);

	atomic_dec(&bdev->queued_reqs);
	scm_request_done(scmrq);
	blk_mq_kick_requeue_list(bdev->rq);
}

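/* Propagate the completion status to each bundled block layer request and complete it. */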
static void scm_request_finish(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	blk_status_t *error;
	int i;

	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
		error = blk_mq_rq_to_pdu(scmrq->request[i]);
		*error = scmrq->error;
		blk_mq_complete_request(scmrq->request[i]);
	}

	atomic_dec(&bdev->queued_reqs);
	scm_request_done(scmrq);
}

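/* Hand the AOB to the EADM subchannel; on failure the request is requeued. */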
static void scm_request_start(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;

	atomic_inc(&bdev->queued_reqs);
	if (eadm_start_aob(scmrq->aob)) {
		SCM_LOG(5, "no subchannel");
		scm_request_requeue(scmrq);
	}
}

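/* Per hardware queue state: the scm_request currently being filled, under lock. */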
struct scm_queue {
	struct scm_request *scmrq;
	spinlock_t lock;
};

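/*
 * blk-mq .queue_rq handler: gather up to nr_requests_per_io requests into
 * one scm_request and start it once the queue is drained (qd->last) or the
 * AOB is full. Resource shortages are reported as BLK_STS_RESOURCE so the
 * block layer retries later.
 */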
static blk_status_t scm_blk_request(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *qd)
{
	struct scm_device *scmdev = hctx->queue->queuedata;
	struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
	struct scm_queue *sq = hctx->driver_data;
	struct request *req = qd->rq;
	struct scm_request *scmrq;

	spin_lock(&sq->lock);
	if (!scm_permit_request(bdev, req)) {
		spin_unlock(&sq->lock);
		return BLK_STS_RESOURCE;
	}

	scmrq = sq->scmrq;
	if (!scmrq) {
		scmrq = scm_request_fetch();
		if (!scmrq) {
			SCM_LOG(5, "no request");
			spin_unlock(&sq->lock);
			return BLK_STS_RESOURCE;
		}
		scm_request_init(bdev, scmrq);
		sq->scmrq = scmrq;
	}
	scm_request_set(scmrq, req);

	if (scm_request_prepare(scmrq)) {
		SCM_LOG(5, "aidaw alloc failed");
		scm_request_set(scmrq, NULL);

		if (scmrq->aob->request.msb_count)
			scm_request_start(scmrq);

		sq->scmrq = NULL;
		spin_unlock(&sq->lock);
		return BLK_STS_RESOURCE;
	}
	blk_mq_start_request(req);

	if (qd->last || scmrq->aob->request.msb_count == nr_requests_per_io) {
		scm_request_start(scmrq);
		sq->scmrq = NULL;
	}
	spin_unlock(&sq->lock);
	return BLK_STS_OK;
}

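/* Allocate and free the per hardware queue state. */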
static int scm_blk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			     unsigned int idx)
{
	struct scm_queue *qd = kzalloc(sizeof(*qd), GFP_KERNEL);

	if (!qd)
		return -ENOMEM;

	spin_lock_init(&qd->lock);
	hctx->driver_data = qd;

	return 0;
}

static void scm_blk_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
{
	struct scm_queue *qd = hctx->driver_data;

	WARN_ON(qd->scmrq);
	kfree(hctx->driver_data);
	hctx->driver_data = NULL;
}

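/* Log the failure reason; for non-timeout errors the response block is dumped as well. */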
static void __scmrq_log_error(struct scm_request *scmrq)
{
	struct aob *aob = scmrq->aob;

	if (scmrq->error == BLK_STS_TIMEOUT)
		SCM_LOG(1, "Request timeout");
	else {
		SCM_LOG(1, "Request error");
		SCM_LOG_HEX(1, &aob->response, sizeof(aob->response));
	}
	if (scmrq->retries)
		SCM_LOG(1, "Retry request");
	else
		pr_err("An I/O operation to SCM failed with rc=%d\n",
		       scmrq->error);
}

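/*
 * Retry path: restart the AOB, unless the device reported a write prohibit
 * condition, in which case the device state is updated and the request
 * requeued.
 */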
static void scm_blk_handle_error(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	unsigned long flags;

	if (scmrq->error != BLK_STS_IOERR)
		goto restart;

	/* For -EIO the response block is valid. */
	switch (scmrq->aob->response.eqc) {
	case EQC_WR_PROHIBIT:
		spin_lock_irqsave(&bdev->lock, flags);
		if (bdev->state != SCM_WR_PROHIBIT)
			pr_info("%lx: Write access to the SCM increment is suspended\n",
				(unsigned long) bdev->scmdev->address);
		bdev->state = SCM_WR_PROHIBIT;
		spin_unlock_irqrestore(&bdev->lock, flags);
		goto requeue;
	default:
		break;
	}

restart:
	if (!eadm_start_aob(scmrq->aob))
		return;

requeue:
	scm_request_requeue(scmrq);
}

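/* Completion callback for an AOB: on error, retry until the retry budget is exhausted. */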
void scm_blk_irq(struct scm_device *scmdev, void *data, blk_status_t error)
{
	struct scm_request *scmrq = data;

	scmrq->error = error;
	if (error) {
		__scmrq_log_error(scmrq);
		if (scmrq->retries-- > 0) {
			scm_blk_handle_error(scmrq);
			return;
		}
	}

	scm_request_finish(scmrq);
}

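/* blk-mq .complete handler: end the request with the blk_status_t stored in its per-request data. */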
static void scm_blk_request_done(struct request *req)
{
	blk_status_t *error = blk_mq_rq_to_pdu(req);

	blk_mq_end_request(req, *error);
}

static const struct block_device_operations scm_blk_devops = {
	.owner = THIS_MODULE,
};

static const struct blk_mq_ops scm_mq_ops = {
	.queue_rq = scm_blk_request,
	.complete = scm_blk_request_done,
	.init_hctx = scm_blk_init_hctx,
	.exit_hctx = scm_blk_exit_hctx,
};

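/*
 * Set up one scm block device: allocate the blk-mq tag set and queue,
 * configure 4K logical blocks, and register the gendisk as scma..scmzz.
 */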
int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
{
	unsigned int devindex, nr_max_blk;
	struct request_queue *rq;
	int len, ret;

	devindex = atomic_inc_return(&nr_devices) - 1;
	/* scma..scmz + scmaa..scmzz */
	if (devindex > 701) {
		ret = -ENODEV;
		goto out;
	}

	bdev->scmdev = scmdev;
	bdev->state = SCM_OPER;
	spin_lock_init(&bdev->lock);
	atomic_set(&bdev->queued_reqs, 0);

	bdev->tag_set.ops = &scm_mq_ops;
	bdev->tag_set.cmd_size = sizeof(blk_status_t);
	bdev->tag_set.nr_hw_queues = nr_requests;
	bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests;
	bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;

	ret = blk_mq_alloc_tag_set(&bdev->tag_set);
	if (ret)
		goto out;

	rq = blk_mq_init_queue(&bdev->tag_set);
	if (IS_ERR(rq)) {
		ret = PTR_ERR(rq);
		goto out_tag;
	}
	bdev->rq = rq;
	nr_max_blk = min(scmdev->nr_max_block,
			 (unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));

	blk_queue_logical_block_size(rq, 1 << 12);
	blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
	blk_queue_max_segments(rq, nr_max_blk);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, rq);

	bdev->gendisk = alloc_disk(SCM_NR_PARTS);
	if (!bdev->gendisk) {
		ret = -ENOMEM;
		goto out_queue;
	}
	rq->queuedata = scmdev;
	bdev->gendisk->private_data = scmdev;
	bdev->gendisk->fops = &scm_blk_devops;
	bdev->gendisk->queue = rq;
	bdev->gendisk->major = scm_major;
	bdev->gendisk->first_minor = devindex * SCM_NR_PARTS;

	len = snprintf(bdev->gendisk->disk_name, DISK_NAME_LEN, "scm");
	if (devindex > 25) {
		len += snprintf(bdev->gendisk->disk_name + len,
				DISK_NAME_LEN - len, "%c",
				'a' + (devindex / 26) - 1);
		devindex = devindex % 26;
	}
	snprintf(bdev->gendisk->disk_name + len, DISK_NAME_LEN - len, "%c",
		 'a' + devindex);

	/* 512 byte sectors */
	set_capacity(bdev->gendisk, scmdev->size >> 9);
	device_add_disk(&scmdev->dev, bdev->gendisk);
	return 0;

out_queue:
	blk_cleanup_queue(rq);
out_tag:
	blk_mq_free_tag_set(&bdev->tag_set);
out:
	atomic_dec(&nr_devices);
	return ret;
}

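/* Undo scm_blk_dev_setup: unregister the disk and release the queue and tag set. */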
void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
{
	del_gendisk(bdev->gendisk);
	blk_cleanup_queue(bdev->gendisk->queue);
	blk_mq_free_tag_set(&bdev->tag_set);
	put_disk(bdev->gendisk);
}

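/* Leave the SCM_WR_PROHIBIT state (back to SCM_OPER) and log the transition. */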
void scm_blk_set_available(struct scm_blk_dev *bdev)
{
	unsigned long flags;

	spin_lock_irqsave(&bdev->lock, flags);
	if (bdev->state == SCM_WR_PROHIBIT)
		pr_info("%lx: Write access to the SCM increment is restored\n",
			(unsigned long) bdev->scmdev->address);
	bdev->state = SCM_OPER;
	spin_unlock_irqrestore(&bdev->lock, flags);
}

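/* Module parameter sanity check: the driver never packs more than 64 requests into one AOB. */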
static bool __init scm_blk_params_valid(void)
{
	if (!nr_requests_per_io || nr_requests_per_io > 64)
		return false;

	return true;
}

static int __init scm_blk_init(void)
{
	int ret = -EINVAL;

	if (!scm_blk_params_valid())
		goto out;

	ret = register_blkdev(0, "scm");
	if (ret < 0)
		goto out;

	scm_major = ret;
	ret = scm_alloc_rqs(nr_requests);
	if (ret)
		goto out_free;

	scm_debug = debug_register("scm_log", 16, 1, 16);
	if (!scm_debug) {
		ret = -ENOMEM;
		goto out_free;
	}

	debug_register_view(scm_debug, &debug_hex_ascii_view);
	debug_set_level(scm_debug, 2);

	ret = scm_drv_init();
	if (ret)
		goto out_dbf;

	return ret;

out_dbf:
	debug_unregister(scm_debug);
out_free:
	scm_free_rqs();
	unregister_blkdev(scm_major, "scm");
out:
	return ret;
}
module_init(scm_blk_init);

static void __exit scm_blk_cleanup(void)
{
	scm_drv_cleanup();
	debug_unregister(scm_debug);
	scm_free_rqs();
	unregister_blkdev(scm_major, "scm");
}
module_exit(scm_blk_cleanup);