/*
 * Block driver for s390 storage class memory.
 *
 * Copyright IBM Corp. 2012
 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/eadm.h>
#include "scm_blk.h"

static unsigned int write_cluster_size = 64;
module_param(write_cluster_size, uint, S_IRUGO);
MODULE_PARM_DESC(write_cluster_size,
		 "Number of pages used for contiguous writes.");

#define CLUSTER_SIZE (write_cluster_size * PAGE_SIZE)

void __scm_free_rq_cluster(struct scm_request *scmrq)
{
	int i;

	if (!scmrq->cluster.buf)
		return;

	for (i = 0; i < 2 * write_cluster_size; i++)
		free_page((unsigned long) scmrq->cluster.buf[i]);

	kfree(scmrq->cluster.buf);
}

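/*
 * Pre-allocate zeroed pages to back a cluster read. Twice the cluster
 * size is needed since a request may straddle a cluster boundary.
 */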
int __scm_alloc_rq_cluster(struct scm_request *scmrq)
{
	int i;

	scmrq->cluster.buf = kzalloc(sizeof(void *) * 2 * write_cluster_size,
				     GFP_KERNEL);
	if (!scmrq->cluster.buf)
		return -ENOMEM;

	for (i = 0; i < 2 * write_cluster_size; i++) {
		scmrq->cluster.buf[i] = (void *) get_zeroed_page(GFP_DMA);
		if (!scmrq->cluster.buf[i])
			return -ENOMEM;
	}
	INIT_LIST_HEAD(&scmrq->cluster.list);
	return 0;
}

void scm_request_cluster_init(struct scm_request *scmrq)
{
	scmrq->cluster.state = CLUSTER_NONE;
}

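/*
 * Two requests intersect if the cluster-aligned, cluster-sized regions
 * they touch on the device overlap.
 */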
static bool clusters_intersect(struct request *A, struct request *B)
{
	unsigned long firstA, lastA, firstB, lastB;

	firstA = ((u64) blk_rq_pos(A) << 9) / CLUSTER_SIZE;
	lastA = (((u64) blk_rq_pos(A) << 9) +
		    blk_rq_bytes(A) - 1) / CLUSTER_SIZE;

	firstB = ((u64) blk_rq_pos(B) << 9) / CLUSTER_SIZE;
	lastB = (((u64) blk_rq_pos(B) << 9) +
		    blk_rq_bytes(B) - 1) / CLUSTER_SIZE;

	return (firstB <= lastA && firstA <= lastB);
}

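/*
 * Reserve the clusters touched by the most recently added request.
 * Returns false if another request on the cluster list touches one of
 * these clusters and at least one of the two requests is a write.
 */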
bool scm_reserve_cluster(struct scm_request *scmrq)
{
	struct request *req = scmrq->request[scmrq->aob->request.msb_count];
	struct scm_blk_dev *bdev = scmrq->bdev;
	struct scm_request *iter;
	int pos, add = 1;

	if (write_cluster_size == 0)
		return true;

	spin_lock(&bdev->lock);
	list_for_each_entry(iter, &bdev->cluster_list, cluster.list) {
		if (iter == scmrq) {
			/*
			 * We don't have to use clusters_intersect here, since
			 * cluster requests are always started separately.
			 */
			add = 0;
			continue;
		}
		for (pos = 0; pos < iter->aob->request.msb_count; pos++) {
			if (clusters_intersect(req, iter->request[pos]) &&
			    (rq_data_dir(req) == WRITE ||
			     rq_data_dir(iter->request[pos]) == WRITE)) {
				spin_unlock(&bdev->lock);
				return false;
			}
		}
	}
	if (add)
		list_add(&scmrq->cluster.list, &bdev->cluster_list);
	spin_unlock(&bdev->lock);

	return true;
}

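/* Remove the request from the per-device cluster list, releasing its clusters. */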
void scm_release_cluster(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	unsigned long flags;

	if (write_cluster_size == 0)
		return;

	spin_lock_irqsave(&bdev->lock, flags);
	list_del(&scmrq->cluster.list);
	spin_unlock_irqrestore(&bdev->lock, flags);
}

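/* Initialize per-device clustering state and advertise the optimal I/O size. */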
void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev)
{
	INIT_LIST_HEAD(&bdev->cluster_list);
	blk_queue_io_opt(bdev->rq, CLUSTER_SIZE);
}

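/*
 * Set up the msb and indirect data address list for the current stage of
 * the read-modify-write cycle: first read the affected cluster(s) into the
 * bounce buffer, then merge in the request's payload and write it all back.
 */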
static int scm_prepare_cluster_request(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	struct scm_device *scmdev = bdev->gendisk->private_data;
	struct request *req = scmrq->request[0];
	struct msb *msb = &scmrq->aob->msb[0];
	struct req_iterator iter;
	struct aidaw *aidaw;
	struct bio_vec bv;
	int i = 0;
	u64 addr;

	switch (scmrq->cluster.state) {
	case CLUSTER_NONE:
		scmrq->cluster.state = CLUSTER_READ;
		/* fall through */
	case CLUSTER_READ:
		msb->bs = MSB_BS_4K;
		msb->oc = MSB_OC_READ;
		msb->flags = MSB_FLAG_IDA;
		msb->blk_count = write_cluster_size;

		addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
		msb->scm_addr = round_down(addr, CLUSTER_SIZE);

		if (msb->scm_addr !=
		    round_down(addr + (u64) blk_rq_bytes(req) - 1,
			       CLUSTER_SIZE))
			msb->blk_count = 2 * write_cluster_size;

		aidaw = scm_aidaw_fetch(scmrq, msb->blk_count * PAGE_SIZE);
		if (!aidaw)
			return -ENOMEM;

		scmrq->aob->request.msb_count = 1;
		msb->data_addr = (u64) aidaw;
		for (i = 0; i < msb->blk_count; i++) {
			aidaw->data_addr = (u64) scmrq->cluster.buf[i];
			aidaw++;
		}

		break;
	case CLUSTER_WRITE:
		aidaw = (void *) msb->data_addr;
		msb->oc = MSB_OC_WRITE;

		for (addr = msb->scm_addr;
		     addr < scmdev->address + ((u64) blk_rq_pos(req) << 9);
		     addr += PAGE_SIZE) {
			aidaw->data_addr = (u64) scmrq->cluster.buf[i];
			aidaw++;
			i++;
		}
		rq_for_each_segment(bv, req, iter) {
			aidaw->data_addr = (u64) page_address(bv.bv_page);
			aidaw++;
			i++;
		}
		for (; i < msb->blk_count; i++) {
			aidaw->data_addr = (u64) scmrq->cluster.buf[i];
			aidaw++;
		}
		break;
	}
	return 0;
}

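/*
 * Sub-cluster writes go through the read-modify-write path; reads and
 * cluster-sized (or larger) writes do not.
 */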
bool scm_need_cluster_request(struct scm_request *scmrq)
{
	int pos = scmrq->aob->request.msb_count;

	if (rq_data_dir(scmrq->request[pos]) == READ)
		return false;

	return blk_rq_bytes(scmrq->request[pos]) < CLUSTER_SIZE;
}

/* Called with queue lock held. */
void scm_initiate_cluster_request(struct scm_request *scmrq)
{
	if (scm_prepare_cluster_request(scmrq))
		goto requeue;
	if (eadm_start_aob(scmrq->aob))
		goto requeue;
	return;
requeue:
	scm_request_requeue(scmrq);
}

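/* True when the request is part of a read-modify-write cluster operation. */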
bool scm_test_cluster_request(struct scm_request *scmrq)
{
	return scmrq->cluster.state != CLUSTER_NONE;
}

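/*
 * Interrupt handling for cluster requests: after a successful cluster read,
 * start the write stage; after the write stage (or on error) finish the
 * request.
 */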
void scm_cluster_request_irq(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	unsigned long flags;

	switch (scmrq->cluster.state) {
	case CLUSTER_NONE:
		BUG();
		break;
	case CLUSTER_READ:
		if (scmrq->error) {
			scm_request_finish(scmrq);
			break;
		}
		scmrq->cluster.state = CLUSTER_WRITE;
		spin_lock_irqsave(&bdev->rq_lock, flags);
		scm_initiate_cluster_request(scmrq);
		spin_unlock_irqrestore(&bdev->rq_lock, flags);
		break;
	case CLUSTER_WRITE:
		scm_request_finish(scmrq);
		break;
	}
}

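/* write_cluster_size must be 0 (no clustering) or a power of two up to 128. */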
bool scm_cluster_size_valid(void)
{
	if (write_cluster_size == 1 || write_cluster_size > 128)
		return false;

	return !(write_cluster_size & (write_cluster_size - 1));
}