/******************************************************************************
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/block/xen-blkfront.c
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include <xen/events.h>
#include <xen/page.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include "common.h"

#define WRITE_BARRIER	(REQ_WRITE | REQ_FLUSH | REQ_FUA)

/*
 * These are rather arbitrary. They are fairly large because adjacent requests
 * pulled from a communication ring are quite likely to end up being part of
 * the same scatter/gather request at the disc.
 *
 * ** TRY INCREASING 'blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
 *
 * This will increase the chances of being able to write whole tracks.
 * 64 should be enough to keep us competitive with Linux.
 */
static int blkif_reqs = 64;
module_param_named(reqs, blkif_reqs, int, 0);
MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");

/* Run-time switchable: /sys/module/blkback/parameters/ */
static unsigned int log_stats = 0;
static unsigned int debug_lvl = 0;
module_param(log_stats, int, 0644);
module_param(debug_lvl, int, 0644);

/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each bio that completes decrements the
 * pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
77typedef struct {
Konrad Rzeszutek Wilk54893772011-04-14 17:21:50 -040078 struct blkif_st *blkif;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -040079 u64 id;
80 int nr_pages;
81 atomic_t pendcnt;
82 unsigned short operation;
83 int status;
84 struct list_head free_list;
85} pending_req_t;
86
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -040087#define BLKBACK_INVALID_HANDLE (~0)
88
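/*
 * State shared by all backend instances: the pool of 'pending_req'
 * structures, the pre-allocated pages used for grant mappings, and the
 * grant handles that go with them.
 */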
struct xen_blkbk {
	pending_req_t *pending_reqs;
	/* List of all 'pending_req' available */
	struct list_head pending_free;
	/* And its spinlock. */
	spinlock_t pending_free_lock;
	wait_queue_head_t pending_free_wq;
	/* The list of all pages that are available. */
	struct page **pending_pages;
	/* And the grant handles that are available. */
	grant_handle_t *pending_grant_handles;
};

static struct xen_blkbk *blkbk;

/*
 * Little helpful macro to figure out the index and virtual address of the
 * pending_pages[..]. For each 'pending_req' we have up to
 * BLKIF_MAX_SEGMENTS_PER_REQUEST (11) pages. The seg value would be from 0
 * through 10 and would index into the pending_pages[..].
 */
static inline int vaddr_pagenr(pending_req_t *req, int seg)
{
	return (req - blkbk->pending_reqs) * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
}

#define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)]

static inline unsigned long vaddr(pending_req_t *req, int seg)
{
	unsigned long pfn = page_to_pfn(blkbk->pending_page(req, seg));
	return (unsigned long)pfn_to_kaddr(pfn);
}

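/* The grant handle backing segment '_seg' of request '_req'. */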
#define pending_handle(_req, _seg) \
	(blkbk->pending_grant_handles[vaddr_pagenr(_req, _seg)])


static int do_block_io_op(struct blkif_st *blkif);
static void dispatch_rw_block_io(struct blkif_st *blkif,
				 struct blkif_request *req,
				 pending_req_t *pending_req);
static void make_response(struct blkif_st *blkif, u64 id,
			  unsigned short op, int st);

/*
 * Retrieve a free pending_req structure from the 'pending_free' list.
 */
static pending_req_t *alloc_req(void)
{
	pending_req_t *req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&blkbk->pending_free_lock, flags);
	if (!list_empty(&blkbk->pending_free)) {
		req = list_entry(blkbk->pending_free.next, pending_req_t, free_list);
		list_del(&req->free_list);
	}
	spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
	return req;
}

/*
 * Return the 'pending_req' structure back to the free pool. We also
 * wake up the thread if it was waiting for a free pending_req.
 */
static void free_req(pending_req_t *req)
{
	unsigned long flags;
	int was_empty;

	spin_lock_irqsave(&blkbk->pending_free_lock, flags);
	was_empty = list_empty(&blkbk->pending_free);
	list_add(&req->free_list, &blkbk->pending_free);
	spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
	if (was_empty)
		wake_up(&blkbk->pending_free_wq);
}

/*
 * Give back a reference count on the underlying storage.
 * It is OK to call this multiple times, as the plug is reset to NULL
 * once the work is done on the first call.
 */
static void unplug_queue(struct blkif_st *blkif)
{
	if (blkif->plug == NULL)
		return;
	if (blkif->plug->unplug_fn)
		blkif->plug->unplug_fn(blkif->plug);
	blk_put_queue(blkif->plug);
	blkif->plug = NULL;
}

/*
 * Take a reference count on the underlying storage.
 * It is OK to call this multiple times as we check to make sure we do
 * not double-reference. If the currently plugged queue is a different
 * one, its reference is given back first.
 */
static void plug_queue(struct blkif_st *blkif, struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q == blkif->plug)
		return;
	unplug_queue(blkif);
	blk_get_queue(q);
	blkif->plug = q;
}

/*
 * Unmap the grant references, and also remove the M2P overrides
 * used in the 'pending_req'.
 */
static void fast_flush_area(pending_req_t *req)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int i, invcount = 0;
	grant_handle_t handle;
	int ret;

	for (i = 0; i < req->nr_pages; i++) {
		handle = pending_handle(req, i);
		if (handle == BLKBACK_INVALID_HANDLE)
			continue;
		gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
				    GNTMAP_host_map, handle);
		pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
		invcount++;
	}

	ret = HYPERVISOR_grant_table_op(
		GNTTABOP_unmap_grant_ref, unmap, invcount);
	BUG_ON(ret);
	/* Note, we use invcount, not req->nr_pages, so we can't index
	 * using vaddr(req, i). */
	for (i = 0; i < invcount; i++) {
		ret = m2p_remove_override(
			virt_to_page(unmap[i].host_addr), false);
		if (ret) {
			printk(KERN_ALERT "Failed to remove M2P override for "
			       "%lx\n", (unsigned long)unmap[i].host_addr);
			continue;
		}
	}
}

/******************************************************************
 * SCHEDULER FUNCTIONS
 */

static void print_stats(struct blkif_st *blkif)
{
	printk(KERN_DEBUG "%s: oo %3d | rd %4d | wr %4d | br %4d\n",
	       current->comm, blkif->st_oo_req,
	       blkif->st_rd_req, blkif->st_wr_req, blkif->st_br_req);
	blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
	blkif->st_rd_req = 0;
	blkif->st_wr_req = 0;
	blkif->st_oo_req = 0;
}

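/*
 * Main loop of the per-device kernel thread: handle a possible VBD resize,
 * wait until the frontend has posted requests and a pending_req is free,
 * consume the ring via do_block_io_op(), and optionally print statistics.
 */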
int blkif_schedule(void *arg)
{
	struct blkif_st *blkif = arg;
	struct vbd *vbd = &blkif->vbd;

	blkif_get(blkif);

	if (debug_lvl)
		printk(KERN_DEBUG "%s: started\n", current->comm);

	while (!kthread_should_stop()) {
		if (try_to_freeze())
			continue;
		if (unlikely(vbd->size != vbd_size(vbd)))
			vbd_resize(blkif);

		wait_event_interruptible(
			blkif->wq,
			blkif->waiting_reqs || kthread_should_stop());
		wait_event_interruptible(
			blkbk->pending_free_wq,
			!list_empty(&blkbk->pending_free) || kthread_should_stop());

		blkif->waiting_reqs = 0;
		smp_mb(); /* clear flag *before* checking for work */

		if (do_block_io_op(blkif))
			blkif->waiting_reqs = 1;
		unplug_queue(blkif);

		if (log_stats && time_after(jiffies, blkif->st_print))
			print_stats(blkif);
	}

	if (log_stats)
		print_stats(blkif);
	if (debug_lvl)
		printk(KERN_DEBUG "%s: exiting\n", current->comm);

	blkif->xenblkd = NULL;
	blkif_put(blkif);

	return 0;
}

/*
 * Completion handling for the bios; called from end_block_io_op(), the
 * bio->bi_end_io callback, once per completed bio.
 */

static void __end_block_io_op(pending_req_t *pending_req, int error)
{
	/* An error fails the entire request. */
	if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
	    (error == -EOPNOTSUPP)) {
		DPRINTK("blkback: write barrier op failed, not supported\n");
		blkback_barrier(XBT_NIL, pending_req->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if (error) {
		DPRINTK("Buffer not up-to-date at end of operation, "
			"error=%d\n", error);
		pending_req->status = BLKIF_RSP_ERROR;
	}

	/* If all of the bios have completed it is time to unmap
	 * the grant references associated with 'request' and provide
	 * the proper response on the ring. */
	if (atomic_dec_and_test(&pending_req->pendcnt)) {
		fast_flush_area(pending_req);
		make_response(pending_req->blkif, pending_req->id,
			      pending_req->operation, pending_req->status);
		blkif_put(pending_req->blkif);
		free_req(pending_req);
	}
}

/*
 * bio callback.
 */
static void end_block_io_op(struct bio *bio, int error)
{
	__end_block_io_op(bio->bi_private, error);
	bio_put(bio);
}

/*
 * Notification from the guest OS.
 */

static void blkif_notify_work(struct blkif_st *blkif)
{
	blkif->waiting_reqs = 1;
	wake_up(&blkif->wq);
}

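/*
 * Interrupt handler for the event channel: the frontend has notified us
 * that new requests are on the ring, so wake the backend thread.
 */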
irqreturn_t blkif_be_int(int irq, void *dev_id)
{
	blkif_notify_work(dev_id);
	return IRQ_HANDLED;
}

/*
 * Function to copy the 'struct blkif_request' from the ring buffer
 * (which has the sectors we want, the number of them, grant references, etc.)
 * and transmute it to the block API to hand it over to the proper block disk.
 */
static int do_block_io_op(struct blkif_st *blkif)
{
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	struct blkif_request req;
	pending_req_t *pending_req;
	RING_IDX rc, rp;
	int more_to_do = 0;

	rc = blk_rings->common.req_cons;
	rp = blk_rings->common.sring->req_prod;
	rmb(); /* Ensure we see queued requests up to 'rp'. */

	while (rc != rp) {

		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
			break;

		if (kthread_should_stop()) {
			more_to_do = 1;
			break;
		}

		pending_req = alloc_req();
		if (NULL == pending_req) {
			blkif->st_oo_req++;
			more_to_do = 1;
			break;
		}

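		/* Grab a private copy of the request; its layout on the
		 * shared ring depends on the frontend ABI (native, x86_32
		 * or x86_64). */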
		switch (blkif->blk_protocol) {
		case BLKIF_PROTOCOL_NATIVE:
			memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
			break;
		case BLKIF_PROTOCOL_X86_32:
			blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
			break;
		case BLKIF_PROTOCOL_X86_64:
			blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
			break;
		default:
			BUG();
		}
		blk_rings->common.req_cons = ++rc; /* before make_response() */

		/* Apply all sanity checks to /private copy/ of request. */
		barrier();

		switch (req.operation) {
		case BLKIF_OP_READ:
			blkif->st_rd_req++;
			dispatch_rw_block_io(blkif, &req, pending_req);
			break;
		case BLKIF_OP_WRITE_BARRIER:
			blkif->st_br_req++;
			/* fall through */
		case BLKIF_OP_WRITE:
			blkif->st_wr_req++;
			dispatch_rw_block_io(blkif, &req, pending_req);
			break;
		default:
			/* A good sign something is wrong: sleep for a while to
			 * avoid excessive CPU consumption by a bad guest. */
			msleep(1);
			DPRINTK("error: unknown block io operation [%d]\n",
				req.operation);
			make_response(blkif, req.id, req.operation,
				      BLKIF_RSP_ERROR);
			free_req(pending_req);
			break;
		}

		/* Yield point for this unbounded loop. */
		cond_resched();
	}

	return more_to_do;
}

/*
 * Transmute the 'struct blkif_request' into one or more 'struct bio's
 * and call 'submit_bio' to pass them to the underlying storage.
 */
static void dispatch_rw_block_io(struct blkif_st *blkif,
				 struct blkif_request *req,
				 pending_req_t *pending_req)
{
	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct phys_req preq;
	struct {
		unsigned long buf; unsigned int nsec;
	} seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int nseg;
	struct bio *bio = NULL;
	int ret, i;
	int operation;

	switch (req->operation) {
	case BLKIF_OP_READ:
		operation = READ;
		break;
	case BLKIF_OP_WRITE:
		operation = WRITE;
		break;
	case BLKIF_OP_WRITE_BARRIER:
		operation = WRITE_BARRIER;
		break;
	default:
		operation = 0; /* make gcc happy */
		BUG();
	}

	/* Check that the number of segments is sane. */
	nseg = req->nr_segments;
	if (unlikely(nseg == 0 && operation != WRITE_BARRIER) ||
	    unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
		DPRINTK("Bad number of segments in request (%d)\n", nseg);
		goto fail_response;
	}

	preq.dev = req->handle;
	preq.sector_number = req->u.rw.sector_number;
	preq.nr_sects = 0;

	pending_req->blkif = blkif;
	pending_req->id = req->id;
	pending_req->operation = req->operation;
	pending_req->status = BLKIF_RSP_OKAY;
	pending_req->nr_pages = nseg;

	/* Fill out preq.nr_sects with the proper number of sectors, and set
	 * up map[..] with the PFN of the page in our domain with the
	 * corresponding grant reference for each page. */
	for (i = 0; i < nseg; i++) {
		uint32_t flags;

		seg[i].nsec = req->u.rw.seg[i].last_sect -
			req->u.rw.seg[i].first_sect + 1;
		if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
		    (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect))
			goto fail_response;
		preq.nr_sects += seg[i].nsec;

		flags = GNTMAP_host_map;
		if (operation != READ)
			flags |= GNTMAP_readonly;
		gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
				  req->u.rw.seg[i].gref, blkif->domid);
	}

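	/* Map all of the request's pages from the frontend domain in one
	 * batched hypercall. */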
	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
	BUG_ON(ret);

	/* Now swizzle the MFN in our domain with the MFN from the other domain
	 * so that when we access vaddr(pending_req, i) it has the contents of
	 * the page from the other domain. */
	for (i = 0; i < nseg; i++) {
		if (unlikely(map[i].status != 0)) {
			DPRINTK("invalid buffer -- could not remap it\n");
			map[i].handle = BLKBACK_INVALID_HANDLE;
			ret |= 1;
		}

		pending_handle(pending_req, i) = map[i].handle;

		if (ret)
			continue;

		ret = m2p_add_override(PFN_DOWN(map[i].dev_bus_addr),
			blkbk->pending_page(pending_req, i), false);
		if (ret) {
			printk(KERN_ALERT "Failed to install M2P override for"
			       " %lx (ret: %d)\n", (unsigned long)map[i].dev_bus_addr, ret);
			/* We could switch over to GNTTABOP_copy */
			continue;
		}

		seg[i].buf = map[i].dev_bus_addr |
			(req->u.rw.seg[i].first_sect << 9);
	}

	/* If we have failed at this point, we need to undo the M2P override,
	 * set gnttab_set_unmap_op on all of the grant references and perform
	 * the hypercall to unmap the grants - that is all done in
	 * fast_flush_area. */
	if (ret)
		goto fail_flush;

	if (vbd_translate(&preq, blkif, operation) != 0) {
		DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n",
			operation == READ ? "read" : "write",
			preq.sector_number,
			preq.sector_number + preq.nr_sects, preq.dev);
		goto fail_flush;
	}

	/* Get a reference count for the disk queue and start sending I/O. */
	plug_queue(blkif, preq.bdev);

	/* We set it to one so that the last submit_bio does not have to call
	 * atomic_inc. */
	atomic_set(&pending_req->pendcnt, 1);
	blkif_get(blkif);

	for (i = 0; i < nseg; i++) {
		if (((int)preq.sector_number|(int)seg[i].nsec) &
		    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
			DPRINTK("Misaligned I/O request from domain %d",
				blkif->domid);
			goto fail_put_bio;
		}

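		/* Pack as many segments as possible into each bio; when
		 * bio_add_page() cannot take another page, submit the
		 * current bio and allocate a new one. */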
		while ((bio == NULL) ||
		       (bio_add_page(bio,
				     blkbk->pending_page(pending_req, i),
				     seg[i].nsec << 9,
				     seg[i].buf & ~PAGE_MASK) == 0)) {
			if (bio) {
				atomic_inc(&pending_req->pendcnt);
				submit_bio(operation, bio);
			}

			bio = bio_alloc(GFP_KERNEL, nseg-i);
			if (unlikely(bio == NULL))
				goto fail_put_bio;

			bio->bi_bdev = preq.bdev;
			bio->bi_private = pending_req;
			bio->bi_end_io = end_block_io_op;
			bio->bi_sector = preq.sector_number;
		}

		preq.sector_number += seg[i].nsec;
	}

	/* This will be hit if the operation was a barrier. */
	if (!bio) {
		BUG_ON(operation != WRITE_BARRIER);
		bio = bio_alloc(GFP_KERNEL, 0);
		if (unlikely(bio == NULL))
			goto fail_put_bio;

		bio->bi_bdev = preq.bdev;
		bio->bi_private = pending_req;
		bio->bi_end_io = end_block_io_op;
		bio->bi_sector = -1;
	}

	submit_bio(operation, bio);

	if (operation == READ)
		blkif->st_rd_sect += preq.nr_sects;
	else if (operation == WRITE || operation == WRITE_BARRIER)
		blkif->st_wr_sect += preq.nr_sects;

	return;

fail_flush:
	fast_flush_area(pending_req);
fail_response:
	make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
	free_req(pending_req);
	msleep(1); /* back off a bit */
	return;

fail_put_bio:
	__end_block_io_op(pending_req, -EINVAL);
	if (bio)
		bio_put(bio);
	unplug_queue(blkif);
	msleep(1); /* back off a bit */
	return;
}

/*
 * Put a response on the ring indicating how the operation fared.
 */
static void make_response(struct blkif_st *blkif, u64 id,
			  unsigned short op, int st)
{
	struct blkif_response resp;
	unsigned long flags;
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	int more_to_do = 0;
	int notify;

	resp.id = id;
	resp.operation = op;
	resp.status = st;

	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
	/* Place on the response ring for the relevant domain. */
	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
		memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_32:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_64:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	default:
		BUG();
	}
	blk_rings->common.rsp_prod_pvt++;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
	if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
		/*
		 * Tail check for pending requests. Allows frontend to avoid
		 * notifications if requests are already in flight (lower
		 * overheads and promotes batching).
		 */
		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);

	} else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
		more_to_do = 1;
	}

	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);

	if (more_to_do)
		blkif_notify_work(blkif);
	if (notify)
		notify_remote_via_irq(blkif->irq);
}

static int __init blkif_init(void)
{
	int i, mmap_pages;
	int rc = 0;

	if (!xen_pv_domain())
		return -ENODEV;

	blkbk = (struct xen_blkbk *)kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL);
	if (!blkbk) {
		printk(KERN_ALERT "%s: out of memory!\n", __func__);
		return -ENOMEM;
	}

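	/* One page and one grant handle for every possible segment of
	 * every in-flight request. */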
	mmap_pages = blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;

	blkbk->pending_reqs = kmalloc(sizeof(blkbk->pending_reqs[0]) *
				      blkif_reqs, GFP_KERNEL);
	blkbk->pending_grant_handles = kzalloc(sizeof(blkbk->pending_grant_handles[0]) *
					       mmap_pages, GFP_KERNEL);
	blkbk->pending_pages = kzalloc(sizeof(blkbk->pending_pages[0]) *
				       mmap_pages, GFP_KERNEL);

	if (!blkbk->pending_reqs || !blkbk->pending_grant_handles || !blkbk->pending_pages) {
		rc = -ENOMEM;
		goto out_of_memory;
	}

	for (i = 0; i < mmap_pages; i++) {
		blkbk->pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
		blkbk->pending_pages[i] = alloc_page(GFP_KERNEL);
		if (blkbk->pending_pages[i] == NULL) {
			rc = -ENOMEM;
			goto out_of_memory;
		}
	}
	rc = blkif_interface_init();
	if (rc)
		goto failed_init;

	memset(blkbk->pending_reqs, 0, sizeof(blkbk->pending_reqs[0]) * blkif_reqs);

	INIT_LIST_HEAD(&blkbk->pending_free);
	spin_lock_init(&blkbk->pending_free_lock);
	init_waitqueue_head(&blkbk->pending_free_wq);

	for (i = 0; i < blkif_reqs; i++)
		list_add_tail(&blkbk->pending_reqs[i].free_list, &blkbk->pending_free);

	rc = blkif_xenbus_init();
	if (rc)
		goto failed_init;

	return 0;

out_of_memory:
	printk(KERN_ERR "%s: out of memory\n", __func__);
failed_init:
	kfree(blkbk->pending_reqs);
	kfree(blkbk->pending_grant_handles);
	/* The pending_pages array itself may have failed to allocate. */
	if (blkbk->pending_pages) {
		for (i = 0; i < mmap_pages; i++) {
			if (blkbk->pending_pages[i])
				__free_page(blkbk->pending_pages[i]);
		}
	}
	kfree(blkbk->pending_pages);
	kfree(blkbk);
	blkbk = NULL;
	return rc;
}

module_init(blkif_init);

MODULE_LICENSE("Dual BSD/GPL");