/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef __XEN_BLKIF__BACKEND__COMMON_H__
#define __XEN_BLKIF__BACKEND__COMMON_H__

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/rbtree.h>
#include <asm/setup.h>
#include <asm/pgalloc.h>
#include <asm/hypervisor.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/interface/io/ring.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

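/* Driver log prefix and a debug helper that tags messages with the emitting function and line. */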
#define DRV_PFX "xen-blkback:"
#define DPRINTK(fmt, args...)				\
	pr_debug(DRV_PFX "(%s:%d) " fmt ".\n",		\
		 __func__, __LINE__, ##args)


/*
 * Maximum number of segments allowed in an indirect request. This value is
 * also advertised to the frontend.
 */
#define MAX_INDIRECT_SEGMENTS 256

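/*
 * SEGS_PER_INDIRECT_FRAME is the number of segment descriptors that fit in
 * one granted indirect page; INDIRECT_PAGES() rounds a segment count up to
 * the number of indirect pages needed to hold it.
 */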
#define SEGS_PER_INDIRECT_FRAME \
	(PAGE_SIZE/sizeof(struct blkif_request_segment_aligned))
#define MAX_INDIRECT_PAGES \
	((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
#define INDIRECT_PAGES(_segs) \
	((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)

/* Not a real protocol. Used to generate ring structs which contain
 * the elements common to all protocols only. This way we get a
 * compiler-checkable way to use common struct elements, so we can
 * avoid using switch(protocol) in a number of places. */
struct blkif_common_request {
	char dummy;
};
struct blkif_common_response {
	char dummy;
};

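/*
 * On-ring request layouts as seen by 32-bit and 64-bit guests.  The two ABIs
 * pad and align fields differently, so the backend keeps a copy of each
 * layout and translates incoming requests into the native struct
 * blkif_request (see blkif_get_x86_32_req/blkif_get_x86_64_req below).
 */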
struct blkif_x86_32_request_rw {
	uint8_t        nr_segments;  /* number of segments                   */
	blkif_vdev_t   handle;       /* only for read/write requests         */
	uint64_t       id;           /* private guest value, echoed in resp  */
	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));

struct blkif_x86_32_request_discard {
	uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero         */
	blkif_vdev_t   _pad1;        /* was "handle" for read/write requests */
	uint64_t       id;           /* private guest value, echoed in resp  */
	blkif_sector_t sector_number;/* start sector idx on disk             */
	uint64_t       nr_sectors;
} __attribute__((__packed__));

struct blkif_x86_32_request_other {
	uint8_t        _pad1;
	blkif_vdev_t   _pad2;
	uint64_t       id;           /* private guest value, echoed in resp  */
} __attribute__((__packed__));

struct blkif_x86_32_request_indirect {
	uint8_t        indirect_op;
	uint16_t       nr_segments;
	uint64_t       id;
	blkif_sector_t sector_number;
	blkif_vdev_t   handle;
	uint16_t       _pad1;
	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
	/*
	 * The maximum number of indirect segments (and pages) that will
	 * be used is determined by MAX_INDIRECT_SEGMENTS; this value
	 * is also exported to the guest (via the xenstore
	 * feature-max-indirect-segments entry), so the frontend knows how
	 * many indirect segments the backend supports.
	 */
	uint64_t       _pad2;        /* make it 64-byte aligned */
} __attribute__((__packed__));

struct blkif_x86_32_request {
	uint8_t        operation;    /* BLKIF_OP_???                         */
	union {
		struct blkif_x86_32_request_rw rw;
		struct blkif_x86_32_request_discard discard;
		struct blkif_x86_32_request_other other;
		struct blkif_x86_32_request_indirect indirect;
	} u;
} __attribute__((__packed__));

/* i386 protocol version */
#pragma pack(push, 4)
struct blkif_x86_32_response {
	uint64_t        id;              /* copied from request */
	uint8_t         operation;       /* copied from request */
	int16_t         status;          /* BLKIF_RSP_???       */
};
#pragma pack(pop)
/* x86_64 protocol version */

struct blkif_x86_64_request_rw {
	uint8_t        nr_segments;  /* number of segments                   */
	blkif_vdev_t   handle;       /* only for read/write requests         */
	uint32_t       _pad1;        /* offsetof(blkif_request..,u.rw.id)==8 */
	uint64_t       id;
	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));

struct blkif_x86_64_request_discard {
	uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero         */
	blkif_vdev_t   _pad1;        /* was "handle" for read/write requests */
	uint32_t       _pad2;        /* offsetof(blkif_..,u.discard.id)==8   */
	uint64_t       id;
	blkif_sector_t sector_number;/* start sector idx on disk             */
	uint64_t       nr_sectors;
} __attribute__((__packed__));

struct blkif_x86_64_request_other {
	uint8_t        _pad1;
	blkif_vdev_t   _pad2;
	uint32_t       _pad3;        /* offsetof(blkif_..,u.other.id)==8     */
	uint64_t       id;           /* private guest value, echoed in resp  */
} __attribute__((__packed__));

struct blkif_x86_64_request_indirect {
	uint8_t        indirect_op;
	uint16_t       nr_segments;
	uint32_t       _pad1;        /* offsetof(blkif_..,u.indirect.id)==8  */
	uint64_t       id;
	blkif_sector_t sector_number;
	blkif_vdev_t   handle;
	uint16_t       _pad2;
	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
	/*
	 * The maximum number of indirect segments (and pages) that will
	 * be used is determined by MAX_INDIRECT_SEGMENTS; this value
	 * is also exported to the guest (via the xenstore
	 * feature-max-indirect-segments entry), so the frontend knows how
	 * many indirect segments the backend supports.
	 */
	uint32_t       _pad3;        /* make it 64-byte aligned */
} __attribute__((__packed__));

struct blkif_x86_64_request {
	uint8_t        operation;    /* BLKIF_OP_???                         */
	union {
		struct blkif_x86_64_request_rw rw;
		struct blkif_x86_64_request_discard discard;
		struct blkif_x86_64_request_other other;
		struct blkif_x86_64_request_indirect indirect;
	} u;
} __attribute__((__packed__));

struct blkif_x86_64_response {
	uint64_t       __attribute__((__aligned__(8))) id;
	uint8_t        operation;        /* copied from request */
	int16_t        status;           /* BLKIF_RSP_???       */
};

DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
		  struct blkif_common_response);
DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
		  struct blkif_x86_32_response);
DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
		  struct blkif_x86_64_response);

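/*
 * The shared ring, viewed through whichever request/response layout the
 * frontend negotiated (native, x86_32 or x86_64).
 */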
union blkif_back_rings {
	struct blkif_back_ring        native;
	struct blkif_common_back_ring common;
	struct blkif_x86_32_back_ring x86_32;
	struct blkif_x86_64_back_ring x86_64;
};

enum blkif_protocol {
	BLKIF_PROTOCOL_NATIVE = 1,
	BLKIF_PROTOCOL_X86_32 = 2,
	BLKIF_PROTOCOL_X86_64 = 3,
};

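/* A virtual block device (vbd) exported to a guest, backed by a local block device. */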
struct xen_vbd {
	/* What the domain refers to this vbd as. */
	blkif_vdev_t		handle;
	/* Non-zero -> read-only */
	unsigned char		readonly;
	/* VDISK_xxx */
	unsigned char		type;
	/* phys device that this vbd maps to. */
	u32			pdevice;
	struct block_device	*bdev;
	/* Cached size parameter. */
	sector_t		size;
	unsigned int		flush_support:1;
	unsigned int		discard_secure:1;
	unsigned int		feature_gnt_persistent:1;
	unsigned int		overflow_max_grants:1;
};

struct backend_info;

/* Number of available flags */
#define PERSISTENT_GNT_FLAGS_SIZE	2
/* This persistent grant is currently in use */
#define PERSISTENT_GNT_ACTIVE		0
/*
 * This persistent grant has been used; the flag is set when we clear
 * PERSISTENT_GNT_ACTIVE, to record that the grant has been used recently.
 */
#define PERSISTENT_GNT_WAS_ACTIVE	1

/* Number of requests that we can fit in a ring */
#define XEN_BLKIF_REQS			32

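/*
 * A frontend grant that remains mapped in the backend across requests.
 * Entries live in the per-blkif rb-tree of persistent grants; remove_node
 * links a grant onto the purge list when it is being reclaimed.
 */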
struct persistent_gnt {
	struct page *page;
	grant_ref_t gnt;
	grant_handle_t handle;
	DECLARE_BITMAP(flags, PERSISTENT_GNT_FLAGS_SIZE);
	struct rb_node node;
	struct list_head remove_node;
};

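/* Per-device backend instance: one of these exists for each vbd exported to a guest. */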
struct xen_blkif {
	/* Unique identifier for this interface. */
	domid_t			domid;
	unsigned int		handle;
	/* Physical parameters of the comms window. */
	unsigned int		irq;
	/* Comms information. */
	enum blkif_protocol	blk_protocol;
	union blkif_back_rings	blk_rings;
	void			*blk_ring;
	/* The VBD attached to this interface. */
	struct xen_vbd		vbd;
	/* Back pointer to the backend_info. */
	struct backend_info	*be;
	/* Private fields. */
	spinlock_t		blk_ring_lock;
	atomic_t		refcnt;

	wait_queue_head_t	wq;
	/* for barrier (drain) requests */
	struct completion	drain_complete;
	atomic_t		drain;
	/* One thread per blkif. */
	struct task_struct	*xenblkd;
	unsigned int		waiting_reqs;

	/* tree to store persistent grants */
	struct rb_root		persistent_gnts;
	unsigned int		persistent_gnt_c;
	atomic_t		persistent_gnt_in_use;
	unsigned long		next_lru;

	/* used by the kworker that offloads the purge of persistent grants */
	struct list_head	persistent_purge_list;
	struct work_struct	persistent_purge_work;

	/* buffer of free pages to map grant refs */
	spinlock_t		free_pages_lock;
	int			free_pages_num;
	struct list_head	free_pages;

	/* Allocation of pending_reqs */
	struct pending_req	*pending_reqs;
	/* List of all 'pending_req' available */
	struct list_head	pending_free;
	/* And its spinlock. */
	spinlock_t		pending_free_lock;
	wait_queue_head_t	pending_free_wq;

	/* statistics */
	unsigned long		st_print;
	unsigned long long	st_rd_req;
	unsigned long long	st_wr_req;
	unsigned long long	st_oo_req;
	unsigned long long	st_f_req;
	unsigned long long	st_ds_req;
	unsigned long long	st_rd_sect;
	unsigned long long	st_wr_sect;

	wait_queue_head_t	waiting_to_free;
};

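/* One segment of a request: offset of the data within its page and length in sectors. */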
struct seg_buf {
	unsigned long offset;
	unsigned int nsec;
};

/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each bio that completes decrements the
 * pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
struct pending_req {
	struct xen_blkif	*blkif;
	u64			id;
	int			nr_pages;
	atomic_t		pendcnt;
	unsigned short		operation;
	int			status;
	struct list_head	free_list;
	struct page		*pages[MAX_INDIRECT_SEGMENTS];
	struct persistent_gnt	*persistent_gnts[MAX_INDIRECT_SEGMENTS];
	grant_handle_t		grant_handles[MAX_INDIRECT_SEGMENTS];
	grant_ref_t		grefs[MAX_INDIRECT_SEGMENTS];
	/* Indirect descriptors */
	struct persistent_gnt	*indirect_persistent_gnts[MAX_INDIRECT_PAGES];
	struct page		*indirect_pages[MAX_INDIRECT_PAGES];
	grant_handle_t		indirect_handles[MAX_INDIRECT_PAGES];
	struct seg_buf		seg[MAX_INDIRECT_SEGMENTS];
	struct bio		*biolist[MAX_INDIRECT_SEGMENTS];
};


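/* Size of the vbd in sectors: the partition size if one is attached, otherwise the whole disk. */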
#define vbd_sz(_v)	((_v)->bdev->bd_part ?	\
			 (_v)->bdev->bd_part->nr_sects : \
			 get_capacity((_v)->bdev->bd_disk))

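/* Reference counting on the blkif; the final put wakes up anyone waiting on waiting_to_free. */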
#define xen_blkif_get(_b) (atomic_inc(&(_b)->refcnt))
#define xen_blkif_put(_b)				\
	do {						\
		if (atomic_dec_and_test(&(_b)->refcnt))	\
			wake_up(&(_b)->waiting_to_free);\
	} while (0)

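/* Parameters of a request after it has been translated to the underlying physical device. */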
struct phys_req {
	unsigned short		dev;
	blkif_sector_t		nr_sects;
	struct block_device	*bdev;
	blkif_sector_t		sector_number;
};
int xen_blkif_interface_init(void);

int xen_blkif_xenbus_init(void);

irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
int xen_blkif_schedule(void *arg);
int xen_blkif_purge_persistent(void *arg);

int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
			      struct backend_info *be, int state);

int xen_blkbk_barrier(struct xenbus_transaction xbt,
		      struct backend_info *be, int state);
struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);

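/*
 * Copy a request from a 32-bit guest's ring into the native layout.  The
 * number of segments (or indirect grant pages) copied is capped at what the
 * backend supports; barrier() ensures the segment count used for the bounds
 * check is the copy already made, not a value re-read from the shared ring.
 */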
static inline void blkif_get_x86_32_req(struct blkif_request *dst,
					struct blkif_x86_32_request *src)
{
	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
	dst->operation = src->operation;
	switch (src->operation) {
	case BLKIF_OP_READ:
	case BLKIF_OP_WRITE:
	case BLKIF_OP_WRITE_BARRIER:
	case BLKIF_OP_FLUSH_DISKCACHE:
		dst->u.rw.nr_segments = src->u.rw.nr_segments;
		dst->u.rw.handle = src->u.rw.handle;
		dst->u.rw.id = src->u.rw.id;
		dst->u.rw.sector_number = src->u.rw.sector_number;
		barrier();
		if (n > dst->u.rw.nr_segments)
			n = dst->u.rw.nr_segments;
		for (i = 0; i < n; i++)
			dst->u.rw.seg[i] = src->u.rw.seg[i];
		break;
	case BLKIF_OP_DISCARD:
		dst->u.discard.flag = src->u.discard.flag;
		dst->u.discard.id = src->u.discard.id;
		dst->u.discard.sector_number = src->u.discard.sector_number;
		dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
		break;
	case BLKIF_OP_INDIRECT:
		dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
		dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
		dst->u.indirect.handle = src->u.indirect.handle;
		dst->u.indirect.id = src->u.indirect.id;
		dst->u.indirect.sector_number = src->u.indirect.sector_number;
		barrier();
		j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
		for (i = 0; i < j; i++)
			dst->u.indirect.indirect_grefs[i] =
				src->u.indirect.indirect_grefs[i];
		break;
	default:
		/*
		 * Don't know how to translate this op. Only get the
		 * ID so failure can be reported to the frontend.
		 */
		dst->u.other.id = src->u.other.id;
		break;
	}
}

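/* As above, but for requests arriving on a 64-bit guest's ring. */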
static inline void blkif_get_x86_64_req(struct blkif_request *dst,
					struct blkif_x86_64_request *src)
{
	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
	dst->operation = src->operation;
	switch (src->operation) {
	case BLKIF_OP_READ:
	case BLKIF_OP_WRITE:
	case BLKIF_OP_WRITE_BARRIER:
	case BLKIF_OP_FLUSH_DISKCACHE:
		dst->u.rw.nr_segments = src->u.rw.nr_segments;
		dst->u.rw.handle = src->u.rw.handle;
		dst->u.rw.id = src->u.rw.id;
		dst->u.rw.sector_number = src->u.rw.sector_number;
		barrier();
		if (n > dst->u.rw.nr_segments)
			n = dst->u.rw.nr_segments;
		for (i = 0; i < n; i++)
			dst->u.rw.seg[i] = src->u.rw.seg[i];
		break;
	case BLKIF_OP_DISCARD:
		dst->u.discard.flag = src->u.discard.flag;
		dst->u.discard.id = src->u.discard.id;
		dst->u.discard.sector_number = src->u.discard.sector_number;
		dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
		break;
	case BLKIF_OP_INDIRECT:
		dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
		dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
		dst->u.indirect.handle = src->u.indirect.handle;
		dst->u.indirect.id = src->u.indirect.id;
		dst->u.indirect.sector_number = src->u.indirect.sector_number;
		barrier();
		j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
		for (i = 0; i < j; i++)
			dst->u.indirect.indirect_grefs[i] =
				src->u.indirect.indirect_grefs[i];
		break;
	default:
		/*
		 * Don't know how to translate this op. Only get the
		 * ID so failure can be reported to the frontend.
		 */
		dst->u.other.id = src->u.other.id;
		break;
	}
}

#endif /* __XEN_BLKIF__BACKEND__COMMON_H__ */