/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright (c) 2011, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */

#ifndef _HYPERV_H
#define _HYPERV_H

#include <uapi/linux/hyperv.h>

#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <linux/reciprocal_div.h>

#define MAX_PAGE_BUFFER_COUNT			32
#define MAX_MULTIPAGE_BUFFER_COUNT		32 /* 128K */

#pragma pack(push, 1)

/* Single-page buffer */
struct hv_page_buffer {
	u32 len;
	u32 offset;
	u64 pfn;
};

/* Multiple-page buffer */
struct hv_multipage_buffer {
	/* Length and offset determine the # of pfns in the array */
	u32 len;
	u32 offset;
	u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
};

/*
 * Multiple-page buffer array; the pfn array is variable size:
 * The number of entries in the PFN array is determined by
 * "len" and "offset".
 */
struct hv_mpb_array {
	/* Length and offset determine the # of pfns in the array */
	u32 len;
	u32 offset;
	u64 pfn_array[];
};

/* 0x18 includes the proprietary packet header */
#define MAX_PAGE_BUFFER_PACKET		(0x18 +			\
					(sizeof(struct hv_page_buffer) * \
					 MAX_PAGE_BUFFER_COUNT))
#define MAX_MULTIPAGE_BUFFER_PACKET	(0x18 +			\
					 sizeof(struct hv_multipage_buffer))


#pragma pack(pop)

struct hv_ring_buffer {
	/* Offset in bytes from the start of ring data below */
	u32 write_index;

	/* Offset in bytes from the start of ring data below */
	u32 read_index;

	u32 interrupt_mask;

	/*
	 * WS2012/Win8 and later versions of Hyper-V implement interrupt
	 * driven flow management. The feature bit feat_pending_send_sz
	 * is set by the host on the host->guest ring buffer, and by the
	 * guest on the guest->host ring buffer.
	 *
	 * The meaning of the feature bit is a bit complex in that it has
	 * semantics that apply to both ring buffers.  If the guest sets
	 * the feature bit in the guest->host ring buffer, the guest is
	 * telling the host that:
	 * 1) It will set the pending_send_sz field in the guest->host ring
	 *    buffer when it is waiting for space to become available, and
	 * 2) It will read the pending_send_sz field in the host->guest
	 *    ring buffer and interrupt the host when it frees enough space
	 *
	 * Similarly, if the host sets the feature bit in the host->guest
	 * ring buffer, the host is telling the guest that:
	 * 1) It will set the pending_send_sz field in the host->guest ring
	 *    buffer when it is waiting for space to become available, and
	 * 2) It will read the pending_send_sz field in the guest->host
	 *    ring buffer and interrupt the guest when it frees enough space
	 *
	 * If either the guest or host does not set the feature bit that it
	 * owns, that guest or host must do polling if it encounters a full
	 * ring buffer, and not signal the other end with an interrupt.
	 */
	u32 pending_send_sz;
	u32 reserved1[12];
	union {
		struct {
			u32 feat_pending_send_sz:1;
		};
		u32 value;
	} feature_bits;

	/* Pad it to PAGE_SIZE so that data starts on page boundary */
	u8 reserved2[4028];

	/*
	 * Ring data starts here + RingDataStartOffset
	 * !!! DO NOT place any fields below this !!!
	 */
	u8 buffer[];
} __packed;

struct hv_ring_buffer_info {
	struct hv_ring_buffer *ring_buffer;
	u32 ring_size;			/* Include the shared header */
	struct reciprocal_value ring_size_div10_reciprocal;
	spinlock_t ring_lock;

	u32 ring_datasize;		/* < ring_size */
	u32 priv_read_index;
	/*
	 * The ring buffer mutex lock. This lock prevents the ring buffer from
	 * being freed while the ring buffer is being accessed.
	 */
	struct mutex ring_buffer_mutex;
};


static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi)
{
	u32 read_loc, write_loc, dsize, read;

	dsize = rbi->ring_datasize;
	read_loc = rbi->ring_buffer->read_index;
	write_loc = READ_ONCE(rbi->ring_buffer->write_index);

	read = write_loc >= read_loc ? (write_loc - read_loc) :
		(dsize - read_loc) + write_loc;

	return read;
}

static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
{
	u32 read_loc, write_loc, dsize, write;

	dsize = rbi->ring_datasize;
	read_loc = READ_ONCE(rbi->ring_buffer->read_index);
	write_loc = rbi->ring_buffer->write_index;

	write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
		read_loc - write_loc;
	return write;
}
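
/*
 * Worked example (illustrative): with dsize = 4096, read_index = 3000 and
 * write_index = 100, the data wraps around, so there are
 * (4096 - 3000) + 100 = 1196 bytes to read and 3000 - 100 = 2900 bytes of
 * room to write; the two always sum to dsize.  read_index == write_index
 * means an empty ring, which is why a writer never fills the ring
 * completely.
 */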

static inline u32 hv_get_avail_to_write_percent(
		const struct hv_ring_buffer_info *rbi)
{
	u32 avail_write = hv_get_bytes_to_write(rbi);

	return reciprocal_divide(
			(avail_write << 3) + (avail_write << 1),
			rbi->ring_size_div10_reciprocal);
}
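
/*
 * Note on the arithmetic above (illustrative): (x << 3) + (x << 1) is
 * 8x + 2x = 10x, and reciprocal_divide() divides by the precomputed
 * ring_size / 10, so the result is (10 * avail_write) / (ring_size / 10),
 * i.e. roughly 100 * avail_write / ring_size -- the free space as a
 * percentage -- without a runtime division.  For example, 10240 free
 * bytes in a 40960-byte ring yields 102400 / 4096 = 25.
 */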

/*
 * The VMBus version is a 32 bit entity broken up into
 * two 16 bit quantities: major_number.minor_number.
 *
 * 0 . 13 (Windows Server 2008)
 * 1 . 1  (Windows 7)
 * 2 . 4  (Windows 8)
 * 3 . 0  (Windows 8 R2)
 * 4 . 0  (Windows 10)
 * 4 . 1  (Windows 10 RS3)
 * 5 . 0  (Newer Windows 10)
 * 5 . 1  (Windows 10 RS4)
 * 5 . 2  (Windows Server 2019, RS5)
 */

#define VERSION_WS2008		((0 << 16) | (13))
#define VERSION_WIN7		((1 << 16) | (1))
#define VERSION_WIN8		((2 << 16) | (4))
#define VERSION_WIN8_1		((3 << 16) | (0))
#define VERSION_WIN10		((4 << 16) | (0))
#define VERSION_WIN10_V4_1	((4 << 16) | (1))
#define VERSION_WIN10_V5	((5 << 16) | (0))
#define VERSION_WIN10_V5_1	((5 << 16) | (1))
#define VERSION_WIN10_V5_2	((5 << 16) | (2))
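
/*
 * Illustrative sketch: the major/minor components can be recovered from a
 * packed version with shifts and masks.  These helper names are
 * hypothetical, not part of the upstream API.
 */
static inline u16 hv_example_version_major(u32 version)
{
	return version >> 16;		/* e.g. VERSION_WIN10_V5_2 -> 5 */
}

static inline u16 hv_example_version_minor(u32 version)
{
	return version & 0xffff;	/* e.g. VERSION_WIN10_V5_2 -> 2 */
}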

/* The maximum size of a pipe payload is 16K */
#define MAX_PIPE_DATA_PAYLOAD		(sizeof(u8) * 16384)

/* Define PipeMode values. */
#define VMBUS_PIPE_TYPE_BYTE		0x00000000
#define VMBUS_PIPE_TYPE_MESSAGE		0x00000004

/* The size of the user defined data buffer for non-pipe offers. */
#define MAX_USER_DEFINED_BYTES		120

/* The size of the user defined data buffer for pipe offers. */
#define MAX_PIPE_USER_DEFINED_BYTES	116

/*
 * At the center of the Channel Management library is the Channel Offer. This
 * struct contains the fundamental information about an offer.
 */
struct vmbus_channel_offer {
	guid_t if_type;
	guid_t if_instance;

	/*
	 * These two fields are not currently used.
	 */
	u64 reserved1;
	u64 reserved2;

	u16 chn_flags;
	u16 mmio_megabytes;		/* in bytes * 1024 * 1024 */

	union {
		/* Non-pipes: The user has MAX_USER_DEFINED_BYTES bytes. */
		struct {
			unsigned char user_def[MAX_USER_DEFINED_BYTES];
		} std;

		/*
		 * Pipes:
		 * The following structure is an integrated pipe protocol, which
		 * is implemented on top of standard user-defined data. Pipe
		 * clients have MAX_PIPE_USER_DEFINED_BYTES left for their own
		 * use.
		 */
		struct {
			u32 pipe_mode;
			unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
		} pipe;
	} u;
	/*
	 * The sub_channel_index is defined in Win8: a value of zero means a
	 * primary channel and a value of non-zero means a sub-channel.
	 *
	 * Before Win8, the field is reserved, meaning it's always zero.
	 */
	u16 sub_channel_index;
	u16 reserved3;
} __packed;

/* Server Flags */
#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE	1
#define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES	2
#define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS		4
#define VMBUS_CHANNEL_NAMED_PIPE_MODE			0x10
#define VMBUS_CHANNEL_LOOPBACK_OFFER			0x100
#define VMBUS_CHANNEL_PARENT_OFFER			0x200
#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION	0x400
#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER		0x2000

struct vmpacket_descriptor {
	u16 type;
	u16 offset8;
	u16 len8;
	u16 flags;
	u64 trans_id;
} __packed;

struct vmpacket_header {
	u32 prev_pkt_start_offset;
	struct vmpacket_descriptor descriptor;
} __packed;

struct vmtransfer_page_range {
	u32 byte_count;
	u32 byte_offset;
} __packed;

struct vmtransfer_page_packet_header {
	struct vmpacket_descriptor d;
	u16 xfer_pageset_id;
	u8 sender_owns_set;
	u8 reserved;
	u32 range_cnt;
	struct vmtransfer_page_range ranges[1];
} __packed;

struct vmgpadl_packet_header {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 reserved;
} __packed;

struct vmadd_remove_transfer_page_set {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u16 xfer_pageset_id;
	u16 reserved;
} __packed;

/*
 * This structure defines a range in guest physical space that can be made to
 * look virtually contiguous.
 */
struct gpa_range {
	u32 byte_count;
	u32 byte_offset;
	u64 pfn_array[];
};

/*
 * This is the format for an Establish Gpadl packet, which contains a handle by
 * which this GPADL will be known and a set of GPA ranges associated with it.
 * This can be converted to an MDL by the guest OS. If there are multiple GPA
 * ranges, then the resulting MDL will be "chained," representing multiple VA
 * ranges.
 */
struct vmestablish_gpadl {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 range_cnt;
	struct gpa_range range[1];
} __packed;

/*
 * This is the format for a Teardown Gpadl packet, which indicates that the
 * GPADL handle in the Establish Gpadl packet will never be referenced again.
 */
struct vmteardown_gpadl {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 reserved;	/* for alignment to an 8-byte boundary */
} __packed;

/*
 * This is the format for a GPA-Direct packet, which contains a set of GPA
 * ranges, in addition to commands and/or data.
 */
struct vmdata_gpa_direct {
	struct vmpacket_descriptor d;
	u32 reserved;
	u32 range_cnt;
	struct gpa_range range[1];
} __packed;

/* This is the format for an Additional Data Packet. */
struct vmadditional_data {
	struct vmpacket_descriptor d;
	u64 total_bytes;
	u32 offset;
	u32 byte_cnt;
	unsigned char data[1];
} __packed;

union vmpacket_largest_possible_header {
	struct vmpacket_descriptor simple_hdr;
	struct vmtransfer_page_packet_header xfer_page_hdr;
	struct vmgpadl_packet_header gpadl_hdr;
	struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr;
	struct vmestablish_gpadl establish_gpadl_hdr;
	struct vmteardown_gpadl teardown_gpadl_hdr;
	struct vmdata_gpa_direct data_gpa_direct_hdr;
};

#define VMPACKET_DATA_START_ADDRESS(__packet)	\
	(void *)(((unsigned char *)__packet) +	\
	 ((struct vmpacket_descriptor *)__packet)->offset8 * 8)

#define VMPACKET_DATA_LENGTH(__packet)		\
	((((struct vmpacket_descriptor *)__packet)->len8 -	\
	  ((struct vmpacket_descriptor *)__packet)->offset8) * 8)

#define VMPACKET_TRANSFER_MODE(__packet)	\
	(((struct vmpacket_descriptor *)__packet)->type)

enum vmbus_packet_type {
	VM_PKT_INVALID				= 0x0,
	VM_PKT_SYNCH				= 0x1,
	VM_PKT_ADD_XFER_PAGESET			= 0x2,
	VM_PKT_RM_XFER_PAGESET			= 0x3,
	VM_PKT_ESTABLISH_GPADL			= 0x4,
	VM_PKT_TEARDOWN_GPADL			= 0x5,
	VM_PKT_DATA_INBAND			= 0x6,
	VM_PKT_DATA_USING_XFER_PAGES		= 0x7,
	VM_PKT_DATA_USING_GPADL			= 0x8,
	VM_PKT_DATA_USING_GPA_DIRECT		= 0x9,
	VM_PKT_CANCEL_REQUEST			= 0xa,
	VM_PKT_COMP				= 0xb,
	VM_PKT_DATA_USING_ADDITIONAL_PKT	= 0xc,
	VM_PKT_ADDITIONAL_DATA			= 0xd
};

#define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED	1


/* Version 1 messages */
enum vmbus_channel_message_type {
	CHANNELMSG_INVALID		=  0,
	CHANNELMSG_OFFERCHANNEL		=  1,
	CHANNELMSG_RESCIND_CHANNELOFFER	=  2,
	CHANNELMSG_REQUESTOFFERS	=  3,
	CHANNELMSG_ALLOFFERS_DELIVERED	=  4,
	CHANNELMSG_OPENCHANNEL		=  5,
	CHANNELMSG_OPENCHANNEL_RESULT	=  6,
	CHANNELMSG_CLOSECHANNEL		=  7,
	CHANNELMSG_GPADL_HEADER		=  8,
	CHANNELMSG_GPADL_BODY		=  9,
	CHANNELMSG_GPADL_CREATED	= 10,
	CHANNELMSG_GPADL_TEARDOWN	= 11,
	CHANNELMSG_GPADL_TORNDOWN	= 12,
	CHANNELMSG_RELID_RELEASED	= 13,
	CHANNELMSG_INITIATE_CONTACT	= 14,
	CHANNELMSG_VERSION_RESPONSE	= 15,
	CHANNELMSG_UNLOAD		= 16,
	CHANNELMSG_UNLOAD_RESPONSE	= 17,
	CHANNELMSG_18			= 18,
	CHANNELMSG_19			= 19,
	CHANNELMSG_20			= 20,
	CHANNELMSG_TL_CONNECT_REQUEST	= 21,
	CHANNELMSG_MODIFYCHANNEL	= 22,
	CHANNELMSG_TL_CONNECT_RESULT	= 23,
	CHANNELMSG_COUNT
};

/* Hyper-V supports about 2048 channels, and the RELIDs start with 1. */
#define INVALID_RELID	U32_MAX

struct vmbus_channel_message_header {
	enum vmbus_channel_message_type msgtype;
	u32 padding;
} __packed;

/* Query VMBus Version parameters */
struct vmbus_channel_query_vmbus_version {
	struct vmbus_channel_message_header header;
	u32 version;
} __packed;

/* VMBus Version Supported parameters */
struct vmbus_channel_version_supported {
	struct vmbus_channel_message_header header;
	u8 version_supported;
} __packed;

/* Offer Channel parameters */
struct vmbus_channel_offer_channel {
	struct vmbus_channel_message_header header;
	struct vmbus_channel_offer offer;
	u32 child_relid;
	u8 monitorid;
	/*
	 * win7 and beyond split this field into a bit field.
	 */
	u8 monitor_allocated:1;
	u8 reserved:7;
	/*
	 * These are new fields added in win7 and later.
	 * Do not access these fields without checking the
	 * negotiated protocol.
	 *
	 * If "is_dedicated_interrupt" is set, we must not set the
	 * associated bit in the channel bitmap while sending the
	 * interrupt to the host.
	 *
	 * connection_id is to be used in signaling the host.
	 */
	u16 is_dedicated_interrupt:1;
	u16 reserved1:15;
	u32 connection_id;
} __packed;

/* Rescind Offer parameters */
struct vmbus_channel_rescind_offer {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

static inline u32
hv_ringbuffer_pending_size(const struct hv_ring_buffer_info *rbi)
{
	return rbi->ring_buffer->pending_send_sz;
}
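
/*
 * Illustrative sketch (hypothetical helper, not upstream API): a reader
 * honoring the pending_send_sz contract described in struct hv_ring_buffer
 * checks, after freeing space, whether the blocked writer asked to be
 * signaled and whether enough room is now available.
 */
static inline bool hv_example_writer_needs_signal(
		const struct hv_ring_buffer_info *rbi)
{
	u32 pending = hv_ringbuffer_pending_size(rbi);

	/* No signal was requested, or not enough space has been freed yet. */
	return pending && hv_get_bytes_to_write(rbi) >= pending;
}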

/*
 * Request Offer -- no parameters, SynIC message contains the partition ID
 * Set Snoop -- no parameters, SynIC message contains the partition ID
 * Clear Snoop -- no parameters, SynIC message contains the partition ID
 * All Offers Delivered -- no parameters, SynIC message contains the partition
 *			   ID
 * Flush Client -- no parameters, SynIC message contains the partition ID
 */

/* Open Channel parameters */
struct vmbus_channel_open_channel {
	struct vmbus_channel_message_header header;

	/* Identifies the specific VMBus channel that is being opened. */
	u32 child_relid;

	/* ID making a particular open request at a channel offer unique. */
	u32 openid;

	/* GPADL for the channel's ring buffer. */
	u32 ringbuffer_gpadlhandle;

	/*
	 * Starting with win8, this field will be used to specify
	 * the target virtual processor on which to deliver the interrupt for
	 * the host to guest communication.
	 * Prior to win8, incoming channel interrupts would only
	 * be delivered on cpu 0. Setting this value to 0 would
	 * preserve the earlier behavior.
	 */
	u32 target_vp;

	/*
	 * The upstream ring buffer begins at offset zero in the memory
	 * described by RingBufferGpadlHandle. The downstream ring buffer
	 * follows it at this offset (in pages).
	 */
	u32 downstream_ringbuffer_pageoffset;

	/* User-specific data to be passed along to the server endpoint. */
	unsigned char userdata[MAX_USER_DEFINED_BYTES];
} __packed;

/* Open Channel Result parameters */
struct vmbus_channel_open_result {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 openid;
	u32 status;
} __packed;

/* Close channel parameters */
struct vmbus_channel_close_channel {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

/* Channel Message GPADL */
#define GPADL_TYPE_RING_BUFFER		1
#define GPADL_TYPE_SERVER_SAVE_AREA	2
#define GPADL_TYPE_TRANSACTION		8

/*
 * The number of PFNs in a GPADL message is defined by the number of
 * pages that would be spanned by ByteCount and ByteOffset.  If the
 * implied number of PFNs won't fit in this packet, there will be a
 * follow-up packet that contains more.
 */
struct vmbus_channel_gpadl_header {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
	u16 range_buflen;
	u16 rangecount;
	struct gpa_range range[];
} __packed;
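
/*
 * Illustrative sketch (hypothetical helper, not upstream API): the PFN
 * count implied by a gpa_range, using the page rounding described above
 * and assuming the architecture's PAGE_SIZE/PAGE_SHIFT are in scope.
 */
static inline u32 hv_example_gpadl_pfn_count(const struct gpa_range *range)
{
	return (range->byte_offset + range->byte_count + PAGE_SIZE - 1) >>
	       PAGE_SHIFT;
}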

/* This is the followup packet that contains more PFNs. */
struct vmbus_channel_gpadl_body {
	struct vmbus_channel_message_header header;
	u32 msgnumber;
	u32 gpadl;
	u64 pfn[];
} __packed;

struct vmbus_channel_gpadl_created {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
	u32 creation_status;
} __packed;

struct vmbus_channel_gpadl_teardown {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
} __packed;

struct vmbus_channel_gpadl_torndown {
	struct vmbus_channel_message_header header;
	u32 gpadl;
} __packed;

struct vmbus_channel_relid_released {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

struct vmbus_channel_initiate_contact {
	struct vmbus_channel_message_header header;
	u32 vmbus_version_requested;
	u32 target_vcpu; /* The VCPU the host should respond to */
	union {
		u64 interrupt_page;
		struct {
			u8 msg_sint;
			u8 padding1[3];
			u32 padding2;
		};
	};
	u64 monitor_page1;
	u64 monitor_page2;
} __packed;

/* Hyper-V socket: guest's connect()-ing to host */
struct vmbus_channel_tl_connect_request {
	struct vmbus_channel_message_header header;
	guid_t guest_endpoint_id;
	guid_t host_service_id;
} __packed;

/* Modify Channel parameters, cf. vmbus_send_modifychannel() */
struct vmbus_channel_modifychannel {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 target_vp;
} __packed;

struct vmbus_channel_version_response {
	struct vmbus_channel_message_header header;
	u8 version_supported;

	u8 connection_state;
	u16 padding;

	/*
	 * On new hosts that support VMBus protocol 5.0, we must use
	 * VMBUS_MESSAGE_CONNECTION_ID_4 for the Initiate Contact Message,
	 * and for subsequent messages, we must use the Message Connection ID
	 * field in the host-returned Version Response Message.
	 *
	 * On old hosts, we should always use VMBUS_MESSAGE_CONNECTION_ID (1).
	 */
	u32 msg_conn_id;
} __packed;

enum vmbus_channel_state {
	CHANNEL_OFFER_STATE,
	CHANNEL_OPENING_STATE,
	CHANNEL_OPEN_STATE,
	CHANNEL_OPENED_STATE,
};

/*
 * Represents each channel msg on the vmbus connection.  This is a
 * variable-size data structure depending on the msg type itself.
 */
struct vmbus_channel_msginfo {
	/* Bookkeeping stuff */
	struct list_head msglistentry;

	/* So far, this is only used to handle gpadl body message */
	struct list_head submsglist;

	/* Synchronize the request/response if needed */
	struct completion waitevent;
	struct vmbus_channel *waiting_channel;
	union {
		struct vmbus_channel_version_supported version_supported;
		struct vmbus_channel_open_result open_result;
		struct vmbus_channel_gpadl_torndown gpadl_torndown;
		struct vmbus_channel_gpadl_created gpadl_created;
		struct vmbus_channel_version_response version_response;
	} response;

	u32 msgsize;
	/*
	 * The channel message that goes out on the "wire".
	 * It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header.
	 */
	unsigned char msg[];
};

struct vmbus_close_msg {
	struct vmbus_channel_msginfo info;
	struct vmbus_channel_close_channel msg;
};

/* Define connection identifier type. */
union hv_connection_id {
	u32 asu32;
	struct {
		u32 id:24;
		u32 reserved:8;
	} u;
};

enum vmbus_device_type {
	HV_IDE = 0,
	HV_SCSI,
	HV_FC,
	HV_NIC,
	HV_ND,
	HV_PCIE,
	HV_FB,
	HV_KBD,
	HV_MOUSE,
	HV_KVP,
	HV_TS,
	HV_HB,
	HV_SHUTDOWN,
	HV_FCOPY,
	HV_BACKUP,
	HV_DM,
	HV_UNKNOWN,
};

struct vmbus_device {
	u16 dev_type;
	guid_t guid;
	bool perf_device;
};

struct vmbus_channel {
	struct list_head listentry;

	struct hv_device *device_obj;

	enum vmbus_channel_state state;

	struct vmbus_channel_offer_channel offermsg;
	/*
	 * These are based on the OfferMsg.MonitorId.
	 * Save it here for easy access.
	 */
	u8 monitor_grp;
	u8 monitor_bit;

	bool rescind; /* got rescind msg */
	struct completion rescind_event;

	u32 ringbuffer_gpadlhandle;

	/* Allocated memory for ring buffer */
	struct page *ringbuffer_page;
	u32 ringbuffer_pagecount;
	u32 ringbuffer_send_offset;
	struct hv_ring_buffer_info outbound;	/* send to parent */
	struct hv_ring_buffer_info inbound;	/* receive from parent */

	struct vmbus_close_msg close_msg;

	/* Statistics */
	u64 interrupts;	/* Host to Guest interrupts */
	u64 sig_events;	/* Guest to Host events */

	/*
	 * Guest to host interrupts caused by the outbound ring buffer changing
	 * from empty to not empty.
	 */
	u64 intr_out_empty;

	/*
	 * Indicates that a full outbound ring buffer was encountered. The flag
	 * is set to true when a full outbound ring buffer is encountered and
	 * set to false when a write to the outbound ring buffer is completed.
	 */
	bool out_full_flag;

	/* Channel callbacks are invoked in softirq context */
	struct tasklet_struct callback_event;
	void (*onchannel_callback)(void *context);
	void *channel_callback_context;

	void (*change_target_cpu_callback)(struct vmbus_channel *channel,
			u32 old, u32 new);

	/*
	 * Synchronize channel scheduling and channel removal; see the inline
	 * comments in vmbus_chan_sched() and vmbus_reset_channel_cb().
	 */
	spinlock_t sched_lock;

	/*
	 * A channel can be marked for one of three modes of reading:
	 *   BATCHED - callback called from tasklet and should read
	 *             channel until empty. Interrupts from the host
	 *             are masked while read is in process (default).
	 *   DIRECT - callback called from tasklet (softirq).
	 *   ISR - callback called in interrupt context and must
	 *         invoke its own deferred processing.
	 *         Host interrupts are disabled and must be re-enabled
	 *         when ring is empty.
	 */
	enum hv_callback_mode {
		HV_CALL_BATCHED,
		HV_CALL_DIRECT,
		HV_CALL_ISR
	} callback_mode;

	bool is_dedicated_interrupt;
	u64 sig_event;

	/*
	 * Starting with win8, this field will be used to specify
	 * the target virtual processor on which to deliver the interrupt for
	 * the host to guest communication.
	 * Prior to win8, incoming channel interrupts would only
	 * be delivered on cpu 0. Setting this value to 0 would
	 * preserve the earlier behavior.
	 */
	u32 target_vp;
	/* The corresponding CPUID in the guest */
	u32 target_cpu;
	int numa_node;
	/*
	 * Support for sub-channels. For high performance devices,
	 * it will be useful to have multiple sub-channels to support
	 * a scalable communication infrastructure with the host.
	 * The support for sub-channels is implemented as an extension
	 * to the current infrastructure.
	 * The initial offer is considered the primary channel and this
	 * offer message will indicate if the host supports sub-channels.
	 * The guest is free to ask for sub-channels to be offered and can
	 * open these sub-channels as a normal "primary" channel. However,
	 * all sub-channels will have the same type and instance guids as the
	 * primary channel. Requests sent on a given channel will result in a
	 * response on the same channel.
	 */

	/*
	 * Sub-channel creation callback. This callback will be called in
	 * process context when a sub-channel offer is received from the host.
	 * The guest can open the sub-channel in the context of this callback.
	 */
	void (*sc_creation_callback)(struct vmbus_channel *new_sc);

	/*
	 * Channel rescind callback. Some channels (the hvsock ones) need to
	 * register a callback which is invoked in vmbus_onoffer_rescind().
	 */
	void (*chn_rescind_callback)(struct vmbus_channel *channel);

	/*
	 * The spinlock to protect the structure. It is being used to protect
	 * test-and-set access to various attributes of the structure as well
	 * as all sc_list operations.
	 */
	spinlock_t lock;
	/*
	 * All Sub-channels of a primary channel are linked here.
	 */
	struct list_head sc_list;
	/*
	 * The primary channel this sub-channel belongs to.
	 * This will be NULL for the primary channel.
	 */
	struct vmbus_channel *primary_channel;
	/*
	 * Support per-channel state for use by vmbus drivers.
	 */
	void *per_channel_state;

	/*
	 * Defer freeing channel until after all cpu's have
	 * gone through grace period.
	 */
	struct rcu_head rcu;

	/*
	 * For sysfs per-channel properties.
	 */
	struct kobject kobj;

	/*
	 * For performance critical channels (storage, networking
	 * etc.), Hyper-V has a mechanism to enhance the throughput
	 * at the expense of latency:
	 * When the host is to be signaled, we just set a bit in a shared page
	 * and this bit will be inspected by the hypervisor within a certain
	 * window and if the bit is set, the host will be signaled. The window
	 * of time is the monitor latency - currently around 100 usecs. This
	 * mechanism improves throughput by:
	 *
	 * A) Making the host more efficient - each time it wakes up,
	 *    potentially it will process more packets. The
	 *    monitor latency allows a batch to build up.
	 * B) By deferring the hypercall to signal, we will also minimize
	 *    the interrupts.
	 *
	 * Clearly, these optimizations improve throughput at the expense of
	 * latency. Furthermore, since the channel is shared for both
	 * control and data messages, control messages currently suffer
	 * unnecessary latency adversely impacting performance and boot
	 * time. To fix this issue, permit tagging the channel as being
	 * in "low latency" mode. In this mode, we will bypass the monitor
	 * mechanism.
	 */
	bool low_latency;

	bool probe_done;

	/*
	 * Cache the device ID here for easy access; this is useful, in
	 * particular, in situations where the channel's device_obj has
	 * not been allocated/initialized yet.
	 */
	u16 device_id;

	/*
	 * We must offload the handling of the primary/sub channels
	 * from the single-threaded vmbus_connection.work_queue to
	 * two different workqueues, otherwise we can block
	 * vmbus_connection.work_queue and hang: see vmbus_process_offer().
	 */
	struct work_struct add_channel_work;

	/*
	 * Guest to host interrupts caused by the inbound ring buffer changing
	 * from full to not full while a packet is waiting.
	 */
	u64 intr_in_full;

	/*
	 * The total number of write operations that encountered a full
	 * outbound ring buffer.
	 */
	u64 out_full_total;

	/*
	 * The number of write operations that were the first to encounter a
	 * full outbound ring buffer.
	 */
	u64 out_full_first;

	/* enabling/disabling fuzz testing on the channel (default is false) */
	bool fuzz_testing_state;

	/*
	 * Interrupt delay will delay the guest from emptying the ring buffer
	 * for a specific amount of time. The delay is in microseconds and will
	 * be between 1 and a maximum of 1000, its default is 0 (no delay).
	 * The message delay will delay guest reading on a per message basis
	 * in microseconds between 1 and 1000 with the default being 0
	 * (no delay).
	 */
	u32 fuzz_testing_interrupt_delay;
	u32 fuzz_testing_message_delay;

};

static inline bool is_hvsock_channel(const struct vmbus_channel *c)
{
	return !!(c->offermsg.offer.chn_flags &
		  VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
}

static inline bool is_sub_channel(const struct vmbus_channel *c)
{
	return c->offermsg.offer.sub_channel_index != 0;
}

static inline void set_channel_read_mode(struct vmbus_channel *c,
					 enum hv_callback_mode mode)
{
	c->callback_mode = mode;
}

static inline void set_per_channel_state(struct vmbus_channel *c, void *s)
{
	c->per_channel_state = s;
}

static inline void *get_per_channel_state(struct vmbus_channel *c)
{
	return c->per_channel_state;
}

static inline void set_channel_pending_send_size(struct vmbus_channel *c,
						 u32 size)
{
	unsigned long flags;

	if (size) {
		spin_lock_irqsave(&c->outbound.ring_lock, flags);
		++c->out_full_total;

		if (!c->out_full_flag) {
			++c->out_full_first;
			c->out_full_flag = true;
		}
		spin_unlock_irqrestore(&c->outbound.ring_lock, flags);
	} else {
		c->out_full_flag = false;
	}

	c->outbound.ring_buffer->pending_send_sz = size;
}

static inline void set_low_latency_mode(struct vmbus_channel *c)
{
	c->low_latency = true;
}

static inline void clear_low_latency_mode(struct vmbus_channel *c)
{
	c->low_latency = false;
}

void vmbus_onmessage(struct vmbus_channel_message_header *hdr);

int vmbus_request_offers(void);

/*
 * APIs for managing sub-channels.
 */

void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
			void (*sc_cr_cb)(struct vmbus_channel *new_sc));

void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
			void (*chn_rescind_cb)(struct vmbus_channel *));

/*
 * Check if sub-channels have already been offered. This API will be useful
 * when the driver is unloaded after establishing sub-channels. In this case,
 * when the driver is re-loaded, the driver would have to check if the
 * subchannels have already been established before attempting to request
 * the creation of sub-channels.
 * This function returns TRUE to indicate that subchannels have already been
 * created.
 * This function should be invoked after setting the callback function for
 * sub-channel creation.
 */
bool vmbus_are_subchannels_present(struct vmbus_channel *primary);
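
/*
 * Illustrative sketch of the pattern described above (hypothetical helper,
 * not upstream API): register the creation callback before checking for
 * sub-channels that may already have been offered.
 */
static inline bool hv_example_setup_subchannels(struct vmbus_channel *primary,
		void (*sc_cb)(struct vmbus_channel *new_sc))
{
	vmbus_set_sc_create_callback(primary, sc_cb);

	/* true: sub-channels already exist; false: request them from host */
	return vmbus_are_subchannels_present(primary);
}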

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_page_buffer {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;
	struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT];
} __packed;

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_multipage_buffer {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;		/* Always 1 in this case */
	struct hv_multipage_buffer range;
} __packed;

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_packet_mpb_array {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;		/* Always 1 in this case */
	struct hv_mpb_array range;
} __packed;

int vmbus_alloc_ring(struct vmbus_channel *channel,
		     u32 send_size, u32 recv_size);
void vmbus_free_ring(struct vmbus_channel *channel);

int vmbus_connect_ring(struct vmbus_channel *channel,
		       void (*onchannel_callback)(void *context),
		       void *context);
int vmbus_disconnect_ring(struct vmbus_channel *channel);

extern int vmbus_open(struct vmbus_channel *channel,
		      u32 send_ringbuffersize,
		      u32 recv_ringbuffersize,
		      void *userdata,
		      u32 userdatalen,
		      void (*onchannel_callback)(void *context),
		      void *context);

extern void vmbus_close(struct vmbus_channel *channel);
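
/*
 * Illustrative sketch (hypothetical helper, not upstream API): a typical
 * driver opens its channel with page-multiple send/receive rings and a
 * per-channel callback; error handling is elided.
 */
static inline int hv_example_open_channel(struct vmbus_channel *chan,
		void (*cb)(void *context))
{
	/* 16 KiB rings, no user data in the open request */
	return vmbus_open(chan, 16 * 1024, 16 * 1024, NULL, 0, cb, chan);
}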

extern int vmbus_sendpacket(struct vmbus_channel *channel,
			    void *buffer,
			    u32 bufferLen,
			    u64 requestid,
			    enum vmbus_packet_type type,
			    u32 flags);

extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
				       struct hv_page_buffer pagebuffers[],
				       u32 pagecount,
				       void *buffer,
				       u32 bufferlen,
				       u64 requestid);

extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
				     struct vmbus_packet_mpb_array *mpb,
				     u32 desc_size,
				     void *buffer,
				     u32 bufferlen,
				     u64 requestid);

extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
				 void *kbuffer,
				 u32 size,
				 u32 *gpadl_handle);

extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
				u32 gpadl_handle);

void vmbus_reset_channel_cb(struct vmbus_channel *channel);

extern int vmbus_recvpacket(struct vmbus_channel *channel,
			    void *buffer,
			    u32 bufferlen,
			    u32 *buffer_actual_len,
			    u64 *requestid);

extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
				void *buffer,
				u32 bufferlen,
				u32 *buffer_actual_len,
				u64 *requestid);


extern void vmbus_ontimer(unsigned long data);

/* Base driver object */
struct hv_driver {
	const char *name;

	/*
	 * A hvsock offer, which has a VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER
	 * channel flag, actually doesn't mean a synthetic device because the
	 * offer's if_type/if_instance can change for every new hvsock
	 * connection.
	 *
	 * However, to facilitate the notification of new-offer/rescind-offer
	 * from vmbus driver to hvsock driver, we can handle hvsock offer as
	 * a special vmbus device, and hence we need the below flag to
	 * indicate if the driver is the hvsock driver or not: we need to
	 * specially treat the hvsock offer & driver in vmbus_match().
	 */
	bool hvsock;

	/* the device type supported by this driver */
	guid_t dev_type;
	const struct hv_vmbus_device_id *id_table;

	struct device_driver driver;

	/* dynamic device GUID's */
	struct {
		spinlock_t lock;
		struct list_head list;
	} dynids;

	int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
	int (*remove)(struct hv_device *);
	void (*shutdown)(struct hv_device *);

	int (*suspend)(struct hv_device *);
	int (*resume)(struct hv_device *);

};

/* Base device object */
struct hv_device {
	/* the device type id of this device */
	guid_t dev_type;

	/* the device instance id of this device */
	guid_t dev_instance;
	u16 vendor_id;
	u16 device_id;

	struct device device;
	char *driver_override; /* Driver name to force a match */

	struct vmbus_channel *channel;
	struct kset *channels_kset;

	/* place holder to keep track of the dir for hv device in debugfs */
	struct dentry *debug_dir;

};


static inline struct hv_device *device_to_hv_device(struct device *d)
{
	return container_of(d, struct hv_device, device);
}

static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
{
	return container_of(d, struct hv_driver, driver);
}

static inline void hv_set_drvdata(struct hv_device *dev, void *data)
{
	dev_set_drvdata(&dev->device, data);
}

static inline void *hv_get_drvdata(struct hv_device *dev)
{
	return dev_get_drvdata(&dev->device);
}

struct hv_ring_buffer_debug_info {
	u32 current_interrupt_mask;
	u32 current_read_index;
	u32 current_write_index;
	u32 bytes_avail_toread;
	u32 bytes_avail_towrite;
};


int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				struct hv_ring_buffer_debug_info *debug_info);

/* Vmbus interface */
#define vmbus_driver_register(driver)	\
	__vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
					 struct module *owner,
					 const char *mod_name);
void vmbus_driver_unregister(struct hv_driver *hv_driver);
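
/*
 * Illustrative sketch of the registration flow (example only, kept out of
 * compilation; all example_* names are hypothetical):
 */
#if 0
static struct hv_driver example_drv = {
	.name = "hv_example",
	.id_table = example_id_table,	/* matched against offer GUIDs */
	.probe = example_probe,
	.remove = example_remove,
};

static int __init example_init(void)
{
	/* expands to __vmbus_driver_register(&example_drv, THIS_MODULE, ...) */
	return vmbus_driver_register(&example_drv);
}
module_init(example_init);
#endif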
K. Y. Srinivasan27b5b3c2011-05-12 19:34:25 -07001239
Dexuan Cui85d9aa72016-01-27 22:29:43 -08001240void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);
1241
Jake Oshins35464482015-08-05 00:52:37 -07001242int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
1243 resource_size_t min, resource_size_t max,
1244 resource_size_t size, resource_size_t align,
1245 bool fb_overlap_ok);
Jake Oshins97fb77dc2016-04-05 10:22:51 -07001246void vmbus_free_mmio(resource_size_t start, resource_size_t size);
Jake Oshins619848b2015-12-14 16:01:39 -08001247
/*
 * GUID definitions of various offer types - services offered to the guest.
 */

/*
 * Network GUID
 * {f8615163-df3e-46c5-913f-f2d2f965ed0e}
 */
#define HV_NIC_GUID \
	.guid = GUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \
			  0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e)

/*
 * IDE GUID
 * {32412632-86cb-44a2-9b5c-50d1417354f5}
 */
#define HV_IDE_GUID \
	.guid = GUID_INIT(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \
			  0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)

/*
 * SCSI GUID
 * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}
 */
#define HV_SCSI_GUID \
	.guid = GUID_INIT(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \
			  0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)

/*
 * Shutdown GUID
 * {0e0b6031-5213-4934-818b-38d90ced39db}
 */
#define HV_SHUTDOWN_GUID \
	.guid = GUID_INIT(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \
			  0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb)

/*
 * Time Synch GUID
 * {9527E630-D0AE-497b-ADCE-E80AB0175CAF}
 */
#define HV_TS_GUID \
	.guid = GUID_INIT(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \
			  0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)

/*
 * Heartbeat GUID
 * {57164f39-9115-4e78-ab55-382f3bd5422d}
 */
#define HV_HEART_BEAT_GUID \
	.guid = GUID_INIT(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \
			  0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)

/*
 * KVP GUID
 * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}
 */
#define HV_KVP_GUID \
	.guid = GUID_INIT(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \
			  0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6)

/*
 * Dynamic memory GUID
 * {525074dc-8985-46e2-8057-a307dc18a502}
 */
#define HV_DM_GUID \
	.guid = GUID_INIT(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \
			  0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)

/*
 * Mouse GUID
 * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}
 */
#define HV_MOUSE_GUID \
	.guid = GUID_INIT(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \
			  0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a)

/*
 * Keyboard GUID
 * {f912ad6d-2b17-48ea-bd65-f927a61c7684}
 */
#define HV_KBD_GUID \
	.guid = GUID_INIT(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \
			  0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84)

/*
 * VSS (Backup/Restore) GUID
 */
#define HV_VSS_GUID \
	.guid = GUID_INIT(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \
			  0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40)
/*
 * Synthetic Video GUID
 * {DA0A7802-E377-4aac-8E77-0558EB1073F8}
 */
#define HV_SYNTHVID_GUID \
	.guid = GUID_INIT(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \
			  0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8)

/*
 * Synthetic FC GUID
 * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda}
 */
#define HV_SYNTHFC_GUID \
	.guid = GUID_INIT(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \
			  0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda)

/*
 * Guest File Copy Service
 * {34D14BE3-DEE4-41c8-9AE7-6B174977C192}
 */

#define HV_FCOPY_GUID \
	.guid = GUID_INIT(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \
			  0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92)

/*
 * NetworkDirect. This is the guest RDMA service.
 * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}
 */
#define HV_ND_GUID \
	.guid = GUID_INIT(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \
			  0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01)

/*
 * PCI Express Pass Through
 * {44C4F61D-4444-4400-9D52-802E27EDE19F}
 */

#define HV_PCIE_GUID \
	.guid = GUID_INIT(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
			  0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)

/*
 * Linux doesn't support these 3 devices: the first two are for
 * Automatic Virtual Machine Activation, and the third is for
 * Remote Desktop Virtualization.
 * {f8e65716-3cb3-4a06-9a60-1889c5cccab5}
 * {3375baf4-9e15-4b30-b765-67acb10d607b}
 * {276aacf4-ac15-426c-98dd-7521ad3f01fe}
 */

#define HV_AVMA1_GUID \
	.guid = GUID_INIT(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \
			  0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5)

#define HV_AVMA2_GUID \
	.guid = GUID_INIT(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \
			  0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b)

#define HV_RDV_GUID \
	.guid = GUID_INIT(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
			  0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)

/*
 * Common header for Hyper-V ICs
 */

#define ICMSGTYPE_NEGOTIATE		0
#define ICMSGTYPE_HEARTBEAT		1
#define ICMSGTYPE_KVPEXCHANGE		2
#define ICMSGTYPE_SHUTDOWN		3
#define ICMSGTYPE_TIMESYNC		4
#define ICMSGTYPE_VSS			5

#define ICMSGHDRFLAG_TRANSACTION	1
#define ICMSGHDRFLAG_REQUEST		2
#define ICMSGHDRFLAG_RESPONSE		4


/*
 * While we want to handle util services as regular devices,
 * there is only one instance of each of these services; so
 * we statically allocate the service specific state.
 */

struct hv_util_service {
	u8 *recv_buffer;
	void *channel;
	void (*util_cb)(void *);
	int (*util_init)(struct hv_util_service *);
	void (*util_deinit)(void);
	int (*util_pre_suspend)(void);
	int (*util_pre_resume)(void);
};
1432
K. Y. Srinivasanb1897022011-05-12 19:34:26 -07001433struct vmbuspipe_hdr {
1434 u32 flags;
1435 u32 msgsize;
1436} __packed;
1437
1438struct ic_version {
1439 u16 major;
1440 u16 minor;
1441} __packed;
1442
1443struct icmsg_hdr {
1444 struct ic_version icverframe;
1445 u16 icmsgtype;
1446 struct ic_version icvermsg;
1447 u16 icmsgsize;
1448 u32 status;
1449 u8 ictransaction_id;
1450 u8 icflags;
1451 u8 reserved[2];
1452} __packed;
1453
struct icmsg_negotiate {
	u16 icframe_vercnt;
	u16 icmsg_vercnt;
	u32 reserved;
	struct ic_version icversion_data[1]; /* variable-size array */
} __packed;

struct shutdown_msg_data {
	u32 reason_code;
	u32 timeout_seconds;
	u32 flags;
	u8 display_message[2048];
} __packed;

struct heartbeat_msg_data {
	u64 seq_num;
	u32 reserved[8];
} __packed;

/* Time Sync IC defs */
#define ICTIMESYNCFLAG_PROBE	0
#define ICTIMESYNCFLAG_SYNC	1
#define ICTIMESYNCFLAG_SAMPLE	2

/*
 * Offset between the Windows epoch (1601-01-01) and the Unix epoch
 * (1970-01-01): 11644473600 seconds, expressed in 100ns units.
 */
#ifdef __x86_64__
#define WLTIMEDELTA 116444736000000000L /* in 100ns units */
#else
#define WLTIMEDELTA 116444736000000000LL
#endif

struct ictimesync_data {
	u64 parenttime;
	u64 childtime;
	u64 roundtriptime;
	u8 flags;
} __packed;

struct ictimesync_ref_data {
	u64 parenttime;
	u64 vmreferencetime;
	u8 flags;
	char leapflags;
	char stratum;
	u8 reserved[3];
} __packed;

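/*
 * Conversion example (a sketch, assuming a host timestamp already read
 * from one of the timesync messages above): parenttime is in 100ns
 * units since the Windows epoch, so a Unix-epoch nanosecond value is
 * obtained as follows.
 *
 *	u64 host_100ns;		(e.g. the parenttime field)
 *	u64 unix_ns = (host_100ns - WLTIMEDELTA) * 100;
 */
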
struct hyperv_service_callback {
	u8 msg_type;
	char *log_msg;
	guid_t data;
	struct vmbus_channel *channel;
	void (*callback)(void *context);
};

#define MAX_SRV_VER	0x7ffffff
extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
				const int *fw_version, int fw_vercnt,
				const int *srv_version, int srv_vercnt,
				int *nego_fw_version, int *nego_srv_version);

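/*
 * A sketch of the negotiation step in a util service channel callback,
 * loosely modeled on the in-tree util drivers; buf, fw_versions,
 * FW_VER_COUNT, srv_versions, SRV_VER_COUNT and srv_version are
 * illustrative assumptions, not definitions from this header.
 *
 *	struct icmsg_hdr *hdr;
 *	u32 recvlen;
 *	u64 reqid;
 *
 *	vmbus_recvpacket(channel, buf, sizeof(buf), &recvlen, &reqid);
 *	hdr = (struct icmsg_hdr *)&buf[sizeof(struct vmbuspipe_hdr)];
 *	if (hdr->icmsgtype == ICMSGTYPE_NEGOTIATE)
 *		vmbus_prep_negotiate_resp(hdr, buf, fw_versions, FW_VER_COUNT,
 *					  srv_versions, SRV_VER_COUNT,
 *					  NULL, &srv_version);
 *	hdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
 *	vmbus_sendpacket(channel, buf, recvlen, reqid, VM_PKT_DATA_INBAND, 0);
 */
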
void hv_process_channel_removal(struct vmbus_channel *channel);

void vmbus_setevent(struct vmbus_channel *channel);
/*
 * Negotiated version with the Host.
 */

extern __u32 vmbus_proto_version;

int vmbus_send_tl_connect_request(const guid_t *shv_guest_service_id,
				  const guid_t *shv_host_service_id);
int vmbus_send_modifychannel(u32 child_relid, u32 target_vp);
void vmbus_set_event(struct vmbus_channel *channel);

/* Get the start of the ring buffer. */
static inline void *
hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_buffer->buffer;
}

/*
 * Mask off host interrupt callback notifications
 */
static inline void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;

	/* make sure mask update is not reordered */
	virt_mb();
}


/*
 * Re-enable host callback and return number of outstanding bytes
 */
static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 0;

	/* make sure mask update is not reordered */
	virt_mb();

	/*
	 * Now check to see if the ring buffer is still empty.
	 * If it is not, we raced and we need to process new
	 * incoming messages.
	 */
	return hv_get_bytes_to_read(rbi);
}

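/*
 * The two helpers above are meant to bracket ring-buffer draining; a
 * minimal sketch of the intended pattern (processing loop elided):
 *
 *	again:
 *		hv_begin_read(rbi);
 *		... consume all available packets ...
 *		if (hv_end_read(rbi))
 *			goto again;	(lost a race with the host; re-drain)
 */
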
/*
 * An API to support in-place processing of incoming VMBUS packets.
 */

/* Get data payload associated with descriptor */
static inline void *hv_pkt_data(const struct vmpacket_descriptor *desc)
{
	return (void *)((unsigned long)desc + (desc->offset8 << 3));
}

/* Get data size associated with descriptor */
static inline u32 hv_pkt_datalen(const struct vmpacket_descriptor *desc)
{
	return (desc->len8 << 3) - (desc->offset8 << 3);
}

struct vmpacket_descriptor *
hv_pkt_iter_first(struct vmbus_channel *channel);

struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *pkt);

void hv_pkt_iter_close(struct vmbus_channel *channel);

/*
 * Get next packet descriptor from iterator
 * If at end of list, return NULL and update host.
 */
static inline struct vmpacket_descriptor *
hv_pkt_iter_next(struct vmbus_channel *channel,
		 const struct vmpacket_descriptor *pkt)
{
	struct vmpacket_descriptor *nxt;

	nxt = __hv_pkt_iter_next(channel, pkt);
	if (!nxt)
		hv_pkt_iter_close(channel);

	return nxt;
}

#define foreach_vmbus_pkt(pkt, channel) \
	for (pkt = hv_pkt_iter_first(channel); pkt; \
	     pkt = hv_pkt_iter_next(channel, pkt))

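/*
 * Example (a sketch, with a hypothetical handler): draining a channel
 * from its callback using the iterator and accessors above.
 *
 *	struct vmpacket_descriptor *pkt;
 *
 *	foreach_vmbus_pkt(pkt, channel) {
 *		void *data = hv_pkt_data(pkt);
 *		u32 len = hv_pkt_datalen(pkt);
 *
 *		my_handle_packet(data, len);	(hypothetical)
 *	}
 */
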
/*
 * Interface for passing data between SR-IOV PF and VF drivers. The VF driver
 * sends requests to read and write blocks. Each block must be 128 bytes or
 * smaller. Optionally, the VF driver can register a callback function which
 * will be invoked when the host signals that one or more of the first 64
 * block IDs is "invalid", meaning the VF driver should reread them.
 */
#define HV_CONFIG_BLOCK_SIZE_MAX 128

int hyperv_read_cfg_blk(struct pci_dev *dev, void *buf, unsigned int buf_len,
			unsigned int block_id, unsigned int *bytes_returned);
int hyperv_write_cfg_blk(struct pci_dev *dev, void *buf, unsigned int len,
			 unsigned int block_id);
int hyperv_reg_block_invalidate(struct pci_dev *dev, void *context,
				void (*block_invalidate)(void *context,
							 u64 block_mask));

struct hyperv_pci_block_ops {
	int (*read_block)(struct pci_dev *dev, void *buf, unsigned int buf_len,
			  unsigned int block_id, unsigned int *bytes_returned);
	int (*write_block)(struct pci_dev *dev, void *buf, unsigned int len,
			   unsigned int block_id);
	int (*reg_blk_invalidate)(struct pci_dev *dev, void *context,
				  void (*block_invalidate)(void *context,
							   u64 block_mask));
};

extern struct hyperv_pci_block_ops hvpci_block_ops;

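/*
 * Example (a sketch, assuming a bound VF pci_dev named "pdev" and a
 * block ID the host actually implements):
 *
 *	u8 buf[HV_CONFIG_BLOCK_SIZE_MAX];
 *	unsigned int bytes;
 *	int ret;
 *
 *	ret = hyperv_read_cfg_blk(pdev, buf, sizeof(buf), 1, &bytes);
 *	if (ret == 0)
 *		... the first "bytes" bytes of block 1 are now in buf ...
 */
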
#endif /* _HYPERV_H */