/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright (c) 2011, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */

#ifndef _HYPERV_VMBUS_H
#define _HYPERV_VMBUS_H

#include <linux/list.h>
#include <asm/sync_bitops.h>
#include <asm/hyperv-tlfs.h>
#include <linux/atomic.h>
#include <linux/hyperv.h>
#include <linux/interrupt.h>

#include "hv_trace.h"

/*
 * Timeout (in seconds) for services such as KVP and fcopy.
 */
#define HV_UTIL_TIMEOUT 30

/*
 * Timeout (in seconds) for the guest-host handshake for services.
 */
#define HV_UTIL_NEGO_TIMEOUT 55


/* Definitions for the monitored notification facility */
union hv_monitor_trigger_group {
        u64 as_uint64;
        struct {
                u32 pending;
                u32 armed;
        };
};

struct hv_monitor_parameter {
        union hv_connection_id connectionid;
        u16 flagnumber;
        u16 rsvdz;
};

union hv_monitor_trigger_state {
        u32 asu32;

        struct {
                u32 group_enable:4;
                u32 rsvdz:28;
        };
};

/* struct hv_monitor_page Layout (offsets in hex) */
/* ------------------------------------------------------ */
/* | 0   | TriggerState (4 bytes) | Rsvd1 (4 bytes)     | */
/* | 8   | TriggerGroup[0]                              | */
/* | 10  | TriggerGroup[1]                              | */
/* | 18  | TriggerGroup[2]                              | */
/* | 20  | TriggerGroup[3]                              | */
/* | 28  | Rsvd2[0]                                     | */
/* | 30  | Rsvd2[1]                                     | */
/* | 38  | Rsvd2[2]                                     | */
/* | 40  | NextCheckTime[0][0]    | NextCheckTime[0][1] | */
/* | ...                                                | */
/* | 240 | Latency[0][0..3]                             | */
/* | 340 | Rsvd3[0]                                     | */
/* | 440 | Parameter[0][0]                              | */
/* | 448 | Parameter[0][1]                              | */
/* | ...                                                | */
/* | 840 | Rsvd4[0]                                     | */
/* ------------------------------------------------------ */
struct hv_monitor_page {
        union hv_monitor_trigger_state trigger_state;
        u32 rsvdz1;

        union hv_monitor_trigger_group trigger_group[4];
        u64 rsvdz2[3];

        s32 next_checktime[4][32];

        u16 latency[4][32];
        u64 rsvdz3[32];

        struct hv_monitor_parameter parameter[4][32];

        u8 rsvdz4[1984];
};

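/*
 * Size sketch (illustrative, assuming no implicit padding): 8 bytes of
 * trigger state + 32 bytes of trigger groups + 24 reserved bytes + 512 bytes
 * of next_checktime + 256 bytes of latency + 256 reserved bytes + 1024 bytes
 * of parameters + 1984 reserved bytes = 4096 bytes, i.e. exactly one 4 KiB
 * page. A compile-time assertion placed in a function could verify this:
 *
 *      BUILD_BUG_ON(sizeof(struct hv_monitor_page) != 4096);
 */
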
#define HV_HYPERCALL_PARAM_ALIGN        sizeof(u64)

/* Definition of the hv_post_message hypercall input structure. */
struct hv_input_post_message {
        union hv_connection_id connectionid;
        u32 reserved;
        u32 message_type;
        u32 payload_size;
        u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
};


enum {
        VMBUS_MESSAGE_CONNECTION_ID = 1,
        VMBUS_MESSAGE_CONNECTION_ID_4 = 4,
        VMBUS_MESSAGE_PORT_ID = 1,
        VMBUS_EVENT_CONNECTION_ID = 2,
        VMBUS_EVENT_PORT_ID = 2,
        VMBUS_MONITOR_CONNECTION_ID = 3,
        VMBUS_MONITOR_PORT_ID = 3,
        VMBUS_MESSAGE_SINT = 2,
};

/*
 * Per-cpu state for channel handling
 */
struct hv_per_cpu_context {
        void *synic_message_page;
        void *synic_event_page;
        /*
         * Buffer used to post messages to the host.
         */
        void *post_msg_page;

        /*
         * Starting with win8, we can take channel interrupts on any CPU;
         * we will manage the tasklet that handles event messages on a
         * per-CPU basis.
         */
        struct tasklet_struct msg_dpc;

        /*
         * To optimize the mapping of relid to channel, maintain a
         * per-cpu list of the channels based on their CPU affinity.
         */
        struct list_head chan_list;
};

struct hv_context {
        /*
         * We only support running on top of Hyper-V, so at this point
         * this really can only contain the Hyper-V ID.
         */
        u64 guestid;

        void *tsc_page;

        struct hv_per_cpu_context __percpu *cpu_context;

        /*
         * To manage allocations in a NUMA node.
         * Array indexed by NUMA node ID.
         */
        struct cpumask *hv_numa_map;
};

extern struct hv_context hv_context;

/* Hv Interface */

extern int hv_init(void);

extern int hv_post_message(union hv_connection_id connection_id,
                           enum hv_message_type message_type,
                           void *payload, size_t payload_size);
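
/*
 * Roughly speaking, hv_post_message() marshals the caller's payload into a
 * struct hv_input_post_message (built in the per-cpu post_msg_page above)
 * and hands it to the hypervisor via the HVCALL_POST_MESSAGE hypercall;
 * the details of the hypercall path live outside this header.
 */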

extern int hv_synic_alloc(void);

extern void hv_synic_free(void);

extern int hv_synic_init(unsigned int cpu);

extern int hv_synic_cleanup(unsigned int cpu);

/* Ring buffer interface */

void hv_ringbuffer_pre_init(struct vmbus_channel *channel);

int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
                       struct page *pages, u32 pagecnt);

void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);

int hv_ringbuffer_write(struct vmbus_channel *channel,
                        const struct kvec *kv_list, u32 kv_count);

int hv_ringbuffer_read(struct vmbus_channel *channel,
                       void *buffer, u32 buflen, u32 *buffer_actual_len,
                       u64 *requestid, bool raw);

/*
 * The maximum number of channels is determined by the size of the
 * interrupt page, which is PAGE_SIZE: half of the page is used for the
 * send endpoint interrupt bitmap and the other half for the receive
 * endpoint interrupt bitmap.
 */
#define MAX_NUM_CHANNELS        ((PAGE_SIZE >> 1) << 3) /* 16384 channels with 4 KiB pages */
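
/*
 * Worked example (assuming the common 4 KiB PAGE_SIZE): half of the page is
 * 2048 bytes, and the "<< 3" converts bytes to bits, so there are
 * 2048 * 8 = 16384 interrupt bits and hence up to 16384 channels.
 */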

/* The value here must be a multiple of 32 */
/* TODO: Need to make this configurable */
#define MAX_NUM_CHANNELS_SUPPORTED      256


enum vmbus_connect_state {
        DISCONNECTED,
        CONNECTING,
        CONNECTED,
        DISCONNECTING
};

#define MAX_SIZE_CHANNEL_MESSAGE        HV_MESSAGE_PAYLOAD_BYTE_COUNT

struct vmbus_connection {
        /*
         * CPU on which the initial host contact was made.
         */
        int connect_cpu;

        u32 msg_conn_id;

        atomic_t offer_in_progress;

        enum vmbus_connect_state conn_state;

        atomic_t next_gpadl_handle;

        struct completion unload_event;
        /*
         * Represents channel interrupts. Each bit position represents a
         * channel. When a channel sends an interrupt via VMBUS, it finds
         * its bit in the sendInterruptPage, sets it, and calls Hv to
         * generate a port event. The other end receives the port event
         * and parses the recvInterruptPage to see which bit is set.
         */
        void *int_page;
        void *send_int_page;
        void *recv_int_page;

        /*
         * 2 pages - the 1st page is for parent->child notification and
         * the 2nd is for child->parent notification.
         */
        struct hv_monitor_page *monitor_pages[2];
        struct list_head chn_msg_list;
        spinlock_t channelmsg_lock;

        /* List of channels */
        struct list_head chn_list;
        struct mutex channel_mutex;

        /*
         * An offer message is handled first on the work_queue, and then
         * is further handled on handle_primary_chan_wq or
         * handle_sub_chan_wq.
         */
        struct workqueue_struct *work_queue;
        struct workqueue_struct *handle_primary_chan_wq;
        struct workqueue_struct *handle_sub_chan_wq;
};


struct vmbus_msginfo {
        /* Bookkeeping stuff */
        struct list_head msglist_entry;

        /* The message itself */
        unsigned char msg[0];
};


extern struct vmbus_connection vmbus_connection;

static inline void vmbus_send_interrupt(u32 relid)
{
        sync_set_bit(relid, vmbus_connection.send_int_page);
}
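
/*
 * Usage sketch (illustrative only): a sender marks a channel as having a
 * pending interrupt by setting the bit for that channel's relid, e.g.
 *
 *      vmbus_send_interrupt(channel->offermsg.child_relid);
 *
 * and then signals the host (via the monitor page or a hypercall) so the
 * host scans its receive interrupt page for set bits.
 */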

enum vmbus_message_handler_type {
        /* The related handler can sleep. */
        VMHT_BLOCKING = 0,

        /* The related handler must NOT sleep. */
        VMHT_NON_BLOCKING = 1,
};

struct vmbus_channel_message_table_entry {
        enum vmbus_channel_message_type message_type;
        enum vmbus_message_handler_type handler_type;
        void (*message_handler)(struct vmbus_channel_message_header *msg);
};

extern const struct vmbus_channel_message_table_entry
        channel_message_table[CHANNELMSG_COUNT];
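
/*
 * Illustrative sketch only (the real table is defined elsewhere and may
 * differ): each entry pairs a channel message type with its handler and
 * tells the dispatcher whether that handler may sleep, e.g.
 *
 *      { CHANNELMSG_OFFERCHANNEL, VMHT_BLOCKING, vmbus_onoffer },
 *
 * vmbus_on_msg_dpc() looks up the entry for an incoming message and either
 * calls the handler directly or defers it to a workqueue when it can sleep.
 */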


/* General vmbus interface */

struct hv_device *vmbus_device_create(const guid_t *type,
                                      const guid_t *instance,
                                      struct vmbus_channel *channel);

int vmbus_device_register(struct hv_device *child_device_obj);
void vmbus_device_unregister(struct hv_device *device_obj);
int vmbus_add_channel_kobj(struct hv_device *device_obj,
                           struct vmbus_channel *channel);

void vmbus_remove_channel_attr_group(struct vmbus_channel *channel);

struct vmbus_channel *relid2channel(u32 relid);

void vmbus_free_channels(void);

/* Connection interface */

int vmbus_connect(void);
void vmbus_disconnect(void);

int vmbus_post_msg(void *buffer, size_t buflen, bool can_sleep);

void vmbus_on_event(unsigned long data);
void vmbus_on_msg_dpc(unsigned long data);

int hv_kvp_init(struct hv_util_service *srv);
void hv_kvp_deinit(void);
void hv_kvp_onchannelcallback(void *context);

int hv_vss_init(struct hv_util_service *srv);
void hv_vss_deinit(void);
void hv_vss_onchannelcallback(void *context);

int hv_fcopy_init(struct hv_util_service *srv);
void hv_fcopy_deinit(void);
void hv_fcopy_onchannelcallback(void *context);
void vmbus_initiate_unload(bool crash);

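/*
 * Run @cb on @channel's target CPU: directly when we are already executing
 * there in interrupt context, otherwise via smp_call_function_single().
 */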
static inline void hv_poll_channel(struct vmbus_channel *channel,
                                   void (*cb)(void *))
{
        if (!channel)
                return;

        if (in_interrupt() && (channel->target_cpu == smp_processor_id())) {
                cb(channel);
                return;
        }
        smp_call_function_single(channel->target_cpu, cb, channel, true);
}

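/*
 * Typical flow (sketch; the exact transitions live in the individual util
 * drivers): INIT -> READY once the userspace daemon registers, then
 * READY -> HOSTMSG_RECEIVED -> USERSPACE_REQ -> USERSPACE_RECV -> READY for
 * each host request, and any state -> DYING on driver unload.
 */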
enum hvutil_device_state {
        HVUTIL_DEVICE_INIT = 0,  /* driver is loaded, waiting for userspace */
        HVUTIL_READY,            /* userspace is registered */
        HVUTIL_HOSTMSG_RECEIVED, /* message from the host was received */
        HVUTIL_USERSPACE_REQ,    /* request to userspace was sent */
        HVUTIL_USERSPACE_RECV,   /* reply from userspace was received */
        HVUTIL_DEVICE_DYING,     /* driver unload is in progress */
};

#endif /* _HYPERV_VMBUS_H */