/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright (c) 2011, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */

#ifndef _HYPERV_VMBUS_H
#define _HYPERV_VMBUS_H

#include <linux/list.h>
#include <linux/bitops.h>
#include <asm/sync_bitops.h>
#include <asm/hyperv-tlfs.h>
#include <linux/atomic.h>
#include <linux/hyperv.h>
#include <linux/interrupt.h>

#include "hv_trace.h"

/*
 * Timeout for services such as KVP and fcopy.
 */
#define HV_UTIL_TIMEOUT 30

/*
 * Timeout for guest-host handshake for services.
 */
#define HV_UTIL_NEGO_TIMEOUT 55
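
/*
 * Both timeouts above are expressed in seconds.  A minimal usage sketch
 * (illustrative only, not part of this header): callers typically scale by
 * HZ when waiting on a completion tied to one of these services; the
 * completion name below is a hypothetical placeholder.
 *
 *	if (!wait_for_completion_timeout(&util_response_event,
 *					 HV_UTIL_TIMEOUT * HZ))
 *		pr_warn("hv_util: host did not respond in time\n");
 */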

/* Definitions for the monitored notification facility */
union hv_monitor_trigger_group {
	u64 as_uint64;
	struct {
		u32 pending;
		u32 armed;
	};
};

struct hv_monitor_parameter {
	union hv_connection_id connectionid;
	u16 flagnumber;
	u16 rsvdz;
};

union hv_monitor_trigger_state {
	u32 asu32;

	struct {
		u32 group_enable:4;
		u32 rsvdz:28;
	};
};

/* struct hv_monitor_page Layout */
/* ------------------------------------------------------ */
/* | 0   | TriggerState (4 bytes) | Rsvd1 (4 bytes)     | */
/* | 8   | TriggerGroup[0]                              | */
/* | 10  | TriggerGroup[1]                              | */
/* | 18  | TriggerGroup[2]                              | */
/* | 20  | TriggerGroup[3]                              | */
/* | 28  | Rsvd2[0]                                     | */
/* | 30  | Rsvd2[1]                                     | */
/* | 38  | Rsvd2[2]                                     | */
/* | 40  | NextCheckTime[0][0] | NextCheckTime[0][1]    | */
/* | ...                                                | */
/* | 240 | Latency[0][0..3]                             | */
/* | 340 | Rsvd3[0]                                     | */
/* | 440 | Parameter[0][0]                              | */
/* | 448 | Parameter[0][1]                              | */
/* | ...                                                | */
/* | 840 | Rsvd4[0]                                     | */
/* ------------------------------------------------------ */
struct hv_monitor_page {
	union hv_monitor_trigger_state trigger_state;
	u32 rsvdz1;

	union hv_monitor_trigger_group trigger_group[4];
	u64 rsvdz2[3];

	s32 next_checktime[4][32];

	u16 latency[4][32];
	u64 rsvdz3[32];

	struct hv_monitor_parameter parameter[4][32];

	u8 rsvdz4[1984];
};
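
/*
 * The offsets in the layout table above can be cross-checked against the
 * structure definition.  An illustrative compile-time sketch (not part of
 * this header; assumes static_assert() from <linux/build_bug.h> and
 * offsetof() are available):
 *
 *	static_assert(offsetof(struct hv_monitor_page, trigger_group) == 0x8);
 *	static_assert(offsetof(struct hv_monitor_page, next_checktime) == 0x40);
 *	static_assert(offsetof(struct hv_monitor_page, latency) == 0x240);
 *	static_assert(offsetof(struct hv_monitor_page, parameter) == 0x440);
 *	static_assert(offsetof(struct hv_monitor_page, rsvdz4) == 0x840);
 *	static_assert(sizeof(struct hv_monitor_page) == 4096);
 */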

#define HV_HYPERCALL_PARAM_ALIGN	sizeof(u64)

/* Definition of the hv_post_message hypercall input structure. */
struct hv_input_post_message {
	union hv_connection_id connectionid;
	u32 reserved;
	u32 message_type;
	u32 payload_size;
	u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
};

enum {
	VMBUS_MESSAGE_CONNECTION_ID = 1,
	VMBUS_MESSAGE_CONNECTION_ID_4 = 4,
	VMBUS_MESSAGE_PORT_ID = 1,
	VMBUS_EVENT_CONNECTION_ID = 2,
	VMBUS_EVENT_PORT_ID = 2,
	VMBUS_MONITOR_CONNECTION_ID = 3,
	VMBUS_MONITOR_PORT_ID = 3,
	VMBUS_MESSAGE_SINT = 2,
};

/*
 * Per-CPU state for channel handling
 */
struct hv_per_cpu_context {
	void *synic_message_page;
	void *synic_event_page;
	/*
	 * Buffer to post messages to the host.
	 */
	void *post_msg_page;

	/*
	 * Starting with win8, we can take channel interrupts on any CPU;
	 * we manage the tasklet that handles event messages on a per-CPU
	 * basis.
	 */
	struct tasklet_struct msg_dpc;
};

struct hv_context {
	/*
	 * We only support running on top of Hyper-V, so at this point
	 * this really can only contain the Hyper-V ID.
	 */
	u64 guestid;

	struct hv_per_cpu_context __percpu *cpu_context;

	/*
	 * To manage allocations in a NUMA node.
	 * Array indexed by NUMA node ID.
	 */
	struct cpumask *hv_numa_map;
};

extern struct hv_context hv_context;
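
/*
 * A minimal usage sketch (illustrative only, not part of the interface):
 * code running on a given CPU reaches its hv_per_cpu_context through the
 * __percpu pointer above, for example to schedule the message-handling
 * tasklet:
 *
 *	struct hv_per_cpu_context *hv_cpu =
 *		this_cpu_ptr(hv_context.cpu_context);
 *
 *	tasklet_schedule(&hv_cpu->msg_dpc);
 */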

/* Hv Interface */

extern int hv_init(void);

extern int hv_post_message(union hv_connection_id connection_id,
			   enum hv_message_type message_type,
			   void *payload, size_t payload_size);
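
/*
 * Calling sketch (illustrative only): posting a VMBus channel message to the
 * host.  The CHANNELMSG_UNLOAD header and the hv_connection_id field names
 * follow the definitions in linux/hyperv.h and asm/hyperv-tlfs.h; treat the
 * concrete message-type value and payload here as placeholders.
 *
 *	union hv_connection_id conn_id;
 *	struct vmbus_channel_message_header hdr = {
 *		.msgtype = CHANNELMSG_UNLOAD,
 *	};
 *	int ret;
 *
 *	conn_id.asu32 = 0;
 *	conn_id.u.id = VMBUS_MESSAGE_CONNECTION_ID;
 *	ret = hv_post_message(conn_id, 1, &hdr, sizeof(hdr));
 */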

extern int hv_synic_alloc(void);

extern void hv_synic_free(void);

extern void hv_synic_enable_regs(unsigned int cpu);
extern int hv_synic_init(unsigned int cpu);

extern void hv_synic_disable_regs(unsigned int cpu);
extern int hv_synic_cleanup(unsigned int cpu);

/* Interface */

void hv_ringbuffer_pre_init(struct vmbus_channel *channel);

int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 pagecnt);

void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);

int hv_ringbuffer_write(struct vmbus_channel *channel,
			const struct kvec *kv_list, u32 kv_count);

int hv_ringbuffer_read(struct vmbus_channel *channel,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool raw);

/*
 * The maximum number of channels (16384) is determined by the size of the
 * interrupt page, which is HV_HYP_PAGE_SIZE.  Half of HV_HYP_PAGE_SIZE is
 * used to send endpoint interrupts, and the other half is used to receive
 * endpoint interrupts.
 */
#define MAX_NUM_CHANNELS	((HV_HYP_PAGE_SIZE >> 1) << 3)
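
/*
 * Worked out: with HV_HYP_PAGE_SIZE = 4096 bytes, each half of the interrupt
 * page is 2048 bytes, and at one bit per channel that gives
 * 2048 * 8 = 16384 channels, which is exactly what
 * ((HV_HYP_PAGE_SIZE >> 1) << 3) evaluates to.
 */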

/* The value here must be a multiple of 32 */
/* TODO: Need to make this configurable */
#define MAX_NUM_CHANNELS_SUPPORTED	256

#define MAX_CHANNEL_RELIDS					\
	max(MAX_NUM_CHANNELS_SUPPORTED, HV_EVENT_FLAGS_COUNT)

enum vmbus_connect_state {
	DISCONNECTED,
	CONNECTING,
	CONNECTED,
	DISCONNECTING
};

#define MAX_SIZE_CHANNEL_MESSAGE	HV_MESSAGE_PAYLOAD_BYTE_COUNT

/*
 * The CPU that Hyper-V will interrupt for VMBUS messages, such as
 * CHANNELMSG_OFFERCHANNEL and CHANNELMSG_RESCIND_CHANNELOFFER.
 */
#define VMBUS_CONNECT_CPU	0

struct vmbus_connection {
	u32 msg_conn_id;

	atomic_t offer_in_progress;

	enum vmbus_connect_state conn_state;

	atomic_t next_gpadl_handle;

	struct completion unload_event;
	/*
	 * Represents channel interrupts.  Each bit position represents a
	 * channel.  When a channel sends an interrupt via VMBUS, it finds
	 * its bit in the sendInterruptPage, sets it, and calls Hyper-V to
	 * generate a port event.  The other end receives the port event and
	 * parses the recvInterruptPage to see which bit is set.
	 */
	void *int_page;
	void *send_int_page;
	void *recv_int_page;

	/*
	 * Two pages: the first page is for parent->child notification and
	 * the second is for child->parent notification.
	 */
	struct hv_monitor_page *monitor_pages[2];
	struct list_head chn_msg_list;
	spinlock_t channelmsg_lock;

	/* List of channels */
	struct list_head chn_list;
	struct mutex channel_mutex;

	/* Array of channels */
	struct vmbus_channel **channels;

	/*
	 * An offer message is handled first on the work_queue, and then
	 * is further handled on handle_primary_chan_wq or
	 * handle_sub_chan_wq.
	 */
	struct workqueue_struct *work_queue;
	struct workqueue_struct *handle_primary_chan_wq;
	struct workqueue_struct *handle_sub_chan_wq;

	/*
	 * The number of sub-channels and hv_sock channels that should be
	 * cleaned up upon suspend: sub-channels will be re-created upon
	 * resume, and hv_sock channels should not survive suspend.
	 */
	atomic_t nr_chan_close_on_suspend;
	/*
	 * vmbus_bus_suspend() waits for "nr_chan_close_on_suspend" to
	 * drop to zero.
	 */
	struct completion ready_for_suspend_event;

	/*
	 * The number of primary channels that should be "fixed up"
	 * upon resume: these channels are re-offered upon resume, and some
	 * fields of the channel offers (i.e. child_relid and connection_id)
	 * can change, so the old offermsg must be fixed up before the resume
	 * callbacks of the VSC drivers start to further touch the channels.
	 */
	atomic_t nr_chan_fixup_on_resume;
	/*
	 * vmbus_bus_resume() waits for "nr_chan_fixup_on_resume" to
	 * drop to zero.
	 */
	struct completion ready_for_resume_event;
};
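
/*
 * The counter/completion pairs above follow the usual "decrement and
 * complete when the count reaches zero" pattern.  A minimal sketch of the
 * suspend side (illustrative only; the real accounting lives in the channel
 * management and bus suspend/resume paths):
 *
 *	// When a channel that must be cleaned up for suspend is finally gone:
 *	if (atomic_dec_and_test(&vmbus_connection.nr_chan_close_on_suspend))
 *		complete(&vmbus_connection.ready_for_suspend_event);
 *
 *	// In the suspend path, after counting the channels to clean up:
 *	if (atomic_read(&vmbus_connection.nr_chan_close_on_suspend) > 0)
 *		wait_for_completion(&vmbus_connection.ready_for_suspend_event);
 */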

struct vmbus_msginfo {
	/* Bookkeeping stuff */
	struct list_head msglist_entry;

	/* The message itself */
	unsigned char msg[];
};

extern struct vmbus_connection vmbus_connection;

int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version);

static inline void vmbus_send_interrupt(u32 relid)
{
	sync_set_bit(relid, vmbus_connection.send_int_page);
}
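
/*
 * The receive direction works the same way in reverse: the host sets a
 * channel's bit in recv_int_page and signals a port event.  A minimal sketch
 * of scanning that page (illustrative only; the real event path lives in the
 * VMBus interrupt/event handling code, and this style of bit scan applies to
 * the legacy host protocol):
 *
 *	struct vmbus_channel *channel;
 *	unsigned long *recv_page = vmbus_connection.recv_int_page;
 *	u32 relid;
 *
 *	for (relid = 1; relid < MAX_NUM_CHANNELS; relid++) {
 *		if (!sync_test_and_clear_bit(relid, recv_page))
 *			continue;
 *		channel = relid2channel(relid);	// declared further below
 *		if (channel && channel->onchannel_callback)
 *			channel->onchannel_callback(
 *				channel->channel_callback_context);
 *	}
 */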

enum vmbus_message_handler_type {
	/* The related handler can sleep. */
	VMHT_BLOCKING = 0,

	/* The related handler must NOT sleep. */
	VMHT_NON_BLOCKING = 1,
};

struct vmbus_channel_message_table_entry {
	enum vmbus_channel_message_type message_type;
	enum vmbus_message_handler_type handler_type;
	void (*message_handler)(struct vmbus_channel_message_header *msg);
	u32 min_payload_len;
};

extern const struct vmbus_channel_message_table_entry
	channel_message_table[CHANNELMSG_COUNT];
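
/*
 * Dispatch sketch (illustrative only): roughly how a received channel
 * message is routed through the table.  The actual logic, including the
 * deferral of VMHT_BLOCKING handlers to a workqueue, lives in the VMBus
 * message DPC; "hdr", "payload_size" and "ctx" below are placeholders for
 * the received message header, its payload length and a per-message work
 * context.
 *
 *	const struct vmbus_channel_message_table_entry *entry;
 *
 *	if (hdr->msgtype >= CHANNELMSG_COUNT)
 *		return;
 *	entry = &channel_message_table[hdr->msgtype];
 *	if (payload_size < entry->min_payload_len)
 *		return;
 *	if (entry->handler_type == VMHT_BLOCKING)
 *		queue_work(vmbus_connection.work_queue, &ctx->work); // may sleep
 *	else if (entry->message_handler)
 *		entry->message_handler(hdr);
 */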

/* General vmbus interface */

struct hv_device *vmbus_device_create(const guid_t *type,
				      const guid_t *instance,
				      struct vmbus_channel *channel);

int vmbus_device_register(struct hv_device *child_device_obj);
void vmbus_device_unregister(struct hv_device *device_obj);
int vmbus_add_channel_kobj(struct hv_device *device_obj,
			   struct vmbus_channel *channel);

void vmbus_remove_channel_attr_group(struct vmbus_channel *channel);

void vmbus_channel_map_relid(struct vmbus_channel *channel);
void vmbus_channel_unmap_relid(struct vmbus_channel *channel);

struct vmbus_channel *relid2channel(u32 relid);

void vmbus_free_channels(void);

/* Connection interface */

int vmbus_connect(void);
void vmbus_disconnect(void);

int vmbus_post_msg(void *buffer, size_t buflen, bool can_sleep);

void vmbus_on_event(unsigned long data);
void vmbus_on_msg_dpc(unsigned long data);

int hv_kvp_init(struct hv_util_service *srv);
void hv_kvp_deinit(void);
int hv_kvp_pre_suspend(void);
int hv_kvp_pre_resume(void);
void hv_kvp_onchannelcallback(void *context);

int hv_vss_init(struct hv_util_service *srv);
void hv_vss_deinit(void);
int hv_vss_pre_suspend(void);
int hv_vss_pre_resume(void);
void hv_vss_onchannelcallback(void *context);

int hv_fcopy_init(struct hv_util_service *srv);
void hv_fcopy_deinit(void);
int hv_fcopy_pre_suspend(void);
int hv_fcopy_pre_resume(void);
void hv_fcopy_onchannelcallback(void *context);
void vmbus_initiate_unload(bool crash);

static inline void hv_poll_channel(struct vmbus_channel *channel,
				   void (*cb)(void *))
{
	if (!channel)
		return;
	cb(channel);
}

enum hvutil_device_state {
	HVUTIL_DEVICE_INIT = 0,  /* driver is loaded, waiting for userspace */
	HVUTIL_READY,            /* userspace is registered */
	HVUTIL_HOSTMSG_RECEIVED, /* message from the host was received */
	HVUTIL_USERSPACE_REQ,    /* request to userspace was sent */
	HVUTIL_USERSPACE_RECV,   /* reply from userspace was received */
	HVUTIL_DEVICE_DYING,     /* driver unload is in progress */
};

enum delay {
	INTERRUPT_DELAY = 0,
	MESSAGE_DELAY   = 1,
};

extern const struct vmbus_device vmbus_devs[];

static inline bool hv_is_perf_channel(struct vmbus_channel *channel)
{
	return vmbus_devs[channel->device_id].perf_device;
}

static inline bool hv_is_alloced_cpu(unsigned int cpu)
{
	struct vmbus_channel *channel, *sc;

	lockdep_assert_held(&vmbus_connection.channel_mutex);
	/*
	 * List additions/deletions as well as updates of the target CPUs are
	 * protected by channel_mutex.
	 */
	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (!hv_is_perf_channel(channel))
			continue;
		if (channel->target_cpu == cpu)
			return true;
		list_for_each_entry(sc, &channel->sc_list, sc_list) {
			if (sc->target_cpu == cpu)
				return true;
		}
	}
	return false;
}

static inline void hv_set_alloced_cpu(unsigned int cpu)
{
	cpumask_set_cpu(cpu, &hv_context.hv_numa_map[cpu_to_node(cpu)]);
}

static inline void hv_clear_alloced_cpu(unsigned int cpu)
{
	if (hv_is_alloced_cpu(cpu))
		return;
	cpumask_clear_cpu(cpu, &hv_context.hv_numa_map[cpu_to_node(cpu)]);
}

static inline void hv_update_alloced_cpus(unsigned int old_cpu,
					  unsigned int new_cpu)
{
	hv_set_alloced_cpu(new_cpu);
	hv_clear_alloced_cpu(old_cpu);
}

#ifdef CONFIG_HYPERV_TESTING

int hv_debug_add_dev_dir(struct hv_device *dev);
void hv_debug_rm_dev_dir(struct hv_device *dev);
void hv_debug_rm_all_dir(void);
int hv_debug_init(void);
void hv_debug_delay_test(struct vmbus_channel *channel, enum delay delay_type);

#else /* CONFIG_HYPERV_TESTING */

static inline void hv_debug_rm_dev_dir(struct hv_device *dev) {};
static inline void hv_debug_rm_all_dir(void) {};
static inline void hv_debug_delay_test(struct vmbus_channel *channel,
				       enum delay delay_type) {};
static inline int hv_debug_init(void)
{
	return -1;
}

static inline int hv_debug_add_dev_dir(struct hv_device *dev)
{
	return -1;
}

#endif /* CONFIG_HYPERV_TESTING */

#endif /* _HYPERV_VMBUS_H */