// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>

#include <asm/sync_bitops.h>
#include <asm/mshyperv.h>

#include "hyperv_net.h"
#include "netvsc_trace.h"

/*
 * Switch the data path from the synthetic interface to the VF
 * interface.
 */
int netvsc_switch_datapath(struct net_device *ndev, bool vf)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nv_dev = rtnl_dereference(net_device_ctx->nvdev);
	struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;
	int ret, retry = 0;

	/* Block sending traffic to VF if it's about to be gone */
	if (!vf)
		net_device_ctx->data_path_is_vf = vf;

	memset(init_pkt, 0, sizeof(struct nvsp_message));
	init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
	if (vf)
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_VF;
	else
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_SYNTHETIC;

again:
	trace_nvsp_send(ndev, init_pkt);

	ret = vmbus_sendpacket(dev->channel, init_pkt,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_pkt, VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	/* If failed to switch to/from VF, let data_path_is_vf stay false,
	 * so we use synthetic path to send data.
	 */
	if (ret) {
		if (ret != -EAGAIN) {
			netdev_err(ndev,
				   "Unable to send sw datapath msg, err: %d\n",
				   ret);
			return ret;
		}

		if (retry++ < RETRY_MAX) {
			usleep_range(RETRY_US_LO, RETRY_US_HI);
			goto again;
		} else {
			netdev_err(
				ndev,
				"Retry failed to send sw datapath msg, err: %d\n",
				ret);
			return ret;
		}
	}

	wait_for_completion(&nv_dev->channel_init_wait);
	net_device_ctx->data_path_is_vf = vf;

	return 0;
}

/* Worker to setup sub channels on initial setup
 * Initial hotplug event occurs in softirq context
 * and can't wait for channels.
 */
static void netvsc_subchan_work(struct work_struct *w)
{
	struct netvsc_device *nvdev =
		container_of(w, struct netvsc_device, subchan_work);
	struct rndis_device *rdev;
	int i, ret;

	/* Avoid deadlock with device removal already under RTNL */
	if (!rtnl_trylock()) {
		schedule_work(w);
		return;
	}

	rdev = nvdev->extension;
	if (rdev) {
		ret = rndis_set_subchannel(rdev->ndev, nvdev, NULL);
		if (ret == 0) {
			netif_device_attach(rdev->ndev);
		} else {
			/* fallback to only primary channel */
			for (i = 1; i < nvdev->num_chn; i++)
				netif_napi_del(&nvdev->chan_table[i].napi);

			nvdev->max_chn = 1;
			nvdev->num_chn = 1;
		}
	}

	rtnl_unlock();
}

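/* Allocate a netvsc_device and initialize its wait queues, defaults
 * and sub-channel work item.
 */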
static struct netvsc_device *alloc_net_device(void)
{
	struct netvsc_device *net_device;

	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
	if (!net_device)
		return NULL;

	init_waitqueue_head(&net_device->wait_drain);
	net_device->destroy = false;
	net_device->tx_disable = true;

	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;

	init_completion(&net_device->channel_init_wait);
	init_waitqueue_head(&net_device->subchan_open);
	INIT_WORK(&net_device->subchan_work, netvsc_subchan_work);

	return net_device;
}

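/* RCU callback that frees a netvsc_device once no readers remain,
 * unmapping the receive/send buffers first if they were remapped
 * for an isolation VM.
 */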
static void free_netvsc_device(struct rcu_head *head)
{
	struct netvsc_device *nvdev
		= container_of(head, struct netvsc_device, rcu);
	int i;

	kfree(nvdev->extension);

	if (nvdev->recv_original_buf) {
		hv_unmap_memory(nvdev->recv_buf);
		vfree(nvdev->recv_original_buf);
	} else {
		vfree(nvdev->recv_buf);
	}

	if (nvdev->send_original_buf) {
		hv_unmap_memory(nvdev->send_buf);
		vfree(nvdev->send_original_buf);
	} else {
		vfree(nvdev->send_buf);
	}

	bitmap_free(nvdev->send_section_map);

	for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
		xdp_rxq_info_unreg(&nvdev->chan_table[i].xdp_rxq);
		kfree(nvdev->chan_table[i].recv_buf);
		vfree(nvdev->chan_table[i].mrc.slots);
	}

	kfree(nvdev);
}

static void free_netvsc_device_rcu(struct netvsc_device *nvdev)
{
	call_rcu(&nvdev->rcu, free_netvsc_device);
}

static void netvsc_revoke_recv_buf(struct hv_device *device,
				   struct netvsc_device *net_device,
				   struct net_device *ndev)
{
	struct nvsp_message *revoke_packet;
	int ret;

	/*
	 * If we got a section count, it means we received a
	 * SendReceiveBufferComplete msg (ie sent
	 * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->recv_section_cnt) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
		revoke_packet->msg.v1_msg.
		revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

		trace_nvsp_send(ndev, revoke_packet);

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       VMBUS_RQST_ID_NO_RESPONSE,
				       VM_PKT_DATA_INBAND, 0);
		/* If the failure is because the channel is rescinded;
		 * ignore the failure since we cannot send on a rescinded
		 * channel. This would allow us to properly cleanup
		 * even when the channel is rescinded.
		 */
		if (device->channel->rescind)
			ret = 0;
		/*
		 * If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke receive buffer to netvsp\n");
			return;
		}
		net_device->recv_section_cnt = 0;
	}
}

static void netvsc_revoke_send_buf(struct hv_device *device,
				   struct netvsc_device *net_device,
				   struct net_device *ndev)
{
	struct nvsp_message *revoke_packet;
	int ret;

	/* Deal with the send buffer we may have setup.
	 * If we got a send section size, it means we received a
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (ie sent
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->send_section_cnt) {
		/* Send the revoke send buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
		revoke_packet->msg.v1_msg.revoke_send_buf.id =
			NETVSC_SEND_BUFFER_ID;

		trace_nvsp_send(ndev, revoke_packet);

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       VMBUS_RQST_ID_NO_RESPONSE,
				       VM_PKT_DATA_INBAND, 0);

		/* If the failure is because the channel is rescinded;
		 * ignore the failure since we cannot send on a rescinded
		 * channel. This would allow us to properly cleanup
		 * even when the channel is rescinded.
		 */
		if (device->channel->rescind)
			ret = 0;

		/* If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke send buffer to netvsp\n");
			return;
		}
		net_device->send_section_cnt = 0;
	}
}

static void netvsc_teardown_recv_gpadl(struct hv_device *device,
				       struct netvsc_device *net_device,
				       struct net_device *ndev)
{
	int ret;

	if (net_device->recv_buf_gpadl_handle.gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   &net_device->recv_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown receive buffer's gpadl\n");
			return;
		}
	}
}

static void netvsc_teardown_send_gpadl(struct hv_device *device,
				       struct netvsc_device *net_device,
				       struct net_device *ndev)
{
	int ret;

	if (net_device->send_buf_gpadl_handle.gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   &net_device->send_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown send buffer's gpadl\n");
			return;
		}
	}
}

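/* Allocate the receive completion slot ring for one channel,
 * preferring memory local to the channel's target CPU.
 */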
int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)
{
	struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
	int node = cpu_to_node(nvchan->channel->target_cpu);
	size_t size;

	size = net_device->recv_completion_cnt * sizeof(struct recv_comp_data);
	nvchan->mrc.slots = vzalloc_node(size, node);
	if (!nvchan->mrc.slots)
		nvchan->mrc.slots = vzalloc(size);

	return nvchan->mrc.slots ? 0 : -ENOMEM;
}

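/* Allocate the receive and send buffers, establish their GPADLs with
 * the host, and record the section layout the host reports back.
 */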
static int netvsc_init_buf(struct hv_device *device,
			   struct netvsc_device *net_device,
			   const struct netvsc_device_info *device_info)
{
	struct nvsp_1_message_send_receive_buffer_complete *resp;
	struct net_device *ndev = hv_get_drvdata(device);
	struct nvsp_message *init_packet;
	unsigned int buf_size;
	int i, ret = 0;
	void *vaddr;

	/* Get receive buffer area. */
	buf_size = device_info->recv_sections * device_info->recv_section_size;
	buf_size = roundup(buf_size, PAGE_SIZE);

	/* Legacy hosts only allow smaller receive buffer */
	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		buf_size = min_t(unsigned int, buf_size,
				 NETVSC_RECEIVE_BUFFER_SIZE_LEGACY);

	net_device->recv_buf = vzalloc(buf_size);
	if (!net_device->recv_buf) {
		netdev_err(ndev,
			   "unable to allocate receive buffer of size %u\n",
			   buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	net_device->recv_buf_size = buf_size;

	/*
	 * Establish the gpadl handle for this buffer on this
	 * channel. Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
				    buf_size,
				    &net_device->recv_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish receive buffer's gpadl\n");
		goto cleanup;
	}

	if (hv_isolation_type_snp()) {
		vaddr = hv_map_memory(net_device->recv_buf, buf_size);
		if (!vaddr) {
			ret = -ENOMEM;
			goto cleanup;
		}

		net_device->recv_original_buf = net_device->recv_buf;
		net_device->recv_buf = vaddr;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
	init_packet->msg.v1_msg.send_recv_buf.
		gpadl_handle = net_device->recv_buf_gpadl_handle.gpadl_handle;
	init_packet->msg.v1_msg.
		send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

	trace_nvsp_send(ndev, init_packet);

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send receive buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	resp = &init_packet->msg.v1_msg.send_recv_buf_complete;
	if (resp->status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev,
			   "Unable to complete receive buffer initialization with NetVsp - status %d\n",
			   resp->status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	netdev_dbg(ndev, "Receive sections: %u sub_allocs: size %u count: %u\n",
		   resp->num_sections, resp->sections[0].sub_alloc_size,
		   resp->sections[0].num_sub_allocs);

	/* There should only be one section for the entire receive buffer */
	if (resp->num_sections != 1 || resp->sections[0].offset != 0) {
		ret = -EINVAL;
		goto cleanup;
	}

	net_device->recv_section_size = resp->sections[0].sub_alloc_size;
	net_device->recv_section_cnt = resp->sections[0].num_sub_allocs;

	/* Ensure buffer will not overflow */
	if (net_device->recv_section_size < NETVSC_MTU_MIN || (u64)net_device->recv_section_size *
	    (u64)net_device->recv_section_cnt > (u64)buf_size) {
		netdev_err(ndev, "invalid recv_section_size %u\n",
			   net_device->recv_section_size);
		ret = -EINVAL;
		goto cleanup;
	}

	for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
		struct netvsc_channel *nvchan = &net_device->chan_table[i];

		nvchan->recv_buf = kzalloc(net_device->recv_section_size, GFP_KERNEL);
		if (nvchan->recv_buf == NULL) {
			ret = -ENOMEM;
			goto cleanup;
		}
	}

	/* Setup receive completion ring.
	 * Add 1 to the recv_section_cnt because at least one entry in a
	 * ring buffer has to be empty.
	 */
	net_device->recv_completion_cnt = net_device->recv_section_cnt + 1;
	ret = netvsc_alloc_recv_comp_ring(net_device, 0);
	if (ret)
		goto cleanup;

	/* Now setup the send buffer. */
	buf_size = device_info->send_sections * device_info->send_section_size;
	buf_size = round_up(buf_size, PAGE_SIZE);

	net_device->send_buf = vzalloc(buf_size);
	if (!net_device->send_buf) {
		netdev_err(ndev, "unable to allocate send buffer of size %u\n",
			   buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}
	net_device->send_buf_size = buf_size;

	/* Establish the gpadl handle for this buffer on this
	 * channel. Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
				    buf_size,
				    &net_device->send_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish send buffer's gpadl\n");
		goto cleanup;
	}

	if (hv_isolation_type_snp()) {
		vaddr = hv_map_memory(net_device->send_buf, buf_size);
		if (!vaddr) {
			ret = -ENOMEM;
			goto cleanup;
		}

		net_device->send_original_buf = net_device->send_buf;
		net_device->send_buf = vaddr;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
	init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
		net_device->send_buf_gpadl_handle.gpadl_handle;
	init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;

	trace_nvsp_send(ndev, init_packet);

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send send buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	if (init_packet->msg.v1_msg.
	    send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev, "Unable to complete send buffer "
			   "initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.
			   send_send_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	net_device->send_section_size = init_packet->msg.
				v1_msg.send_send_buf_complete.section_size;
	if (net_device->send_section_size < NETVSC_MTU_MIN) {
		netdev_err(ndev, "invalid send_section_size %u\n",
			   net_device->send_section_size);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Section count is simply the size divided by the section size. */
	net_device->send_section_cnt = buf_size / net_device->send_section_size;

	netdev_dbg(ndev, "Send section size: %d, Section count:%d\n",
		   net_device->send_section_size, net_device->send_section_cnt);

	/* Setup state for managing the send buffer. */
	net_device->send_section_map = bitmap_zalloc(net_device->send_section_cnt,
						     GFP_KERNEL);
	if (!net_device->send_section_map) {
		ret = -ENOMEM;
		goto cleanup;
	}

	goto exit;

cleanup:
	netvsc_revoke_recv_buf(device, net_device, ndev);
	netvsc_revoke_send_buf(device, net_device, ndev);
	netvsc_teardown_recv_gpadl(device, net_device, ndev);
	netvsc_teardown_send_gpadl(device, net_device, ndev);

exit:
	return ret;
}

/* Negotiate NVSP protocol version */
static int negotiate_nvsp_ver(struct hv_device *device,
			      struct netvsc_device *net_device,
			      struct nvsp_message *init_packet,
			      u32 nvsp_ver)
{
	struct net_device *ndev = hv_get_drvdata(device);
	int ret;

	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
	init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
	init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;
	trace_nvsp_send(ndev, init_packet);

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		return ret;

	wait_for_completion(&net_device->channel_init_wait);

	if (init_packet->msg.init_msg.init_complete.status !=
	    NVSP_STAT_SUCCESS)
		return -EINVAL;

	if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
		return 0;

	/* NVSPv2 or later: Send NDIS config */
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
	init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
	init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;

	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) {
		if (hv_is_isolation_supported())
			netdev_info(ndev, "SR-IOV not advertised by guests on the host supporting isolation\n");
		else
			init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;

		/* Teaming bit is needed to receive link speed updates */
		init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
	}

	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_61)
		init_packet->msg.v2_msg.send_ndis_config.capability.rsc = 1;

	trace_nvsp_send(ndev, init_packet);

	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       VMBUS_RQST_ID_NO_RESPONSE,
			       VM_PKT_DATA_INBAND, 0);

	return ret;
}

static int netvsc_connect_vsp(struct hv_device *device,
			      struct netvsc_device *net_device,
			      const struct netvsc_device_info *device_info)
{
	struct net_device *ndev = hv_get_drvdata(device);
	static const u32 ver_list[] = {
		NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
		NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5,
		NVSP_PROTOCOL_VERSION_6, NVSP_PROTOCOL_VERSION_61
	};
	struct nvsp_message *init_packet;
	int ndis_version, i, ret;

	init_packet = &net_device->channel_init_pkt;

	/* Negotiate the latest NVSP protocol supported */
	for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
		if (negotiate_nvsp_ver(device, net_device, init_packet,
				       ver_list[i]) == 0) {
			net_device->nvsp_version = ver_list[i];
			break;
		}

	if (i < 0) {
		ret = -EPROTO;
		goto cleanup;
	}

	if (hv_is_isolation_supported() && net_device->nvsp_version < NVSP_PROTOCOL_VERSION_61) {
		netdev_err(ndev, "Invalid NVSP version 0x%x (expected >= 0x%x) from the host supporting isolation\n",
			   net_device->nvsp_version, NVSP_PROTOCOL_VERSION_61);
		ret = -EPROTO;
		goto cleanup;
	}

	pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);

	/* Send the ndis version */
	memset(init_packet, 0, sizeof(struct nvsp_message));

	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
		ndis_version = 0x00060001;
	else
		ndis_version = 0x0006001e;

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_major_ver =
				(ndis_version & 0xFFFF0000) >> 16;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_minor_ver =
				ndis_version & 0xFFFF;

	trace_nvsp_send(ndev, init_packet);

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       VMBUS_RQST_ID_NO_RESPONSE,
			       VM_PKT_DATA_INBAND, 0);
	if (ret != 0)
		goto cleanup;


	ret = netvsc_init_buf(device, net_device, device_info);

cleanup:
	return ret;
}

/*
 * netvsc_device_remove - Callback when the root bus device is removed
 */
void netvsc_device_remove(struct hv_device *device)
{
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct netvsc_device *net_device
		= rtnl_dereference(net_device_ctx->nvdev);
	int i;

	/*
	 * Revoke receive buffer. If host is pre-Win2016 then tear down
	 * receive buffer GPADL. Do the same for send buffer.
	 */
	netvsc_revoke_recv_buf(device, net_device, ndev);
	if (vmbus_proto_version < VERSION_WIN10)
		netvsc_teardown_recv_gpadl(device, net_device, ndev);

	netvsc_revoke_send_buf(device, net_device, ndev);
	if (vmbus_proto_version < VERSION_WIN10)
		netvsc_teardown_send_gpadl(device, net_device, ndev);

	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);

	/* Disable NAPI and disassociate its context from the device. */
	for (i = 0; i < net_device->num_chn; i++) {
		/* See also vmbus_reset_channel_cb(). */
		napi_disable(&net_device->chan_table[i].napi);
		netif_napi_del(&net_device->chan_table[i].napi);
	}

	/*
	 * At this point, no one should be accessing net_device
	 * except in here
	 */
	netdev_dbg(ndev, "net device safe to remove\n");

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

	/*
	 * If host is Win2016 or higher then we do the GPADL tear down
	 * here after VMBus is closed.
	 */
	if (vmbus_proto_version >= VERSION_WIN10) {
		netvsc_teardown_recv_gpadl(device, net_device, ndev);
		netvsc_teardown_send_gpadl(device, net_device, ndev);
	}

	/* Release all resources */
	free_netvsc_device_rcu(net_device);
}

#define RING_AVAIL_PERCENT_HIWATER 20
#define RING_AVAIL_PERCENT_LOWATER 10

static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
					 u32 index)
{
	sync_change_bit(index, net_device->send_section_map);
}

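/* Completion handler for RNDIS data packets: releases the send-buffer
 * section, updates per-queue stats and wakes the queue once enough
 * ring space is available again.
 */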
static void netvsc_send_tx_complete(struct net_device *ndev,
				    struct netvsc_device *net_device,
				    struct vmbus_channel *channel,
				    const struct vmpacket_descriptor *desc,
				    int budget)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct sk_buff *skb;
	u16 q_idx = 0;
	int queue_sends;
	u64 cmd_rqst;

	cmd_rqst = channel->request_addr_callback(channel, (u64)desc->trans_id);
	if (cmd_rqst == VMBUS_RQST_ERROR) {
		netdev_err(ndev, "Incorrect transaction id\n");
		return;
	}

	skb = (struct sk_buff *)(unsigned long)cmd_rqst;

	/* Notify the layer above us */
	if (likely(skb)) {
		struct hv_netvsc_packet *packet
			= (struct hv_netvsc_packet *)skb->cb;
		u32 send_index = packet->send_buf_index;
		struct netvsc_stats *tx_stats;

		if (send_index != NETVSC_INVALID_INDEX)
			netvsc_free_send_slot(net_device, send_index);
		q_idx = packet->q_idx;

		tx_stats = &net_device->chan_table[q_idx].tx_stats;

		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->packets += packet->total_packets;
		tx_stats->bytes += packet->total_bytes;
		u64_stats_update_end(&tx_stats->syncp);

		netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
		napi_consume_skb(skb, budget);
	}

	queue_sends =
		atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);

	if (unlikely(net_device->destroy)) {
		if (queue_sends == 0)
			wake_up(&net_device->wait_drain);
	} else {
		struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);

		if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
		    (hv_get_avail_to_write_percent(&channel->outbound) >
		     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
			netif_tx_wake_queue(txq);
			ndev_ctx->eth_stats.wake_queue++;
		}
	}
}

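/* Dispatch a completion message from the host, validating that the
 * payload is large enough for the indicated message type.
 */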
static void netvsc_send_completion(struct net_device *ndev,
				   struct netvsc_device *net_device,
				   struct vmbus_channel *incoming_channel,
				   const struct vmpacket_descriptor *desc,
				   int budget)
{
	const struct nvsp_message *nvsp_packet;
	u32 msglen = hv_pkt_datalen(desc);
	struct nvsp_message *pkt_rqst;
	u64 cmd_rqst;

	/* First check if this is a VMBUS completion without data payload */
	if (!msglen) {
		cmd_rqst = incoming_channel->request_addr_callback(incoming_channel,
								   (u64)desc->trans_id);
		if (cmd_rqst == VMBUS_RQST_ERROR) {
			netdev_err(ndev, "Invalid transaction id\n");
			return;
		}

		pkt_rqst = (struct nvsp_message *)(uintptr_t)cmd_rqst;
		switch (pkt_rqst->hdr.msg_type) {
		case NVSP_MSG4_TYPE_SWITCH_DATA_PATH:
			complete(&net_device->channel_init_wait);
			break;

		default:
			netdev_err(ndev, "Unexpected VMBUS completion!!\n");
		}
		return;
	}

	/* Ensure packet is big enough to read header fields */
	if (msglen < sizeof(struct nvsp_message_header)) {
		netdev_err(ndev, "nvsp_message length too small: %u\n", msglen);
		return;
	}

	nvsp_packet = hv_pkt_data(desc);
	switch (nvsp_packet->hdr.msg_type) {
	case NVSP_MSG_TYPE_INIT_COMPLETE:
		if (msglen < sizeof(struct nvsp_message_header) +
				sizeof(struct nvsp_message_init_complete)) {
			netdev_err(ndev, "nvsp_msg length too small: %u\n",
				   msglen);
			return;
		}
		fallthrough;

	case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
		if (msglen < sizeof(struct nvsp_message_header) +
				sizeof(struct nvsp_1_message_send_receive_buffer_complete)) {
			netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
				   msglen);
			return;
		}
		fallthrough;

	case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE:
		if (msglen < sizeof(struct nvsp_message_header) +
				sizeof(struct nvsp_1_message_send_send_buffer_complete)) {
			netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
				   msglen);
			return;
		}
		fallthrough;

	case NVSP_MSG5_TYPE_SUBCHANNEL:
		if (msglen < sizeof(struct nvsp_message_header) +
				sizeof(struct nvsp_5_subchannel_complete)) {
			netdev_err(ndev, "nvsp_msg5 length too small: %u\n",
				   msglen);
			return;
		}
		/* Copy the response back */
		memcpy(&net_device->channel_init_pkt, nvsp_packet,
		       sizeof(struct nvsp_message));
		complete(&net_device->channel_init_wait);
		break;

	case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
		netvsc_send_tx_complete(ndev, net_device, incoming_channel,
					desc, budget);
		break;

	default:
		netdev_err(ndev,
			   "Unknown send completion type %d received!!\n",
			   nvsp_packet->hdr.msg_type);
	}
}

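/* Atomically claim a free send-buffer section, or return
 * NETVSC_INVALID_INDEX if all sections are in use.
 */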
static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
{
	unsigned long *map_addr = net_device->send_section_map;
	unsigned int i;

	for_each_clear_bit(i, map_addr, net_device->send_section_cnt) {
		if (sync_test_and_set_bit(i, map_addr) == 0)
			return i;
	}

	return NETVSC_INVALID_INDEX;
}

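/* Copy the packet's page fragments into the chosen send-buffer
 * section, padding up to the RNDIS alignment when more packets are
 * expected in the same batch.
 */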
static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
				    unsigned int section_index,
				    u32 pend_size,
				    struct hv_netvsc_packet *packet,
				    struct rndis_message *rndis_msg,
				    struct hv_page_buffer *pb,
				    bool xmit_more)
{
	char *start = net_device->send_buf;
	char *dest = start + (section_index * net_device->send_section_size)
		     + pend_size;
	int i;
	u32 padding = 0;
	u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
		packet->page_buf_cnt;
	u32 remain;

	/* Add padding */
	remain = packet->total_data_buflen & (net_device->pkt_align - 1);
	if (xmit_more && remain) {
		padding = net_device->pkt_align - remain;
		rndis_msg->msg_len += padding;
		packet->total_data_buflen += padding;
	}

	for (i = 0; i < page_count; i++) {
		char *src = phys_to_virt(pb[i].pfn << HV_HYP_PAGE_SHIFT);
		u32 offset = pb[i].offset;
		u32 len = pb[i].len;

		memcpy(dest, (src + offset), len);
		dest += len;
	}

	if (padding)
		memset(dest, 0, padding);
}

void netvsc_dma_unmap(struct hv_device *hv_dev,
		      struct hv_netvsc_packet *packet)
{
	u32 page_count = packet->cp_partial ?
		packet->page_buf_cnt - packet->rmsg_pgcnt :
		packet->page_buf_cnt;
	int i;

	if (!hv_is_isolation_supported())
		return;

	if (!packet->dma_range)
		return;

	for (i = 0; i < page_count; i++)
		dma_unmap_single(&hv_dev->device, packet->dma_range[i].dma,
				 packet->dma_range[i].mapping_size,
				 DMA_TO_DEVICE);

	kfree(packet->dma_range);
}

/* netvsc_dma_map - Map swiotlb bounce buffer with data page of
 * packet sent by vmbus_sendpacket_pagebuffer() in the Isolation
 * VM.
 *
 * In isolation VM, netvsc send buffer has been marked visible to
 * host and so the data copied to send buffer doesn't need to use
 * bounce buffer. The data pages handled by vmbus_sendpacket_pagebuffer()
 * may not be copied to send buffer and so these pages need to be
 * mapped with swiotlb bounce buffer. netvsc_dma_map() is to do
 * that. The pfns in the struct hv_page_buffer need to be converted
 * to bounce buffer's pfn. The loop here is necessary because the
 * entries in the page buffer array are not necessarily full
 * pages of data. Each entry in the array has a separate offset and
 * len that may be non-zero, even for entries in the middle of the
 * array. And the entries are not physically contiguous. So each
 * entry must be individually mapped rather than as a contiguous unit.
 * So dma_map_sg() is not used here.
 */
static int netvsc_dma_map(struct hv_device *hv_dev,
			  struct hv_netvsc_packet *packet,
			  struct hv_page_buffer *pb)
{
	u32 page_count = packet->cp_partial ?
		packet->page_buf_cnt - packet->rmsg_pgcnt :
		packet->page_buf_cnt;
	dma_addr_t dma;
	int i;

	if (!hv_is_isolation_supported())
		return 0;

	packet->dma_range = kcalloc(page_count,
				    sizeof(*packet->dma_range),
				    GFP_KERNEL);
	if (!packet->dma_range)
		return -ENOMEM;

	for (i = 0; i < page_count; i++) {
		char *src = phys_to_virt((pb[i].pfn << HV_HYP_PAGE_SHIFT)
					 + pb[i].offset);
		u32 len = pb[i].len;

		dma = dma_map_single(&hv_dev->device, src, len,
				     DMA_TO_DEVICE);
		if (dma_mapping_error(&hv_dev->device, dma)) {
			kfree(packet->dma_range);
			return -ENOMEM;
		}

		/* pb[].offset and pb[].len are not changed during dma mapping
		 * and so not reassign.
		 */
		packet->dma_range[i].dma = dma;
		packet->dma_range[i].mapping_size = len;
		pb[i].pfn = dma >> HV_HYP_PAGE_SHIFT;
	}

	return 0;
}

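/* Send a single packet to the host, either inband or as a page
 * buffer (DMA-mapped on isolation VMs), and apply queue flow control
 * based on the remaining ring space.
 */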
static inline int netvsc_send_pkt(
	struct hv_device *device,
	struct hv_netvsc_packet *packet,
	struct netvsc_device *net_device,
	struct hv_page_buffer *pb,
	struct sk_buff *skb)
{
	struct nvsp_message nvmsg;
	struct nvsp_1_message_send_rndis_packet *rpkt =
		&nvmsg.msg.v1_msg.send_rndis_pkt;
	struct netvsc_channel * const nvchan =
		&net_device->chan_table[packet->q_idx];
	struct vmbus_channel *out_channel = nvchan->channel;
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
	u64 req_id;
	int ret;
	u32 ring_avail = hv_get_avail_to_write_percent(&out_channel->outbound);

	memset(&nvmsg, 0, sizeof(struct nvsp_message));
	nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
	if (skb)
		rpkt->channel_type = 0;		/* 0 is RMC_DATA */
	else
		rpkt->channel_type = 1;		/* 1 is RMC_CONTROL */

	rpkt->send_buf_section_index = packet->send_buf_index;
	if (packet->send_buf_index == NETVSC_INVALID_INDEX)
		rpkt->send_buf_section_size = 0;
	else
		rpkt->send_buf_section_size = packet->total_data_buflen;

	req_id = (ulong)skb;

	if (out_channel->rescind)
		return -ENODEV;

	trace_nvsp_send_pkt(ndev, out_channel, rpkt);

	packet->dma_range = NULL;
	if (packet->page_buf_cnt) {
		if (packet->cp_partial)
			pb += packet->rmsg_pgcnt;

		ret = netvsc_dma_map(ndev_ctx->device_ctx, packet, pb);
		if (ret) {
			ret = -EAGAIN;
			goto exit;
		}

		ret = vmbus_sendpacket_pagebuffer(out_channel,
						  pb, packet->page_buf_cnt,
						  &nvmsg, sizeof(nvmsg),
						  req_id);

		if (ret)
			netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
	} else {
		ret = vmbus_sendpacket(out_channel,
				       &nvmsg, sizeof(nvmsg),
				       req_id, VM_PKT_DATA_INBAND,
				       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	}

exit:
	if (ret == 0) {
		atomic_inc_return(&nvchan->queue_sends);

		if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
			netif_tx_stop_queue(txq);
			ndev_ctx->eth_stats.stop_queue++;
		}
	} else if (ret == -EAGAIN) {
		netif_tx_stop_queue(txq);
		ndev_ctx->eth_stats.stop_queue++;
	} else {
		netdev_err(ndev,
			   "Unable to send packet pages %u len %u, ret %d\n",
			   packet->page_buf_cnt, packet->total_data_buflen,
			   ret);
	}

	if (netif_tx_queue_stopped(txq) &&
	    atomic_read(&nvchan->queue_sends) < 1 &&
	    !net_device->tx_disable) {
		netif_tx_wake_queue(txq);
		ndev_ctx->eth_stats.wake_queue++;
		if (ret == -EAGAIN)
			ret = -ENOSPC;
	}

	return ret;
}

Haiyang Zhangc85e4922016-01-25 09:49:31 -08001161/* Move packet out of multi send data (msd), and clear msd */
1162static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
1163 struct sk_buff **msd_skb,
1164 struct multi_send_data *msdp)
1165{
1166 *msd_skb = msdp->skb;
1167 *msd_send = msdp->pkt;
1168 msdp->skb = NULL;
1169 msdp->pkt = NULL;
1170 msdp->count = 0;
1171}
1172
stephen hemminger2a926f72017-07-19 11:53:17 -07001173/* RCU already held by caller */
Shachar Raindelbd49fea2021-03-12 15:45:27 -08001174/* Batching/bouncing logic is designed to optimize performance
 1175 * (a simplified sketch of these batching rules follows netvsc_send() below).
1176 *
1177 * For small, non-LSO packets we copy the packet to a send buffer
1178 * which is pre-registered with the Hyper-V side. This enables the
1179 * hypervisor to avoid remapping the aperture to access the packet
1180 * descriptor and data.
1181 *
1182 * If we already started using a buffer and the netdev is transmitting
1183 * a burst of packets, keep on copying into the buffer until it is
1184 * full or we are done collecting a burst. If there is an existing
1185 * buffer with space for the RNDIS descriptor but not the packet, copy
1186 * the RNDIS descriptor to the buffer, keeping the packet in place.
1187 *
1188 * If we do batching and send more than one packet using a single
1189 * NetVSC message, free the SKBs of the packets copied, except for the
1190 * last packet. This is done to streamline the handling of the case
1191 * where the last packet only had the RNDIS descriptor copied to the
1192 * send buffer, with the data pointers included in the NetVSC message.
1193 */
Stephen Hemmingercfd8afd2017-12-12 16:48:40 -08001194int netvsc_send(struct net_device *ndev,
KY Srinivasan24476762015-12-01 16:43:06 -08001195 struct hv_netvsc_packet *packet,
KY Srinivasana9f2e2d2015-12-01 16:43:13 -08001196 struct rndis_message *rndis_msg,
stephen hemminger02b6de02017-07-28 08:59:44 -07001197 struct hv_page_buffer *pb,
Haiyang Zhang351e1582020-01-23 13:52:34 -08001198 struct sk_buff *skb,
1199 bool xdp_tx)
Haiyang Zhang7c3877f2015-03-26 09:03:37 -07001200{
Stephen Hemmingercfd8afd2017-12-12 16:48:40 -08001201 struct net_device_context *ndev_ctx = netdev_priv(ndev);
stephen hemminger39629812017-07-19 11:53:19 -07001202 struct netvsc_device *net_device
stephen hemminger867047c2017-07-28 08:59:42 -07001203 = rcu_dereference_bh(ndev_ctx->nvdev);
stephen hemminger2a926f72017-07-19 11:53:17 -07001204 struct hv_device *device = ndev_ctx->device_ctx;
Stephen Hemminger6c4c1372016-08-23 12:17:55 -07001205 int ret = 0;
stephen hemmingerb8b835a2017-01-24 13:06:07 -08001206 struct netvsc_channel *nvchan;
Haiyang Zhang7c3877f2015-03-26 09:03:37 -07001207 u32 pktlen = packet->total_data_buflen, msd_len = 0;
1208 unsigned int section_index = NETVSC_INVALID_INDEX;
Haiyang Zhang7c3877f2015-03-26 09:03:37 -07001209 struct multi_send_data *msdp;
1210 struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
Haiyang Zhangc85e4922016-01-25 09:49:31 -08001211 struct sk_buff *msd_skb = NULL;
Stephen Hemmingercfd8afd2017-12-12 16:48:40 -08001212 bool try_batch, xmit_more;
Haiyang Zhang7c3877f2015-03-26 09:03:37 -07001213
stephen hemminger592b4fe2017-06-08 16:21:23 -07001214 /* If device is rescinded, return error and packet will get dropped. */
stephen hemminger2a926f72017-07-19 11:53:17 -07001215 if (unlikely(!net_device || net_device->destroy))
Haiyang Zhang7c3877f2015-03-26 09:03:37 -07001216 return -ENODEV;
1217
stephen hemmingerb8b835a2017-01-24 13:06:07 -08001218 nvchan = &net_device->chan_table[packet->q_idx];
Haiyang Zhang7c3877f2015-03-26 09:03:37 -07001219 packet->send_buf_index = NETVSC_INVALID_INDEX;
Haiyang Zhangaa0a34b2015-04-13 16:34:35 -07001220 packet->cp_partial = false;
Haiyang Zhang7c3877f2015-03-26 09:03:37 -07001221
Haiyang Zhang351e1582020-01-23 13:52:34 -08001222 /* Send a control message or XDP packet directly without accessing
1223 * msd (Multi-Send Data) field which may be changed during data packet
1224 * processing.
Haiyang Zhangcf8190e2015-12-10 12:19:35 -08001225 */
Haiyang Zhang351e1582020-01-23 13:52:34 -08001226 if (!skb || xdp_tx)
Stephen Hemminger12f69662018-03-02 13:49:01 -08001227 return netvsc_send_pkt(device, packet, net_device, pb, skb);
Haiyang Zhangcf8190e2015-12-10 12:19:35 -08001228
Haiyang Zhang7c3877f2015-03-26 09:03:37 -07001229 /* batch packets in send buffer if possible */
stephen hemmingerb8b835a2017-01-24 13:06:07 -08001230 msdp = &nvchan->msd;
Haiyang Zhang7c3877f2015-03-26 09:03:37 -07001231 if (msdp->pkt)
1232 msd_len = msdp->pkt->total_data_buflen;
1233
stephen hemmingerebc1dcf2017-03-22 14:51:04 -07001234 try_batch = msd_len > 0 && msdp->count < net_device->max_pkt;
Haiyang Zhangaa0a34b2015-04-13 16:34:35 -07001235 if (try_batch && msd_len + pktlen + net_device->pkt_align <
Haiyang Zhang7c3877f2015-03-26 09:03:37 -07001236 net_device->send_section_size) {
1237 section_index = msdp->pkt->send_buf_index;
1238
Haiyang Zhangaa0a34b2015-04-13 16:34:35 -07001239 } else if (try_batch && msd_len + packet->rmsg_size <
1240 net_device->send_section_size) {
1241 section_index = msdp->pkt->send_buf_index;
1242 packet->cp_partial = true;
1243
stephen hemmingerebc1dcf2017-03-22 14:51:04 -07001244 } else if (pktlen + net_device->pkt_align <
Haiyang Zhang7c3877f2015-03-26 09:03:37 -07001245 net_device->send_section_size) {
1246 section_index = netvsc_get_next_send_section(net_device);
stephen hemmingercad5c192017-08-09 17:46:12 -07001247 if (unlikely(section_index == NETVSC_INVALID_INDEX)) {
1248 ++ndev_ctx->eth_stats.tx_send_full;
1249 } else {
Haiyang Zhangc85e4922016-01-25 09:49:31 -08001250 move_pkt_msd(&msd_send, &msd_skb, msdp);
1251 msd_len = 0;
Haiyang Zhang7c3877f2015-03-26 09:03:37 -07001252 }
1253 }
1254
Stephen Hemmingercfd8afd2017-12-12 16:48:40 -08001255	/* Keep aggregating only if the stack says more data is coming,
 1256	 * we are not doing a mixed-mode send, and the queue is not flow blocked.
1257 */
Florian Westphal6b16f9e2019-04-01 16:42:14 +02001258 xmit_more = netdev_xmit_more() &&
Stephen Hemmingercfd8afd2017-12-12 16:48:40 -08001259 !packet->cp_partial &&
1260 !netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));
1261
Haiyang Zhang7c3877f2015-03-26 09:03:37 -07001262 if (section_index != NETVSC_INVALID_INDEX) {
1263 netvsc_copy_to_send_buf(net_device,
1264 section_index, msd_len,
Stephen Hemmingercfd8afd2017-12-12 16:48:40 -08001265 packet, rndis_msg, pb, xmit_more);
KY Srinivasanb08cc792015-03-29 21:08:42 -07001266
Haiyang Zhang7c3877f2015-03-26 09:03:37 -07001267 packet->send_buf_index = section_index;
Haiyang Zhangaa0a34b2015-04-13 16:34:35 -07001268
1269 if (packet->cp_partial) {
1270 packet->page_buf_cnt -= packet->rmsg_pgcnt;
1271 packet->total_data_buflen = msd_len + packet->rmsg_size;
1272 } else {
1273 packet->page_buf_cnt = 0;
1274 packet->total_data_buflen += msd_len;
Haiyang Zhangaa0a34b2015-04-13 16:34:35 -07001275 }
Haiyang Zhang7c3877f2015-03-26 09:03:37 -07001276
stephen hemminger793e3952017-01-24 13:06:12 -08001277 if (msdp->pkt) {
1278 packet->total_packets += msdp->pkt->total_packets;
1279 packet->total_bytes += msdp->pkt->total_bytes;
1280 }
1281
Haiyang Zhangc85e4922016-01-25 09:49:31 -08001282 if (msdp->skb)
Stephen Hemminger17db4bc2016-09-22 16:56:29 -07001283 dev_consume_skb_any(msdp->skb);
Haiyang Zhangee90b812015-04-06 15:22:54 -07001284
Stephen Hemmingercfd8afd2017-12-12 16:48:40 -08001285 if (xmit_more) {
Haiyang Zhangc85e4922016-01-25 09:49:31 -08001286 msdp->skb = skb;
Haiyang Zhang7c3877f2015-03-26 09:03:37 -07001287 msdp->pkt = packet;
1288 msdp->count++;
1289 } else {
1290 cur_send = packet;
Haiyang Zhangc85e4922016-01-25 09:49:31 -08001291 msdp->skb = NULL;
Haiyang Zhang7c3877f2015-03-26 09:03:37 -07001292 msdp->pkt = NULL;
1293 msdp->count = 0;
1294 }
1295 } else {
Haiyang Zhangc85e4922016-01-25 09:49:31 -08001296 move_pkt_msd(&msd_send, &msd_skb, msdp);
Haiyang Zhang7c3877f2015-03-26 09:03:37 -07001297 cur_send = packet;
1298 }
1299
Haiyang Zhang7c3877f2015-03-26 09:03:37 -07001300 if (msd_send) {
Stephen Hemminger6c4c1372016-08-23 12:17:55 -07001301 int m_ret = netvsc_send_pkt(device, msd_send, net_device,
1302 NULL, msd_skb);
Haiyang Zhang7c3877f2015-03-26 09:03:37 -07001303
1304 if (m_ret != 0) {
1305 netvsc_free_send_slot(net_device,
1306 msd_send->send_buf_index);
Haiyang Zhangc85e4922016-01-25 09:49:31 -08001307 dev_kfree_skb_any(msd_skb);
Haiyang Zhang7c3877f2015-03-26 09:03:37 -07001308 }
1309 }
1310
1311 if (cur_send)
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001312 ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);
Haiyang Zhang7c3877f2015-03-26 09:03:37 -07001313
Jerry Snitselaar7aab5152015-05-04 10:57:16 -07001314 if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
1315 netvsc_free_send_slot(net_device, section_index);
Haiyang Zhangd953ca42015-01-29 12:34:49 -08001316
Hank Janssenfceaf242009-07-13 15:34:54 -07001317 return ret;
1318}
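
/* Illustrative sketch only, not driver code: the send-buffer batching rules
 * documented above netvsc_send() reduced to their arithmetic. The parameter
 * names mirror the fields used in netvsc_send() (send_section_size,
 * pkt_align, rmsg_size, max_pkt); the enum and the helper name are invented
 * for this example.
 */
enum nvsc_batch_action {
	NVSC_BATCH_PKT,		/* append RNDIS descriptor + data to the current section */
	NVSC_BATCH_RNDIS,	/* append the descriptor only, i.e. cp_partial */
	NVSC_NEW_SECTION,	/* flush pending data and start a new section */
	NVSC_SEND_DIRECT	/* too big for a section, send as page buffers */
};

static enum nvsc_batch_action __maybe_unused
nvsc_batch_decision(u32 msd_len, u32 msd_count, u32 max_pkt, u32 pktlen,
		    u32 rmsg_size, u32 pkt_align, u32 section_size)
{
	bool try_batch = msd_len > 0 && msd_count < max_pkt;

	if (try_batch && msd_len + pktlen + pkt_align < section_size)
		return NVSC_BATCH_PKT;
	if (try_batch && msd_len + rmsg_size < section_size)
		return NVSC_BATCH_RNDIS;
	if (pktlen + pkt_align < section_size)
		return NVSC_NEW_SECTION;
	return NVSC_SEND_DIRECT;
}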
1319
stephen hemminger7426b1a2017-07-28 08:59:45 -07001320/* Send pending recv completions */
stephen hemmingercad5c192017-08-09 17:46:12 -07001321static int send_recv_completions(struct net_device *ndev,
1322 struct netvsc_device *nvdev,
1323 struct netvsc_channel *nvchan)
Haiyang Zhang5fa9d3c2011-04-21 12:30:42 -07001324{
stephen hemminger7426b1a2017-07-28 08:59:45 -07001325 struct multi_recv_comp *mrc = &nvchan->mrc;
1326 struct recv_comp_msg {
1327 struct nvsp_message_header hdr;
1328 u32 status;
1329 } __packed;
1330 struct recv_comp_msg msg = {
1331 .hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE,
1332 };
Haiyang Zhang5fa9d3c2011-04-21 12:30:42 -07001333 int ret;
1334
stephen hemminger7426b1a2017-07-28 08:59:45 -07001335 while (mrc->first != mrc->next) {
1336 const struct recv_comp_data *rcd
1337 = mrc->slots + mrc->first;
Haiyang Zhang5fa9d3c2011-04-21 12:30:42 -07001338
stephen hemminger7426b1a2017-07-28 08:59:45 -07001339 msg.status = rcd->status;
1340 ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg),
1341 rcd->tid, VM_PKT_COMP, 0);
stephen hemmingercad5c192017-08-09 17:46:12 -07001342 if (unlikely(ret)) {
1343 struct net_device_context *ndev_ctx = netdev_priv(ndev);
1344
1345 ++ndev_ctx->eth_stats.rx_comp_busy;
stephen hemminger7426b1a2017-07-28 08:59:45 -07001346 return ret;
stephen hemmingercad5c192017-08-09 17:46:12 -07001347 }
Haiyang Zhang5fa9d3c2011-04-21 12:30:42 -07001348
stephen hemminger7426b1a2017-07-28 08:59:45 -07001349 if (++mrc->first == nvdev->recv_completion_cnt)
1350 mrc->first = 0;
1351 }
Haiyang Zhang5fa9d3c2011-04-21 12:30:42 -07001352
stephen hemminger7426b1a2017-07-28 08:59:45 -07001353 /* receive completion ring has been emptied */
1354 if (unlikely(nvdev->destroy))
1355 wake_up(&nvdev->wait_drain);
1356
1357 return 0;
Haiyang Zhangc0b558e2016-08-19 14:47:09 -07001358}
1359
stephen hemminger7426b1a2017-07-28 08:59:45 -07001360/* Count how many receive completions are outstanding */
1361static void recv_comp_slot_avail(const struct netvsc_device *nvdev,
1362 const struct multi_recv_comp *mrc,
1363 u32 *filled, u32 *avail)
Haiyang Zhangc0b558e2016-08-19 14:47:09 -07001364{
stephen hemminger7426b1a2017-07-28 08:59:45 -07001365 u32 count = nvdev->recv_completion_cnt;
Haiyang Zhangc0b558e2016-08-19 14:47:09 -07001366
stephen hemminger7426b1a2017-07-28 08:59:45 -07001367 if (mrc->next >= mrc->first)
1368 *filled = mrc->next - mrc->first;
1369 else
1370 *filled = (count - mrc->first) + mrc->next;
Haiyang Zhangc0b558e2016-08-19 14:47:09 -07001371
stephen hemminger7426b1a2017-07-28 08:59:45 -07001372 *avail = count - *filled - 1;
Haiyang Zhangc0b558e2016-08-19 14:47:09 -07001373}
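
/* Worked example of the wrap-around arithmetic above, using illustrative
 * values: with recv_completion_cnt = 8, first = 6 and next = 2, the ring has
 * filled = (8 - 6) + 2 = 4 outstanding completions and
 * avail  = 8 - 4 - 1   = 3 free slots. One slot is always left unused so
 * that first == next unambiguously means "ring empty", which is the
 * condition send_recv_completions() loops on.
 */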
1374
stephen hemminger7426b1a2017-07-28 08:59:45 -07001375/* Add receive complete to ring to send to host. */
1376static void enq_receive_complete(struct net_device *ndev,
1377 struct netvsc_device *nvdev, u16 q_idx,
1378 u64 tid, u32 status)
Haiyang Zhangc0b558e2016-08-19 14:47:09 -07001379{
stephen hemminger7426b1a2017-07-28 08:59:45 -07001380 struct netvsc_channel *nvchan = &nvdev->chan_table[q_idx];
1381 struct multi_recv_comp *mrc = &nvchan->mrc;
1382 struct recv_comp_data *rcd;
Haiyang Zhangc0b558e2016-08-19 14:47:09 -07001383 u32 filled, avail;
1384
stephen hemminger7426b1a2017-07-28 08:59:45 -07001385 recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
Haiyang Zhangc0b558e2016-08-19 14:47:09 -07001386
stephen hemminger7426b1a2017-07-28 08:59:45 -07001387 if (unlikely(filled > NAPI_POLL_WEIGHT)) {
stephen hemmingercad5c192017-08-09 17:46:12 -07001388 send_recv_completions(ndev, nvdev, nvchan);
stephen hemminger7426b1a2017-07-28 08:59:45 -07001389 recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
Haiyang Zhang5fa9d3c2011-04-21 12:30:42 -07001390 }
Haiyang Zhang5fa9d3c2011-04-21 12:30:42 -07001391
stephen hemminger7426b1a2017-07-28 08:59:45 -07001392 if (unlikely(!avail)) {
1393 netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
1394 q_idx, tid);
1395 return;
1396 }
Haiyang Zhangc0b558e2016-08-19 14:47:09 -07001397
stephen hemminger7426b1a2017-07-28 08:59:45 -07001398 rcd = mrc->slots + mrc->next;
1399 rcd->tid = tid;
1400 rcd->status = status;
Haiyang Zhangc0b558e2016-08-19 14:47:09 -07001401
stephen hemminger7426b1a2017-07-28 08:59:45 -07001402 if (++mrc->next == nvdev->recv_completion_cnt)
1403 mrc->next = 0;
Haiyang Zhangc0b558e2016-08-19 14:47:09 -07001404}
1405
stephen hemminger15a863b2017-02-27 10:26:49 -08001406static int netvsc_receive(struct net_device *ndev,
stephen hemminger7426b1a2017-07-28 08:59:45 -07001407 struct netvsc_device *net_device,
Haiyang Zhangc8e4eff2018-09-21 18:20:35 +00001408 struct netvsc_channel *nvchan,
Andres Beltran44144182020-09-16 11:47:27 +02001409 const struct vmpacket_descriptor *desc)
Hank Janssenfceaf242009-07-13 15:34:54 -07001410{
Stephen Hemmingerc347b922018-04-26 14:34:25 -07001411 struct net_device_context *net_device_ctx = netdev_priv(ndev);
Haiyang Zhangc8e4eff2018-09-21 18:20:35 +00001412 struct vmbus_channel *channel = nvchan->channel;
stephen hemmingerf3dd3f42017-02-27 10:26:48 -08001413 const struct vmtransfer_page_packet_header *vmxferpage_packet
1414 = container_of(desc, const struct vmtransfer_page_packet_header, d);
Andres Beltran44144182020-09-16 11:47:27 +02001415 const struct nvsp_message *nvsp = hv_pkt_data(desc);
1416 u32 msglen = hv_pkt_datalen(desc);
stephen hemminger15a863b2017-02-27 10:26:49 -08001417 u16 q_idx = channel->offermsg.offer.sub_channel_index;
stephen hemmingerdc54a082017-01-24 13:06:08 -08001418 char *recv_buf = net_device->recv_buf;
Haiyang Zhang4baab262014-04-21 14:54:43 -07001419 u32 status = NVSP_STAT_SUCCESS;
Haiyang Zhang45326342011-12-15 13:45:15 -08001420 int i;
1421 int count = 0;
K. Y. Srinivasan779b4d12011-04-26 09:20:22 -07001422
Andres Beltran44144182020-09-16 11:47:27 +02001423 /* Ensure packet is big enough to read header fields */
1424 if (msglen < sizeof(struct nvsp_message_header)) {
1425 netif_err(net_device_ctx, rx_err, ndev,
1426 "invalid nvsp header, length too small: %u\n",
1427 msglen);
1428 return 0;
1429 }
1430
Bill Pemberton454f18a2009-07-27 16:47:24 -04001431 /* Make sure this is a valid nvsp packet */
stephen hemmingerdc54a082017-01-24 13:06:08 -08001432 if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
1433 netif_err(net_device_ctx, rx_err, ndev,
1434 "Unknown nvsp packet type received %u\n",
1435 nvsp->hdr.msg_type);
stephen hemminger15a863b2017-02-27 10:26:49 -08001436 return 0;
Hank Janssenfceaf242009-07-13 15:34:54 -07001437 }
1438
Andres Beltran44144182020-09-16 11:47:27 +02001439 /* Validate xfer page pkt header */
1440 if ((desc->offset8 << 3) < sizeof(struct vmtransfer_page_packet_header)) {
1441 netif_err(net_device_ctx, rx_err, ndev,
1442 "Invalid xfer page pkt, offset too small: %u\n",
1443 desc->offset8 << 3);
1444 return 0;
1445 }
1446
stephen hemmingerdc54a082017-01-24 13:06:08 -08001447 if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
1448 netif_err(net_device_ctx, rx_err, ndev,
1449 "Invalid xfer page set id - expecting %x got %x\n",
1450 NETVSC_RECEIVE_BUFFER_ID,
1451 vmxferpage_packet->xfer_pageset_id);
stephen hemminger15a863b2017-02-27 10:26:49 -08001452 return 0;
Hank Janssenfceaf242009-07-13 15:34:54 -07001453 }
1454
Haiyang Zhang4baab262014-04-21 14:54:43 -07001455 count = vmxferpage_packet->range_cnt;
Hank Janssenfceaf242009-07-13 15:34:54 -07001456
Andres Beltran44144182020-09-16 11:47:27 +02001457 /* Check count for a valid value */
1458 if (NETVSC_XFER_HEADER_SIZE(count) > desc->offset8 << 3) {
1459 netif_err(net_device_ctx, rx_err, ndev,
1460 "Range count is not valid: %d\n",
1461 count);
1462 return 0;
1463 }
1464
Bill Pemberton454f18a2009-07-27 16:47:24 -04001465 /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
Haiyang Zhang4baab262014-04-21 14:54:43 -07001466 for (i = 0; i < count; i++) {
Haiyang Zhangc5d24bd2018-03-22 12:01:14 -07001467 u32 offset = vmxferpage_packet->ranges[i].byte_offset;
stephen hemmingerdc54a082017-01-24 13:06:08 -08001468 u32 buflen = vmxferpage_packet->ranges[i].byte_count;
Haiyang Zhangc5d24bd2018-03-22 12:01:14 -07001469 void *data;
Haiyang Zhang5c71dad2018-03-22 12:01:13 -07001470 int ret;
Hank Janssenfceaf242009-07-13 15:34:54 -07001471
Andres Beltran44144182020-09-16 11:47:27 +02001472 if (unlikely(offset > net_device->recv_buf_size ||
1473 buflen > net_device->recv_buf_size - offset)) {
Haiyang Zhangc8e4eff2018-09-21 18:20:35 +00001474 nvchan->rsc.cnt = 0;
Haiyang Zhangc5d24bd2018-03-22 12:01:14 -07001475 status = NVSP_STAT_FAIL;
1476 netif_err(net_device_ctx, rx_err, ndev,
1477 "Packet offset:%u + len:%u too big\n",
1478 offset, buflen);
1479
1480 continue;
1481 }
1482
Andrea Parri (Microsoft)0ba35fe2021-01-26 17:29:07 +01001483 /* We're going to copy (sections of) the packet into nvchan->recv_buf;
1484 * make sure that nvchan->recv_buf is large enough to hold the packet.
1485 */
1486 if (unlikely(buflen > net_device->recv_section_size)) {
1487 nvchan->rsc.cnt = 0;
1488 status = NVSP_STAT_FAIL;
1489 netif_err(net_device_ctx, rx_err, ndev,
1490 "Packet too big: buflen=%u recv_section_size=%u\n",
1491 buflen, net_device->recv_section_size);
1492
1493 continue;
1494 }
1495
Haiyang Zhangc5d24bd2018-03-22 12:01:14 -07001496 data = recv_buf + offset;
1497
Haiyang Zhangc8e4eff2018-09-21 18:20:35 +00001498 nvchan->rsc.is_last = (i == count - 1);
1499
Stephen Hemmingerec966382018-03-16 15:44:28 -07001500 trace_rndis_recv(ndev, q_idx, data);
1501
Bill Pemberton454f18a2009-07-27 16:47:24 -04001502 /* Pass it to the upper layer */
Haiyang Zhang5c71dad2018-03-22 12:01:13 -07001503 ret = rndis_filter_receive(ndev, net_device,
Haiyang Zhangc8e4eff2018-09-21 18:20:35 +00001504 nvchan, data, buflen);
Haiyang Zhang5c71dad2018-03-22 12:01:13 -07001505
Andrea Parri (Microsoft)12bc8df2021-02-03 12:36:02 +01001506 if (unlikely(ret != NVSP_STAT_SUCCESS)) {
1507 /* Drop incomplete packet */
1508 nvchan->rsc.cnt = 0;
Haiyang Zhang5c71dad2018-03-22 12:01:13 -07001509 status = NVSP_STAT_FAIL;
Andrea Parri (Microsoft)12bc8df2021-02-03 12:36:02 +01001510 }
Hank Janssenfceaf242009-07-13 15:34:54 -07001511 }
1512
stephen hemminger7426b1a2017-07-28 08:59:45 -07001513 enq_receive_complete(ndev, net_device, q_idx,
1514 vmxferpage_packet->d.trans_id, status);
stephen hemminger15a863b2017-02-27 10:26:49 -08001515
stephen hemminger15a863b2017-02-27 10:26:49 -08001516 return count;
Hank Janssenfceaf242009-07-13 15:34:54 -07001517}
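
/* Illustrative sketch only, not driver code: the per-range sanity checks in
 * netvsc_receive() above collapse to the invariant below. offset and buflen
 * come from the host and are untrusted; the helper name is invented for this
 * example.
 */
static bool __maybe_unused nvsc_recv_range_ok(u32 offset, u32 buflen,
					      u32 recv_buf_size,
					      u32 recv_section_size)
{
	return offset <= recv_buf_size &&
	       buflen <= recv_buf_size - offset &&
	       buflen <= recv_section_size;
}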
1518
Stephen Hemmingerc347b922018-04-26 14:34:25 -07001519static void netvsc_send_table(struct net_device *ndev,
Haiyang Zhang171c1fd2019-11-21 13:33:41 -08001520 struct netvsc_device *nvscdev,
Haiyang Zhang71f21952019-11-21 13:33:40 -08001521 const struct nvsp_message *nvmsg,
1522 u32 msglen)
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001523{
stephen hemminger7ce10122017-03-09 14:58:29 -08001524 struct net_device_context *net_device_ctx = netdev_priv(ndev);
Haiyang Zhang71f21952019-11-21 13:33:40 -08001525 u32 count, offset, *tab;
Stephen Hemmingerc347b922018-04-26 14:34:25 -07001526 int i;
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001527
Andres Beltran44144182020-09-16 11:47:27 +02001528 /* Ensure packet is big enough to read send_table fields */
1529 if (msglen < sizeof(struct nvsp_message_header) +
1530 sizeof(struct nvsp_5_send_indirect_table)) {
1531 netdev_err(ndev, "nvsp_v5_msg length too small: %u\n", msglen);
1532 return;
1533 }
1534
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001535 count = nvmsg->msg.v5_msg.send_table.count;
Haiyang Zhang71f21952019-11-21 13:33:40 -08001536 offset = nvmsg->msg.v5_msg.send_table.offset;
1537
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001538 if (count != VRSS_SEND_TAB_SIZE) {
1539 netdev_err(ndev, "Received wrong send-table size:%u\n", count);
1540 return;
1541 }
1542
Haiyang Zhang171c1fd2019-11-21 13:33:41 -08001543 /* If negotiated version <= NVSP_PROTOCOL_VERSION_6, the offset may be
1544 * wrong due to a host bug. So fix the offset here.
1545 */
1546 if (nvscdev->nvsp_version <= NVSP_PROTOCOL_VERSION_6 &&
1547 msglen >= sizeof(struct nvsp_message_header) +
1548 sizeof(union nvsp_6_message_uber) + count * sizeof(u32))
1549 offset = sizeof(struct nvsp_message_header) +
1550 sizeof(union nvsp_6_message_uber);
1551
1552 /* Boundary check for all versions */
Andrea Parri (Microsoft)505e3f02021-01-14 21:26:28 +01001553 if (msglen < count * sizeof(u32) || offset > msglen - count * sizeof(u32)) {
Haiyang Zhang71f21952019-11-21 13:33:40 -08001554 netdev_err(ndev, "Received send-table offset too big:%u\n",
1555 offset);
1556 return;
1557 }
1558
1559 tab = (void *)nvmsg + offset;
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001560
1561 for (i = 0; i < count; i++)
Haiyang Zhang39e91cf2017-10-13 12:28:04 -07001562 net_device_ctx->tx_table[i] = tab[i];
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001563}
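
/* Note on the workaround above (descriptive only): for NVSP versions up to 6,
 * when the message is long enough to hold a full-size message body plus the
 * table, the host-supplied offset is ignored and the table is assumed to
 * start right after the fixed-size header and union, i.e. at
 * sizeof(struct nvsp_message_header) + sizeof(union nvsp_6_message_uber).
 * The boundary check that follows still applies, so a short or malformed
 * message is rejected either way.
 */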
1564
Stephen Hemmingerc347b922018-04-26 14:34:25 -07001565static void netvsc_send_vf(struct net_device *ndev,
Andres Beltran44144182020-09-16 11:47:27 +02001566 const struct nvsp_message *nvmsg,
1567 u32 msglen)
Haiyang Zhang71790a22015-07-24 10:08:40 -07001568{
Stephen Hemmingerc347b922018-04-26 14:34:25 -07001569 struct net_device_context *net_device_ctx = netdev_priv(ndev);
1570
Andres Beltran44144182020-09-16 11:47:27 +02001571 /* Ensure packet is big enough to read its fields */
1572 if (msglen < sizeof(struct nvsp_message_header) +
1573 sizeof(struct nvsp_4_send_vf_association)) {
1574 netdev_err(ndev, "nvsp_v4_msg length too small: %u\n", msglen);
1575 return;
1576 }
1577
Vitaly Kuznetsovf9a7da92016-08-15 17:48:39 +02001578 net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
1579 net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
Stephen Hemminger00d7ddb2018-09-14 12:54:57 -07001580 netdev_info(ndev, "VF slot %u %s\n",
1581 net_device_ctx->vf_serial,
1582 net_device_ctx->vf_alloc ? "added" : "removed");
Haiyang Zhang71790a22015-07-24 10:08:40 -07001583}
1584
Haiyang Zhang71f21952019-11-21 13:33:40 -08001585static void netvsc_receive_inband(struct net_device *ndev,
Haiyang Zhang171c1fd2019-11-21 13:33:41 -08001586 struct netvsc_device *nvscdev,
Andres Beltran44144182020-09-16 11:47:27 +02001587 const struct vmpacket_descriptor *desc)
Haiyang Zhang71790a22015-07-24 10:08:40 -07001588{
Andres Beltran44144182020-09-16 11:47:27 +02001589 const struct nvsp_message *nvmsg = hv_pkt_data(desc);
1590 u32 msglen = hv_pkt_datalen(desc);
1591
1592 /* Ensure packet is big enough to read header fields */
1593 if (msglen < sizeof(struct nvsp_message_header)) {
1594 netdev_err(ndev, "inband nvsp_message length too small: %u\n", msglen);
1595 return;
1596 }
1597
Haiyang Zhang71790a22015-07-24 10:08:40 -07001598 switch (nvmsg->hdr.msg_type) {
1599 case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
Haiyang Zhang171c1fd2019-11-21 13:33:41 -08001600 netvsc_send_table(ndev, nvscdev, nvmsg, msglen);
Haiyang Zhang71790a22015-07-24 10:08:40 -07001601 break;
1602
1603 case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
Andrea Parri (Microsoft)96854bb2021-02-01 15:48:14 +01001604 if (hv_is_isolation_supported())
1605 netdev_err(ndev, "Ignore VF_ASSOCIATION msg from the host supporting isolation\n");
1606 else
1607 netvsc_send_vf(ndev, nvmsg, msglen);
Haiyang Zhang71790a22015-07-24 10:08:40 -07001608 break;
1609 }
1610}
1611
stephen hemminger15a863b2017-02-27 10:26:49 -08001612static int netvsc_process_raw_pkt(struct hv_device *device,
Haiyang Zhangc8e4eff2018-09-21 18:20:35 +00001613 struct netvsc_channel *nvchan,
stephen hemminger15a863b2017-02-27 10:26:49 -08001614 struct netvsc_device *net_device,
1615 struct net_device *ndev,
stephen hemmingerf9645432017-04-07 14:41:19 -04001616 const struct vmpacket_descriptor *desc,
1617 int budget)
K. Y. Srinivasan99a50bb2016-07-05 16:52:46 -07001618{
Haiyang Zhangc8e4eff2018-09-21 18:20:35 +00001619 struct vmbus_channel *channel = nvchan->channel;
Stephen Hemmingerc347b922018-04-26 14:34:25 -07001620 const struct nvsp_message *nvmsg = hv_pkt_data(desc);
K. Y. Srinivasan99a50bb2016-07-05 16:52:46 -07001621
Stephen Hemmingerec966382018-03-16 15:44:28 -07001622 trace_nvsp_recv(ndev, channel, nvmsg);
1623
K. Y. Srinivasan99a50bb2016-07-05 16:52:46 -07001624 switch (desc->type) {
1625 case VM_PKT_COMP:
Andres Beltran44144182020-09-16 11:47:27 +02001626 netvsc_send_completion(ndev, net_device, channel, desc, budget);
K. Y. Srinivasan99a50bb2016-07-05 16:52:46 -07001627 break;
1628
1629 case VM_PKT_DATA_USING_XFER_PAGES:
Andres Beltran44144182020-09-16 11:47:27 +02001630 return netvsc_receive(ndev, net_device, nvchan, desc);
K. Y. Srinivasan99a50bb2016-07-05 16:52:46 -07001631 break;
1632
1633 case VM_PKT_DATA_INBAND:
Andres Beltran44144182020-09-16 11:47:27 +02001634 netvsc_receive_inband(ndev, net_device, desc);
K. Y. Srinivasan99a50bb2016-07-05 16:52:46 -07001635 break;
1636
1637 default:
1638 netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
stephen hemmingerf4f1c232017-03-22 14:50:57 -07001639 desc->type, desc->trans_id);
K. Y. Srinivasan99a50bb2016-07-05 16:52:46 -07001640 break;
1641 }
stephen hemminger15a863b2017-02-27 10:26:49 -08001642
1643 return 0;
1644}
1645
1646static struct hv_device *netvsc_channel_to_device(struct vmbus_channel *channel)
1647{
1648 struct vmbus_channel *primary = channel->primary_channel;
1649
1650 return primary ? primary->device_obj : channel->device_obj;
1651}
1652
stephen hemminger262b7f12017-03-16 16:12:38 -07001653/* Network processing softirq
1654 * Process data in incoming ring buffer from host
1655 * Stops when ring is empty or budget is met or exceeded.
1656 */
stephen hemminger15a863b2017-02-27 10:26:49 -08001657int netvsc_poll(struct napi_struct *napi, int budget)
1658{
1659 struct netvsc_channel *nvchan
1660 = container_of(napi, struct netvsc_channel, napi);
stephen hemminger35fbbcc2017-07-19 11:53:18 -07001661 struct netvsc_device *net_device = nvchan->net_device;
stephen hemminger15a863b2017-02-27 10:26:49 -08001662 struct vmbus_channel *channel = nvchan->channel;
1663 struct hv_device *device = netvsc_channel_to_device(channel);
stephen hemminger15a863b2017-02-27 10:26:49 -08001664 struct net_device *ndev = hv_get_drvdata(device);
stephen hemminger15a863b2017-02-27 10:26:49 -08001665 int work_done = 0;
Haiyang Zhang6b81b192018-07-17 17:11:13 +00001666 int ret;
stephen hemminger15a863b2017-02-27 10:26:49 -08001667
stephen hemmingerf4f1c232017-03-22 14:50:57 -07001668 /* If starting a new interval */
1669 if (!nvchan->desc)
1670 nvchan->desc = hv_pkt_iter_first(channel);
stephen hemminger15a863b2017-02-27 10:26:49 -08001671
stephen hemmingerf4f1c232017-03-22 14:50:57 -07001672 while (nvchan->desc && work_done < budget) {
Haiyang Zhangc8e4eff2018-09-21 18:20:35 +00001673 work_done += netvsc_process_raw_pkt(device, nvchan, net_device,
stephen hemmingerf9645432017-04-07 14:41:19 -04001674 ndev, nvchan->desc, budget);
stephen hemmingerf4f1c232017-03-22 14:50:57 -07001675 nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
stephen hemminger15a863b2017-02-27 10:26:49 -08001676 }
stephen hemminger15a863b2017-02-27 10:26:49 -08001677
Haiyang Zhang6b81b192018-07-17 17:11:13 +00001678 /* Send any pending receive completions */
1679 ret = send_recv_completions(ndev, net_device, nvchan);
1680
1681 /* If it did not exhaust NAPI budget this time
1682 * and not doing busy poll
stephen hemmingerf4e40362017-07-28 08:59:47 -07001683 * then re-enable host interrupts
Haiyang Zhang6b81b192018-07-17 17:11:13 +00001684 * and reschedule if ring is not empty
1685 * or sending receive completion failed.
stephen hemminger262b7f12017-03-16 16:12:38 -07001686 */
Haiyang Zhang6b81b192018-07-17 17:11:13 +00001687 if (work_done < budget &&
stephen hemminger15a863b2017-02-27 10:26:49 -08001688 napi_complete_done(napi, work_done) &&
Haiyang Zhang6b81b192018-07-17 17:11:13 +00001689 (ret || hv_end_read(&channel->inbound)) &&
Stephen Hemmingerd64e38a2018-03-02 13:49:05 -08001690 napi_schedule_prep(napi)) {
stephen hemminger7426b1a2017-07-28 08:59:45 -07001691 hv_begin_read(&channel->inbound);
Stephen Hemmingerd64e38a2018-03-02 13:49:05 -08001692 __napi_schedule(napi);
stephen hemminger7426b1a2017-07-28 08:59:45 -07001693 }
stephen hemmingerf4f1c232017-03-22 14:50:57 -07001694
1695 /* Driver may overshoot since multiple packets per descriptor */
1696 return min(work_done, budget);
K. Y. Srinivasan99a50bb2016-07-05 16:52:46 -07001697}
1698
stephen hemminger262b7f12017-03-16 16:12:38 -07001699/* Call back when data is available in host ring buffer.
1700 * Processing is deferred until network softirq (NAPI)
1701 */
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001702void netvsc_channel_cb(void *context)
Hank Janssenfceaf242009-07-13 15:34:54 -07001703{
stephen hemminger6de38af2017-03-16 16:12:37 -07001704 struct netvsc_channel *nvchan = context;
stephen hemminger43bf99c2017-07-24 10:57:27 -07001705 struct vmbus_channel *channel = nvchan->channel;
1706 struct hv_ring_buffer_info *rbi = &channel->inbound;
1707
1708 /* preload first vmpacket descriptor */
1709 prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index);
stephen hemminger0b307eb2017-01-24 13:05:58 -08001710
stephen hemmingerf4f1c232017-03-22 14:50:57 -07001711 if (napi_schedule_prep(&nvchan->napi)) {
Adrian Vladu52d3b492019-01-03 19:43:08 +00001712 /* disable interrupts from host */
stephen hemminger43bf99c2017-07-24 10:57:27 -07001713 hv_begin_read(rbi);
stephen hemminger0d6dd352017-03-09 15:04:14 -08001714
Stephen Hemminger68633ed2018-03-02 13:49:06 -08001715 __napi_schedule_irqoff(&nvchan->napi);
stephen hemmingerf4f1c232017-03-22 14:50:57 -07001716 }
Hank Janssenfceaf242009-07-13 15:34:54 -07001717}
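
/* Taken together, netvsc_channel_cb() and netvsc_poll() implement the usual
 * NAPI handshake: the channel callback masks further host interrupts
 * (hv_begin_read) and schedules the poller; the poller drains descriptors up
 * to the budget, flushes pending receive completions, and only re-enables
 * host interrupts (hv_end_read) after napi_complete_done(), re-scheduling
 * itself if more data or a failed completion send raced in.
 */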
Haiyang Zhangaf24ce42011-04-21 12:30:40 -07001718
1719/*
Haiyang Zhangb637e022011-04-21 12:30:45 -07001720 * netvsc_device_add - Callback when the device belonging to this
1721 * driver is added
1722 */
stephen hemminger9749fed2017-07-19 11:53:16 -07001723struct netvsc_device *netvsc_device_add(struct hv_device *device,
1724 const struct netvsc_device_info *device_info)
Haiyang Zhangb637e022011-04-21 12:30:45 -07001725{
Vitaly Kuznetsov88098832016-05-13 13:55:25 +02001726 int i, ret = 0;
Haiyang Zhangb637e022011-04-21 12:30:45 -07001727 struct netvsc_device *net_device;
Vitaly Kuznetsov88098832016-05-13 13:55:25 +02001728 struct net_device *ndev = hv_get_drvdata(device);
1729 struct net_device_context *net_device_ctx = netdev_priv(ndev);
Haiyang Zhangb637e022011-04-21 12:30:45 -07001730
Vitaly Kuznetsov88098832016-05-13 13:55:25 +02001731 net_device = alloc_net_device();
Dan Carpenterb1c84922014-09-04 14:11:23 +03001732 if (!net_device)
stephen hemminger9749fed2017-07-19 11:53:16 -07001733 return ERR_PTR(-ENOMEM);
Haiyang Zhangb637e022011-04-21 12:30:45 -07001734
Haiyang Zhang6b0cbe32017-10-13 12:28:05 -07001735 for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
1736 net_device_ctx->tx_table[i] = 0;
1737
stephen hemminger15a863b2017-02-27 10:26:49 -08001738 /* Because the device uses NAPI, all the interrupt batching and
1739 * control is done via Net softirq, not the channel handling
1740 */
1741 set_channel_read_mode(device->channel, HV_CALL_ISR);
1742
K. Y. Srinivasanbffb1842017-04-06 14:59:21 -07001743 /* If we're reopening the device we may have multiple queues, fill the
1744 * chn_table with the default channel to use it before subchannels are
1745 * opened.
1746 * Initialize the channel state before we open;
1747 * we can be interrupted as soon as we open the channel.
1748 */
1749
1750 for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
1751 struct netvsc_channel *nvchan = &net_device->chan_table[i];
1752
1753 nvchan->channel = device->channel;
stephen hemminger35fbbcc2017-07-19 11:53:18 -07001754 nvchan->net_device = net_device;
Florian Fainelli4a0dee12017-08-01 12:11:12 -07001755 u64_stats_init(&nvchan->tx_stats.syncp);
1756 u64_stats_init(&nvchan->rx_stats.syncp);
Haiyang Zhang351e1582020-01-23 13:52:34 -08001757
Björn Töpelb02e5a02020-11-30 19:52:01 +01001758 ret = xdp_rxq_info_reg(&nvchan->xdp_rxq, ndev, i, 0);
Haiyang Zhang351e1582020-01-23 13:52:34 -08001759
1760 if (ret) {
1761 netdev_err(ndev, "xdp_rxq_info_reg fail: %d\n", ret);
1762 goto cleanup2;
1763 }
1764
1765 ret = xdp_rxq_info_reg_mem_model(&nvchan->xdp_rxq,
1766 MEM_TYPE_PAGE_SHARED, NULL);
1767
1768 if (ret) {
1769 netdev_err(ndev, "xdp reg_mem_model fail: %d\n", ret);
1770 goto cleanup2;
1771 }
K. Y. Srinivasanbffb1842017-04-06 14:59:21 -07001772 }
1773
stephen hemminger2be0f262017-05-03 16:59:21 -07001774 /* Enable NAPI handler before init callbacks */
1775 netif_napi_add(ndev, &net_device->chan_table[0].napi,
1776 netvsc_poll, NAPI_POLL_WEIGHT);
1777
Haiyang Zhangb637e022011-04-21 12:30:45 -07001778 /* Open the channel */
Andrea Parri (Microsoft)bf5fd8c2021-05-10 23:08:41 +02001779 device->channel->next_request_id_callback = vmbus_next_request_id;
1780 device->channel->request_addr_callback = vmbus_request_addr;
Andres Beltran4d18fcc2020-11-09 11:04:02 +01001781 device->channel->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes);
Andres Beltranadae1e92021-04-08 18:14:39 +02001782 device->channel->max_pkt_size = NETVSC_MAX_PKT_SIZE;
1783
Stephen Hemmingera7f99d02017-12-01 11:01:47 -08001784 ret = vmbus_open(device->channel, netvsc_ring_bytes,
1785 netvsc_ring_bytes, NULL, 0,
1786 netvsc_channel_cb, net_device->chan_table);
Haiyang Zhangb637e022011-04-21 12:30:45 -07001787
1788 if (ret != 0) {
Haiyang Zhangd9871152011-09-01 12:19:41 -07001789 netdev_err(ndev, "unable to open channel: %d\n", ret);
Haiyang Zhangb637e022011-04-21 12:30:45 -07001790 goto cleanup;
1791 }
1792
1793 /* Channel is opened */
Vitaly Kuznetsov93ba2222016-11-28 18:25:44 +01001794 netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");
Haiyang Zhangb637e022011-04-21 12:30:45 -07001795
stephen hemminger15a863b2017-02-27 10:26:49 -08001796 napi_enable(&net_device->chan_table[0].napi);
Vitaly Kuznetsov88098832016-05-13 13:55:25 +02001797
Haiyang Zhangb637e022011-04-21 12:30:45 -07001798 /* Connect with the NetVsp */
stephen hemminger8b532792017-08-09 17:46:11 -07001799 ret = netvsc_connect_vsp(device, net_device, device_info);
Haiyang Zhangb637e022011-04-21 12:30:45 -07001800 if (ret != 0) {
Haiyang Zhangd9871152011-09-01 12:19:41 -07001801 netdev_err(ndev,
Haiyang Zhangc909ebb2011-09-01 12:19:40 -07001802 "unable to connect to NetVSP - %d\n", ret);
Haiyang Zhangb637e022011-04-21 12:30:45 -07001803 goto close;
1804 }
1805
Stephen Hemminger12f69662018-03-02 13:49:01 -08001806	/* Writing the nvdev pointer unlocks netvsc_send(); make sure chn_table is
1807 * populated.
1808 */
1809 rcu_assign_pointer(net_device_ctx->nvdev, net_device);
1810
stephen hemminger9749fed2017-07-19 11:53:16 -07001811 return net_device;
Haiyang Zhangb637e022011-04-21 12:30:45 -07001812
1813close:
stephen hemminger49393342017-07-28 08:59:46 -07001814 RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
1815 napi_disable(&net_device->chan_table[0].napi);
stephen hemminger15a863b2017-02-27 10:26:49 -08001816
Haiyang Zhangb637e022011-04-21 12:30:45 -07001817 /* Now, we can close the channel safely */
1818 vmbus_close(device->channel);
1819
1820cleanup:
Stephen Hemmingerfcfb4a02018-03-02 13:49:03 -08001821 netif_napi_del(&net_device->chan_table[0].napi);
Haiyang Zhang351e1582020-01-23 13:52:34 -08001822
1823cleanup2:
stephen hemminger545a8e72017-03-22 14:51:00 -07001824 free_netvsc_device(&net_device->rcu);
Haiyang Zhangb637e022011-04-21 12:30:45 -07001825
stephen hemminger9749fed2017-07-19 11:53:16 -07001826 return ERR_PTR(ret);
Haiyang Zhangb637e022011-04-21 12:30:45 -07001827}