/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/vmalloc.h>
#include <asm/sync_bitops.h>

#include "hyperv_net.h"

/*
 * Switch the data path from the synthetic interface to the VF
 * interface.
 */
void netvsc_switch_datapath(struct net_device *ndev, bool vf)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nv_dev = net_device_ctx->nvdev;
	struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;

	memset(init_pkt, 0, sizeof(struct nvsp_message));
	init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
	if (vf)
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_VF;
	else
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_SYNTHETIC;

	vmbus_sendpacket(dev->channel, init_pkt,
			 sizeof(struct nvsp_message),
			 (unsigned long)init_pkt,
			 VM_PKT_DATA_INBAND, 0);
}

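/* Allocate the per-device netvsc state, including the receive-completion
 * slots for the primary channel, and initialize the fields used while
 * bringing the device up.
 */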
static struct netvsc_device *alloc_net_device(void)
{
	struct netvsc_device *net_device;

	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
	if (!net_device)
		return NULL;

	net_device->chan_table[0].mrc.buf
		= vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data));

	init_waitqueue_head(&net_device->wait_drain);
	net_device->destroy = false;
	atomic_set(&net_device->open_cnt, 0);
	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
	init_completion(&net_device->channel_init_wait);

	return net_device;
}

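/* Free the per-channel receive-completion buffers and the netvsc device
 * state allocated by alloc_net_device().
 */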
static void free_netvsc_device(struct netvsc_device *nvdev)
{
	int i;

	for (i = 0; i < VRSS_CHANNEL_MAX; i++)
		vfree(nvdev->chan_table[i].mrc.buf);

	kfree(nvdev);
}

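/* A channel is considered idle when the device has no outstanding receive
 * completions and the channel has no sends in flight.
 */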
static inline bool netvsc_channel_idle(const struct netvsc_device *net_device,
				       u16 q_idx)
{
	const struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];

	return atomic_read(&net_device->num_outstanding_recvs) == 0 &&
	       atomic_read(&nvchan->queue_sends) == 0;
}

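/* Return the netvsc device, or NULL if it is already being destroyed. */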
static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device = hv_device_to_netvsc_device(device);

	if (net_device && net_device->destroy)
		net_device = NULL;

	return net_device;
}

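/* Revoke the receive and send buffers from the host, tear down their
 * GPADLs and free the local allocations.
 */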
static void netvsc_destroy_buf(struct hv_device *device)
{
	struct nvsp_message *revoke_packet;
	struct net_device *ndev = hv_get_drvdata(device);
	struct netvsc_device *net_device = net_device_to_netvsc_device(ndev);
	int ret;

	/*
	 * If we got a section count, it means we received a
	 * SendReceiveBufferComplete msg (ie sent
	 * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->recv_section_cnt) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
		revoke_packet->msg.v1_msg.
		revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/*
		 * If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke receive buffer to netvsp\n");
			return;
		}
	}

	/* Teardown the gpadl on the vsp end */
	if (net_device->recv_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   net_device->recv_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown receive buffer's gpadl\n");
			return;
		}
		net_device->recv_buf_gpadl_handle = 0;
	}

	if (net_device->recv_buf) {
		/* Free up the receive buffer */
		vfree(net_device->recv_buf);
		net_device->recv_buf = NULL;
	}

	if (net_device->recv_section) {
		net_device->recv_section_cnt = 0;
		kfree(net_device->recv_section);
		net_device->recv_section = NULL;
	}

	/* Deal with the send buffer we may have setup.
	 * If we got a send section size, it means we received a
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (ie sent
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->send_section_size) {
		/* Send the revoke send buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
		revoke_packet->msg.v1_msg.revoke_send_buf.id =
			NETVSC_SEND_BUFFER_ID;

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/* If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke send buffer to netvsp\n");
			return;
		}
	}
	/* Teardown the gpadl on the vsp end */
	if (net_device->send_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   net_device->send_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown send buffer's gpadl\n");
			return;
		}
		net_device->send_buf_gpadl_handle = 0;
	}
	if (net_device->send_buf) {
		/* Free up the send buffer */
		vfree(net_device->send_buf);
		net_device->send_buf = NULL;
	}
	kfree(net_device->send_section_map);
}

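/* Allocate the receive and send buffers, establish their GPADLs on the
 * primary channel and announce them to the host (NetVSP).
 */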
static int netvsc_init_buf(struct hv_device *device)
{
	int ret = 0;
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;
	struct net_device *ndev;
	int node;

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;
	ndev = hv_get_drvdata(device);

	node = cpu_to_node(device->channel->target_cpu);
	net_device->recv_buf = vzalloc_node(net_device->recv_buf_size, node);
	if (!net_device->recv_buf)
		net_device->recv_buf = vzalloc(net_device->recv_buf_size);

	if (!net_device->recv_buf) {
		netdev_err(ndev, "unable to allocate receive "
			   "buffer of size %d\n", net_device->recv_buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/*
	 * Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
				    net_device->recv_buf_size,
				    &net_device->recv_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish receive buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;

	memset(init_packet, 0, sizeof(struct nvsp_message));

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
	init_packet->msg.v1_msg.send_recv_buf.
		gpadl_handle = net_device->recv_buf_gpadl_handle;
	init_packet->msg.v1_msg.
		send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send receive buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	if (init_packet->msg.v1_msg.
	    send_recv_buf_complete.status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev, "Unable to complete receive buffer "
			   "initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.
			   send_recv_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */

	net_device->recv_section_cnt = init_packet->msg.
		v1_msg.send_recv_buf_complete.num_sections;

	net_device->recv_section = kmemdup(
		init_packet->msg.v1_msg.send_recv_buf_complete.sections,
		net_device->recv_section_cnt *
		sizeof(struct nvsp_1_receive_buffer_section),
		GFP_KERNEL);
	if (net_device->recv_section == NULL) {
		ret = -EINVAL;
		goto cleanup;
	}

	/*
	 * For 1st release, there should only be 1 section that represents the
	 * entire receive buffer
	 */
	if (net_device->recv_section_cnt != 1 ||
	    net_device->recv_section->offset != 0) {
		ret = -EINVAL;
		goto cleanup;
	}

	/* Now setup the send buffer. */
	net_device->send_buf = vzalloc_node(net_device->send_buf_size, node);
	if (!net_device->send_buf)
		net_device->send_buf = vzalloc(net_device->send_buf_size);
	if (!net_device->send_buf) {
		netdev_err(ndev, "unable to allocate send "
			   "buffer of size %d\n", net_device->send_buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/* Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
				    net_device->send_buf_size,
				    &net_device->send_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish send buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
	init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
		net_device->send_buf_gpadl_handle;
	init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send send buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	if (init_packet->msg.v1_msg.
	    send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev, "Unable to complete send buffer "
			   "initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.
			   send_send_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	net_device->send_section_size = init_packet->msg.
		v1_msg.send_send_buf_complete.section_size;

	/* Section count is simply the size divided by the section size. */
	net_device->send_section_cnt =
		net_device->send_buf_size / net_device->send_section_size;

	netdev_dbg(ndev, "Send section size: %d, Section count:%d\n",
		   net_device->send_section_size, net_device->send_section_cnt);

	/* Setup state for managing the send buffer. */
	net_device->map_words = DIV_ROUND_UP(net_device->send_section_cnt,
					     BITS_PER_LONG);

	net_device->send_section_map = kcalloc(net_device->map_words,
					       sizeof(ulong), GFP_KERNEL);
	if (net_device->send_section_map == NULL) {
		ret = -ENOMEM;
		goto cleanup;
	}

	goto exit;

cleanup:
	netvsc_destroy_buf(device);

exit:
	return ret;
}

/* Negotiate NVSP protocol version */
static int negotiate_nvsp_ver(struct hv_device *device,
			      struct netvsc_device *net_device,
			      struct nvsp_message *init_packet,
			      u32 nvsp_ver)
{
	struct net_device *ndev = hv_get_drvdata(device);
	int ret;

	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
	init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
	init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		return ret;

	wait_for_completion(&net_device->channel_init_wait);

	if (init_packet->msg.init_msg.init_complete.status !=
	    NVSP_STAT_SUCCESS)
		return -EINVAL;

	if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
		return 0;

	/* NVSPv2 or later: Send NDIS config */
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
	init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
	init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;

	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) {
		init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;

		/* Teaming bit is needed to receive link speed updates */
		init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
	}

	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);

	return ret;
}

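/* Negotiate the NVSP protocol version with the host, send the NDIS
 * version, and set up the receive and send buffers.
 */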
static int netvsc_connect_vsp(struct hv_device *device)
{
	int ret;
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;
	int ndis_version;
	const u32 ver_list[] = {
		NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
		NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 };
	int i;

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;

	init_packet = &net_device->channel_init_pkt;

	/* Negotiate the latest NVSP protocol supported */
	for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
		if (negotiate_nvsp_ver(device, net_device, init_packet,
				       ver_list[i]) == 0) {
			net_device->nvsp_version = ver_list[i];
			break;
		}

	if (i < 0) {
		ret = -EPROTO;
		goto cleanup;
	}

	pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);

	/* Send the ndis version */
	memset(init_packet, 0, sizeof(struct nvsp_message));

	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
		ndis_version = 0x00060001;
	else
		ndis_version = 0x0006001e;

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_major_ver =
				(ndis_version & 0xFFFF0000) >> 16;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_minor_ver =
				ndis_version & 0xFFFF;

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);
	if (ret != 0)
		goto cleanup;

	/* Post the big receive buffer to NetVSP */
	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
	else
		net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
	net_device->send_buf_size = NETVSC_SEND_BUFFER_SIZE;

	ret = netvsc_init_buf(device);

cleanup:
	return ret;
}

static void netvsc_disconnect_vsp(struct hv_device *device)
{
	netvsc_destroy_buf(device);
}

/*
 * netvsc_device_remove - Callback when the root bus device is removed
 */
void netvsc_device_remove(struct hv_device *device)
{
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct netvsc_device *net_device = net_device_ctx->nvdev;

	netvsc_disconnect_vsp(device);

	net_device_ctx->nvdev = NULL;

	/*
	 * At this point, no one should be accessing net_device
	 * except in here
	 */
	netdev_dbg(ndev, "net device safe to remove\n");

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

	/* Release all resources */
	free_netvsc_device(net_device);
}

#define RING_AVAIL_PERCENT_HIWATER 20
#define RING_AVAIL_PERCENT_LOWATER 10

/*
 * Get the percentage of available bytes to write in the ring.
 * The return value is in range from 0 to 100.
 */
static inline u32 hv_ringbuf_avail_percent(
		struct hv_ring_buffer_info *ring_info)
{
	u32 avail_read, avail_write;

	hv_get_ringbuffer_availbytes(ring_info, &avail_read, &avail_write);

	return avail_write * 100 / ring_info->ring_datasize;
}

static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
					 u32 index)
{
	sync_change_bit(index, net_device->send_section_map);
}

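/* Completion handler for a transmitted RNDIS packet: release the send
 * buffer section, update per-queue transmit stats, free the skb, and
 * restart the transmit queue if it was stopped and ring space is back
 * above the high watermark.
 */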
static void netvsc_send_tx_complete(struct netvsc_device *net_device,
				    struct vmbus_channel *incoming_channel,
				    struct hv_device *device,
				    struct vmpacket_descriptor *packet)
{
	struct sk_buff *skb = (struct sk_buff *)(unsigned long)packet->trans_id;
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct vmbus_channel *channel = device->channel;
	u16 q_idx = 0;
	int queue_sends;

	/* Notify the layer above us */
	if (likely(skb)) {
		const struct hv_netvsc_packet *packet
			= (struct hv_netvsc_packet *)skb->cb;
		u32 send_index = packet->send_buf_index;
		struct netvsc_stats *tx_stats;

		if (send_index != NETVSC_INVALID_INDEX)
			netvsc_free_send_slot(net_device, send_index);
		q_idx = packet->q_idx;
		channel = incoming_channel;

		tx_stats = &net_device->chan_table[q_idx].tx_stats;

		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->packets += packet->total_packets;
		tx_stats->bytes += packet->total_bytes;
		u64_stats_update_end(&tx_stats->syncp);

		dev_consume_skb_any(skb);
	}

	queue_sends =
		atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);

	if (net_device->destroy && queue_sends == 0)
		wake_up(&net_device->wait_drain);

	if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
	    !net_device_ctx->start_remove &&
	    (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER ||
	     queue_sends < 1))
		netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx));
}

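/* Dispatch a completion packet from the host: channel setup responses
 * are copied back and complete the init wait, RNDIS packet completions
 * are handed to netvsc_send_tx_complete().
 */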
static void netvsc_send_completion(struct netvsc_device *net_device,
				   struct vmbus_channel *incoming_channel,
				   struct hv_device *device,
				   struct vmpacket_descriptor *packet)
{
	struct nvsp_message *nvsp_packet;
	struct net_device *ndev = hv_get_drvdata(device);

	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
					      (packet->offset8 << 3));

	switch (nvsp_packet->hdr.msg_type) {
	case NVSP_MSG_TYPE_INIT_COMPLETE:
	case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
	case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE:
	case NVSP_MSG5_TYPE_SUBCHANNEL:
		/* Copy the response back */
		memcpy(&net_device->channel_init_pkt, nvsp_packet,
		       sizeof(struct nvsp_message));
		complete(&net_device->channel_init_wait);
		break;

	case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
		netvsc_send_tx_complete(net_device, incoming_channel,
					device, packet);
		break;

	default:
		netdev_err(ndev,
			   "Unknown send completion type %d received!!\n",
			   nvsp_packet->hdr.msg_type);
	}
}

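/* Claim a free send buffer section, or return NETVSC_INVALID_INDEX if
 * none is available.
 */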
static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
{
	unsigned long *map_addr = net_device->send_section_map;
	unsigned int i;

	for_each_clear_bit(i, map_addr, net_device->map_words) {
		if (sync_test_and_set_bit(i, map_addr) == 0)
			return i;
	}

	return NETVSC_INVALID_INDEX;
}

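/* Copy the packet's page buffers into the chosen send buffer section,
 * adding alignment padding to the RNDIS message when more packets are
 * expected to be batched behind it.
 */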
static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
				   unsigned int section_index,
				   u32 pend_size,
				   struct hv_netvsc_packet *packet,
				   struct rndis_message *rndis_msg,
				   struct hv_page_buffer **pb,
				   struct sk_buff *skb)
{
	char *start = net_device->send_buf;
	char *dest = start + (section_index * net_device->send_section_size)
		     + pend_size;
	int i;
	bool is_data_pkt = (skb != NULL) ? true : false;
	bool xmit_more = (skb != NULL) ? skb->xmit_more : false;
	u32 msg_size = 0;
	u32 padding = 0;
	u32 remain = packet->total_data_buflen % net_device->pkt_align;
	u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
		packet->page_buf_cnt;

	/* Add padding */
	if (is_data_pkt && xmit_more && remain &&
	    !packet->cp_partial) {
		padding = net_device->pkt_align - remain;
		rndis_msg->msg_len += padding;
		packet->total_data_buflen += padding;
	}

	for (i = 0; i < page_count; i++) {
		char *src = phys_to_virt((*pb)[i].pfn << PAGE_SHIFT);
		u32 offset = (*pb)[i].offset;
		u32 len = (*pb)[i].len;

		memcpy(dest, (src + offset), len);
		msg_size += len;
		dest += len;
	}

	if (padding) {
		memset(dest, 0, padding);
		msg_size += padding;
	}

	return msg_size;
}

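/* Post a single RNDIS packet on the VMBus channel, then update the
 * per-channel send count and stop or wake the transmit queue based on
 * the result and the remaining ring space.
 */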
static inline int netvsc_send_pkt(
	struct hv_device *device,
	struct hv_netvsc_packet *packet,
	struct netvsc_device *net_device,
	struct hv_page_buffer **pb,
	struct sk_buff *skb)
{
	struct nvsp_message nvmsg;
	struct netvsc_channel *nvchan
		= &net_device->chan_table[packet->q_idx];
	struct vmbus_channel *out_channel = nvchan->channel;
	struct net_device *ndev = hv_get_drvdata(device);
	struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
	u64 req_id;
	int ret;
	struct hv_page_buffer *pgbuf;
	u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound);
	bool xmit_more = (skb != NULL) ? skb->xmit_more : false;

	nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
	if (skb != NULL) {
		/* 0 is RMC_DATA; */
		nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 0;
	} else {
		/* 1 is RMC_CONTROL; */
		nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 1;
	}

	nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
		packet->send_buf_index;
	if (packet->send_buf_index == NETVSC_INVALID_INDEX)
		nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
	else
		nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size =
			packet->total_data_buflen;

	req_id = (ulong)skb;

	if (out_channel->rescind)
		return -ENODEV;

	/*
	 * It is possible that once we successfully place this packet
	 * on the ringbuffer, we may stop the queue. In that case, we want
	 * to notify the host independent of the xmit_more flag. We don't
	 * need to be precise here; in the worst case we may signal the host
	 * unnecessarily.
	 */
	if (ring_avail < (RING_AVAIL_PERCENT_LOWATER + 1))
		xmit_more = false;

	if (packet->page_buf_cnt) {
		pgbuf = packet->cp_partial ? (*pb) +
			packet->rmsg_pgcnt : (*pb);
		ret = vmbus_sendpacket_pagebuffer_ctl(out_channel,
						      pgbuf,
						      packet->page_buf_cnt,
						      &nvmsg,
						      sizeof(struct nvsp_message),
						      req_id,
						      VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
						      !xmit_more);
	} else {
		ret = vmbus_sendpacket_ctl(out_channel, &nvmsg,
					   sizeof(struct nvsp_message),
					   req_id,
					   VM_PKT_DATA_INBAND,
					   VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
					   !xmit_more);
	}

	if (ret == 0) {
		atomic_inc_return(&nvchan->queue_sends);

		if (ring_avail < RING_AVAIL_PERCENT_LOWATER)
			netif_tx_stop_queue(txq);
	} else if (ret == -EAGAIN) {
		netif_tx_stop_queue(txq);
		if (atomic_read(&nvchan->queue_sends) < 1) {
			netif_tx_wake_queue(txq);
			ret = -ENOSPC;
		}
	} else {
		netdev_err(ndev, "Unable to send packet %p ret %d\n",
			   packet, ret);
	}

	return ret;
}

/* Move packet out of multi send data (msd), and clear msd */
static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
				struct sk_buff **msd_skb,
				struct multi_send_data *msdp)
{
	*msd_skb = msdp->skb;
	*msd_send = msdp->pkt;
	msdp->skb = NULL;
	msdp->pkt = NULL;
	msdp->count = 0;
}

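/* Queue a packet for transmission: batch data packets into a send buffer
 * section when possible, flushing any pending batch first, and hand the
 * resulting packet(s) to netvsc_send_pkt().
 */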
int netvsc_send(struct hv_device *device,
		struct hv_netvsc_packet *packet,
		struct rndis_message *rndis_msg,
		struct hv_page_buffer **pb,
		struct sk_buff *skb)
{
	struct netvsc_device *net_device;
	int ret = 0;
	struct netvsc_channel *nvchan;
	u32 pktlen = packet->total_data_buflen, msd_len = 0;
	unsigned int section_index = NETVSC_INVALID_INDEX;
	struct multi_send_data *msdp;
	struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
	struct sk_buff *msd_skb = NULL;
	bool try_batch;
	bool xmit_more = (skb != NULL) ? skb->xmit_more : false;

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;

	/* We may race with netvsc_connect_vsp()/netvsc_init_buf() and get
	 * here before the negotiation with the host is finished and
	 * send_section_map may not be allocated yet.
	 */
	if (!net_device->send_section_map)
		return -EAGAIN;

	nvchan = &net_device->chan_table[packet->q_idx];
	packet->send_buf_index = NETVSC_INVALID_INDEX;
	packet->cp_partial = false;

	/* Send control message directly without accessing msd (Multi-Send
	 * Data) field which may be changed during data packet processing.
	 */
	if (!skb) {
		cur_send = packet;
		goto send_now;
	}

	/* batch packets in send buffer if possible */
	msdp = &nvchan->msd;
	if (msdp->pkt)
		msd_len = msdp->pkt->total_data_buflen;

	try_batch = (skb != NULL) && msd_len > 0 && msdp->count <
		    net_device->max_pkt;

	if (try_batch && msd_len + pktlen + net_device->pkt_align <
	    net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;

	} else if (try_batch && msd_len + packet->rmsg_size <
		   net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;
		packet->cp_partial = true;

	} else if ((skb != NULL) && pktlen + net_device->pkt_align <
		   net_device->send_section_size) {
		section_index = netvsc_get_next_send_section(net_device);
		if (section_index != NETVSC_INVALID_INDEX) {
			move_pkt_msd(&msd_send, &msd_skb, msdp);
			msd_len = 0;
		}
	}

	if (section_index != NETVSC_INVALID_INDEX) {
		netvsc_copy_to_send_buf(net_device,
					section_index, msd_len,
					packet, rndis_msg, pb, skb);

		packet->send_buf_index = section_index;

		if (packet->cp_partial) {
			packet->page_buf_cnt -= packet->rmsg_pgcnt;
			packet->total_data_buflen = msd_len + packet->rmsg_size;
		} else {
			packet->page_buf_cnt = 0;
			packet->total_data_buflen += msd_len;
		}

		if (msdp->pkt) {
			packet->total_packets += msdp->pkt->total_packets;
			packet->total_bytes += msdp->pkt->total_bytes;
		}

		if (msdp->skb)
			dev_consume_skb_any(msdp->skb);

		if (xmit_more && !packet->cp_partial) {
			msdp->skb = skb;
			msdp->pkt = packet;
			msdp->count++;
		} else {
			cur_send = packet;
			msdp->skb = NULL;
			msdp->pkt = NULL;
			msdp->count = 0;
		}
	} else {
		move_pkt_msd(&msd_send, &msd_skb, msdp);
		cur_send = packet;
	}

	if (msd_send) {
		int m_ret = netvsc_send_pkt(device, msd_send, net_device,
					    NULL, msd_skb);

		if (m_ret != 0) {
			netvsc_free_send_slot(net_device,
					      msd_send->send_buf_index);
			dev_kfree_skb_any(msd_skb);
		}
	}

send_now:
	if (cur_send)
		ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);

	if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
		netvsc_free_send_slot(net_device, section_index);

	return ret;
}

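/* Return a receive completion (with status) for the given transaction
 * to the host.
 */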
static int netvsc_send_recv_completion(struct vmbus_channel *channel,
				       u64 transaction_id, u32 status)
{
	struct nvsp_message recvcompMessage;
	int ret;

	recvcompMessage.hdr.msg_type =
				NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;

	recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status = status;

	/* Send the completion */
	ret = vmbus_sendpacket(channel, &recvcompMessage,
			       sizeof(struct nvsp_message_header) + sizeof(u32),
			       transaction_id, VM_PKT_COMP, 0);

	return ret;
}

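/* Compute how many receive-completion slots are filled and how many are
 * still available in the per-channel ring.
 */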
static inline void count_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx,
					u32 *filled, u32 *avail)
{
	struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
	u32 first = mrc->first;
	u32 next = mrc->next;

	*filled = (first > next) ? NETVSC_RECVSLOT_MAX - first + next :
		  next - first;

	*avail = NETVSC_RECVSLOT_MAX - *filled - 1;
}

/* Read the first filled slot, no change to index */
static inline struct recv_comp_data *read_recv_comp_slot(struct netvsc_device
							 *nvdev, u16 q_idx)
{
	struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
	u32 filled, avail;

	if (unlikely(!mrc->buf))
		return NULL;

	count_recv_comp_slot(nvdev, q_idx, &filled, &avail);
	if (!filled)
		return NULL;

	return mrc->buf + mrc->first * sizeof(struct recv_comp_data);
}

/* Put the first filled slot back to available pool */
static inline void put_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx)
{
	struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
	int num_recv;

	mrc->first = (mrc->first + 1) % NETVSC_RECVSLOT_MAX;

	num_recv = atomic_dec_return(&nvdev->num_outstanding_recvs);

	if (nvdev->destroy && num_recv == 0)
		wake_up(&nvdev->wait_drain);
}

/* Check and send pending recv completions */
static void netvsc_chk_recv_comp(struct netvsc_device *nvdev,
				 struct vmbus_channel *channel, u16 q_idx)
{
	struct recv_comp_data *rcd;
	int ret;

	while (true) {
		rcd = read_recv_comp_slot(nvdev, q_idx);
		if (!rcd)
			break;

		ret = netvsc_send_recv_completion(channel, rcd->tid,
						  rcd->status);
		if (ret)
			break;

		put_recv_comp_slot(nvdev, q_idx);
	}
}

#define NETVSC_RCD_WATERMARK 80

/* Get next available slot */
static inline struct recv_comp_data *get_recv_comp_slot(
	struct netvsc_device *nvdev, struct vmbus_channel *channel, u16 q_idx)
{
	struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
	u32 filled, avail, next;
	struct recv_comp_data *rcd;

	if (unlikely(!nvdev->recv_section))
		return NULL;

	if (unlikely(!mrc->buf))
		return NULL;

	if (atomic_read(&nvdev->num_outstanding_recvs) >
	    nvdev->recv_section->num_sub_allocs * NETVSC_RCD_WATERMARK / 100)
		netvsc_chk_recv_comp(nvdev, channel, q_idx);

	count_recv_comp_slot(nvdev, q_idx, &filled, &avail);
	if (!avail)
		return NULL;

	next = mrc->next;
	rcd = mrc->buf + next * sizeof(struct recv_comp_data);
	mrc->next = (next + 1) % NETVSC_RECVSLOT_MAX;

	atomic_inc(&nvdev->num_outstanding_recvs);

	return rcd;
}

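/* Handle a receive-buffer (transfer page) packet from the host: pass each
 * contained RNDIS packet up through the RNDIS filter, then either send
 * the receive completion immediately or queue it in the per-channel
 * completion ring.
 */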
stephen hemmingerdc54a082017-01-24 13:06:08 -08001084static void netvsc_receive(struct net_device *ndev,
1085 struct netvsc_device *net_device,
1086 struct net_device_context *net_device_ctx,
1087 struct hv_device *device,
1088 struct vmbus_channel *channel,
1089 struct vmtransfer_page_packet_header *vmxferpage_packet,
1090 struct nvsp_message *nvsp)
Hank Janssenfceaf242009-07-13 15:34:54 -07001091{
stephen hemmingerdc54a082017-01-24 13:06:08 -08001092 char *recv_buf = net_device->recv_buf;
Haiyang Zhang4baab262014-04-21 14:54:43 -07001093 u32 status = NVSP_STAT_SUCCESS;
Haiyang Zhang45326342011-12-15 13:45:15 -08001094 int i;
1095 int count = 0;
Haiyang Zhangc0b558e2016-08-19 14:47:09 -07001096 int ret;
1097 struct recv_comp_data *rcd;
1098 u16 q_idx = channel->offermsg.offer.sub_channel_index;
K. Y. Srinivasan779b4d12011-04-26 09:20:22 -07001099
Bill Pemberton454f18a2009-07-27 16:47:24 -04001100 /* Make sure this is a valid nvsp packet */
stephen hemmingerdc54a082017-01-24 13:06:08 -08001101 if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
1102 netif_err(net_device_ctx, rx_err, ndev,
1103 "Unknown nvsp packet type received %u\n",
1104 nvsp->hdr.msg_type);
Hank Janssenfceaf242009-07-13 15:34:54 -07001105 return;
1106 }
1107
stephen hemmingerdc54a082017-01-24 13:06:08 -08001108 if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
1109 netif_err(net_device_ctx, rx_err, ndev,
1110 "Invalid xfer page set id - expecting %x got %x\n",
1111 NETVSC_RECEIVE_BUFFER_ID,
1112 vmxferpage_packet->xfer_pageset_id);
Hank Janssenfceaf242009-07-13 15:34:54 -07001113 return;
1114 }
1115
Haiyang Zhang4baab262014-04-21 14:54:43 -07001116 count = vmxferpage_packet->range_cnt;
Hank Janssenfceaf242009-07-13 15:34:54 -07001117
Bill Pemberton454f18a2009-07-27 16:47:24 -04001118	/* Each range describes one RNDIS packet that carries one Ethernet frame */
Haiyang Zhang4baab262014-04-21 14:54:43 -07001119 for (i = 0; i < count; i++) {
stephen hemmingerdc54a082017-01-24 13:06:08 -08001120 void *data = recv_buf
1121 + vmxferpage_packet->ranges[i].byte_offset;
1122 u32 buflen = vmxferpage_packet->ranges[i].byte_count;
Hank Janssenfceaf242009-07-13 15:34:54 -07001123
Bill Pemberton454f18a2009-07-27 16:47:24 -04001124 /* Pass it to the upper layer */
stephen hemmingerdc54a082017-01-24 13:06:08 -08001125 status = rndis_filter_receive(ndev, net_device, device,
1126 channel, data, buflen);
Hank Janssenfceaf242009-07-13 15:34:54 -07001127 }
1128
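	/*
	 * Without a multi-receive-completion buffer for this channel the
	 * completion has to be sent right away; otherwise queue it in a
	 * slot and let netvsc_chk_recv_comp() send it later.
	 */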
stephen hemmingerb8b835a2017-01-24 13:06:07 -08001129 if (!net_device->chan_table[q_idx].mrc.buf) {
Haiyang Zhangc0b558e2016-08-19 14:47:09 -07001130 ret = netvsc_send_recv_completion(channel,
1131 vmxferpage_packet->d.trans_id,
1132 status);
1133 if (ret)
1134 netdev_err(ndev, "Recv_comp q:%hd, tid:%llx, err:%d\n",
1135 q_idx, vmxferpage_packet->d.trans_id, ret);
1136 return;
1137 }
1138
1139 rcd = get_recv_comp_slot(net_device, channel, q_idx);
1140
1141 if (!rcd) {
1142 netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
1143 q_idx, vmxferpage_packet->d.trans_id);
1144 return;
1145 }
1146
1147 rcd->tid = vmxferpage_packet->d.trans_id;
1148 rcd->status = status;
Hank Janssenfceaf242009-07-13 15:34:54 -07001149}
1150
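/*
 * Copy the host-supplied send indirection table
 * (NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE) into the netvsc device; the
 * transmit path consults this table when choosing a channel.
 */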
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001151static void netvsc_send_table(struct hv_device *hdev,
Haiyang Zhang71790a22015-07-24 10:08:40 -07001152 struct nvsp_message *nvmsg)
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001153{
1154 struct netvsc_device *nvscdev;
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001155 struct net_device *ndev = hv_get_drvdata(hdev);
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001156 int i;
1157 u32 count, *tab;
1158
1159 nvscdev = get_outbound_net_device(hdev);
1160 if (!nvscdev)
1161 return;
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001162
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001163 count = nvmsg->msg.v5_msg.send_table.count;
1164 if (count != VRSS_SEND_TAB_SIZE) {
1165 netdev_err(ndev, "Received wrong send-table size:%u\n", count);
1166 return;
1167 }
1168
1169 tab = (u32 *)((unsigned long)&nvmsg->msg.v5_msg.send_table +
1170 nvmsg->msg.v5_msg.send_table.offset);
1171
1172 for (i = 0; i < count; i++)
1173 nvscdev->send_table[i] = tab[i];
1174}
1175
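/*
 * Record the VF association state reported by the host: whether a VF
 * has been allocated for this device and its serial number.
 */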
Vitaly Kuznetsovf9a7da92016-08-15 17:48:39 +02001176static void netvsc_send_vf(struct net_device_context *net_device_ctx,
Haiyang Zhang71790a22015-07-24 10:08:40 -07001177 struct nvsp_message *nvmsg)
1178{
Vitaly Kuznetsovf9a7da92016-08-15 17:48:39 +02001179 net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
1180 net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
Haiyang Zhang71790a22015-07-24 10:08:40 -07001181}
1182
1183static inline void netvsc_receive_inband(struct hv_device *hdev,
Vitaly Kuznetsovf9a7da92016-08-15 17:48:39 +02001184 struct net_device_context *net_device_ctx,
1185 struct nvsp_message *nvmsg)
Haiyang Zhang71790a22015-07-24 10:08:40 -07001186{
1187 switch (nvmsg->hdr.msg_type) {
1188 case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
1189 netvsc_send_table(hdev, nvmsg);
1190 break;
1191
1192 case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
Vitaly Kuznetsovf9a7da92016-08-15 17:48:39 +02001193 netvsc_send_vf(net_device_ctx, nvmsg);
Haiyang Zhang71790a22015-07-24 10:08:40 -07001194 break;
1195 }
1196}
1197
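/*
 * Dispatch a single raw ring-buffer descriptor: send completions,
 * transfer-page packets carrying received data, or inband control
 * messages such as the send indirection table and VF association updates.
 */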
K. Y. Srinivasan99a50bb2016-07-05 16:52:46 -07001198static void netvsc_process_raw_pkt(struct hv_device *device,
1199 struct vmbus_channel *channel,
1200 struct netvsc_device *net_device,
1201 struct net_device *ndev,
1202 u64 request_id,
1203 struct vmpacket_descriptor *desc)
1204{
Vitaly Kuznetsovf9a7da92016-08-15 17:48:39 +02001205 struct net_device_context *net_device_ctx = netdev_priv(ndev);
stephen hemmingerdc54a082017-01-24 13:06:08 -08001206 struct nvsp_message *nvmsg
1207 = (struct nvsp_message *)((unsigned long)desc
1208 + (desc->offset8 << 3));
K. Y. Srinivasan99a50bb2016-07-05 16:52:46 -07001209
1210 switch (desc->type) {
1211 case VM_PKT_COMP:
1212 netvsc_send_completion(net_device, channel, device, desc);
1213 break;
1214
1215 case VM_PKT_DATA_USING_XFER_PAGES:
stephen hemmingerdc54a082017-01-24 13:06:08 -08001216 netvsc_receive(ndev, net_device, net_device_ctx,
1217 device, channel,
1218 (struct vmtransfer_page_packet_header *)desc,
1219 nvmsg);
K. Y. Srinivasan99a50bb2016-07-05 16:52:46 -07001220 break;
1221
1222 case VM_PKT_DATA_INBAND:
Vitaly Kuznetsovf9a7da92016-08-15 17:48:39 +02001223 netvsc_receive_inband(device, net_device_ctx, nvmsg);
K. Y. Srinivasan99a50bb2016-07-05 16:52:46 -07001224 break;
1225
1226 default:
1227 netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
1228 desc->type, request_id);
1229 break;
1230 }
1231}
1232
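/*
 * Channel callback, invoked when the host signals this VMBus channel.
 * Drains all pending descriptors from the ring buffer and then flushes
 * any batched receive completions for the queue.
 */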
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001233void netvsc_channel_cb(void *context)
Hank Janssenfceaf242009-07-13 15:34:54 -07001234{
stephen hemminger0b307eb2017-01-24 13:05:58 -08001235 struct vmbus_channel *channel = context;
Haiyang Zhangc0b558e2016-08-19 14:47:09 -07001236 u16 q_idx = channel->offermsg.offer.sub_channel_index;
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001237 struct hv_device *device;
Haiyang Zhang85799a32010-12-10 12:03:54 -08001238 struct netvsc_device *net_device;
Greg Kroah-Hartman8dc0a062009-08-27 16:02:36 -07001239 struct vmpacket_descriptor *desc;
K. Y. Srinivasan2ddd5e5f2011-09-13 10:59:49 -07001240 struct net_device *ndev;
K. Y. Srinivasan99a50bb2016-07-05 16:52:46 -07001241 bool need_to_commit = false;
Hank Janssenfceaf242009-07-13 15:34:54 -07001242
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001243 if (channel->primary_channel != NULL)
1244 device = channel->primary_channel->device_obj;
1245 else
1246 device = channel->device_obj;
1247
stephen hemminger46b4f7f2017-01-24 13:06:11 -08001248 ndev = hv_get_drvdata(device);
1249 if (unlikely(!ndev))
KY Srinivasanee0c4c32014-02-16 16:38:45 -08001250 return;
stephen hemminger0b307eb2017-01-24 13:05:58 -08001251
stephen hemminger46b4f7f2017-01-24 13:06:11 -08001252 net_device = net_device_to_netvsc_device(ndev);
1253 if (unlikely(net_device->destroy) &&
1254 netvsc_channel_idle(net_device, q_idx))
1255 return;
Hank Janssenfceaf242009-07-13 15:34:54 -07001256
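	/*
	 * get_next_pkt_raw() peeks at the next descriptor in place,
	 * put_pkt_raw() steps past it, and commit_rd_index() publishes
	 * the new read index to the host once the batch is drained.
	 */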
stephen hemminger0b307eb2017-01-24 13:05:58 -08001257 while ((desc = get_next_pkt_raw(channel)) != NULL) {
1258 netvsc_process_raw_pkt(device, channel, net_device,
1259 ndev, desc->trans_id, desc);
K. Y. Srinivasan99a50bb2016-07-05 16:52:46 -07001260
stephen hemminger0b307eb2017-01-24 13:05:58 -08001261 put_pkt_raw(channel, desc);
1262 need_to_commit = true;
1263 }
K. Y. Srinivasan99a50bb2016-07-05 16:52:46 -07001264
stephen hemminger0b307eb2017-01-24 13:05:58 -08001265 if (need_to_commit)
1266 commit_rd_index(channel);
Haiyang Zhangc0b558e2016-08-19 14:47:09 -07001267
1268 netvsc_chk_recv_comp(net_device, channel, q_idx);
Hank Janssenfceaf242009-07-13 15:34:54 -07001269}
Haiyang Zhangaf24ce42011-04-21 12:30:40 -07001270
1271/*
Haiyang Zhangb637e022011-04-21 12:30:45 -07001272 * netvsc_device_add - Callback when the device belonging to this
1273 * driver is added
1274 */
stephen hemminger2c7f83c2017-01-24 13:06:09 -08001275int netvsc_device_add(struct hv_device *device,
1276 const struct netvsc_device_info *device_info)
Haiyang Zhangb637e022011-04-21 12:30:45 -07001277{
Vitaly Kuznetsov88098832016-05-13 13:55:25 +02001278 int i, ret = 0;
stephen hemminger2c7f83c2017-01-24 13:06:09 -08001279 int ring_size = device_info->ring_size;
Haiyang Zhangb637e022011-04-21 12:30:45 -07001280 struct netvsc_device *net_device;
Vitaly Kuznetsov88098832016-05-13 13:55:25 +02001281 struct net_device *ndev = hv_get_drvdata(device);
1282 struct net_device_context *net_device_ctx = netdev_priv(ndev);
Haiyang Zhangb637e022011-04-21 12:30:45 -07001283
Vitaly Kuznetsov88098832016-05-13 13:55:25 +02001284 net_device = alloc_net_device();
Dan Carpenterb1c84922014-09-04 14:11:23 +03001285 if (!net_device)
1286 return -ENOMEM;
Haiyang Zhangb637e022011-04-21 12:30:45 -07001287
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001288 net_device->ring_size = ring_size;
1289
Haiyang Zhangb637e022011-04-21 12:30:45 -07001290 /* Open the channel */
K. Y. Srinivasanaae23982011-05-12 19:35:05 -07001291 ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
1292 ring_size * PAGE_SIZE, NULL, 0,
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001293 netvsc_channel_cb, device->channel);
Haiyang Zhangb637e022011-04-21 12:30:45 -07001294
1295 if (ret != 0) {
Haiyang Zhangd9871152011-09-01 12:19:41 -07001296 netdev_err(ndev, "unable to open channel: %d\n", ret);
Haiyang Zhangb637e022011-04-21 12:30:45 -07001297 goto cleanup;
1298 }
1299
1300 /* Channel is opened */
Vitaly Kuznetsov93ba2222016-11-28 18:25:44 +01001301 netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");
Haiyang Zhangb637e022011-04-21 12:30:45 -07001302
Vitaly Kuznetsov88098832016-05-13 13:55:25 +02001303	/* If we're reopening the device we may have multiple queues; fill the
1304	 * chn_table with the default channel so it is used until the
1305	 * subchannels are opened.
1306 */
1307 for (i = 0; i < VRSS_CHANNEL_MAX; i++)
stephen hemmingerb8b835a2017-01-24 13:06:07 -08001308 net_device->chan_table[i].channel = device->channel;
Vitaly Kuznetsov88098832016-05-13 13:55:25 +02001309
1310	/* Writing the nvdev pointer unlocks netvsc_send(); make sure chn_table
1311	 * is fully populated before this store.
1312 */
1313 wmb();
1314
1315 net_device_ctx->nvdev = net_device;
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001316
Haiyang Zhangb637e022011-04-21 12:30:45 -07001317 /* Connect with the NetVsp */
1318 ret = netvsc_connect_vsp(device);
1319 if (ret != 0) {
Haiyang Zhangd9871152011-09-01 12:19:41 -07001320 netdev_err(ndev,
Haiyang Zhangc909ebb2011-09-01 12:19:40 -07001321 "unable to connect to NetVSP - %d\n", ret);
Haiyang Zhangb637e022011-04-21 12:30:45 -07001322 goto close;
1323 }
1324
1325 return ret;
1326
1327close:
1328 /* Now, we can close the channel safely */
1329 vmbus_close(device->channel);
1330
1331cleanup:
Haiyang Zhangf90251c2014-08-15 19:18:19 +00001332 free_netvsc_device(net_device);
Haiyang Zhangb637e022011-04-21 12:30:45 -07001333
1334 return ret;
1335}