/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/vmalloc.h>
#include <asm/sync_bitops.h>

#include "hyperv_net.h"

/*
 * Switch the data path from the synthetic interface to the VF
 * interface.
 */
void netvsc_switch_datapath(struct net_device *ndev, bool vf)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nv_dev = net_device_ctx->nvdev;
	struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;

	memset(init_pkt, 0, sizeof(struct nvsp_message));
	init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
	if (vf)
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_VF;
	else
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_SYNTHETIC;

	vmbus_sendpacket(dev->channel, init_pkt,
			 sizeof(struct nvsp_message),
			 (unsigned long)init_pkt,
			 VM_PKT_DATA_INBAND, 0);
}

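/*
 * Allocate the per-device netvsc state. The receive-completion ring for
 * channel 0 is allocated here; state for additional channels is set up
 * later when subchannels are opened.
 */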
static struct netvsc_device *alloc_net_device(void)
{
	struct netvsc_device *net_device;

	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
	if (!net_device)
		return NULL;

	net_device->chan_table[0].mrc.buf
		= vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data));

	init_waitqueue_head(&net_device->wait_drain);
	net_device->destroy = false;
	atomic_set(&net_device->open_cnt, 0);
	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
	init_completion(&net_device->channel_init_wait);

	return net_device;
}

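/*
 * Free the netvsc device state, including any per-channel
 * receive-completion buffers.
 */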
static void free_netvsc_device(struct netvsc_device *nvdev)
{
	int i;

	for (i = 0; i < VRSS_CHANNEL_MAX; i++)
		vfree(nvdev->chan_table[i].mrc.buf);

	kfree(nvdev);
}

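/*
 * A channel is considered idle when it has no outstanding receive
 * completions and no sends still queued on it.
 */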
static inline bool netvsc_channel_idle(const struct netvsc_device *net_device,
				       u16 q_idx)
{
	const struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];

	return atomic_read(&net_device->num_outstanding_recvs) == 0 &&
		atomic_read(&nvchan->queue_sends) == 0;
}

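/* Return the netvsc device unless it is already being torn down. */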
static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device = hv_device_to_netvsc_device(device);

	if (net_device && net_device->destroy)
		net_device = NULL;

	return net_device;
}

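/*
 * Revoke and free the receive and send buffers shared with the host:
 * send the revoke messages, tear down the GPADLs, then release the
 * buffer memory and the send-section bitmap.
 */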
static void netvsc_destroy_buf(struct hv_device *device)
{
	struct nvsp_message *revoke_packet;
	struct net_device *ndev = hv_get_drvdata(device);
	struct netvsc_device *net_device = net_device_to_netvsc_device(ndev);
	int ret;

	/*
	 * If we got a section count, it means we received a
	 * SendReceiveBufferComplete msg (ie sent
	 * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->recv_section_cnt) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
		revoke_packet->msg.v1_msg.
		revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/*
		 * If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke receive buffer to netvsp\n");
			return;
		}
	}

	/* Teardown the gpadl on the vsp end */
	if (net_device->recv_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   net_device->recv_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown receive buffer's gpadl\n");
			return;
		}
		net_device->recv_buf_gpadl_handle = 0;
	}

	if (net_device->recv_buf) {
		/* Free up the receive buffer */
		vfree(net_device->recv_buf);
		net_device->recv_buf = NULL;
	}

	if (net_device->recv_section) {
		net_device->recv_section_cnt = 0;
		kfree(net_device->recv_section);
		net_device->recv_section = NULL;
	}

	/* Deal with the send buffer we may have setup.
	 * If we got a send section size, it means we received a
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (ie sent
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->send_section_size) {
		/* Send the revoke send buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
		revoke_packet->msg.v1_msg.revoke_send_buf.id =
			NETVSC_SEND_BUFFER_ID;

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/* If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke send buffer to netvsp\n");
			return;
		}
	}
	/* Teardown the gpadl on the vsp end */
	if (net_device->send_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   net_device->send_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown send buffer's gpadl\n");
			return;
		}
		net_device->send_buf_gpadl_handle = 0;
	}
	if (net_device->send_buf) {
		/* Free up the send buffer */
		vfree(net_device->send_buf);
		net_device->send_buf = NULL;
	}
	kfree(net_device->send_section_map);
}

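/*
 * Allocate the receive and send buffers, establish their GPADLs, and
 * advertise them to the host (NetVSP). The send buffer is carved into
 * fixed-size sections tracked by the send_section_map bitmap.
 */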
static int netvsc_init_buf(struct hv_device *device)
{
	int ret = 0;
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;
	struct net_device *ndev;
	size_t map_words;
	int node;

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;
	ndev = hv_get_drvdata(device);

	node = cpu_to_node(device->channel->target_cpu);
	net_device->recv_buf = vzalloc_node(net_device->recv_buf_size, node);
	if (!net_device->recv_buf)
		net_device->recv_buf = vzalloc(net_device->recv_buf_size);

	if (!net_device->recv_buf) {
		netdev_err(ndev, "unable to allocate receive "
			   "buffer of size %d\n", net_device->recv_buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/*
	 * Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
				    net_device->recv_buf_size,
				    &net_device->recv_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish receive buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;

	memset(init_packet, 0, sizeof(struct nvsp_message));

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
	init_packet->msg.v1_msg.send_recv_buf.
		gpadl_handle = net_device->recv_buf_gpadl_handle;
	init_packet->msg.v1_msg.
		send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send receive buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	if (init_packet->msg.v1_msg.
	    send_recv_buf_complete.status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev, "Unable to complete receive buffer "
			   "initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.
			   send_recv_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */

	net_device->recv_section_cnt = init_packet->msg.
		v1_msg.send_recv_buf_complete.num_sections;

	net_device->recv_section = kmemdup(
		init_packet->msg.v1_msg.send_recv_buf_complete.sections,
		net_device->recv_section_cnt *
		sizeof(struct nvsp_1_receive_buffer_section),
		GFP_KERNEL);
	if (net_device->recv_section == NULL) {
		ret = -EINVAL;
		goto cleanup;
	}

	/*
	 * For 1st release, there should only be 1 section that represents the
	 * entire receive buffer
	 */
	if (net_device->recv_section_cnt != 1 ||
	    net_device->recv_section->offset != 0) {
		ret = -EINVAL;
		goto cleanup;
	}

	/* Now setup the send buffer.
	 */
	net_device->send_buf = vzalloc_node(net_device->send_buf_size, node);
	if (!net_device->send_buf)
		net_device->send_buf = vzalloc(net_device->send_buf_size);
	if (!net_device->send_buf) {
		netdev_err(ndev, "unable to allocate send "
			   "buffer of size %d\n", net_device->send_buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/* Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
				    net_device->send_buf_size,
				    &net_device->send_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish send buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
	init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
		net_device->send_buf_gpadl_handle;
	init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send send buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	if (init_packet->msg.v1_msg.
	    send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev, "Unable to complete send buffer "
			   "initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.
			   send_send_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	net_device->send_section_size = init_packet->msg.
		v1_msg.send_send_buf_complete.section_size;

	/* Section count is simply the size divided by the section size.
	 */
	net_device->send_section_cnt =
		net_device->send_buf_size / net_device->send_section_size;

	netdev_dbg(ndev, "Send section size: %d, Section count:%d\n",
		   net_device->send_section_size, net_device->send_section_cnt);

	/* Setup state for managing the send buffer. */
	map_words = DIV_ROUND_UP(net_device->send_section_cnt, BITS_PER_LONG);

	net_device->send_section_map = kcalloc(map_words, sizeof(ulong), GFP_KERNEL);
	if (net_device->send_section_map == NULL) {
		ret = -ENOMEM;
		goto cleanup;
	}

	goto exit;

cleanup:
	netvsc_destroy_buf(device);

exit:
	return ret;
}

/* Negotiate NVSP protocol version */
static int negotiate_nvsp_ver(struct hv_device *device,
			      struct netvsc_device *net_device,
			      struct nvsp_message *init_packet,
			      u32 nvsp_ver)
{
	struct net_device *ndev = hv_get_drvdata(device);
	int ret;

	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
	init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
	init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		return ret;

	wait_for_completion(&net_device->channel_init_wait);

	if (init_packet->msg.init_msg.init_complete.status !=
	    NVSP_STAT_SUCCESS)
		return -EINVAL;

	if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
		return 0;

	/* NVSPv2 or later: Send NDIS config */
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
	init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
	init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;

	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) {
		init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;

		/* Teaming bit is needed to receive link speed updates */
		init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
	}

	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);

	return ret;
}

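/*
 * Negotiate the highest NVSP protocol version supported by both ends,
 * send the NDIS version, then size and post the receive/send buffers.
 */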
static int netvsc_connect_vsp(struct hv_device *device)
{
	int ret;
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;
	int ndis_version;
	const u32 ver_list[] = {
		NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
		NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 };
	int i;

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;

	init_packet = &net_device->channel_init_pkt;

	/* Negotiate the latest NVSP protocol supported */
	for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
		if (negotiate_nvsp_ver(device, net_device, init_packet,
				       ver_list[i]) == 0) {
			net_device->nvsp_version = ver_list[i];
			break;
		}

	if (i < 0) {
		ret = -EPROTO;
		goto cleanup;
	}

	pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);

	/* Send the ndis version */
	memset(init_packet, 0, sizeof(struct nvsp_message));

	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
		ndis_version = 0x00060001;
	else
		ndis_version = 0x0006001e;

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_major_ver =
			(ndis_version & 0xFFFF0000) >> 16;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_minor_ver =
			ndis_version & 0xFFFF;

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);
	if (ret != 0)
		goto cleanup;

	/* Post the big receive buffer to NetVSP */
	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
	else
		net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
	net_device->send_buf_size = NETVSC_SEND_BUFFER_SIZE;

	ret = netvsc_init_buf(device);

cleanup:
	return ret;
}

static void netvsc_disconnect_vsp(struct hv_device *device)
{
	netvsc_destroy_buf(device);
}

/*
 * netvsc_device_remove - Callback when the root bus device is removed
 */
void netvsc_device_remove(struct hv_device *device)
{
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct netvsc_device *net_device = net_device_ctx->nvdev;

	netvsc_disconnect_vsp(device);

	net_device_ctx->nvdev = NULL;

	/*
	 * At this point, no one should be accessing net_device
	 * except in here
	 */
	netdev_dbg(ndev, "net device safe to remove\n");

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

	/* Release all resources */
	free_netvsc_device(net_device);
}

#define RING_AVAIL_PERCENT_HIWATER 20
#define RING_AVAIL_PERCENT_LOWATER 10

/*
 * Get the percentage of available bytes to write in the ring.
 * The return value is in range from 0 to 100.
 */
static inline u32 hv_ringbuf_avail_percent(
		struct hv_ring_buffer_info *ring_info)
{
	u32 avail_read, avail_write;

	hv_get_ringbuffer_availbytes(ring_info, &avail_read, &avail_write);

	return avail_write * 100 / ring_info->ring_datasize;
}

static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
					 u32 index)
{
	sync_change_bit(index, net_device->send_section_map);
}

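/*
 * Handle a transmit completion from the host: release the send-buffer
 * section, update per-queue stats, free the skb, and restart the queue
 * if it was stopped and the ring has drained enough.
 */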
static void netvsc_send_tx_complete(struct netvsc_device *net_device,
				    struct vmbus_channel *incoming_channel,
				    struct hv_device *device,
				    struct vmpacket_descriptor *packet)
{
	struct sk_buff *skb = (struct sk_buff *)(unsigned long)packet->trans_id;
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct vmbus_channel *channel = device->channel;
	u16 q_idx = 0;
	int queue_sends;

	/* Notify the layer above us */
	if (likely(skb)) {
		const struct hv_netvsc_packet *packet
			= (struct hv_netvsc_packet *)skb->cb;
		u32 send_index = packet->send_buf_index;
		struct netvsc_stats *tx_stats;

		if (send_index != NETVSC_INVALID_INDEX)
			netvsc_free_send_slot(net_device, send_index);
		q_idx = packet->q_idx;
		channel = incoming_channel;

		tx_stats = &net_device->chan_table[q_idx].tx_stats;

		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->packets += packet->total_packets;
		tx_stats->bytes += packet->total_bytes;
		u64_stats_update_end(&tx_stats->syncp);

		dev_consume_skb_any(skb);
	}

	queue_sends =
		atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);

	if (net_device->destroy && queue_sends == 0)
		wake_up(&net_device->wait_drain);

	if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
	    !net_device_ctx->start_remove &&
	    (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER ||
	     queue_sends < 1))
		netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx));
}

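/*
 * Dispatch a completion packet: init/buffer/subchannel responses wake
 * the channel_init_wait waiter; RNDIS packet completions are handed to
 * netvsc_send_tx_complete().
 */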
static void netvsc_send_completion(struct netvsc_device *net_device,
				   struct vmbus_channel *incoming_channel,
				   struct hv_device *device,
				   struct vmpacket_descriptor *packet)
{
	struct nvsp_message *nvsp_packet;
	struct net_device *ndev = hv_get_drvdata(device);

	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
					      (packet->offset8 << 3));

	switch (nvsp_packet->hdr.msg_type) {
	case NVSP_MSG_TYPE_INIT_COMPLETE:
	case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
	case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE:
	case NVSP_MSG5_TYPE_SUBCHANNEL:
		/* Copy the response back */
		memcpy(&net_device->channel_init_pkt, nvsp_packet,
		       sizeof(struct nvsp_message));
		complete(&net_device->channel_init_wait);
		break;

	case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
		netvsc_send_tx_complete(net_device, incoming_channel,
					device, packet);
		break;

	default:
		netdev_err(ndev,
			   "Unknown send completion type %d received!!\n",
			   nvsp_packet->hdr.msg_type);
	}
}

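/*
 * Claim a free send-buffer section by atomically setting its bit in
 * send_section_map; returns NETVSC_INVALID_INDEX if none is free.
 */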
static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
{
	unsigned long *map_addr = net_device->send_section_map;
	unsigned int i;

	for_each_clear_bit(i, map_addr, net_device->send_section_cnt) {
		if (sync_test_and_set_bit(i, map_addr) == 0)
			return i;
	}

	return NETVSC_INVALID_INDEX;
}

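/*
 * Copy the packet's page buffers into the chosen send-buffer section at
 * offset pend_size, padding to the RNDIS alignment when batching.
 * Returns the number of bytes written.
 */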
static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
				   unsigned int section_index,
				   u32 pend_size,
				   struct hv_netvsc_packet *packet,
				   struct rndis_message *rndis_msg,
				   struct hv_page_buffer **pb,
				   struct sk_buff *skb)
{
	char *start = net_device->send_buf;
	char *dest = start + (section_index * net_device->send_section_size)
		     + pend_size;
	int i;
	u32 msg_size = 0;
	u32 padding = 0;
	u32 remain = packet->total_data_buflen % net_device->pkt_align;
	u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
		packet->page_buf_cnt;

	/* Add padding */
	if (skb && skb->xmit_more && remain &&
	    !packet->cp_partial) {
		padding = net_device->pkt_align - remain;
		rndis_msg->msg_len += padding;
		packet->total_data_buflen += padding;
	}

	for (i = 0; i < page_count; i++) {
		char *src = phys_to_virt((*pb)[i].pfn << PAGE_SHIFT);
		u32 offset = (*pb)[i].offset;
		u32 len = (*pb)[i].len;

		memcpy(dest, (src + offset), len);
		msg_size += len;
		dest += len;
	}

	if (padding) {
		memset(dest, 0, padding);
		msg_size += padding;
	}

	return msg_size;
}

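/*
 * Build the NVSP "send RNDIS packet" message and put it on the VMBus
 * channel, either inline or with the packet's page buffers attached.
 * Stops the transmit queue when the ring runs low or the send fails
 * with -EAGAIN.
 */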
static inline int netvsc_send_pkt(
	struct hv_device *device,
	struct hv_netvsc_packet *packet,
	struct netvsc_device *net_device,
	struct hv_page_buffer **pb,
	struct sk_buff *skb)
{
	struct nvsp_message nvmsg;
	struct netvsc_channel *nvchan
		= &net_device->chan_table[packet->q_idx];
	struct vmbus_channel *out_channel = nvchan->channel;
	struct net_device *ndev = hv_get_drvdata(device);
	struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
	u64 req_id;
	int ret;
	struct hv_page_buffer *pgbuf;
	u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound);

	nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
	if (skb != NULL) {
		/* 0 is RMC_DATA; */
		nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 0;
	} else {
		/* 1 is RMC_CONTROL; */
		nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 1;
	}

	nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
		packet->send_buf_index;
	if (packet->send_buf_index == NETVSC_INVALID_INDEX)
		nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
	else
		nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size =
			packet->total_data_buflen;

	req_id = (ulong)skb;

	if (out_channel->rescind)
		return -ENODEV;

	if (packet->page_buf_cnt) {
		pgbuf = packet->cp_partial ? (*pb) +
			packet->rmsg_pgcnt : (*pb);
		ret = vmbus_sendpacket_pagebuffer_ctl(out_channel,
						      pgbuf,
						      packet->page_buf_cnt,
						      &nvmsg,
						      sizeof(struct nvsp_message),
						      req_id,
						      VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	} else {
		ret = vmbus_sendpacket_ctl(out_channel, &nvmsg,
					   sizeof(struct nvsp_message),
					   req_id,
					   VM_PKT_DATA_INBAND,
					   VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	}

	if (ret == 0) {
		atomic_inc_return(&nvchan->queue_sends);

		if (ring_avail < RING_AVAIL_PERCENT_LOWATER)
			netif_tx_stop_queue(txq);
	} else if (ret == -EAGAIN) {
		netif_tx_stop_queue(txq);
		if (atomic_read(&nvchan->queue_sends) < 1) {
			netif_tx_wake_queue(txq);
			ret = -ENOSPC;
		}
	} else {
		netdev_err(ndev, "Unable to send packet %p ret %d\n",
			   packet, ret);
	}

	return ret;
}

/* Move packet out of multi send data (msd), and clear msd */
static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
				struct sk_buff **msd_skb,
				struct multi_send_data *msdp)
{
	*msd_skb = msdp->skb;
	*msd_send = msdp->pkt;
	msdp->skb = NULL;
	msdp->pkt = NULL;
	msdp->count = 0;
}

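/*
 * Main transmit entry point. Data packets are batched into a send-buffer
 * section when they fit (flushed once xmit_more is clear); anything that
 * does not fit, and all control messages, is sent directly via
 * netvsc_send_pkt().
 */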
int netvsc_send(struct hv_device *device,
		struct hv_netvsc_packet *packet,
		struct rndis_message *rndis_msg,
		struct hv_page_buffer **pb,
		struct sk_buff *skb)
{
	struct netvsc_device *net_device;
	int ret = 0;
	struct netvsc_channel *nvchan;
	u32 pktlen = packet->total_data_buflen, msd_len = 0;
	unsigned int section_index = NETVSC_INVALID_INDEX;
	struct multi_send_data *msdp;
	struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
	struct sk_buff *msd_skb = NULL;
	bool try_batch;
	bool xmit_more = (skb != NULL) ? skb->xmit_more : false;

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;

	/* We may race with netvsc_connect_vsp()/netvsc_init_buf() and get
	 * here before the negotiation with the host is finished and
	 * send_section_map may not be allocated yet.
	 */
	if (!net_device->send_section_map)
		return -EAGAIN;

	nvchan = &net_device->chan_table[packet->q_idx];
	packet->send_buf_index = NETVSC_INVALID_INDEX;
	packet->cp_partial = false;

	/* Send control message directly without accessing msd (Multi-Send
	 * Data) field which may be changed during data packet processing.
	 */
	if (!skb) {
		cur_send = packet;
		goto send_now;
	}

	/* batch packets in send buffer if possible */
	msdp = &nvchan->msd;
	if (msdp->pkt)
		msd_len = msdp->pkt->total_data_buflen;

	try_batch = (skb != NULL) && msd_len > 0 && msdp->count <
		    net_device->max_pkt;

	if (try_batch && msd_len + pktlen + net_device->pkt_align <
	    net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;

	} else if (try_batch && msd_len + packet->rmsg_size <
		   net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;
		packet->cp_partial = true;

	} else if ((skb != NULL) && pktlen + net_device->pkt_align <
		   net_device->send_section_size) {
		section_index = netvsc_get_next_send_section(net_device);
		if (section_index != NETVSC_INVALID_INDEX) {
			move_pkt_msd(&msd_send, &msd_skb, msdp);
			msd_len = 0;
		}
	}

	if (section_index != NETVSC_INVALID_INDEX) {
		netvsc_copy_to_send_buf(net_device,
					section_index, msd_len,
					packet, rndis_msg, pb, skb);

		packet->send_buf_index = section_index;

		if (packet->cp_partial) {
			packet->page_buf_cnt -= packet->rmsg_pgcnt;
			packet->total_data_buflen = msd_len + packet->rmsg_size;
		} else {
			packet->page_buf_cnt = 0;
			packet->total_data_buflen += msd_len;
		}

		if (msdp->pkt) {
			packet->total_packets += msdp->pkt->total_packets;
			packet->total_bytes += msdp->pkt->total_bytes;
		}

		if (msdp->skb)
			dev_consume_skb_any(msdp->skb);

		if (xmit_more && !packet->cp_partial) {
			msdp->skb = skb;
			msdp->pkt = packet;
			msdp->count++;
		} else {
			cur_send = packet;
			msdp->skb = NULL;
			msdp->pkt = NULL;
			msdp->count = 0;
		}
	} else {
		move_pkt_msd(&msd_send, &msd_skb, msdp);
		cur_send = packet;
	}

	if (msd_send) {
		int m_ret = netvsc_send_pkt(device, msd_send, net_device,
					    NULL, msd_skb);

		if (m_ret != 0) {
			netvsc_free_send_slot(net_device,
					      msd_send->send_buf_index);
			dev_kfree_skb_any(msd_skb);
		}
	}

send_now:
	if (cur_send)
		ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);

	if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
		netvsc_free_send_slot(net_device, section_index);

	return ret;
}

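/*
 * Return a receive completion (RNDIS packet complete) for one
 * transaction to the host.
 */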
static int netvsc_send_recv_completion(struct vmbus_channel *channel,
				       u64 transaction_id, u32 status)
{
	struct nvsp_message recvcompMessage;
	int ret;

	recvcompMessage.hdr.msg_type =
				NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;

	recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status = status;

	/* Send the completion */
	ret = vmbus_sendpacket(channel, &recvcompMessage,
			       sizeof(struct nvsp_message_header) + sizeof(u32),
			       transaction_id, VM_PKT_COMP, 0);

	return ret;
}

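/*
 * The receive-completion ring (mrc) is a per-channel circular buffer of
 * pending completions; compute how many slots are filled and how many
 * remain available.
 */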
static inline void count_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx,
					u32 *filled, u32 *avail)
{
	struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
	u32 first = mrc->first;
	u32 next = mrc->next;

	*filled = (first > next) ? NETVSC_RECVSLOT_MAX - first + next :
		  next - first;

	*avail = NETVSC_RECVSLOT_MAX - *filled - 1;
}

/* Read the first filled slot, no change to index */
static inline struct recv_comp_data *read_recv_comp_slot(struct netvsc_device
							 *nvdev, u16 q_idx)
{
	struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
	u32 filled, avail;

	if (unlikely(!mrc->buf))
		return NULL;

	count_recv_comp_slot(nvdev, q_idx, &filled, &avail);
	if (!filled)
		return NULL;

	return mrc->buf + mrc->first * sizeof(struct recv_comp_data);
}

/* Put the first filled slot back to available pool */
static inline void put_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx)
{
	struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
	int num_recv;

	mrc->first = (mrc->first + 1) % NETVSC_RECVSLOT_MAX;

	num_recv = atomic_dec_return(&nvdev->num_outstanding_recvs);

	if (nvdev->destroy && num_recv == 0)
		wake_up(&nvdev->wait_drain);
}

/* Check and send pending recv completions */
static void netvsc_chk_recv_comp(struct netvsc_device *nvdev,
				 struct vmbus_channel *channel, u16 q_idx)
{
	struct recv_comp_data *rcd;
	int ret;

	while (true) {
		rcd = read_recv_comp_slot(nvdev, q_idx);
		if (!rcd)
			break;

		ret = netvsc_send_recv_completion(channel, rcd->tid,
						  rcd->status);
		if (ret)
			break;

		put_recv_comp_slot(nvdev, q_idx);
	}
}

#define NETVSC_RCD_WATERMARK 80

/* Get next available slot */
static inline struct recv_comp_data *get_recv_comp_slot(
	struct netvsc_device *nvdev, struct vmbus_channel *channel, u16 q_idx)
{
	struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
	u32 filled, avail, next;
	struct recv_comp_data *rcd;

	if (unlikely(!nvdev->recv_section))
		return NULL;

	if (unlikely(!mrc->buf))
		return NULL;

	if (atomic_read(&nvdev->num_outstanding_recvs) >
	    nvdev->recv_section->num_sub_allocs * NETVSC_RCD_WATERMARK / 100)
		netvsc_chk_recv_comp(nvdev, channel, q_idx);

	count_recv_comp_slot(nvdev, q_idx, &filled, &avail);
	if (!avail)
		return NULL;

	next = mrc->next;
	rcd = mrc->buf + next * sizeof(struct recv_comp_data);
	mrc->next = (next + 1) % NETVSC_RECVSLOT_MAX;

	atomic_inc(&nvdev->num_outstanding_recvs);

	return rcd;
}

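/*
 * Process one transfer-page packet from the host: hand each contained
 * RNDIS packet to the RNDIS filter, then complete it either immediately
 * or by queueing a slot in the receive-completion ring.
 */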
stephen hemmingerdc54a082017-01-24 13:06:08 -08001068static void netvsc_receive(struct net_device *ndev,
1069 struct netvsc_device *net_device,
1070 struct net_device_context *net_device_ctx,
1071 struct hv_device *device,
1072 struct vmbus_channel *channel,
1073 struct vmtransfer_page_packet_header *vmxferpage_packet,
1074 struct nvsp_message *nvsp)
Hank Janssenfceaf242009-07-13 15:34:54 -07001075{
stephen hemmingerdc54a082017-01-24 13:06:08 -08001076 char *recv_buf = net_device->recv_buf;
Haiyang Zhang4baab262014-04-21 14:54:43 -07001077 u32 status = NVSP_STAT_SUCCESS;
Haiyang Zhang45326342011-12-15 13:45:15 -08001078 int i;
1079 int count = 0;
Haiyang Zhangc0b558e2016-08-19 14:47:09 -07001080 int ret;
1081 struct recv_comp_data *rcd;
1082 u16 q_idx = channel->offermsg.offer.sub_channel_index;
K. Y. Srinivasan779b4d12011-04-26 09:20:22 -07001083
Bill Pemberton454f18a2009-07-27 16:47:24 -04001084 /* Make sure this is a valid nvsp packet */
stephen hemmingerdc54a082017-01-24 13:06:08 -08001085 if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
1086 netif_err(net_device_ctx, rx_err, ndev,
1087 "Unknown nvsp packet type received %u\n",
1088 nvsp->hdr.msg_type);
Hank Janssenfceaf242009-07-13 15:34:54 -07001089 return;
1090 }
1091
stephen hemmingerdc54a082017-01-24 13:06:08 -08001092 if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
1093 netif_err(net_device_ctx, rx_err, ndev,
1094 "Invalid xfer page set id - expecting %x got %x\n",
1095 NETVSC_RECEIVE_BUFFER_ID,
1096 vmxferpage_packet->xfer_pageset_id);
Hank Janssenfceaf242009-07-13 15:34:54 -07001097 return;
1098 }
1099
Haiyang Zhang4baab262014-04-21 14:54:43 -07001100 count = vmxferpage_packet->range_cnt;
Hank Janssenfceaf242009-07-13 15:34:54 -07001101
Bill Pemberton454f18a2009-07-27 16:47:24 -04001102 /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
Haiyang Zhang4baab262014-04-21 14:54:43 -07001103 for (i = 0; i < count; i++) {
stephen hemmingerdc54a082017-01-24 13:06:08 -08001104 void *data = recv_buf
1105 + vmxferpage_packet->ranges[i].byte_offset;
1106 u32 buflen = vmxferpage_packet->ranges[i].byte_count;
Hank Janssenfceaf242009-07-13 15:34:54 -07001107
Bill Pemberton454f18a2009-07-27 16:47:24 -04001108 /* Pass it to the upper layer */
stephen hemmingerdc54a082017-01-24 13:06:08 -08001109 status = rndis_filter_receive(ndev, net_device, device,
1110 channel, data, buflen);
Hank Janssenfceaf242009-07-13 15:34:54 -07001111 }
1112
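	/* Without a per-channel batching buffer, send the receive completion
	 * for this transaction right away; otherwise queue it in a free slot.
	 */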
stephen hemmingerb8b835a2017-01-24 13:06:07 -08001113 if (!net_device->chan_table[q_idx].mrc.buf) {
Haiyang Zhangc0b558e2016-08-19 14:47:09 -07001114 ret = netvsc_send_recv_completion(channel,
1115 vmxferpage_packet->d.trans_id,
1116 status);
1117 if (ret)
 1118			netdev_err(ndev, "Recv_comp q:%hu, tid:%llx, err:%d\n",
1119 q_idx, vmxferpage_packet->d.trans_id, ret);
1120 return;
1121 }
1122
1123 rcd = get_recv_comp_slot(net_device, channel, q_idx);
1124
1125 if (!rcd) {
 1126		netdev_err(ndev, "Recv_comp full buf q:%hu, tid:%llx\n",
1127 q_idx, vmxferpage_packet->d.trans_id);
1128 return;
1129 }
1130
1131 rcd->tid = vmxferpage_packet->d.trans_id;
1132 rcd->status = status;
Hank Janssenfceaf242009-07-13 15:34:54 -07001133}
1134
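/* netvsc_send_table - Cache the send indirection table supplied by the host.
 *
 * Validate that the advertised entry count matches VRSS_SEND_TAB_SIZE and
 * copy the entries into the per-netdev tx_send_table.
 */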
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001135static void netvsc_send_table(struct hv_device *hdev,
Haiyang Zhang71790a22015-07-24 10:08:40 -07001136 struct nvsp_message *nvmsg)
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001137{
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001138 struct net_device *ndev = hv_get_drvdata(hdev);
stephen hemminger7ce10122017-03-09 14:58:29 -08001139 struct net_device_context *net_device_ctx = netdev_priv(ndev);
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001140 int i;
1141 u32 count, *tab;
1142
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001143 count = nvmsg->msg.v5_msg.send_table.count;
1144 if (count != VRSS_SEND_TAB_SIZE) {
1145 netdev_err(ndev, "Received wrong send-table size:%u\n", count);
1146 return;
1147 }
1148
1149 tab = (u32 *)((unsigned long)&nvmsg->msg.v5_msg.send_table +
1150 nvmsg->msg.v5_msg.send_table.offset);
1151
1152 for (i = 0; i < count; i++)
stephen hemminger7ce10122017-03-09 14:58:29 -08001153 net_device_ctx->tx_send_table[i] = tab[i];
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001154}
1155
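/* netvsc_send_vf - Record the VF association advertised by the host:
 * whether a VF has been allocated for this device and its serial number.
 */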
Vitaly Kuznetsovf9a7da92016-08-15 17:48:39 +02001156static void netvsc_send_vf(struct net_device_context *net_device_ctx,
Haiyang Zhang71790a22015-07-24 10:08:40 -07001157 struct nvsp_message *nvmsg)
1158{
Vitaly Kuznetsovf9a7da92016-08-15 17:48:39 +02001159 net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
1160 net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
Haiyang Zhang71790a22015-07-24 10:08:40 -07001161}
1162
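/* netvsc_receive_inband - Dispatch inband NVSP messages; only the send
 * indirection table and VF association notifications are handled here.
 */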
1163static inline void netvsc_receive_inband(struct hv_device *hdev,
Vitaly Kuznetsovf9a7da92016-08-15 17:48:39 +02001164 struct net_device_context *net_device_ctx,
1165 struct nvsp_message *nvmsg)
Haiyang Zhang71790a22015-07-24 10:08:40 -07001166{
1167 switch (nvmsg->hdr.msg_type) {
1168 case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
1169 netvsc_send_table(hdev, nvmsg);
1170 break;
1171
1172 case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
Vitaly Kuznetsovf9a7da92016-08-15 17:48:39 +02001173 netvsc_send_vf(net_device_ctx, nvmsg);
Haiyang Zhang71790a22015-07-24 10:08:40 -07001174 break;
1175 }
1176}
1177
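/* netvsc_process_raw_pkt - Demultiplex one VMBus packet by descriptor type:
 * send completions, transfer-page receive data, or inband messages.
 * Anything else is logged as unhandled.
 */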
K. Y. Srinivasan99a50bb2016-07-05 16:52:46 -07001178static void netvsc_process_raw_pkt(struct hv_device *device,
1179 struct vmbus_channel *channel,
1180 struct netvsc_device *net_device,
1181 struct net_device *ndev,
1182 u64 request_id,
1183 struct vmpacket_descriptor *desc)
1184{
Vitaly Kuznetsovf9a7da92016-08-15 17:48:39 +02001185 struct net_device_context *net_device_ctx = netdev_priv(ndev);
stephen hemmingerdc54a082017-01-24 13:06:08 -08001186 struct nvsp_message *nvmsg
1187 = (struct nvsp_message *)((unsigned long)desc
1188 + (desc->offset8 << 3));
K. Y. Srinivasan99a50bb2016-07-05 16:52:46 -07001189
1190 switch (desc->type) {
1191 case VM_PKT_COMP:
1192 netvsc_send_completion(net_device, channel, device, desc);
1193 break;
1194
1195 case VM_PKT_DATA_USING_XFER_PAGES:
stephen hemmingerdc54a082017-01-24 13:06:08 -08001196 netvsc_receive(ndev, net_device, net_device_ctx,
1197 device, channel,
1198 (struct vmtransfer_page_packet_header *)desc,
1199 nvmsg);
K. Y. Srinivasan99a50bb2016-07-05 16:52:46 -07001200 break;
1201
1202 case VM_PKT_DATA_INBAND:
Vitaly Kuznetsovf9a7da92016-08-15 17:48:39 +02001203 netvsc_receive_inband(device, net_device_ctx, nvmsg);
K. Y. Srinivasan99a50bb2016-07-05 16:52:46 -07001204 break;
1205
1206 default:
1207 netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
1208 desc->type, request_id);
1209 break;
1210 }
1211}
1212
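/* netvsc_channel_cb - Per-channel VMBus callback.
 *
 * Drain the channel ring buffer with get_next_pkt_raw()/put_pkt_raw(),
 * processing each packet, commit the read index once the batch is consumed,
 * and flush any pending receive completions for this queue.
 */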
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001213void netvsc_channel_cb(void *context)
Hank Janssenfceaf242009-07-13 15:34:54 -07001214{
stephen hemminger0b307eb2017-01-24 13:05:58 -08001215 struct vmbus_channel *channel = context;
Haiyang Zhangc0b558e2016-08-19 14:47:09 -07001216 u16 q_idx = channel->offermsg.offer.sub_channel_index;
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001217 struct hv_device *device;
Haiyang Zhang85799a32010-12-10 12:03:54 -08001218 struct netvsc_device *net_device;
Greg Kroah-Hartman8dc0a062009-08-27 16:02:36 -07001219 struct vmpacket_descriptor *desc;
K. Y. Srinivasan2ddd5e5f2011-09-13 10:59:49 -07001220 struct net_device *ndev;
K. Y. Srinivasan99a50bb2016-07-05 16:52:46 -07001221 bool need_to_commit = false;
Hank Janssenfceaf242009-07-13 15:34:54 -07001222
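	/* The device object lives on the primary channel; subchannels reach
	 * it through their primary_channel pointer.
	 */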
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001223 if (channel->primary_channel != NULL)
1224 device = channel->primary_channel->device_obj;
1225 else
1226 device = channel->device_obj;
1227
stephen hemminger46b4f7f2017-01-24 13:06:11 -08001228 ndev = hv_get_drvdata(device);
1229 if (unlikely(!ndev))
KY Srinivasanee0c4c32014-02-16 16:38:45 -08001230 return;
stephen hemminger0b307eb2017-01-24 13:05:58 -08001231
stephen hemminger46b4f7f2017-01-24 13:06:11 -08001232 net_device = net_device_to_netvsc_device(ndev);
stephen hemmingere14b4db2017-03-16 12:21:32 -07001233 if (unlikely(!net_device))
1234 return;
1235
1236 if (unlikely(net_device->destroy &&
1237 netvsc_channel_idle(net_device, q_idx)))
stephen hemminger46b4f7f2017-01-24 13:06:11 -08001238 return;
Hank Janssenfceaf242009-07-13 15:34:54 -07001239
Dexuan Cui433e19c2017-01-28 11:46:02 -08001240	/* Prime the cached read index; commit_rd_index() -> hv_signal_on_read() relies on it. */
1241 init_cached_read_index(channel);
1242
stephen hemminger0b307eb2017-01-24 13:05:58 -08001243 while ((desc = get_next_pkt_raw(channel)) != NULL) {
1244 netvsc_process_raw_pkt(device, channel, net_device,
1245 ndev, desc->trans_id, desc);
K. Y. Srinivasan99a50bb2016-07-05 16:52:46 -07001246
stephen hemminger0b307eb2017-01-24 13:05:58 -08001247 put_pkt_raw(channel, desc);
1248 need_to_commit = true;
1249 }
K. Y. Srinivasan99a50bb2016-07-05 16:52:46 -07001250
stephen hemminger0b307eb2017-01-24 13:05:58 -08001251 if (need_to_commit)
1252 commit_rd_index(channel);
Haiyang Zhangc0b558e2016-08-19 14:47:09 -07001253
1254 netvsc_chk_recv_comp(net_device, channel, q_idx);
Hank Janssenfceaf242009-07-13 15:34:54 -07001255}
Haiyang Zhangaf24ce42011-04-21 12:30:40 -07001256
1257/*
Haiyang Zhangb637e022011-04-21 12:30:45 -07001258 * netvsc_device_add - Callback invoked when a device handled by this
 1259 * driver is added
1260 */
stephen hemminger2c7f83c2017-01-24 13:06:09 -08001261int netvsc_device_add(struct hv_device *device,
1262 const struct netvsc_device_info *device_info)
Haiyang Zhangb637e022011-04-21 12:30:45 -07001263{
Vitaly Kuznetsov88098832016-05-13 13:55:25 +02001264 int i, ret = 0;
stephen hemminger2c7f83c2017-01-24 13:06:09 -08001265 int ring_size = device_info->ring_size;
Haiyang Zhangb637e022011-04-21 12:30:45 -07001266 struct netvsc_device *net_device;
Vitaly Kuznetsov88098832016-05-13 13:55:25 +02001267 struct net_device *ndev = hv_get_drvdata(device);
1268 struct net_device_context *net_device_ctx = netdev_priv(ndev);
Haiyang Zhangb637e022011-04-21 12:30:45 -07001269
Vitaly Kuznetsov88098832016-05-13 13:55:25 +02001270 net_device = alloc_net_device();
Dan Carpenterb1c84922014-09-04 14:11:23 +03001271 if (!net_device)
1272 return -ENOMEM;
Haiyang Zhangb637e022011-04-21 12:30:45 -07001273
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001274 net_device->ring_size = ring_size;
1275
Haiyang Zhangb637e022011-04-21 12:30:45 -07001276 /* Open the channel */
K. Y. Srinivasanaae23982011-05-12 19:35:05 -07001277 ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
1278 ring_size * PAGE_SIZE, NULL, 0,
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001279 netvsc_channel_cb, device->channel);
Haiyang Zhangb637e022011-04-21 12:30:45 -07001280
1281 if (ret != 0) {
Haiyang Zhangd9871152011-09-01 12:19:41 -07001282 netdev_err(ndev, "unable to open channel: %d\n", ret);
Haiyang Zhangb637e022011-04-21 12:30:45 -07001283 goto cleanup;
1284 }
1285
1286 /* Channel is opened */
Vitaly Kuznetsov93ba2222016-11-28 18:25:44 +01001287 netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");
Haiyang Zhangb637e022011-04-21 12:30:45 -07001288
Vitaly Kuznetsov88098832016-05-13 13:55:25 +02001289	/* If we're reopening the device, we may have multiple queues; fill the
 1290	 * chn_table with the default channel so it can be used before the
 1291	 * subchannels are opened.
1292 */
1293 for (i = 0; i < VRSS_CHANNEL_MAX; i++)
stephen hemmingerb8b835a2017-01-24 13:06:07 -08001294 net_device->chan_table[i].channel = device->channel;
Vitaly Kuznetsov88098832016-05-13 13:55:25 +02001295
1296 /* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
1297 * populated.
1298 */
1299 wmb();
1300
1301 net_device_ctx->nvdev = net_device;
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001302
Haiyang Zhangb637e022011-04-21 12:30:45 -07001303	/* Connect with the NetVSP */
1304 ret = netvsc_connect_vsp(device);
1305 if (ret != 0) {
Haiyang Zhangd9871152011-09-01 12:19:41 -07001306 netdev_err(ndev,
Haiyang Zhangc909ebb2011-09-01 12:19:40 -07001307 "unable to connect to NetVSP - %d\n", ret);
Haiyang Zhangb637e022011-04-21 12:30:45 -07001308 goto close;
1309 }
1310
1311 return ret;
1312
1313close:
1314 /* Now, we can close the channel safely */
1315 vmbus_close(device->channel);
1316
1317cleanup:
Haiyang Zhangf90251c2014-08-15 19:18:19 +00001318 free_netvsc_device(net_device);
Haiyang Zhangb637e022011-04-21 12:30:45 -07001319
1320 return ret;
1321}