/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

#include "hyperv_net.h"

#define RING_SIZE_MIN 64
#define LINKCHANGE_INT (2 * HZ)

static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");

static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
				NETIF_MSG_LINK | NETIF_MSG_IFUP |
				NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
				NETIF_MSG_TX_ERR;

static int debug = -1;
module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

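/* Work item that pushes the current receive filter mode (promiscuous, or
 * broadcast/multicast/directed) down to the RNDIS filter on the host.
 */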
static void do_set_multicast(struct work_struct *w)
{
	struct net_device_context *ndevctx =
		container_of(w, struct net_device_context, work);
	struct hv_device *device_obj = ndevctx->device_ctx;
	struct net_device *ndev = hv_get_drvdata(device_obj);
	struct netvsc_device *nvdev = rcu_dereference(ndevctx->nvdev);
	struct rndis_device *rdev;

	if (!nvdev)
		return;

	rdev = nvdev->extension;
	if (rdev == NULL)
		return;

	if (ndev->flags & IFF_PROMISC)
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_PROMISCUOUS);
	else
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_BROADCAST |
			NDIS_PACKET_TYPE_ALL_MULTICAST |
			NDIS_PACKET_TYPE_DIRECTED);
}

static void netvsc_set_multicast_list(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);

	schedule_work(&net_device_ctx->work);
}

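/* ndo_open: enable the RNDIS filter and wake the transmit queues;
 * carrier is asserted only if RNDIS reports the link as up.
 */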
static int netvsc_open(struct net_device *net)
{
	struct netvsc_device *nvdev = net_device_to_netvsc_device(net);
	struct rndis_device *rdev;
	int ret = 0;

	netif_carrier_off(net);

	/* Open up the device */
	ret = rndis_filter_open(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	netif_tx_wake_all_queues(net);

	rdev = nvdev->extension;
	if (!rdev->link_state)
		netif_carrier_on(net);

	return ret;
}

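/* ndo_stop: close the RNDIS filter and wait, with bounded retries,
 * for the host to drain the VMBus ring buffers.
 */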
static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	int ret;
	u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20;
	struct vmbus_channel *chn;

	netif_tx_disable(net);

	/* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
	cancel_work_sync(&net_device_ctx->work);
	ret = rndis_filter_close(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to close device (ret %d).\n", ret);
		return ret;
	}

	/* Ensure pending bytes in ring are read */
	while (true) {
		aread = 0;
		for (i = 0; i < nvdev->num_chn; i++) {
			chn = nvdev->chan_table[i].channel;
			if (!chn)
				continue;

			hv_get_ringbuffer_availbytes(&chn->inbound, &aread,
						     &awrite);

			if (aread)
				break;

			hv_get_ringbuffer_availbytes(&chn->outbound, &aread,
						     &awrite);

			if (aread)
				break;
		}

		retry++;
		if (retry > retry_max || aread == 0)
			break;

		msleep(msec);

		if (msec < 1000)
			msec *= 2;
	}

	if (aread) {
		netdev_err(net, "Ring buffer not empty after closing rndis\n");
		ret = -ETIMEDOUT;
	}

	return ret;
}

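/* Append a per-packet info (PPI) record of the given size and type to the
 * RNDIS message and return a pointer to it so the caller can fill it in.
 */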
static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
			   int pkt_type)
{
	struct rndis_packet *rndis_pkt;
	struct rndis_per_packet_info *ppi;

	rndis_pkt = &msg->msg.pkt;
	rndis_pkt->data_offset += ppi_size;

	ppi = (struct rndis_per_packet_info *)((void *)rndis_pkt +
		rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_len);

	ppi->size = ppi_size;
	ppi->type = pkt_type;
	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

	rndis_pkt->per_pkt_info_len += ppi_size;

	return ppi;
}

/* Azure hosts don't support non-TCP port numbers in hashing yet. We compute
 * hash for non-TCP traffic with only IP numbers.
 */
static inline u32 netvsc_get_hash(struct sk_buff *skb, struct sock *sk)
{
	struct flow_keys flow;
	u32 hash;
	static u32 hashrnd __read_mostly;

	net_get_random_once(&hashrnd, sizeof(hashrnd));

	if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
		return 0;

	if (flow.basic.ip_proto == IPPROTO_TCP) {
		return skb_get_hash(skb);
	} else {
		if (flow.basic.n_proto == htons(ETH_P_IP))
			hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
		else
			hash = 0;

		skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
	}

	return hash;
}

static inline int netvsc_get_tx_queue(struct net_device *ndev,
				      struct sk_buff *skb, int old_idx)
{
	const struct net_device_context *ndc = netdev_priv(ndev);
	struct sock *sk = skb->sk;
	int q_idx;

	q_idx = ndc->tx_send_table[netvsc_get_hash(skb, sk) &
				   (VRSS_SEND_TAB_SIZE - 1)];

	/* If queue index changed record the new value */
	if (q_idx != old_idx &&
	    sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
		sk_tx_queue_set(sk, q_idx);

	return q_idx;
}

/*
 * Select queue for transmit.
 *
 * If a valid queue has already been assigned, then use that.
 * Otherwise compute tx queue based on hash and the send table.
 *
 * This is basically similar to default (__netdev_pick_tx) with the added step
 * of using the host send_table when no other queue has been assigned.
 *
 * TODO support XPS - but get_xps_queue not exported
 */
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			       void *accel_priv, select_queue_fallback_t fallback)
{
	unsigned int num_tx_queues = ndev->real_num_tx_queues;
	int q_idx = sk_tx_queue_get(skb->sk);

	if (q_idx < 0 || skb->ooo_okay) {
		/* If forwarding a packet, we use the recorded queue when
		 * available for better cache locality.
		 */
		if (skb_rx_queue_recorded(skb))
			q_idx = skb_get_rx_queue(skb);
		else
			q_idx = netvsc_get_tx_queue(ndev, skb, q_idx);
	}

	while (unlikely(q_idx >= num_tx_queues))
		q_idx -= num_tx_queues;

	return q_idx;
}

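/* Describe a buffer that may span several pages as a sequence of
 * page/offset/length (hv_page_buffer) entries; returns the number of
 * entries used.
 */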
static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
		       struct hv_page_buffer *pb)
{
	int j = 0;

	/* Deal with compound pages by ignoring unused part
	 * of the page.
	 */
	page += (offset >> PAGE_SHIFT);
	offset &= ~PAGE_MASK;

	while (len > 0) {
		unsigned long bytes;

		bytes = PAGE_SIZE - offset;
		if (bytes > len)
			bytes = len;
		pb[j].pfn = page_to_pfn(page);
		pb[j].offset = offset;
		pb[j].len = bytes;

		offset += bytes;
		len -= bytes;

		if (offset == PAGE_SIZE && len) {
			page++;
			offset = 0;
			j++;
		}
	}

	return j + 1;
}

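/* Build the page buffer array for a transmit: RNDIS header and PPIs first,
 * then the skb linear data, then each skb fragment.
 */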
static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
			   struct hv_netvsc_packet *packet,
			   struct hv_page_buffer **page_buf)
{
	struct hv_page_buffer *pb = *page_buf;
	u32 slots_used = 0;
	char *data = skb->data;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* The packet is laid out thus:
	 * 1. hdr: RNDIS header and PPI
	 * 2. skb linear data
	 * 3. skb fragment data
	 */
	if (hdr != NULL)
		slots_used += fill_pg_buf(virt_to_page(hdr),
					  offset_in_page(hdr),
					  len, &pb[slots_used]);

	packet->rmsg_size = len;
	packet->rmsg_pgcnt = slots_used;

	slots_used += fill_pg_buf(virt_to_page(data),
				  offset_in_page(data),
				  skb_headlen(skb), &pb[slots_used]);

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		slots_used += fill_pg_buf(skb_frag_page(frag),
					  frag->page_offset,
					  skb_frag_size(frag), &pb[slots_used]);
	}
	return slots_used;
}

/* Estimate number of page buffers needed to transmit
 * Need at most 2 for RNDIS header plus skb body and fragments.
 */
static unsigned int netvsc_get_slots(const struct sk_buff *skb)
{
	return PFN_UP(offset_in_page(skb->data) + skb_headlen(skb))
		+ skb_shinfo(skb)->nr_frags
		+ 2;
}

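/* Classify the packet as IPv4/IPv6 TCP or UDP for checksum offload decisions. */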
static u32 net_checksum_info(struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV4_TCP;
		else if (ip->protocol == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV4_UDP;
	} else {
		struct ipv6hdr *ip6 = ipv6_hdr(skb);

		if (ip6->nexthdr == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV6_TCP;
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV6_UDP;
	}

	return TRANSPORT_INFO_NOT_IP;
}

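/* ndo_start_xmit: build the RNDIS message (hash, VLAN, LSO or checksum PPIs)
 * in the skb headroom and hand the packet to netvsc_send().
 */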
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet = NULL;
	int ret;
	unsigned int num_data_pgs;
	struct rndis_message *rndis_msg;
	struct rndis_packet *rndis_pkt;
	u32 rndis_msg_size;
	struct rndis_per_packet_info *ppi;
	u32 hash;
	struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
	struct hv_page_buffer *pb = page_buf;

	/* We can only transmit MAX_PAGE_BUFFER_COUNT number
	 * of pages in a single packet. If skb is scattered around
	 * more pages we try linearizing it.
	 */
	num_data_pgs = netvsc_get_slots(skb);
	if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
		++net_device_ctx->eth_stats.tx_scattered;

		if (skb_linearize(skb))
			goto no_memory;

		num_data_pgs = netvsc_get_slots(skb);
		if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
			++net_device_ctx->eth_stats.tx_too_big;
			goto drop;
		}
	}

	/*
	 * Place the rndis header in the skb head room and
	 * the skb->cb will be used for hv_netvsc_packet
	 * structure.
	 */
	ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
	if (ret)
		goto no_memory;

	/* Use the skb control buffer for building up the packet */
	BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
			FIELD_SIZEOF(struct sk_buff, cb));
	packet = (struct hv_netvsc_packet *)skb->cb;

	packet->q_idx = skb_get_queue_mapping(skb);

	packet->total_data_buflen = skb->len;
	packet->total_bytes = skb->len;
	packet->total_packets = 1;

	rndis_msg = (struct rndis_message *)skb->head;

	memset(rndis_msg, 0, RNDIS_AND_PPI_SIZE);

	/* Add the rndis header */
	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
	rndis_msg->msg_len = packet->total_data_buflen;
	rndis_pkt = &rndis_msg->msg.pkt;
	rndis_pkt->data_offset = sizeof(struct rndis_packet);
	rndis_pkt->data_len = packet->total_data_buflen;
	rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);

	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

	hash = skb_get_hash_raw(skb);
	if (hash != 0 && net->real_num_tx_queues > 1) {
		rndis_msg_size += NDIS_HASH_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
				    NBL_HASH_VALUE);
		*(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
	}

	if (skb_vlan_tag_present(skb)) {
		struct ndis_pkt_8021q_info *vlan;

		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
				    IEEE_8021Q_INFO);
		vlan = (struct ndis_pkt_8021q_info *)((void *)ppi +
						      ppi->ppi_offset);
		vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK;
		vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
				VLAN_PRIO_SHIFT;
	}

	if (skb_is_gso(skb)) {
		struct ndis_tcp_lso_info *lso_info;

		rndis_msg_size += NDIS_LSO_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
				    TCP_LARGESEND_PKTINFO);

		lso_info = (struct ndis_tcp_lso_info *)((void *)ppi +
							ppi->ppi_offset);

		lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
		if (skb->protocol == htons(ETH_P_IP)) {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
			ip_hdr(skb)->tot_len = 0;
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		} else {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		}
		lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
		lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) {
			struct ndis_tcp_ip_checksum_info *csum_info;

			rndis_msg_size += NDIS_CSUM_PPI_SIZE;
			ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
					    TCPIP_CHKSUM_PKTINFO);

			csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
									 ppi->ppi_offset);

			csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);

			if (skb->protocol == htons(ETH_P_IP)) {
				csum_info->transmit.is_ipv4 = 1;

				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			} else {
				csum_info->transmit.is_ipv6 = 1;

				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			}
		} else {
			/* Can't do offload of this type of checksum */
			if (skb_checksum_help(skb))
				goto drop;
		}
	}

	/* Start filling in the page buffers with the rndis hdr */
	rndis_msg->msg_len += rndis_msg_size;
	packet->total_data_buflen = rndis_msg->msg_len;
	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
					       skb, packet, &pb);

	/* timestamp packet in software */
	skb_tx_timestamp(skb);
	ret = netvsc_send(net_device_ctx->device_ctx, packet,
			  rndis_msg, &pb, skb);
	if (likely(ret == 0))
		return NETDEV_TX_OK;

	if (ret == -EAGAIN) {
		++net_device_ctx->eth_stats.tx_busy;
		return NETDEV_TX_BUSY;
	}

	if (ret == -ENOSPC)
		++net_device_ctx->eth_stats.tx_no_space;

drop:
	dev_kfree_skb_any(skb);
	net->stats.tx_dropped++;

	return NETDEV_TX_OK;

no_memory:
	++net_device_ctx->eth_stats.tx_no_memory;
	goto drop;
}
/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct hv_device *device_obj,
				struct rndis_message *resp)
{
	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_reconfig *event;
	unsigned long flags;

	net = hv_get_drvdata(device_obj);

	if (!net)
		return;

	ndev_ctx = netdev_priv(net);

	/* Update the physical link speed when changing to another vSwitch */
	if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
		u32 speed;

		speed = *(u32 *)((void *)indicate + indicate->
				 status_buf_offset) / 10000;
		ndev_ctx->speed = speed;
		return;
	}

	/* Handle these link change statuses below */
	if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
	    indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
	    indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
		return;

	if (net->reg_state != NETREG_REGISTERED)
		return;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	event->event = indicate->status;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	list_add_tail(&event->list, &ndev_ctx->reconfig_events);
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	schedule_delayed_work(&ndev_ctx->dwork, 0);
}

static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
					     struct napi_struct *napi,
					     const struct ndis_tcp_ip_checksum_info *csum_info,
					     const struct ndis_pkt_8021q_info *vlan,
					     void *data, u32 buflen)
{
	struct sk_buff *skb;

	skb = napi_alloc_skb(napi, buflen);
	if (!skb)
		return skb;

	/*
	 * Copy to skb. This copy is needed here since the memory pointed by
	 * hv_netvsc_packet cannot be deallocated
	 */
	memcpy(skb_put(skb, buflen), data, buflen);

	skb->protocol = eth_type_trans(skb, net);

	/* skb is already created with CHECKSUM_NONE */
	skb_checksum_none_assert(skb);

	/*
	 * In Linux, the IP checksum is always checked.
	 * Do L4 checksum offload if enabled and present.
	 */
	if (csum_info && (net->features & NETIF_F_RXCSUM)) {
		if (csum_info->receive.tcp_checksum_succeeded ||
		    csum_info->receive.udp_checksum_succeeded)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (vlan) {
		u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT);

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       vlan_tci);
	}

	return skb;
}

/*
 * netvsc_recv_callback - Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct net_device *net,
			 struct vmbus_channel *channel,
			 void *data, u32 len,
			 const struct ndis_tcp_ip_checksum_info *csum_info,
			 const struct ndis_pkt_8021q_info *vlan)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *net_device;
	u16 q_idx = channel->offermsg.offer.sub_channel_index;
	struct netvsc_channel *nvchan;
	struct net_device *vf_netdev;
	struct sk_buff *skb;
	struct netvsc_stats *rx_stats;

	if (net->reg_state != NETREG_REGISTERED)
		return NVSP_STAT_FAIL;

	/*
	 * If necessary, inject this packet into the VF interface.
	 * On Hyper-V, multicast and broadcast packets are only delivered
	 * to the synthetic interface (after subjecting these to
	 * policy filters on the host). Deliver these via the VF
	 * interface in the guest.
	 */
	rcu_read_lock();
	net_device = rcu_dereference(net_device_ctx->nvdev);
	if (unlikely(!net_device))
		goto drop;

	nvchan = &net_device->chan_table[q_idx];
	vf_netdev = rcu_dereference(net_device_ctx->vf_netdev);
	if (vf_netdev && (vf_netdev->flags & IFF_UP))
		net = vf_netdev;

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netvsc_alloc_recv_skb(net, &nvchan->napi,
				    csum_info, vlan, data, len);
	if (unlikely(!skb)) {
drop:
		++net->stats.rx_dropped;
		rcu_read_unlock();
		return NVSP_STAT_FAIL;
	}

	if (net != vf_netdev)
		skb_record_rx_queue(skb, q_idx);

	/*
	 * Even if injecting the packet, record the statistics
	 * on the synthetic device because modifying the VF device
	 * statistics will not work correctly.
	 */
	rx_stats = &nvchan->rx_stats;
	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->packets++;
	rx_stats->bytes += len;

	if (skb->pkt_type == PACKET_BROADCAST)
		++rx_stats->broadcast;
	else if (skb->pkt_type == PACKET_MULTICAST)
		++rx_stats->multicast;
	u64_stats_update_end(&rx_stats->syncp);

	napi_gro_receive(&nvchan->napi, skb);
	rcu_read_unlock();

	return 0;
}

static void netvsc_get_drvinfo(struct net_device *net,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}

static void netvsc_get_channels(struct net_device *net,
				struct ethtool_channels *channel)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);

	if (nvdev) {
		channel->max_combined = nvdev->max_chn;
		channel->combined_count = nvdev->num_chn;
	}
}

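/* Add the RNDIS device back with the requested channel count (the caller has
 * removed the previous instance) and set the kernel's real tx/rx queue counts
 * to match.
 */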
static int netvsc_set_queues(struct net_device *net, struct hv_device *dev,
			     u32 num_chn)
{
	struct netvsc_device_info device_info;
	int ret;

	memset(&device_info, 0, sizeof(device_info));
	device_info.num_chn = num_chn;
	device_info.ring_size = ring_size;
	device_info.max_num_vrss_chns = num_chn;

	ret = rndis_filter_device_add(dev, &device_info);
	if (ret)
		return ret;

	ret = netif_set_real_num_tx_queues(net, num_chn);
	if (ret)
		return ret;

	ret = netif_set_real_num_rx_queues(net, num_chn);

	return ret;
}

static int netvsc_set_channels(struct net_device *net,
			       struct ethtool_channels *channels)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	unsigned int count = channels->combined_count;
	bool was_running;
	int ret;

	/* We do not support separate count for rx, tx, or other */
	if (count == 0 ||
	    channels->rx_count || channels->tx_count || channels->other_count)
		return -EINVAL;

	if (count > net->num_tx_queues || count > net->num_rx_queues)
		return -EINVAL;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
		return -EINVAL;

	if (count > nvdev->max_chn)
		return -EINVAL;

	was_running = netif_running(net);
	if (was_running) {
		ret = netvsc_close(net);
		if (ret)
			return ret;
	}

	rndis_filter_device_remove(dev, nvdev);

	ret = netvsc_set_queues(net, dev, count);
	if (ret == 0)
		nvdev->num_chn = count;
	else
		netvsc_set_queues(net, dev, nvdev->num_chn);

	if (was_running)
		ret = netvsc_open(net);

	/* We may have missed link change notifications */
	schedule_delayed_work(&net_device_ctx->dwork, 0);

	return ret;
}

static bool
netvsc_validate_ethtool_ss_cmd(const struct ethtool_link_ksettings *cmd)
{
	struct ethtool_link_ksettings diff1 = *cmd;
	struct ethtool_link_ksettings diff2 = {};

	diff1.base.speed = 0;
	diff1.base.duplex = 0;
	/* advertising and cmd are usually set */
	ethtool_link_ksettings_zero_link_mode(&diff1, advertising);
	diff1.base.cmd = 0;
	/* We set port to PORT_OTHER */
	diff2.base.port = PORT_OTHER;

	return !memcmp(&diff1, &diff2, sizeof(diff1));
}

static void netvsc_init_settings(struct net_device *dev)
{
	struct net_device_context *ndc = netdev_priv(dev);

	ndc->speed = SPEED_UNKNOWN;
	ndc->duplex = DUPLEX_FULL;
}

static int netvsc_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);

	cmd->base.speed = ndc->speed;
	cmd->base.duplex = ndc->duplex;
	cmd->base.port = PORT_OTHER;

	return 0;
}

static int netvsc_set_link_ksettings(struct net_device *dev,
				     const struct ethtool_link_ksettings *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);
	u32 speed;

	speed = cmd->base.speed;
	if (!ethtool_validate_speed(speed) ||
	    !ethtool_validate_duplex(cmd->base.duplex) ||
	    !netvsc_validate_ethtool_ss_cmd(cmd))
		return -EINVAL;

	ndc->speed = speed;
	ndc->duplex = cmd->base.duplex;

	return 0;
}

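/* Changing the MTU requires tearing down and re-creating the RNDIS device;
 * the interface is closed around the operation if it was running.
 */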
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	struct hv_device *hdev = ndevctx->device_ctx;
	struct netvsc_device_info device_info;
	bool was_running;
	int ret = 0;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	was_running = netif_running(ndev);
	if (was_running) {
		ret = netvsc_close(ndev);
		if (ret)
			return ret;
	}

	memset(&device_info, 0, sizeof(device_info));
	device_info.ring_size = ring_size;
	device_info.num_chn = nvdev->num_chn;
	device_info.max_num_vrss_chns = nvdev->num_chn;

	rndis_filter_device_remove(hdev, nvdev);

	/* 'nvdev' has been freed in rndis_filter_device_remove() ->
	 * netvsc_device_remove () -> free_netvsc_device().
	 * We mustn't access it before it's re-created in
	 * rndis_filter_device_add() -> netvsc_device_add().
	 */

	ndev->mtu = mtu;

	rndis_filter_device_add(hdev, &device_info);

	if (was_running)
		ret = netvsc_open(ndev);

	/* We may have missed link change notifications */
	schedule_delayed_work(&ndevctx->dwork, 0);

	return ret;
}

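/* Aggregate the per-channel tx/rx counters into the standard rtnl stats. */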
static void netvsc_get_stats64(struct net_device *net,
			       struct rtnl_link_stats64 *t)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
	int i;

	if (!nvdev)
		return;

	for (i = 0; i < nvdev->num_chn; i++) {
		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
		const struct netvsc_stats *stats;
		u64 packets, bytes, multicast;
		unsigned int start;

		stats = &nvchan->tx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		t->tx_bytes += bytes;
		t->tx_packets += packets;

		stats = &nvchan->rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
			multicast = stats->multicast + stats->broadcast;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		t->rx_bytes += bytes;
		t->rx_packets += packets;
		t->multicast += multicast;
	}

	t->tx_dropped = net->stats.tx_dropped;
	t->tx_errors = net->stats.tx_errors;

	t->rx_dropped = net->stats.rx_dropped;
	t->rx_errors = net->stats.rx_errors;
}

static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;
	char save_adr[ETH_ALEN];
	unsigned char save_aatype;
	int err;

	memcpy(save_adr, ndev->dev_addr, ETH_ALEN);
	save_aatype = ndev->addr_assign_type;

	err = eth_mac_addr(ndev, p);
	if (err != 0)
		return err;

	err = rndis_filter_set_device_mac(ndev, addr->sa_data);
	if (err != 0) {
		/* roll back to saved MAC */
		memcpy(ndev->dev_addr, save_adr, ETH_ALEN);
		ndev->addr_assign_type = save_aatype;
	}

	return err;
}

static const struct {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} netvsc_stats[] = {
	{ "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
	{ "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
	{ "tx_no_space", offsetof(struct netvsc_ethtool_stats, tx_no_space) },
	{ "tx_too_big", offsetof(struct netvsc_ethtool_stats, tx_too_big) },
	{ "tx_busy", offsetof(struct netvsc_ethtool_stats, tx_busy) },
};

#define NETVSC_GLOBAL_STATS_LEN	ARRAY_SIZE(netvsc_stats)

/* 4 statistics per queue (rx/tx packets/bytes) */
#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4)

static int netvsc_get_sset_count(struct net_device *dev, int string_set)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev);

	if (!nvdev)
		return -ENODEV;

	switch (string_set) {
	case ETH_SS_STATS:
		return NETVSC_GLOBAL_STATS_LEN + NETVSC_QUEUE_STATS_LEN(nvdev);
	default:
		return -EINVAL;
	}
}

static void netvsc_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev);
	const void *nds = &ndc->eth_stats;
	const struct netvsc_stats *qstats;
	unsigned int start;
	u64 packets, bytes;
	int i, j;

	if (!nvdev)
		return;

	for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++)
		data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);

	for (j = 0; j < nvdev->num_chn; j++) {
		qstats = &nvdev->chan_table[j].tx_stats;

		do {
			start = u64_stats_fetch_begin_irq(&qstats->syncp);
			packets = qstats->packets;
			bytes = qstats->bytes;
		} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
		data[i++] = packets;
		data[i++] = bytes;

		qstats = &nvdev->chan_table[j].rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&qstats->syncp);
			packets = qstats->packets;
			bytes = qstats->bytes;
		} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
		data[i++] = packets;
		data[i++] = bytes;
	}
}

static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev);
	u8 *p = data;
	int i;

	if (!nvdev)
		return;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++)
			memcpy(p + i * ETH_GSTRING_LEN,
			       netvsc_stats[i].name, ETH_GSTRING_LEN);

		p += i * ETH_GSTRING_LEN;
		for (i = 0; i < nvdev->num_chn; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}

		break;
	}
}

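/* Report which header fields feed the RSS hash for each flow type. */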
static int
netvsc_get_rss_hash_opts(struct netvsc_device *nvdev,
			 struct ethtool_rxnfc *info)
{
	info->data = RXH_IP_SRC | RXH_IP_DST;

	switch (info->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
	case IPV4_FLOW:
	case IPV6_FLOW:
		break;
	default:
		info->data = 0;
		break;
	}

	return 0;
}

static int
netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		 u32 *rules)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev);

	if (!nvdev)
		return -ENODEV;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = nvdev->num_chn;
		return 0;

	case ETHTOOL_GRXFH:
		return netvsc_get_rss_hash_opts(nvdev, info);
	}
	return -EOPNOTSUPP;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void netvsc_poll_controller(struct net_device *net)
{
	/* As netvsc_start_xmit() works synchronously we don't have to
	 * trigger anything here.
	 */
}
#endif

static u32 netvsc_get_rxfh_key_size(struct net_device *dev)
{
	return NETVSC_HASH_KEYLEN;
}

static u32 netvsc_rss_indir_size(struct net_device *dev)
{
	return ITAB_NUM;
}

static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *ndev = rcu_dereference(ndc->nvdev);
	struct rndis_device *rndis_dev;
	int i;

	if (!ndev)
		return -ENODEV;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;	/* Toeplitz */

	rndis_dev = ndev->extension;
	if (indir) {
		for (i = 0; i < ITAB_NUM; i++)
			indir[i] = rndis_dev->ind_table[i];
	}

	if (key)
		memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN);

	return 0;
}

static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
	struct rndis_device *rndis_dev;
	int i;

	if (!ndev)
		return -ENODEV;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	rndis_dev = ndev->extension;
	if (indir) {
		for (i = 0; i < ITAB_NUM; i++)
			if (indir[i] >= dev->num_rx_queues)
				return -EINVAL;

		for (i = 0; i < ITAB_NUM; i++)
			rndis_dev->ind_table[i] = indir[i];
	}

	if (!key) {
		if (!indir)
			return 0;

		key = rndis_dev->rss_key;
	}

	return rndis_filter_set_rss_param(rndis_dev, key, ndev->num_chn);
}

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo	= netvsc_get_drvinfo,
	.get_link	= ethtool_op_get_link,
	.get_ethtool_stats = netvsc_get_ethtool_stats,
	.get_sset_count = netvsc_get_sset_count,
	.get_strings	= netvsc_get_strings,
	.get_channels   = netvsc_get_channels,
	.set_channels   = netvsc_set_channels,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_rxnfc	= netvsc_get_rxnfc,
	.get_rxfh_key_size = netvsc_get_rxfh_key_size,
	.get_rxfh_indir_size = netvsc_rss_indir_size,
	.get_rxfh	= netvsc_get_rxfh,
	.set_rxfh	= netvsc_set_rxfh,
	.get_link_ksettings = netvsc_get_link_ksettings,
	.set_link_ksettings = netvsc_set_link_ksettings,
};

static const struct net_device_ops device_ops = {
	.ndo_open =			netvsc_open,
	.ndo_stop =			netvsc_close,
	.ndo_start_xmit =		netvsc_start_xmit,
	.ndo_set_rx_mode =		netvsc_set_multicast_list,
	.ndo_change_mtu =		netvsc_change_mtu,
	.ndo_validate_addr =		eth_validate_addr,
	.ndo_set_mac_address =		netvsc_set_mac_addr,
	.ndo_select_queue =		netvsc_select_queue,
	.ndo_get_stats64 =		netvsc_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller =		netvsc_poll_controller,
#endif
};

/*
 * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate link
 * down/up sequence. In case of RNDIS_STATUS_MEDIA_CONNECT when carrier is
 * present send GARP packet to network peers with netif_notify_peers().
 */
static void netvsc_link_change(struct work_struct *w)
{
	struct net_device_context *ndev_ctx =
		container_of(w, struct net_device_context, dwork.work);
	struct hv_device *device_obj = ndev_ctx->device_ctx;
	struct net_device *net = hv_get_drvdata(device_obj);
	struct netvsc_device *net_device;
	struct rndis_device *rdev;
	struct netvsc_reconfig *event = NULL;
	bool notify = false, reschedule = false;
	unsigned long flags, next_reconfig, delay;

	rtnl_lock();
	net_device = rtnl_dereference(ndev_ctx->nvdev);
	if (!net_device)
		goto out_unlock;

	rdev = net_device->extension;

	next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
	if (time_is_after_jiffies(next_reconfig)) {
		/* link_watch only sends one notification with current state
		 * per second, avoid doing reconfig more frequently. Handle
		 * wrap around.
		 */
		delay = next_reconfig - jiffies;
		delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
		schedule_delayed_work(&ndev_ctx->dwork, delay);
		goto out_unlock;
	}
	ndev_ctx->last_reconfig = jiffies;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	if (!list_empty(&ndev_ctx->reconfig_events)) {
		event = list_first_entry(&ndev_ctx->reconfig_events,
					 struct netvsc_reconfig, list);
		list_del(&event->list);
		reschedule = !list_empty(&ndev_ctx->reconfig_events);
	}
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	if (!event)
		goto out_unlock;

	switch (event->event) {
		/* Only the following events are possible due to the check in
		 * netvsc_linkstatus_callback()
		 */
	case RNDIS_STATUS_MEDIA_CONNECT:
		if (rdev->link_state) {
			rdev->link_state = false;
			netif_carrier_on(net);
			netif_tx_wake_all_queues(net);
		} else {
			notify = true;
		}
		kfree(event);
		break;
	case RNDIS_STATUS_MEDIA_DISCONNECT:
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netif_tx_stop_all_queues(net);
		}
		kfree(event);
		break;
	case RNDIS_STATUS_NETWORK_CHANGE:
		/* Only makes sense if carrier is present */
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netif_tx_stop_all_queues(net);
			event->event = RNDIS_STATUS_MEDIA_CONNECT;
			spin_lock_irqsave(&ndev_ctx->lock, flags);
			list_add(&event->list, &ndev_ctx->reconfig_events);
			spin_unlock_irqrestore(&ndev_ctx->lock, flags);
1332 reschedule = true;
1333 }
1334 break;
Haiyang Zhang891de742014-02-12 16:54:27 -08001335 }
1336
1337 rtnl_unlock();
1338
1339 if (notify)
1340 netdev_notify_peers(net);
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001341
1342 /* link_watch only sends one notification with current state per
1343 * second, handle next reconfig event in 2 seconds.
1344 */
1345 if (reschedule)
1346 schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
Vitaly Kuznetsov1bdcec82016-05-13 13:55:21 +02001347
1348 return;
1349
1350out_unlock:
1351 rtnl_unlock();
Haiyang Zhangc996edc2011-04-06 15:18:00 -07001352}
1353
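/* Walk all net devices in init_net and return the netvsc device whose
 * permanent MAC address matches @mac, or NULL if none is found.
 * Must be called with the RTNL lock held.
 */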
static struct net_device *get_netvsc_bymac(const u8 *mac)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(&init_net, dev) {
		if (dev->netdev_ops != &device_ops)
			continue;	/* not a netvsc device */

		if (ether_addr_equal(mac, dev->perm_addr))
			return dev;
	}

	return NULL;
}

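/* Return the netvsc device that currently has @vf_netdev recorded as its
 * paired VF, or NULL if no such device exists. Must be called under RTNL.
 */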
static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(&init_net, dev) {
		struct net_device_context *net_device_ctx;

		if (dev->netdev_ops != &device_ops)
			continue;	/* not a netvsc device */

		net_device_ctx = netdev_priv(dev);
		if (net_device_ctx->nvdev == NULL)
			continue;	/* device is removed */

		if (rtnl_dereference(net_device_ctx->vf_netdev) == vf_netdev)
			return dev;	/* a match */
	}

	return NULL;
}

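/* Called for NETDEV_REGISTER of a candidate VF: locate the synthetic
 * device with the same permanent MAC, take module and netdev references,
 * and record the VF pointer for later data path switching.
 */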
static int netvsc_register_vf(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct net_device_context *net_device_ctx;
	struct netvsc_device *netvsc_dev;

	if (vf_netdev->addr_len != ETH_ALEN)
		return NOTIFY_DONE;

	/*
	 * We will use the MAC address to locate the synthetic interface to
	 * associate with the VF interface. If we don't find a matching
	 * synthetic interface, move on.
	 */
	ndev = get_netvsc_bymac(vf_netdev->perm_addr);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
	if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
		return NOTIFY_DONE;

	netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
	/*
	 * Take a reference on the module.
	 */
	try_module_get(THIS_MODULE);

	dev_hold(vf_netdev);
	rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);
	return NOTIFY_OK;
}

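/* Called for NETDEV_UP of a paired VF: open the RNDIS device and ask the
 * host to steer traffic through the VF instead of the synthetic path.
 */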
static int netvsc_vf_up(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct netvsc_device *netvsc_dev;
	struct net_device_context *net_device_ctx;

	ndev = get_netvsc_byref(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);

	netdev_info(ndev, "VF up: %s\n", vf_netdev->name);

	/*
	 * Open the device before switching data path.
	 */
	rndis_filter_open(netvsc_dev);

	/*
	 * notify the host to switch the data path.
	 */
	netvsc_switch_datapath(ndev, true);
	netdev_info(ndev, "Data path switched to VF: %s\n", vf_netdev->name);

	netif_carrier_off(ndev);

	/* Now notify peers through VF device. */
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, vf_netdev);

	return NOTIFY_OK;
}

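/* Called for NETDEV_DOWN of a paired VF: switch the data path back to the
 * synthetic device, close the RNDIS device and restore its carrier.
 */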
static int netvsc_vf_down(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct netvsc_device *netvsc_dev;
	struct net_device_context *net_device_ctx;

	ndev = get_netvsc_byref(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);

	netdev_info(ndev, "VF down: %s\n", vf_netdev->name);
	netvsc_switch_datapath(ndev, false);
	netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name);
	rndis_filter_close(netvsc_dev);
	netif_carrier_on(ndev);

	/* Now notify peers through netvsc device. */
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, ndev);

	return NOTIFY_OK;
}

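/* Called for NETDEV_UNREGISTER of a paired VF: clear the VF pointer and
 * drop the references taken in netvsc_register_vf().
 */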
static int netvsc_unregister_vf(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct net_device_context *net_device_ctx;

	ndev = get_netvsc_byref(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);

	netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);

	RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
	dev_put(vf_netdev);
	module_put(THIS_MODULE);
	return NOTIFY_OK;
}

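/* Probe a new vmbus network device: allocate the net_device, create the
 * RNDIS/netvsc channels, set up features and MTU limits, then register
 * with the network stack.
 */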
static int netvsc_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct net_device *net = NULL;
	struct net_device_context *net_device_ctx;
	struct netvsc_device_info device_info;
	struct netvsc_device *nvdev;
	int ret;

	net = alloc_etherdev_mq(sizeof(struct net_device_context),
				VRSS_CHANNEL_MAX);
	if (!net)
		return -ENOMEM;

	netif_carrier_off(net);

	netvsc_init_settings(net);

	net_device_ctx = netdev_priv(net);
	net_device_ctx->device_ctx = dev;
	net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
	if (netif_msg_probe(net_device_ctx))
		netdev_dbg(net, "netvsc msg_enable: %d\n",
			   net_device_ctx->msg_enable);

	hv_set_drvdata(dev, net);

	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
	INIT_WORK(&net_device_ctx->work, do_set_multicast);

	spin_lock_init(&net_device_ctx->lock);
	INIT_LIST_HEAD(&net_device_ctx->reconfig_events);

	net->netdev_ops = &device_ops;
	net->ethtool_ops = &ethtool_ops;
	SET_NETDEV_DEV(net, &dev->device);

	/* We always need headroom for rndis header */
	net->needed_headroom = RNDIS_AND_PPI_SIZE;

	/* Notify the netvsc driver of the new device */
	memset(&device_info, 0, sizeof(device_info));
	device_info.ring_size = ring_size;
	device_info.num_chn = VRSS_CHANNEL_DEFAULT;
	ret = rndis_filter_device_add(dev, &device_info);
	if (ret != 0) {
		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
		free_netdev(net);
		hv_set_drvdata(dev, NULL);
		return ret;
	}
	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);

	/* hw_features computed in rndis_filter_device_add */
	net->features = net->hw_features |
		NETIF_F_HIGHDMA | NETIF_F_SG |
		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	net->vlan_features = net->features;

	/* RCU not necessary here, device not registered */
	nvdev = net_device_ctx->nvdev;
	netif_set_real_num_tx_queues(net, nvdev->num_chn);
	netif_set_real_num_rx_queues(net, nvdev->num_chn);

	/* MTU range: 68 - 1500 or 65521 */
	net->min_mtu = NETVSC_MTU_MIN;
	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
		net->max_mtu = NETVSC_MTU - ETH_HLEN;
	else
		net->max_mtu = ETH_DATA_LEN;

	ret = register_netdev(net);
	if (ret != 0) {
		pr_err("Unable to register netdev.\n");
		rndis_filter_device_remove(dev, nvdev);
		free_netdev(net);
	}

	return ret;
}

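/* Tear down on device removal: stop outstanding work, remove the RNDIS
 * device under RTNL (which also blocks MTU/channel changes), unregister
 * the net_device and free it.
 */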
static int netvsc_remove(struct hv_device *dev)
{
	struct net_device *net;
	struct net_device_context *ndev_ctx;

	net = hv_get_drvdata(dev);

	if (net == NULL) {
		dev_err(&dev->device, "No net device to remove\n");
		return 0;
	}

	ndev_ctx = netdev_priv(net);

	netif_device_detach(net);

	cancel_delayed_work_sync(&ndev_ctx->dwork);
	cancel_work_sync(&ndev_ctx->work);

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed. Also blocks mtu and channel changes.
	 */
	rtnl_lock();
	rndis_filter_device_remove(dev, ndev_ctx->nvdev);
	rtnl_unlock();

	unregister_netdev(net);

	hv_set_drvdata(dev, NULL);

	free_netdev(net);
	return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
	/* Network guid */
	{ HV_NIC_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only one */
static struct hv_driver netvsc_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = netvsc_probe,
	.remove = netvsc_remove,
};

/*
 * On Hyper-V, every VF interface is matched with a corresponding
 * synthetic interface. The synthetic interface is presented first
 * to the guest. When the corresponding VF instance is registered,
 * we will take care of switching the data path.
 */
static int netvsc_netdev_event(struct notifier_block *this,
			       unsigned long event, void *ptr)
{
	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);

	/* Skip our own events */
	if (event_dev->netdev_ops == &device_ops)
		return NOTIFY_DONE;

	/* Avoid non-Ethernet type devices */
	if (event_dev->type != ARPHRD_ETHER)
		return NOTIFY_DONE;

	/* Avoid Vlan dev with same MAC registering as VF */
	if (is_vlan_dev(event_dev))
		return NOTIFY_DONE;

	/* Avoid Bonding master dev with same MAC registering as VF */
	if ((event_dev->priv_flags & IFF_BONDING) &&
	    (event_dev->flags & IFF_MASTER))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
		return netvsc_register_vf(event_dev);
	case NETDEV_UNREGISTER:
		return netvsc_unregister_vf(event_dev);
	case NETDEV_UP:
		return netvsc_vf_up(event_dev);
	case NETDEV_DOWN:
		return netvsc_vf_down(event_dev);
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block netvsc_netdev_notifier = {
	.notifier_call = netvsc_netdev_event,
};

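/* Module unload: drop the netdev notifier and unregister from vmbus. */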
static void __exit netvsc_drv_exit(void)
{
	unregister_netdevice_notifier(&netvsc_netdev_notifier);
	vmbus_driver_unregister(&netvsc_drv);
}

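/* Module load: clamp ring_size to the supported minimum, register the
 * vmbus driver and the netdev notifier used for VF pairing.
 */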
static int __init netvsc_drv_init(void)
{
	int ret;

	if (ring_size < RING_SIZE_MIN) {
		ring_size = RING_SIZE_MIN;
		pr_info("Increased ring_size to %d (min allowed)\n",
			ring_size);
	}
	ret = vmbus_driver_register(&netvsc_drv);

	if (ret)
		return ret;

	register_netdevice_notifier(&netvsc_netdev_notifier);
	return 0;
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);