// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/netpoll.h>

#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

#include "hyperv_net.h"

#define RING_SIZE_MIN	64
#define RETRY_US_LO	5000
#define RETRY_US_HI	10000
#define RETRY_MAX	2000	/* >10 sec */
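/* A worked check of the bound above (based on the usleep_range() call in
 * netvsc_wait_until_empty() below): RETRY_MAX retries at 5-10 ms each
 * give a total wait of roughly 10-20 seconds before -ETIMEDOUT.
 */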

#define LINKCHANGE_INT (2 * HZ)
#define VF_TAKEOVER_INT (HZ / 10)

static unsigned int ring_size __ro_after_init = 128;
module_param(ring_size, uint, 0444);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
unsigned int netvsc_ring_bytes __ro_after_init;

static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
				NETIF_MSG_LINK | NETIF_MSG_IFUP |
				NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
				NETIF_MSG_TX_ERR;

static int debug = -1;
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
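/* Usage sketch (values are illustrative only): both parameters are
 * read-only at runtime (mode 0444), so they must be given at load time,
 * e.g.:
 *
 *	modprobe hv_netvsc ring_size=512 debug=16
 */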

static LIST_HEAD(netvsc_dev_list);

static void netvsc_change_rx_flags(struct net_device *net, int change)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	int inc;

	if (!vf_netdev)
		return;

	if (change & IFF_PROMISC) {
		inc = (net->flags & IFF_PROMISC) ? 1 : -1;
		dev_set_promiscuity(vf_netdev, inc);
	}

	if (change & IFF_ALLMULTI) {
		inc = (net->flags & IFF_ALLMULTI) ? 1 : -1;
		dev_set_allmulti(vf_netdev, inc);
	}
}

static void netvsc_set_rx_mode(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev;
	struct netvsc_device *nvdev;

	rcu_read_lock();
	vf_netdev = rcu_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev) {
		dev_uc_sync(vf_netdev, net);
		dev_mc_sync(vf_netdev, net);
	}

	nvdev = rcu_dereference(ndev_ctx->nvdev);
	if (nvdev)
		rndis_filter_update(nvdev);
	rcu_read_unlock();
}

static void netvsc_tx_enable(struct netvsc_device *nvscdev,
			     struct net_device *ndev)
{
	nvscdev->tx_disable = false;
	virt_wmb(); /* ensure queue wake up mechanism is on */

	netif_tx_wake_all_queues(ndev);
}

static int netvsc_open(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
	struct rndis_device *rdev;
	int ret = 0;

	netif_carrier_off(net);

	/* Open up the device */
	ret = rndis_filter_open(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	rdev = nvdev->extension;
	if (!rdev->link_state) {
		netif_carrier_on(net);
		netvsc_tx_enable(nvdev, net);
	}

	if (vf_netdev) {
		/* Setting the synthetic device up transparently sets
		 * the slave as up. If open fails, then the slave will
		 * still be offline (and not used).
		 */
		ret = dev_open(vf_netdev, NULL);
		if (ret)
			netdev_warn(net,
				    "unable to open slave: %s: %d\n",
				    vf_netdev->name, ret);
	}
	return 0;
}

static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
{
	unsigned int retry = 0;
	int i;

	/* Ensure pending bytes in ring are read */
	for (;;) {
		u32 aread = 0;

		for (i = 0; i < nvdev->num_chn; i++) {
			struct vmbus_channel *chn
				= nvdev->chan_table[i].channel;

			if (!chn)
				continue;

			/* make sure receive not running now */
			napi_synchronize(&nvdev->chan_table[i].napi);

			aread = hv_get_bytes_to_read(&chn->inbound);
			if (aread)
				break;

			aread = hv_get_bytes_to_read(&chn->outbound);
			if (aread)
				break;
		}

		if (aread == 0)
			return 0;

		if (++retry > RETRY_MAX)
			return -ETIMEDOUT;

		usleep_range(RETRY_US_LO, RETRY_US_HI);
	}
}

static void netvsc_tx_disable(struct netvsc_device *nvscdev,
			      struct net_device *ndev)
{
	if (nvscdev) {
		nvscdev->tx_disable = true;
		virt_wmb(); /* ensure txq will not wake up after stop */
	}

	netif_tx_disable(ndev);
}

static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct net_device *vf_netdev
		= rtnl_dereference(net_device_ctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	int ret;

	netvsc_tx_disable(nvdev, net);

	/* No need to close rndis filter if it is removed already */
	if (!nvdev)
		return 0;

	ret = rndis_filter_close(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to close device (ret %d).\n", ret);
		return ret;
	}

	ret = netvsc_wait_until_empty(nvdev);
	if (ret)
		netdev_err(net, "Ring buffer not empty after closing rndis\n");

	if (vf_netdev)
		dev_close(vf_netdev);

	return ret;
}

static inline void *init_ppi_data(struct rndis_message *msg,
				  u32 ppi_size, u32 pkt_type)
{
	struct rndis_packet *rndis_pkt = &msg->msg.pkt;
	struct rndis_per_packet_info *ppi;

	rndis_pkt->data_offset += ppi_size;
	ppi = (void *)rndis_pkt + rndis_pkt->per_pkt_info_offset
		+ rndis_pkt->per_pkt_info_len;

	ppi->size = ppi_size;
	ppi->type = pkt_type;
	ppi->internal = 0;
	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

	rndis_pkt->per_pkt_info_len += ppi_size;

	return ppi + 1;
}
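/* A rough sketch (not authoritative) of the layout that successive
 * init_ppi_data() calls build up, relative to the rndis_packet header:
 *
 *	+------------------------+ <- rndis_pkt
 *	| struct rndis_packet    |
 *	+------------------------+ <- per_pkt_info_offset
 *	| PPI #1 header | data   |
 *	+------------------------+
 *	| PPI #2 header | data   |
 *	+------------------------+ <- data_offset (payload follows)
 *
 * The returned pointer (ppi + 1) is where the caller writes the PPI
 * payload, e.g. the hash value or VLAN tag in netvsc_start_xmit().
 */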

/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
 * packets. We can use ethtool to change UDP hash level when necessary.
 */
static inline u32 netvsc_get_hash(
	struct sk_buff *skb,
	const struct net_device_context *ndc)
{
	struct flow_keys flow;
	u32 hash, pkt_proto = 0;
	static u32 hashrnd __read_mostly;

	net_get_random_once(&hashrnd, sizeof(hashrnd));

	if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
		return 0;

	switch (flow.basic.ip_proto) {
	case IPPROTO_TCP:
		if (flow.basic.n_proto == htons(ETH_P_IP))
			pkt_proto = HV_TCP4_L4HASH;
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			pkt_proto = HV_TCP6_L4HASH;

		break;

	case IPPROTO_UDP:
		if (flow.basic.n_proto == htons(ETH_P_IP))
			pkt_proto = HV_UDP4_L4HASH;
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			pkt_proto = HV_UDP6_L4HASH;

		break;
	}

	if (pkt_proto & ndc->l4_hash) {
		return skb_get_hash(skb);
	} else {
		if (flow.basic.n_proto == htons(ETH_P_IP))
			hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
		else
			hash = 0;

		skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
	}

	return hash;
}
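/* Usage sketch (user-space side, illustrative): the UDP hash level that
 * the comment above refers to can be switched with ethtool, which toggles
 * the HV_UDP4_L4HASH/HV_UDP6_L4HASH bits consulted here, e.g.:
 *
 *	ethtool -N eth0 rx-flow-hash udp4 sdfn	# hash on addresses + ports
 *	ethtool -N eth0 rx-flow-hash udp4 sd	# hash on addresses only (L3)
 */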

static inline int netvsc_get_tx_queue(struct net_device *ndev,
				      struct sk_buff *skb, int old_idx)
{
	const struct net_device_context *ndc = netdev_priv(ndev);
	struct sock *sk = skb->sk;
	int q_idx;

	q_idx = ndc->tx_table[netvsc_get_hash(skb, ndc) &
			      (VRSS_SEND_TAB_SIZE - 1)];

	/* If the queue index changed, record the new value */
	if (q_idx != old_idx &&
	    sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
		sk_tx_queue_set(sk, q_idx);

	return q_idx;
}
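/* Worked example (assuming VRSS_SEND_TAB_SIZE is 16 as defined in
 * hyperv_net.h; it must be a power of two for the mask above to work):
 * with a flow hash of 0x9e3779b9, the index is 0x9e3779b9 & 0xf = 9, so
 * the packet goes to whichever host-assigned queue tx_table[9] holds.
 */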

/*
 * Select queue for transmit.
 *
 * If a valid queue has already been assigned, then use that.
 * Otherwise compute tx queue based on hash and the send table.
 *
 * This is basically similar to default (netdev_pick_tx) with the added step
 * of using the host send_table when no other queue has been assigned.
 *
 * TODO support XPS - but get_xps_queue not exported
 */
static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
{
	int q_idx = sk_tx_queue_get(skb->sk);

	if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) {
		/* If forwarding a packet, we use the recorded queue when
		 * available for better cache locality.
		 */
		if (skb_rx_queue_recorded(skb))
			q_idx = skb_get_rx_queue(skb);
		else
			q_idx = netvsc_get_tx_queue(ndev, skb, q_idx);
	}

	return q_idx;
}

static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			       struct net_device *sb_dev)
{
	struct net_device_context *ndc = netdev_priv(ndev);
	struct net_device *vf_netdev;
	u16 txq;

	rcu_read_lock();
	vf_netdev = rcu_dereference(ndc->vf_netdev);
	if (vf_netdev) {
		const struct net_device_ops *vf_ops = vf_netdev->netdev_ops;

		if (vf_ops->ndo_select_queue)
			txq = vf_ops->ndo_select_queue(vf_netdev, skb, sb_dev);
		else
			txq = netdev_pick_tx(vf_netdev, skb, NULL);

		/* Record the queue selected by VF so that it can be
		 * used for common case where VF has more queues than
		 * the synthetic device.
		 */
		qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq;
	} else {
		txq = netvsc_pick_tx(ndev, skb);
	}
	rcu_read_unlock();

	while (unlikely(txq >= ndev->real_num_tx_queues))
		txq -= ndev->real_num_tx_queues;

	return txq;
}

static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
		       struct hv_page_buffer *pb)
{
	int j = 0;

	/* Deal with compound pages by ignoring unused part
	 * of the page.
	 */
	page += (offset >> PAGE_SHIFT);
	offset &= ~PAGE_MASK;

	while (len > 0) {
		unsigned long bytes;

		bytes = PAGE_SIZE - offset;
		if (bytes > len)
			bytes = len;
		pb[j].pfn = page_to_pfn(page);
		pb[j].offset = offset;
		pb[j].len = bytes;

		offset += bytes;
		len -= bytes;

		if (offset == PAGE_SIZE && len) {
			page++;
			offset = 0;
			j++;
		}
	}

	return j + 1;
}
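/* Worked example (assuming PAGE_SIZE == 4096): a 6000-byte buffer that
 * starts 3000 bytes into a page is split into three page buffers:
 *
 *	pb[0] = { pfn N,     offset 3000, len 1096 }
 *	pb[1] = { pfn N + 1, offset 0,    len 4096 }
 *	pb[2] = { pfn N + 2, offset 0,    len  808 }
 *
 * and the function returns j + 1 = 3, the number of slots consumed.
 */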

static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
			   struct hv_netvsc_packet *packet,
			   struct hv_page_buffer *pb)
{
	u32 slots_used = 0;
	char *data = skb->data;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* The packet is laid out thus:
	 * 1. hdr: RNDIS header and PPI
	 * 2. skb linear data
	 * 3. skb fragment data
	 */
	slots_used += fill_pg_buf(virt_to_page(hdr),
				  offset_in_page(hdr),
				  len, &pb[slots_used]);

	packet->rmsg_size = len;
	packet->rmsg_pgcnt = slots_used;

	slots_used += fill_pg_buf(virt_to_page(data),
				  offset_in_page(data),
				  skb_headlen(skb), &pb[slots_used]);

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		slots_used += fill_pg_buf(skb_frag_page(frag),
					  frag->page_offset,
					  skb_frag_size(frag), &pb[slots_used]);
	}
	return slots_used;
}

static int count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip unused frames from start of page */
		offset &= ~PAGE_MASK;
		pages += PFN_UP(offset + size);
	}
	return pages;
}

static int netvsc_get_slots(struct sk_buff *skb)
{
	char *data = skb->data;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	int slots;
	int frag_slots;

	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	frag_slots = count_skb_frag_slots(skb);
	return slots + frag_slots;
}
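/* Worked example (again assuming 4 KiB pages): an skb whose 1400-byte
 * linear area begins at page offset 3800 needs
 * DIV_ROUND_UP(3800 + 1400, 4096) = 2 slots, plus one PFN_UP() count per
 * fragment; netvsc_start_xmit() then adds 2 more for the RNDIS header.
 */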

static u32 net_checksum_info(struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV4_TCP;
		else if (ip->protocol == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV4_UDP;
	} else {
		struct ipv6hdr *ip6 = ipv6_hdr(skb);

		if (ip6->nexthdr == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV6_TCP;
		else if (ip6->nexthdr == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV6_UDP;
	}

	return TRANSPORT_INFO_NOT_IP;
}

/* Send skb on the slave VF device. */
static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
			  struct sk_buff *skb)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	unsigned int len = skb->len;
	int rc;

	skb->dev = vf_netdev;
	skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;

	rc = dev_queue_xmit(skb);
	if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) {
		struct netvsc_vf_pcpu_stats *pcpu_stats
			= this_cpu_ptr(ndev_ctx->vf_stats);

		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(ndev_ctx->vf_stats->tx_dropped);
	}

	return rc;
}

static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet = NULL;
	int ret;
	unsigned int num_data_pgs;
	struct rndis_message *rndis_msg;
	struct net_device *vf_netdev;
	u32 rndis_msg_size;
	u32 hash;
	struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];

	/* If a VF is present and up, redirect packets to it.
	 * This function is already called with rcu_read_lock_bh held.
	 */
	vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev);
	if (vf_netdev && netif_running(vf_netdev) &&
	    !netpoll_tx_running(net))
		return netvsc_vf_xmit(net, vf_netdev, skb);

	/* We will at most need two pages to describe the rndis
	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
	 * of pages in a single packet. If the skb is scattered across
	 * more pages, we try linearizing it.
	 */

	num_data_pgs = netvsc_get_slots(skb) + 2;

	if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
		++net_device_ctx->eth_stats.tx_scattered;

		if (skb_linearize(skb))
			goto no_memory;

		num_data_pgs = netvsc_get_slots(skb) + 2;
		if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
			++net_device_ctx->eth_stats.tx_too_big;
			goto drop;
		}
	}

	/*
	 * Place the rndis header in the skb head room; the
	 * skb->cb area is used for the hv_netvsc_packet
	 * structure.
	 */
	ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
	if (ret)
		goto no_memory;

	/* Use the skb control buffer for building up the packet */
	BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
			FIELD_SIZEOF(struct sk_buff, cb));
	packet = (struct hv_netvsc_packet *)skb->cb;

	packet->q_idx = skb_get_queue_mapping(skb);

	packet->total_data_buflen = skb->len;
	packet->total_bytes = skb->len;
	packet->total_packets = 1;

	rndis_msg = (struct rndis_message *)skb->head;

	/* Add the rndis header */
	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
	rndis_msg->msg_len = packet->total_data_buflen;

	rndis_msg->msg.pkt = (struct rndis_packet) {
		.data_offset = sizeof(struct rndis_packet),
		.data_len = packet->total_data_buflen,
		.per_pkt_info_offset = sizeof(struct rndis_packet),
	};

	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

	hash = skb_get_hash_raw(skb);
	if (hash != 0 && net->real_num_tx_queues > 1) {
		u32 *hash_info;

		rndis_msg_size += NDIS_HASH_PPI_SIZE;
		hash_info = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
					  NBL_HASH_VALUE);
		*hash_info = hash;
	}

	if (skb_vlan_tag_present(skb)) {
		struct ndis_pkt_8021q_info *vlan;

		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
		vlan = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
				     IEEE_8021Q_INFO);

		vlan->value = 0;
		vlan->vlanid = skb_vlan_tag_get_id(skb);
		vlan->cfi = skb_vlan_tag_get_cfi(skb);
		vlan->pri = skb_vlan_tag_get_prio(skb);
	}

	if (skb_is_gso(skb)) {
		struct ndis_tcp_lso_info *lso_info;

		rndis_msg_size += NDIS_LSO_PPI_SIZE;
		lso_info = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
					 TCP_LARGESEND_PKTINFO);

		lso_info->value = 0;
		lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
		if (skb->protocol == htons(ETH_P_IP)) {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
			ip_hdr(skb)->tot_len = 0;
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		} else {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		}
		lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
		lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) {
			struct ndis_tcp_ip_checksum_info *csum_info;

			rndis_msg_size += NDIS_CSUM_PPI_SIZE;
			csum_info = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
						  TCPIP_CHKSUM_PKTINFO);

			csum_info->value = 0;
			csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);

			if (skb->protocol == htons(ETH_P_IP)) {
				csum_info->transmit.is_ipv4 = 1;

				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			} else {
				csum_info->transmit.is_ipv6 = 1;

				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			}
		} else {
			/* Can't do offload of this type of checksum */
			if (skb_checksum_help(skb))
				goto drop;
		}
	}

	/* Start filling in the page buffers with the rndis hdr */
	rndis_msg->msg_len += rndis_msg_size;
	packet->total_data_buflen = rndis_msg->msg_len;
	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
					       skb, packet, pb);

	/* timestamp packet in software */
	skb_tx_timestamp(skb);

	ret = netvsc_send(net, packet, rndis_msg, pb, skb);
	if (likely(ret == 0))
		return NETDEV_TX_OK;

	if (ret == -EAGAIN) {
		++net_device_ctx->eth_stats.tx_busy;
		return NETDEV_TX_BUSY;
	}

	if (ret == -ENOSPC)
		++net_device_ctx->eth_stats.tx_no_space;

drop:
	dev_kfree_skb_any(skb);
	net->stats.tx_dropped++;

	return NETDEV_TX_OK;

no_memory:
	++net_device_ctx->eth_stats.tx_no_memory;
	goto drop;
}

/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct net_device *net,
				struct rndis_message *resp)
{
	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_reconfig *event;
	unsigned long flags;

	/* Update the physical link speed when changing to another vSwitch */
	if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
		u32 speed;

		speed = *(u32 *)((void *)indicate
				 + indicate->status_buf_offset) / 10000;
		ndev_ctx->speed = speed;
		return;
	}

	/* Handle these link change statuses below */
	if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
	    indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
	    indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
		return;

	if (net->reg_state != NETREG_REGISTERED)
		return;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	event->event = indicate->status;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	list_add_tail(&event->list, &ndev_ctx->reconfig_events);
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	schedule_delayed_work(&ndev_ctx->dwork, 0);
}

static void netvsc_comp_ipcsum(struct sk_buff *skb)
{
	struct iphdr *iph = (struct iphdr *)skb->data;

	iph->check = 0;
	iph->check = ip_fast_csum(iph, iph->ihl);
}

static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
					     struct netvsc_channel *nvchan)
{
	struct napi_struct *napi = &nvchan->napi;
	const struct ndis_pkt_8021q_info *vlan = nvchan->rsc.vlan;
	const struct ndis_tcp_ip_checksum_info *csum_info =
		nvchan->rsc.csum_info;
	struct sk_buff *skb;
	int i;

	skb = napi_alloc_skb(napi, nvchan->rsc.pktlen);
	if (!skb)
		return skb;

	/*
	 * Copy to skb. This copy is needed here since the memory pointed
	 * to by hv_netvsc_packet cannot be deallocated.
	 */
	for (i = 0; i < nvchan->rsc.cnt; i++)
		skb_put_data(skb, nvchan->rsc.data[i], nvchan->rsc.len[i]);

	skb->protocol = eth_type_trans(skb, net);

	/* skb is already created with CHECKSUM_NONE */
	skb_checksum_none_assert(skb);

	/* Incoming packets may have IP header checksum verified by the host.
	 * They may not have IP header checksum computed after coalescing.
	 * We compute it here if the flags are set, because on Linux, the IP
	 * checksum is always checked.
	 */
	if (csum_info && csum_info->receive.ip_checksum_value_invalid &&
	    csum_info->receive.ip_checksum_succeeded &&
	    skb->protocol == htons(ETH_P_IP))
		netvsc_comp_ipcsum(skb);

	/* Do L4 checksum offload if enabled and present. */
	if (csum_info && (net->features & NETIF_F_RXCSUM)) {
		if (csum_info->receive.tcp_checksum_succeeded ||
		    csum_info->receive.udp_checksum_succeeded)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (vlan) {
		u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT) |
			(vlan->cfi ? VLAN_CFI_MASK : 0);

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       vlan_tci);
	}

	return skb;
}

/*
 * netvsc_recv_callback - Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct net_device *net,
			 struct netvsc_device *net_device,
			 struct netvsc_channel *nvchan)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct vmbus_channel *channel = nvchan->channel;
	u16 q_idx = channel->offermsg.offer.sub_channel_index;
	struct sk_buff *skb;
	struct netvsc_stats *rx_stats;

	if (net->reg_state != NETREG_REGISTERED)
		return NVSP_STAT_FAIL;

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netvsc_alloc_recv_skb(net, nvchan);

	if (unlikely(!skb)) {
		++net_device_ctx->eth_stats.rx_no_memory;
		return NVSP_STAT_FAIL;
	}

	skb_record_rx_queue(skb, q_idx);

	/*
	 * Even if injecting the packet, record the statistics
	 * on the synthetic device because modifying the VF device
	 * statistics will not work correctly.
	 */
	rx_stats = &nvchan->rx_stats;
	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->packets++;
	rx_stats->bytes += nvchan->rsc.pktlen;

	if (skb->pkt_type == PACKET_BROADCAST)
		++rx_stats->broadcast;
	else if (skb->pkt_type == PACKET_MULTICAST)
		++rx_stats->multicast;
	u64_stats_update_end(&rx_stats->syncp);

	napi_gro_receive(&nvchan->napi, skb);
	return NVSP_STAT_SUCCESS;
}

static void netvsc_get_drvinfo(struct net_device *net,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}

static void netvsc_get_channels(struct net_device *net,
				struct ethtool_channels *channel)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);

	if (nvdev) {
		channel->max_combined	= nvdev->max_chn;
		channel->combined_count = nvdev->num_chn;
	}
}

/* Allocate a struct netvsc_device_info and initialize it either from an
 * existing struct netvsc_device or from default values.
 */
static struct netvsc_device_info *netvsc_devinfo_get
			(struct netvsc_device *nvdev)
{
	struct netvsc_device_info *dev_info;

	dev_info = kzalloc(sizeof(*dev_info), GFP_ATOMIC);

	if (!dev_info)
		return NULL;

	if (nvdev) {
		dev_info->num_chn = nvdev->num_chn;
		dev_info->send_sections = nvdev->send_section_cnt;
		dev_info->send_section_size = nvdev->send_section_size;
		dev_info->recv_sections = nvdev->recv_section_cnt;
		dev_info->recv_section_size = nvdev->recv_section_size;

		memcpy(dev_info->rss_key, nvdev->extension->rss_key,
		       NETVSC_HASH_KEYLEN);
	} else {
		dev_info->num_chn = VRSS_CHANNEL_DEFAULT;
		dev_info->send_sections = NETVSC_DEFAULT_TX;
		dev_info->send_section_size = NETVSC_SEND_SECTION_SIZE;
		dev_info->recv_sections = NETVSC_DEFAULT_RX;
		dev_info->recv_section_size = NETVSC_RECV_SECTION_SIZE;
	}

	return dev_info;
}

static int netvsc_detach(struct net_device *ndev,
			 struct netvsc_device *nvdev)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct hv_device *hdev = ndev_ctx->device_ctx;
	int ret;

	/* Don't keep trying to set up sub-channels */
	if (cancel_work_sync(&nvdev->subchan_work))
		nvdev->num_chn = 1;

	/* If device was up (receiving) then shutdown */
	if (netif_running(ndev)) {
		netvsc_tx_disable(nvdev, ndev);

		ret = rndis_filter_close(nvdev);
		if (ret) {
			netdev_err(ndev,
				   "unable to close device (ret %d).\n", ret);
			return ret;
		}

		ret = netvsc_wait_until_empty(nvdev);
		if (ret) {
			netdev_err(ndev,
				   "Ring buffer not empty after closing rndis\n");
			return ret;
		}
	}

	netif_device_detach(ndev);

	rndis_filter_device_remove(hdev, nvdev);

	return 0;
}

static int netvsc_attach(struct net_device *ndev,
			 struct netvsc_device_info *dev_info)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct hv_device *hdev = ndev_ctx->device_ctx;
	struct netvsc_device *nvdev;
	struct rndis_device *rdev;
	int ret;

	nvdev = rndis_filter_device_add(hdev, dev_info);
	if (IS_ERR(nvdev))
		return PTR_ERR(nvdev);

	if (nvdev->num_chn > 1) {
		ret = rndis_set_subchannel(ndev, nvdev, dev_info);

		/* if unavailable, just proceed with one queue */
		if (ret) {
			nvdev->max_chn = 1;
			nvdev->num_chn = 1;
		}
	}

	/* In any case device is now ready */
	netif_device_attach(ndev);

	/* Note: enable and attach happen when sub-channels are set up */
	netif_carrier_off(ndev);

	if (netif_running(ndev)) {
		ret = rndis_filter_open(nvdev);
		if (ret)
			return ret;

		rdev = nvdev->extension;
		if (!rdev->link_state)
			netif_carrier_on(ndev);
	}

	return 0;
}

static int netvsc_set_channels(struct net_device *net,
			       struct ethtool_channels *channels)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	unsigned int orig, count = channels->combined_count;
	struct netvsc_device_info *device_info;
	int ret;

	/* We do not support separate count for rx, tx, or other */
	if (count == 0 ||
	    channels->rx_count || channels->tx_count || channels->other_count)
		return -EINVAL;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
		return -EINVAL;

	if (count > nvdev->max_chn)
		return -EINVAL;

	orig = nvdev->num_chn;

	device_info = netvsc_devinfo_get(nvdev);

	if (!device_info)
		return -ENOMEM;

	device_info->num_chn = count;

	ret = netvsc_detach(net, nvdev);
	if (ret)
		goto out;

	ret = netvsc_attach(net, device_info);
	if (ret) {
		device_info->num_chn = orig;
		if (netvsc_attach(net, device_info))
			netdev_err(net, "restoring channel setting failed\n");
	}

out:
	kfree(device_info);
	return ret;
}
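/* Usage sketch (user-space side, illustrative): the combined channel count
 * handled above is what "ethtool -L" drives, e.g.:
 *
 *	ethtool -L eth0 combined 4
 *
 * Separate rx/tx/other counts are rejected with -EINVAL.
 */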

static bool
netvsc_validate_ethtool_ss_cmd(const struct ethtool_link_ksettings *cmd)
{
	struct ethtool_link_ksettings diff1 = *cmd;
	struct ethtool_link_ksettings diff2 = {};

	diff1.base.speed = 0;
	diff1.base.duplex = 0;
	/* advertising and cmd are usually set */
	ethtool_link_ksettings_zero_link_mode(&diff1, advertising);
	diff1.base.cmd = 0;
	/* We set port to PORT_OTHER */
	diff2.base.port = PORT_OTHER;

	return !memcmp(&diff1, &diff2, sizeof(diff1));
}

static void netvsc_init_settings(struct net_device *dev)
{
	struct net_device_context *ndc = netdev_priv(dev);

	ndc->l4_hash = HV_DEFAULT_L4HASH;

	ndc->speed = SPEED_UNKNOWN;
	ndc->duplex = DUPLEX_FULL;

	dev->features = NETIF_F_LRO;
}

static int netvsc_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);

	cmd->base.speed = ndc->speed;
	cmd->base.duplex = ndc->duplex;
	cmd->base.port = PORT_OTHER;

	return 0;
}

static int netvsc_set_link_ksettings(struct net_device *dev,
				     const struct ethtool_link_ksettings *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);
	u32 speed;

	speed = cmd->base.speed;
	if (!ethtool_validate_speed(speed) ||
	    !ethtool_validate_duplex(cmd->base.duplex) ||
	    !netvsc_validate_ethtool_ss_cmd(cmd))
		return -EINVAL;

	ndc->speed = speed;
	ndc->duplex = cmd->base.duplex;

	return 0;
}
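/* Usage sketch (illustrative): the synthetic NIC has no real PHY, so the
 * speed/duplex stored here are advisory values only, settable with e.g.:
 *
 *	ethtool -s eth0 speed 1000 duplex full
 */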

static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	int orig_mtu = ndev->mtu;
	struct netvsc_device_info *device_info;
	int ret = 0;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	device_info = netvsc_devinfo_get(nvdev);

	if (!device_info)
		return -ENOMEM;

	/* Change MTU of underlying VF netdev first. */
	if (vf_netdev) {
		ret = dev_set_mtu(vf_netdev, mtu);
		if (ret)
			goto out;
	}

	ret = netvsc_detach(ndev, nvdev);
	if (ret)
		goto rollback_vf;

	ndev->mtu = mtu;

	ret = netvsc_attach(ndev, device_info);
	if (!ret)
		goto out;

	/* Attempt rollback to original MTU */
	ndev->mtu = orig_mtu;

	if (netvsc_attach(ndev, device_info))
		netdev_err(ndev, "restoring mtu failed\n");
rollback_vf:
	if (vf_netdev)
		dev_set_mtu(vf_netdev, orig_mtu);

out:
	kfree(device_info);
	return ret;
}
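/* Usage sketch (illustrative): an MTU change such as
 *
 *	ip link set dev eth0 mtu 9000
 *
 * lands here; the RNDIS device is torn down and re-created with the new
 * MTU, and both the synthetic device and the VF are rolled back on failure.
 */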
1149
stephen hemminger0c195562017-08-01 19:58:53 -07001150static void netvsc_get_vf_stats(struct net_device *net,
1151 struct netvsc_vf_pcpu_stats *tot)
1152{
1153 struct net_device_context *ndev_ctx = netdev_priv(net);
1154 int i;
1155
1156 memset(tot, 0, sizeof(*tot));
1157
1158 for_each_possible_cpu(i) {
1159 const struct netvsc_vf_pcpu_stats *stats
1160 = per_cpu_ptr(ndev_ctx->vf_stats, i);
1161 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1162 unsigned int start;
1163
1164 do {
1165 start = u64_stats_fetch_begin_irq(&stats->syncp);
1166 rx_packets = stats->rx_packets;
1167 tx_packets = stats->tx_packets;
1168 rx_bytes = stats->rx_bytes;
1169 tx_bytes = stats->tx_bytes;
1170 } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
1171
1172 tot->rx_packets += rx_packets;
1173 tot->tx_packets += tx_packets;
1174 tot->rx_bytes += rx_bytes;
1175 tot->tx_bytes += tx_bytes;
1176 tot->tx_dropped += stats->tx_dropped;
1177 }
1178}
1179
Yidong Ren6ae74672018-07-30 17:09:45 +00001180static void netvsc_get_pcpu_stats(struct net_device *net,
1181 struct netvsc_ethtool_pcpu_stats *pcpu_tot)
1182{
1183 struct net_device_context *ndev_ctx = netdev_priv(net);
1184 struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
1185 int i;
1186
1187 /* fetch percpu stats of vf */
1188 for_each_possible_cpu(i) {
1189 const struct netvsc_vf_pcpu_stats *stats =
1190 per_cpu_ptr(ndev_ctx->vf_stats, i);
1191 struct netvsc_ethtool_pcpu_stats *this_tot = &pcpu_tot[i];
1192 unsigned int start;
1193
1194 do {
1195 start = u64_stats_fetch_begin_irq(&stats->syncp);
1196 this_tot->vf_rx_packets = stats->rx_packets;
1197 this_tot->vf_tx_packets = stats->tx_packets;
1198 this_tot->vf_rx_bytes = stats->rx_bytes;
1199 this_tot->vf_tx_bytes = stats->tx_bytes;
1200 } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
1201 this_tot->rx_packets = this_tot->vf_rx_packets;
1202 this_tot->tx_packets = this_tot->vf_tx_packets;
1203 this_tot->rx_bytes = this_tot->vf_rx_bytes;
1204 this_tot->tx_bytes = this_tot->vf_tx_bytes;
1205 }
1206
1207 /* fetch percpu stats of netvsc */
1208 for (i = 0; i < nvdev->num_chn; i++) {
1209 const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
1210 const struct netvsc_stats *stats;
1211 struct netvsc_ethtool_pcpu_stats *this_tot =
1212 &pcpu_tot[nvchan->channel->target_cpu];
1213 u64 packets, bytes;
1214 unsigned int start;
1215
1216 stats = &nvchan->tx_stats;
1217 do {
1218 start = u64_stats_fetch_begin_irq(&stats->syncp);
1219 packets = stats->packets;
1220 bytes = stats->bytes;
1221 } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
1222
1223 this_tot->tx_bytes += bytes;
1224 this_tot->tx_packets += packets;
1225
1226 stats = &nvchan->rx_stats;
1227 do {
1228 start = u64_stats_fetch_begin_irq(&stats->syncp);
1229 packets = stats->packets;
1230 bytes = stats->bytes;
1231 } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
1232
1233 this_tot->rx_bytes += bytes;
1234 this_tot->rx_packets += packets;
1235 }
1236}
1237
static void netvsc_get_stats64(struct net_device *net,
			       struct rtnl_link_stats64 *t)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
	struct netvsc_vf_pcpu_stats vf_tot;
	int i;

	if (!nvdev)
		return;

	netdev_stats_to_stats64(t, &net->stats);

	netvsc_get_vf_stats(net, &vf_tot);
	t->rx_packets += vf_tot.rx_packets;
	t->tx_packets += vf_tot.tx_packets;
	t->rx_bytes   += vf_tot.rx_bytes;
	t->tx_bytes   += vf_tot.tx_bytes;
	t->tx_dropped += vf_tot.tx_dropped;

	for (i = 0; i < nvdev->num_chn; i++) {
		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
		const struct netvsc_stats *stats;
		u64 packets, bytes, multicast;
		unsigned int start;

		stats = &nvchan->tx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		t->tx_bytes   += bytes;
		t->tx_packets += packets;

		stats = &nvchan->rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
			multicast = stats->multicast + stats->broadcast;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		t->rx_bytes   += bytes;
		t->rx_packets += packets;
		t->multicast  += multicast;
	}
}

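/* Changing the MAC must keep the VF and the synthetic NIC in step:
 * update the VF first, then program the synthetic device via RNDIS,
 * and roll the VF back if the RNDIS request fails, so both interfaces
 * always report the same address.
 */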
static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
	struct net_device_context *ndc = netdev_priv(ndev);
	struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	struct sockaddr *addr = p;
	int err;

	err = eth_prepare_mac_addr_change(ndev, p);
	if (err)
		return err;

	if (!nvdev)
		return -ENODEV;

	if (vf_netdev) {
		err = dev_set_mac_address(vf_netdev, addr, NULL);
		if (err)
			return err;
	}

	err = rndis_filter_set_device_mac(nvdev, addr->sa_data);
	if (!err) {
		eth_commit_mac_addr_change(ndev, p);
	} else if (vf_netdev) {
		/* rollback change on VF */
		memcpy(addr->sa_data, ndev->dev_addr, ETH_ALEN);
		dev_set_mac_address(vf_netdev, addr, NULL);
	}

	return err;
}

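/* ethtool statistics tables: each entry pairs an ethtool string with
 * the offset of the counter inside its stats structure. The pcpu_stats
 * names contain a %u that netvsc_get_strings() fills in with the CPU
 * number, e.g. "cpu0_rx_packets", "cpu0_vf_tx_bytes", and so on.
 */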
static const struct {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} netvsc_stats[] = {
	{ "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
	{ "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
	{ "tx_no_space",  offsetof(struct netvsc_ethtool_stats, tx_no_space) },
	{ "tx_too_big",	  offsetof(struct netvsc_ethtool_stats, tx_too_big) },
	{ "tx_busy",	  offsetof(struct netvsc_ethtool_stats, tx_busy) },
	{ "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) },
	{ "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) },
	{ "rx_no_memory", offsetof(struct netvsc_ethtool_stats, rx_no_memory) },
	{ "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) },
	{ "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) },
}, pcpu_stats[] = {
	{ "cpu%u_rx_packets",
		offsetof(struct netvsc_ethtool_pcpu_stats, rx_packets) },
	{ "cpu%u_rx_bytes",
		offsetof(struct netvsc_ethtool_pcpu_stats, rx_bytes) },
	{ "cpu%u_tx_packets",
		offsetof(struct netvsc_ethtool_pcpu_stats, tx_packets) },
	{ "cpu%u_tx_bytes",
		offsetof(struct netvsc_ethtool_pcpu_stats, tx_bytes) },
	{ "cpu%u_vf_rx_packets",
		offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_packets) },
	{ "cpu%u_vf_rx_bytes",
		offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_bytes) },
	{ "cpu%u_vf_tx_packets",
		offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_packets) },
	{ "cpu%u_vf_tx_bytes",
		offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_bytes) },
}, vf_stats[] = {
	{ "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) },
	{ "vf_rx_bytes",   offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) },
	{ "vf_tx_packets", offsetof(struct netvsc_vf_pcpu_stats, tx_packets) },
	{ "vf_tx_bytes",   offsetof(struct netvsc_vf_pcpu_stats, tx_bytes) },
	{ "vf_tx_dropped", offsetof(struct netvsc_vf_pcpu_stats, tx_dropped) },
};

#define NETVSC_GLOBAL_STATS_LEN	ARRAY_SIZE(netvsc_stats)
#define NETVSC_VF_STATS_LEN	ARRAY_SIZE(vf_stats)

/* statistics per cpu (rx/tx packets/bytes, plus the VF counterparts) */
#define NETVSC_PCPU_STATS_LEN (num_present_cpus() * ARRAY_SIZE(pcpu_stats))

/* 4 statistics per queue (rx/tx packets/bytes) */
#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4)

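/* The three ethtool callbacks below share an implicit layout contract:
 * netvsc_get_sset_count() reports how many u64 values will be emitted,
 * and netvsc_get_strings()/netvsc_get_ethtool_stats() must walk the
 * tables in exactly the same order (global, vf, per-queue, per-cpu) so
 * that "ethtool -S ethN" pairs each name with the right value.
 */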
static int netvsc_get_sset_count(struct net_device *dev, int string_set)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);

	if (!nvdev)
		return -ENODEV;

	switch (string_set) {
	case ETH_SS_STATS:
		return NETVSC_GLOBAL_STATS_LEN
			+ NETVSC_VF_STATS_LEN
			+ NETVSC_QUEUE_STATS_LEN(nvdev)
			+ NETVSC_PCPU_STATS_LEN;
	default:
		return -EINVAL;
	}
}

static void netvsc_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	const void *nds = &ndc->eth_stats;
	const struct netvsc_stats *qstats;
	struct netvsc_vf_pcpu_stats sum;
	struct netvsc_ethtool_pcpu_stats *pcpu_sum;
	unsigned int start;
	u64 packets, bytes;
	int i, j, cpu;

	if (!nvdev)
		return;

	for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++)
		data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);

	netvsc_get_vf_stats(dev, &sum);
	for (j = 0; j < NETVSC_VF_STATS_LEN; j++)
		data[i++] = *(u64 *)((void *)&sum + vf_stats[j].offset);

	for (j = 0; j < nvdev->num_chn; j++) {
		qstats = &nvdev->chan_table[j].tx_stats;

		do {
			start = u64_stats_fetch_begin_irq(&qstats->syncp);
			packets = qstats->packets;
			bytes = qstats->bytes;
		} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
		data[i++] = packets;
		data[i++] = bytes;

		qstats = &nvdev->chan_table[j].rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&qstats->syncp);
			packets = qstats->packets;
			bytes = qstats->bytes;
		} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
		data[i++] = packets;
		data[i++] = bytes;
	}

	pcpu_sum = kvmalloc_array(num_possible_cpus(),
				  sizeof(struct netvsc_ethtool_pcpu_stats),
				  GFP_KERNEL);
	/* the allocation can fail; leave the per-cpu section unpopulated */
	if (!pcpu_sum)
		return;

	netvsc_get_pcpu_stats(dev, pcpu_sum);
	for_each_present_cpu(cpu) {
		struct netvsc_ethtool_pcpu_stats *this_sum = &pcpu_sum[cpu];

		for (j = 0; j < ARRAY_SIZE(pcpu_stats); j++)
			data[i++] = *(u64 *)((void *)this_sum
					     + pcpu_stats[j].offset);
	}
	kvfree(pcpu_sum);
}

static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	u8 *p = data;
	int i, cpu;

	if (!nvdev)
		return;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) {
			memcpy(p, netvsc_stats[i].name, ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < ARRAY_SIZE(vf_stats); i++) {
			memcpy(p, vf_stats[i].name, ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < nvdev->num_chn; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}

		for_each_present_cpu(cpu) {
			for (i = 0; i < ARRAY_SIZE(pcpu_stats); i++) {
				sprintf(p, pcpu_stats[i].name, cpu);
				p += ETH_GSTRING_LEN;
			}
		}

		break;
	}
}

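/* RXFH (receive flow hash) control. The ndc->l4_hash bits select
 * whether TCP/UDP flows are hashed on the 4-tuple (addresses + ports)
 * or only on the 2-tuple of IP addresses. From userspace this is
 * driven by ethtool, for example (illustrative command lines only):
 *
 *	ethtool -n eth0 rx-flow-hash udp4
 *	ethtool -N eth0 rx-flow-hash udp4 sdfn
 */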
static int
netvsc_get_rss_hash_opts(struct net_device_context *ndc,
			 struct ethtool_rxnfc *info)
{
	const u32 l4_flag = RXH_L4_B_0_1 | RXH_L4_B_2_3;

	info->data = RXH_IP_SRC | RXH_IP_DST;

	switch (info->flow_type) {
	case TCP_V4_FLOW:
		if (ndc->l4_hash & HV_TCP4_L4HASH)
			info->data |= l4_flag;

		break;

	case TCP_V6_FLOW:
		if (ndc->l4_hash & HV_TCP6_L4HASH)
			info->data |= l4_flag;

		break;

	case UDP_V4_FLOW:
		if (ndc->l4_hash & HV_UDP4_L4HASH)
			info->data |= l4_flag;

		break;

	case UDP_V6_FLOW:
		if (ndc->l4_hash & HV_UDP6_L4HASH)
			info->data |= l4_flag;

		break;

	case IPV4_FLOW:
	case IPV6_FLOW:
		break;
	default:
		info->data = 0;
		break;
	}

	return 0;
}

static int
netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		 u32 *rules)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);

	if (!nvdev)
		return -ENODEV;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = nvdev->num_chn;
		return 0;

	case ETHTOOL_GRXFH:
		return netvsc_get_rss_hash_opts(ndc, info);
	}
	return -EOPNOTSUPP;
}

static int netvsc_set_rss_hash_opts(struct net_device_context *ndc,
				    struct ethtool_rxnfc *info)
{
	if (info->data == (RXH_IP_SRC | RXH_IP_DST |
			   RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			ndc->l4_hash |= HV_TCP4_L4HASH;
			break;

		case TCP_V6_FLOW:
			ndc->l4_hash |= HV_TCP6_L4HASH;
			break;

		case UDP_V4_FLOW:
			ndc->l4_hash |= HV_UDP4_L4HASH;
			break;

		case UDP_V6_FLOW:
			ndc->l4_hash |= HV_UDP6_L4HASH;
			break;

		default:
			return -EOPNOTSUPP;
		}

		return 0;
	}

	if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			ndc->l4_hash &= ~HV_TCP4_L4HASH;
			break;

		case TCP_V6_FLOW:
			ndc->l4_hash &= ~HV_TCP6_L4HASH;
			break;

		case UDP_V4_FLOW:
			ndc->l4_hash &= ~HV_UDP4_L4HASH;
			break;

		case UDP_V6_FLOW:
			ndc->l4_hash &= ~HV_UDP6_L4HASH;
			break;

		default:
			return -EOPNOTSUPP;
		}

		return 0;
	}

	return -EOPNOTSUPP;
}

static int
netvsc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *info)
{
	struct net_device_context *ndc = netdev_priv(ndev);

	if (info->cmd == ETHTOOL_SRXFH)
		return netvsc_set_rss_hash_opts(ndc, info);

	return -EOPNOTSUPP;
}

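/* RSS key and indirection table plumbing. The Toeplitz hash key
 * (NETVSC_HASH_KEYLEN bytes) and the ITAB_NUM-entry indirection table
 * live in the rndis_device and are pushed to the host by
 * rndis_filter_set_rss_param(); each indirection entry must name a
 * valid channel, which netvsc_set_rxfh() checks before committing.
 */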
static u32 netvsc_get_rxfh_key_size(struct net_device *dev)
{
	return NETVSC_HASH_KEYLEN;
}

static u32 netvsc_rss_indir_size(struct net_device *dev)
{
	return ITAB_NUM;
}

static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
	struct rndis_device *rndis_dev;
	int i;

	if (!ndev)
		return -ENODEV;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;	/* Toeplitz */

	rndis_dev = ndev->extension;
	if (indir) {
		for (i = 0; i < ITAB_NUM; i++)
			indir[i] = rndis_dev->rx_table[i];
	}

	if (key)
		memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN);

	return 0;
}

static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
	struct rndis_device *rndis_dev;
	int i;

	if (!ndev)
		return -ENODEV;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	rndis_dev = ndev->extension;
	if (indir) {
		for (i = 0; i < ITAB_NUM; i++)
			if (indir[i] >= ndev->num_chn)
				return -EINVAL;

		for (i = 0; i < ITAB_NUM; i++)
			rndis_dev->rx_table[i] = indir[i];
	}

	if (!key) {
		if (!indir)
			return 0;

		key = rndis_dev->rss_key;
	}

	return rndis_filter_set_rss_param(rndis_dev, key);
}

/* The Hyper-V RNDIS protocol does not have a ring in the HW sense.
 * It does have a pre-allocated receive area which is divided into sections.
 */
static void __netvsc_get_ringparam(struct netvsc_device *nvdev,
				   struct ethtool_ringparam *ring)
{
	u32 max_buf_size;

	ring->rx_pending = nvdev->recv_section_cnt;
	ring->tx_pending = nvdev->send_section_cnt;

	if (nvdev->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
	else
		max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;

	ring->rx_max_pending = max_buf_size / nvdev->recv_section_size;
	ring->tx_max_pending = NETVSC_SEND_BUFFER_SIZE
		/ nvdev->send_section_size;
}

static void netvsc_get_ringparam(struct net_device *ndev,
				 struct ethtool_ringparam *ring)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);

	if (!nvdev)
		return;

	__netvsc_get_ringparam(nvdev, ring);
}

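/* Resizing the "ring" means re-negotiating the host-allocated send and
 * receive buffer sections, so the device is detached and re-attached
 * with the new section counts; on failure the original geometry is
 * restored. Typically reached via something like (illustrative):
 *
 *	ethtool -G eth0 rx 1024 tx 1024
 */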
static int netvsc_set_ringparam(struct net_device *ndev,
				struct ethtool_ringparam *ring)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	struct netvsc_device_info *device_info;
	struct ethtool_ringparam orig;
	u32 new_tx, new_rx;
	int ret = 0;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	memset(&orig, 0, sizeof(orig));
	__netvsc_get_ringparam(nvdev, &orig);

	new_tx = clamp_t(u32, ring->tx_pending,
			 NETVSC_MIN_TX_SECTIONS, orig.tx_max_pending);
	new_rx = clamp_t(u32, ring->rx_pending,
			 NETVSC_MIN_RX_SECTIONS, orig.rx_max_pending);

	if (new_tx == orig.tx_pending &&
	    new_rx == orig.rx_pending)
		return 0;	 /* no change */

	device_info = netvsc_devinfo_get(nvdev);

	if (!device_info)
		return -ENOMEM;

	device_info->send_sections = new_tx;
	device_info->recv_sections = new_rx;

	ret = netvsc_detach(ndev, nvdev);
	if (ret)
		goto out;

	ret = netvsc_attach(ndev, device_info);
	if (ret) {
		device_info->send_sections = orig.tx_pending;
		device_info->recv_sections = orig.rx_pending;

		if (netvsc_attach(ndev, device_info))
			netdev_err(ndev, "restoring ringparam failed");
	}

out:
	kfree(device_info);
	return ret;
}

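/* Only NETIF_F_LRO is handled here: it maps onto the NDIS receive
 * segment coalescing (RSC) offload, which is enabled or disabled for
 * IPv4 and IPv6 together. An illustrative toggle from userspace:
 *
 *	ethtool -K eth0 lro off
 */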
static int netvsc_set_features(struct net_device *ndev,
			       netdev_features_t features)
{
	netdev_features_t change = features ^ ndev->features;
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	struct ndis_offload_params offloads;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	if (!(change & NETIF_F_LRO))
		return 0;

	memset(&offloads, 0, sizeof(struct ndis_offload_params));

	if (features & NETIF_F_LRO) {
		offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
		offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
	} else {
		offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
		offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
	}

	return rndis_filter_set_offload_params(ndev, nvdev, &offloads);
}

static u32 netvsc_get_msglevel(struct net_device *ndev)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);

	return ndev_ctx->msg_enable;
}

static void netvsc_set_msglevel(struct net_device *ndev, u32 val)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);

	ndev_ctx->msg_enable = val;
}

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo	= netvsc_get_drvinfo,
	.get_msglevel	= netvsc_get_msglevel,
	.set_msglevel	= netvsc_set_msglevel,
	.get_link	= ethtool_op_get_link,
	.get_ethtool_stats = netvsc_get_ethtool_stats,
	.get_sset_count = netvsc_get_sset_count,
	.get_strings	= netvsc_get_strings,
	.get_channels   = netvsc_get_channels,
	.set_channels   = netvsc_set_channels,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_rxnfc	= netvsc_get_rxnfc,
	.set_rxnfc	= netvsc_set_rxnfc,
	.get_rxfh_key_size = netvsc_get_rxfh_key_size,
	.get_rxfh_indir_size = netvsc_rss_indir_size,
	.get_rxfh	= netvsc_get_rxfh,
	.set_rxfh	= netvsc_set_rxfh,
	.get_link_ksettings = netvsc_get_link_ksettings,
	.set_link_ksettings = netvsc_set_link_ksettings,
	.get_ringparam	= netvsc_get_ringparam,
	.set_ringparam	= netvsc_set_ringparam,
};

static const struct net_device_ops device_ops = {
	.ndo_open =			netvsc_open,
	.ndo_stop =			netvsc_close,
	.ndo_start_xmit =		netvsc_start_xmit,
	.ndo_change_rx_flags =		netvsc_change_rx_flags,
	.ndo_set_rx_mode =		netvsc_set_rx_mode,
	.ndo_set_features =		netvsc_set_features,
	.ndo_change_mtu =		netvsc_change_mtu,
	.ndo_validate_addr =		eth_validate_addr,
	.ndo_set_mac_address =		netvsc_set_mac_addr,
	.ndo_select_queue =		netvsc_select_queue,
	.ndo_get_stats64 =		netvsc_get_stats64,
};

/*
 * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate a link
 * down/up sequence. In case of RNDIS_STATUS_MEDIA_CONNECT when carrier is
 * present, send a GARP packet to network peers with netif_notify_peers().
 */
static void netvsc_link_change(struct work_struct *w)
{
	struct net_device_context *ndev_ctx =
		container_of(w, struct net_device_context, dwork.work);
	struct hv_device *device_obj = ndev_ctx->device_ctx;
	struct net_device *net = hv_get_drvdata(device_obj);
	struct netvsc_device *net_device;
	struct rndis_device *rdev;
	struct netvsc_reconfig *event = NULL;
	bool notify = false, reschedule = false;
	unsigned long flags, next_reconfig, delay;

	/* if changes are happening, come back later */
	if (!rtnl_trylock()) {
		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
		return;
	}

	net_device = rtnl_dereference(ndev_ctx->nvdev);
	if (!net_device)
		goto out_unlock;

	rdev = net_device->extension;

	next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
	if (time_is_after_jiffies(next_reconfig)) {
		/* link_watch only sends one notification with current state
		 * per second, avoid doing reconfig more frequently. Handle
		 * wrap around.
		 */
		delay = next_reconfig - jiffies;
		delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
		schedule_delayed_work(&ndev_ctx->dwork, delay);
		goto out_unlock;
	}
	ndev_ctx->last_reconfig = jiffies;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	if (!list_empty(&ndev_ctx->reconfig_events)) {
		event = list_first_entry(&ndev_ctx->reconfig_events,
					 struct netvsc_reconfig, list);
		list_del(&event->list);
		reschedule = !list_empty(&ndev_ctx->reconfig_events);
	}
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	if (!event)
		goto out_unlock;

	switch (event->event) {
		/* Only the following events are possible due to the check in
		 * netvsc_linkstatus_callback()
		 */
	case RNDIS_STATUS_MEDIA_CONNECT:
		if (rdev->link_state) {
			rdev->link_state = false;
			netif_carrier_on(net);
			netvsc_tx_enable(net_device, net);
		} else {
			notify = true;
		}
		kfree(event);
		break;
	case RNDIS_STATUS_MEDIA_DISCONNECT:
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netvsc_tx_disable(net_device, net);
		}
		kfree(event);
		break;
	case RNDIS_STATUS_NETWORK_CHANGE:
		/* Only makes sense if carrier is present */
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netvsc_tx_disable(net_device, net);
			event->event = RNDIS_STATUS_MEDIA_CONNECT;
			spin_lock_irqsave(&ndev_ctx->lock, flags);
			list_add(&event->list, &ndev_ctx->reconfig_events);
			spin_unlock_irqrestore(&ndev_ctx->lock, flags);
			reschedule = true;
		}
		break;
	}

	rtnl_unlock();

	if (notify)
		netdev_notify_peers(net);

	/* link_watch only sends one notification with current state per
	 * second, handle next reconfig event in 2 seconds.
	 */
	if (reschedule)
		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);

	return;

out_unlock:
	rtnl_unlock();
}

static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
{
	struct net_device_context *net_device_ctx;
	struct net_device *dev;

	dev = netdev_master_upper_dev_get(vf_netdev);
	if (!dev || dev->netdev_ops != &device_ops)
		return NULL;	/* not a netvsc device */

	net_device_ctx = netdev_priv(dev);
	if (!rtnl_dereference(net_device_ctx->nvdev))
		return NULL;	/* device is removed */

	return dev;
}

/* Called when the VF is injecting data into the network stack.
 * Change the associated network device from the VF to netvsc.
 * Note: already called with rcu_read_lock held.
 */
static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *ndev = rcu_dereference(skb->dev->rx_handler_data);
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct netvsc_vf_pcpu_stats *pcpu_stats
		= this_cpu_ptr(ndev_ctx->vf_stats);

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return RX_HANDLER_CONSUMED;

	*pskb = skb;

	skb->dev = ndev;

	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	return RX_HANDLER_ANOTHER;
}

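/* Enslave the VF to the synthetic device: register the rx_handler so
 * VF traffic is accounted and re-attributed to the synthetic netdev,
 * link the VF as a slave, and set IFF_SLAVE before the VF is opened so
 * IPv6 addrconf does not configure the VF directly. The actual MTU and
 * address sync happens later from the delayed vf_takeover work.
 */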
static int netvsc_vf_join(struct net_device *vf_netdev,
			  struct net_device *ndev)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	int ret;

	ret = netdev_rx_handler_register(vf_netdev,
					 netvsc_vf_handle_frame, ndev);
	if (ret != 0) {
		netdev_err(vf_netdev,
			   "can not register netvsc VF receive handler (err = %d)\n",
			   ret);
		goto rx_handler_failed;
	}

	ret = netdev_master_upper_dev_link(vf_netdev, ndev,
					   NULL, NULL, NULL);
	if (ret != 0) {
		netdev_err(vf_netdev,
			   "can not set master device %s (err = %d)\n",
			   ndev->name, ret);
		goto upper_link_failed;
	}

	/* set slave flag before open to prevent IPv6 addrconf */
	vf_netdev->flags |= IFF_SLAVE;

	schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);

	call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);

	netdev_info(vf_netdev, "joined to %s\n", ndev->name);
	return 0;

upper_link_failed:
	netdev_rx_handler_unregister(vf_netdev);
rx_handler_failed:
	return ret;
}

static void __netvsc_vf_setup(struct net_device *ndev,
			      struct net_device *vf_netdev)
{
	int ret;

	/* Align MTU of VF with master */
	ret = dev_set_mtu(vf_netdev, ndev->mtu);
	if (ret)
		netdev_warn(vf_netdev,
			    "unable to change mtu to %u\n", ndev->mtu);

	/* set multicast etc flags on VF */
	dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE, NULL);

	/* sync address list from ndev to VF */
	netif_addr_lock_bh(ndev);
	dev_uc_sync(vf_netdev, ndev);
	dev_mc_sync(vf_netdev, ndev);
	netif_addr_unlock_bh(ndev);

	if (netif_running(ndev)) {
		ret = dev_open(vf_netdev, NULL);
		if (ret)
			netdev_warn(vf_netdev,
				    "unable to open: %d\n", ret);
	}
}

/* Set up the VF as a slave of the synthetic device.
 * Runs in a workqueue to avoid recursion in netlink callbacks.
 */
static void netvsc_vf_setup(struct work_struct *w)
{
	struct net_device_context *ndev_ctx
		= container_of(w, struct net_device_context, vf_takeover.work);
	struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx);
	struct net_device *vf_netdev;

	if (!rtnl_trylock()) {
		schedule_delayed_work(&ndev_ctx->vf_takeover, 0);
		return;
	}

	vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev)
		__netvsc_vf_setup(ndev, vf_netdev);

	rtnl_unlock();
}

/* Find netvsc by VF serial number.
 * The PCI hyperv controller records the serial number as the slot kobj name.
 */
static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
{
	struct device *parent = vf_netdev->dev.parent;
	struct net_device_context *ndev_ctx;
	struct pci_dev *pdev;
	u32 serial;

	if (!parent || !dev_is_pci(parent))
		return NULL; /* not a PCI device */

	pdev = to_pci_dev(parent);
	if (!pdev->slot) {
		netdev_notice(vf_netdev, "no PCI slot information\n");
		return NULL;
	}

	if (kstrtou32(pci_slot_name(pdev->slot), 10, &serial)) {
		netdev_notice(vf_netdev, "Invalid vf serial:%s\n",
			      pci_slot_name(pdev->slot));
		return NULL;
	}

	list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
		if (!ndev_ctx->vf_alloc)
			continue;

		if (ndev_ctx->vf_serial == serial)
			return hv_get_drvdata(ndev_ctx->device_ctx);
	}

	netdev_notice(vf_netdev,
		      "no netdev found for vf serial:%u\n", serial);
	return NULL;
}

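/* NETDEV_REGISTER handler for a candidate VF: match it to a synthetic
 * device by PCI slot serial, move it into the synthetic device's
 * network namespace if needed (the join is then redone from that
 * namespace), and finally record it as the paired VF.
 */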
static int netvsc_register_vf(struct net_device *vf_netdev)
{
	struct net_device_context *net_device_ctx;
	struct netvsc_device *netvsc_dev;
	struct net_device *ndev;
	int ret;

	if (vf_netdev->addr_len != ETH_ALEN)
		return NOTIFY_DONE;

	ndev = get_netvsc_byslot(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
	if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
		return NOTIFY_DONE;

	/* if the synthetic interface is in a different namespace,
	 * then move the VF to that namespace; join will be
	 * done again in that context.
	 */
	if (!net_eq(dev_net(ndev), dev_net(vf_netdev))) {
		ret = dev_change_net_namespace(vf_netdev,
					       dev_net(ndev), "eth%d");
		if (ret)
			netdev_err(vf_netdev,
				   "could not move to same namespace as %s: %d\n",
				   ndev->name, ret);
		else
			netdev_info(vf_netdev,
				    "VF moved to namespace with: %s\n",
				    ndev->name);
		return NOTIFY_DONE;
	}

	netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);

	if (netvsc_vf_join(vf_netdev, ndev) != 0)
		return NOTIFY_DONE;

	dev_hold(vf_netdev);
	rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);
	return NOTIFY_OK;
}

/* VF up/down change detected; switch the data path accordingly */
static int netvsc_vf_changed(struct net_device *vf_netdev)
{
	struct net_device_context *net_device_ctx;
	struct netvsc_device *netvsc_dev;
	struct net_device *ndev;
	bool vf_is_up = netif_running(vf_netdev);

	ndev = get_netvsc_byref(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
	if (!netvsc_dev)
		return NOTIFY_DONE;

	netvsc_switch_datapath(ndev, vf_is_up);
	netdev_info(ndev, "Data path switched %s VF: %s\n",
		    vf_is_up ? "to" : "from", vf_netdev->name);

	return NOTIFY_OK;
}

static int netvsc_unregister_vf(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct net_device_context *net_device_ctx;

	ndev = get_netvsc_byref(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	cancel_delayed_work_sync(&net_device_ctx->vf_takeover);

	netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);

	netdev_rx_handler_unregister(vf_netdev);
	netdev_upper_dev_unlink(vf_netdev, ndev);
	RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
	dev_put(vf_netdev);

	return NOTIFY_OK;
}

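/* Device probe: allocate the multi-queue netdev and per-device state,
 * bring up the RNDIS filter to learn the MAC address and channel
 * count, then register the netdev. RTNL must be taken before
 * scheduling subchan_work (see the comment in the body) to avoid a
 * deadlock between subchannel setup and device_add().
 */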
static int netvsc_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct net_device *net = NULL;
	struct net_device_context *net_device_ctx;
	struct netvsc_device_info *device_info = NULL;
	struct netvsc_device *nvdev;
	int ret = -ENOMEM;

	net = alloc_etherdev_mq(sizeof(struct net_device_context),
				VRSS_CHANNEL_MAX);
	if (!net)
		goto no_net;

	netif_carrier_off(net);

	netvsc_init_settings(net);

	net_device_ctx = netdev_priv(net);
	net_device_ctx->device_ctx = dev;
	net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
	if (netif_msg_probe(net_device_ctx))
		netdev_dbg(net, "netvsc msg_enable: %d\n",
			   net_device_ctx->msg_enable);

	hv_set_drvdata(dev, net);

	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);

	spin_lock_init(&net_device_ctx->lock);
	INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
	INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);

	net_device_ctx->vf_stats
		= netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats);
	if (!net_device_ctx->vf_stats)
		goto no_stats;

	net->netdev_ops = &device_ops;
	net->ethtool_ops = &ethtool_ops;
	SET_NETDEV_DEV(net, &dev->device);

	/* We always need headroom for rndis header */
	net->needed_headroom = RNDIS_AND_PPI_SIZE;

	/* Initialize the number of queues to be 1, we may change it if more
	 * channels are offered later.
	 */
	netif_set_real_num_tx_queues(net, 1);
	netif_set_real_num_rx_queues(net, 1);

	/* Notify the netvsc driver of the new device */
	device_info = netvsc_devinfo_get(NULL);

	if (!device_info) {
		ret = -ENOMEM;
		goto devinfo_failed;
	}

	nvdev = rndis_filter_device_add(dev, device_info);
	if (IS_ERR(nvdev)) {
		ret = PTR_ERR(nvdev);
		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
		goto rndis_failed;
	}

	memcpy(net->dev_addr, device_info->mac_adr, ETH_ALEN);

	/* We must get the rtnl lock before scheduling nvdev->subchan_work,
	 * otherwise netvsc_subchan_work() can get the rtnl lock first and
	 * wait for all subchannels to show up, but that may not happen
	 * because netvsc_probe() can't get the rtnl lock and as a result
	 * vmbus_onoffer() -> ... -> device_add() -> ... ->
	 * __device_attach() can't get the device lock, so all the
	 * subchannels can't be processed -- finally netvsc_subchan_work()
	 * hangs forever.
	 */
	rtnl_lock();

	if (nvdev->num_chn > 1)
		schedule_work(&nvdev->subchan_work);

	/* hw_features computed in rndis_netdev_set_hwcaps() */
	net->features = net->hw_features |
		NETIF_F_HIGHDMA | NETIF_F_SG |
		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	net->vlan_features = net->features;

	netdev_lockdep_set_classes(net);

	/* MTU range: 68 - 1500 or 65521 */
	net->min_mtu = NETVSC_MTU_MIN;
	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
		net->max_mtu = NETVSC_MTU - ETH_HLEN;
	else
		net->max_mtu = ETH_DATA_LEN;

	ret = register_netdevice(net);
	if (ret != 0) {
		pr_err("Unable to register netdev.\n");
		goto register_failed;
	}

	list_add(&net_device_ctx->list, &netvsc_dev_list);
	rtnl_unlock();

	kfree(device_info);
	return 0;

register_failed:
	rtnl_unlock();
	rndis_filter_device_remove(dev, nvdev);
rndis_failed:
	kfree(device_info);
devinfo_failed:
	free_percpu(net_device_ctx->vf_stats);
no_stats:
	hv_set_drvdata(dev, NULL);
	free_netdev(net);
no_net:
	return ret;
}

static int netvsc_remove(struct hv_device *dev)
{
	struct net_device_context *ndev_ctx;
	struct net_device *vf_netdev, *net;
	struct netvsc_device *nvdev;

	net = hv_get_drvdata(dev);
	if (net == NULL) {
		dev_err(&dev->device, "No net device to remove\n");
		return 0;
	}

	ndev_ctx = netdev_priv(net);

	cancel_delayed_work_sync(&ndev_ctx->dwork);

	rtnl_lock();
	nvdev = rtnl_dereference(ndev_ctx->nvdev);
	if (nvdev)
		cancel_work_sync(&nvdev->subchan_work);

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed. Also blocks mtu and channel changes.
	 */
	vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev)
		netvsc_unregister_vf(vf_netdev);

	if (nvdev)
		rndis_filter_device_remove(dev, nvdev);

	unregister_netdevice(net);
	list_del(&ndev_ctx->list);

	rtnl_unlock();

	hv_set_drvdata(dev, NULL);

	free_percpu(ndev_ctx->vf_stats);
	free_netdev(net);
	return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
	/* Network guid */
	{ HV_NIC_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only one */
static struct hv_driver netvsc_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = netvsc_probe,
	.remove = netvsc_remove,
	.driver = {
		.probe_type = PROBE_FORCE_SYNCHRONOUS,
	},
};

/*
 * On Hyper-V, every VF interface is matched with a corresponding
 * synthetic interface. The synthetic interface is presented first
 * to the guest. When the corresponding VF instance is registered,
 * we will take care of switching the data path.
 */
static int netvsc_netdev_event(struct notifier_block *this,
			       unsigned long event, void *ptr)
{
	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);

	/* Skip our own events */
	if (event_dev->netdev_ops == &device_ops)
		return NOTIFY_DONE;

	/* Avoid non-Ethernet type devices */
	if (event_dev->type != ARPHRD_ETHER)
		return NOTIFY_DONE;

	/* Avoid Vlan dev with same MAC registering as VF */
	if (is_vlan_dev(event_dev))
		return NOTIFY_DONE;

	/* Avoid Bonding master dev with same MAC registering as VF */
	if ((event_dev->priv_flags & IFF_BONDING) &&
	    (event_dev->flags & IFF_MASTER))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
		return netvsc_register_vf(event_dev);
	case NETDEV_UNREGISTER:
		return netvsc_unregister_vf(event_dev);
	case NETDEV_UP:
	case NETDEV_DOWN:
		return netvsc_vf_changed(event_dev);
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block netvsc_netdev_notifier = {
	.notifier_call = netvsc_netdev_event,
};

static void __exit netvsc_drv_exit(void)
{
	unregister_netdevice_notifier(&netvsc_netdev_notifier);
	vmbus_driver_unregister(&netvsc_drv);
}

static int __init netvsc_drv_init(void)
{
	int ret;

	if (ring_size < RING_SIZE_MIN) {
		ring_size = RING_SIZE_MIN;
		pr_info("Increased ring_size to %u (min allowed)\n",
			ring_size);
	}
	netvsc_ring_bytes = ring_size * PAGE_SIZE;

	ret = vmbus_driver_register(&netvsc_drv);
	if (ret)
		return ret;

	register_netdevice_notifier(&netvsc_netdev_notifier);
	return 0;
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);