/*
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include <xen/events.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>

#define XENVIF_QUEUE_LENGTH 32
#define XENVIF_NAPI_WEIGHT 64

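/* A vif is schedulable (i.e. allowed to process packets) only while its
 * net_device is up and the carrier is on, which happens once the frontend
 * has connected.
 */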
int xenvif_schedulable(struct xenvif *vif)
{
        return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
}

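/* TX event channel interrupt: the frontend has queued transmit requests
 * on the shared ring; defer the work to NAPI.
 */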
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
        struct xenvif *vif = dev_id;

        if (RING_HAS_UNCONSUMED_REQUESTS(&vif->tx))
                napi_schedule(&vif->napi);

        return IRQ_HANDLED;
}

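/* NAPI poll handler: process up to @budget transmit requests from the
 * frontend. Returning work_done < budget completes NAPI and re-enables
 * event-driven operation, so the final ring check below must not race
 * with a new frontend notification.
 */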
static int xenvif_poll(struct napi_struct *napi, int budget)
{
        struct xenvif *vif = container_of(napi, struct xenvif, napi);
        int work_done;

        work_done = xenvif_tx_action(vif, budget);

        if (work_done < budget) {
                int more_to_do = 0;
                unsigned long flags;

                /* It is necessary to disable IRQs before calling
                 * RING_HAS_UNCONSUMED_REQUESTS. Otherwise we might
                 * lose an event from the frontend.
                 *
                 * Consider:
                 *   RING_HAS_UNCONSUMED_REQUESTS
                 *   <frontend generates event to trigger napi_schedule>
                 *   __napi_complete
                 *
                 * This handler is still in scheduled state so the
                 * event has no effect at all. After __napi_complete
                 * this handler is descheduled and cannot get
                 * scheduled again. We lose the event in this case and
                 * the ring will be completely stalled.
                 */

                local_irq_save(flags);

                RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
                if (!(more_to_do &&
                      xenvif_tx_pending_slots_available(vif)))
                        __napi_complete(napi);

                local_irq_restore(flags);
        }

        return work_done;
}

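/* RX event channel interrupt: kick the per-vif kernel thread that pushes
 * queued skbs into the frontend's receive ring.
 */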
static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
        struct xenvif *vif = dev_id;

        xenvif_kick_thread(vif);

        return IRQ_HANDLED;
}

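/* Combined handler, used when the frontend does not support split event
 * channels (feature-split-event-channels == 0).
 */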
static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
        xenvif_tx_interrupt(irq, dev_id);
        xenvif_rx_interrupt(irq, dev_id);

        return IRQ_HANDLED;
}

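/* Timer callback armed in xenvif_start_xmit when the guest RX ring is
 * short of slots. If the queue is still stopped when the drain timeout
 * fires, ditch the internal RX queue and wake the netdev queue so a
 * stalled frontend cannot block the interface forever.
 */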
static void xenvif_wake_queue(unsigned long data)
{
        struct xenvif *vif = (struct xenvif *)data;

        if (netif_queue_stopped(vif->dev)) {
                netdev_err(vif->dev, "draining TX queue\n");
                vif->rx_queue_purge = true;
                xenvif_kick_thread(vif);
                netif_wake_queue(vif->dev);
        }
}

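/* Transmit path from the backend's network stack towards the guest:
 * estimate the minimum number of ring slots the skb will need (e.g. a
 * 3-frag GSO skb needs at least 1 + 3 + 1 = 5), stop the queue with a
 * drain timer if the ring may not have room, then queue the skb for the
 * RX kthread.
 */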
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        int min_slots_needed;

        BUG_ON(skb->dev != dev);

        /* Drop the packet if vif is not ready */
        if (vif->task == NULL ||
            vif->dealloc_task == NULL ||
            !xenvif_schedulable(vif))
                goto drop;

        /* At best we'll need one slot for the header and one for each
         * frag.
         */
        min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;

        /* If the skb is GSO then we'll also need an extra slot for the
         * metadata.
         */
        if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
            skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                min_slots_needed++;

        /* If the skb can't possibly fit in the remaining slots
         * then turn off the queue to give the ring a chance to
         * drain.
         */
        if (!xenvif_rx_ring_slots_available(vif, min_slots_needed)) {
                vif->wake_queue.function = xenvif_wake_queue;
                vif->wake_queue.data = (unsigned long)vif;
                xenvif_stop_queue(vif);
                mod_timer(&vif->wake_queue,
                          jiffies + rx_drain_timeout_jiffies);
        }

        skb_queue_tail(&vif->rx_queue, skb);
        xenvif_kick_thread(vif);

        return NETDEV_TX_OK;

 drop:
        vif->dev->stats.tx_dropped++;
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}

static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        return &vif->dev->stats;
}

static void xenvif_up(struct xenvif *vif)
{
        napi_enable(&vif->napi);
        enable_irq(vif->tx_irq);
        if (vif->tx_irq != vif->rx_irq)
                enable_irq(vif->rx_irq);
        xenvif_check_rx_xenvif(vif);
}

static void xenvif_down(struct xenvif *vif)
{
        napi_disable(&vif->napi);
        disable_irq(vif->tx_irq);
        if (vif->tx_irq != vif->rx_irq)
                disable_irq(vif->rx_irq);
        del_timer_sync(&vif->credit_timeout);
}

static int xenvif_open(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        if (netif_carrier_ok(dev))
                xenvif_up(vif);
        netif_start_queue(dev);
        return 0;
}

static int xenvif_close(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        if (netif_carrier_ok(dev))
                xenvif_down(vif);
        netif_stop_queue(dev);
        return 0;
}

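/* An SG-capable frontend can accept frames up to the 64KiB IP datagram
 * limit, less the VLAN-tagged Ethernet header; without SG the MTU is
 * capped at the standard ETH_DATA_LEN (1500).
 */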
static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
        struct xenvif *vif = netdev_priv(dev);
        int max = vif->can_sg ? 65535 - VLAN_ETH_HLEN : ETH_DATA_LEN;

        if (mtu > max)
                return -EINVAL;
        dev->mtu = mtu;
        return 0;
}

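/* Mask out features the frontend has not negotiated (vif->can_sg,
 * vif->gso_mask, vif->ip_csum etc. are populated elsewhere from the
 * frontend's advertised feature flags).
 */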
static netdev_features_t xenvif_fix_features(struct net_device *dev,
                                             netdev_features_t features)
{
        struct xenvif *vif = netdev_priv(dev);

        if (!vif->can_sg)
                features &= ~NETIF_F_SG;
        if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV4))
                features &= ~NETIF_F_TSO;
        if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV6))
                features &= ~NETIF_F_TSO6;
        if (!vif->ip_csum)
                features &= ~NETIF_F_IP_CSUM;
        if (!vif->ipv6_csum)
                features &= ~NETIF_F_IPV6_CSUM;

        return features;
}

static const struct xenvif_stat {
        char name[ETH_GSTRING_LEN];
        u16 offset;
} xenvif_stats[] = {
        {
                "rx_gso_checksum_fixup",
                offsetof(struct xenvif, rx_gso_checksum_fixup)
        },
        /* If (sent != success + fail), there are probably packets never
         * freed up properly!
         */
        {
                "tx_zerocopy_sent",
                offsetof(struct xenvif, tx_zerocopy_sent),
        },
        {
                "tx_zerocopy_success",
                offsetof(struct xenvif, tx_zerocopy_success),
        },
        {
                "tx_zerocopy_fail",
                offsetof(struct xenvif, tx_zerocopy_fail)
        },
        /* Number of packets exceeding MAX_SKB_FRAGS slots. You should use
         * a guest with the same MAX_SKB_FRAGS value.
         */
        {
                "tx_frag_overflow",
                offsetof(struct xenvif, tx_frag_overflow)
        },
};

static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
        switch (string_set) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(xenvif_stats);
        default:
                return -EINVAL;
        }
}

static void xenvif_get_ethtool_stats(struct net_device *dev,
                                     struct ethtool_stats *stats, u64 *data)
{
        void *vif = netdev_priv(dev);
        int i;

        for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
                data[i] = *(unsigned long *)(vif + xenvif_stats[i].offset);
}

static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
                        memcpy(data + i * ETH_GSTRING_LEN,
                               xenvif_stats[i].name, ETH_GSTRING_LEN);
                break;
        }
}

static const struct ethtool_ops xenvif_ethtool_ops = {
        .get_link = ethtool_op_get_link,

        .get_sset_count = xenvif_get_sset_count,
        .get_ethtool_stats = xenvif_get_ethtool_stats,
        .get_strings = xenvif_get_strings,
};

static const struct net_device_ops xenvif_netdev_ops = {
        .ndo_start_xmit = xenvif_start_xmit,
        .ndo_get_stats = xenvif_get_stats,
        .ndo_open = xenvif_open,
        .ndo_stop = xenvif_close,
        .ndo_change_mtu = xenvif_change_mtu,
        .ndo_fix_features = xenvif_fix_features,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
};

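/* Allocate and register the net_device for a new vif. The device is named
 * vif<domid>.<handle>, starts with carrier off until the frontend connects,
 * and pre-allocates the grant-copy scratch space, ballooned pages and
 * zerocopy callback state used by the datapath.
 */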
struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
                            unsigned int handle)
{
        int err;
        struct net_device *dev;
        struct xenvif *vif;
        char name[IFNAMSIZ] = {};
        int i;

        snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
        dev = alloc_netdev(sizeof(struct xenvif), name, ether_setup);
        if (dev == NULL) {
                pr_warn("Could not allocate netdev for %s\n", name);
                return ERR_PTR(-ENOMEM);
        }

        SET_NETDEV_DEV(dev, parent);

        vif = netdev_priv(dev);

        vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) *
                                     MAX_GRANT_COPY_OPS);
        if (vif->grant_copy_op == NULL) {
                pr_warn("Could not allocate grant copy space for %s\n", name);
                free_netdev(dev);
                return ERR_PTR(-ENOMEM);
        }

        vif->domid = domid;
        vif->handle = handle;
        vif->can_sg = 1;
        vif->ip_csum = 1;
        vif->dev = dev;

        vif->credit_bytes = vif->remaining_credit = ~0UL;
        vif->credit_usec = 0UL;
        init_timer(&vif->credit_timeout);
        vif->credit_window_start = get_jiffies_64();

        init_timer(&vif->wake_queue);

        dev->netdev_ops = &xenvif_netdev_ops;
        dev->hw_features = NETIF_F_SG |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_TSO | NETIF_F_TSO6;
        dev->features = dev->hw_features | NETIF_F_RXCSUM;
        SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops);

        dev->tx_queue_len = XENVIF_QUEUE_LENGTH;

        skb_queue_head_init(&vif->rx_queue);
        skb_queue_head_init(&vif->tx_queue);

        vif->pending_cons = 0;
        vif->pending_prod = MAX_PENDING_REQS;
        for (i = 0; i < MAX_PENDING_REQS; i++)
                vif->pending_ring[i] = i;
        spin_lock_init(&vif->callback_lock);
        spin_lock_init(&vif->response_lock);
        /* If ballooning is disabled, this will consume real memory, so you
         * had better enable it. The long-term solution would be to use just
         * a bunch of valid page descriptors, without dependency on ballooning.
         */
        err = alloc_xenballooned_pages(MAX_PENDING_REQS,
                                       vif->mmap_pages,
                                       false);
        if (err) {
                netdev_err(dev, "Could not reserve mmap_pages\n");
                return ERR_PTR(-ENOMEM);
        }
        for (i = 0; i < MAX_PENDING_REQS; i++) {
                vif->pending_tx_info[i].callback_struct = (struct ubuf_info)
                        { .callback = xenvif_zerocopy_callback,
                          .ctx = NULL,
                          .desc = i };
                vif->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
        }

        /*
         * Initialise a dummy MAC address. We choose the numerically
         * largest non-broadcast address to prevent the address getting
         * stolen by an Ethernet bridge for STP purposes.
         * (FE:FF:FF:FF:FF:FF)
         */
        memset(dev->dev_addr, 0xFF, ETH_ALEN);
        dev->dev_addr[0] &= ~0x01;

        netif_napi_add(dev, &vif->napi, xenvif_poll, XENVIF_NAPI_WEIGHT);

        netif_carrier_off(dev);

        err = register_netdev(dev);
        if (err) {
                netdev_warn(dev, "Could not register device: err=%d\n", err);
                free_netdev(dev);
                return ERR_PTR(err);
        }

        netdev_dbg(dev, "Successfully created xenvif\n");

        __module_get(THIS_MODULE);

        return vif;
}

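/* Bring a vif into operation: map the frontend's shared TX/RX rings, bind
 * either a single combined event channel or split TX/RX channels, and
 * start the per-vif guest-RX and dealloc kernel threads.
 */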
int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
                   unsigned long rx_ring_ref, unsigned int tx_evtchn,
                   unsigned int rx_evtchn)
{
        struct task_struct *task;
        int err = -ENOMEM;

        BUG_ON(vif->tx_irq);
        BUG_ON(vif->task);
        BUG_ON(vif->dealloc_task);

        err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
        if (err < 0)
                goto err;

        init_waitqueue_head(&vif->wq);
        init_waitqueue_head(&vif->dealloc_wq);

        if (tx_evtchn == rx_evtchn) {
                /* feature-split-event-channels == 0 */
                err = bind_interdomain_evtchn_to_irqhandler(
                        vif->domid, tx_evtchn, xenvif_interrupt, 0,
                        vif->dev->name, vif);
                if (err < 0)
                        goto err_unmap;
                vif->tx_irq = vif->rx_irq = err;
                disable_irq(vif->tx_irq);
        } else {
                /* feature-split-event-channels == 1 */
                snprintf(vif->tx_irq_name, sizeof(vif->tx_irq_name),
                         "%s-tx", vif->dev->name);
                err = bind_interdomain_evtchn_to_irqhandler(
                        vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
                        vif->tx_irq_name, vif);
                if (err < 0)
                        goto err_unmap;
                vif->tx_irq = err;
                disable_irq(vif->tx_irq);

                snprintf(vif->rx_irq_name, sizeof(vif->rx_irq_name),
                         "%s-rx", vif->dev->name);
                err = bind_interdomain_evtchn_to_irqhandler(
                        vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
                        vif->rx_irq_name, vif);
                if (err < 0)
                        goto err_tx_unbind;
                vif->rx_irq = err;
                disable_irq(vif->rx_irq);
        }

        task = kthread_create(xenvif_kthread_guest_rx,
                              (void *)vif, "%s-guest-rx", vif->dev->name);
        if (IS_ERR(task)) {
                pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
                err = PTR_ERR(task);
                goto err_rx_unbind;
        }

        vif->task = task;

        task = kthread_create(xenvif_dealloc_kthread,
                              (void *)vif, "%s-dealloc", vif->dev->name);
        if (IS_ERR(task)) {
                pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
                err = PTR_ERR(task);
                goto err_rx_unbind;
        }

        vif->dealloc_task = task;

        rtnl_lock();
        if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
                dev_set_mtu(vif->dev, ETH_DATA_LEN);
        netdev_update_features(vif->dev);
        netif_carrier_on(vif->dev);
        if (netif_running(vif->dev))
                xenvif_up(vif);
        rtnl_unlock();

        wake_up_process(vif->task);
        wake_up_process(vif->dealloc_task);

        return 0;

err_rx_unbind:
        unbind_from_irqhandler(vif->rx_irq, vif);
        vif->rx_irq = 0;
err_tx_unbind:
        unbind_from_irqhandler(vif->tx_irq, vif);
        vif->tx_irq = 0;
err_unmap:
        xenvif_unmap_frontend_rings(vif);
err:
        module_put(THIS_MODULE);
        return err;
}

void xenvif_carrier_off(struct xenvif *vif)
{
        struct net_device *dev = vif->dev;

        rtnl_lock();
        netif_carrier_off(dev); /* discard queued packets */
        if (netif_running(dev))
                xenvif_down(vif);
        rtnl_unlock();
}

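/* Reverse of xenvif_connect: stop the kernel threads and unbind the event
 * channel(s). The net_device itself survives until xenvif_free().
 */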
void xenvif_disconnect(struct xenvif *vif)
{
        if (netif_carrier_ok(vif->dev))
                xenvif_carrier_off(vif);

        if (vif->task) {
                del_timer_sync(&vif->wake_queue);
                kthread_stop(vif->task);
                vif->task = NULL;
        }

        if (vif->dealloc_task) {
                kthread_stop(vif->dealloc_task);
                vif->dealloc_task = NULL;
        }

        if (vif->tx_irq) {
                if (vif->tx_irq == vif->rx_irq)
                        unbind_from_irqhandler(vif->tx_irq, vif);
                else {
                        unbind_from_irqhandler(vif->tx_irq, vif);
                        unbind_from_irqhandler(vif->rx_irq, vif);
                }
                vif->tx_irq = 0;
        }

        xenvif_unmap_frontend_rings(vif);
}

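/* Final teardown. Pages may still be granted to the frontend (zerocopy
 * skbs can linger in other queues), so poll until every grant handle has
 * been returned before releasing the ballooned pages and the net_device.
 */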
void xenvif_free(struct xenvif *vif)
{
        int i, unmap_timeout = 0;
        /* Here we want to avoid timeout messages if an skb can be
         * legitimately stuck somewhere else. Realistically this could be
         * another vif's internal or QDisc queue. That other vif also has
         * this rx_drain_timeout_msecs timeout, but the timer only ditches
         * the internal queue. After that, the QDisc queue can put in worst
         * case XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS skbs into that other
         * vif's internal queue, so we need several rounds of such timeouts
         * until we can be sure that no other vif should still have skbs
         * from us. We are not sending more skbs, so newly stuck packets
         * are not interesting for us here.
         */
        unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000) *
                DIV_ROUND_UP(XENVIF_QUEUE_LENGTH, (XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS));

        for (i = 0; i < MAX_PENDING_REQS; ++i) {
                if (vif->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
                        unmap_timeout++;
                        schedule_timeout(msecs_to_jiffies(1000));
                        if (unmap_timeout > worst_case_skb_lifetime &&
                            net_ratelimit())
                                netdev_err(vif->dev,
                                           "Page still granted! Index: %x\n",
                                           i);
                        i = -1;
                }
        }

        free_xenballooned_pages(MAX_PENDING_REQS, vif->mmap_pages);

        netif_napi_del(&vif->napi);

        unregister_netdev(vif->dev);

        vfree(vif->grant_copy_op);
        free_netdev(vif->dev);

        module_put(THIS_MODULE);
}