/*
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/sched/task.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include <xen/events.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>

#define XENVIF_QUEUE_LENGTH 32
#define XENVIF_NAPI_WEIGHT  64

/* Number of bytes allowed on the internal guest Rx queue. */
#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)

/* This function is used to set SKBTX_DEV_ZEROCOPY as well as
 * increasing the inflight counter. We need to increase the inflight
 * counter because the core driver calls into xenvif_zerocopy_callback,
 * which calls xenvif_skb_zerocopy_complete.
 */
void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
				 struct sk_buff *skb)
{
	skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
	atomic_inc(&queue->inflight_packets);
}

void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
{
	atomic_dec(&queue->inflight_packets);

	/* Wake the dealloc thread _after_ decrementing inflight_packets so
	 * that if kthread_stop() has already been called, the dealloc thread
	 * does not wait forever with nothing to wake it.
	 */
	wake_up(&queue->dealloc_wq);
}

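/* A vif may be scheduled for transmission only while its net device is
 * up, the backend has marked the connection complete, and the vif has
 * not been disabled (e.g. after being flagged as rogue).
 */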
int xenvif_schedulable(struct xenvif *vif)
{
	return netif_running(vif->dev) &&
		test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
		!vif->disabled;
}

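/* TX interrupt handler: the frontend has produced requests on the TX
 * ring, so schedule the NAPI instance that will consume them.
 */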
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;

	if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
		napi_schedule(&queue->napi);

	return IRQ_HANDLED;
}

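/* NAPI poll handler: process up to @budget TX requests from the
 * frontend, re-enabling ring event notifications only once all
 * outstanding work has been completed.
 */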
static int xenvif_poll(struct napi_struct *napi, int budget)
{
	struct xenvif_queue *queue =
		container_of(napi, struct xenvif_queue, napi);
	int work_done;

	/* This vif is rogue, we pretend there is nothing to do
	 * for this vif to deschedule it from NAPI. But this interface
	 * will be turned off in thread context later.
	 */
	if (unlikely(queue->vif->disabled)) {
		napi_complete(napi);
		return 0;
	}

	work_done = xenvif_tx_action(queue, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* If the queue is rate-limited, it shall be
		 * rescheduled in the timer callback.
		 */
		if (likely(!queue->rate_limited))
			xenvif_napi_schedule_or_enable_events(queue);
	}

	return work_done;
}

static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;

	xenvif_kick_thread(queue);

	return IRQ_HANDLED;
}

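/* Combined interrupt handler, used when the frontend does not support
 * feature-split-event-channels and TX and RX share one event channel.
 */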
irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
	xenvif_tx_interrupt(irq, dev_id);
	xenvif_rx_interrupt(irq, dev_id);

	return IRQ_HANDLED;
}

int xenvif_queue_stopped(struct xenvif_queue *queue)
{
	struct net_device *dev = queue->vif->dev;
	unsigned int id = queue->id;
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
}

void xenvif_wake_queue(struct xenvif_queue *queue)
{
	struct net_device *dev = queue->vif->dev;
	unsigned int id = queue->id;
	netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
}

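/* Choose the TX queue for a packet. If the frontend has configured a
 * hash algorithm, map the packet's hash through the mapping table
 * negotiated over the control ring; otherwise fall back to the core
 * netdev_pick_tx() selection.
 */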
static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
			       struct net_device *sb_dev)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned int size = vif->hash.size;
	unsigned int num_queues;

	/* If queues are not set up internally, always return 0
	 * as the packet is going to be dropped anyway.
	 */
	num_queues = READ_ONCE(vif->num_queues);
	if (num_queues < 1)
		return 0;

	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		return netdev_pick_tx(dev, skb, NULL) %
				dev->real_num_tx_queues;

	xenvif_set_skb_hash(vif, skb);

	if (size == 0)
		return skb_get_hash_raw(skb) % dev->real_num_tx_queues;

	return vif->hash.mapping[vif->hash.mapping_sel]
				[skb_get_hash_raw(skb) % size];
}

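/* Note: in a backend this is the path a packet takes *towards* the
 * frontend, i.e. it feeds the guest RX ring; the skb is placed on the
 * internal rx_queue and handed off to the per-queue guest-rx kthread.
 */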
static netdev_tx_t
xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues;
	u16 index;
	struct xenvif_rx_cb *cb;

	BUG_ON(skb->dev != dev);

	/* Drop the packet if queues are not set up.
	 * This handler should be called inside an RCU read section
	 * so we don't need to enter it here explicitly.
	 */
	num_queues = READ_ONCE(vif->num_queues);
	if (num_queues < 1)
		goto drop;

	/* Obtain the queue to be used to transmit this packet */
	index = skb_get_queue_mapping(skb);
	if (index >= num_queues) {
		pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n",
				    index, vif->dev->name);
		index %= num_queues;
	}
	queue = &vif->queues[index];

	/* Drop the packet if queue is not ready */
	if (queue->task == NULL ||
	    queue->dealloc_task == NULL ||
	    !xenvif_schedulable(vif))
		goto drop;

	if (vif->multicast_control && skb->pkt_type == PACKET_MULTICAST) {
		struct ethhdr *eth = (struct ethhdr *)skb->data;

		if (!xenvif_mcast_match(vif, eth->h_dest))
			goto drop;
	}

	cb = XENVIF_RX_CB(skb);
	cb->expires = jiffies + vif->drain_timeout;

	/* If there is no hash algorithm configured then make sure there
	 * is no hash information in the socket buffer otherwise it
	 * would be incorrectly forwarded to the frontend.
	 */
	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		skb_clear_hash(skb);

	xenvif_rx_queue_tail(queue, skb);
	xenvif_kick_thread(queue);

	return NETDEV_TX_OK;

 drop:
	vif->dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues;
	u64 rx_bytes = 0;
	u64 rx_packets = 0;
	u64 tx_bytes = 0;
	u64 tx_packets = 0;
	unsigned int index;

	rcu_read_lock();
	num_queues = READ_ONCE(vif->num_queues);

	/* Aggregate tx and rx stats from each queue */
	for (index = 0; index < num_queues; ++index) {
		queue = &vif->queues[index];
		rx_bytes += queue->stats.rx_bytes;
		rx_packets += queue->stats.rx_packets;
		tx_bytes += queue->stats.tx_bytes;
		tx_packets += queue->stats.tx_packets;
	}

	rcu_read_unlock();

	vif->dev->stats.rx_bytes = rx_bytes;
	vif->dev->stats.rx_packets = rx_packets;
	vif->dev->stats.tx_bytes = tx_bytes;
	vif->dev->stats.tx_packets = tx_packets;

	return &vif->dev->stats;
}

static void xenvif_up(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		napi_enable(&queue->napi);
		enable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			enable_irq(queue->rx_irq);
		xenvif_napi_schedule_or_enable_events(queue);
	}
}

static void xenvif_down(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		disable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			disable_irq(queue->rx_irq);
		napi_disable(&queue->napi);
		del_timer_sync(&queue->credit_timeout);
	}
}

static int xenvif_open(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
		xenvif_up(vif);
	netif_tx_start_all_queues(dev);
	return 0;
}

static int xenvif_close(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
		xenvif_down(vif);
	netif_tx_stop_all_queues(dev);
	return 0;
}

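/* The maximum MTU depends on whether the frontend can accept scattered
 * (SG) packets: with SG the limit is ETH_MAX_MTU less VLAN overhead,
 * otherwise it is the standard ETH_DATA_LEN.
 */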
static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
	struct xenvif *vif = netdev_priv(dev);
	int max = vif->can_sg ? ETH_MAX_MTU - VLAN_ETH_HLEN : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}

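/* Mask out offload features that the connected frontend has not
 * advertised, so the core never hands us packets we cannot forward.
 */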
static netdev_features_t xenvif_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct xenvif *vif = netdev_priv(dev);

	if (!vif->can_sg)
		features &= ~NETIF_F_SG;
	if (~(vif->gso_mask) & GSO_BIT(TCPV4))
		features &= ~NETIF_F_TSO;
	if (~(vif->gso_mask) & GSO_BIT(TCPV6))
		features &= ~NETIF_F_TSO6;
	if (!vif->ip_csum)
		features &= ~NETIF_F_IP_CSUM;
	if (!vif->ipv6_csum)
		features &= ~NETIF_F_IPV6_CSUM;

	return features;
}

static const struct xenvif_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} xenvif_stats[] = {
	{
		"rx_gso_checksum_fixup",
		offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
	},
	/* If (sent != success + fail), there are probably packets never
	 * freed up properly!
	 */
	{
		"tx_zerocopy_sent",
		offsetof(struct xenvif_stats, tx_zerocopy_sent),
	},
	{
		"tx_zerocopy_success",
		offsetof(struct xenvif_stats, tx_zerocopy_success),
	},
	{
		"tx_zerocopy_fail",
		offsetof(struct xenvif_stats, tx_zerocopy_fail)
	},
	/* Number of packets exceeding MAX_SKB_FRAGS slots. You should use
	 * a guest with the same MAX_SKB_FRAGS value.
	 */
	{
		"tx_frag_overflow",
		offsetof(struct xenvif_stats, tx_frag_overflow)
	},
};

static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(xenvif_stats);
	default:
		return -EINVAL;
	}
}

static void xenvif_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 * data)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned int num_queues;
	int i;
	unsigned int queue_index;

	rcu_read_lock();
	num_queues = READ_ONCE(vif->num_queues);

	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
		unsigned long accum = 0;
		for (queue_index = 0; queue_index < num_queues; ++queue_index) {
			void *vif_stats = &vif->queues[queue_index].stats;
			accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset);
		}
		data[i] = accum;
	}

	rcu_read_unlock();
}

static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       xenvif_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}

static const struct ethtool_ops xenvif_ethtool_ops = {
	.get_link = ethtool_op_get_link,

	.get_sset_count = xenvif_get_sset_count,
	.get_ethtool_stats = xenvif_get_ethtool_stats,
	.get_strings = xenvif_get_strings,
};

static const struct net_device_ops xenvif_netdev_ops = {
	.ndo_select_queue = xenvif_select_queue,
	.ndo_start_xmit = xenvif_start_xmit,
	.ndo_get_stats = xenvif_get_stats,
	.ndo_open = xenvif_open,
	.ndo_stop = xenvif_close,
	.ndo_change_mtu = xenvif_change_mtu,
	.ndo_fix_features = xenvif_fix_features,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};

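/* Allocate and register the netdev for a new vif. The device is
 * created with the maximum supported number of queues and starts
 * disconnected, with carrier off, until the frontend connects.
 */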
struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
			    unsigned int handle)
{
	int err;
	struct net_device *dev;
	struct xenvif *vif;
	char name[IFNAMSIZ] = {};

	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
	/* Allocate a netdev with the max. supported number of queues.
	 * When the guest selects the desired number, it will be updated
	 * via netif_set_real_num_*_queues().
	 */
	dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
			      ether_setup, xenvif_max_queues);
	if (dev == NULL) {
		pr_warn("Could not allocate netdev for %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	SET_NETDEV_DEV(dev, parent);

	vif = netdev_priv(dev);

	vif->domid = domid;
	vif->handle = handle;
	vif->can_sg = 1;
	vif->ip_csum = 1;
	vif->dev = dev;
	vif->disabled = false;
	vif->drain_timeout = msecs_to_jiffies(rx_drain_timeout_msecs);
	vif->stall_timeout = msecs_to_jiffies(rx_stall_timeout_msecs);

	/* Start out with no queues. */
	vif->queues = NULL;
	vif->num_queues = 0;

	spin_lock_init(&vif->lock);
	INIT_LIST_HEAD(&vif->fe_mcast_addr);

	dev->netdev_ops = &xenvif_netdev_ops;
	dev->hw_features = NETIF_F_SG |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_FRAGLIST;
	dev->features = dev->hw_features | NETIF_F_RXCSUM;
	dev->ethtool_ops = &xenvif_ethtool_ops;

	dev->tx_queue_len = XENVIF_QUEUE_LENGTH;

	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN;

	/*
	 * Initialise a dummy MAC address. We choose the numerically
	 * largest non-broadcast address to prevent the address getting
	 * stolen by an Ethernet bridge for STP purposes.
	 * (FE:FF:FF:FF:FF:FF)
	 */
	eth_broadcast_addr(dev->dev_addr);
	dev->dev_addr[0] &= ~0x01;

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		netdev_warn(dev, "Could not register device: err=%d\n", err);
		free_netdev(dev);
		return ERR_PTR(err);
	}

	netdev_dbg(dev, "Successfully created xenvif\n");

	__module_get(THIS_MODULE);

	return vif;
}

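/* Set up the state a queue needs before it can be connected: the
 * credit scheduler, the internal packet queues, the pending-request
 * ring, and the ballooned pages used for zerocopy grant mappings.
 */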
int xenvif_init_queue(struct xenvif_queue *queue)
{
	int err, i;

	queue->credit_bytes = queue->remaining_credit = ~0UL;
	queue->credit_usec = 0UL;
	timer_setup(&queue->credit_timeout, xenvif_tx_credit_callback, 0);
	queue->credit_window_start = get_jiffies_64();

	queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;

	skb_queue_head_init(&queue->rx_queue);
	skb_queue_head_init(&queue->tx_queue);

	queue->pending_cons = 0;
	queue->pending_prod = MAX_PENDING_REQS;
	for (i = 0; i < MAX_PENDING_REQS; ++i)
		queue->pending_ring[i] = i;

	spin_lock_init(&queue->callback_lock);
	spin_lock_init(&queue->response_lock);

	/* If ballooning is disabled, this will consume real memory, so you
	 * had better enable it. The long-term solution would be to use just
	 * a bunch of valid page descriptors, without dependency on ballooning.
	 */
	err = gnttab_alloc_pages(MAX_PENDING_REQS,
				 queue->mmap_pages);
	if (err) {
		netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
		return -ENOMEM;
	}

	for (i = 0; i < MAX_PENDING_REQS; i++) {
		queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
			{ .callback = xenvif_zerocopy_callback,
			  { { .ctx = NULL,
			      .desc = i } } };
		queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
	}

	return 0;
}

void xenvif_carrier_on(struct xenvif *vif)
{
	rtnl_lock();
	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
		dev_set_mtu(vif->dev, ETH_DATA_LEN);
	netdev_update_features(vif->dev);
	set_bit(VIF_STATUS_CONNECTED, &vif->status);
	if (netif_running(vif->dev))
		xenvif_up(vif);
	rtnl_unlock();
}

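/* Map the control ring shared with the frontend and bind its event
 * channel to a threaded IRQ; control requests (e.g. hash
 * configuration) are handled by xenvif_ctrl_irq_fn().
 */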
int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
			unsigned int evtchn)
{
	struct net_device *dev = vif->dev;
	void *addr;
	struct xen_netif_ctrl_sring *shared;
	int err;

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
				     &ring_ref, 1, &addr);
	if (err)
		goto err;

	shared = (struct xen_netif_ctrl_sring *)addr;
	BACK_RING_INIT(&vif->ctrl, shared, XEN_PAGE_SIZE);

	err = bind_interdomain_evtchn_to_irq(vif->domid, evtchn);
	if (err < 0)
		goto err_unmap;

	vif->ctrl_irq = err;

	xenvif_init_hash(vif);

	err = request_threaded_irq(vif->ctrl_irq, NULL, xenvif_ctrl_irq_fn,
				   IRQF_ONESHOT, "xen-netback-ctrl", vif);
	if (err) {
		pr_warn("Could not setup irq handler for %s\n", dev->name);
		goto err_deinit;
	}

	return 0;

err_deinit:
	xenvif_deinit_hash(vif);
	unbind_from_irqhandler(vif->ctrl_irq, vif);
	vif->ctrl_irq = 0;

err_unmap:
	xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
				vif->ctrl.sring);
	vif->ctrl.sring = NULL;

err:
	return err;
}

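/* Map the TX and RX data rings for a queue, bind the event channel(s)
 * (shared or split), and start the per-queue guest-rx and dealloc
 * kthreads. On failure, everything bound so far is torn down again.
 */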
int xenvif_connect_data(struct xenvif_queue *queue,
			unsigned long tx_ring_ref,
			unsigned long rx_ring_ref,
			unsigned int tx_evtchn,
			unsigned int rx_evtchn)
{
	struct task_struct *task;
	int err;

	BUG_ON(queue->tx_irq);
	BUG_ON(queue->task);
	BUG_ON(queue->dealloc_task);

	err = xenvif_map_frontend_data_rings(queue, tx_ring_ref,
					     rx_ring_ref);
	if (err < 0)
		goto err;

	init_waitqueue_head(&queue->wq);
	init_waitqueue_head(&queue->dealloc_wq);
	atomic_set(&queue->inflight_packets, 0);

	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
			XENVIF_NAPI_WEIGHT);

	if (tx_evtchn == rx_evtchn) {
		/* feature-split-event-channels == 0 */
		err = bind_interdomain_evtchn_to_irqhandler(
			queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
			queue->name, queue);
		if (err < 0)
			goto err_unmap;
		queue->tx_irq = queue->rx_irq = err;
		disable_irq(queue->tx_irq);
	} else {
		/* feature-split-event-channels == 1 */
		snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
			 "%s-tx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler(
			queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
			queue->tx_irq_name, queue);
		if (err < 0)
			goto err_unmap;
		queue->tx_irq = err;
		disable_irq(queue->tx_irq);

		snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
			 "%s-rx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler(
			queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
			queue->rx_irq_name, queue);
		if (err < 0)
			goto err_tx_unbind;
		queue->rx_irq = err;
		disable_irq(queue->rx_irq);
	}

	queue->stalled = true;

	task = kthread_create(xenvif_kthread_guest_rx,
			      (void *)queue, "%s-guest-rx", queue->name);
	if (IS_ERR(task)) {
		pr_warn("Could not allocate kthread for %s\n", queue->name);
		err = PTR_ERR(task);
		goto err_rx_unbind;
	}
	queue->task = task;
	get_task_struct(task);

	task = kthread_create(xenvif_dealloc_kthread,
			      (void *)queue, "%s-dealloc", queue->name);
	if (IS_ERR(task)) {
		pr_warn("Could not allocate kthread for %s\n", queue->name);
		err = PTR_ERR(task);
		goto err_rx_unbind;
	}
	queue->dealloc_task = task;

	wake_up_process(queue->task);
	wake_up_process(queue->dealloc_task);

	return 0;

err_rx_unbind:
	unbind_from_irqhandler(queue->rx_irq, queue);
	queue->rx_irq = 0;
err_tx_unbind:
	unbind_from_irqhandler(queue->tx_irq, queue);
	queue->tx_irq = 0;
err_unmap:
	xenvif_unmap_frontend_data_rings(queue);
	netif_napi_del(&queue->napi);
err:
	return err;
}

void xenvif_carrier_off(struct xenvif *vif)
{
	struct net_device *dev = vif->dev;

	rtnl_lock();
	if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
		netif_carrier_off(dev); /* discard queued packets */
		if (netif_running(dev))
			xenvif_down(vif);
	}
	rtnl_unlock();
}

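/* Tear down the data path: stop the kthreads, unbind the event channel
 * IRQs and unmap the shared rings for every queue. The queue structures
 * themselves are freed later, in xenvif_free().
 */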
void xenvif_disconnect_data(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	xenvif_carrier_off(vif);

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];

		netif_napi_del(&queue->napi);

		if (queue->task) {
			kthread_stop(queue->task);
			put_task_struct(queue->task);
			queue->task = NULL;
		}

		if (queue->dealloc_task) {
			kthread_stop(queue->dealloc_task);
			queue->dealloc_task = NULL;
		}

		if (queue->tx_irq) {
			if (queue->tx_irq == queue->rx_irq)
				unbind_from_irqhandler(queue->tx_irq, queue);
			else {
				unbind_from_irqhandler(queue->tx_irq, queue);
				unbind_from_irqhandler(queue->rx_irq, queue);
			}
			queue->tx_irq = 0;
		}

		xenvif_unmap_frontend_data_rings(queue);
	}

	xenvif_mcast_addr_list_free(vif);
}

void xenvif_disconnect_ctrl(struct xenvif *vif)
{
	if (vif->ctrl_irq) {
		xenvif_deinit_hash(vif);
		unbind_from_irqhandler(vif->ctrl_irq, vif);
		vif->ctrl_irq = 0;
	}

	if (vif->ctrl.sring) {
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->ctrl.sring);
		vif->ctrl.sring = NULL;
	}
}

/* Reverse the relevant parts of xenvif_init_queue().
 * Used for queue teardown from xenvif_free(), and on the
 * error handling paths in xenbus.c:connect().
 */
void xenvif_deinit_queue(struct xenvif_queue *queue)
{
	gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);
}

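/* Final teardown: unregister and free the netdev, then release every
 * queue's resources. Pairs with the __module_get() in xenvif_alloc().
 */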
void xenvif_free(struct xenvif *vif)
{
	struct xenvif_queue *queues = vif->queues;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	unregister_netdev(vif->dev);
	free_netdev(vif->dev);

	for (queue_index = 0; queue_index < num_queues; ++queue_index)
		xenvif_deinit_queue(&queues[queue_index]);
	vfree(queues);

	module_put(THIS_MODULE);
}