#include <linux/etherdevice.h>
#include <linux/if_macvlan.h>
#include <linux/interrupt.h>
#include <linux/nsproxy.h>
#include <linux/compat.h>
#include <linux/if_tun.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/fs.h>

#include <net/net_namespace.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/virtio_net.h>

/*
 * A macvtap queue is the central object of this driver; it connects
 * an open character device to a macvlan interface. There can be
 * multiple queues on one interface, which map back to queues
 * implemented in hardware on the underlying device.
 *
 * macvtap_proto is used to allocate queues through the sock allocation
 * mechanism.
 *
 * TODO: multiqueue support is currently not implemented, even though
 * macvtap is basically prepared for it. We will need to add this
 * here as well as in virtio-net and qemu to get line rate on 10gbit
 * adapters from a guest.
 */
struct macvtap_queue {
        struct sock sk;
        struct socket sock;
        struct socket_wq wq;
        int vnet_hdr_sz;
        struct macvlan_dev __rcu *vlan;
        struct file *file;
        unsigned int flags;
};

static struct proto macvtap_proto = {
        .name = "macvtap",
        .owner = THIS_MODULE,
        .obj_size = sizeof(struct macvtap_queue),
};

/*
 * The minor number matches netdev->ifindex, so we need a potentially
 * large value. This also makes it possible to split the
 * tap functionality out again in the future by offering it
 * from other drivers besides macvtap. As long as every device
 * only has one tap, the interface numbers ensure that the
 * device nodes are unique.
 */
static dev_t macvtap_major;
#define MACVTAP_NUM_DEVS 65536
#define GOODCOPY_LEN 128
static struct class *macvtap_class;
static struct cdev macvtap_cdev;

static const struct proto_ops macvtap_socket_ops;

/*
 * RCU usage:
 * The macvtap_queue and the macvlan_dev are loosely coupled; the
 * pointers from one to the other can only be read while rcu_read_lock
 * or macvtap_lock is held.
 *
 * Both the file and the macvlan_dev hold a reference on the macvtap_queue
 * through sock_hold(&q->sk). When the macvlan_dev goes away first,
 * q->vlan becomes inaccessible. When the file gets closed,
 * macvtap_get_queue() fails.
 *
 * There may still be references to the struct sock inside of the
 * queue from outbound SKBs, but these never reference back to the
 * file or the dev. The data structure is freed through __sk_free
 * when both our references and any pending SKBs are gone.
 */
static DEFINE_SPINLOCK(macvtap_lock);

/*
 * get_slot: return a slot in vlan->taps[]:
 * - if 'q' is NULL, return the first free slot;
 * - otherwise, return the slot that this pointer occupies.
 */
static int get_slot(struct macvlan_dev *vlan, struct macvtap_queue *q)
{
        int i;

        for (i = 0; i < MAX_MACVTAP_QUEUES; i++) {
                if (rcu_dereference(vlan->taps[i]) == q)
                        return i;
        }

        /* Should never happen */
        BUG_ON(1);
}

static int macvtap_set_queue(struct net_device *dev, struct file *file,
                             struct macvtap_queue *q)
{
        struct macvlan_dev *vlan = netdev_priv(dev);
        int index;
        int err = -EBUSY;

        spin_lock(&macvtap_lock);
        if (vlan->numvtaps == MAX_MACVTAP_QUEUES)
                goto out;

        err = 0;
        index = get_slot(vlan, NULL);
        rcu_assign_pointer(q->vlan, vlan);
        rcu_assign_pointer(vlan->taps[index], q);
        sock_hold(&q->sk);

        q->file = file;
        file->private_data = q;

        vlan->numvtaps++;

out:
        spin_unlock(&macvtap_lock);
        return err;
}

/*
 * The file owning the queue got closed; give up both
 * the reference that the file holds as well as the
 * one from the macvlan_dev, if that still exists.
 *
 * Using the spinlock makes sure that we don't get
 * to the queue again after destroying it.
 */
static void macvtap_put_queue(struct macvtap_queue *q)
{
        struct macvlan_dev *vlan;

        spin_lock(&macvtap_lock);
        vlan = rcu_dereference_protected(q->vlan,
                                         lockdep_is_held(&macvtap_lock));
        if (vlan) {
                int index = get_slot(vlan, q);

                rcu_assign_pointer(vlan->taps[index], NULL);
                rcu_assign_pointer(q->vlan, NULL);
                sock_put(&q->sk);
                --vlan->numvtaps;
        }

        spin_unlock(&macvtap_lock);

        synchronize_rcu();
        sock_put(&q->sk);
}

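/*
 * Note on macvtap_put_queue() above: the first sock_put() (inside the
 * if (vlan) branch) drops the reference held on behalf of the
 * macvlan_dev; the final sock_put() after synchronize_rcu() drops the
 * file's own reference, once no RCU reader can still see the queue.
 */
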
/*
 * Select a queue based on the rxq of the device on which this packet
 * arrived. If the incoming device is not multiqueue, calculate a flow
 * hash to select a queue. If that fails too, find the first available
 * queue. Cache vlan->numvtaps since it can become zero during the
 * execution of this function.
 */
static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
                                               struct sk_buff *skb)
{
        struct macvlan_dev *vlan = netdev_priv(dev);
        struct macvtap_queue *tap = NULL;
        int numvtaps = vlan->numvtaps;
        __u32 rxq;

        if (!numvtaps)
                goto out;

        if (likely(skb_rx_queue_recorded(skb))) {
                rxq = skb_get_rx_queue(skb);

                while (unlikely(rxq >= numvtaps))
                        rxq -= numvtaps;

                tap = rcu_dereference(vlan->taps[rxq]);
                if (tap)
                        goto out;
        }

        /* Check if we can use flow to select a queue */
        rxq = skb_get_rxhash(skb);
        if (rxq) {
                tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
                if (tap)
                        goto out;
        }

        /* Everything failed - find first available queue */
        for (rxq = 0; rxq < MAX_MACVTAP_QUEUES; rxq++) {
                tap = rcu_dereference(vlan->taps[rxq]);
                if (tap)
                        break;
        }

out:
        return tap;
}

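/*
 * Worked example for macvtap_get_queue() above (illustrative): with
 * numvtaps == 4 and a recorded rx queue of 6, the subtraction loop
 * folds rxq down to 2 and taps[2] is tried first; failing that, the
 * flow hash modulo 4 picks a slot, and only then does the linear
 * scan run.
 */
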
/*
 * The net_device is going away; give up the reference
 * that it holds on all queues and safely set the pointer
 * from the queues to NULL.
 */
static void macvtap_del_queues(struct net_device *dev)
{
        struct macvlan_dev *vlan = netdev_priv(dev);
        struct macvtap_queue *q, *qlist[MAX_MACVTAP_QUEUES];
        int i, j = 0;

        /* macvtap_put_queue can free some slots, so go through all slots */
        spin_lock(&macvtap_lock);
        for (i = 0; i < MAX_MACVTAP_QUEUES && vlan->numvtaps; i++) {
                q = rcu_dereference_protected(vlan->taps[i],
                                              lockdep_is_held(&macvtap_lock));
                if (q) {
                        qlist[j++] = q;
                        rcu_assign_pointer(vlan->taps[i], NULL);
                        rcu_assign_pointer(q->vlan, NULL);
                        vlan->numvtaps--;
                }
        }
        BUG_ON(vlan->numvtaps != 0);
        /* guarantee that any future macvtap_set_queue will fail */
        vlan->numvtaps = MAX_MACVTAP_QUEUES;
        spin_unlock(&macvtap_lock);

        synchronize_rcu();

        for (--j; j >= 0; j--)
                sock_put(&qlist[j]->sk);
}

/*
 * Forward happens for data that gets sent from one macvlan
 * endpoint to another one in bridge mode. We just take
 * the skb and put it into the receive queue.
 */
static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
{
        struct macvtap_queue *q = macvtap_get_queue(dev, skb);
        if (!q)
                goto drop;

        if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len)
                goto drop;

        skb_queue_tail(&q->sk.sk_receive_queue, skb);
        wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
        return NET_RX_SUCCESS;

drop:
        kfree_skb(skb);
        return NET_RX_DROP;
}

/*
 * Receive is for data from the external interface (lowerdev);
 * in the case of macvtap we can treat that the same way as
 * forward, which macvlan cannot.
 */
static int macvtap_receive(struct sk_buff *skb)
{
        skb_push(skb, ETH_HLEN);
        return macvtap_forward(skb->dev, skb);
}

static int macvtap_newlink(struct net *src_net,
                           struct net_device *dev,
                           struct nlattr *tb[],
                           struct nlattr *data[])
{
        struct device *classdev;
        dev_t devt;
        int err;

        err = macvlan_common_newlink(src_net, dev, tb, data,
                                     macvtap_receive, macvtap_forward);
        if (err)
                goto out;

        devt = MKDEV(MAJOR(macvtap_major), dev->ifindex);

        classdev = device_create(macvtap_class, &dev->dev, devt,
                                 dev, "tap%d", dev->ifindex);
        if (IS_ERR(classdev)) {
                err = PTR_ERR(classdev);
                macvtap_del_queues(dev);
        }

out:
        return err;
}

static void macvtap_dellink(struct net_device *dev,
                            struct list_head *head)
{
        device_destroy(macvtap_class,
                       MKDEV(MAJOR(macvtap_major), dev->ifindex));

        macvtap_del_queues(dev);
        macvlan_dellink(dev, head);
}

static void macvtap_setup(struct net_device *dev)
{
        macvlan_common_setup(dev);
        dev->tx_queue_len = TUN_READQ_SIZE;
}

static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
        .kind           = "macvtap",
        .setup          = macvtap_setup,
        .newlink        = macvtap_newlink,
        .dellink        = macvtap_dellink,
};

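/*
 * Creating the link from userspace follows the usual macvlan pattern,
 * e.g. with iproute2 (interface names are examples only):
 *
 *      ip link add link eth0 name macvtap0 type macvtap
 *
 * macvtap_newlink() then registers a "tap%d" class device for the new
 * ifindex, which udev typically exposes as /dev/tapN.
 */
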
static void macvtap_sock_write_space(struct sock *sk)
{
        wait_queue_head_t *wqueue;

        if (!sock_writeable(sk) ||
            !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
                return;

        wqueue = sk_sleep(sk);
        if (wqueue && waitqueue_active(wqueue))
                wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
}

static int macvtap_open(struct inode *inode, struct file *file)
{
        struct net *net = current->nsproxy->net_ns;
        struct net_device *dev = dev_get_by_index(net, iminor(inode));
        struct macvtap_queue *q;
        int err;

        err = -ENODEV;
        if (!dev)
                goto out;

        /* check if this is a macvtap device */
        err = -EINVAL;
        if (dev->rtnl_link_ops != &macvtap_link_ops)
                goto out;

        err = -ENOMEM;
        q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
                                             &macvtap_proto);
        if (!q)
                goto out;

        q->sock.wq = &q->wq;
        init_waitqueue_head(&q->wq.wait);
        q->sock.type = SOCK_RAW;
        q->sock.state = SS_CONNECTED;
        q->sock.file = file;
        q->sock.ops = &macvtap_socket_ops;
        sock_init_data(&q->sock, &q->sk);
        q->sk.sk_write_space = macvtap_sock_write_space;
        q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
        q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

        /*
         * So far only KVM virtio_net uses macvtap; enable zero copy between
         * guest kernel and host kernel when the lower device supports zerocopy.
         *
         * The macvlan device supports zerocopy iff the lower device supports
         * zero copy, so we don't have to look at the lower device directly.
         */
        if ((dev->features & NETIF_F_HIGHDMA) && (dev->features & NETIF_F_SG))
                sock_set_flag(&q->sk, SOCK_ZEROCOPY);

        err = macvtap_set_queue(dev, file, q);
        if (err)
                sock_put(&q->sk);

out:
        if (dev)
                dev_put(dev);

        return err;
}

static int macvtap_release(struct inode *inode, struct file *file)
{
        struct macvtap_queue *q = file->private_data;
        macvtap_put_queue(q);
        return 0;
}

static unsigned int macvtap_poll(struct file *file, poll_table *wait)
{
        struct macvtap_queue *q = file->private_data;
        unsigned int mask = POLLERR;

        if (!q)
                goto out;

        mask = 0;
        poll_wait(file, &q->wq.wait, wait);

        if (!skb_queue_empty(&q->sk.sk_receive_queue))
                mask |= POLLIN | POLLRDNORM;

        if (sock_writeable(&q->sk) ||
            (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock.flags) &&
             sock_writeable(&q->sk)))
                mask |= POLLOUT | POLLWRNORM;

out:
        return mask;
}

static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
                                                size_t len, size_t linear,
                                                int noblock, int *err)
{
        struct sk_buff *skb;

        /* Under a page? Don't bother with paged skb. */
        if (prepad + len < PAGE_SIZE || !linear)
                linear = len;

        skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
                                   err);
        if (!skb)
                return NULL;

        skb_reserve(skb, prepad);
        skb_put(skb, linear);
        skb->data_len = len - linear;
        skb->len += len - linear;

        return skb;
}

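/*
 * Example for macvtap_alloc_skb() (illustrative, 4K pages): an 8000
 * byte write with a 66 byte hdr_len hint yields a 66 byte linear head
 * plus 7934 bytes of paged data, while a 200 byte write stays below
 * PAGE_SIZE and is allocated fully linear.
 */
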
/* Set skb frags from an iovec; this could move to core network code for reuse. */
static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
                                  int offset, size_t count)
{
        int len = iov_length(from, count) - offset;
        int copy = skb_headlen(skb);
        int size, offset1 = 0;
        int i = 0;

        /* Skip over from offset */
        while (count && (offset >= from->iov_len)) {
                offset -= from->iov_len;
                ++from;
                --count;
        }

        /* copy up to skb headlen */
        while (count && (copy > 0)) {
                size = min_t(unsigned int, copy, from->iov_len - offset);
                if (copy_from_user(skb->data + offset1, from->iov_base + offset,
                                   size))
                        return -EFAULT;
                if (copy > size) {
                        ++from;
                        --count;
                }
                copy -= size;
                offset1 += size;
                offset = 0;
        }

        if (len == offset1)
                return 0;

        while (count--) {
                struct page *page[MAX_SKB_FRAGS];
                int num_pages;
                unsigned long base;

                len = from->iov_len - offset1;
                if (!len) {
                        offset1 = 0;
                        ++from;
                        continue;
                }
                base = (unsigned long)from->iov_base + offset1;
                size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
                num_pages = get_user_pages_fast(base, size, 0, &page[i]);
                if ((num_pages != size) ||
                    (num_pages > MAX_SKB_FRAGS - skb_shinfo(skb)->nr_frags))
                        /* put_page is in skb free */
                        return -EFAULT;
                skb->data_len += len;
                skb->len += len;
                skb->truesize += len;
                atomic_add(len, &skb->sk->sk_wmem_alloc);
                while (len) {
                        int off = base & ~PAGE_MASK;
                        int size = min_t(int, len, PAGE_SIZE - off);
                        __skb_fill_page_desc(skb, i, page[i], off, size);
                        skb_shinfo(skb)->nr_frags++;
                        /* increase sk_wmem_alloc */
                        base += size;
                        len -= size;
                        i++;
                }
                offset1 = 0;
                ++from;
        }
        return 0;
}

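/*
 * The page arithmetic in zerocopy_sg_from_iovec() rounds the user
 * range [base, base + len) up to whole pages:
 *
 *      size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
 *
 * e.g. (4K pages) an iovec starting at page offset 0xf00 with
 * len == 0x300 spans two pages: (0xf00 + 0x300 + 0xfff) >> 12 == 2.
 */
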
/*
 * macvtap_skb_from_vnet_hdr and macvtap_skb_to_vnet_hdr should
 * be shared with the tun/tap driver.
 */
static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
                                     struct virtio_net_hdr *vnet_hdr)
{
        unsigned short gso_type = 0;

        if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                switch (vnet_hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
                case VIRTIO_NET_HDR_GSO_TCPV4:
                        gso_type = SKB_GSO_TCPV4;
                        break;
                case VIRTIO_NET_HDR_GSO_TCPV6:
                        gso_type = SKB_GSO_TCPV6;
                        break;
                case VIRTIO_NET_HDR_GSO_UDP:
                        gso_type = SKB_GSO_UDP;
                        break;
                default:
                        return -EINVAL;
                }

                if (vnet_hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
                        gso_type |= SKB_GSO_TCP_ECN;

                if (vnet_hdr->gso_size == 0)
                        return -EINVAL;
        }

        if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
                if (!skb_partial_csum_set(skb, vnet_hdr->csum_start,
                                          vnet_hdr->csum_offset))
                        return -EINVAL;
        }

        if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                skb_shinfo(skb)->gso_size = vnet_hdr->gso_size;
                skb_shinfo(skb)->gso_type = gso_type;

                /* Header must be checked, and gso_segs computed. */
                skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
                skb_shinfo(skb)->gso_segs = 0;
        }
        return 0;
}

static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
                                   struct virtio_net_hdr *vnet_hdr)
{
        memset(vnet_hdr, 0, sizeof(*vnet_hdr));

        if (skb_is_gso(skb)) {
                struct skb_shared_info *sinfo = skb_shinfo(skb);

                /* This is a hint as to how much should be linear. */
                vnet_hdr->hdr_len = skb_headlen(skb);
                vnet_hdr->gso_size = sinfo->gso_size;
                if (sinfo->gso_type & SKB_GSO_TCPV4)
                        vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
                else if (sinfo->gso_type & SKB_GSO_TCPV6)
                        vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
                else if (sinfo->gso_type & SKB_GSO_UDP)
                        vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
                else
                        BUG();
                if (sinfo->gso_type & SKB_GSO_TCP_ECN)
                        vnet_hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
        } else
                vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
                vnet_hdr->csum_start = skb_checksum_start_offset(skb);
                vnet_hdr->csum_offset = skb->csum_offset;
        } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
                vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
        } /* else everything is zero */

        return 0;
}

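/*
 * Example (illustrative): for a TCPv4 GSO skb with a partial checksum,
 * macvtap_skb_to_vnet_hdr() produces roughly:
 *
 *      .flags          = VIRTIO_NET_HDR_F_NEEDS_CSUM,
 *      .gso_type       = VIRTIO_NET_HDR_GSO_TCPV4,
 *      .hdr_len        = skb_headlen(skb),
 *      .gso_size       = <MSS>,
 *      .csum_start     = <start of the TCP header>,
 *      .csum_offset    = offsetof(struct tcphdr, check)
 */
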
/* Get packet from user space buffer */
static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
                                const struct iovec *iv, unsigned long total_len,
                                size_t count, int noblock)
{
        struct sk_buff *skb;
        struct macvlan_dev *vlan;
        unsigned long len = total_len;
        int err;
        struct virtio_net_hdr vnet_hdr = { 0 };
        int vnet_hdr_len = 0;
        int copylen;
        bool zerocopy = false;

        if (q->flags & IFF_VNET_HDR) {
                vnet_hdr_len = q->vnet_hdr_sz;

                err = -EINVAL;
                if (len < vnet_hdr_len)
                        goto err;
                len -= vnet_hdr_len;

                err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0,
                                          sizeof(vnet_hdr));
                if (err < 0)
                        goto err;
                if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
                    vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
                                                        vnet_hdr.hdr_len)
                        vnet_hdr.hdr_len = vnet_hdr.csum_start +
                                                vnet_hdr.csum_offset + 2;
                err = -EINVAL;
                if (vnet_hdr.hdr_len > len)
                        goto err;
        }

        err = -EINVAL;
        if (unlikely(len < ETH_HLEN))
                goto err;

        if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY))
                zerocopy = true;

        if (zerocopy) {
                /* There are 256 bytes to be copied in skb, so there is
                 * enough room for skb expand head in case it is used.
                 * The rest of the buffer is mapped from userspace.
                 */
                copylen = vnet_hdr.hdr_len;
                if (!copylen)
                        copylen = GOODCOPY_LEN;
        } else
                copylen = len;

        skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
                                vnet_hdr.hdr_len, noblock, &err);
        if (!skb)
                goto err;

        if (zerocopy) {
                err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count);
                skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
        } else
                err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len,
                                                   len);
        if (err)
                goto err_kfree;

        skb_set_network_header(skb, ETH_HLEN);
        skb_reset_mac_header(skb);
        skb->protocol = eth_hdr(skb)->h_proto;

        if (vnet_hdr_len) {
                err = macvtap_skb_from_vnet_hdr(skb, &vnet_hdr);
                if (err)
                        goto err_kfree;
        }

        rcu_read_lock_bh();
        vlan = rcu_dereference_bh(q->vlan);
        /* copy skb_ubuf_info for callback when skb has no error */
        if (zerocopy)
                skb_shinfo(skb)->destructor_arg = m->msg_control;
        if (vlan)
                macvlan_start_xmit(skb, vlan->dev);
        else
                kfree_skb(skb);
        rcu_read_unlock_bh();

        return total_len;

err_kfree:
        kfree_skb(skb);

err:
        rcu_read_lock_bh();
        vlan = rcu_dereference_bh(q->vlan);
        if (vlan)
                vlan->dev->stats.tx_dropped++;
        rcu_read_unlock_bh();

        return err;
}

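/*
 * In the zerocopy path of macvtap_get_user() only the first copylen
 * bytes (the header hint, or GOODCOPY_LEN without one) are copied into
 * the linear head; the payload pages are pinned by
 * zerocopy_sg_from_iovec() and released when the skb is freed, at
 * which point the destructor_arg callback lets the sender reuse its
 * buffers.
 */
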
static ssize_t macvtap_aio_write(struct kiocb *iocb, const struct iovec *iv,
                                 unsigned long count, loff_t pos)
{
        struct file *file = iocb->ki_filp;
        ssize_t result;
        struct macvtap_queue *q = file->private_data;

        result = macvtap_get_user(q, NULL, iv, iov_length(iv, count), count,
                                  file->f_flags & O_NONBLOCK);
        return result;
}

/* Put packet to the user space buffer */
static ssize_t macvtap_put_user(struct macvtap_queue *q,
                                const struct sk_buff *skb,
                                const struct iovec *iv, int len)
{
        struct macvlan_dev *vlan;
        int ret;
        int vnet_hdr_len = 0;

        if (q->flags & IFF_VNET_HDR) {
                struct virtio_net_hdr vnet_hdr;
                vnet_hdr_len = q->vnet_hdr_sz;
                if ((len -= vnet_hdr_len) < 0)
                        return -EINVAL;

                ret = macvtap_skb_to_vnet_hdr(skb, &vnet_hdr);
                if (ret)
                        return ret;

                if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
                        return -EFAULT;
        }

        len = min_t(int, skb->len, len);

        ret = skb_copy_datagram_const_iovec(skb, 0, iv, vnet_hdr_len, len);

        rcu_read_lock_bh();
        vlan = rcu_dereference_bh(q->vlan);
        if (vlan)
                macvlan_count_rx(vlan, len, ret == 0, 0);
        rcu_read_unlock_bh();

        return ret ? ret : (len + vnet_hdr_len);
}

static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
                               const struct iovec *iv, unsigned long len,
                               int noblock)
{
        DECLARE_WAITQUEUE(wait, current);
        struct sk_buff *skb;
        ssize_t ret = 0;

        add_wait_queue(sk_sleep(&q->sk), &wait);
        while (len) {
                current->state = TASK_INTERRUPTIBLE;

                /* Read frames from the queue */
                skb = skb_dequeue(&q->sk.sk_receive_queue);
                if (!skb) {
                        if (noblock) {
                                ret = -EAGAIN;
                                break;
                        }
                        if (signal_pending(current)) {
                                ret = -ERESTARTSYS;
                                break;
                        }
                        /* Nothing to read, let's sleep */
                        schedule();
                        continue;
                }
                ret = macvtap_put_user(q, skb, iv, len);
                kfree_skb(skb);
                break;
        }

        current->state = TASK_RUNNING;
        remove_wait_queue(sk_sleep(&q->sk), &wait);
        return ret;
}

static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
                                unsigned long count, loff_t pos)
{
        struct file *file = iocb->ki_filp;
        struct macvtap_queue *q = file->private_data;
        ssize_t len, ret = 0;

        len = iov_length(iv, count);
        if (len < 0) {
                ret = -EINVAL;
                goto out;
        }

        ret = macvtap_do_read(q, iocb, iv, len, file->f_flags & O_NONBLOCK);
        ret = min_t(ssize_t, ret, len); /* XXX copied from tun.c. Why? */
out:
        return ret;
}

/*
 * provide compatibility with generic tun/tap interface
 */
static long macvtap_ioctl(struct file *file, unsigned int cmd,
                          unsigned long arg)
{
        struct macvtap_queue *q = file->private_data;
        struct macvlan_dev *vlan;
        void __user *argp = (void __user *)arg;
        struct ifreq __user *ifr = argp;
        unsigned int __user *up = argp;
        unsigned int u;
        int __user *sp = argp;
        int s;
        int ret;

        switch (cmd) {
        case TUNSETIFF:
                /* ignore the name, just look at flags */
                if (get_user(u, &ifr->ifr_flags))
                        return -EFAULT;

                ret = 0;
                if ((u & ~IFF_VNET_HDR) != (IFF_NO_PI | IFF_TAP))
                        ret = -EINVAL;
                else
                        q->flags = u;

                return ret;

        case TUNGETIFF:
                rcu_read_lock_bh();
                vlan = rcu_dereference_bh(q->vlan);
                if (vlan)
                        dev_hold(vlan->dev);
                rcu_read_unlock_bh();

                if (!vlan)
                        return -ENOLINK;

                ret = 0;
                if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
                    put_user(q->flags, &ifr->ifr_flags))
                        ret = -EFAULT;
                dev_put(vlan->dev);
                return ret;

        case TUNGETFEATURES:
                if (put_user(IFF_TAP | IFF_NO_PI | IFF_VNET_HDR, up))
                        return -EFAULT;
                return 0;

        case TUNSETSNDBUF:
                if (get_user(u, up))
                        return -EFAULT;

                q->sk.sk_sndbuf = u;
                return 0;

        case TUNGETVNETHDRSZ:
                s = q->vnet_hdr_sz;
                if (put_user(s, sp))
                        return -EFAULT;
                return 0;

        case TUNSETVNETHDRSZ:
                if (get_user(s, sp))
                        return -EFAULT;
                if (s < (int)sizeof(struct virtio_net_hdr))
                        return -EINVAL;

                q->vnet_hdr_sz = s;
                return 0;

        case TUNSETOFFLOAD:
                /* let the user check for future flags */
                if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
                            TUN_F_TSO_ECN | TUN_F_UFO))
                        return -EINVAL;

                /* TODO: only accept frames with the features that
                   got enabled for forwarded frames */
                if (!(q->flags & IFF_VNET_HDR))
                        return -EINVAL;
                return 0;

        default:
                return -EINVAL;
        }
}

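/*
 * A minimal userspace sketch of the ioctl interface above (error
 * handling omitted; the device name is an example only):
 *
 *      int fd = open("/dev/tap5", O_RDWR);
 *      struct ifreq ifr;
 *      ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR;
 *      ioctl(fd, TUNSETIFF, &ifr);            // only the flags are used
 *      int hdrsz;
 *      ioctl(fd, TUNGETVNETHDRSZ, &hdrsz);    // sizeof(struct virtio_net_hdr)
 *
 * Each read() then returns one frame, prefixed by a virtio_net_hdr
 * because IFF_VNET_HDR is set by default in macvtap_open().
 */
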
#ifdef CONFIG_COMPAT
static long macvtap_compat_ioctl(struct file *file, unsigned int cmd,
                                 unsigned long arg)
{
        return macvtap_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations macvtap_fops = {
        .owner          = THIS_MODULE,
        .open           = macvtap_open,
        .release        = macvtap_release,
        .aio_read       = macvtap_aio_read,
        .aio_write      = macvtap_aio_write,
        .poll           = macvtap_poll,
        .llseek         = no_llseek,
        .unlocked_ioctl = macvtap_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = macvtap_compat_ioctl,
#endif
};

static int macvtap_sendmsg(struct kiocb *iocb, struct socket *sock,
                           struct msghdr *m, size_t total_len)
{
        struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
        return macvtap_get_user(q, m, m->msg_iov, total_len, m->msg_iovlen,
                                m->msg_flags & MSG_DONTWAIT);
}

static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock,
                           struct msghdr *m, size_t total_len,
                           int flags)
{
        struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
        int ret;

        if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
                return -EINVAL;
        ret = macvtap_do_read(q, iocb, m->msg_iov, total_len,
                              flags & MSG_DONTWAIT);
        if (ret > total_len) {
                m->msg_flags |= MSG_TRUNC;
                ret = flags & MSG_TRUNC ? ret : total_len;
        }
        return ret;
}

/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops macvtap_socket_ops = {
        .sendmsg = macvtap_sendmsg,
        .recvmsg = macvtap_recvmsg,
};

/* Get an underlying socket object from tun file. Returns error unless file is
 * attached to a device. The returned object works like a packet socket; it
 * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
 * holding a reference to the file for as long as the socket is in use. */
struct socket *macvtap_get_socket(struct file *file)
{
        struct macvtap_queue *q;

        if (file->f_op != &macvtap_fops)
                return ERR_PTR(-EINVAL);
        q = file->private_data;
        if (!q)
                return ERR_PTR(-EBADFD);
        return &q->sock;
}
EXPORT_SYMBOL_GPL(macvtap_get_socket);

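/*
 * Consumer sketch (illustrative; vhost-net is the main in-tree user):
 *
 *      struct socket *sock = macvtap_get_socket(file);
 *      if (IS_ERR(sock))
 *              return PTR_ERR(sock);
 *
 * The returned sock->ops point at macvtap_socket_ops, so frames then
 * move via sock_sendmsg()/sock_recvmsg(), and the caller keeps its
 * file reference for as long as it uses the socket, as noted above.
 */
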
static int macvtap_init(void)
{
        int err;

        err = alloc_chrdev_region(&macvtap_major, 0,
                                  MACVTAP_NUM_DEVS, "macvtap");
        if (err)
                goto out1;

        cdev_init(&macvtap_cdev, &macvtap_fops);
        err = cdev_add(&macvtap_cdev, macvtap_major, MACVTAP_NUM_DEVS);
        if (err)
                goto out2;

        macvtap_class = class_create(THIS_MODULE, "macvtap");
        if (IS_ERR(macvtap_class)) {
                err = PTR_ERR(macvtap_class);
                goto out3;
        }

        err = macvlan_link_register(&macvtap_link_ops);
        if (err)
                goto out4;

        return 0;

out4:
        class_unregister(macvtap_class);
out3:
        cdev_del(&macvtap_cdev);
out2:
        unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
out1:
        return err;
}
module_init(macvtap_init);

static void macvtap_exit(void)
{
        rtnl_link_unregister(&macvtap_link_ops);
        class_unregister(macvtap_class);
        cdev_del(&macvtap_cdev);
        unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
}
module_exit(macvtap_exit);

MODULE_ALIAS_RTNL_LINK("macvtap");
MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>");
MODULE_LICENSE("GPL");