#include <linux/etherdevice.h>
#include <linux/if_macvlan.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/nsproxy.h>
#include <linux/compat.h>
#include <linux/if_tun.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/fs.h>
#include <linux/uio.h>

#include <net/net_namespace.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/virtio_net.h>
#include <linux/skb_array.h>

/*
 * A macvtap queue is the central object of this driver; it connects
 * an open character device to a macvlan interface. There can be
 * multiple queues on one interface, which map back to queues
 * implemented in hardware on the underlying device.
 *
 * macvtap_proto is used to allocate queues through the sock allocation
 * mechanism.
 */
struct macvtap_queue {
	struct sock sk;
	struct socket sock;
	struct socket_wq wq;
	int vnet_hdr_sz;
	struct macvlan_dev __rcu *vlan;
	struct file *file;
	unsigned int flags;
	u16 queue_index;
	bool enabled;
	struct list_head next;
	struct skb_array skb_array;
};

#define MACVTAP_FEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE)

#define MACVTAP_VNET_LE 0x80000000
#define MACVTAP_VNET_BE 0x40000000

#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool macvtap_legacy_is_little_endian(struct macvtap_queue *q)
{
	return q->flags & MACVTAP_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

static long macvtap_get_vnet_be(struct macvtap_queue *q, int __user *sp)
{
	int s = !!(q->flags & MACVTAP_VNET_BE);

	if (put_user(s, sp))
		return -EFAULT;

	return 0;
}

static long macvtap_set_vnet_be(struct macvtap_queue *q, int __user *sp)
{
	int s;

	if (get_user(s, sp))
		return -EFAULT;

	if (s)
		q->flags |= MACVTAP_VNET_BE;
	else
		q->flags &= ~MACVTAP_VNET_BE;

	return 0;
}
#else
static inline bool macvtap_legacy_is_little_endian(struct macvtap_queue *q)
{
	return virtio_legacy_is_little_endian();
}

static long macvtap_get_vnet_be(struct macvtap_queue *q, int __user *argp)
{
	return -EINVAL;
}

static long macvtap_set_vnet_be(struct macvtap_queue *q, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */

static inline bool macvtap_is_little_endian(struct macvtap_queue *q)
{
	return q->flags & MACVTAP_VNET_LE ||
		macvtap_legacy_is_little_endian(q);
}

static inline u16 macvtap16_to_cpu(struct macvtap_queue *q, __virtio16 val)
{
	return __virtio16_to_cpu(macvtap_is_little_endian(q), val);
}

static inline __virtio16 cpu_to_macvtap16(struct macvtap_queue *q, u16 val)
{
	return __cpu_to_virtio16(macvtap_is_little_endian(q), val);
}

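/*
 * Example (illustrative, not part of the original driver): how the
 * endianness helpers above are meant to be driven. A userspace consumer
 * such as QEMU running a big-endian guest on a little-endian host would
 * negotiate the vnet header byte order via the TUNSETVNETBE/TUNSETVNETLE
 * ioctls before relying on them, roughly:
 *
 *	int be = 1;
 *	if (ioctl(tap_fd, TUNSETVNETBE, &be) < 0)
 *		perror("TUNSETVNETBE");	// -EINVAL without CONFIG_TUN_VNET_CROSS_LE
 *
 * After that, macvtap16_to_cpu()/cpu_to_macvtap16() transparently byteswap
 * every __virtio16 field of struct virtio_net_hdr in both directions.
 * (tap_fd is a hypothetical, already-opened macvtap file descriptor.)
 */
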
static struct proto macvtap_proto = {
	.name = "macvtap",
	.owner = THIS_MODULE,
	.obj_size = sizeof (struct macvtap_queue),
};

/*
 * Variables for dealing with macvtap device numbers.
 */
static dev_t macvtap_major;
#define MACVTAP_NUM_DEVS (1U << MINORBITS)
static DEFINE_MUTEX(minor_lock);
static DEFINE_IDR(minor_idr);

#define GOODCOPY_LEN 128
static const void *macvtap_net_namespace(struct device *d)
{
	struct net_device *dev = to_net_dev(d->parent);
	return dev_net(dev);
}

static struct class macvtap_class = {
	.name = "macvtap",
	.owner = THIS_MODULE,
	.ns_type = &net_ns_type_operations,
	.namespace = macvtap_net_namespace,
};
static struct cdev macvtap_cdev;

static const struct proto_ops macvtap_socket_ops;

#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
		      NETIF_F_TSO6 | NETIF_F_UFO)
#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST)

static struct macvlan_dev *macvtap_get_vlan_rcu(const struct net_device *dev)
{
	return rcu_dereference(dev->rx_handler_data);
}

/*
 * RCU usage:
 * The macvtap_queue and the macvlan_dev are loosely coupled; the
 * pointers from one to the other can only be read while rcu_read_lock
 * or rtnl is held.
 *
 * Both the file and the macvlan_dev hold a reference on the macvtap_queue
 * through sock_hold(&q->sk). When the macvlan_dev goes away first,
 * q->vlan becomes inaccessible. When the file gets closed,
 * macvtap_get_queue() fails.
 *
 * There may still be references to the struct sock inside of the
 * queue from outbound SKBs, but these never reference back to the
 * file or the dev. The data structure is freed through __sk_free
 * when both our references and any pending SKBs are gone.
 */

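/*
 * Illustration (not original code): the locking contract above boils down
 * to two reader patterns used throughout this file. Fast paths take the
 * RCU read lock:
 *
 *	rcu_read_lock();
 *	vlan = rcu_dereference(q->vlan);
 *	if (vlan)
 *		... use vlan, no sleeping ...
 *	rcu_read_unlock();
 *
 * while configuration paths that may sleep hold the RTNL lock instead and
 * use rtnl_dereference(q->vlan), as macvtap_disable_queue() does below.
 */
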
static int macvtap_enable_queue(struct net_device *dev, struct file *file,
				struct macvtap_queue *q)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	int err = -EINVAL;

	ASSERT_RTNL();

	if (q->enabled)
		goto out;

	err = 0;
	rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
	q->queue_index = vlan->numvtaps;
	q->enabled = true;

	vlan->numvtaps++;
out:
	return err;
}

/* Requires RTNL */
static int macvtap_set_queue(struct net_device *dev, struct file *file,
			     struct macvtap_queue *q)
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	if (vlan->numqueues == MAX_MACVTAP_QUEUES)
		return -EBUSY;

	rcu_assign_pointer(q->vlan, vlan);
	rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
	sock_hold(&q->sk);

	q->file = file;
	q->queue_index = vlan->numvtaps;
	q->enabled = true;
	file->private_data = q;
	list_add_tail(&q->next, &vlan->queue_list);

	vlan->numvtaps++;
	vlan->numqueues++;

	return 0;
}

static int macvtap_disable_queue(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;
	struct macvtap_queue *nq;

	ASSERT_RTNL();
	if (!q->enabled)
		return -EINVAL;

	vlan = rtnl_dereference(q->vlan);

	if (vlan) {
		int index = q->queue_index;
		BUG_ON(index >= vlan->numvtaps);
		nq = rtnl_dereference(vlan->taps[vlan->numvtaps - 1]);
		nq->queue_index = index;

		rcu_assign_pointer(vlan->taps[index], nq);
		RCU_INIT_POINTER(vlan->taps[vlan->numvtaps - 1], NULL);
		q->enabled = false;

		vlan->numvtaps--;
	}

	return 0;
}

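/*
 * Illustration (not original code): macvtap_disable_queue() keeps the
 * active part of vlan->taps[] dense by swapping the last enabled queue
 * into the freed slot. With numvtaps == 3 and queue_index 1 being
 * disabled:
 *
 *	taps: [q0][q1][q2]   ->   [q0][q2][NULL]
 *
 * q2->queue_index becomes 1 and numvtaps drops to 2, so the flow-hash
 * modulo in macvtap_get_queue() keeps landing on live queues.
 */
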
/*
 * The file owning the queue got closed, give up both
 * the reference that the file holds as well as the
 * one from the macvlan_dev if that still exists.
 *
 * Holding the RTNL lock makes sure that we don't get
 * to the queue again after destroying it.
 */
static void macvtap_put_queue(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;

	rtnl_lock();
	vlan = rtnl_dereference(q->vlan);

	if (vlan) {
		if (q->enabled)
			BUG_ON(macvtap_disable_queue(q));

		vlan->numqueues--;
		RCU_INIT_POINTER(q->vlan, NULL);
		sock_put(&q->sk);
		list_del_init(&q->next);
	}

	rtnl_unlock();

	synchronize_rcu();
	sock_put(&q->sk);
}

/*
 * Select a queue based on the rxq of the device on which this packet
 * arrived. If the incoming device is not mq, calculate a flow hash
 * to select a queue. If all else fails, find the first available queue.
 * Cache vlan->numvtaps since it can become zero during the execution
 * of this function.
 */
static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
					       struct sk_buff *skb)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvtap_queue *tap = NULL;
	/* Access to taps array is protected by rcu, but access to numvtaps
	 * isn't. Below we use it to look up a queue, but treat it as a hint
	 * and validate that the result isn't NULL - in case we are
	 * racing against queue removal.
	 */
	int numvtaps = ACCESS_ONCE(vlan->numvtaps);
	__u32 rxq;

	if (!numvtaps)
		goto out;

	if (numvtaps == 1)
		goto single;

	/* Check if we can use flow to select a queue */
	rxq = skb_get_hash(skb);
	if (rxq) {
		tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
		goto out;
	}

	if (likely(skb_rx_queue_recorded(skb))) {
		rxq = skb_get_rx_queue(skb);

		while (unlikely(rxq >= numvtaps))
			rxq -= numvtaps;

		tap = rcu_dereference(vlan->taps[rxq]);
		goto out;
	}

single:
	tap = rcu_dereference(vlan->taps[0]);
out:
	return tap;
}

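/*
 * Worked example (not original code): with numvtaps == 4 and
 * skb_get_hash() returning 0x0000001d, the packet is steered to
 * taps[0x1d % 4] == taps[1]. A recorded rx queue of 6 on the same
 * device would instead be folded by the while loop above to
 * 6 - 4 = 2, i.e. taps[2].
 */
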
/*
 * The net_device is going away; give up the reference
 * that it holds on all queues and safely set the pointer
 * from the queues to NULL.
 */
static void macvtap_del_queues(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvtap_queue *q, *tmp;

	ASSERT_RTNL();
	list_for_each_entry_safe(q, tmp, &vlan->queue_list, next) {
		list_del_init(&q->next);
		RCU_INIT_POINTER(q->vlan, NULL);
		if (q->enabled)
			vlan->numvtaps--;
		vlan->numqueues--;
		sock_put(&q->sk);
	}
	BUG_ON(vlan->numvtaps);
	BUG_ON(vlan->numqueues);
	/* guarantee that any future macvtap_set_queue will fail */
	vlan->numvtaps = MAX_MACVTAP_QUEUES;
}

static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macvlan_dev *vlan;
	struct macvtap_queue *q;
	netdev_features_t features = TAP_FEATURES;

	vlan = macvtap_get_vlan_rcu(dev);
	if (!vlan)
		return RX_HANDLER_PASS;

	q = macvtap_get_queue(dev, skb);
	if (!q)
		return RX_HANDLER_PASS;

	if (__skb_array_full(&q->skb_array))
		goto drop;

	skb_push(skb, ETH_HLEN);

	/* Apply the forward feature mask so that we perform segmentation
	 * according to the user's wishes. This only works if VNET_HDR is
	 * enabled.
	 */
	if (q->flags & IFF_VNET_HDR)
		features |= vlan->tap_features;
	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs = __skb_gso_segment(skb, features, false);

		if (IS_ERR(segs))
			goto drop;

		if (!segs) {
			if (skb_array_produce(&q->skb_array, skb))
				goto drop;
			goto wake_up;
		}

		consume_skb(skb);
		while (segs) {
			struct sk_buff *nskb = segs->next;

			segs->next = NULL;
			if (skb_array_produce(&q->skb_array, segs)) {
				kfree_skb(segs);
				kfree_skb_list(nskb);
				break;
			}
			segs = nskb;
		}
	} else {
		/* If we receive a partial checksum and the tap side
		 * doesn't support checksum offload, compute the checksum.
		 * Note: it doesn't matter which checksum feature to
		 * check, we either support them all or none.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    !(features & NETIF_F_CSUM_MASK) &&
		    skb_checksum_help(skb))
			goto drop;
		if (skb_array_produce(&q->skb_array, skb))
			goto drop;
	}

wake_up:
	wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
	return RX_HANDLER_CONSUMED;

drop:
	/* Count errors/drops only here, thus don't care about args. */
	macvlan_count_rx(vlan, 0, 0, 0);
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}

static int macvtap_get_minor(struct macvlan_dev *vlan)
{
	int retval = -ENOMEM;

	mutex_lock(&minor_lock);
	retval = idr_alloc(&minor_idr, vlan, 1, MACVTAP_NUM_DEVS, GFP_KERNEL);
	if (retval >= 0) {
		vlan->minor = retval;
	} else if (retval == -ENOSPC) {
		printk(KERN_ERR "too many macvtap devices\n");
		retval = -EINVAL;
	}
	mutex_unlock(&minor_lock);
	return retval < 0 ? retval : 0;
}

static void macvtap_free_minor(struct macvlan_dev *vlan)
{
	mutex_lock(&minor_lock);
	if (vlan->minor) {
		idr_remove(&minor_idr, vlan->minor);
		vlan->minor = 0;
	}
	mutex_unlock(&minor_lock);
}

static struct net_device *dev_get_by_macvtap_minor(int minor)
{
	struct net_device *dev = NULL;
	struct macvlan_dev *vlan;

	mutex_lock(&minor_lock);
	vlan = idr_find(&minor_idr, minor);
	if (vlan) {
		dev = vlan->dev;
		dev_hold(dev);
	}
	mutex_unlock(&minor_lock);
	return dev;
}

static int macvtap_newlink(struct net *src_net,
			   struct net_device *dev,
			   struct nlattr *tb[],
			   struct nlattr *data[])
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	int err;

	INIT_LIST_HEAD(&vlan->queue_list);

	/* Since macvlan supports all offloads by default, make
	 * tap support all offloads also.
	 */
	vlan->tap_features = TUN_OFFLOADS;

	err = netdev_rx_handler_register(dev, macvtap_handle_frame, vlan);
	if (err)
		return err;

	/* Don't put anything that may fail after macvlan_common_newlink
	 * because we can't undo what it does.
	 */
	err = macvlan_common_newlink(src_net, dev, tb, data);
	if (err) {
		netdev_rx_handler_unregister(dev);
		return err;
	}

	return 0;
}

static void macvtap_dellink(struct net_device *dev,
			    struct list_head *head)
{
	netdev_rx_handler_unregister(dev);
	macvtap_del_queues(dev);
	macvlan_dellink(dev, head);
}

static void macvtap_setup(struct net_device *dev)
{
	macvlan_common_setup(dev);
	dev->tx_queue_len = TUN_READQ_SIZE;
}

static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
	.kind = "macvtap",
	.setup = macvtap_setup,
	.newlink = macvtap_newlink,
	.dellink = macvtap_dellink,
};


static void macvtap_sock_write_space(struct sock *sk)
{
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk) ||
	    !test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
}

static void macvtap_sock_destruct(struct sock *sk)
{
	struct macvtap_queue *q = container_of(sk, struct macvtap_queue, sk);

	skb_array_cleanup(&q->skb_array);
}

static int macvtap_open(struct inode *inode, struct file *file)
{
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev;
	struct macvtap_queue *q;
	int err = -ENODEV;

	rtnl_lock();
	dev = dev_get_by_macvtap_minor(iminor(inode));
	if (!dev)
		goto err;

	err = -ENOMEM;
	q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					     &macvtap_proto, 0);
	if (!q)
		goto err;

	RCU_INIT_POINTER(q->sock.wq, &q->wq);
	init_waitqueue_head(&q->wq.wait);
	q->sock.type = SOCK_RAW;
	q->sock.state = SS_CONNECTED;
	q->sock.file = file;
	q->sock.ops = &macvtap_socket_ops;
	sock_init_data(&q->sock, &q->sk);
	q->sk.sk_write_space = macvtap_sock_write_space;
	q->sk.sk_destruct = macvtap_sock_destruct;
	q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
	q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

	/*
	 * So far only KVM virtio_net uses macvtap; enable zero copy between
	 * guest kernel and host kernel when the lower device supports zerocopy.
	 *
	 * The macvlan supports zerocopy iff the lower device supports zero
	 * copy, so we don't have to look at the lower device directly.
	 */
	if ((dev->features & NETIF_F_HIGHDMA) && (dev->features & NETIF_F_SG))
		sock_set_flag(&q->sk, SOCK_ZEROCOPY);

	err = -ENOMEM;
	if (skb_array_init(&q->skb_array, dev->tx_queue_len, GFP_KERNEL))
		goto err_array;

	err = macvtap_set_queue(dev, file, q);
	if (err)
		goto err_queue;

	dev_put(dev);

	rtnl_unlock();
	return err;

err_queue:
	skb_array_cleanup(&q->skb_array);
err_array:
	sock_put(&q->sk);
err:
	if (dev)
		dev_put(dev);

	rtnl_unlock();
	return err;
}

static int macvtap_release(struct inode *inode, struct file *file)
{
	struct macvtap_queue *q = file->private_data;
	macvtap_put_queue(q);
	return 0;
}

static unsigned int macvtap_poll(struct file *file, poll_table * wait)
{
	struct macvtap_queue *q = file->private_data;
	unsigned int mask = POLLERR;

	if (!q)
		goto out;

	mask = 0;
	poll_wait(file, &q->wq.wait, wait);

	if (!skb_array_empty(&q->skb_array))
		mask |= POLLIN | POLLRDNORM;

	if (sock_writeable(&q->sk) ||
	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) &&
	     sock_writeable(&q->sk)))
		mask |= POLLOUT | POLLWRNORM;

out:
	return mask;
}

static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
						size_t len, size_t linear,
						int noblock, int *err)
{
	struct sk_buff *skb;

	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   err, 0);
	if (!skb)
		return NULL;

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}

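/*
 * Worked example for the helper above (not original code): for a
 * 4000-byte packet with linear == GOODCOPY_LEN (128), prepad + len
 * exceeds PAGE_SIZE, so the skb keeps a 128-byte linear head and the
 * remaining 3872 bytes land in paged fragments: skb->len ends up 4000
 * with skb->data_len == 3872. A 200-byte packet instead collapses to a
 * fully linear skb, because prepad + 200 < PAGE_SIZE forces linear = len.
 */
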
/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
#define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN)

/* Get packet from user space buffer */
static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
				struct iov_iter *from, int noblock)
{
	int good_linear = SKB_MAX_HEAD(MACVTAP_RESERVE);
	struct sk_buff *skb;
	struct macvlan_dev *vlan;
	unsigned long total_len = iov_iter_count(from);
	unsigned long len = total_len;
	int err;
	struct virtio_net_hdr vnet_hdr = { 0 };
	int vnet_hdr_len = 0;
	int copylen = 0;
	int depth;
	bool zerocopy = false;
	size_t linear;
	ssize_t n;

	if (q->flags & IFF_VNET_HDR) {
		vnet_hdr_len = q->vnet_hdr_sz;

		err = -EINVAL;
		if (len < vnet_hdr_len)
			goto err;
		len -= vnet_hdr_len;

		err = -EFAULT;
		n = copy_from_iter(&vnet_hdr, sizeof(vnet_hdr), from);
		if (n != sizeof(vnet_hdr))
			goto err;
		iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr));
		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    macvtap16_to_cpu(q, vnet_hdr.csum_start) +
		    macvtap16_to_cpu(q, vnet_hdr.csum_offset) + 2 >
			     macvtap16_to_cpu(q, vnet_hdr.hdr_len))
			vnet_hdr.hdr_len = cpu_to_macvtap16(q,
				 macvtap16_to_cpu(q, vnet_hdr.csum_start) +
				 macvtap16_to_cpu(q, vnet_hdr.csum_offset) + 2);
		err = -EINVAL;
		if (macvtap16_to_cpu(q, vnet_hdr.hdr_len) > len)
			goto err;
	}

	err = -EINVAL;
	if (unlikely(len < ETH_HLEN))
		goto err;

	if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
		struct iov_iter i;

		copylen = vnet_hdr.hdr_len ?
			macvtap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
		if (copylen > good_linear)
			copylen = good_linear;
		else if (copylen < ETH_HLEN)
			copylen = ETH_HLEN;
		linear = copylen;
		i = *from;
		iov_iter_advance(&i, copylen);
		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
			zerocopy = true;
	}

	if (!zerocopy) {
		copylen = len;
		linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
		if (linear > good_linear)
			linear = good_linear;
		else if (linear < ETH_HLEN)
			linear = ETH_HLEN;
	}

	skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen,
				linear, noblock, &err);
	if (!skb)
		goto err;

	if (zerocopy)
		err = zerocopy_sg_from_iter(skb, from);
	else {
		err = skb_copy_datagram_from_iter(skb, 0, from, len);
		if (!err && m && m->msg_control) {
			struct ubuf_info *uarg = m->msg_control;
			uarg->callback(uarg, false);
		}
	}

	if (err)
		goto err_kfree;

	skb_set_network_header(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	if (vnet_hdr_len) {
		err = virtio_net_hdr_to_skb(skb, &vnet_hdr,
					    macvtap_is_little_endian(q));
		if (err)
			goto err_kfree;
	}

	skb_probe_transport_header(skb, ETH_HLEN);

	/* Move network header to the right position for VLAN tagged packets */
	if ((skb->protocol == htons(ETH_P_8021Q) ||
	     skb->protocol == htons(ETH_P_8021AD)) &&
	    __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
		skb_set_network_header(skb, depth);

	rcu_read_lock();
	vlan = rcu_dereference(q->vlan);
	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_shinfo(skb)->destructor_arg = m->msg_control;
		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
	}
	if (vlan) {
		skb->dev = vlan->dev;
		dev_queue_xmit(skb);
	} else {
		kfree_skb(skb);
	}
	rcu_read_unlock();

	return total_len;

err_kfree:
	kfree_skb(skb);

err:
	rcu_read_lock();
	vlan = rcu_dereference(q->vlan);
	if (vlan)
		this_cpu_inc(vlan->pcpu_stats->tx_dropped);
	rcu_read_unlock();

	return err;
}

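/*
 * Example (illustrative, not part of the original driver): with
 * IFF_VNET_HDR set (the default from macvtap_open()), every write() is
 * expected to look like
 *
 *	[ q->vnet_hdr_sz bytes of struct virtio_net_hdr ][ Ethernet frame ]
 *
 * so a minimal userspace sender, given a hypothetical already-opened
 * tap_fd and a frame buffer, might do:
 *
 *	struct virtio_net_hdr h = { 0 };
 *	struct iovec iov[2] = {
 *		{ .iov_base = &h,    .iov_len = sizeof(h) },
 *		{ .iov_base = frame, .iov_len = frame_len },
 *	};
 *	writev(tap_fd, iov, 2);
 *
 * This works when vnet_hdr_sz == sizeof(h); a larger negotiated header
 * size needs matching padding, which macvtap_get_user() skips via
 * iov_iter_advance().
 */
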
static ssize_t macvtap_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct macvtap_queue *q = file->private_data;

	return macvtap_get_user(q, NULL, from, file->f_flags & O_NONBLOCK);
}

/* Put packet to the user space buffer */
static ssize_t macvtap_put_user(struct macvtap_queue *q,
				const struct sk_buff *skb,
				struct iov_iter *iter)
{
	int ret;
	int vnet_hdr_len = 0;
	int vlan_offset = 0;
	int total;

	if (q->flags & IFF_VNET_HDR) {
		struct virtio_net_hdr vnet_hdr;
		vnet_hdr_len = q->vnet_hdr_sz;
		if (iov_iter_count(iter) < vnet_hdr_len)
			return -EINVAL;

		ret = virtio_net_hdr_from_skb(skb, &vnet_hdr,
					      macvtap_is_little_endian(q));
		if (ret)
			BUG();

		if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
		    sizeof(vnet_hdr))
			return -EFAULT;

		iov_iter_advance(iter, vnet_hdr_len - sizeof(vnet_hdr));
	}
	total = vnet_hdr_len;
	total += skb->len;

	if (skb_vlan_tag_present(skb)) {
		struct {
			__be16 h_vlan_proto;
			__be16 h_vlan_TCI;
		} veth;
		veth.h_vlan_proto = skb->vlan_proto;
		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));

		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
		total += VLAN_HLEN;

		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
		if (ret || !iov_iter_count(iter))
			goto done;

		ret = copy_to_iter(&veth, sizeof(veth), iter);
		if (ret != sizeof(veth) || !iov_iter_count(iter))
			goto done;
	}

	ret = skb_copy_datagram_iter(skb, vlan_offset, iter,
				     skb->len - vlan_offset);

done:
	return ret ? ret : total;
}

static ssize_t macvtap_do_read(struct macvtap_queue *q,
			       struct iov_iter *to,
			       int noblock)
{
	DEFINE_WAIT(wait);
	struct sk_buff *skb;
	ssize_t ret = 0;

	if (!iov_iter_count(to))
		return 0;

	while (1) {
		if (!noblock)
			prepare_to_wait(sk_sleep(&q->sk), &wait,
					TASK_INTERRUPTIBLE);

		/* Read frames from the queue */
		skb = skb_array_consume(&q->skb_array);
		if (skb)
			break;
		if (noblock) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		/* Nothing to read, let's sleep */
		schedule();
	}
	if (!noblock)
		finish_wait(sk_sleep(&q->sk), &wait);

	if (skb) {
		ret = macvtap_put_user(q, skb, to);
		if (unlikely(ret < 0))
			kfree_skb(skb);
		else
			consume_skb(skb);
	}
	return ret;
}

static ssize_t macvtap_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct macvtap_queue *q = file->private_data;
	ssize_t len = iov_iter_count(to), ret;

	ret = macvtap_do_read(q, to, file->f_flags & O_NONBLOCK);
	ret = min_t(ssize_t, ret, len);
	if (ret > 0)
		iocb->ki_pos = ret;
	return ret;
}

static struct macvlan_dev *macvtap_get_vlan(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;

	ASSERT_RTNL();
	vlan = rtnl_dereference(q->vlan);
	if (vlan)
		dev_hold(vlan->dev);

	return vlan;
}

static void macvtap_put_vlan(struct macvlan_dev *vlan)
{
	dev_put(vlan->dev);
}

static int macvtap_ioctl_set_queue(struct file *file, unsigned int flags)
{
	struct macvtap_queue *q = file->private_data;
	struct macvlan_dev *vlan;
	int ret;

	vlan = macvtap_get_vlan(q);
	if (!vlan)
		return -EINVAL;

	if (flags & IFF_ATTACH_QUEUE)
		ret = macvtap_enable_queue(vlan->dev, file, q);
	else if (flags & IFF_DETACH_QUEUE)
		ret = macvtap_disable_queue(q);
	else
		ret = -EINVAL;

	macvtap_put_vlan(vlan);
	return ret;
}

static int set_offload(struct macvtap_queue *q, unsigned long arg)
{
	struct macvlan_dev *vlan;
	netdev_features_t features;
	netdev_features_t feature_mask = 0;

	vlan = rtnl_dereference(q->vlan);
	if (!vlan)
		return -ENOLINK;

	features = vlan->dev->features;

	if (arg & TUN_F_CSUM) {
		feature_mask = NETIF_F_HW_CSUM;

		if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN)
				feature_mask |= NETIF_F_TSO_ECN;
			if (arg & TUN_F_TSO4)
				feature_mask |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				feature_mask |= NETIF_F_TSO6;
		}

		if (arg & TUN_F_UFO)
			feature_mask |= NETIF_F_UFO;
	}

	/* The tun/tap driver inverts the usage for TSO offloads: setting
	 * the TSO bit means that userspace wants to accept TSO frames, and
	 * turning it off means that userspace does not support TSO.
	 * For macvtap, we have to invert it to mean the same thing.
	 * When userspace turns off TSO, we turn off GSO/LRO so that
	 * userspace will not receive TSO frames.
	 */
	if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO))
		features |= RX_OFFLOADS;
	else
		features &= ~RX_OFFLOADS;

	/* tap_features are the same as features on tun/tap and
	 * reflect user expectations.
	 */
	vlan->tap_features = feature_mask;
	vlan->set_features = features;
	netdev_update_features(vlan->dev);

	return 0;
}

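/*
 * Worked example (not original code): a TUNSETOFFLOAD argument of
 * TUN_F_CSUM | TUN_F_TSO4 maps to feature_mask = NETIF_F_HW_CSUM |
 * NETIF_F_TSO, so GRO/LRO stay enabled on the macvlan. An argument of
 * plain TUN_F_CSUM leaves all TSO bits clear, and the RX_OFFLOADS branch
 * above strips NETIF_F_GRO | NETIF_F_LRO so the tap never sees
 * aggregated frames it did not ask for.
 */
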
/*
 * Provide compatibility with the generic tun/tap interface.
 */
static long macvtap_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct macvtap_queue *q = file->private_data;
	struct macvlan_dev *vlan;
	void __user *argp = (void __user *)arg;
	struct ifreq __user *ifr = argp;
	unsigned int __user *up = argp;
	unsigned short u;
	int __user *sp = argp;
	struct sockaddr sa;
	int s;
	int ret;

	switch (cmd) {
	case TUNSETIFF:
		/* ignore the name, just look at flags */
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;

		ret = 0;
		if ((u & ~MACVTAP_FEATURES) != (IFF_NO_PI | IFF_TAP))
			ret = -EINVAL;
		else
			q->flags = (q->flags & ~MACVTAP_FEATURES) | u;

		return ret;

	case TUNGETIFF:
		rtnl_lock();
		vlan = macvtap_get_vlan(q);
		if (!vlan) {
			rtnl_unlock();
			return -ENOLINK;
		}

		ret = 0;
		u = q->flags;
		if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
		    put_user(u, &ifr->ifr_flags))
			ret = -EFAULT;
		macvtap_put_vlan(vlan);
		rtnl_unlock();
		return ret;

	case TUNSETQUEUE:
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;
		rtnl_lock();
		ret = macvtap_ioctl_set_queue(file, u);
		rtnl_unlock();
		return ret;

	case TUNGETFEATURES:
		if (put_user(IFF_TAP | IFF_NO_PI | MACVTAP_FEATURES, up))
			return -EFAULT;
		return 0;

	case TUNSETSNDBUF:
		if (get_user(s, sp))
			return -EFAULT;

		q->sk.sk_sndbuf = s;
		return 0;

	case TUNGETVNETHDRSZ:
		s = q->vnet_hdr_sz;
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETHDRSZ:
		if (get_user(s, sp))
			return -EFAULT;
		if (s < (int)sizeof(struct virtio_net_hdr))
			return -EINVAL;

		q->vnet_hdr_sz = s;
		return 0;

	case TUNGETVNETLE:
		s = !!(q->flags & MACVTAP_VNET_LE);
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETLE:
		if (get_user(s, sp))
			return -EFAULT;
		if (s)
			q->flags |= MACVTAP_VNET_LE;
		else
			q->flags &= ~MACVTAP_VNET_LE;
		return 0;

	case TUNGETVNETBE:
		return macvtap_get_vnet_be(q, sp);

	case TUNSETVNETBE:
		return macvtap_set_vnet_be(q, sp);

	case TUNSETOFFLOAD:
		/* let the user check for future flags */
		if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
			    TUN_F_TSO_ECN | TUN_F_UFO))
			return -EINVAL;

		rtnl_lock();
		ret = set_offload(q, arg);
		rtnl_unlock();
		return ret;

	case SIOCGIFHWADDR:
		rtnl_lock();
		vlan = macvtap_get_vlan(q);
		if (!vlan) {
			rtnl_unlock();
			return -ENOLINK;
		}
		ret = 0;
		u = vlan->dev->type;
		if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
		    copy_to_user(&ifr->ifr_hwaddr.sa_data, vlan->dev->dev_addr, ETH_ALEN) ||
		    put_user(u, &ifr->ifr_hwaddr.sa_family))
			ret = -EFAULT;
		macvtap_put_vlan(vlan);
		rtnl_unlock();
		return ret;

	case SIOCSIFHWADDR:
		if (copy_from_user(&sa, &ifr->ifr_hwaddr, sizeof(sa)))
			return -EFAULT;
		rtnl_lock();
		vlan = macvtap_get_vlan(q);
		if (!vlan) {
			rtnl_unlock();
			return -ENOLINK;
		}
		ret = dev_set_mac_address(vlan->dev, &sa);
		macvtap_put_vlan(vlan);
		rtnl_unlock();
		return ret;

	default:
		return -EINVAL;
	}
}

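/*
 * Example (illustrative, not part of the original driver): a minimal
 * userspace open sequence, assuming the macvtap netdev has ifindex 5 so
 * its char device is /dev/tap5 (see the device_create() naming in
 * macvtap_device_event() below):
 *
 *	int fd = open("/dev/tap5", O_RDWR);	// hypothetical ifindex
 *	struct ifreq ifr = {
 *		.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR,
 *	};
 *	if (ioctl(fd, TUNSETIFF, &ifr) < 0)
 *		perror("TUNSETIFF");
 *
 * Unlike tun/tap, the interface name in ifr is ignored here; TUNSETIFF
 * only validates and stores the flag bits.
 */
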
#ifdef CONFIG_COMPAT
static long macvtap_compat_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	return macvtap_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations macvtap_fops = {
	.owner = THIS_MODULE,
	.open = macvtap_open,
	.release = macvtap_release,
	.read_iter = macvtap_read_iter,
	.write_iter = macvtap_write_iter,
	.poll = macvtap_poll,
	.llseek = no_llseek,
	.unlocked_ioctl = macvtap_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = macvtap_compat_ioctl,
#endif
};

static int macvtap_sendmsg(struct socket *sock, struct msghdr *m,
			   size_t total_len)
{
	struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
	return macvtap_get_user(q, m, &m->msg_iter, m->msg_flags & MSG_DONTWAIT);
}

static int macvtap_recvmsg(struct socket *sock, struct msghdr *m,
			   size_t total_len, int flags)
{
	struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
	int ret;
	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
		return -EINVAL;
	ret = macvtap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT);
	if (ret > total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
	return ret;
}

static int macvtap_peek_len(struct socket *sock)
{
	struct macvtap_queue *q = container_of(sock, struct macvtap_queue,
					       sock);
	return skb_array_peek_len(&q->skb_array);
}

/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops macvtap_socket_ops = {
	.sendmsg = macvtap_sendmsg,
	.recvmsg = macvtap_recvmsg,
	.peek_len = macvtap_peek_len,
};

/* Get an underlying socket object from a macvtap file. Returns error unless
 * the file is attached to a device. The returned object works like a packet
 * socket; it can be used for sock_sendmsg/sock_recvmsg. The caller is
 * responsible for holding a reference to the file for as long as the socket
 * is in use. */
struct socket *macvtap_get_socket(struct file *file)
{
	struct macvtap_queue *q;
	if (file->f_op != &macvtap_fops)
		return ERR_PTR(-EINVAL);
	q = file->private_data;
	if (!q)
		return ERR_PTR(-EBADFD);
	return &q->sock;
}
EXPORT_SYMBOL_GPL(macvtap_get_socket);

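/*
 * Sketch (not original code): the intended in-kernel consumer is
 * vhost-net, which resolves a queue fd handed in from userspace roughly
 * like this:
 *
 *	struct file *file = fget(fd);
 *	struct socket *sock = macvtap_get_socket(file);
 *	if (IS_ERR(sock)) {
 *		fput(file);
 *		return PTR_ERR(sock);
 *	}
 *	// drive the queue via sock->ops->sendmsg()/recvmsg()/peek_len()
 *
 * keeping the file reference for the socket's lifetime, as required by
 * the comment above.
 */
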
static int macvtap_queue_resize(struct macvlan_dev *vlan)
{
	struct net_device *dev = vlan->dev;
	struct macvtap_queue *q;
	struct skb_array **arrays;
	int n = vlan->numqueues;
	int ret, i = 0;

	arrays = kmalloc(sizeof *arrays * n, GFP_KERNEL);
	if (!arrays)
		return -ENOMEM;

	list_for_each_entry(q, &vlan->queue_list, next)
		arrays[i++] = &q->skb_array;

	ret = skb_array_resize_multiple(arrays, n,
					dev->tx_queue_len, GFP_KERNEL);

	kfree(arrays);
	return ret;
}

static int macvtap_device_event(struct notifier_block *unused,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct macvlan_dev *vlan;
	struct device *classdev;
	dev_t devt;
	int err;
	char tap_name[IFNAMSIZ];

	if (dev->rtnl_link_ops != &macvtap_link_ops)
		return NOTIFY_DONE;

	snprintf(tap_name, IFNAMSIZ, "tap%d", dev->ifindex);
	vlan = netdev_priv(dev);

	switch (event) {
	case NETDEV_REGISTER:
		/* Create the device node here after the network device has
		 * been registered but before register_netdevice has
		 * finished running.
		 */
		err = macvtap_get_minor(vlan);
		if (err)
			return notifier_from_errno(err);

		devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
		classdev = device_create(&macvtap_class, &dev->dev, devt,
					 dev, tap_name);
		if (IS_ERR(classdev)) {
			macvtap_free_minor(vlan);
			return notifier_from_errno(PTR_ERR(classdev));
		}
		err = sysfs_create_link(&dev->dev.kobj, &classdev->kobj,
					tap_name);
		if (err)
			return notifier_from_errno(err);
		break;
	case NETDEV_UNREGISTER:
		/* vlan->minor == 0 if NETDEV_REGISTER above failed */
		if (vlan->minor == 0)
			break;
		sysfs_remove_link(&dev->dev.kobj, tap_name);
		devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
		device_destroy(&macvtap_class, devt);
		macvtap_free_minor(vlan);
		break;
	case NETDEV_CHANGE_TX_QUEUE_LEN:
		if (macvtap_queue_resize(vlan))
			return NOTIFY_BAD;
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block macvtap_notifier_block __read_mostly = {
	.notifier_call = macvtap_device_event,
};

static int macvtap_init(void)
{
	int err;

	err = alloc_chrdev_region(&macvtap_major, 0,
				  MACVTAP_NUM_DEVS, "macvtap");
	if (err)
		goto out1;

	cdev_init(&macvtap_cdev, &macvtap_fops);
	err = cdev_add(&macvtap_cdev, macvtap_major, MACVTAP_NUM_DEVS);
	if (err)
		goto out2;

	err = class_register(&macvtap_class);
	if (err)
		goto out3;

	err = register_netdevice_notifier(&macvtap_notifier_block);
	if (err)
		goto out4;

	err = macvlan_link_register(&macvtap_link_ops);
	if (err)
		goto out5;

	return 0;

out5:
	unregister_netdevice_notifier(&macvtap_notifier_block);
out4:
	class_unregister(&macvtap_class);
out3:
	cdev_del(&macvtap_cdev);
out2:
	unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
out1:
	return err;
}
module_init(macvtap_init);

static void macvtap_exit(void)
{
	rtnl_link_unregister(&macvtap_link_ops);
	unregister_netdevice_notifier(&macvtap_notifier_block);
	class_unregister(&macvtap_class);
	cdev_del(&macvtap_cdev);
	unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
	idr_destroy(&minor_idr);
}
module_exit(macvtap_exit);

MODULE_ALIAS_RTNL_LINK("macvtap");
MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>");
MODULE_LICENSE("GPL");