blob: 83fdd0a87eb6d738b6932c9cbd92d9ca7739a964 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * PACKET - implements raw packet sockets.
7 *
Jesper Juhl02c30a82005-05-05 16:16:16 -07008 * Authors: Ross Biro
Linus Torvalds1da177e2005-04-16 15:20:36 -07009 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10 * Alan Cox, <gw4pts@gw4pts.ampr.org>
11 *
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +090012 * Fixes:
Linus Torvalds1da177e2005-04-16 15:20:36 -070013 * Alan Cox : verify_area() now used correctly
14 * Alan Cox : new skbuff lists, look ma no backlogs!
15 * Alan Cox : tidied skbuff lists.
16 * Alan Cox : Now uses generic datagram routines I
17 * added. Also fixed the peek/read crash
18 * from all old Linux datagram code.
19 * Alan Cox : Uses the improved datagram code.
20 * Alan Cox : Added NULL's for socket options.
21 * Alan Cox : Re-commented the code.
22 * Alan Cox : Use new kernel side addressing
23 * Rob Janssen : Correct MTU usage.
24 * Dave Platt : Counter leaks caused by incorrect
25 * interrupt locking and some slightly
26 * dubious gcc output. Can you read
27 * compiler: it said _VOLATILE_
28 * Richard Kooijman : Timestamp fixes.
29 * Alan Cox : New buffers. Use sk->mac.raw.
30 * Alan Cox : sendmsg/recvmsg support.
31 * Alan Cox : Protocol setting support
32 * Alexey Kuznetsov : Untied from IPv4 stack.
33 * Cyrus Durgin : Fixed kerneld for kmod.
34 * Michal Ostrowski : Module initialization cleanup.
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +090035 * Ulises Alonso : Frame number limit removal and
Linus Torvalds1da177e2005-04-16 15:20:36 -070036 * packet_set_ring memory leak.
Eric W. Biederman0fb375f2005-09-21 00:11:37 -070037 * Eric Biederman : Allow for > 8 byte hardware addresses.
38 * The convention is that longer addresses
39 * will simply extend the hardware address
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +090040 * byte arrays at the end of sockaddr_ll
Eric W. Biederman0fb375f2005-09-21 00:11:37 -070041 * and packet_mreq.
Johann Baudy69e3c752009-05-18 22:11:22 -070042 * Johann Baudy : Added TX RING.
chetan lokef6fb8f12011-08-19 10:18:16 +000043 * Chetan Loke : Implemented TPACKET_V3 block abstraction
44 * layer.
45 * Copyright (C) 2011, <lokec@ccs.neu.edu>
46 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070047 *
48 * This program is free software; you can redistribute it and/or
49 * modify it under the terms of the GNU General Public License
50 * as published by the Free Software Foundation; either version
51 * 2 of the License, or (at your option) any later version.
52 *
53 */
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +090054
Linus Torvalds1da177e2005-04-16 15:20:36 -070055#include <linux/types.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070056#include <linux/mm.h>
Randy Dunlap4fc268d2006-01-11 12:17:47 -080057#include <linux/capability.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070058#include <linux/fcntl.h>
59#include <linux/socket.h>
60#include <linux/in.h>
61#include <linux/inet.h>
62#include <linux/netdevice.h>
63#include <linux/if_packet.h>
64#include <linux/wireless.h>
Herbert Xuffbc6112007-02-04 23:33:10 -080065#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070066#include <linux/kmod.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090067#include <linux/slab.h>
Neil Horman0e3125c2010-11-16 10:26:47 -080068#include <linux/vmalloc.h>
Eric W. Biederman457c4cb2007-09-12 12:01:34 +020069#include <net/net_namespace.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070070#include <net/ip.h>
71#include <net/protocol.h>
72#include <linux/skbuff.h>
73#include <net/sock.h>
74#include <linux/errno.h>
75#include <linux/timer.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070076#include <asm/uaccess.h>
77#include <asm/ioctls.h>
78#include <asm/page.h>
Al Viroa1f8e7f72006-10-19 16:08:53 -040079#include <asm/cacheflush.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070080#include <asm/io.h>
81#include <linux/proc_fs.h>
82#include <linux/seq_file.h>
83#include <linux/poll.h>
84#include <linux/module.h>
85#include <linux/init.h>
Herbert Xu905db442009-01-30 14:12:06 -080086#include <linux/mutex.h>
Eric Dumazet05423b22009-10-26 18:40:35 -070087#include <linux/if_vlan.h>
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -080088#include <linux/virtio_net.h>
Richard Cochraned85b562010-04-07 22:41:28 +000089#include <linux/errqueue.h>
Scott McMillan614f60f2010-06-02 05:53:56 -070090#include <linux/net_tstamp.h>
Jason Wangc1aad272013-03-25 20:19:57 +000091#include <net/flow_keys.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070092
93#ifdef CONFIG_INET
94#include <net/inet_common.h>
95#endif
96
Pavel Emelyanov2787b042012-08-13 05:49:39 +000097#include "internal.h"
98
Linus Torvalds1da177e2005-04-16 15:20:36 -070099/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700100 Assumptions:
101 - if device has no dev->hard_header routine, it adds and removes ll header
102 inside itself. In this case ll header is invisible outside of device,
103 but higher levels still should reserve dev->hard_header_len.
104 Some devices are enough clever to reallocate skb, when header
105 will not fit to reserved space (tunnel), another ones are silly
106 (PPP).
107 - packet socket receives packets with pulled ll header,
108 so that SOCK_RAW should push it back.
109
110On receive:
111-----------
112
113Incoming, dev->hard_header!=NULL
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -0700114 mac_header -> ll header
115 data -> data
Linus Torvalds1da177e2005-04-16 15:20:36 -0700116
117Outgoing, dev->hard_header!=NULL
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -0700118 mac_header -> ll header
119 data -> ll header
Linus Torvalds1da177e2005-04-16 15:20:36 -0700120
121Incoming, dev->hard_header==NULL
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -0700122 mac_header -> UNKNOWN position. It is very likely, that it points to ll
123 header. PPP makes it, that is wrong, because introduce
YOSHIFUJI Hideakidb0c58f2007-07-19 10:44:35 +0900124 assymetry between rx and tx paths.
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -0700125 data -> data
Linus Torvalds1da177e2005-04-16 15:20:36 -0700126
127Outgoing, dev->hard_header==NULL
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -0700128 mac_header -> data. ll header is still not built!
129 data -> data
Linus Torvalds1da177e2005-04-16 15:20:36 -0700130
131Resume
132 If dev->hard_header==NULL we are unlikely to restore sensible ll header.
133
134
135On transmit:
136------------
137
138dev->hard_header != NULL
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -0700139 mac_header -> ll header
140 data -> ll header
Linus Torvalds1da177e2005-04-16 15:20:36 -0700141
142dev->hard_header == NULL (ll header is added by device, we cannot control it)
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -0700143 mac_header -> data
144 data -> data
Linus Torvalds1da177e2005-04-16 15:20:36 -0700145
146 We should set nh.raw on output to correct posistion,
147 packet classifier depends on it.
148 */
149
Linus Torvalds1da177e2005-04-16 15:20:36 -0700150/* Private packet socket structures. */
151
Eric W. Biederman0fb375f2005-09-21 00:11:37 -0700152/* identical to struct packet_mreq except it has
153 * a longer address field.
154 */
Eric Dumazet40d4e3d2009-07-21 21:57:59 +0000155struct packet_mreq_max {
Eric W. Biederman0fb375f2005-09-21 00:11:37 -0700156 int mr_ifindex;
157 unsigned short mr_type;
158 unsigned short mr_alen;
159 unsigned char mr_address[MAX_ADDR_LEN];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700160};
David S. Millera2efcfa2007-05-29 13:12:50 -0700161
chetan lokef6fb8f12011-08-19 10:18:16 +0000162static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
Johann Baudy69e3c752009-05-18 22:11:22 -0700163 int closing, int tx_ring);
164
chetan lokef6fb8f12011-08-19 10:18:16 +0000165
166#define V3_ALIGNMENT (8)
167
chetan lokebc59ba32011-08-25 10:43:30 +0000168#define BLK_HDR_LEN (ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))
chetan lokef6fb8f12011-08-19 10:18:16 +0000169
170#define BLK_PLUS_PRIV(sz_of_priv) \
171 (BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
172
chetan lokef6fb8f12011-08-19 10:18:16 +0000173#define PGV_FROM_VMALLOC 1
Johann Baudy69e3c752009-05-18 22:11:22 -0700174
chetan lokef6fb8f12011-08-19 10:18:16 +0000175#define BLOCK_STATUS(x) ((x)->hdr.bh1.block_status)
176#define BLOCK_NUM_PKTS(x) ((x)->hdr.bh1.num_pkts)
177#define BLOCK_O2FP(x) ((x)->hdr.bh1.offset_to_first_pkt)
178#define BLOCK_LEN(x) ((x)->hdr.bh1.blk_len)
179#define BLOCK_SNUM(x) ((x)->hdr.bh1.seq_num)
180#define BLOCK_O2PRIV(x) ((x)->offset_to_priv)
181#define BLOCK_PRIV(x) ((void *)((char *)(x) + BLOCK_O2PRIV(x)))
182
Johann Baudy69e3c752009-05-18 22:11:22 -0700183struct packet_sock;
184static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
Willem de Bruijn77f65eb2013-03-19 10:18:11 +0000185static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
186 struct packet_type *pt, struct net_device *orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700187
chetan lokef6fb8f12011-08-19 10:18:16 +0000188static void *packet_previous_frame(struct packet_sock *po,
189 struct packet_ring_buffer *rb,
190 int status);
191static void packet_increment_head(struct packet_ring_buffer *buff);
chetan lokebc59ba32011-08-25 10:43:30 +0000192static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
193 struct tpacket_block_desc *);
194static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
chetan lokef6fb8f12011-08-19 10:18:16 +0000195 struct packet_sock *);
chetan lokebc59ba32011-08-25 10:43:30 +0000196static void prb_retire_current_block(struct tpacket_kbdq_core *,
chetan lokef6fb8f12011-08-19 10:18:16 +0000197 struct packet_sock *, unsigned int status);
chetan lokebc59ba32011-08-25 10:43:30 +0000198static int prb_queue_frozen(struct tpacket_kbdq_core *);
199static void prb_open_block(struct tpacket_kbdq_core *,
200 struct tpacket_block_desc *);
chetan lokef6fb8f12011-08-19 10:18:16 +0000201static void prb_retire_rx_blk_timer_expired(unsigned long);
chetan lokebc59ba32011-08-25 10:43:30 +0000202static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
203static void prb_init_blk_timer(struct packet_sock *,
204 struct tpacket_kbdq_core *,
205 void (*func) (unsigned long));
206static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
207static void prb_clear_rxhash(struct tpacket_kbdq_core *,
208 struct tpacket3_hdr *);
209static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
210 struct tpacket3_hdr *);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700211static void packet_flush_mclist(struct sock *sk);
212
Herbert Xuffbc6112007-02-04 23:33:10 -0800213struct packet_skb_cb {
214 unsigned int origlen;
215 union {
216 struct sockaddr_pkt pkt;
217 struct sockaddr_ll ll;
218 } sa;
219};
220
221#define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb))
Herbert Xu8dc41942007-02-04 23:31:32 -0800222
chetan lokebc59ba32011-08-25 10:43:30 +0000223#define GET_PBDQC_FROM_RB(x) ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
chetan lokef6fb8f12011-08-19 10:18:16 +0000224#define GET_PBLOCK_DESC(x, bid) \
chetan lokebc59ba32011-08-25 10:43:30 +0000225 ((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
chetan lokef6fb8f12011-08-19 10:18:16 +0000226#define GET_CURR_PBLOCK_DESC_FROM_CORE(x) \
chetan lokebc59ba32011-08-25 10:43:30 +0000227 ((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
chetan lokef6fb8f12011-08-19 10:18:16 +0000228#define GET_NEXT_PRB_BLK_NUM(x) \
229 (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
230 ((x)->kactive_blk_num+1) : 0)
231
David S. Millerdc99f602011-07-05 01:45:05 -0700232static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
233static void __fanout_link(struct sock *sk, struct packet_sock *po);
234
David S. Millerce06b032011-07-04 01:44:29 -0700235/* register_prot_hook must be invoked with the po->bind_lock held,
236 * or from a context in which asynchronous accesses to the packet
237 * socket is not possible (packet_create()).
238 */
239static void register_prot_hook(struct sock *sk)
240{
241 struct packet_sock *po = pkt_sk(sk);
242 if (!po->running) {
David S. Millerdc99f602011-07-05 01:45:05 -0700243 if (po->fanout)
244 __fanout_link(sk, po);
245 else
246 dev_add_pack(&po->prot_hook);
David S. Millerce06b032011-07-04 01:44:29 -0700247 sock_hold(sk);
248 po->running = 1;
249 }
250}
251
252/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
253 * held. If the sync parameter is true, we will temporarily drop
254 * the po->bind_lock and do a synchronize_net to make sure no
255 * asynchronous packet processing paths still refer to the elements
256 * of po->prot_hook. If the sync parameter is false, it is the
257 * callers responsibility to take care of this.
258 */
259static void __unregister_prot_hook(struct sock *sk, bool sync)
260{
261 struct packet_sock *po = pkt_sk(sk);
262
263 po->running = 0;
David S. Millerdc99f602011-07-05 01:45:05 -0700264 if (po->fanout)
265 __fanout_unlink(sk, po);
266 else
267 __dev_remove_pack(&po->prot_hook);
David S. Millerce06b032011-07-04 01:44:29 -0700268 __sock_put(sk);
269
270 if (sync) {
271 spin_unlock(&po->bind_lock);
272 synchronize_net();
273 spin_lock(&po->bind_lock);
274 }
275}
276
277static void unregister_prot_hook(struct sock *sk, bool sync)
278{
279 struct packet_sock *po = pkt_sk(sk);
280
281 if (po->running)
282 __unregister_prot_hook(sk, sync);
283}
284
Changli Gaof6dafa92010-12-07 04:26:16 +0000285static inline __pure struct page *pgv_to_page(void *addr)
Changli Gao0af55bb2010-12-01 02:52:20 +0000286{
287 if (is_vmalloc_addr(addr))
288 return vmalloc_to_page(addr);
289 return virt_to_page(addr);
290}
291
Patrick McHardybbd6ef82008-07-14 22:50:15 -0700292static void __packet_set_status(struct packet_sock *po, void *frame, int status)
293{
294 union {
295 struct tpacket_hdr *h1;
296 struct tpacket2_hdr *h2;
297 void *raw;
298 } h;
299
300 h.raw = frame;
301 switch (po->tp_version) {
302 case TPACKET_V1:
303 h.h1->tp_status = status;
Changli Gao0af55bb2010-12-01 02:52:20 +0000304 flush_dcache_page(pgv_to_page(&h.h1->tp_status));
Patrick McHardybbd6ef82008-07-14 22:50:15 -0700305 break;
306 case TPACKET_V2:
307 h.h2->tp_status = status;
Changli Gao0af55bb2010-12-01 02:52:20 +0000308 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
Patrick McHardybbd6ef82008-07-14 22:50:15 -0700309 break;
chetan lokef6fb8f12011-08-19 10:18:16 +0000310 case TPACKET_V3:
Johann Baudy69e3c752009-05-18 22:11:22 -0700311 default:
chetan lokef6fb8f12011-08-19 10:18:16 +0000312 WARN(1, "TPACKET version not supported.\n");
Johann Baudy69e3c752009-05-18 22:11:22 -0700313 BUG();
314 }
315
316 smp_wmb();
317}
318
319static int __packet_get_status(struct packet_sock *po, void *frame)
320{
321 union {
322 struct tpacket_hdr *h1;
323 struct tpacket2_hdr *h2;
324 void *raw;
325 } h;
326
327 smp_rmb();
328
329 h.raw = frame;
330 switch (po->tp_version) {
331 case TPACKET_V1:
Changli Gao0af55bb2010-12-01 02:52:20 +0000332 flush_dcache_page(pgv_to_page(&h.h1->tp_status));
Johann Baudy69e3c752009-05-18 22:11:22 -0700333 return h.h1->tp_status;
334 case TPACKET_V2:
Changli Gao0af55bb2010-12-01 02:52:20 +0000335 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
Johann Baudy69e3c752009-05-18 22:11:22 -0700336 return h.h2->tp_status;
chetan lokef6fb8f12011-08-19 10:18:16 +0000337 case TPACKET_V3:
Johann Baudy69e3c752009-05-18 22:11:22 -0700338 default:
chetan lokef6fb8f12011-08-19 10:18:16 +0000339 WARN(1, "TPACKET version not supported.\n");
Johann Baudy69e3c752009-05-18 22:11:22 -0700340 BUG();
341 return 0;
Patrick McHardybbd6ef82008-07-14 22:50:15 -0700342 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700343}
Johann Baudy69e3c752009-05-18 22:11:22 -0700344
345static void *packet_lookup_frame(struct packet_sock *po,
346 struct packet_ring_buffer *rb,
347 unsigned int position,
348 int status)
349{
350 unsigned int pg_vec_pos, frame_offset;
351 union {
352 struct tpacket_hdr *h1;
353 struct tpacket2_hdr *h2;
354 void *raw;
355 } h;
356
357 pg_vec_pos = position / rb->frames_per_block;
358 frame_offset = position % rb->frames_per_block;
359
Neil Horman0e3125c2010-11-16 10:26:47 -0800360 h.raw = rb->pg_vec[pg_vec_pos].buffer +
361 (frame_offset * rb->frame_size);
Johann Baudy69e3c752009-05-18 22:11:22 -0700362
363 if (status != __packet_get_status(po, h.raw))
364 return NULL;
365
366 return h.raw;
367}
368
Olof Johanssoneea49cc92011-11-02 11:00:49 +0000369static void *packet_current_frame(struct packet_sock *po,
Johann Baudy69e3c752009-05-18 22:11:22 -0700370 struct packet_ring_buffer *rb,
371 int status)
372{
373 return packet_lookup_frame(po, rb, rb->head, status);
374}
375
chetan lokebc59ba32011-08-25 10:43:30 +0000376static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
chetan lokef6fb8f12011-08-19 10:18:16 +0000377{
378 del_timer_sync(&pkc->retire_blk_timer);
379}
380
381static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
382 int tx_ring,
383 struct sk_buff_head *rb_queue)
384{
chetan lokebc59ba32011-08-25 10:43:30 +0000385 struct tpacket_kbdq_core *pkc;
chetan lokef6fb8f12011-08-19 10:18:16 +0000386
387 pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
388
389 spin_lock(&rb_queue->lock);
390 pkc->delete_blk_timer = 1;
391 spin_unlock(&rb_queue->lock);
392
393 prb_del_retire_blk_timer(pkc);
394}
395
396static void prb_init_blk_timer(struct packet_sock *po,
chetan lokebc59ba32011-08-25 10:43:30 +0000397 struct tpacket_kbdq_core *pkc,
chetan lokef6fb8f12011-08-19 10:18:16 +0000398 void (*func) (unsigned long))
399{
400 init_timer(&pkc->retire_blk_timer);
401 pkc->retire_blk_timer.data = (long)po;
402 pkc->retire_blk_timer.function = func;
403 pkc->retire_blk_timer.expires = jiffies;
404}
405
406static void prb_setup_retire_blk_timer(struct packet_sock *po, int tx_ring)
407{
chetan lokebc59ba32011-08-25 10:43:30 +0000408 struct tpacket_kbdq_core *pkc;
chetan lokef6fb8f12011-08-19 10:18:16 +0000409
410 if (tx_ring)
411 BUG();
412
413 pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
414 prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
415}
416
417static int prb_calc_retire_blk_tmo(struct packet_sock *po,
418 int blk_size_in_bytes)
419{
420 struct net_device *dev;
421 unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
Jiri Pirko4bc71cb2011-09-03 03:34:30 +0000422 struct ethtool_cmd ecmd;
423 int err;
parav.pandit@emulex.come440cf22012-06-27 03:56:12 +0000424 u32 speed;
chetan lokef6fb8f12011-08-19 10:18:16 +0000425
Jiri Pirko4bc71cb2011-09-03 03:34:30 +0000426 rtnl_lock();
427 dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
428 if (unlikely(!dev)) {
429 rtnl_unlock();
chetan lokef6fb8f12011-08-19 10:18:16 +0000430 return DEFAULT_PRB_RETIRE_TOV;
Jiri Pirko4bc71cb2011-09-03 03:34:30 +0000431 }
432 err = __ethtool_get_settings(dev, &ecmd);
parav.pandit@emulex.come440cf22012-06-27 03:56:12 +0000433 speed = ethtool_cmd_speed(&ecmd);
Jiri Pirko4bc71cb2011-09-03 03:34:30 +0000434 rtnl_unlock();
435 if (!err) {
Jiri Pirko4bc71cb2011-09-03 03:34:30 +0000436 /*
437 * If the link speed is so slow you don't really
438 * need to worry about perf anyways
439 */
parav.pandit@emulex.come440cf22012-06-27 03:56:12 +0000440 if (speed < SPEED_1000 || speed == SPEED_UNKNOWN) {
Jiri Pirko4bc71cb2011-09-03 03:34:30 +0000441 return DEFAULT_PRB_RETIRE_TOV;
parav.pandit@emulex.come440cf22012-06-27 03:56:12 +0000442 } else {
443 msec = 1;
444 div = speed / 1000;
chetan lokef6fb8f12011-08-19 10:18:16 +0000445 }
446 }
447
448 mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
449
450 if (div)
451 mbits /= div;
452
453 tmo = mbits * msec;
454
455 if (div)
456 return tmo+1;
457 return tmo;
458}
459
chetan lokebc59ba32011-08-25 10:43:30 +0000460static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
chetan lokef6fb8f12011-08-19 10:18:16 +0000461 union tpacket_req_u *req_u)
462{
463 p1->feature_req_word = req_u->req3.tp_feature_req_word;
464}
465
466static void init_prb_bdqc(struct packet_sock *po,
467 struct packet_ring_buffer *rb,
468 struct pgv *pg_vec,
469 union tpacket_req_u *req_u, int tx_ring)
470{
chetan lokebc59ba32011-08-25 10:43:30 +0000471 struct tpacket_kbdq_core *p1 = &rb->prb_bdqc;
472 struct tpacket_block_desc *pbd;
chetan lokef6fb8f12011-08-19 10:18:16 +0000473
474 memset(p1, 0x0, sizeof(*p1));
475
476 p1->knxt_seq_num = 1;
477 p1->pkbdq = pg_vec;
chetan lokebc59ba32011-08-25 10:43:30 +0000478 pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
Joe Perchese3192692012-06-03 17:41:40 +0000479 p1->pkblk_start = pg_vec[0].buffer;
chetan lokef6fb8f12011-08-19 10:18:16 +0000480 p1->kblk_size = req_u->req3.tp_block_size;
481 p1->knum_blocks = req_u->req3.tp_block_nr;
482 p1->hdrlen = po->tp_hdrlen;
483 p1->version = po->tp_version;
484 p1->last_kactive_blk_num = 0;
485 po->stats_u.stats3.tp_freeze_q_cnt = 0;
486 if (req_u->req3.tp_retire_blk_tov)
487 p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
488 else
489 p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
490 req_u->req3.tp_block_size);
491 p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
492 p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
493
494 prb_init_ft_ops(p1, req_u);
495 prb_setup_retire_blk_timer(po, tx_ring);
496 prb_open_block(p1, pbd);
497}
498
499/* Do NOT update the last_blk_num first.
500 * Assumes sk_buff_head lock is held.
501 */
chetan lokebc59ba32011-08-25 10:43:30 +0000502static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
chetan lokef6fb8f12011-08-19 10:18:16 +0000503{
504 mod_timer(&pkc->retire_blk_timer,
505 jiffies + pkc->tov_in_jiffies);
506 pkc->last_kactive_blk_num = pkc->kactive_blk_num;
507}
508
509/*
510 * Timer logic:
511 * 1) We refresh the timer only when we open a block.
512 * By doing this we don't waste cycles refreshing the timer
513 * on packet-by-packet basis.
514 *
515 * With a 1MB block-size, on a 1Gbps line, it will take
516 * i) ~8 ms to fill a block + ii) memcpy etc.
517 * In this cut we are not accounting for the memcpy time.
518 *
519 * So, if the user sets the 'tmo' to 10ms then the timer
520 * will never fire while the block is still getting filled
521 * (which is what we want). However, the user could choose
522 * to close a block early and that's fine.
523 *
524 * But when the timer does fire, we check whether or not to refresh it.
525 * Since the tmo granularity is in msecs, it is not too expensive
526 * to refresh the timer, lets say every '8' msecs.
527 * Either the user can set the 'tmo' or we can derive it based on
528 * a) line-speed and b) block-size.
529 * prb_calc_retire_blk_tmo() calculates the tmo.
530 *
531 */
532static void prb_retire_rx_blk_timer_expired(unsigned long data)
533{
534 struct packet_sock *po = (struct packet_sock *)data;
chetan lokebc59ba32011-08-25 10:43:30 +0000535 struct tpacket_kbdq_core *pkc = &po->rx_ring.prb_bdqc;
chetan lokef6fb8f12011-08-19 10:18:16 +0000536 unsigned int frozen;
chetan lokebc59ba32011-08-25 10:43:30 +0000537 struct tpacket_block_desc *pbd;
chetan lokef6fb8f12011-08-19 10:18:16 +0000538
539 spin_lock(&po->sk.sk_receive_queue.lock);
540
541 frozen = prb_queue_frozen(pkc);
542 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
543
544 if (unlikely(pkc->delete_blk_timer))
545 goto out;
546
547 /* We only need to plug the race when the block is partially filled.
548 * tpacket_rcv:
549 * lock(); increment BLOCK_NUM_PKTS; unlock()
550 * copy_bits() is in progress ...
551 * timer fires on other cpu:
552 * we can't retire the current block because copy_bits
553 * is in progress.
554 *
555 */
556 if (BLOCK_NUM_PKTS(pbd)) {
557 while (atomic_read(&pkc->blk_fill_in_prog)) {
558 /* Waiting for skb_copy_bits to finish... */
559 cpu_relax();
560 }
561 }
562
563 if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
564 if (!frozen) {
565 prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
566 if (!prb_dispatch_next_block(pkc, po))
567 goto refresh_timer;
568 else
569 goto out;
570 } else {
571 /* Case 1. Queue was frozen because user-space was
572 * lagging behind.
573 */
574 if (prb_curr_blk_in_use(pkc, pbd)) {
575 /*
576 * Ok, user-space is still behind.
577 * So just refresh the timer.
578 */
579 goto refresh_timer;
580 } else {
581 /* Case 2. queue was frozen,user-space caught up,
582 * now the link went idle && the timer fired.
583 * We don't have a block to close.So we open this
584 * block and restart the timer.
585 * opening a block thaws the queue,restarts timer
586 * Thawing/timer-refresh is a side effect.
587 */
588 prb_open_block(pkc, pbd);
589 goto out;
590 }
591 }
592 }
593
594refresh_timer:
595 _prb_refresh_rx_retire_blk_timer(pkc);
596
597out:
598 spin_unlock(&po->sk.sk_receive_queue.lock);
599}
600
Olof Johanssoneea49cc92011-11-02 11:00:49 +0000601static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
chetan lokebc59ba32011-08-25 10:43:30 +0000602 struct tpacket_block_desc *pbd1, __u32 status)
chetan lokef6fb8f12011-08-19 10:18:16 +0000603{
604 /* Flush everything minus the block header */
605
606#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
607 u8 *start, *end;
608
609 start = (u8 *)pbd1;
610
611 /* Skip the block header(we know header WILL fit in 4K) */
612 start += PAGE_SIZE;
613
614 end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
615 for (; start < end; start += PAGE_SIZE)
616 flush_dcache_page(pgv_to_page(start));
617
618 smp_wmb();
619#endif
620
621 /* Now update the block status. */
622
623 BLOCK_STATUS(pbd1) = status;
624
625 /* Flush the block header */
626
627#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
628 start = (u8 *)pbd1;
629 flush_dcache_page(pgv_to_page(start));
630
631 smp_wmb();
632#endif
633}
634
635/*
636 * Side effect:
637 *
638 * 1) flush the block
639 * 2) Increment active_blk_num
640 *
641 * Note:We DONT refresh the timer on purpose.
642 * Because almost always the next block will be opened.
643 */
chetan lokebc59ba32011-08-25 10:43:30 +0000644static void prb_close_block(struct tpacket_kbdq_core *pkc1,
645 struct tpacket_block_desc *pbd1,
chetan lokef6fb8f12011-08-19 10:18:16 +0000646 struct packet_sock *po, unsigned int stat)
647{
648 __u32 status = TP_STATUS_USER | stat;
649
650 struct tpacket3_hdr *last_pkt;
chetan lokebc59ba32011-08-25 10:43:30 +0000651 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
chetan lokef6fb8f12011-08-19 10:18:16 +0000652
653 if (po->stats.tp_drops)
654 status |= TP_STATUS_LOSING;
655
656 last_pkt = (struct tpacket3_hdr *)pkc1->prev;
657 last_pkt->tp_next_offset = 0;
658
659 /* Get the ts of the last pkt */
660 if (BLOCK_NUM_PKTS(pbd1)) {
661 h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
662 h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
663 } else {
664 /* Ok, we tmo'd - so get the current time */
665 struct timespec ts;
666 getnstimeofday(&ts);
667 h1->ts_last_pkt.ts_sec = ts.tv_sec;
668 h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
669 }
670
671 smp_wmb();
672
673 /* Flush the block */
674 prb_flush_block(pkc1, pbd1, status);
675
676 pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
677}
678
Olof Johanssoneea49cc92011-11-02 11:00:49 +0000679static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
chetan lokef6fb8f12011-08-19 10:18:16 +0000680{
681 pkc->reset_pending_on_curr_blk = 0;
682}
683
684/*
685 * Side effect of opening a block:
686 *
687 * 1) prb_queue is thawed.
688 * 2) retire_blk_timer is refreshed.
689 *
690 */
chetan lokebc59ba32011-08-25 10:43:30 +0000691static void prb_open_block(struct tpacket_kbdq_core *pkc1,
692 struct tpacket_block_desc *pbd1)
chetan lokef6fb8f12011-08-19 10:18:16 +0000693{
694 struct timespec ts;
chetan lokebc59ba32011-08-25 10:43:30 +0000695 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
chetan lokef6fb8f12011-08-19 10:18:16 +0000696
697 smp_rmb();
698
699 if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd1))) {
700
701 /* We could have just memset this but we will lose the
702 * flexibility of making the priv area sticky
703 */
704 BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
705 BLOCK_NUM_PKTS(pbd1) = 0;
706 BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
707 getnstimeofday(&ts);
708 h1->ts_first_pkt.ts_sec = ts.tv_sec;
709 h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
710 pkc1->pkblk_start = (char *)pbd1;
Joe Perchese3192692012-06-03 17:41:40 +0000711 pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
chetan lokef6fb8f12011-08-19 10:18:16 +0000712 BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
713 BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
714 pbd1->version = pkc1->version;
715 pkc1->prev = pkc1->nxt_offset;
716 pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
717 prb_thaw_queue(pkc1);
718 _prb_refresh_rx_retire_blk_timer(pkc1);
719
720 smp_wmb();
721
722 return;
723 }
724
725 WARN(1, "ERROR block:%p is NOT FREE status:%d kactive_blk_num:%d\n",
726 pbd1, BLOCK_STATUS(pbd1), pkc1->kactive_blk_num);
727 dump_stack();
728 BUG();
729}
730
731/*
732 * Queue freeze logic:
733 * 1) Assume tp_block_nr = 8 blocks.
734 * 2) At time 't0', user opens Rx ring.
735 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
736 * 4) user-space is either sleeping or processing block '0'.
737 * 5) tpacket_rcv is currently filling block '7', since there is no space left,
738 * it will close block-7,loop around and try to fill block '0'.
739 * call-flow:
740 * __packet_lookup_frame_in_block
741 * prb_retire_current_block()
742 * prb_dispatch_next_block()
743 * |->(BLOCK_STATUS == USER) evaluates to true
744 * 5.1) Since block-0 is currently in-use, we just freeze the queue.
745 * 6) Now there are two cases:
746 * 6.1) Link goes idle right after the queue is frozen.
747 * But remember, the last open_block() refreshed the timer.
748 * When this timer expires,it will refresh itself so that we can
749 * re-open block-0 in near future.
750 * 6.2) Link is busy and keeps on receiving packets. This is a simple
751 * case and __packet_lookup_frame_in_block will check if block-0
752 * is free and can now be re-used.
753 */
Olof Johanssoneea49cc92011-11-02 11:00:49 +0000754static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
chetan lokef6fb8f12011-08-19 10:18:16 +0000755 struct packet_sock *po)
756{
757 pkc->reset_pending_on_curr_blk = 1;
758 po->stats_u.stats3.tp_freeze_q_cnt++;
759}
760
761#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
762
763/*
764 * If the next block is free then we will dispatch it
765 * and return a good offset.
766 * Else, we will freeze the queue.
767 * So, caller must check the return value.
768 */
chetan lokebc59ba32011-08-25 10:43:30 +0000769static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
chetan lokef6fb8f12011-08-19 10:18:16 +0000770 struct packet_sock *po)
771{
chetan lokebc59ba32011-08-25 10:43:30 +0000772 struct tpacket_block_desc *pbd;
chetan lokef6fb8f12011-08-19 10:18:16 +0000773
774 smp_rmb();
775
776 /* 1. Get current block num */
777 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
778
779 /* 2. If this block is currently in_use then freeze the queue */
780 if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
781 prb_freeze_queue(pkc, po);
782 return NULL;
783 }
784
785 /*
786 * 3.
787 * open this block and return the offset where the first packet
788 * needs to get stored.
789 */
790 prb_open_block(pkc, pbd);
791 return (void *)pkc->nxt_offset;
792}
793
chetan lokebc59ba32011-08-25 10:43:30 +0000794static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
chetan lokef6fb8f12011-08-19 10:18:16 +0000795 struct packet_sock *po, unsigned int status)
796{
chetan lokebc59ba32011-08-25 10:43:30 +0000797 struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
chetan lokef6fb8f12011-08-19 10:18:16 +0000798
799 /* retire/close the current block */
800 if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
801 /*
802 * Plug the case where copy_bits() is in progress on
803 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
804 * have space to copy the pkt in the current block and
805 * called prb_retire_current_block()
806 *
807 * We don't need to worry about the TMO case because
808 * the timer-handler already handled this case.
809 */
810 if (!(status & TP_STATUS_BLK_TMO)) {
811 while (atomic_read(&pkc->blk_fill_in_prog)) {
812 /* Waiting for skb_copy_bits to finish... */
813 cpu_relax();
814 }
815 }
816 prb_close_block(pkc, pbd, po, status);
817 return;
818 }
819
820 WARN(1, "ERROR-pbd[%d]:%p\n", pkc->kactive_blk_num, pbd);
821 dump_stack();
822 BUG();
823}
824
Olof Johanssoneea49cc92011-11-02 11:00:49 +0000825static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
chetan lokebc59ba32011-08-25 10:43:30 +0000826 struct tpacket_block_desc *pbd)
chetan lokef6fb8f12011-08-19 10:18:16 +0000827{
828 return TP_STATUS_USER & BLOCK_STATUS(pbd);
829}
830
Olof Johanssoneea49cc92011-11-02 11:00:49 +0000831static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
chetan lokef6fb8f12011-08-19 10:18:16 +0000832{
833 return pkc->reset_pending_on_curr_blk;
834}
835
Olof Johanssoneea49cc92011-11-02 11:00:49 +0000836static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
chetan lokef6fb8f12011-08-19 10:18:16 +0000837{
chetan lokebc59ba32011-08-25 10:43:30 +0000838 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
chetan lokef6fb8f12011-08-19 10:18:16 +0000839 atomic_dec(&pkc->blk_fill_in_prog);
840}
841
Olof Johanssoneea49cc92011-11-02 11:00:49 +0000842static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
chetan lokef6fb8f12011-08-19 10:18:16 +0000843 struct tpacket3_hdr *ppd)
844{
845 ppd->hv1.tp_rxhash = skb_get_rxhash(pkc->skb);
846}
847
Olof Johanssoneea49cc92011-11-02 11:00:49 +0000848static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
chetan lokef6fb8f12011-08-19 10:18:16 +0000849 struct tpacket3_hdr *ppd)
850{
851 ppd->hv1.tp_rxhash = 0;
852}
853
Olof Johanssoneea49cc92011-11-02 11:00:49 +0000854static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
chetan lokef6fb8f12011-08-19 10:18:16 +0000855 struct tpacket3_hdr *ppd)
856{
857 if (vlan_tx_tag_present(pkc->skb)) {
858 ppd->hv1.tp_vlan_tci = vlan_tx_tag_get(pkc->skb);
859 ppd->tp_status = TP_STATUS_VLAN_VALID;
860 } else {
danborkmann@iogearbox.net9e670302012-08-20 03:34:03 +0000861 ppd->hv1.tp_vlan_tci = 0;
862 ppd->tp_status = TP_STATUS_AVAILABLE;
chetan lokef6fb8f12011-08-19 10:18:16 +0000863 }
864}
865
chetan lokebc59ba32011-08-25 10:43:30 +0000866static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
chetan lokef6fb8f12011-08-19 10:18:16 +0000867 struct tpacket3_hdr *ppd)
868{
869 prb_fill_vlan_info(pkc, ppd);
870
871 if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
872 prb_fill_rxhash(pkc, ppd);
873 else
874 prb_clear_rxhash(pkc, ppd);
875}
876
Olof Johanssoneea49cc92011-11-02 11:00:49 +0000877static void prb_fill_curr_block(char *curr,
chetan lokebc59ba32011-08-25 10:43:30 +0000878 struct tpacket_kbdq_core *pkc,
879 struct tpacket_block_desc *pbd,
chetan lokef6fb8f12011-08-19 10:18:16 +0000880 unsigned int len)
881{
882 struct tpacket3_hdr *ppd;
883
884 ppd = (struct tpacket3_hdr *)curr;
885 ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
886 pkc->prev = curr;
887 pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
888 BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
889 BLOCK_NUM_PKTS(pbd) += 1;
890 atomic_inc(&pkc->blk_fill_in_prog);
891 prb_run_all_ft_ops(pkc, ppd);
892}
893
894/* Assumes caller has the sk->rx_queue.lock */
895static void *__packet_lookup_frame_in_block(struct packet_sock *po,
896 struct sk_buff *skb,
897 int status,
898 unsigned int len
899 )
900{
chetan lokebc59ba32011-08-25 10:43:30 +0000901 struct tpacket_kbdq_core *pkc;
902 struct tpacket_block_desc *pbd;
chetan lokef6fb8f12011-08-19 10:18:16 +0000903 char *curr, *end;
904
Joe Perchese3192692012-06-03 17:41:40 +0000905 pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
chetan lokef6fb8f12011-08-19 10:18:16 +0000906 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
907
908 /* Queue is frozen when user space is lagging behind */
909 if (prb_queue_frozen(pkc)) {
910 /*
911 * Check if that last block which caused the queue to freeze,
912 * is still in_use by user-space.
913 */
914 if (prb_curr_blk_in_use(pkc, pbd)) {
915 /* Can't record this packet */
916 return NULL;
917 } else {
918 /*
919 * Ok, the block was released by user-space.
920 * Now let's open that block.
921 * opening a block also thaws the queue.
922 * Thawing is a side effect.
923 */
924 prb_open_block(pkc, pbd);
925 }
926 }
927
928 smp_mb();
929 curr = pkc->nxt_offset;
930 pkc->skb = skb;
Joe Perchese3192692012-06-03 17:41:40 +0000931 end = (char *)pbd + pkc->kblk_size;
chetan lokef6fb8f12011-08-19 10:18:16 +0000932
933 /* first try the current block */
934 if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
935 prb_fill_curr_block(curr, pkc, pbd, len);
936 return (void *)curr;
937 }
938
939 /* Ok, close the current block */
940 prb_retire_current_block(pkc, po, 0);
941
942 /* Now, try to dispatch the next block */
943 curr = (char *)prb_dispatch_next_block(pkc, po);
944 if (curr) {
945 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
946 prb_fill_curr_block(curr, pkc, pbd, len);
947 return (void *)curr;
948 }
949
950 /*
951 * No free blocks are available.user_space hasn't caught up yet.
952 * Queue was just frozen and now this packet will get dropped.
953 */
954 return NULL;
955}
956
Olof Johanssoneea49cc92011-11-02 11:00:49 +0000957static void *packet_current_rx_frame(struct packet_sock *po,
chetan lokef6fb8f12011-08-19 10:18:16 +0000958 struct sk_buff *skb,
959 int status, unsigned int len)
960{
961 char *curr = NULL;
962 switch (po->tp_version) {
963 case TPACKET_V1:
964 case TPACKET_V2:
965 curr = packet_lookup_frame(po, &po->rx_ring,
966 po->rx_ring.head, status);
967 return curr;
968 case TPACKET_V3:
969 return __packet_lookup_frame_in_block(po, skb, status, len);
970 default:
971 WARN(1, "TPACKET version not supported\n");
972 BUG();
Ying Xue99aa3472012-08-06 16:27:10 +0000973 return NULL;
chetan lokef6fb8f12011-08-19 10:18:16 +0000974 }
975}
976
Olof Johanssoneea49cc92011-11-02 11:00:49 +0000977static void *prb_lookup_block(struct packet_sock *po,
chetan lokef6fb8f12011-08-19 10:18:16 +0000978 struct packet_ring_buffer *rb,
Willem de Bruijn77f65eb2013-03-19 10:18:11 +0000979 unsigned int idx,
chetan lokef6fb8f12011-08-19 10:18:16 +0000980 int status)
981{
chetan lokebc59ba32011-08-25 10:43:30 +0000982 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
Willem de Bruijn77f65eb2013-03-19 10:18:11 +0000983 struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);
chetan lokef6fb8f12011-08-19 10:18:16 +0000984
985 if (status != BLOCK_STATUS(pbd))
986 return NULL;
987 return pbd;
988}
989
Olof Johanssoneea49cc92011-11-02 11:00:49 +0000990static int prb_previous_blk_num(struct packet_ring_buffer *rb)
chetan lokef6fb8f12011-08-19 10:18:16 +0000991{
992 unsigned int prev;
993 if (rb->prb_bdqc.kactive_blk_num)
994 prev = rb->prb_bdqc.kactive_blk_num-1;
995 else
996 prev = rb->prb_bdqc.knum_blocks-1;
997 return prev;
998}
999
1000/* Assumes caller has held the rx_queue.lock */
Olof Johanssoneea49cc92011-11-02 11:00:49 +00001001static void *__prb_previous_block(struct packet_sock *po,
chetan lokef6fb8f12011-08-19 10:18:16 +00001002 struct packet_ring_buffer *rb,
1003 int status)
1004{
1005 unsigned int previous = prb_previous_blk_num(rb);
1006 return prb_lookup_block(po, rb, previous, status);
1007}
1008
Olof Johanssoneea49cc92011-11-02 11:00:49 +00001009static void *packet_previous_rx_frame(struct packet_sock *po,
chetan lokef6fb8f12011-08-19 10:18:16 +00001010 struct packet_ring_buffer *rb,
1011 int status)
1012{
1013 if (po->tp_version <= TPACKET_V2)
1014 return packet_previous_frame(po, rb, status);
1015
1016 return __prb_previous_block(po, rb, status);
1017}
1018
Olof Johanssoneea49cc92011-11-02 11:00:49 +00001019static void packet_increment_rx_head(struct packet_sock *po,
chetan lokef6fb8f12011-08-19 10:18:16 +00001020 struct packet_ring_buffer *rb)
1021{
1022 switch (po->tp_version) {
1023 case TPACKET_V1:
1024 case TPACKET_V2:
1025 return packet_increment_head(rb);
1026 case TPACKET_V3:
1027 default:
1028 WARN(1, "TPACKET version not supported.\n");
1029 BUG();
1030 return;
1031 }
1032}
1033
Olof Johanssoneea49cc92011-11-02 11:00:49 +00001034static void *packet_previous_frame(struct packet_sock *po,
Johann Baudy69e3c752009-05-18 22:11:22 -07001035 struct packet_ring_buffer *rb,
1036 int status)
1037{
1038 unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
1039 return packet_lookup_frame(po, rb, previous, status);
1040}
1041
Olof Johanssoneea49cc92011-11-02 11:00:49 +00001042static void packet_increment_head(struct packet_ring_buffer *buff)
Johann Baudy69e3c752009-05-18 22:11:22 -07001043{
1044 buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
1045}
1046
Willem de Bruijn77f65eb2013-03-19 10:18:11 +00001047static bool packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
1048{
1049 struct sock *sk = &po->sk;
1050 bool has_room;
1051
1052 if (po->prot_hook.func != tpacket_rcv)
1053 return (atomic_read(&sk->sk_rmem_alloc) + skb->truesize)
1054 <= sk->sk_rcvbuf;
1055
1056 spin_lock(&sk->sk_receive_queue.lock);
1057 if (po->tp_version == TPACKET_V3)
1058 has_room = prb_lookup_block(po, &po->rx_ring,
1059 po->rx_ring.prb_bdqc.kactive_blk_num,
1060 TP_STATUS_KERNEL);
1061 else
1062 has_room = packet_lookup_frame(po, &po->rx_ring,
1063 po->rx_ring.head,
1064 TP_STATUS_KERNEL);
1065 spin_unlock(&sk->sk_receive_queue.lock);
1066
1067 return has_room;
1068}
1069
Linus Torvalds1da177e2005-04-16 15:20:36 -07001070static void packet_sock_destruct(struct sock *sk)
1071{
Richard Cochraned85b562010-04-07 22:41:28 +00001072 skb_queue_purge(&sk->sk_error_queue);
1073
Ilpo Järvinen547b7922008-07-25 21:43:18 -07001074 WARN_ON(atomic_read(&sk->sk_rmem_alloc));
1075 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001076
1077 if (!sock_flag(sk, SOCK_DEAD)) {
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001078 pr_err("Attempt to release alive packet socket: %p\n", sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001079 return;
1080 }
1081
Pavel Emelyanov17ab56a2007-11-10 21:38:48 -08001082 sk_refcnt_debug_dec(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001083}
1084
David S. Millerdc99f602011-07-05 01:45:05 -07001085static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
1086{
1087 int x = atomic_read(&f->rr_cur) + 1;
1088
1089 if (x >= num)
1090 x = 0;
1091
1092 return x;
1093}
1094
Willem de Bruijn77f65eb2013-03-19 10:18:11 +00001095static unsigned int fanout_demux_hash(struct packet_fanout *f,
1096 struct sk_buff *skb,
1097 unsigned int num)
David S. Millerdc99f602011-07-05 01:45:05 -07001098{
Willem de Bruijn77f65eb2013-03-19 10:18:11 +00001099 return (((u64)skb->rxhash) * num) >> 32;
David S. Millerdc99f602011-07-05 01:45:05 -07001100}
1101
Willem de Bruijn77f65eb2013-03-19 10:18:11 +00001102static unsigned int fanout_demux_lb(struct packet_fanout *f,
1103 struct sk_buff *skb,
1104 unsigned int num)
David S. Millerdc99f602011-07-05 01:45:05 -07001105{
1106 int cur, old;
1107
1108 cur = atomic_read(&f->rr_cur);
1109 while ((old = atomic_cmpxchg(&f->rr_cur, cur,
1110 fanout_rr_next(f, num))) != cur)
1111 cur = old;
Willem de Bruijn77f65eb2013-03-19 10:18:11 +00001112 return cur;
David S. Millerdc99f602011-07-05 01:45:05 -07001113}
1114
Willem de Bruijn77f65eb2013-03-19 10:18:11 +00001115static unsigned int fanout_demux_cpu(struct packet_fanout *f,
1116 struct sk_buff *skb,
1117 unsigned int num)
David S. Miller95ec3eb2011-07-06 01:56:38 -07001118{
Willem de Bruijn77f65eb2013-03-19 10:18:11 +00001119 return smp_processor_id() % num;
1120}
David S. Miller95ec3eb2011-07-06 01:56:38 -07001121
Willem de Bruijn77f65eb2013-03-19 10:18:11 +00001122static unsigned int fanout_demux_rollover(struct packet_fanout *f,
1123 struct sk_buff *skb,
1124 unsigned int idx, unsigned int skip,
1125 unsigned int num)
1126{
1127 unsigned int i, j;
1128
1129 i = j = min_t(int, f->next[idx], num - 1);
1130 do {
1131 if (i != skip && packet_rcv_has_room(pkt_sk(f->arr[i]), skb)) {
1132 if (i != j)
1133 f->next[idx] = i;
1134 return i;
1135 }
1136 if (++i == num)
1137 i = 0;
1138 } while (i != j);
1139
1140 return idx;
1141}
1142
1143static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
1144{
1145 return f->flags & (flag >> 8);
David S. Miller95ec3eb2011-07-06 01:56:38 -07001146}
1147
David S. Miller95ec3eb2011-07-06 01:56:38 -07001148static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
1149 struct packet_type *pt, struct net_device *orig_dev)
David S. Millerdc99f602011-07-05 01:45:05 -07001150{
1151 struct packet_fanout *f = pt->af_packet_priv;
1152 unsigned int num = f->num_members;
1153 struct packet_sock *po;
Willem de Bruijn77f65eb2013-03-19 10:18:11 +00001154 unsigned int idx;
David S. Millerdc99f602011-07-05 01:45:05 -07001155
1156 if (!net_eq(dev_net(dev), read_pnet(&f->net)) ||
1157 !num) {
1158 kfree_skb(skb);
1159 return 0;
1160 }
1161
David S. Miller95ec3eb2011-07-06 01:56:38 -07001162 switch (f->type) {
1163 case PACKET_FANOUT_HASH:
1164 default:
Willem de Bruijn77f65eb2013-03-19 10:18:11 +00001165 if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
Eric Dumazetbc416d92011-10-06 10:28:31 +00001166 skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
David S. Miller95ec3eb2011-07-06 01:56:38 -07001167 if (!skb)
1168 return 0;
1169 }
1170 skb_get_rxhash(skb);
Willem de Bruijn77f65eb2013-03-19 10:18:11 +00001171 idx = fanout_demux_hash(f, skb, num);
David S. Miller95ec3eb2011-07-06 01:56:38 -07001172 break;
1173 case PACKET_FANOUT_LB:
Willem de Bruijn77f65eb2013-03-19 10:18:11 +00001174 idx = fanout_demux_lb(f, skb, num);
David S. Miller95ec3eb2011-07-06 01:56:38 -07001175 break;
1176 case PACKET_FANOUT_CPU:
Willem de Bruijn77f65eb2013-03-19 10:18:11 +00001177 idx = fanout_demux_cpu(f, skb, num);
1178 break;
1179 case PACKET_FANOUT_ROLLOVER:
1180 idx = fanout_demux_rollover(f, skb, 0, (unsigned int) -1, num);
David S. Miller95ec3eb2011-07-06 01:56:38 -07001181 break;
David S. Miller7736d332011-07-05 01:43:20 -07001182 }
1183
Willem de Bruijn77f65eb2013-03-19 10:18:11 +00001184 po = pkt_sk(f->arr[idx]);
1185 if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER) &&
1186 unlikely(!packet_rcv_has_room(po, skb))) {
1187 idx = fanout_demux_rollover(f, skb, idx, idx, num);
1188 po = pkt_sk(f->arr[idx]);
1189 }
David S. Millerdc99f602011-07-05 01:45:05 -07001190
1191 return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
1192}
1193
Pavel Emelyanovfff33212012-08-16 05:36:48 +00001194DEFINE_MUTEX(fanout_mutex);
1195EXPORT_SYMBOL_GPL(fanout_mutex);
David S. Millerdc99f602011-07-05 01:45:05 -07001196static LIST_HEAD(fanout_list);
1197
1198static void __fanout_link(struct sock *sk, struct packet_sock *po)
1199{
1200 struct packet_fanout *f = po->fanout;
1201
1202 spin_lock(&f->lock);
1203 f->arr[f->num_members] = sk;
1204 smp_wmb();
1205 f->num_members++;
1206 spin_unlock(&f->lock);
1207}
1208
1209static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1210{
1211 struct packet_fanout *f = po->fanout;
1212 int i;
1213
1214 spin_lock(&f->lock);
1215 for (i = 0; i < f->num_members; i++) {
1216 if (f->arr[i] == sk)
1217 break;
1218 }
1219 BUG_ON(i >= f->num_members);
1220 f->arr[i] = f->arr[f->num_members - 1];
1221 f->num_members--;
1222 spin_unlock(&f->lock);
1223}
1224
Fengguang Wua0dfb262012-08-23 19:51:21 +08001225static bool match_fanout_group(struct packet_type *ptype, struct sock * sk)
Eric Leblondc0de08d2012-08-16 22:02:58 +00001226{
1227 if (ptype->af_packet_priv == (void*)((struct packet_sock *)sk)->fanout)
1228 return true;
1229
1230 return false;
1231}
1232
David S. Miller7736d332011-07-05 01:43:20 -07001233static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
David S. Millerdc99f602011-07-05 01:45:05 -07001234{
1235 struct packet_sock *po = pkt_sk(sk);
1236 struct packet_fanout *f, *match;
David S. Miller7736d332011-07-05 01:43:20 -07001237 u8 type = type_flags & 0xff;
Willem de Bruijn77f65eb2013-03-19 10:18:11 +00001238 u8 flags = type_flags >> 8;
David S. Millerdc99f602011-07-05 01:45:05 -07001239 int err;
1240
1241 switch (type) {
Willem de Bruijn77f65eb2013-03-19 10:18:11 +00001242 case PACKET_FANOUT_ROLLOVER:
1243 if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
1244 return -EINVAL;
David S. Millerdc99f602011-07-05 01:45:05 -07001245 case PACKET_FANOUT_HASH:
1246 case PACKET_FANOUT_LB:
David S. Miller95ec3eb2011-07-06 01:56:38 -07001247 case PACKET_FANOUT_CPU:
David S. Millerdc99f602011-07-05 01:45:05 -07001248 break;
1249 default:
1250 return -EINVAL;
1251 }
1252
1253 if (!po->running)
1254 return -EINVAL;
1255
1256 if (po->fanout)
1257 return -EALREADY;
1258
1259 mutex_lock(&fanout_mutex);
1260 match = NULL;
1261 list_for_each_entry(f, &fanout_list, list) {
1262 if (f->id == id &&
1263 read_pnet(&f->net) == sock_net(sk)) {
1264 match = f;
1265 break;
1266 }
1267 }
Eric Dumazetafe62c62011-07-07 06:41:29 -07001268 err = -EINVAL;
Willem de Bruijn77f65eb2013-03-19 10:18:11 +00001269 if (match && match->flags != flags)
Eric Dumazetafe62c62011-07-07 06:41:29 -07001270 goto out;
David S. Millerdc99f602011-07-05 01:45:05 -07001271 if (!match) {
Eric Dumazetafe62c62011-07-07 06:41:29 -07001272 err = -ENOMEM;
David S. Millerdc99f602011-07-05 01:45:05 -07001273 match = kzalloc(sizeof(*match), GFP_KERNEL);
Eric Dumazetafe62c62011-07-07 06:41:29 -07001274 if (!match)
1275 goto out;
1276 write_pnet(&match->net, sock_net(sk));
1277 match->id = id;
1278 match->type = type;
Willem de Bruijn77f65eb2013-03-19 10:18:11 +00001279 match->flags = flags;
Eric Dumazetafe62c62011-07-07 06:41:29 -07001280 atomic_set(&match->rr_cur, 0);
1281 INIT_LIST_HEAD(&match->list);
1282 spin_lock_init(&match->lock);
1283 atomic_set(&match->sk_ref, 0);
1284 match->prot_hook.type = po->prot_hook.type;
1285 match->prot_hook.dev = po->prot_hook.dev;
1286 match->prot_hook.func = packet_rcv_fanout;
1287 match->prot_hook.af_packet_priv = match;
Eric Leblondc0de08d2012-08-16 22:02:58 +00001288 match->prot_hook.id_match = match_fanout_group;
Eric Dumazetafe62c62011-07-07 06:41:29 -07001289 dev_add_pack(&match->prot_hook);
1290 list_add(&match->list, &fanout_list);
1291 }
1292 err = -EINVAL;
1293 if (match->type == type &&
1294 match->prot_hook.type == po->prot_hook.type &&
1295 match->prot_hook.dev == po->prot_hook.dev) {
1296 err = -ENOSPC;
1297 if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
1298 __dev_remove_pack(&po->prot_hook);
1299 po->fanout = match;
1300 atomic_inc(&match->sk_ref);
1301 __fanout_link(sk, po);
1302 err = 0;
David S. Millerdc99f602011-07-05 01:45:05 -07001303 }
1304 }
Eric Dumazetafe62c62011-07-07 06:41:29 -07001305out:
David S. Millerdc99f602011-07-05 01:45:05 -07001306 mutex_unlock(&fanout_mutex);
1307 return err;
1308}
1309
1310static void fanout_release(struct sock *sk)
1311{
1312 struct packet_sock *po = pkt_sk(sk);
1313 struct packet_fanout *f;
1314
1315 f = po->fanout;
1316 if (!f)
1317 return;
1318
Pavel Emelyanovfff33212012-08-16 05:36:48 +00001319 mutex_lock(&fanout_mutex);
David S. Millerdc99f602011-07-05 01:45:05 -07001320 po->fanout = NULL;
1321
David S. Millerdc99f602011-07-05 01:45:05 -07001322 if (atomic_dec_and_test(&f->sk_ref)) {
1323 list_del(&f->list);
1324 dev_remove_pack(&f->prot_hook);
1325 kfree(f);
1326 }
1327 mutex_unlock(&fanout_mutex);
1328}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001329
Eric Dumazet90ddc4f2005-12-22 12:49:22 -08001330static const struct proto_ops packet_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001331
Eric Dumazet90ddc4f2005-12-22 12:49:22 -08001332static const struct proto_ops packet_ops_spkt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001333
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001334static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1335 struct packet_type *pt, struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001336{
1337 struct sock *sk;
1338 struct sockaddr_pkt *spkt;
1339
1340 /*
1341 * When we registered the protocol we saved the socket in the data
1342 * field for just this event.
1343 */
1344
1345 sk = pt->af_packet_priv;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001346
Linus Torvalds1da177e2005-04-16 15:20:36 -07001347 /*
1348 * Yank back the headers [hope the device set this
1349 * right or kerboom...]
1350 *
1351 * Incoming packets have ll header pulled,
1352 * push it back.
1353 *
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001354 * For outgoing ones skb->data == skb_mac_header(skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001355 * so that this procedure is noop.
1356 */
1357
1358 if (skb->pkt_type == PACKET_LOOPBACK)
1359 goto out;
1360
Octavian Purdila09ad9bc2009-11-25 15:14:13 -08001361 if (!net_eq(dev_net(dev), sock_net(sk)))
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08001362 goto out;
1363
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001364 skb = skb_share_check(skb, GFP_ATOMIC);
1365 if (skb == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001366 goto oom;
1367
1368 /* drop any routing info */
Eric Dumazetadf30902009-06-02 05:19:30 +00001369 skb_dst_drop(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001370
Phil Oester84531c22005-07-12 11:57:52 -07001371 /* drop conntrack reference */
1372 nf_reset(skb);
1373
Herbert Xuffbc6112007-02-04 23:33:10 -08001374 spkt = &PACKET_SKB_CB(skb)->sa.pkt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001375
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001376 skb_push(skb, skb->data - skb_mac_header(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001377
1378 /*
1379 * The SOCK_PACKET socket receives _all_ frames.
1380 */
1381
1382 spkt->spkt_family = dev->type;
1383 strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1384 spkt->spkt_protocol = skb->protocol;
1385
1386 /*
1387 * Charge the memory to the socket. This is done specifically
1388 * to prevent sockets using all the memory up.
1389 */
1390
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001391 if (sock_queue_rcv_skb(sk, skb) == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001392 return 0;
1393
1394out:
1395 kfree_skb(skb);
1396oom:
1397 return 0;
1398}
1399
1400
1401/*
1402 * Output a raw packet to a device layer. This bypasses all the other
1403 * protocol layers and you must therefore supply it with a complete frame
1404 */
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001405
Linus Torvalds1da177e2005-04-16 15:20:36 -07001406static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
1407 struct msghdr *msg, size_t len)
1408{
1409 struct sock *sk = sock->sk;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001410 struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001411 struct sk_buff *skb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001412 struct net_device *dev;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001413 __be16 proto = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414 int err;
Ben Greear3bdc0eb2012-02-11 15:39:30 +00001415 int extra_len = 0;
Jason Wangc1aad272013-03-25 20:19:57 +00001416 struct flow_keys keys;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001417
Linus Torvalds1da177e2005-04-16 15:20:36 -07001418 /*
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001419 * Get and verify the address.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001420 */
1421
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001422 if (saddr) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001423 if (msg->msg_namelen < sizeof(struct sockaddr))
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001424 return -EINVAL;
1425 if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1426 proto = saddr->spkt_protocol;
1427 } else
1428 return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001429
1430 /*
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001431 * Find the device first to size check it
Linus Torvalds1da177e2005-04-16 15:20:36 -07001432 */
1433
danborkmann@iogearbox.netde74e922012-06-10 08:59:28 +00001434 saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001435retry:
Eric Dumazet654d1f82009-11-02 10:43:32 +01001436 rcu_read_lock();
1437 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001438 err = -ENODEV;
1439 if (dev == NULL)
1440 goto out_unlock;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001441
David S. Millerd5e76b02007-01-25 19:30:36 -08001442 err = -ENETDOWN;
1443 if (!(dev->flags & IFF_UP))
1444 goto out_unlock;
1445
Linus Torvalds1da177e2005-04-16 15:20:36 -07001446 /*
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001447 * You may not queue a frame bigger than the mtu. This is the lowest level
1448 * raw protocol and you must do your own fragmentation at this level.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001449 */
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001450
Ben Greear3bdc0eb2012-02-11 15:39:30 +00001451 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1452 if (!netif_supports_nofcs(dev)) {
1453 err = -EPROTONOSUPPORT;
1454 goto out_unlock;
1455 }
1456 extra_len = 4; /* We're doing our own CRC */
1457 }
1458
Linus Torvalds1da177e2005-04-16 15:20:36 -07001459 err = -EMSGSIZE;
Ben Greear3bdc0eb2012-02-11 15:39:30 +00001460 if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001461 goto out_unlock;
1462
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001463 if (!skb) {
1464 size_t reserved = LL_RESERVED_SPACE(dev);
Herbert Xu4ce40912011-11-18 02:20:05 +00001465 int tlen = dev->needed_tailroom;
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001466 unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001467
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001468 rcu_read_unlock();
Herbert Xu4ce40912011-11-18 02:20:05 +00001469 skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001470 if (skb == NULL)
1471 return -ENOBUFS;
1472 /* FIXME: Save some space for broken drivers that write a hard
1473 * header at transmission time by themselves. PPP is the notable
1474 * one here. This should really be fixed at the driver level.
1475 */
1476 skb_reserve(skb, reserved);
1477 skb_reset_network_header(skb);
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001478
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001479 /* Try to align data part correctly */
1480 if (hhlen) {
1481 skb->data -= hhlen;
1482 skb->tail -= hhlen;
1483 if (len < hhlen)
1484 skb_reset_network_header(skb);
1485 }
1486 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
1487 if (err)
1488 goto out_free;
1489 goto retry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001490 }
1491
Ben Greear3bdc0eb2012-02-11 15:39:30 +00001492 if (len > (dev->mtu + dev->hard_header_len + extra_len)) {
Ben Greear57f89bf2011-02-11 09:35:18 +00001493 /* Earlier code assumed this would be a VLAN pkt,
1494 * double-check this now that we have the actual
1495 * packet in hand.
1496 */
1497 struct ethhdr *ehdr;
1498 skb_reset_mac_header(skb);
1499 ehdr = eth_hdr(skb);
1500 if (ehdr->h_proto != htons(ETH_P_8021Q)) {
1501 err = -EMSGSIZE;
1502 goto out_unlock;
1503 }
1504 }
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001505
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506 skb->protocol = proto;
1507 skb->dev = dev;
1508 skb->priority = sk->sk_priority;
Eric Dumazet2d37a182009-10-01 19:14:46 +00001509 skb->mark = sk->sk_mark;
Oliver Hartkopp2244d072010-08-17 08:59:14 +00001510 err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
Richard Cochraned85b562010-04-07 22:41:28 +00001511 if (err < 0)
1512 goto out_unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001513
Ben Greear3bdc0eb2012-02-11 15:39:30 +00001514 if (unlikely(extra_len == 4))
1515 skb->no_fcs = 1;
1516
Jason Wangc1aad272013-03-25 20:19:57 +00001517 if (skb_flow_dissect(skb, &keys))
1518 skb_set_transport_header(skb, keys.thoff);
1519 else
1520 skb_reset_transport_header(skb);
1521
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522 dev_queue_xmit(skb);
Eric Dumazet654d1f82009-11-02 10:43:32 +01001523 rcu_read_unlock();
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001524 return len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001525
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526out_unlock:
Eric Dumazet654d1f82009-11-02 10:43:32 +01001527 rcu_read_unlock();
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001528out_free:
1529 kfree_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530 return err;
1531}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001532
Olof Johanssoneea49cc92011-11-02 11:00:49 +00001533static unsigned int run_filter(const struct sk_buff *skb,
Eric Dumazet62ab0812010-12-06 20:50:09 +00001534 const struct sock *sk,
David S. Millerdbcb5852007-01-24 15:21:02 -08001535 unsigned int res)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001536{
1537 struct sk_filter *filter;
1538
Eric Dumazet80f8f102011-01-18 07:46:52 +00001539 rcu_read_lock();
1540 filter = rcu_dereference(sk->sk_filter);
David S. Millerdbcb5852007-01-24 15:21:02 -08001541 if (filter != NULL)
Eric Dumazet0a148422011-04-20 09:27:32 +00001542 res = SK_RUN_FILTER(filter, skb);
Eric Dumazet80f8f102011-01-18 07:46:52 +00001543 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001544
David S. Millerdbcb5852007-01-24 15:21:02 -08001545 return res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001546}
1547
1548/*
Eric Dumazet62ab0812010-12-06 20:50:09 +00001549 * This function makes lazy skb cloning in hope that most of packets
1550 * are discarded by BPF.
1551 *
1552 * Note tricky part: we DO mangle shared skb! skb->data, skb->len
1553 * and skb->cb are mangled. It works because (and until) packets
1554 * falling here are owned by current CPU. Output packets are cloned
1555 * by dev_queue_xmit_nit(), input packets are processed by net_bh
1556 * sequencially, so that if we return skb to original state on exit,
1557 * we will not harm anyone.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001558 */
1559
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001560static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
1561 struct packet_type *pt, struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562{
1563 struct sock *sk;
1564 struct sockaddr_ll *sll;
1565 struct packet_sock *po;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001566 u8 *skb_head = skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001567 int skb_len = skb->len;
David S. Millerdbcb5852007-01-24 15:21:02 -08001568 unsigned int snaplen, res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569
1570 if (skb->pkt_type == PACKET_LOOPBACK)
1571 goto drop;
1572
1573 sk = pt->af_packet_priv;
1574 po = pkt_sk(sk);
1575
Octavian Purdila09ad9bc2009-11-25 15:14:13 -08001576 if (!net_eq(dev_net(dev), sock_net(sk)))
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08001577 goto drop;
1578
Linus Torvalds1da177e2005-04-16 15:20:36 -07001579 skb->dev = dev;
1580
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07001581 if (dev->header_ops) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001582 /* The device has an explicit notion of ll header,
Eric Dumazet62ab0812010-12-06 20:50:09 +00001583 * exported to higher levels.
1584 *
1585 * Otherwise, the device hides details of its frame
1586 * structure, so that corresponding packet head is
1587 * never delivered to user.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001588 */
1589 if (sk->sk_type != SOCK_DGRAM)
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001590 skb_push(skb, skb->data - skb_mac_header(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591 else if (skb->pkt_type == PACKET_OUTGOING) {
1592 /* Special case: outgoing packets have ll header at head */
Arnaldo Carvalho de Melobbe735e2007-03-10 22:16:10 -03001593 skb_pull(skb, skb_network_offset(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001594 }
1595 }
1596
1597 snaplen = skb->len;
1598
David S. Millerdbcb5852007-01-24 15:21:02 -08001599 res = run_filter(skb, sk, snaplen);
1600 if (!res)
Dmitry Mishinfda9ef52006-08-31 15:28:39 -07001601 goto drop_n_restore;
David S. Millerdbcb5852007-01-24 15:21:02 -08001602 if (snaplen > res)
1603 snaplen = res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001604
Eric Dumazet0fd7bac2011-12-21 07:11:44 +00001605 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001606 goto drop_n_acct;
1607
1608 if (skb_shared(skb)) {
1609 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
1610 if (nskb == NULL)
1611 goto drop_n_acct;
1612
1613 if (skb_head != skb->data) {
1614 skb->data = skb_head;
1615 skb->len = skb_len;
1616 }
Eric Dumazetabc4e4f2012-04-19 02:24:42 +00001617 consume_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001618 skb = nskb;
1619 }
1620
Herbert Xuffbc6112007-02-04 23:33:10 -08001621 BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
1622 sizeof(skb->cb));
1623
1624 sll = &PACKET_SKB_CB(skb)->sa.ll;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001625 sll->sll_family = AF_PACKET;
1626 sll->sll_hatype = dev->type;
1627 sll->sll_protocol = skb->protocol;
1628 sll->sll_pkttype = skb->pkt_type;
Peter P Waskiewicz Jr8032b462007-11-10 22:03:25 -08001629 if (unlikely(po->origdev))
Peter P. Waskiewicz Jr80feaac2007-04-20 16:05:39 -07001630 sll->sll_ifindex = orig_dev->ifindex;
1631 else
1632 sll->sll_ifindex = dev->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001633
Stephen Hemmingerb95cce32007-09-26 22:13:38 -07001634 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001635
Herbert Xuffbc6112007-02-04 23:33:10 -08001636 PACKET_SKB_CB(skb)->origlen = skb->len;
Herbert Xu8dc41942007-02-04 23:31:32 -08001637
Linus Torvalds1da177e2005-04-16 15:20:36 -07001638 if (pskb_trim(skb, snaplen))
1639 goto drop_n_acct;
1640
1641 skb_set_owner_r(skb, sk);
1642 skb->dev = NULL;
Eric Dumazetadf30902009-06-02 05:19:30 +00001643 skb_dst_drop(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001644
Phil Oester84531c22005-07-12 11:57:52 -07001645 /* drop conntrack reference */
1646 nf_reset(skb);
1647
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648 spin_lock(&sk->sk_receive_queue.lock);
1649 po->stats.tp_packets++;
Neil Horman3b885782009-10-12 13:26:31 -07001650 skb->dropcount = atomic_read(&sk->sk_drops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001651 __skb_queue_tail(&sk->sk_receive_queue, skb);
1652 spin_unlock(&sk->sk_receive_queue.lock);
1653 sk->sk_data_ready(sk, skb->len);
1654 return 0;
1655
1656drop_n_acct:
Willem de Bruijn7091fbd2011-09-30 10:38:28 +00001657 spin_lock(&sk->sk_receive_queue.lock);
1658 po->stats.tp_drops++;
1659 atomic_inc(&sk->sk_drops);
1660 spin_unlock(&sk->sk_receive_queue.lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001661
1662drop_n_restore:
1663 if (skb_head != skb->data && skb_shared(skb)) {
1664 skb->data = skb_head;
1665 skb->len = skb_len;
1666 }
1667drop:
Neil Hormanead2ceb2009-03-11 09:49:55 +00001668 consume_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001669 return 0;
1670}
1671
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001672static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
1673 struct packet_type *pt, struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001674{
1675 struct sock *sk;
1676 struct packet_sock *po;
1677 struct sockaddr_ll *sll;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001678 union {
1679 struct tpacket_hdr *h1;
1680 struct tpacket2_hdr *h2;
chetan lokef6fb8f12011-08-19 10:18:16 +00001681 struct tpacket3_hdr *h3;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001682 void *raw;
1683 } h;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001684 u8 *skb_head = skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001685 int skb_len = skb->len;
David S. Millerdbcb5852007-01-24 15:21:02 -08001686 unsigned int snaplen, res;
chetan lokef6fb8f12011-08-19 10:18:16 +00001687 unsigned long status = TP_STATUS_USER;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001688 unsigned short macoff, netoff, hdrlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689 struct sk_buff *copy_skb = NULL;
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001690 struct timeval tv;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001691 struct timespec ts;
Scott McMillan614f60f2010-06-02 05:53:56 -07001692 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693
1694 if (skb->pkt_type == PACKET_LOOPBACK)
1695 goto drop;
1696
1697 sk = pt->af_packet_priv;
1698 po = pkt_sk(sk);
1699
Octavian Purdila09ad9bc2009-11-25 15:14:13 -08001700 if (!net_eq(dev_net(dev), sock_net(sk)))
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08001701 goto drop;
1702
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07001703 if (dev->header_ops) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001704 if (sk->sk_type != SOCK_DGRAM)
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001705 skb_push(skb, skb->data - skb_mac_header(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706 else if (skb->pkt_type == PACKET_OUTGOING) {
1707 /* Special case: outgoing packets have ll header at head */
Arnaldo Carvalho de Melobbe735e2007-03-10 22:16:10 -03001708 skb_pull(skb, skb_network_offset(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001709 }
1710 }
1711
Herbert Xu8dc41942007-02-04 23:31:32 -08001712 if (skb->ip_summed == CHECKSUM_PARTIAL)
1713 status |= TP_STATUS_CSUMNOTREADY;
1714
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715 snaplen = skb->len;
1716
David S. Millerdbcb5852007-01-24 15:21:02 -08001717 res = run_filter(skb, sk, snaplen);
1718 if (!res)
Dmitry Mishinfda9ef52006-08-31 15:28:39 -07001719 goto drop_n_restore;
David S. Millerdbcb5852007-01-24 15:21:02 -08001720 if (snaplen > res)
1721 snaplen = res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001722
1723 if (sk->sk_type == SOCK_DGRAM) {
Patrick McHardy8913336a2008-07-18 18:05:19 -07001724 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
1725 po->tp_reserve;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001726 } else {
Eric Dumazet95c96172012-04-15 05:58:06 +00001727 unsigned int maclen = skb_network_offset(skb);
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001728 netoff = TPACKET_ALIGN(po->tp_hdrlen +
Patrick McHardy8913336a2008-07-18 18:05:19 -07001729 (maclen < 16 ? 16 : maclen)) +
1730 po->tp_reserve;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001731 macoff = netoff - maclen;
1732 }
chetan lokef6fb8f12011-08-19 10:18:16 +00001733 if (po->tp_version <= TPACKET_V2) {
1734 if (macoff + snaplen > po->rx_ring.frame_size) {
1735 if (po->copy_thresh &&
Eric Dumazet0fd7bac2011-12-21 07:11:44 +00001736 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
chetan lokef6fb8f12011-08-19 10:18:16 +00001737 if (skb_shared(skb)) {
1738 copy_skb = skb_clone(skb, GFP_ATOMIC);
1739 } else {
1740 copy_skb = skb_get(skb);
1741 skb_head = skb->data;
1742 }
1743 if (copy_skb)
1744 skb_set_owner_r(copy_skb, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001745 }
chetan lokef6fb8f12011-08-19 10:18:16 +00001746 snaplen = po->rx_ring.frame_size - macoff;
1747 if ((int)snaplen < 0)
1748 snaplen = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001749 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001750 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751 spin_lock(&sk->sk_receive_queue.lock);
chetan lokef6fb8f12011-08-19 10:18:16 +00001752 h.raw = packet_current_rx_frame(po, skb,
1753 TP_STATUS_KERNEL, (macoff+snaplen));
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001754 if (!h.raw)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001755 goto ring_is_full;
chetan lokef6fb8f12011-08-19 10:18:16 +00001756 if (po->tp_version <= TPACKET_V2) {
1757 packet_increment_rx_head(po, &po->rx_ring);
1758 /*
1759 * LOSING will be reported till you read the stats,
1760 * because it's COR - Clear On Read.
1761 * Anyways, moving it for V1/V2 only as V3 doesn't need this
1762 * at packet level.
1763 */
1764 if (po->stats.tp_drops)
1765 status |= TP_STATUS_LOSING;
1766 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001767 po->stats.tp_packets++;
1768 if (copy_skb) {
1769 status |= TP_STATUS_COPY;
1770 __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
1771 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001772 spin_unlock(&sk->sk_receive_queue.lock);
1773
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001774 skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001775
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001776 switch (po->tp_version) {
1777 case TPACKET_V1:
1778 h.h1->tp_len = skb->len;
1779 h.h1->tp_snaplen = snaplen;
1780 h.h1->tp_mac = macoff;
1781 h.h1->tp_net = netoff;
Scott McMillan614f60f2010-06-02 05:53:56 -07001782 if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
1783 && shhwtstamps->syststamp.tv64)
1784 tv = ktime_to_timeval(shhwtstamps->syststamp);
1785 else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
1786 && shhwtstamps->hwtstamp.tv64)
1787 tv = ktime_to_timeval(shhwtstamps->hwtstamp);
1788 else if (skb->tstamp.tv64)
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001789 tv = ktime_to_timeval(skb->tstamp);
1790 else
1791 do_gettimeofday(&tv);
1792 h.h1->tp_sec = tv.tv_sec;
1793 h.h1->tp_usec = tv.tv_usec;
1794 hdrlen = sizeof(*h.h1);
1795 break;
1796 case TPACKET_V2:
1797 h.h2->tp_len = skb->len;
1798 h.h2->tp_snaplen = snaplen;
1799 h.h2->tp_mac = macoff;
1800 h.h2->tp_net = netoff;
Scott McMillan614f60f2010-06-02 05:53:56 -07001801 if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
1802 && shhwtstamps->syststamp.tv64)
1803 ts = ktime_to_timespec(shhwtstamps->syststamp);
1804 else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
1805 && shhwtstamps->hwtstamp.tv64)
1806 ts = ktime_to_timespec(shhwtstamps->hwtstamp);
1807 else if (skb->tstamp.tv64)
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001808 ts = ktime_to_timespec(skb->tstamp);
1809 else
1810 getnstimeofday(&ts);
1811 h.h2->tp_sec = ts.tv_sec;
1812 h.h2->tp_nsec = ts.tv_nsec;
Ben Greeara3bcc232011-06-01 06:49:10 +00001813 if (vlan_tx_tag_present(skb)) {
1814 h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
1815 status |= TP_STATUS_VLAN_VALID;
1816 } else {
1817 h.h2->tp_vlan_tci = 0;
1818 }
Eric Dumazet13fcb7b2011-06-06 22:42:06 -07001819 h.h2->tp_padding = 0;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001820 hdrlen = sizeof(*h.h2);
1821 break;
chetan lokef6fb8f12011-08-19 10:18:16 +00001822 case TPACKET_V3:
1823 /* tp_nxt_offset,vlan are already populated above.
1824 * So DONT clear those fields here
1825 */
1826 h.h3->tp_status |= status;
1827 h.h3->tp_len = skb->len;
1828 h.h3->tp_snaplen = snaplen;
1829 h.h3->tp_mac = macoff;
1830 h.h3->tp_net = netoff;
1831 if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
1832 && shhwtstamps->syststamp.tv64)
1833 ts = ktime_to_timespec(shhwtstamps->syststamp);
1834 else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
1835 && shhwtstamps->hwtstamp.tv64)
1836 ts = ktime_to_timespec(shhwtstamps->hwtstamp);
1837 else if (skb->tstamp.tv64)
1838 ts = ktime_to_timespec(skb->tstamp);
1839 else
1840 getnstimeofday(&ts);
1841 h.h3->tp_sec = ts.tv_sec;
1842 h.h3->tp_nsec = ts.tv_nsec;
1843 hdrlen = sizeof(*h.h3);
1844 break;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001845 default:
1846 BUG();
1847 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001848
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001849 sll = h.raw + TPACKET_ALIGN(hdrlen);
Stephen Hemmingerb95cce32007-09-26 22:13:38 -07001850 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001851 sll->sll_family = AF_PACKET;
1852 sll->sll_hatype = dev->type;
1853 sll->sll_protocol = skb->protocol;
1854 sll->sll_pkttype = skb->pkt_type;
Peter P Waskiewicz Jr8032b462007-11-10 22:03:25 -08001855 if (unlikely(po->origdev))
Peter P. Waskiewicz Jr80feaac2007-04-20 16:05:39 -07001856 sll->sll_ifindex = orig_dev->ifindex;
1857 else
1858 sll->sll_ifindex = dev->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001859
Ralf Baechlee16aa202006-12-07 00:11:33 -08001860 smp_mb();
Changli Gaof6dafa92010-12-07 04:26:16 +00001861#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
Linus Torvalds1da177e2005-04-16 15:20:36 -07001862 {
Changli Gao0af55bb2010-12-01 02:52:20 +00001863 u8 *start, *end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001864
chetan lokef6fb8f12011-08-19 10:18:16 +00001865 if (po->tp_version <= TPACKET_V2) {
1866 end = (u8 *)PAGE_ALIGN((unsigned long)h.raw
1867 + macoff + snaplen);
1868 for (start = h.raw; start < end; start += PAGE_SIZE)
1869 flush_dcache_page(pgv_to_page(start));
1870 }
Chetan Lokecc9f01b2011-07-14 08:36:33 -07001871 smp_wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001872 }
Changli Gaof6dafa92010-12-07 04:26:16 +00001873#endif
chetan lokef6fb8f12011-08-19 10:18:16 +00001874 if (po->tp_version <= TPACKET_V2)
1875 __packet_set_status(po, h.raw, status);
1876 else
1877 prb_clear_blk_fill_status(&po->rx_ring);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001878
1879 sk->sk_data_ready(sk, 0);
1880
1881drop_n_restore:
1882 if (skb_head != skb->data && skb_shared(skb)) {
1883 skb->data = skb_head;
1884 skb->len = skb_len;
1885 }
1886drop:
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001887 kfree_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001888 return 0;
1889
1890ring_is_full:
1891 po->stats.tp_drops++;
1892 spin_unlock(&sk->sk_receive_queue.lock);
1893
1894 sk->sk_data_ready(sk, 0);
Wei Yongjunacb5d752009-02-25 00:36:42 +00001895 kfree_skb(copy_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001896 goto drop_n_restore;
1897}
1898
Johann Baudy69e3c752009-05-18 22:11:22 -07001899static void tpacket_destruct_skb(struct sk_buff *skb)
1900{
1901 struct packet_sock *po = pkt_sk(skb->sk);
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001902 void *ph;
Johann Baudy69e3c752009-05-18 22:11:22 -07001903
Johann Baudy69e3c752009-05-18 22:11:22 -07001904 if (likely(po->tx_ring.pg_vec)) {
1905 ph = skb_shinfo(skb)->destructor_arg;
Johann Baudy69e3c752009-05-18 22:11:22 -07001906 BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
1907 atomic_dec(&po->tx_ring.pending);
1908 __packet_set_status(po, ph, TP_STATUS_AVAILABLE);
1909 }
1910
1911 sock_wfree(skb);
1912}
1913
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001914static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
1915 void *frame, struct net_device *dev, int size_max,
Herbert Xuae641942011-11-18 02:20:04 +00001916 __be16 proto, unsigned char *addr, int hlen)
Johann Baudy69e3c752009-05-18 22:11:22 -07001917{
1918 union {
1919 struct tpacket_hdr *h1;
1920 struct tpacket2_hdr *h2;
1921 void *raw;
1922 } ph;
1923 int to_write, offset, len, tp_len, nr_frags, len_max;
1924 struct socket *sock = po->sk.sk_socket;
1925 struct page *page;
1926 void *data;
1927 int err;
Jason Wangc1aad272013-03-25 20:19:57 +00001928 struct flow_keys keys;
Johann Baudy69e3c752009-05-18 22:11:22 -07001929
1930 ph.raw = frame;
1931
1932 skb->protocol = proto;
1933 skb->dev = dev;
1934 skb->priority = po->sk.sk_priority;
Eric Dumazet2d37a182009-10-01 19:14:46 +00001935 skb->mark = po->sk.sk_mark;
Johann Baudy69e3c752009-05-18 22:11:22 -07001936 skb_shinfo(skb)->destructor_arg = ph.raw;
1937
1938 switch (po->tp_version) {
1939 case TPACKET_V2:
1940 tp_len = ph.h2->tp_len;
1941 break;
1942 default:
1943 tp_len = ph.h1->tp_len;
1944 break;
1945 }
1946 if (unlikely(tp_len > size_max)) {
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001947 pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
Johann Baudy69e3c752009-05-18 22:11:22 -07001948 return -EMSGSIZE;
1949 }
1950
Herbert Xuae641942011-11-18 02:20:04 +00001951 skb_reserve(skb, hlen);
Johann Baudy69e3c752009-05-18 22:11:22 -07001952 skb_reset_network_header(skb);
1953
Jason Wangc1aad272013-03-25 20:19:57 +00001954 if (skb_flow_dissect(skb, &keys))
1955 skb_set_transport_header(skb, keys.thoff);
1956 else
1957 skb_reset_transport_header(skb);
1958
Paul Chavent5920cd3a2012-11-06 23:10:47 +00001959 if (po->tp_tx_has_off) {
1960 int off_min, off_max, off;
1961 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
1962 off_max = po->tx_ring.frame_size - tp_len;
1963 if (sock->type == SOCK_DGRAM) {
1964 switch (po->tp_version) {
1965 case TPACKET_V2:
1966 off = ph.h2->tp_net;
1967 break;
1968 default:
1969 off = ph.h1->tp_net;
1970 break;
1971 }
1972 } else {
1973 switch (po->tp_version) {
1974 case TPACKET_V2:
1975 off = ph.h2->tp_mac;
1976 break;
1977 default:
1978 off = ph.h1->tp_mac;
1979 break;
1980 }
1981 }
1982 if (unlikely((off < off_min) || (off_max < off)))
1983 return -EINVAL;
1984 data = ph.raw + off;
1985 } else {
1986 data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
1987 }
Johann Baudy69e3c752009-05-18 22:11:22 -07001988 to_write = tp_len;
1989
1990 if (sock->type == SOCK_DGRAM) {
1991 err = dev_hard_header(skb, dev, ntohs(proto), addr,
1992 NULL, tp_len);
1993 if (unlikely(err < 0))
1994 return -EINVAL;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001995 } else if (dev->hard_header_len) {
Johann Baudy69e3c752009-05-18 22:11:22 -07001996 /* net device doesn't like empty head */
1997 if (unlikely(tp_len <= dev->hard_header_len)) {
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001998 pr_err("packet size is too short (%d < %d)\n",
1999 tp_len, dev->hard_header_len);
Johann Baudy69e3c752009-05-18 22:11:22 -07002000 return -EINVAL;
2001 }
2002
2003 skb_push(skb, dev->hard_header_len);
2004 err = skb_store_bits(skb, 0, data,
2005 dev->hard_header_len);
2006 if (unlikely(err))
2007 return err;
2008
2009 data += dev->hard_header_len;
2010 to_write -= dev->hard_header_len;
2011 }
2012
Johann Baudy69e3c752009-05-18 22:11:22 -07002013 offset = offset_in_page(data);
2014 len_max = PAGE_SIZE - offset;
2015 len = ((to_write > len_max) ? len_max : to_write);
2016
2017 skb->data_len = to_write;
2018 skb->len += to_write;
2019 skb->truesize += to_write;
2020 atomic_add(to_write, &po->sk.sk_wmem_alloc);
2021
2022 while (likely(to_write)) {
2023 nr_frags = skb_shinfo(skb)->nr_frags;
2024
2025 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002026 pr_err("Packet exceed the number of skb frags(%lu)\n",
2027 MAX_SKB_FRAGS);
Johann Baudy69e3c752009-05-18 22:11:22 -07002028 return -EFAULT;
2029 }
2030
Changli Gao0af55bb2010-12-01 02:52:20 +00002031 page = pgv_to_page(data);
2032 data += len;
Johann Baudy69e3c752009-05-18 22:11:22 -07002033 flush_dcache_page(page);
2034 get_page(page);
Changli Gao0af55bb2010-12-01 02:52:20 +00002035 skb_fill_page_desc(skb, nr_frags, page, offset, len);
Johann Baudy69e3c752009-05-18 22:11:22 -07002036 to_write -= len;
2037 offset = 0;
2038 len_max = PAGE_SIZE;
2039 len = ((to_write > len_max) ? len_max : to_write);
2040 }
2041
2042 return tp_len;
2043}
2044
2045static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2046{
Johann Baudy69e3c752009-05-18 22:11:22 -07002047 struct sk_buff *skb;
2048 struct net_device *dev;
2049 __be16 proto;
Ben Greear827d9782011-06-01 07:18:53 +00002050 bool need_rls_dev = false;
2051 int err, reserve = 0;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002052 void *ph;
2053 struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
Johann Baudy69e3c752009-05-18 22:11:22 -07002054 int tp_len, size_max;
2055 unsigned char *addr;
2056 int len_sum = 0;
danborkmann@iogearbox.net9e670302012-08-20 03:34:03 +00002057 int status = TP_STATUS_AVAILABLE;
Herbert Xuae641942011-11-18 02:20:04 +00002058 int hlen, tlen;
Johann Baudy69e3c752009-05-18 22:11:22 -07002059
Johann Baudy69e3c752009-05-18 22:11:22 -07002060 mutex_lock(&po->pg_vec_lock);
2061
Johann Baudy69e3c752009-05-18 22:11:22 -07002062 if (saddr == NULL) {
Ben Greear827d9782011-06-01 07:18:53 +00002063 dev = po->prot_hook.dev;
Johann Baudy69e3c752009-05-18 22:11:22 -07002064 proto = po->num;
2065 addr = NULL;
2066 } else {
2067 err = -EINVAL;
2068 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2069 goto out;
2070 if (msg->msg_namelen < (saddr->sll_halen
2071 + offsetof(struct sockaddr_ll,
2072 sll_addr)))
2073 goto out;
Johann Baudy69e3c752009-05-18 22:11:22 -07002074 proto = saddr->sll_protocol;
2075 addr = saddr->sll_addr;
Ben Greear827d9782011-06-01 07:18:53 +00002076 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2077 need_rls_dev = true;
Johann Baudy69e3c752009-05-18 22:11:22 -07002078 }
2079
Johann Baudy69e3c752009-05-18 22:11:22 -07002080 err = -ENXIO;
2081 if (unlikely(dev == NULL))
2082 goto out;
2083
2084 reserve = dev->hard_header_len;
2085
2086 err = -ENETDOWN;
2087 if (unlikely(!(dev->flags & IFF_UP)))
2088 goto out_put;
2089
2090 size_max = po->tx_ring.frame_size
Gabor Gombasb5dd8842009-10-29 03:19:11 -07002091 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
Johann Baudy69e3c752009-05-18 22:11:22 -07002092
2093 if (size_max > dev->mtu + reserve)
2094 size_max = dev->mtu + reserve;
2095
2096 do {
2097 ph = packet_current_frame(po, &po->tx_ring,
2098 TP_STATUS_SEND_REQUEST);
2099
2100 if (unlikely(ph == NULL)) {
2101 schedule();
2102 continue;
2103 }
2104
2105 status = TP_STATUS_SEND_REQUEST;
Herbert Xuae641942011-11-18 02:20:04 +00002106 hlen = LL_RESERVED_SPACE(dev);
2107 tlen = dev->needed_tailroom;
Johann Baudy69e3c752009-05-18 22:11:22 -07002108 skb = sock_alloc_send_skb(&po->sk,
Herbert Xuae641942011-11-18 02:20:04 +00002109 hlen + tlen + sizeof(struct sockaddr_ll),
Johann Baudy69e3c752009-05-18 22:11:22 -07002110 0, &err);
2111
2112 if (unlikely(skb == NULL))
2113 goto out_status;
2114
2115 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
Herbert Xuae641942011-11-18 02:20:04 +00002116 addr, hlen);
Johann Baudy69e3c752009-05-18 22:11:22 -07002117
2118 if (unlikely(tp_len < 0)) {
2119 if (po->tp_loss) {
2120 __packet_set_status(po, ph,
2121 TP_STATUS_AVAILABLE);
2122 packet_increment_head(&po->tx_ring);
2123 kfree_skb(skb);
2124 continue;
2125 } else {
2126 status = TP_STATUS_WRONG_FORMAT;
2127 err = tp_len;
2128 goto out_status;
2129 }
2130 }
2131
2132 skb->destructor = tpacket_destruct_skb;
2133 __packet_set_status(po, ph, TP_STATUS_SENDING);
2134 atomic_inc(&po->tx_ring.pending);
2135
2136 status = TP_STATUS_SEND_REQUEST;
2137 err = dev_queue_xmit(skb);
Jarek Poplawskieb70df12010-01-10 22:04:19 +00002138 if (unlikely(err > 0)) {
2139 err = net_xmit_errno(err);
2140 if (err && __packet_get_status(po, ph) ==
2141 TP_STATUS_AVAILABLE) {
2142 /* skb was destructed already */
2143 skb = NULL;
2144 goto out_status;
2145 }
2146 /*
2147 * skb was dropped but not destructed yet;
2148 * let's treat it like congestion or err < 0
2149 */
2150 err = 0;
2151 }
Johann Baudy69e3c752009-05-18 22:11:22 -07002152 packet_increment_head(&po->tx_ring);
2153 len_sum += tp_len;
Joe Perchesf64f9e72009-11-29 16:55:45 -08002154 } while (likely((ph != NULL) ||
2155 ((!(msg->msg_flags & MSG_DONTWAIT)) &&
2156 (atomic_read(&po->tx_ring.pending))))
2157 );
Johann Baudy69e3c752009-05-18 22:11:22 -07002158
2159 err = len_sum;
2160 goto out_put;
2161
Johann Baudy69e3c752009-05-18 22:11:22 -07002162out_status:
2163 __packet_set_status(po, ph, status);
2164 kfree_skb(skb);
2165out_put:
Ben Greear827d9782011-06-01 07:18:53 +00002166 if (need_rls_dev)
2167 dev_put(dev);
Johann Baudy69e3c752009-05-18 22:11:22 -07002168out:
2169 mutex_unlock(&po->pg_vec_lock);
2170 return err;
2171}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002172
Olof Johanssoneea49cc92011-11-02 11:00:49 +00002173static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2174 size_t reserve, size_t len,
2175 size_t linear, int noblock,
2176 int *err)
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002177{
2178 struct sk_buff *skb;
2179
2180 /* Under a page? Don't bother with paged skb. */
2181 if (prepad + len < PAGE_SIZE || !linear)
2182 linear = len;
2183
2184 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2185 err);
2186 if (!skb)
2187 return NULL;
2188
2189 skb_reserve(skb, reserve);
2190 skb_put(skb, linear);
2191 skb->data_len = len - linear;
2192 skb->len += len - linear;
2193
2194 return skb;
2195}
2196
Johann Baudy69e3c752009-05-18 22:11:22 -07002197static int packet_snd(struct socket *sock,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002198 struct msghdr *msg, size_t len)
2199{
2200 struct sock *sk = sock->sk;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002201 struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002202 struct sk_buff *skb;
2203 struct net_device *dev;
Al Viro0e11c912006-11-08 00:26:29 -08002204 __be16 proto;
Ben Greear827d9782011-06-01 07:18:53 +00002205 bool need_rls_dev = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002206 unsigned char *addr;
Ben Greear827d9782011-06-01 07:18:53 +00002207 int err, reserve = 0;
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002208 struct virtio_net_hdr vnet_hdr = { 0 };
2209 int offset = 0;
2210 int vnet_hdr_len;
2211 struct packet_sock *po = pkt_sk(sk);
2212 unsigned short gso_type = 0;
Herbert Xuae641942011-11-18 02:20:04 +00002213 int hlen, tlen;
Ben Greear3bdc0eb2012-02-11 15:39:30 +00002214 int extra_len = 0;
Jason Wangc1aad272013-03-25 20:19:57 +00002215 struct flow_keys keys;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002216
2217 /*
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002218 * Get and verify the address.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002219 */
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002220
Linus Torvalds1da177e2005-04-16 15:20:36 -07002221 if (saddr == NULL) {
Ben Greear827d9782011-06-01 07:18:53 +00002222 dev = po->prot_hook.dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002223 proto = po->num;
2224 addr = NULL;
2225 } else {
2226 err = -EINVAL;
2227 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2228 goto out;
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002229 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2230 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002231 proto = saddr->sll_protocol;
2232 addr = saddr->sll_addr;
Ben Greear827d9782011-06-01 07:18:53 +00002233 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2234 need_rls_dev = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002235 }
2236
Linus Torvalds1da177e2005-04-16 15:20:36 -07002237 err = -ENXIO;
2238 if (dev == NULL)
2239 goto out_unlock;
2240 if (sock->type == SOCK_RAW)
2241 reserve = dev->hard_header_len;
2242
David S. Millerd5e76b02007-01-25 19:30:36 -08002243 err = -ENETDOWN;
2244 if (!(dev->flags & IFF_UP))
2245 goto out_unlock;
2246
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002247 if (po->has_vnet_hdr) {
2248 vnet_hdr_len = sizeof(vnet_hdr);
2249
2250 err = -EINVAL;
2251 if (len < vnet_hdr_len)
2252 goto out_unlock;
2253
2254 len -= vnet_hdr_len;
2255
2256 err = memcpy_fromiovec((void *)&vnet_hdr, msg->msg_iov,
2257 vnet_hdr_len);
2258 if (err < 0)
2259 goto out_unlock;
2260
2261 if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2262 (vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
2263 vnet_hdr.hdr_len))
2264 vnet_hdr.hdr_len = vnet_hdr.csum_start +
2265 vnet_hdr.csum_offset + 2;
2266
2267 err = -EINVAL;
2268 if (vnet_hdr.hdr_len > len)
2269 goto out_unlock;
2270
2271 if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
2272 switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
2273 case VIRTIO_NET_HDR_GSO_TCPV4:
2274 gso_type = SKB_GSO_TCPV4;
2275 break;
2276 case VIRTIO_NET_HDR_GSO_TCPV6:
2277 gso_type = SKB_GSO_TCPV6;
2278 break;
2279 case VIRTIO_NET_HDR_GSO_UDP:
2280 gso_type = SKB_GSO_UDP;
2281 break;
2282 default:
2283 goto out_unlock;
2284 }
2285
2286 if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
2287 gso_type |= SKB_GSO_TCP_ECN;
2288
2289 if (vnet_hdr.gso_size == 0)
2290 goto out_unlock;
2291
2292 }
2293 }
2294
Ben Greear3bdc0eb2012-02-11 15:39:30 +00002295 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2296 if (!netif_supports_nofcs(dev)) {
2297 err = -EPROTONOSUPPORT;
2298 goto out_unlock;
2299 }
2300 extra_len = 4; /* We're doing our own CRC */
2301 }
2302
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303 err = -EMSGSIZE;
Ben Greear3bdc0eb2012-02-11 15:39:30 +00002304 if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002305 goto out_unlock;
2306
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002307 err = -ENOBUFS;
Herbert Xuae641942011-11-18 02:20:04 +00002308 hlen = LL_RESERVED_SPACE(dev);
2309 tlen = dev->needed_tailroom;
2310 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, vnet_hdr.hdr_len,
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002311 msg->msg_flags & MSG_DONTWAIT, &err);
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002312 if (skb == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002313 goto out_unlock;
2314
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002315 skb_set_network_header(skb, reserve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002316
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07002317 err = -EINVAL;
2318 if (sock->type == SOCK_DGRAM &&
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002319 (offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len)) < 0)
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07002320 goto out_free;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002321
2322 /* Returns -EFAULT on error */
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002323 err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002324 if (err)
2325 goto out_free;
Oliver Hartkopp2244d072010-08-17 08:59:14 +00002326 err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
Richard Cochraned85b562010-04-07 22:41:28 +00002327 if (err < 0)
2328 goto out_free;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002329
Ben Greear3bdc0eb2012-02-11 15:39:30 +00002330 if (!gso_type && (len > dev->mtu + reserve + extra_len)) {
Ben Greear57f89bf2011-02-11 09:35:18 +00002331 /* Earlier code assumed this would be a VLAN pkt,
2332 * double-check this now that we have the actual
2333 * packet in hand.
2334 */
2335 struct ethhdr *ehdr;
2336 skb_reset_mac_header(skb);
2337 ehdr = eth_hdr(skb);
2338 if (ehdr->h_proto != htons(ETH_P_8021Q)) {
2339 err = -EMSGSIZE;
2340 goto out_free;
2341 }
2342 }
2343
Linus Torvalds1da177e2005-04-16 15:20:36 -07002344 skb->protocol = proto;
2345 skb->dev = dev;
2346 skb->priority = sk->sk_priority;
Eric Dumazet2d37a182009-10-01 19:14:46 +00002347 skb->mark = sk->sk_mark;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002348
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002349 if (po->has_vnet_hdr) {
2350 if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
2351 if (!skb_partial_csum_set(skb, vnet_hdr.csum_start,
2352 vnet_hdr.csum_offset)) {
2353 err = -EINVAL;
2354 goto out_free;
2355 }
2356 }
2357
2358 skb_shinfo(skb)->gso_size = vnet_hdr.gso_size;
2359 skb_shinfo(skb)->gso_type = gso_type;
2360
2361 /* Header must be checked, and gso_segs computed. */
2362 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
2363 skb_shinfo(skb)->gso_segs = 0;
2364
2365 len += vnet_hdr_len;
2366 }
2367
Jason Wangc1aad272013-03-25 20:19:57 +00002368 if (skb->ip_summed == CHECKSUM_PARTIAL)
2369 skb_set_transport_header(skb, skb_checksum_start_offset(skb));
2370 else if (skb_flow_dissect(skb, &keys))
2371 skb_set_transport_header(skb, keys.thoff);
2372 else
2373 skb_set_transport_header(skb, reserve);
2374
Ben Greear3bdc0eb2012-02-11 15:39:30 +00002375 if (unlikely(extra_len == 4))
2376 skb->no_fcs = 1;
2377
Linus Torvalds1da177e2005-04-16 15:20:36 -07002378 /*
2379 * Now send it
2380 */
2381
2382 err = dev_queue_xmit(skb);
2383 if (err > 0 && (err = net_xmit_errno(err)) != 0)
2384 goto out_unlock;
2385
Ben Greear827d9782011-06-01 07:18:53 +00002386 if (need_rls_dev)
2387 dev_put(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002388
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002389 return len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002390
2391out_free:
2392 kfree_skb(skb);
2393out_unlock:
Ben Greear827d9782011-06-01 07:18:53 +00002394 if (dev && need_rls_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002395 dev_put(dev);
2396out:
2397 return err;
2398}
2399
Johann Baudy69e3c752009-05-18 22:11:22 -07002400static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
2401 struct msghdr *msg, size_t len)
2402{
Johann Baudy69e3c752009-05-18 22:11:22 -07002403 struct sock *sk = sock->sk;
2404 struct packet_sock *po = pkt_sk(sk);
2405 if (po->tx_ring.pg_vec)
2406 return tpacket_snd(po, msg);
2407 else
Johann Baudy69e3c752009-05-18 22:11:22 -07002408 return packet_snd(sock, msg, len);
2409}
2410
Linus Torvalds1da177e2005-04-16 15:20:36 -07002411/*
2412 * Close a PACKET socket. This is fairly simple. We immediately go
2413 * to 'closed' state and remove our protocol entry in the device list.
2414 */
2415
2416static int packet_release(struct socket *sock)
2417{
2418 struct sock *sk = sock->sk;
2419 struct packet_sock *po;
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08002420 struct net *net;
chetan lokef6fb8f12011-08-19 10:18:16 +00002421 union tpacket_req_u req_u;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002422
2423 if (!sk)
2424 return 0;
2425
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002426 net = sock_net(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002427 po = pkt_sk(sk);
2428
Pavel Emelyanov0fa7fa92012-08-21 01:06:47 +00002429 mutex_lock(&net->packet.sklist_lock);
stephen hemminger808f5112010-02-22 07:57:18 +00002430 sk_del_node_init_rcu(sk);
Pavel Emelyanov0fa7fa92012-08-21 01:06:47 +00002431 mutex_unlock(&net->packet.sklist_lock);
2432
2433 preempt_disable();
Eric Dumazet920de802008-11-24 00:09:29 -08002434 sock_prot_inuse_add(net, sk->sk_prot, -1);
Pavel Emelyanov0fa7fa92012-08-21 01:06:47 +00002435 preempt_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002436
stephen hemminger808f5112010-02-22 07:57:18 +00002437 spin_lock(&po->bind_lock);
David S. Millerce06b032011-07-04 01:44:29 -07002438 unregister_prot_hook(sk, false);
Ben Greear160ff182011-06-01 07:18:52 +00002439 if (po->prot_hook.dev) {
2440 dev_put(po->prot_hook.dev);
2441 po->prot_hook.dev = NULL;
2442 }
stephen hemminger808f5112010-02-22 07:57:18 +00002443 spin_unlock(&po->bind_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002444
Linus Torvalds1da177e2005-04-16 15:20:36 -07002445 packet_flush_mclist(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002446
Phil Sutter9665d5d2013-02-01 07:21:41 +00002447 if (po->rx_ring.pg_vec) {
2448 memset(&req_u, 0, sizeof(req_u));
chetan lokef6fb8f12011-08-19 10:18:16 +00002449 packet_set_ring(sk, &req_u, 1, 0);
Phil Sutter9665d5d2013-02-01 07:21:41 +00002450 }
Johann Baudy69e3c752009-05-18 22:11:22 -07002451
Phil Sutter9665d5d2013-02-01 07:21:41 +00002452 if (po->tx_ring.pg_vec) {
2453 memset(&req_u, 0, sizeof(req_u));
chetan lokef6fb8f12011-08-19 10:18:16 +00002454 packet_set_ring(sk, &req_u, 1, 1);
Phil Sutter9665d5d2013-02-01 07:21:41 +00002455 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002456
David S. Millerdc99f602011-07-05 01:45:05 -07002457 fanout_release(sk);
2458
stephen hemminger808f5112010-02-22 07:57:18 +00002459 synchronize_net();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002460 /*
2461 * Now the socket is dead. No more input will appear.
2462 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002463 sock_orphan(sk);
2464 sock->sk = NULL;
2465
2466 /* Purge queues */
2467
2468 skb_queue_purge(&sk->sk_receive_queue);
Pavel Emelyanov17ab56a2007-11-10 21:38:48 -08002469 sk_refcnt_debug_release(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002470
2471 sock_put(sk);
2472 return 0;
2473}
2474
2475/*
2476 * Attach a packet hook.
2477 */
2478
Al Viro0e11c912006-11-08 00:26:29 -08002479static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002480{
2481 struct packet_sock *po = pkt_sk(sk);
David S. Millerdc99f602011-07-05 01:45:05 -07002482
Wei Yongjunaef950b2011-12-27 22:32:41 -05002483 if (po->fanout) {
2484 if (dev)
2485 dev_put(dev);
2486
David S. Millerdc99f602011-07-05 01:45:05 -07002487 return -EINVAL;
Wei Yongjunaef950b2011-12-27 22:32:41 -05002488 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002489
2490 lock_sock(sk);
2491
2492 spin_lock(&po->bind_lock);
David S. Millerce06b032011-07-04 01:44:29 -07002493 unregister_prot_hook(sk, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002494 po->num = protocol;
2495 po->prot_hook.type = protocol;
Ben Greear160ff182011-06-01 07:18:52 +00002496 if (po->prot_hook.dev)
2497 dev_put(po->prot_hook.dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002498 po->prot_hook.dev = dev;
2499
2500 po->ifindex = dev ? dev->ifindex : 0;
2501
2502 if (protocol == 0)
2503 goto out_unlock;
2504
Urs Thuermannbe85d4a2007-11-12 21:05:20 -08002505 if (!dev || (dev->flags & IFF_UP)) {
David S. Millerce06b032011-07-04 01:44:29 -07002506 register_prot_hook(sk);
Urs Thuermannbe85d4a2007-11-12 21:05:20 -08002507 } else {
2508 sk->sk_err = ENETDOWN;
2509 if (!sock_flag(sk, SOCK_DEAD))
2510 sk->sk_error_report(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002511 }
2512
2513out_unlock:
2514 spin_unlock(&po->bind_lock);
2515 release_sock(sk);
2516 return 0;
2517}
2518
2519/*
2520 * Bind a packet socket to a device
2521 */
2522
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002523static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
2524 int addr_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002525{
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002526 struct sock *sk = sock->sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002527 char name[15];
2528 struct net_device *dev;
2529 int err = -ENODEV;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002530
Linus Torvalds1da177e2005-04-16 15:20:36 -07002531 /*
2532 * Check legality
2533 */
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002534
Kris Katterjohn8ae55f02006-01-23 16:28:02 -08002535 if (addr_len != sizeof(struct sockaddr))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002536 return -EINVAL;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002537 strlcpy(name, uaddr->sa_data, sizeof(name));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002538
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002539 dev = dev_get_by_name(sock_net(sk), name);
Ben Greear160ff182011-06-01 07:18:52 +00002540 if (dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002541 err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002542 return err;
2543}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002544
2545static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
2546{
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002547 struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
2548 struct sock *sk = sock->sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002549 struct net_device *dev = NULL;
2550 int err;
2551
2552
2553 /*
2554 * Check legality
2555 */
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002556
Linus Torvalds1da177e2005-04-16 15:20:36 -07002557 if (addr_len < sizeof(struct sockaddr_ll))
2558 return -EINVAL;
2559 if (sll->sll_family != AF_PACKET)
2560 return -EINVAL;
2561
2562 if (sll->sll_ifindex) {
2563 err = -ENODEV;
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002564 dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002565 if (dev == NULL)
2566 goto out;
2567 }
2568 err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002569
2570out:
2571 return err;
2572}
2573
2574static struct proto packet_proto = {
2575 .name = "PACKET",
2576 .owner = THIS_MODULE,
2577 .obj_size = sizeof(struct packet_sock),
2578};
2579
2580/*
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002581 * Create a packet of type SOCK_PACKET.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002582 */
2583
Eric Paris3f378b62009-11-05 22:18:14 -08002584static int packet_create(struct net *net, struct socket *sock, int protocol,
2585 int kern)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002586{
2587 struct sock *sk;
2588 struct packet_sock *po;
Al Viro0e11c912006-11-08 00:26:29 -08002589 __be16 proto = (__force __be16)protocol; /* weird, but documented */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002590 int err;
2591
Eric W. Biedermandf008c92012-11-16 03:03:07 +00002592 if (!ns_capable(net->user_ns, CAP_NET_RAW))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002593 return -EPERM;
David S. Millerbe020972007-05-29 13:16:31 -07002594 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
2595 sock->type != SOCK_PACKET)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002596 return -ESOCKTNOSUPPORT;
2597
2598 sock->state = SS_UNCONNECTED;
2599
2600 err = -ENOBUFS;
Pavel Emelyanov6257ff22007-11-01 00:39:31 -07002601 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002602 if (sk == NULL)
2603 goto out;
2604
2605 sock->ops = &packet_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002606 if (sock->type == SOCK_PACKET)
2607 sock->ops = &packet_ops_spkt;
David S. Millerbe020972007-05-29 13:16:31 -07002608
Linus Torvalds1da177e2005-04-16 15:20:36 -07002609 sock_init_data(sock, sk);
2610
2611 po = pkt_sk(sk);
2612 sk->sk_family = PF_PACKET;
Al Viro0e11c912006-11-08 00:26:29 -08002613 po->num = proto;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002614
2615 sk->sk_destruct = packet_sock_destruct;
Pavel Emelyanov17ab56a2007-11-10 21:38:48 -08002616 sk_refcnt_debug_inc(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002617
2618 /*
2619 * Attach a protocol block
2620 */
2621
2622 spin_lock_init(&po->bind_lock);
Herbert Xu905db442009-01-30 14:12:06 -08002623 mutex_init(&po->pg_vec_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002624 po->prot_hook.func = packet_rcv;
David S. Millerbe020972007-05-29 13:16:31 -07002625
Linus Torvalds1da177e2005-04-16 15:20:36 -07002626 if (sock->type == SOCK_PACKET)
2627 po->prot_hook.func = packet_rcv_spkt;
David S. Millerbe020972007-05-29 13:16:31 -07002628
Linus Torvalds1da177e2005-04-16 15:20:36 -07002629 po->prot_hook.af_packet_priv = sk;
2630
Al Viro0e11c912006-11-08 00:26:29 -08002631 if (proto) {
2632 po->prot_hook.type = proto;
David S. Millerce06b032011-07-04 01:44:29 -07002633 register_prot_hook(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002634 }
2635
Pavel Emelyanov0fa7fa92012-08-21 01:06:47 +00002636 mutex_lock(&net->packet.sklist_lock);
stephen hemminger808f5112010-02-22 07:57:18 +00002637 sk_add_node_rcu(sk, &net->packet.sklist);
Pavel Emelyanov0fa7fa92012-08-21 01:06:47 +00002638 mutex_unlock(&net->packet.sklist_lock);
2639
2640 preempt_disable();
Eric Dumazet36804532008-11-19 14:25:35 -08002641 sock_prot_inuse_add(net, &packet_proto, 1);
Pavel Emelyanov0fa7fa92012-08-21 01:06:47 +00002642 preempt_enable();
stephen hemminger808f5112010-02-22 07:57:18 +00002643
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002644 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002645out:
2646 return err;
2647}
2648
Richard Cochraned85b562010-04-07 22:41:28 +00002649static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
2650{
2651 struct sock_exterr_skb *serr;
2652 struct sk_buff *skb, *skb2;
2653 int copied, err;
2654
2655 err = -EAGAIN;
2656 skb = skb_dequeue(&sk->sk_error_queue);
2657 if (skb == NULL)
2658 goto out;
2659
2660 copied = skb->len;
2661 if (copied > len) {
2662 msg->msg_flags |= MSG_TRUNC;
2663 copied = len;
2664 }
2665 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
2666 if (err)
2667 goto out_free_skb;
2668
2669 sock_recv_timestamp(msg, sk, skb);
2670
2671 serr = SKB_EXT_ERR(skb);
2672 put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
2673 sizeof(serr->ee), &serr->ee);
2674
2675 msg->msg_flags |= MSG_ERRQUEUE;
2676 err = copied;
2677
2678 /* Reset and regenerate socket error */
2679 spin_lock_bh(&sk->sk_error_queue.lock);
2680 sk->sk_err = 0;
2681 if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
2682 sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
2683 spin_unlock_bh(&sk->sk_error_queue.lock);
2684 sk->sk_error_report(sk);
2685 } else
2686 spin_unlock_bh(&sk->sk_error_queue.lock);
2687
2688out_free_skb:
2689 kfree_skb(skb);
2690out:
2691 return err;
2692}
2693
Linus Torvalds1da177e2005-04-16 15:20:36 -07002694/*
2695 * Pull a packet from our receive queue and hand it to the user.
2696 * If necessary we block.
2697 */
2698
2699static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
2700 struct msghdr *msg, size_t len, int flags)
2701{
2702 struct sock *sk = sock->sk;
2703 struct sk_buff *skb;
2704 int copied, err;
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002705 struct sockaddr_ll *sll;
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002706 int vnet_hdr_len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002707
2708 err = -EINVAL;
Richard Cochraned85b562010-04-07 22:41:28 +00002709 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002710 goto out;
2711
2712#if 0
2713 /* What error should we return now? EUNATTACH? */
2714 if (pkt_sk(sk)->ifindex < 0)
2715 return -ENODEV;
2716#endif
2717
Richard Cochraned85b562010-04-07 22:41:28 +00002718 if (flags & MSG_ERRQUEUE) {
2719 err = packet_recv_error(sk, msg, len);
2720 goto out;
2721 }
2722
Linus Torvalds1da177e2005-04-16 15:20:36 -07002723 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002724 * Call the generic datagram receiver. This handles all sorts
2725 * of horrible races and re-entrancy so we can forget about it
2726 * in the protocol layers.
2727 *
2728 * Now it will return ENETDOWN, if device have just gone down,
2729 * but then it will block.
2730 */
2731
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002732 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002733
2734 /*
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002735 * An error occurred so return it. Because skb_recv_datagram()
Linus Torvalds1da177e2005-04-16 15:20:36 -07002736 * handles the blocking we don't see and worry about blocking
2737 * retries.
2738 */
2739
Kris Katterjohn8ae55f02006-01-23 16:28:02 -08002740 if (skb == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002741 goto out;
2742
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002743 if (pkt_sk(sk)->has_vnet_hdr) {
2744 struct virtio_net_hdr vnet_hdr = { 0 };
2745
2746 err = -EINVAL;
2747 vnet_hdr_len = sizeof(vnet_hdr);
Mariusz Kozlowski1f18b712010-11-08 11:58:45 +00002748 if (len < vnet_hdr_len)
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002749 goto out_free;
2750
Mariusz Kozlowski1f18b712010-11-08 11:58:45 +00002751 len -= vnet_hdr_len;
2752
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002753 if (skb_is_gso(skb)) {
2754 struct skb_shared_info *sinfo = skb_shinfo(skb);
2755
2756 /* This is a hint as to how much should be linear. */
2757 vnet_hdr.hdr_len = skb_headlen(skb);
2758 vnet_hdr.gso_size = sinfo->gso_size;
2759 if (sinfo->gso_type & SKB_GSO_TCPV4)
2760 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
2761 else if (sinfo->gso_type & SKB_GSO_TCPV6)
2762 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
2763 else if (sinfo->gso_type & SKB_GSO_UDP)
2764 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
2765 else if (sinfo->gso_type & SKB_GSO_FCOE)
2766 goto out_free;
2767 else
2768 BUG();
2769 if (sinfo->gso_type & SKB_GSO_TCP_ECN)
2770 vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
2771 } else
2772 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
2773
2774 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2775 vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
Michał Mirosław55508d62010-12-14 15:24:08 +00002776 vnet_hdr.csum_start = skb_checksum_start_offset(skb);
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002777 vnet_hdr.csum_offset = skb->csum_offset;
Jason Wang10a8d942011-06-10 00:56:17 +00002778 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
2779 vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002780 } /* else everything is zero */
2781
2782 err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr,
2783 vnet_hdr_len);
2784 if (err < 0)
2785 goto out_free;
2786 }
2787
Linus Torvalds1da177e2005-04-16 15:20:36 -07002788 /*
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002789 * If the address length field is there to be filled in, we fill
2790 * it in now.
2791 */
2792
Herbert Xuffbc6112007-02-04 23:33:10 -08002793 sll = &PACKET_SKB_CB(skb)->sa.ll;
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002794 if (sock->type == SOCK_PACKET)
2795 msg->msg_namelen = sizeof(struct sockaddr_pkt);
2796 else
2797 msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);
2798
2799 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002800 * You lose any data beyond the buffer you gave. If it worries a
2801 * user program they can ask the device for its MTU anyway.
2802 */
2803
2804 copied = skb->len;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002805 if (copied > len) {
2806 copied = len;
2807 msg->msg_flags |= MSG_TRUNC;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002808 }
2809
2810 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
2811 if (err)
2812 goto out_free;
2813
Neil Horman3b885782009-10-12 13:26:31 -07002814 sock_recv_ts_and_drops(msg, sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002815
2816 if (msg->msg_name)
Herbert Xuffbc6112007-02-04 23:33:10 -08002817 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
2818 msg->msg_namelen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002819
Herbert Xu8dc41942007-02-04 23:31:32 -08002820 if (pkt_sk(sk)->auxdata) {
Herbert Xuffbc6112007-02-04 23:33:10 -08002821 struct tpacket_auxdata aux;
2822
2823 aux.tp_status = TP_STATUS_USER;
2824 if (skb->ip_summed == CHECKSUM_PARTIAL)
2825 aux.tp_status |= TP_STATUS_CSUMNOTREADY;
2826 aux.tp_len = PACKET_SKB_CB(skb)->origlen;
2827 aux.tp_snaplen = skb->len;
2828 aux.tp_mac = 0;
Arnaldo Carvalho de Melobbe735e2007-03-10 22:16:10 -03002829 aux.tp_net = skb_network_offset(skb);
Ben Greeara3bcc232011-06-01 06:49:10 +00002830 if (vlan_tx_tag_present(skb)) {
2831 aux.tp_vlan_tci = vlan_tx_tag_get(skb);
2832 aux.tp_status |= TP_STATUS_VLAN_VALID;
2833 } else {
2834 aux.tp_vlan_tci = 0;
2835 }
Eric Dumazet13fcb7b2011-06-06 22:42:06 -07002836 aux.tp_padding = 0;
Herbert Xuffbc6112007-02-04 23:33:10 -08002837 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
Herbert Xu8dc41942007-02-04 23:31:32 -08002838 }
2839
Linus Torvalds1da177e2005-04-16 15:20:36 -07002840 /*
2841 * Free or return the buffer as appropriate. Again this
2842 * hides all the races and re-entrancy issues from us.
2843 */
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002844 err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002845
2846out_free:
2847 skb_free_datagram(sk, skb);
2848out:
2849 return err;
2850}
2851
Linus Torvalds1da177e2005-04-16 15:20:36 -07002852static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
2853 int *uaddr_len, int peer)
2854{
2855 struct net_device *dev;
2856 struct sock *sk = sock->sk;
2857
2858 if (peer)
2859 return -EOPNOTSUPP;
2860
2861 uaddr->sa_family = AF_PACKET;
Eric Dumazet654d1f82009-11-02 10:43:32 +01002862 rcu_read_lock();
2863 dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
2864 if (dev)
Vasiliy Kulikov67286642010-11-10 12:09:10 -08002865 strncpy(uaddr->sa_data, dev->name, 14);
Eric Dumazet654d1f82009-11-02 10:43:32 +01002866 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002867 memset(uaddr->sa_data, 0, 14);
Eric Dumazet654d1f82009-11-02 10:43:32 +01002868 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002869 *uaddr_len = sizeof(*uaddr);
2870
2871 return 0;
2872}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002873
2874static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
2875 int *uaddr_len, int peer)
2876{
2877 struct net_device *dev;
2878 struct sock *sk = sock->sk;
2879 struct packet_sock *po = pkt_sk(sk);
Cyrill Gorcunov13cfa972009-11-08 05:51:19 +00002880 DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002881
2882 if (peer)
2883 return -EOPNOTSUPP;
2884
2885 sll->sll_family = AF_PACKET;
2886 sll->sll_ifindex = po->ifindex;
2887 sll->sll_protocol = po->num;
Vasiliy Kulikov67286642010-11-10 12:09:10 -08002888 sll->sll_pkttype = 0;
Eric Dumazet654d1f82009-11-02 10:43:32 +01002889 rcu_read_lock();
2890 dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002891 if (dev) {
2892 sll->sll_hatype = dev->type;
2893 sll->sll_halen = dev->addr_len;
2894 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002895 } else {
2896 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */
2897 sll->sll_halen = 0;
2898 }
Eric Dumazet654d1f82009-11-02 10:43:32 +01002899 rcu_read_unlock();
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002900 *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002901
2902 return 0;
2903}
2904
Wang Chen2aeb0b82008-07-14 20:49:46 -07002905static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
2906 int what)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002907{
2908 switch (i->type) {
2909 case PACKET_MR_MULTICAST:
Jiri Pirko11625632010-03-02 20:40:01 +00002910 if (i->alen != dev->addr_len)
2911 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002912 if (what > 0)
Jiri Pirko22bedad32010-04-01 21:22:57 +00002913 return dev_mc_add(dev, i->addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002914 else
Jiri Pirko22bedad32010-04-01 21:22:57 +00002915 return dev_mc_del(dev, i->addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002916 break;
2917 case PACKET_MR_PROMISC:
Wang Chen2aeb0b82008-07-14 20:49:46 -07002918 return dev_set_promiscuity(dev, what);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002919 break;
2920 case PACKET_MR_ALLMULTI:
Wang Chen2aeb0b82008-07-14 20:49:46 -07002921 return dev_set_allmulti(dev, what);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002922 break;
Eric W. Biedermand95ed922009-05-19 18:27:17 +00002923 case PACKET_MR_UNICAST:
Jiri Pirko11625632010-03-02 20:40:01 +00002924 if (i->alen != dev->addr_len)
2925 return -EINVAL;
Eric W. Biedermand95ed922009-05-19 18:27:17 +00002926 if (what > 0)
Jiri Pirkoa748ee22010-04-01 21:22:09 +00002927 return dev_uc_add(dev, i->addr);
Eric W. Biedermand95ed922009-05-19 18:27:17 +00002928 else
Jiri Pirkoa748ee22010-04-01 21:22:09 +00002929 return dev_uc_del(dev, i->addr);
Eric W. Biedermand95ed922009-05-19 18:27:17 +00002930 break;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002931 default:
2932 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002933 }
Wang Chen2aeb0b82008-07-14 20:49:46 -07002934 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002935}
2936
2937static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
2938{
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002939 for ( ; i; i = i->next) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002940 if (i->ifindex == dev->ifindex)
2941 packet_dev_mc(dev, i, what);
2942 }
2943}
2944
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002945static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002946{
2947 struct packet_sock *po = pkt_sk(sk);
2948 struct packet_mclist *ml, *i;
2949 struct net_device *dev;
2950 int err;
2951
2952 rtnl_lock();
2953
2954 err = -ENODEV;
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002955 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002956 if (!dev)
2957 goto done;
2958
2959 err = -EINVAL;
Jiri Pirko11625632010-03-02 20:40:01 +00002960 if (mreq->mr_alen > dev->addr_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002961 goto done;
2962
2963 err = -ENOBUFS;
Kris Katterjohn8b3a7002006-01-11 15:56:43 -08002964 i = kmalloc(sizeof(*i), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002965 if (i == NULL)
2966 goto done;
2967
2968 err = 0;
2969 for (ml = po->mclist; ml; ml = ml->next) {
2970 if (ml->ifindex == mreq->mr_ifindex &&
2971 ml->type == mreq->mr_type &&
2972 ml->alen == mreq->mr_alen &&
2973 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
2974 ml->count++;
2975 /* Free the new element ... */
2976 kfree(i);
2977 goto done;
2978 }
2979 }
2980
2981 i->type = mreq->mr_type;
2982 i->ifindex = mreq->mr_ifindex;
2983 i->alen = mreq->mr_alen;
2984 memcpy(i->addr, mreq->mr_address, i->alen);
2985 i->count = 1;
2986 i->next = po->mclist;
2987 po->mclist = i;
Wang Chen2aeb0b82008-07-14 20:49:46 -07002988 err = packet_dev_mc(dev, i, 1);
2989 if (err) {
2990 po->mclist = i->next;
2991 kfree(i);
2992 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002993
2994done:
2995 rtnl_unlock();
2996 return err;
2997}
2998
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002999static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003000{
3001 struct packet_mclist *ml, **mlp;
3002
3003 rtnl_lock();
3004
3005 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3006 if (ml->ifindex == mreq->mr_ifindex &&
3007 ml->type == mreq->mr_type &&
3008 ml->alen == mreq->mr_alen &&
3009 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3010 if (--ml->count == 0) {
3011 struct net_device *dev;
3012 *mlp = ml->next;
Eric Dumazetad959e72009-10-16 06:38:46 +00003013 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3014 if (dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003015 packet_dev_mc(dev, ml, -1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003016 kfree(ml);
3017 }
3018 rtnl_unlock();
3019 return 0;
3020 }
3021 }
3022 rtnl_unlock();
3023 return -EADDRNOTAVAIL;
3024}
3025
3026static void packet_flush_mclist(struct sock *sk)
3027{
3028 struct packet_sock *po = pkt_sk(sk);
3029 struct packet_mclist *ml;
3030
3031 if (!po->mclist)
3032 return;
3033
3034 rtnl_lock();
3035 while ((ml = po->mclist) != NULL) {
3036 struct net_device *dev;
3037
3038 po->mclist = ml->next;
Eric Dumazetad959e72009-10-16 06:38:46 +00003039 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3040 if (dev != NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003041 packet_dev_mc(dev, ml, -1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003042 kfree(ml);
3043 }
3044 rtnl_unlock();
3045}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003046
3047static int
David S. Millerb7058842009-09-30 16:12:20 -07003048packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003049{
3050 struct sock *sk = sock->sk;
Herbert Xu8dc41942007-02-04 23:31:32 -08003051 struct packet_sock *po = pkt_sk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003052 int ret;
3053
3054 if (level != SOL_PACKET)
3055 return -ENOPROTOOPT;
3056
Johann Baudy69e3c752009-05-18 22:11:22 -07003057 switch (optname) {
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003058 case PACKET_ADD_MEMBERSHIP:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003059 case PACKET_DROP_MEMBERSHIP:
3060 {
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07003061 struct packet_mreq_max mreq;
3062 int len = optlen;
3063 memset(&mreq, 0, sizeof(mreq));
3064 if (len < sizeof(struct packet_mreq))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003065 return -EINVAL;
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07003066 if (len > sizeof(mreq))
3067 len = sizeof(mreq);
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003068 if (copy_from_user(&mreq, optval, len))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003069 return -EFAULT;
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07003070 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3071 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003072 if (optname == PACKET_ADD_MEMBERSHIP)
3073 ret = packet_mc_add(sk, &mreq);
3074 else
3075 ret = packet_mc_drop(sk, &mreq);
3076 return ret;
3077 }
David S. Millera2efcfa2007-05-29 13:12:50 -07003078
Linus Torvalds1da177e2005-04-16 15:20:36 -07003079 case PACKET_RX_RING:
Johann Baudy69e3c752009-05-18 22:11:22 -07003080 case PACKET_TX_RING:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003081 {
chetan lokef6fb8f12011-08-19 10:18:16 +00003082 union tpacket_req_u req_u;
3083 int len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003084
chetan lokef6fb8f12011-08-19 10:18:16 +00003085 switch (po->tp_version) {
3086 case TPACKET_V1:
3087 case TPACKET_V2:
3088 len = sizeof(req_u.req);
3089 break;
3090 case TPACKET_V3:
3091 default:
3092 len = sizeof(req_u.req3);
3093 break;
3094 }
3095 if (optlen < len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003096 return -EINVAL;
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08003097 if (pkt_sk(sk)->has_vnet_hdr)
3098 return -EINVAL;
chetan lokef6fb8f12011-08-19 10:18:16 +00003099 if (copy_from_user(&req_u.req, optval, len))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003100 return -EFAULT;
chetan lokef6fb8f12011-08-19 10:18:16 +00003101 return packet_set_ring(sk, &req_u, 0,
3102 optname == PACKET_TX_RING);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003103 }
3104 case PACKET_COPY_THRESH:
3105 {
3106 int val;
3107
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003108 if (optlen != sizeof(val))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003109 return -EINVAL;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003110 if (copy_from_user(&val, optval, sizeof(val)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003111 return -EFAULT;
3112
3113 pkt_sk(sk)->copy_thresh = val;
3114 return 0;
3115 }
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003116 case PACKET_VERSION:
3117 {
3118 int val;
3119
3120 if (optlen != sizeof(val))
3121 return -EINVAL;
Johann Baudy69e3c752009-05-18 22:11:22 -07003122 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003123 return -EBUSY;
3124 if (copy_from_user(&val, optval, sizeof(val)))
3125 return -EFAULT;
3126 switch (val) {
3127 case TPACKET_V1:
3128 case TPACKET_V2:
chetan lokef6fb8f12011-08-19 10:18:16 +00003129 case TPACKET_V3:
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003130 po->tp_version = val;
3131 return 0;
3132 default:
3133 return -EINVAL;
3134 }
3135 }
Patrick McHardy8913336a2008-07-18 18:05:19 -07003136 case PACKET_RESERVE:
3137 {
3138 unsigned int val;
3139
3140 if (optlen != sizeof(val))
3141 return -EINVAL;
Johann Baudy69e3c752009-05-18 22:11:22 -07003142 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
Patrick McHardy8913336a2008-07-18 18:05:19 -07003143 return -EBUSY;
3144 if (copy_from_user(&val, optval, sizeof(val)))
3145 return -EFAULT;
3146 po->tp_reserve = val;
3147 return 0;
3148 }
Johann Baudy69e3c752009-05-18 22:11:22 -07003149 case PACKET_LOSS:
3150 {
3151 unsigned int val;
3152
3153 if (optlen != sizeof(val))
3154 return -EINVAL;
3155 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3156 return -EBUSY;
3157 if (copy_from_user(&val, optval, sizeof(val)))
3158 return -EFAULT;
3159 po->tp_loss = !!val;
3160 return 0;
3161 }
Herbert Xu8dc41942007-02-04 23:31:32 -08003162 case PACKET_AUXDATA:
3163 {
3164 int val;
3165
3166 if (optlen < sizeof(val))
3167 return -EINVAL;
3168 if (copy_from_user(&val, optval, sizeof(val)))
3169 return -EFAULT;
3170
3171 po->auxdata = !!val;
3172 return 0;
3173 }
Peter P. Waskiewicz Jr80feaac2007-04-20 16:05:39 -07003174 case PACKET_ORIGDEV:
3175 {
3176 int val;
3177
3178 if (optlen < sizeof(val))
3179 return -EINVAL;
3180 if (copy_from_user(&val, optval, sizeof(val)))
3181 return -EFAULT;
3182
3183 po->origdev = !!val;
3184 return 0;
3185 }
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08003186 case PACKET_VNET_HDR:
3187 {
3188 int val;
3189
3190 if (sock->type != SOCK_RAW)
3191 return -EINVAL;
3192 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3193 return -EBUSY;
3194 if (optlen < sizeof(val))
3195 return -EINVAL;
3196 if (copy_from_user(&val, optval, sizeof(val)))
3197 return -EFAULT;
3198
3199 po->has_vnet_hdr = !!val;
3200 return 0;
3201 }
Scott McMillan614f60f2010-06-02 05:53:56 -07003202 case PACKET_TIMESTAMP:
3203 {
3204 int val;
3205
3206 if (optlen != sizeof(val))
3207 return -EINVAL;
3208 if (copy_from_user(&val, optval, sizeof(val)))
3209 return -EFAULT;
3210
3211 po->tp_tstamp = val;
3212 return 0;
3213 }
David S. Millerdc99f602011-07-05 01:45:05 -07003214 case PACKET_FANOUT:
3215 {
3216 int val;
3217
3218 if (optlen != sizeof(val))
3219 return -EINVAL;
3220 if (copy_from_user(&val, optval, sizeof(val)))
3221 return -EFAULT;
3222
3223 return fanout_add(sk, val & 0xffff, val >> 16);
3224 }
Paul Chavent5920cd3a2012-11-06 23:10:47 +00003225 case PACKET_TX_HAS_OFF:
3226 {
3227 unsigned int val;
3228
3229 if (optlen != sizeof(val))
3230 return -EINVAL;
3231 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3232 return -EBUSY;
3233 if (copy_from_user(&val, optval, sizeof(val)))
3234 return -EFAULT;
3235 po->tp_tx_has_off = !!val;
3236 return 0;
3237 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003238 default:
3239 return -ENOPROTOOPT;
3240 }
3241}
3242
3243static int packet_getsockopt(struct socket *sock, int level, int optname,
3244 char __user *optval, int __user *optlen)
3245{
3246 int len;
Eric Dumazetc06fff62012-04-19 21:56:11 +00003247 int val, lv = sizeof(val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003248 struct sock *sk = sock->sk;
3249 struct packet_sock *po = pkt_sk(sk);
Eric Dumazetc06fff62012-04-19 21:56:11 +00003250 void *data = &val;
Herbert Xu8dc41942007-02-04 23:31:32 -08003251 struct tpacket_stats st;
chetan lokef6fb8f12011-08-19 10:18:16 +00003252 union tpacket_stats_u st_u;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003253
3254 if (level != SOL_PACKET)
3255 return -ENOPROTOOPT;
3256
Kris Katterjohn8ae55f02006-01-23 16:28:02 -08003257 if (get_user(len, optlen))
3258 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003259
3260 if (len < 0)
3261 return -EINVAL;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003262
Johann Baudy69e3c752009-05-18 22:11:22 -07003263 switch (optname) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003264 case PACKET_STATISTICS:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003265 spin_lock_bh(&sk->sk_receive_queue.lock);
chetan lokef6fb8f12011-08-19 10:18:16 +00003266 if (po->tp_version == TPACKET_V3) {
Eric Dumazetc06fff62012-04-19 21:56:11 +00003267 lv = sizeof(struct tpacket_stats_v3);
chetan lokef6fb8f12011-08-19 10:18:16 +00003268 memcpy(&st_u.stats3, &po->stats,
Eric Dumazetc06fff62012-04-19 21:56:11 +00003269 sizeof(struct tpacket_stats));
chetan lokef6fb8f12011-08-19 10:18:16 +00003270 st_u.stats3.tp_freeze_q_cnt =
Eric Dumazetc06fff62012-04-19 21:56:11 +00003271 po->stats_u.stats3.tp_freeze_q_cnt;
chetan lokef6fb8f12011-08-19 10:18:16 +00003272 st_u.stats3.tp_packets += po->stats.tp_drops;
3273 data = &st_u.stats3;
3274 } else {
Eric Dumazetc06fff62012-04-19 21:56:11 +00003275 lv = sizeof(struct tpacket_stats);
chetan lokef6fb8f12011-08-19 10:18:16 +00003276 st = po->stats;
3277 st.tp_packets += st.tp_drops;
3278 data = &st;
3279 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003280 memset(&po->stats, 0, sizeof(st));
3281 spin_unlock_bh(&sk->sk_receive_queue.lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003282 break;
Herbert Xu8dc41942007-02-04 23:31:32 -08003283 case PACKET_AUXDATA:
Herbert Xu8dc41942007-02-04 23:31:32 -08003284 val = po->auxdata;
Herbert Xu8dc41942007-02-04 23:31:32 -08003285 break;
Peter P. Waskiewicz Jr80feaac2007-04-20 16:05:39 -07003286 case PACKET_ORIGDEV:
Peter P. Waskiewicz Jr80feaac2007-04-20 16:05:39 -07003287 val = po->origdev;
Peter P. Waskiewicz Jr80feaac2007-04-20 16:05:39 -07003288 break;
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08003289 case PACKET_VNET_HDR:
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08003290 val = po->has_vnet_hdr;
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08003291 break;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003292 case PACKET_VERSION:
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003293 val = po->tp_version;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003294 break;
3295 case PACKET_HDRLEN:
3296 if (len > sizeof(int))
3297 len = sizeof(int);
3298 if (copy_from_user(&val, optval, len))
3299 return -EFAULT;
3300 switch (val) {
3301 case TPACKET_V1:
3302 val = sizeof(struct tpacket_hdr);
3303 break;
3304 case TPACKET_V2:
3305 val = sizeof(struct tpacket2_hdr);
3306 break;
chetan lokef6fb8f12011-08-19 10:18:16 +00003307 case TPACKET_V3:
3308 val = sizeof(struct tpacket3_hdr);
3309 break;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003310 default:
3311 return -EINVAL;
3312 }
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003313 break;
Patrick McHardy8913336a2008-07-18 18:05:19 -07003314 case PACKET_RESERVE:
Patrick McHardy8913336a2008-07-18 18:05:19 -07003315 val = po->tp_reserve;
Patrick McHardy8913336a2008-07-18 18:05:19 -07003316 break;
Johann Baudy69e3c752009-05-18 22:11:22 -07003317 case PACKET_LOSS:
Johann Baudy69e3c752009-05-18 22:11:22 -07003318 val = po->tp_loss;
Johann Baudy69e3c752009-05-18 22:11:22 -07003319 break;
Scott McMillan614f60f2010-06-02 05:53:56 -07003320 case PACKET_TIMESTAMP:
Scott McMillan614f60f2010-06-02 05:53:56 -07003321 val = po->tp_tstamp;
Scott McMillan614f60f2010-06-02 05:53:56 -07003322 break;
David S. Millerdc99f602011-07-05 01:45:05 -07003323 case PACKET_FANOUT:
David S. Millerdc99f602011-07-05 01:45:05 -07003324 val = (po->fanout ?
3325 ((u32)po->fanout->id |
Willem de Bruijn77f65eb2013-03-19 10:18:11 +00003326 ((u32)po->fanout->type << 16) |
3327 ((u32)po->fanout->flags << 24)) :
David S. Millerdc99f602011-07-05 01:45:05 -07003328 0);
David S. Millerdc99f602011-07-05 01:45:05 -07003329 break;
Paul Chavent5920cd3a2012-11-06 23:10:47 +00003330 case PACKET_TX_HAS_OFF:
3331 val = po->tp_tx_has_off;
3332 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003333 default:
3334 return -ENOPROTOOPT;
3335 }
3336
Eric Dumazetc06fff62012-04-19 21:56:11 +00003337 if (len > lv)
3338 len = lv;
Kris Katterjohn8ae55f02006-01-23 16:28:02 -08003339 if (put_user(len, optlen))
3340 return -EFAULT;
Herbert Xu8dc41942007-02-04 23:31:32 -08003341 if (copy_to_user(optval, data, len))
3342 return -EFAULT;
Kris Katterjohn8ae55f02006-01-23 16:28:02 -08003343 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003344}
3345
3346
3347static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
3348{
3349 struct sock *sk;
Jason Lunzad930652007-02-20 23:19:54 -08003350 struct net_device *dev = data;
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09003351 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003352
stephen hemminger808f5112010-02-22 07:57:18 +00003353 rcu_read_lock();
Sasha Levinb67bfe02013-02-27 17:06:00 -08003354 sk_for_each_rcu(sk, &net->packet.sklist) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003355 struct packet_sock *po = pkt_sk(sk);
3356
3357 switch (msg) {
3358 case NETDEV_UNREGISTER:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003359 if (po->mclist)
3360 packet_dev_mclist(dev, po->mclist, -1);
David S. Millera2efcfa2007-05-29 13:12:50 -07003361 /* fallthrough */
3362
Linus Torvalds1da177e2005-04-16 15:20:36 -07003363 case NETDEV_DOWN:
3364 if (dev->ifindex == po->ifindex) {
3365 spin_lock(&po->bind_lock);
3366 if (po->running) {
David S. Millerce06b032011-07-04 01:44:29 -07003367 __unregister_prot_hook(sk, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003368 sk->sk_err = ENETDOWN;
3369 if (!sock_flag(sk, SOCK_DEAD))
3370 sk->sk_error_report(sk);
3371 }
3372 if (msg == NETDEV_UNREGISTER) {
3373 po->ifindex = -1;
Ben Greear160ff182011-06-01 07:18:52 +00003374 if (po->prot_hook.dev)
3375 dev_put(po->prot_hook.dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003376 po->prot_hook.dev = NULL;
3377 }
3378 spin_unlock(&po->bind_lock);
3379 }
3380 break;
3381 case NETDEV_UP:
stephen hemminger808f5112010-02-22 07:57:18 +00003382 if (dev->ifindex == po->ifindex) {
3383 spin_lock(&po->bind_lock);
David S. Millerce06b032011-07-04 01:44:29 -07003384 if (po->num)
3385 register_prot_hook(sk);
stephen hemminger808f5112010-02-22 07:57:18 +00003386 spin_unlock(&po->bind_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003387 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003388 break;
3389 }
3390 }
stephen hemminger808f5112010-02-22 07:57:18 +00003391 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003392 return NOTIFY_DONE;
3393}
3394
3395
3396static int packet_ioctl(struct socket *sock, unsigned int cmd,
3397 unsigned long arg)
3398{
3399 struct sock *sk = sock->sk;
3400
Johann Baudy69e3c752009-05-18 22:11:22 -07003401 switch (cmd) {
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003402 case SIOCOUTQ:
3403 {
3404 int amount = sk_wmem_alloc_get(sk);
Eric Dumazet31e6d362009-06-17 19:05:41 -07003405
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003406 return put_user(amount, (int __user *)arg);
3407 }
3408 case SIOCINQ:
3409 {
3410 struct sk_buff *skb;
3411 int amount = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003412
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003413 spin_lock_bh(&sk->sk_receive_queue.lock);
3414 skb = skb_peek(&sk->sk_receive_queue);
3415 if (skb)
3416 amount = skb->len;
3417 spin_unlock_bh(&sk->sk_receive_queue.lock);
3418 return put_user(amount, (int __user *)arg);
3419 }
3420 case SIOCGSTAMP:
3421 return sock_get_timestamp(sk, (struct timeval __user *)arg);
3422 case SIOCGSTAMPNS:
3423 return sock_get_timestampns(sk, (struct timespec __user *)arg);
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003424
Linus Torvalds1da177e2005-04-16 15:20:36 -07003425#ifdef CONFIG_INET
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003426 case SIOCADDRT:
3427 case SIOCDELRT:
3428 case SIOCDARP:
3429 case SIOCGARP:
3430 case SIOCSARP:
3431 case SIOCGIFADDR:
3432 case SIOCSIFADDR:
3433 case SIOCGIFBRDADDR:
3434 case SIOCSIFBRDADDR:
3435 case SIOCGIFNETMASK:
3436 case SIOCSIFNETMASK:
3437 case SIOCGIFDSTADDR:
3438 case SIOCSIFDSTADDR:
3439 case SIOCSIFFLAGS:
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003440 return inet_dgram_ops.ioctl(sock, cmd, arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003441#endif
3442
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003443 default:
3444 return -ENOIOCTLCMD;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003445 }
3446 return 0;
3447}
3448
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003449static unsigned int packet_poll(struct file *file, struct socket *sock,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003450 poll_table *wait)
3451{
3452 struct sock *sk = sock->sk;
3453 struct packet_sock *po = pkt_sk(sk);
3454 unsigned int mask = datagram_poll(file, sock, wait);
3455
3456 spin_lock_bh(&sk->sk_receive_queue.lock);
Johann Baudy69e3c752009-05-18 22:11:22 -07003457 if (po->rx_ring.pg_vec) {
chetan lokef6fb8f12011-08-19 10:18:16 +00003458 if (!packet_previous_rx_frame(po, &po->rx_ring,
3459 TP_STATUS_KERNEL))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003460 mask |= POLLIN | POLLRDNORM;
3461 }
3462 spin_unlock_bh(&sk->sk_receive_queue.lock);
Johann Baudy69e3c752009-05-18 22:11:22 -07003463 spin_lock_bh(&sk->sk_write_queue.lock);
3464 if (po->tx_ring.pg_vec) {
3465 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
3466 mask |= POLLOUT | POLLWRNORM;
3467 }
3468 spin_unlock_bh(&sk->sk_write_queue.lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003469 return mask;
3470}
3471
3472
3473/* Dirty? Well, I still did not learn better way to account
3474 * for user mmaps.
3475 */
3476
3477static void packet_mm_open(struct vm_area_struct *vma)
3478{
3479 struct file *file = vma->vm_file;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003480 struct socket *sock = file->private_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003481 struct sock *sk = sock->sk;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003482
Linus Torvalds1da177e2005-04-16 15:20:36 -07003483 if (sk)
3484 atomic_inc(&pkt_sk(sk)->mapped);
3485}
3486
3487static void packet_mm_close(struct vm_area_struct *vma)
3488{
3489 struct file *file = vma->vm_file;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003490 struct socket *sock = file->private_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003491 struct sock *sk = sock->sk;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003492
Linus Torvalds1da177e2005-04-16 15:20:36 -07003493 if (sk)
3494 atomic_dec(&pkt_sk(sk)->mapped);
3495}
3496
Alexey Dobriyanf0f37e2f2009-09-27 22:29:37 +04003497static const struct vm_operations_struct packet_mmap_ops = {
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003498 .open = packet_mm_open,
3499 .close = packet_mm_close,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003500};
3501
Neil Horman0e3125c2010-11-16 10:26:47 -08003502static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
3503 unsigned int len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003504{
3505 int i;
3506
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003507 for (i = 0; i < len; i++) {
Neil Horman0e3125c2010-11-16 10:26:47 -08003508 if (likely(pg_vec[i].buffer)) {
Changli Gaoc56b4d92010-12-01 02:52:57 +00003509 if (is_vmalloc_addr(pg_vec[i].buffer))
Neil Horman0e3125c2010-11-16 10:26:47 -08003510 vfree(pg_vec[i].buffer);
3511 else
3512 free_pages((unsigned long)pg_vec[i].buffer,
3513 order);
3514 pg_vec[i].buffer = NULL;
3515 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003516 }
3517 kfree(pg_vec);
3518}
3519
Olof Johanssoneea49cc92011-11-02 11:00:49 +00003520static char *alloc_one_pg_vec_page(unsigned long order)
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003521{
Neil Horman0e3125c2010-11-16 10:26:47 -08003522 char *buffer = NULL;
3523 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
3524 __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
Eric Dumazet719bfea2009-04-15 03:39:52 -07003525
Neil Horman0e3125c2010-11-16 10:26:47 -08003526 buffer = (char *) __get_free_pages(gfp_flags, order);
3527
3528 if (buffer)
3529 return buffer;
3530
3531 /*
3532 * __get_free_pages failed, fall back to vmalloc
3533 */
Eric Dumazetbbce5a52010-11-20 07:31:54 +00003534 buffer = vzalloc((1 << order) * PAGE_SIZE);
Neil Horman0e3125c2010-11-16 10:26:47 -08003535
3536 if (buffer)
3537 return buffer;
3538
3539 /*
3540 * vmalloc failed, lets dig into swap here
3541 */
Neil Horman0e3125c2010-11-16 10:26:47 -08003542 gfp_flags &= ~__GFP_NORETRY;
3543 buffer = (char *)__get_free_pages(gfp_flags, order);
3544 if (buffer)
3545 return buffer;
3546
3547 /*
3548 * complete and utter failure
3549 */
3550 return NULL;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003551}
3552
Neil Horman0e3125c2010-11-16 10:26:47 -08003553static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003554{
3555 unsigned int block_nr = req->tp_block_nr;
Neil Horman0e3125c2010-11-16 10:26:47 -08003556 struct pgv *pg_vec;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003557 int i;
3558
Neil Horman0e3125c2010-11-16 10:26:47 -08003559 pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003560 if (unlikely(!pg_vec))
3561 goto out;
3562
3563 for (i = 0; i < block_nr; i++) {
Changli Gaoc56b4d92010-12-01 02:52:57 +00003564 pg_vec[i].buffer = alloc_one_pg_vec_page(order);
Neil Horman0e3125c2010-11-16 10:26:47 -08003565 if (unlikely(!pg_vec[i].buffer))
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003566 goto out_free_pgvec;
3567 }
3568
3569out:
3570 return pg_vec;
3571
3572out_free_pgvec:
3573 free_pg_vec(pg_vec, order, block_nr);
3574 pg_vec = NULL;
3575 goto out;
3576}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003577
chetan lokef6fb8f12011-08-19 10:18:16 +00003578static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
Johann Baudy69e3c752009-05-18 22:11:22 -07003579 int closing, int tx_ring)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003580{
Neil Horman0e3125c2010-11-16 10:26:47 -08003581 struct pgv *pg_vec = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003582 struct packet_sock *po = pkt_sk(sk);
Al Viro0e11c912006-11-08 00:26:29 -08003583 int was_running, order = 0;
Johann Baudy69e3c752009-05-18 22:11:22 -07003584 struct packet_ring_buffer *rb;
3585 struct sk_buff_head *rb_queue;
Al Viro0e11c912006-11-08 00:26:29 -08003586 __be16 num;
chetan lokef6fb8f12011-08-19 10:18:16 +00003587 int err = -EINVAL;
3588 /* Added to avoid minimal code churn */
3589 struct tpacket_req *req = &req_u->req;
3590
3591 /* Opening a Tx-ring is NOT supported in TPACKET_V3 */
3592 if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
3593 WARN(1, "Tx-ring is not supported.\n");
3594 goto out;
3595 }
Johann Baudy69e3c752009-05-18 22:11:22 -07003596
3597 rb = tx_ring ? &po->tx_ring : &po->rx_ring;
3598 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
3599
3600 err = -EBUSY;
3601 if (!closing) {
3602 if (atomic_read(&po->mapped))
3603 goto out;
3604 if (atomic_read(&rb->pending))
3605 goto out;
3606 }
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003607
Linus Torvalds1da177e2005-04-16 15:20:36 -07003608 if (req->tp_block_nr) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003609 /* Sanity tests and some calculations */
Johann Baudy69e3c752009-05-18 22:11:22 -07003610 err = -EBUSY;
3611 if (unlikely(rb->pg_vec))
3612 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003613
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003614 switch (po->tp_version) {
3615 case TPACKET_V1:
3616 po->tp_hdrlen = TPACKET_HDRLEN;
3617 break;
3618 case TPACKET_V2:
3619 po->tp_hdrlen = TPACKET2_HDRLEN;
3620 break;
chetan lokef6fb8f12011-08-19 10:18:16 +00003621 case TPACKET_V3:
3622 po->tp_hdrlen = TPACKET3_HDRLEN;
3623 break;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003624 }
3625
Johann Baudy69e3c752009-05-18 22:11:22 -07003626 err = -EINVAL;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003627 if (unlikely((int)req->tp_block_size <= 0))
Johann Baudy69e3c752009-05-18 22:11:22 -07003628 goto out;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003629 if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
Johann Baudy69e3c752009-05-18 22:11:22 -07003630 goto out;
Patrick McHardy8913336a2008-07-18 18:05:19 -07003631 if (unlikely(req->tp_frame_size < po->tp_hdrlen +
Johann Baudy69e3c752009-05-18 22:11:22 -07003632 po->tp_reserve))
3633 goto out;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003634 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
Johann Baudy69e3c752009-05-18 22:11:22 -07003635 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003636
Johann Baudy69e3c752009-05-18 22:11:22 -07003637 rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
3638 if (unlikely(rb->frames_per_block <= 0))
3639 goto out;
3640 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
3641 req->tp_frame_nr))
3642 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003643
3644 err = -ENOMEM;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003645 order = get_order(req->tp_block_size);
3646 pg_vec = alloc_pg_vec(req, order);
3647 if (unlikely(!pg_vec))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003648 goto out;
chetan lokef6fb8f12011-08-19 10:18:16 +00003649 switch (po->tp_version) {
3650 case TPACKET_V3:
3651 /* Transmit path is not supported. We checked
3652 * it above but just being paranoid
3653 */
3654 if (!tx_ring)
3655 init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring);
3656 break;
3657 default:
3658 break;
3659 }
Johann Baudy69e3c752009-05-18 22:11:22 -07003660 }
3661 /* Done */
3662 else {
3663 err = -EINVAL;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003664 if (unlikely(req->tp_frame_nr))
Johann Baudy69e3c752009-05-18 22:11:22 -07003665 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003666 }
3667
3668 lock_sock(sk);
3669
3670 /* Detach socket from network */
3671 spin_lock(&po->bind_lock);
3672 was_running = po->running;
3673 num = po->num;
3674 if (was_running) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003675 po->num = 0;
David S. Millerce06b032011-07-04 01:44:29 -07003676 __unregister_prot_hook(sk, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003677 }
3678 spin_unlock(&po->bind_lock);
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003679
Linus Torvalds1da177e2005-04-16 15:20:36 -07003680 synchronize_net();
3681
3682 err = -EBUSY;
Herbert Xu905db442009-01-30 14:12:06 -08003683 mutex_lock(&po->pg_vec_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003684 if (closing || atomic_read(&po->mapped) == 0) {
3685 err = 0;
Johann Baudy69e3c752009-05-18 22:11:22 -07003686 spin_lock_bh(&rb_queue->lock);
Changli Gaoc053fd92010-12-10 16:02:20 -08003687 swap(rb->pg_vec, pg_vec);
Johann Baudy69e3c752009-05-18 22:11:22 -07003688 rb->frame_max = (req->tp_frame_nr - 1);
3689 rb->head = 0;
3690 rb->frame_size = req->tp_frame_size;
3691 spin_unlock_bh(&rb_queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003692
Changli Gaoc053fd92010-12-10 16:02:20 -08003693 swap(rb->pg_vec_order, order);
3694 swap(rb->pg_vec_len, req->tp_block_nr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003695
Johann Baudy69e3c752009-05-18 22:11:22 -07003696 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
3697 po->prot_hook.func = (po->rx_ring.pg_vec) ?
3698 tpacket_rcv : packet_rcv;
3699 skb_queue_purge(rb_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003700 if (atomic_read(&po->mapped))
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003701 pr_err("packet_mmap: vma is busy: %d\n",
3702 atomic_read(&po->mapped));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003703 }
Herbert Xu905db442009-01-30 14:12:06 -08003704 mutex_unlock(&po->pg_vec_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003705
3706 spin_lock(&po->bind_lock);
David S. Millerce06b032011-07-04 01:44:29 -07003707 if (was_running) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003708 po->num = num;
David S. Millerce06b032011-07-04 01:44:29 -07003709 register_prot_hook(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003710 }
3711 spin_unlock(&po->bind_lock);
chetan lokef6fb8f12011-08-19 10:18:16 +00003712 if (closing && (po->tp_version > TPACKET_V2)) {
3713 /* Because we don't support block-based V3 on tx-ring */
3714 if (!tx_ring)
3715 prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
3716 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003717 release_sock(sk);
3718
Linus Torvalds1da177e2005-04-16 15:20:36 -07003719 if (pg_vec)
3720 free_pg_vec(pg_vec, order, req->tp_block_nr);
3721out:
3722 return err;
3723}
3724
Johann Baudy69e3c752009-05-18 22:11:22 -07003725static int packet_mmap(struct file *file, struct socket *sock,
3726 struct vm_area_struct *vma)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003727{
3728 struct sock *sk = sock->sk;
3729 struct packet_sock *po = pkt_sk(sk);
Johann Baudy69e3c752009-05-18 22:11:22 -07003730 unsigned long size, expected_size;
3731 struct packet_ring_buffer *rb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003732 unsigned long start;
3733 int err = -EINVAL;
3734 int i;
3735
3736 if (vma->vm_pgoff)
3737 return -EINVAL;
3738
Herbert Xu905db442009-01-30 14:12:06 -08003739 mutex_lock(&po->pg_vec_lock);
Johann Baudy69e3c752009-05-18 22:11:22 -07003740
3741 expected_size = 0;
3742 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
3743 if (rb->pg_vec) {
3744 expected_size += rb->pg_vec_len
3745 * rb->pg_vec_pages
3746 * PAGE_SIZE;
3747 }
3748 }
3749
3750 if (expected_size == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003751 goto out;
Johann Baudy69e3c752009-05-18 22:11:22 -07003752
3753 size = vma->vm_end - vma->vm_start;
3754 if (size != expected_size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003755 goto out;
3756
Linus Torvalds1da177e2005-04-16 15:20:36 -07003757 start = vma->vm_start;
Johann Baudy69e3c752009-05-18 22:11:22 -07003758 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
3759 if (rb->pg_vec == NULL)
3760 continue;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003761
Johann Baudy69e3c752009-05-18 22:11:22 -07003762 for (i = 0; i < rb->pg_vec_len; i++) {
Neil Horman0e3125c2010-11-16 10:26:47 -08003763 struct page *page;
3764 void *kaddr = rb->pg_vec[i].buffer;
Johann Baudy69e3c752009-05-18 22:11:22 -07003765 int pg_num;
3766
Changli Gaoc56b4d92010-12-01 02:52:57 +00003767 for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
3768 page = pgv_to_page(kaddr);
Johann Baudy69e3c752009-05-18 22:11:22 -07003769 err = vm_insert_page(vma, start, page);
3770 if (unlikely(err))
3771 goto out;
3772 start += PAGE_SIZE;
Neil Horman0e3125c2010-11-16 10:26:47 -08003773 kaddr += PAGE_SIZE;
Johann Baudy69e3c752009-05-18 22:11:22 -07003774 }
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003775 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003776 }
Johann Baudy69e3c752009-05-18 22:11:22 -07003777
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003778 atomic_inc(&po->mapped);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003779 vma->vm_ops = &packet_mmap_ops;
3780 err = 0;
3781
3782out:
Herbert Xu905db442009-01-30 14:12:06 -08003783 mutex_unlock(&po->pg_vec_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003784 return err;
3785}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003786
Eric Dumazet90ddc4f2005-12-22 12:49:22 -08003787static const struct proto_ops packet_ops_spkt = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003788 .family = PF_PACKET,
3789 .owner = THIS_MODULE,
3790 .release = packet_release,
3791 .bind = packet_bind_spkt,
3792 .connect = sock_no_connect,
3793 .socketpair = sock_no_socketpair,
3794 .accept = sock_no_accept,
3795 .getname = packet_getname_spkt,
3796 .poll = datagram_poll,
3797 .ioctl = packet_ioctl,
3798 .listen = sock_no_listen,
3799 .shutdown = sock_no_shutdown,
3800 .setsockopt = sock_no_setsockopt,
3801 .getsockopt = sock_no_getsockopt,
3802 .sendmsg = packet_sendmsg_spkt,
3803 .recvmsg = packet_recvmsg,
3804 .mmap = sock_no_mmap,
3805 .sendpage = sock_no_sendpage,
3806};
Linus Torvalds1da177e2005-04-16 15:20:36 -07003807
Eric Dumazet90ddc4f2005-12-22 12:49:22 -08003808static const struct proto_ops packet_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003809 .family = PF_PACKET,
3810 .owner = THIS_MODULE,
3811 .release = packet_release,
3812 .bind = packet_bind,
3813 .connect = sock_no_connect,
3814 .socketpair = sock_no_socketpair,
3815 .accept = sock_no_accept,
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003816 .getname = packet_getname,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003817 .poll = packet_poll,
3818 .ioctl = packet_ioctl,
3819 .listen = sock_no_listen,
3820 .shutdown = sock_no_shutdown,
3821 .setsockopt = packet_setsockopt,
3822 .getsockopt = packet_getsockopt,
3823 .sendmsg = packet_sendmsg,
3824 .recvmsg = packet_recvmsg,
3825 .mmap = packet_mmap,
3826 .sendpage = sock_no_sendpage,
3827};
3828
Stephen Hemmingerec1b4cf2009-10-05 05:58:39 +00003829static const struct net_proto_family packet_family_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003830 .family = PF_PACKET,
3831 .create = packet_create,
3832 .owner = THIS_MODULE,
3833};
3834
3835static struct notifier_block packet_netdev_notifier = {
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003836 .notifier_call = packet_notifier,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003837};
3838
3839#ifdef CONFIG_PROC_FS
Linus Torvalds1da177e2005-04-16 15:20:36 -07003840
3841static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
stephen hemminger808f5112010-02-22 07:57:18 +00003842 __acquires(RCU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003843{
Denis V. Luneve372c412007-11-19 22:31:54 -08003844 struct net *net = seq_file_net(seq);
stephen hemminger808f5112010-02-22 07:57:18 +00003845
3846 rcu_read_lock();
3847 return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003848}
3849
3850static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3851{
Herbert Xu1bf40952007-12-16 14:04:02 -08003852 struct net *net = seq_file_net(seq);
stephen hemminger808f5112010-02-22 07:57:18 +00003853 return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003854}
3855
3856static void packet_seq_stop(struct seq_file *seq, void *v)
stephen hemminger808f5112010-02-22 07:57:18 +00003857 __releases(RCU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003858{
stephen hemminger808f5112010-02-22 07:57:18 +00003859 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003860}
3861
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003862static int packet_seq_show(struct seq_file *seq, void *v)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003863{
3864 if (v == SEQ_START_TOKEN)
3865 seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n");
3866 else {
Li Zefanb7ceabd2010-02-08 23:19:29 +00003867 struct sock *s = sk_entry(v);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003868 const struct packet_sock *po = pkt_sk(s);
3869
3870 seq_printf(seq,
Dan Rosenberg71338aa2011-05-23 12:17:35 +00003871 "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003872 s,
3873 atomic_read(&s->sk_refcnt),
3874 s->sk_type,
3875 ntohs(po->num),
3876 po->ifindex,
3877 po->running,
3878 atomic_read(&s->sk_rmem_alloc),
Eric W. Biedermana7cb5a42012-05-24 01:10:10 -06003879 from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003880 sock_i_ino(s));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003881 }
3882
3883 return 0;
3884}
3885
Philippe De Muyter56b3d972007-07-10 23:07:31 -07003886static const struct seq_operations packet_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003887 .start = packet_seq_start,
3888 .next = packet_seq_next,
3889 .stop = packet_seq_stop,
3890 .show = packet_seq_show,
3891};
3892
3893static int packet_seq_open(struct inode *inode, struct file *file)
3894{
Denis V. Luneve372c412007-11-19 22:31:54 -08003895 return seq_open_net(inode, file, &packet_seq_ops,
3896 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003897}
3898
Arjan van de Venda7071d2007-02-12 00:55:36 -08003899static const struct file_operations packet_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003900 .owner = THIS_MODULE,
3901 .open = packet_seq_open,
3902 .read = seq_read,
3903 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08003904 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003905};
3906
3907#endif
3908
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00003909static int __net_init packet_net_init(struct net *net)
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08003910{
Pavel Emelyanov0fa7fa92012-08-21 01:06:47 +00003911 mutex_init(&net->packet.sklist_lock);
Denis V. Lunev2aaef4e2007-12-11 04:19:54 -08003912 INIT_HLIST_HEAD(&net->packet.sklist);
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08003913
Gao fengd4beaa62013-02-18 01:34:54 +00003914 if (!proc_create("packet", 0, net->proc_net, &packet_seq_fops))
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08003915 return -ENOMEM;
3916
3917 return 0;
3918}
3919
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00003920static void __net_exit packet_net_exit(struct net *net)
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08003921{
Gao fengece31ff2013-02-18 01:34:56 +00003922 remove_proc_entry("packet", net->proc_net);
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08003923}
3924
3925static struct pernet_operations packet_net_ops = {
3926 .init = packet_net_init,
3927 .exit = packet_net_exit,
3928};
3929
3930
Linus Torvalds1da177e2005-04-16 15:20:36 -07003931static void __exit packet_exit(void)
3932{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003933 unregister_netdevice_notifier(&packet_netdev_notifier);
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08003934 unregister_pernet_subsys(&packet_net_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003935 sock_unregister(PF_PACKET);
3936 proto_unregister(&packet_proto);
3937}
3938
3939static int __init packet_init(void)
3940{
3941 int rc = proto_register(&packet_proto, 0);
3942
3943 if (rc != 0)
3944 goto out;
3945
3946 sock_register(&packet_family_ops);
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08003947 register_pernet_subsys(&packet_net_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003948 register_netdevice_notifier(&packet_netdev_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003949out:
3950 return rc;
3951}
3952
3953module_init(packet_init);
3954module_exit(packet_exit);
3955MODULE_LICENSE("GPL");
3956MODULE_ALIAS_NETPROTO(PF_PACKET);