// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *	Alexey Kuznetsov	:	Untied from IPv4 stack.
 *	Cyrus Durgin		:	Fixed kerneld for kmod.
 *	Michal Ostrowski	:	Module initialization cleanup.
 *	Ulises Alonso		:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *		Johann Baudy	:	Added TX RING.
 *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
 *					layer.
 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/ethtool.h>
#include <linux/filter.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <linux/percpu.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif
#include <linux/bpf.h>
#include <net/compat.h>
#include <linux/netfilter_netdev.h>

#include "internal.h"

/*
   Assumptions:
   - If the device has no dev->header_ops->create, there is no LL header
     visible above the device. In this case, its hard_header_len should be 0.
     The device may prepend its own header internally. In this case, its
     needed_headroom should be set to the space needed for it to add its
     internal header.
     For example, a WiFi driver pretending to be an Ethernet driver should
     set its hard_header_len to be the Ethernet header length, and set its
     needed_headroom to be (the real WiFi header length - the fake Ethernet
     header length).
   - a packet socket receives packets with the ll header already pulled,
     so SOCK_RAW must push it back.

   On receive:
   -----------

   Incoming, dev_has_header(dev) == true
     mac_header -> ll header
     data       -> data

   Outgoing, dev_has_header(dev) == true
     mac_header -> ll header
     data       -> ll header

   Incoming, dev_has_header(dev) == false
     mac_header -> data
       However drivers often make it point to the ll header.
       This is incorrect because the ll header should be invisible to us.
     data       -> data

   Outgoing, dev_has_header(dev) == false
     mac_header -> data. ll header is invisible to us.
     data       -> data

   Summary:
     If dev_has_header(dev) == false we are unable to restore the ll header,
     because it is invisible to us.


   On transmit:
   ------------

   dev_has_header(dev) == true
     mac_header -> ll header
     data       -> ll header

   dev_has_header(dev) == false (ll header is invisible to us)
     mac_header -> data
     data       -> data

   We should set network_header on output to the correct position,
   packet classifier depends on it.
 */

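/* Illustrative userspace sketch (not part of this file): the ll header
 * rules above are what distinguish SOCK_RAW from SOCK_DGRAM packet
 * sockets. A minimal example, assuming an Ethernet device; error
 * handling is omitted.
 *
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *	#include <net/ethernet.h>
 *	#include <arpa/inet.h>
 *
 *	// SOCK_RAW: reads start at the link-layer (Ethernet) header,
 *	// and on sendto() the caller must build that header itself.
 *	int raw = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *
 *	// SOCK_DGRAM: the kernel pulls the ll header on receive and
 *	// constructs it on transmit from the sockaddr_ll destination.
 *	int dgram = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_ALL));
 */
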
/* Private packet socket structures. */

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};
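
/* Userspace sketch (illustrative, not part of this file): the membership
 * API that packet_mreq_max backs. Promiscuous mode enabled this way is
 * tied to the socket's lifetime rather than a global interface flag.
 * "fd" is assumed to be an AF_PACKET socket; error handling is omitted.
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = if_nametoindex("eth0"),
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *		   &mreq, sizeof(mreq));
 */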

union tpacket_uhdr {
	struct tpacket_hdr  *h1;
	struct tpacket2_hdr *h2;
	struct tpacket3_hdr *h3;
	void *raw;
};

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring);

#define V3_ALIGNMENT	(8)

#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))

#define BLK_PLUS_PRIV(sz_of_priv) \
	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))

#define BLOCK_STATUS(x)		((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x)		((x)->offset_to_priv)

struct packet_sock;
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev);

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
			struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
		struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
		struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(struct timer_list *);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);
static u16 packet_pick_tx_queue(struct sk_buff *skb);

struct packet_skb_cb {
	union {
		struct sockaddr_pkt pkt;
		union {
			/* Trick: alias skb original length with
			 * ll.sll_family and ll.protocol in order
			 * to save room.
			 */
			unsigned int origlen;
			struct sockaddr_ll ll;
		};
	} sa;
};

#define vio_le() virtio_legacy_is_little_endian()

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))

#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
#define GET_PBLOCK_DESC(x, bid)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
#define GET_NEXT_PRB_BLK_NUM(x) \
	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
	((x)->kactive_blk_num+1) : 0)

static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);

#ifdef CONFIG_NETFILTER_EGRESS
static noinline struct sk_buff *nf_hook_direct_egress(struct sk_buff *skb)
{
	struct sk_buff *next, *head = NULL, *tail;
	int rc;

	rcu_read_lock();
	for (; skb != NULL; skb = next) {
		next = skb->next;
		skb_mark_not_on_list(skb);

		if (!nf_hook_egress(skb, &rc, skb->dev))
			continue;

		if (!head)
			head = skb;
		else
			tail->next = skb;

		tail = skb;
	}
	rcu_read_unlock();

	return head;
}
#endif

static int packet_direct_xmit(struct sk_buff *skb)
{
#ifdef CONFIG_NETFILTER_EGRESS
	if (nf_hook_egress_active()) {
		skb = nf_hook_direct_egress(skb);
		if (!skb)
			return NET_XMIT_DROP;
	}
#endif
	return dev_direct_xmit(skb, packet_pick_tx_queue(skb));
}

static struct net_device *packet_cached_dev_get(struct packet_sock *po)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(po->cached_dev);
	dev_hold(dev);
	rcu_read_unlock();

	return dev;
}

static void packet_cached_dev_assign(struct packet_sock *po,
				     struct net_device *dev)
{
	rcu_assign_pointer(po->cached_dev, dev);
}

static void packet_cached_dev_reset(struct packet_sock *po)
{
	RCU_INIT_POINTER(po->cached_dev, NULL);
}

static bool packet_use_direct_xmit(const struct packet_sock *po)
{
	return po->xmit == packet_direct_xmit;
}

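/* Userspace sketch (illustrative, not part of this file): how a socket
 * ends up on the packet_direct_xmit() path. PACKET_QDISC_BYPASS sends
 * frames straight to the driver, skipping the qdisc layer and its
 * buffering; "fd" is assumed to be an AF_PACKET socket.
 *
 *	int one = 1;
 *	setsockopt(fd, SOL_PACKET, PACKET_QDISC_BYPASS,
 *		   &one, sizeof(one));
 */
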
static u16 packet_pick_tx_queue(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	const struct net_device_ops *ops = dev->netdev_ops;
	int cpu = raw_smp_processor_id();
	u16 queue_index;

#ifdef CONFIG_XPS
	skb->sender_cpu = cpu + 1;
#endif
	skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues);
	if (ops->ndo_select_queue) {
		queue_index = ops->ndo_select_queue(dev, skb, NULL);
		queue_index = netdev_cap_txqueue(dev, queue_index);
	} else {
		queue_index = netdev_pick_tx(dev, skb, NULL);
	}

	return queue_index;
}

/* __register_prot_hook must be invoked through register_prot_hook
 * or from a context in which asynchronous accesses to the packet
 * socket are not possible (packet_create()).
 */
static void __register_prot_hook(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);

	if (!po->running) {
		if (po->fanout)
			__fanout_link(sk, po);
		else
			dev_add_pack(&po->prot_hook);

		sock_hold(sk);
		po->running = 1;
	}
}

static void register_prot_hook(struct sock *sk)
{
	lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
	__register_prot_hook(sk);
}

/* If the sync parameter is true, we will temporarily drop
 * the po->bind_lock and do a synchronize_net to make sure no
 * asynchronous packet processing paths still refer to the elements
 * of po->prot_hook. If the sync parameter is false, it is the
 * caller's responsibility to take care of this.
 */
static void __unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	lockdep_assert_held_once(&po->bind_lock);

	po->running = 0;

	if (po->fanout)
		__fanout_unlink(sk, po);
	else
		__dev_remove_pack(&po->prot_hook);

	__sock_put(sk);

	if (sync) {
		spin_unlock(&po->bind_lock);
		synchronize_net();
		spin_lock(&po->bind_lock);
	}
}

static void unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	if (po->running)
		__unregister_prot_hook(sk, sync);
}

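/* Userspace sketch (illustrative, not part of this file): the prot_hook
 * above is (re)registered when a packet socket binds to a device and/or
 * protocol. A minimal bind, assuming "ifindex" came from
 * if_nametoindex(); error handling is omitted.
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_ALL),
 *		.sll_ifindex  = ifindex,
 *	};
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 */
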
static inline struct page * __pure pgv_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}

static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
	union tpacket_uhdr h;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		break;
	case TPACKET_V2:
		h.h2->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		break;
	case TPACKET_V3:
		h.h3->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
		break;
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	smp_wmb();
}

static int __packet_get_status(const struct packet_sock *po, void *frame)
{
	union tpacket_uhdr h;

	smp_rmb();

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		return h.h1->tp_status;
	case TPACKET_V2:
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		return h.h2->tp_status;
	case TPACKET_V3:
		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
		return h.h3->tp_status;
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return 0;
	}
}

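/* Userspace sketch (illustrative, not part of this file): the other side
 * of the tp_status handshake above. The consumer waits for
 * TP_STATUS_USER, reads the frame, and hands it back with
 * TP_STATUS_KERNEL. "frame" is assumed to point into a mmap'd
 * TPACKET_V2 RX ring; consume() is a hypothetical placeholder.
 *
 *	volatile struct tpacket2_hdr *hdr = frame;
 *
 *	while (!(hdr->tp_status & TP_STATUS_USER))
 *		poll(&pfd, 1, -1);		// wait for the kernel
 *	__sync_synchronize();			// read side of smp_wmb() above
 *	consume((char *)frame + hdr->tp_mac, hdr->tp_snaplen);
 *	__sync_synchronize();
 *	hdr->tp_status = TP_STATUS_KERNEL;	// return frame to the kernel
 */
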
static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec64 *ts,
				   unsigned int flags)
{
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);

	if (shhwtstamps &&
	    (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
	    ktime_to_timespec64_cond(shhwtstamps->hwtstamp, ts))
		return TP_STATUS_TS_RAW_HARDWARE;

	if ((flags & SOF_TIMESTAMPING_SOFTWARE) &&
	    ktime_to_timespec64_cond(skb->tstamp, ts))
		return TP_STATUS_TS_SOFTWARE;

	return 0;
}

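/* Userspace sketch (illustrative, not part of this file): selecting which
 * of the timestamp sources above the ring frames report. Requesting both
 * lets tpacket_get_timestamp() prefer the NIC's hardware clock and fall
 * back to the software stamp; error handling is omitted.
 *
 *	int req = SOF_TIMESTAMPING_SOFTWARE |
 *		  SOF_TIMESTAMPING_RAW_HARDWARE;
 *	setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP,
 *		   &req, sizeof(req));
 */
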
static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
				    struct sk_buff *skb)
{
	union tpacket_uhdr h;
	struct timespec64 ts;
	__u32 ts_status;

	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
		return 0;

	h.raw = frame;
	/*
	 * versions 1 through 3 overflow the timestamps in y2106, since they
	 * all store the seconds in a 32-bit unsigned integer.
	 * If we create a version 4, that should have a 64-bit timestamp,
	 * either 64-bit seconds + 32-bit nanoseconds, or just 64-bit
	 * nanoseconds.
	 */
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_sec = ts.tv_sec;
		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
		break;
	case TPACKET_V2:
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		break;
	case TPACKET_V3:
		h.h3->tp_sec = ts.tv_sec;
		h.h3->tp_nsec = ts.tv_nsec;
		break;
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	/* one flush is safe, as both fields always lie on the same cacheline */
	flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
	smp_wmb();

	return ts_status;
}

static void *packet_lookup_frame(const struct packet_sock *po,
				 const struct packet_ring_buffer *rb,
				 unsigned int position,
				 int status)
{
	unsigned int pg_vec_pos, frame_offset;
	union tpacket_uhdr h;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos].buffer +
		(frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))
		return NULL;

	return h.raw;
}

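/* Worked example for the index arithmetic above (hypothetical ring
 * geometry): with tp_block_size = 4096 and tp_frame_size = 2048,
 * frames_per_block is 2. Looking up position 5 then gives
 *
 *	pg_vec_pos   = 5 / 2 = 2	(third block)
 *	frame_offset = 5 % 2 = 1	(second frame in that block)
 *
 * so the frame lives at pg_vec[2].buffer + 1 * 2048. Frames never
 * straddle blocks, which is why plain division works.
 */
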
static void *packet_current_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	return packet_lookup_frame(po, rb, rb->head, status);
}

static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	del_timer_sync(&pkc->retire_blk_timer);
}

static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
		struct sk_buff_head *rb_queue)
{
	struct tpacket_kbdq_core *pkc;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);

	spin_lock_bh(&rb_queue->lock);
	pkc->delete_blk_timer = 1;
	spin_unlock_bh(&rb_queue->lock);

	prb_del_retire_blk_timer(pkc);
}

static void prb_setup_retire_blk_timer(struct packet_sock *po)
{
	struct tpacket_kbdq_core *pkc;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	timer_setup(&pkc->retire_blk_timer, prb_retire_rx_blk_timer_expired,
		    0);
	pkc->retire_blk_timer.expires = jiffies;
}

static int prb_calc_retire_blk_tmo(struct packet_sock *po,
				   int blk_size_in_bytes)
{
	struct net_device *dev;
	unsigned int mbits, div;
	struct ethtool_link_ksettings ecmd;
	int err;

	rtnl_lock();
	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
	if (unlikely(!dev)) {
		rtnl_unlock();
		return DEFAULT_PRB_RETIRE_TOV;
	}
	err = __ethtool_get_link_ksettings(dev, &ecmd);
	rtnl_unlock();
	if (err)
		return DEFAULT_PRB_RETIRE_TOV;

	/* If the link speed is this slow, there is no real need to
	 * worry about perf anyway
	 */
	if (ecmd.base.speed < SPEED_1000 ||
	    ecmd.base.speed == SPEED_UNKNOWN)
		return DEFAULT_PRB_RETIRE_TOV;

	div = ecmd.base.speed / 1000;
	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);

	if (div)
		mbits /= div;

	if (div)
		return mbits + 1;
	return mbits;
}

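/* Worked example for the timeout heuristic above (hypothetical values):
 * a 1 MiB block on a 1 Gbit/s link gives
 *
 *	mbits = (1048576 * 8) / (1024 * 1024) = 8	(Mbit per block)
 *	div   = 1000 / 1000                   = 1	(line rate in Gbit/s)
 *	tmo   = 8 / 1 + 1                     = 9 ms
 *
 * i.e. roughly the ~8 ms the link needs to fill the block, plus a little
 * slack. Faster links retire proportionally sooner.
 */
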
static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
			    union tpacket_req_u *req_u)
{
	p1->feature_req_word = req_u->req3.tp_feature_req_word;
}

static void init_prb_bdqc(struct packet_sock *po,
			  struct packet_ring_buffer *rb,
			  struct pgv *pg_vec,
			  union tpacket_req_u *req_u)
{
	struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd;

	memset(p1, 0x0, sizeof(*p1));

	p1->knxt_seq_num = 1;
	p1->pkbdq = pg_vec;
	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
	p1->pkblk_start	= pg_vec[0].buffer;
	p1->kblk_size = req_u->req3.tp_block_size;
	p1->knum_blocks	= req_u->req3.tp_block_nr;
	p1->hdrlen = po->tp_hdrlen;
	p1->version = po->tp_version;
	p1->last_kactive_blk_num = 0;
	po->stats.stats3.tp_freeze_q_cnt = 0;
	if (req_u->req3.tp_retire_blk_tov)
		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
	else
		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
						req_u->req3.tp_block_size);
	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
	rwlock_init(&p1->blk_fill_in_prog_lock);

	p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
	prb_init_ft_ops(p1, req_u);
	prb_setup_retire_blk_timer(po);
	prb_open_block(p1, pbd);
}

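/* Userspace sketch (illustrative, not part of this file): the request
 * that init_prb_bdqc() consumes. Leaving tp_retire_blk_tov at 0 would
 * let prb_calc_retire_blk_tmo() derive the timeout from the link speed.
 * Error handling is omitted.
 *
 *	int ver = TPACKET_V3;
 *	struct tpacket_req3 req = {
 *		.tp_block_size	   = 1 << 20,	// 1 MiB per block
 *		.tp_block_nr	   = 8,
 *		.tp_frame_size	   = 2048,
 *		.tp_frame_nr	   = (1 << 20) / 2048 * 8,
 *		.tp_retire_blk_tov = 60,	// ms; 0 = auto from link speed
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	void *ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */
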
/* Do NOT update the last_blk_num first.
 * Assumes sk_buff_head lock is held.
 */
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	mod_timer(&pkc->retire_blk_timer,
			jiffies + pkc->tov_in_jiffies);
	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
}

/*
 * Timer logic:
 * 1) We refresh the timer only when we open a block.
 *    By doing this we don't waste cycles refreshing the timer
 *    on a packet-by-packet basis.
 *
 * With a 1MB block-size, on a 1Gbps line, it will take
 * i) ~8 ms to fill a block + ii) memcpy etc.
 * In this cut we are not accounting for the memcpy time.
 *
 * So, if the user sets the 'tmo' to 10ms then the timer
 * will never fire while the block is still getting filled
 * (which is what we want). However, the user could choose
 * to close a block early and that's fine.
 *
 * But when the timer does fire, we check whether or not to refresh it.
 * Since the tmo granularity is in msecs, it is not too expensive
 * to refresh the timer, say every '8' msecs.
 * Either the user can set the 'tmo' or we can derive it based on
 * a) line-speed and b) block-size.
 * prb_calc_retire_blk_tmo() calculates the tmo.
 */
static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
{
	struct packet_sock *po =
		from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer);
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	unsigned int frozen;
	struct tpacket_block_desc *pbd;

	spin_lock(&po->sk.sk_receive_queue.lock);

	frozen = prb_queue_frozen(pkc);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	if (unlikely(pkc->delete_blk_timer))
		goto out;

	/* We only need to plug the race when the block is partially filled.
	 * tpacket_rcv:
	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
	 *		copy_bits() is in progress ...
	 *		timer fires on other cpu:
	 *		we can't retire the current block because copy_bits
	 *		is in progress.
	 */
	if (BLOCK_NUM_PKTS(pbd)) {
		/* Waiting for skb_copy_bits to finish... */
		write_lock(&pkc->blk_fill_in_prog_lock);
		write_unlock(&pkc->blk_fill_in_prog_lock);
	}

	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
		if (!frozen) {
			if (!BLOCK_NUM_PKTS(pbd)) {
				/* An empty block. Just refresh the timer. */
				goto refresh_timer;
			}
			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
			if (!prb_dispatch_next_block(pkc, po))
				goto refresh_timer;
			else
				goto out;
		} else {
			/* Case 1. Queue was frozen because user-space was
			 * lagging behind.
			 */
			if (prb_curr_blk_in_use(pbd)) {
				/*
				 * Ok, user-space is still behind.
				 * So just refresh the timer.
				 */
				goto refresh_timer;
			} else {
				/* Case 2. Queue was frozen, user-space caught
				 * up, now the link went idle && the timer
				 * fired. We don't have a block to close, so we
				 * open this block and restart the timer.
				 * Opening a block thaws the queue and
				 * refreshes the timer as a side effect.
				 */
				prb_open_block(pkc, pbd);
				goto out;
			}
		}
	}

refresh_timer:
	_prb_refresh_rx_retire_blk_timer(pkc);

out:
	spin_unlock(&po->sk.sk_receive_queue.lock);
}

static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
			    struct tpacket_block_desc *pbd1, __u32 status)
{
	/* Flush everything minus the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	u8 *start, *end;

	start = (u8 *)pbd1;

	/* Skip the block header (we know the header WILL fit in 4K) */
	start += PAGE_SIZE;

	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
	for (; start < end; start += PAGE_SIZE)
		flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif

	/* Now update the block status. */

	BLOCK_STATUS(pbd1) = status;

	/* Flush the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	start = (u8 *)pbd1;
	flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif
}

/*
 * Side effect:
 *
 * 1) flush the block
 * 2) Increment active_blk_num
 *
 * Note: We DON'T refresh the timer on purpose,
 *	because almost always the next block will be opened.
 */
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
			    struct tpacket_block_desc *pbd1,
			    struct packet_sock *po, unsigned int stat)
{
	__u32 status = TP_STATUS_USER | stat;

	struct tpacket3_hdr *last_pkt;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
	struct sock *sk = &po->sk;

	if (atomic_read(&po->tp_drops))
		status |= TP_STATUS_LOSING;

	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
	last_pkt->tp_next_offset = 0;

	/* Get the ts of the last pkt */
	if (BLOCK_NUM_PKTS(pbd1)) {
		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
		h1->ts_last_pkt.ts_nsec	= last_pkt->tp_nsec;
	} else {
		/* Ok, we tmo'd - so get the current time.
		 *
		 * It shouldn't really happen as we don't close empty
		 * blocks. See prb_retire_rx_blk_timer_expired().
		 */
		struct timespec64 ts;
		ktime_get_real_ts64(&ts);
		h1->ts_last_pkt.ts_sec = ts.tv_sec;
		h1->ts_last_pkt.ts_nsec	= ts.tv_nsec;
	}

	smp_wmb();

	/* Flush the block */
	prb_flush_block(pkc1, pbd1, status);

	sk->sk_data_ready(sk);

	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}

static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
{
	pkc->reset_pending_on_curr_blk = 0;
}

/*
 * Side effect of opening a block:
 *
 * 1) prb_queue is thawed.
 * 2) retire_blk_timer is refreshed.
 *
 */
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
			   struct tpacket_block_desc *pbd1)
{
	struct timespec64 ts;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	smp_rmb();

	/* We could have just memset this but we will lose the
	 * flexibility of making the priv area sticky
	 */

	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
	BLOCK_NUM_PKTS(pbd1) = 0;
	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	ktime_get_real_ts64(&ts);

	h1->ts_first_pkt.ts_sec = ts.tv_sec;
	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;

	pkc1->pkblk_start = (char *)pbd1;
	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;

	pbd1->version = pkc1->version;
	pkc1->prev = pkc1->nxt_offset;
	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;

	prb_thaw_queue(pkc1);
	_prb_refresh_rx_retire_blk_timer(pkc1);

	smp_wmb();
}

/*
 * Queue freeze logic:
 * 1) Assume tp_block_nr = 8 blocks.
 * 2) At time 't0', user opens Rx ring.
 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 * 4) user-space is either sleeping or processing block '0'.
 * 5) tpacket_rcv is currently filling block '7'; since there is no space left,
 *    it will close block-7, loop around and try to fill block '0'.
 *    call-flow:
 *    __packet_lookup_frame_in_block
 *      prb_retire_current_block()
 *      prb_dispatch_next_block()
 *        |->(BLOCK_STATUS == USER) evaluates to true
 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 * 6) Now there are two cases:
 *    6.1) Link goes idle right after the queue is frozen.
 *         But remember, the last open_block() refreshed the timer.
 *         When this timer expires, it will refresh itself so that we can
 *         re-open block-0 in the near future.
 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 *         case and __packet_lookup_frame_in_block will check if block-0
 *         is free and can now be re-used.
 */
static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
			     struct packet_sock *po)
{
	pkc->reset_pending_on_curr_blk = 1;
	po->stats.stats3.tp_freeze_q_cnt++;
}

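
/* Userspace sketch (illustrative, not part of this file): case 6.2 above
 * only resolves once the consumer releases block-0. A minimal TPACKET_V3
 * consumer loop; "ring" and "req" are assumed to come from the mmap'd
 * PACKET_RX_RING setup, and per-packet walking is elided.
 *
 *	unsigned int i = 0;
 *	for (;;) {
 *		struct tpacket_block_desc *pbd = (void *)
 *			((char *)ring + (size_t)i * req.tp_block_size);
 *
 *		while (!(pbd->hdr.bh1.block_status & TP_STATUS_USER))
 *			poll(&pfd, 1, -1);
 *		// ... walk the packets in this block ...
 *		__sync_synchronize();
 *		pbd->hdr.bh1.block_status = TP_STATUS_KERNEL; // thaws queue
 *		i = (i + 1) % req.tp_block_nr;
 *	}
 */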

#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))

/*
 * If the next block is free then we will dispatch it
 * and return a good offset.
 * Else, we will freeze the queue.
 * So, caller must check the return value.
 */
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po)
{
	struct tpacket_block_desc *pbd;

	smp_rmb();

	/* 1. Get current block num */
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* 2. If this block is currently in_use then freeze the queue */
	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
		prb_freeze_queue(pkc, po);
		return NULL;
	}

	/*
	 * 3.
	 * open this block and return the offset where the first packet
	 * needs to get stored.
	 */
	prb_open_block(pkc, pbd);
	return (void *)pkc->nxt_offset;
}

chetan loke | bc59ba3 | 2011-08-25 10:43:30 +0000 | [diff] [blame] | 949 | static void prb_retire_current_block(struct tpacket_kbdq_core *pkc, |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 950 | struct packet_sock *po, unsigned int status) |
| 951 | { |
chetan loke | bc59ba3 | 2011-08-25 10:43:30 +0000 | [diff] [blame] | 952 | struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 953 | |
| 954 | /* retire/close the current block */ |
| 955 | if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) { |
| 956 | /* |
| 957 |  * Plug the case where skb_copy_bits() is still in progress
| 958 |  * on cpu-0 while tpacket_rcv() on cpu-1 ran out of space
| 959 |  * in the current block and therefore called
| 960 |  * prb_retire_current_block().
| 961 |  *
| 962 |  * We don't need to worry about the TMO case because
| 963 |  * the timer handler has already dealt with it.
| 964 | */ |
| 965 | if (!(status & TP_STATUS_BLK_TMO)) { |
John Ogness | 632ca50 | 2020-07-07 17:28:04 +0206 | [diff] [blame] | 966 | /* Waiting for skb_copy_bits to finish... */ |
| 967 | write_lock(&pkc->blk_fill_in_prog_lock); |
| 968 | write_unlock(&pkc->blk_fill_in_prog_lock); |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 969 | } |
| 970 | prb_close_block(pkc, pbd, po, status); |
| 971 | return; |
| 972 | } |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 973 | } |
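|  |
|  | /*
|  |  * The write_lock()/write_unlock() pair above protects no data; it is
|  |  * purely a barrier. Fillers hold blk_fill_in_prog_lock for reading
|  |  * from prb_fill_curr_block() until prb_clear_blk_fill_status(), so
|  |  * the write lock can only be taken once no CPU is still copying bits
|  |  * into the block that is about to be closed.
|  |  */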
| 974 | |
Rosen, Rami | 878cd3b | 2017-05-24 18:34:11 +0300 | [diff] [blame] | 975 | static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd) |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 976 | { |
| 977 | return TP_STATUS_USER & BLOCK_STATUS(pbd); |
| 978 | } |
| 979 | |
Olof Johansson | eea49cc9 | 2011-11-02 11:00:49 +0000 | [diff] [blame] | 980 | static int prb_queue_frozen(struct tpacket_kbdq_core *pkc) |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 981 | { |
| 982 | return pkc->reset_pending_on_curr_blk; |
| 983 | } |
| 984 | |
Olof Johansson | eea49cc9 | 2011-11-02 11:00:49 +0000 | [diff] [blame] | 985 | static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb) |
John Ogness | 88fd1cb | 2020-08-13 21:45:25 +0206 | [diff] [blame] | 986 | __releases(&pkc->blk_fill_in_prog_lock) |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 987 | { |
chetan loke | bc59ba3 | 2011-08-25 10:43:30 +0000 | [diff] [blame] | 988 | struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb); |
John Ogness | 632ca50 | 2020-07-07 17:28:04 +0206 | [diff] [blame] | 989 | |
| 990 | read_unlock(&pkc->blk_fill_in_prog_lock); |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 991 | } |
| 992 | |
Olof Johansson | eea49cc9 | 2011-11-02 11:00:49 +0000 | [diff] [blame] | 993 | static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc, |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 994 | struct tpacket3_hdr *ppd) |
| 995 | { |
Tom Herbert | 3958afa1b | 2013-12-15 22:12:06 -0800 | [diff] [blame] | 996 | ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb); |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 997 | } |
| 998 | |
Olof Johansson | eea49cc9 | 2011-11-02 11:00:49 +0000 | [diff] [blame] | 999 | static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc, |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 1000 | struct tpacket3_hdr *ppd) |
| 1001 | { |
| 1002 | ppd->hv1.tp_rxhash = 0; |
| 1003 | } |
| 1004 | |
Olof Johansson | eea49cc9 | 2011-11-02 11:00:49 +0000 | [diff] [blame] | 1005 | static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc, |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 1006 | struct tpacket3_hdr *ppd) |
| 1007 | { |
Jiri Pirko | df8a39d | 2015-01-13 17:13:44 +0100 | [diff] [blame] | 1008 | if (skb_vlan_tag_present(pkc->skb)) { |
| 1009 | ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb); |
Atzm Watanabe | a0cdfcf | 2013-12-17 22:53:40 +0900 | [diff] [blame] | 1010 | ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto); |
| 1011 | ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 1012 | } else { |
danborkmann@iogearbox.net | 9e67030 | 2012-08-20 03:34:03 +0000 | [diff] [blame] | 1013 | ppd->hv1.tp_vlan_tci = 0; |
Atzm Watanabe | a0cdfcf | 2013-12-17 22:53:40 +0900 | [diff] [blame] | 1014 | ppd->hv1.tp_vlan_tpid = 0; |
danborkmann@iogearbox.net | 9e67030 | 2012-08-20 03:34:03 +0000 | [diff] [blame] | 1015 | ppd->tp_status = TP_STATUS_AVAILABLE; |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 1016 | } |
| 1017 | } |
| 1018 | |
chetan loke | bc59ba3 | 2011-08-25 10:43:30 +0000 | [diff] [blame] | 1019 | static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc, |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 1020 | struct tpacket3_hdr *ppd) |
| 1021 | { |
Atzm Watanabe | a0cdfcf | 2013-12-17 22:53:40 +0900 | [diff] [blame] | 1022 | ppd->hv1.tp_padding = 0; |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 1023 | prb_fill_vlan_info(pkc, ppd); |
| 1024 | |
| 1025 | if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH) |
| 1026 | prb_fill_rxhash(pkc, ppd); |
| 1027 | else |
| 1028 | prb_clear_rxhash(pkc, ppd); |
| 1029 | } |
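|  |
|  | /*
|  |  * pkc->feature_req_word is copied from tpacket_req3.tp_feature_req_word
|  |  * at ring-setup time. A minimal user-space sketch (illustrative only,
|  |  * not part of this file) that requests rxhash fill-in:
|  |  *
|  |  *	int v = TPACKET_V3;
|  |  *	struct tpacket_req3 req = {
|  |  *		.tp_block_size       = 1 << 22,
|  |  *		.tp_block_nr         = 64,
|  |  *		.tp_frame_size       = 1 << 11,
|  |  *		.tp_frame_nr         = ((1 << 22) / (1 << 11)) * 64,
|  |  *		.tp_retire_blk_tov   = 60,
|  |  *		.tp_feature_req_word = TP_FT_REQ_FILL_RXHASH,
|  |  *	};
|  |  *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &v, sizeof(v));
|  |  *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
|  |  *
|  |  * tp_retire_blk_tov is the block timeout in milliseconds.
|  |  */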
| 1030 | |
Olof Johansson | eea49cc9 | 2011-11-02 11:00:49 +0000 | [diff] [blame] | 1031 | static void prb_fill_curr_block(char *curr, |
chetan loke | bc59ba3 | 2011-08-25 10:43:30 +0000 | [diff] [blame] | 1032 | struct tpacket_kbdq_core *pkc, |
| 1033 | struct tpacket_block_desc *pbd, |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 1034 | unsigned int len) |
John Ogness | 88fd1cb | 2020-08-13 21:45:25 +0206 | [diff] [blame] | 1035 | __acquires(&pkc->blk_fill_in_prog_lock) |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 1036 | { |
| 1037 | struct tpacket3_hdr *ppd; |
| 1038 | |
| 1039 | ppd = (struct tpacket3_hdr *)curr; |
| 1040 | ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len); |
| 1041 | pkc->prev = curr; |
| 1042 | pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len); |
| 1043 | BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len); |
| 1044 | BLOCK_NUM_PKTS(pbd) += 1; |
John Ogness | 632ca50 | 2020-07-07 17:28:04 +0206 | [diff] [blame] | 1045 | read_lock(&pkc->blk_fill_in_prog_lock); |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 1046 | prb_run_all_ft_ops(pkc, ppd); |
| 1047 | } |
| 1048 | |
| 1049 | /* Assumes the caller holds sk->sk_receive_queue.lock */
| 1050 | static void *__packet_lookup_frame_in_block(struct packet_sock *po, |
| 1051 | struct sk_buff *skb, |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 1052 | unsigned int len |
| 1053 | ) |
| 1054 | { |
chetan loke | bc59ba3 | 2011-08-25 10:43:30 +0000 | [diff] [blame] | 1055 | struct tpacket_kbdq_core *pkc; |
| 1056 | struct tpacket_block_desc *pbd; |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 1057 | char *curr, *end; |
| 1058 | |
Joe Perches | e319269 | 2012-06-03 17:41:40 +0000 | [diff] [blame] | 1059 | pkc = GET_PBDQC_FROM_RB(&po->rx_ring); |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 1060 | pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); |
| 1061 | |
| 1062 | /* Queue is frozen when user space is lagging behind */ |
| 1063 | if (prb_queue_frozen(pkc)) { |
| 1064 | /* |
| 1065 | 		 * Check if the last block, the one which caused the queue
| 1066 | 		 * to freeze, is still in use by user space.
| 1067 | */ |
Rosen, Rami | 878cd3b | 2017-05-24 18:34:11 +0300 | [diff] [blame] | 1068 | if (prb_curr_blk_in_use(pbd)) { |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 1069 | /* Can't record this packet */ |
| 1070 | return NULL; |
| 1071 | } else { |
| 1072 | /* |
| 1073 | 			 * Ok, the block was released by user space.
| 1074 | 			 * Now let's open that block.
| 1075 | 			 * Opening a block also thaws the queue,
| 1076 | 			 * as a side effect.
| 1077 | */ |
| 1078 | prb_open_block(pkc, pbd); |
| 1079 | } |
| 1080 | } |
| 1081 | |
| 1082 | smp_mb(); |
| 1083 | curr = pkc->nxt_offset; |
| 1084 | pkc->skb = skb; |
Joe Perches | e319269 | 2012-06-03 17:41:40 +0000 | [diff] [blame] | 1085 | end = (char *)pbd + pkc->kblk_size; |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 1086 | |
| 1087 | /* first try the current block */ |
| 1088 | if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) { |
| 1089 | prb_fill_curr_block(curr, pkc, pbd, len); |
| 1090 | return (void *)curr; |
| 1091 | } |
| 1092 | |
| 1093 | /* Ok, close the current block */ |
| 1094 | prb_retire_current_block(pkc, po, 0); |
| 1095 | |
| 1096 | /* Now, try to dispatch the next block */ |
| 1097 | curr = (char *)prb_dispatch_next_block(pkc, po); |
| 1098 | if (curr) { |
| 1099 | pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); |
| 1100 | prb_fill_curr_block(curr, pkc, pbd, len); |
| 1101 | return (void *)curr; |
| 1102 | } |
| 1103 | |
| 1104 | /* |
| 1105 | 	 * No free blocks are available; user space hasn't caught up yet.
| 1106 | 	 * The queue was just frozen and this packet will get dropped.
| 1107 | */ |
| 1108 | return NULL; |
| 1109 | } |
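|  |
|  | /*
|  |  * To summarize the V3 fill path: the common case appends the packet
|  |  * to the currently open block; when it does not fit, the block is
|  |  * retired and the next one dispatched. A NULL return means the queue
|  |  * is (or just became) frozen, and the caller is expected to drop the
|  |  * packet and account it.
|  |  */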
| 1110 | |
Olof Johansson | eea49cc9 | 2011-11-02 11:00:49 +0000 | [diff] [blame] | 1111 | static void *packet_current_rx_frame(struct packet_sock *po, |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 1112 | struct sk_buff *skb, |
| 1113 | int status, unsigned int len) |
| 1114 | { |
| 1115 | char *curr = NULL; |
| 1116 | switch (po->tp_version) { |
| 1117 | case TPACKET_V1: |
| 1118 | case TPACKET_V2: |
| 1119 | curr = packet_lookup_frame(po, &po->rx_ring, |
| 1120 | po->rx_ring.head, status); |
| 1121 | return curr; |
| 1122 | case TPACKET_V3: |
Mao Wenan | 4608805 | 2019-06-11 09:32:13 +0800 | [diff] [blame] | 1123 | return __packet_lookup_frame_in_block(po, skb, len); |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 1124 | default: |
| 1125 | WARN(1, "TPACKET version not supported\n"); |
| 1126 | BUG(); |
Ying Xue | 99aa347 | 2012-08-06 16:27:10 +0000 | [diff] [blame] | 1127 | return NULL; |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 1128 | } |
| 1129 | } |
| 1130 | |
Eric Dumazet | dcf70ce | 2019-06-12 09:52:28 -0700 | [diff] [blame] | 1131 | static void *prb_lookup_block(const struct packet_sock *po, |
| 1132 | const struct packet_ring_buffer *rb, |
| 1133 | unsigned int idx, |
| 1134 | int status) |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 1135 | { |
chetan loke | bc59ba3 | 2011-08-25 10:43:30 +0000 | [diff] [blame] | 1136 | struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb); |
Willem de Bruijn | 77f65eb | 2013-03-19 10:18:11 +0000 | [diff] [blame] | 1137 | struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx); |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 1138 | |
| 1139 | if (status != BLOCK_STATUS(pbd)) |
| 1140 | return NULL; |
| 1141 | return pbd; |
| 1142 | } |
| 1143 | |
Olof Johansson | eea49cc9 | 2011-11-02 11:00:49 +0000 | [diff] [blame] | 1144 | static int prb_previous_blk_num(struct packet_ring_buffer *rb) |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 1145 | { |
| 1146 | unsigned int prev; |
| 1147 | if (rb->prb_bdqc.kactive_blk_num) |
| 1148 | prev = rb->prb_bdqc.kactive_blk_num-1; |
| 1149 | else |
| 1150 | prev = rb->prb_bdqc.knum_blocks-1; |
| 1151 | return prev; |
| 1152 | } |
| 1153 | |
| 1154 | /* Assumes the caller holds sk->sk_receive_queue.lock */
Olof Johansson | eea49cc9 | 2011-11-02 11:00:49 +0000 | [diff] [blame] | 1155 | static void *__prb_previous_block(struct packet_sock *po, |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 1156 | struct packet_ring_buffer *rb, |
| 1157 | int status) |
| 1158 | { |
| 1159 | unsigned int previous = prb_previous_blk_num(rb); |
| 1160 | return prb_lookup_block(po, rb, previous, status); |
| 1161 | } |
| 1162 | |
Olof Johansson | eea49cc9 | 2011-11-02 11:00:49 +0000 | [diff] [blame] | 1163 | static void *packet_previous_rx_frame(struct packet_sock *po, |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 1164 | struct packet_ring_buffer *rb, |
| 1165 | int status) |
| 1166 | { |
| 1167 | if (po->tp_version <= TPACKET_V2) |
| 1168 | return packet_previous_frame(po, rb, status); |
| 1169 | |
| 1170 | return __prb_previous_block(po, rb, status); |
| 1171 | } |
| 1172 | |
Olof Johansson | eea49cc9 | 2011-11-02 11:00:49 +0000 | [diff] [blame] | 1173 | static void packet_increment_rx_head(struct packet_sock *po, |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 1174 | struct packet_ring_buffer *rb) |
| 1175 | { |
| 1176 | switch (po->tp_version) { |
| 1177 | case TPACKET_V1: |
| 1178 | case TPACKET_V2: |
| 1179 | return packet_increment_head(rb); |
| 1180 | case TPACKET_V3: |
| 1181 | default: |
| 1182 | WARN(1, "TPACKET version not supported.\n"); |
| 1183 | BUG(); |
| 1184 | return; |
| 1185 | } |
| 1186 | } |
| 1187 | |
Olof Johansson | eea49cc9 | 2011-11-02 11:00:49 +0000 | [diff] [blame] | 1188 | static void *packet_previous_frame(struct packet_sock *po, |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 1189 | struct packet_ring_buffer *rb, |
| 1190 | int status) |
| 1191 | { |
| 1192 | unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max; |
| 1193 | return packet_lookup_frame(po, rb, previous, status); |
| 1194 | } |
| 1195 | |
Olof Johansson | eea49cc9 | 2011-11-02 11:00:49 +0000 | [diff] [blame] | 1196 | static void packet_increment_head(struct packet_ring_buffer *buff) |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 1197 | { |
| 1198 | buff->head = buff->head != buff->frame_max ? buff->head+1 : 0; |
| 1199 | } |
| 1200 | |
Daniel Borkmann | b013840 | 2014-01-15 16:25:36 +0100 | [diff] [blame] | 1201 | static void packet_inc_pending(struct packet_ring_buffer *rb) |
| 1202 | { |
| 1203 | this_cpu_inc(*rb->pending_refcnt); |
| 1204 | } |
| 1205 | |
| 1206 | static void packet_dec_pending(struct packet_ring_buffer *rb) |
| 1207 | { |
| 1208 | this_cpu_dec(*rb->pending_refcnt); |
| 1209 | } |
| 1210 | |
| 1211 | static unsigned int packet_read_pending(const struct packet_ring_buffer *rb) |
| 1212 | { |
| 1213 | unsigned int refcnt = 0; |
| 1214 | int cpu; |
| 1215 | |
| 1216 | 	/* The rx_ring does not use a pending refcount. */
| 1217 | if (rb->pending_refcnt == NULL) |
| 1218 | return 0; |
| 1219 | |
| 1220 | for_each_possible_cpu(cpu) |
| 1221 | refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu); |
| 1222 | |
| 1223 | return refcnt; |
| 1224 | } |
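|  |
|  | /*
|  |  * pending_refcnt counts tx ring frames handed to the device but not
|  |  * yet completed. It is per-cpu so the transmit fast path can avoid
|  |  * atomics and cache-line bouncing; packet_read_pending() sums all
|  |  * CPUs and is only used on the slower wait-for-completion path,
|  |  * where a transiently stale sum is acceptable.
|  |  */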
| 1225 | |
| 1226 | static int packet_alloc_pending(struct packet_sock *po) |
| 1227 | { |
| 1228 | po->rx_ring.pending_refcnt = NULL; |
| 1229 | |
| 1230 | po->tx_ring.pending_refcnt = alloc_percpu(unsigned int); |
| 1231 | if (unlikely(po->tx_ring.pending_refcnt == NULL)) |
| 1232 | return -ENOBUFS; |
| 1233 | |
| 1234 | return 0; |
| 1235 | } |
| 1236 | |
| 1237 | static void packet_free_pending(struct packet_sock *po) |
| 1238 | { |
| 1239 | free_percpu(po->tx_ring.pending_refcnt); |
| 1240 | } |
| 1241 | |
Willem de Bruijn | 9954729 | 2015-05-12 11:56:47 -0400 | [diff] [blame] | 1242 | #define ROOM_POW_OFF 2 |
| 1243 | #define ROOM_NONE 0x0 |
| 1244 | #define ROOM_LOW 0x1 |
| 1245 | #define ROOM_NORMAL 0x2 |
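|  |
|  | /*
|  |  * With ROOM_POW_OFF == 2: ROOM_NORMAL means at least a quarter of the
|  |  * ring (or receive buffer) ahead of the head is still free, ROOM_LOW
|  |  * means there is some space but less than that, and ROOM_NONE means
|  |  * the next slot is unavailable. The rollover logic below prefers
|  |  * sockets with ROOM_NORMAL.
|  |  */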
| 1246 | |
Eric Dumazet | d4b5bd9 | 2019-06-12 09:52:27 -0700 | [diff] [blame] | 1247 | static bool __tpacket_has_room(const struct packet_sock *po, int pow_off) |
Willem de Bruijn | 9954729 | 2015-05-12 11:56:47 -0400 | [diff] [blame] | 1248 | { |
| 1249 | int idx, len; |
| 1250 | |
Eric Dumazet | d4b5bd9 | 2019-06-12 09:52:27 -0700 | [diff] [blame] | 1251 | len = READ_ONCE(po->rx_ring.frame_max) + 1; |
| 1252 | idx = READ_ONCE(po->rx_ring.head); |
Willem de Bruijn | 9954729 | 2015-05-12 11:56:47 -0400 | [diff] [blame] | 1253 | if (pow_off) |
| 1254 | idx += len >> pow_off; |
| 1255 | if (idx >= len) |
| 1256 | idx -= len; |
| 1257 | return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL); |
| 1258 | } |
| 1259 | |
Eric Dumazet | dcf70ce | 2019-06-12 09:52:28 -0700 | [diff] [blame] | 1260 | static bool __tpacket_v3_has_room(const struct packet_sock *po, int pow_off) |
Willem de Bruijn | 9954729 | 2015-05-12 11:56:47 -0400 | [diff] [blame] | 1261 | { |
| 1262 | int idx, len; |
| 1263 | |
Eric Dumazet | dcf70ce | 2019-06-12 09:52:28 -0700 | [diff] [blame] | 1264 | len = READ_ONCE(po->rx_ring.prb_bdqc.knum_blocks); |
| 1265 | idx = READ_ONCE(po->rx_ring.prb_bdqc.kactive_blk_num); |
Willem de Bruijn | 9954729 | 2015-05-12 11:56:47 -0400 | [diff] [blame] | 1266 | if (pow_off) |
| 1267 | idx += len >> pow_off; |
| 1268 | if (idx >= len) |
| 1269 | idx -= len; |
| 1270 | return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL); |
| 1271 | } |
| 1272 | |
Eric Dumazet | 0338a14 | 2019-06-12 09:52:29 -0700 | [diff] [blame] | 1273 | static int __packet_rcv_has_room(const struct packet_sock *po, |
| 1274 | const struct sk_buff *skb) |
Willem de Bruijn | 77f65eb | 2013-03-19 10:18:11 +0000 | [diff] [blame] | 1275 | { |
Eric Dumazet | 0338a14 | 2019-06-12 09:52:29 -0700 | [diff] [blame] | 1276 | const struct sock *sk = &po->sk; |
Willem de Bruijn | 9954729 | 2015-05-12 11:56:47 -0400 | [diff] [blame] | 1277 | int ret = ROOM_NONE; |
Willem de Bruijn | 77f65eb | 2013-03-19 10:18:11 +0000 | [diff] [blame] | 1278 | |
Willem de Bruijn | 9954729 | 2015-05-12 11:56:47 -0400 | [diff] [blame] | 1279 | if (po->prot_hook.func != tpacket_rcv) { |
Eric Dumazet | 0338a14 | 2019-06-12 09:52:29 -0700 | [diff] [blame] | 1280 | int rcvbuf = READ_ONCE(sk->sk_rcvbuf); |
| 1281 | int avail = rcvbuf - atomic_read(&sk->sk_rmem_alloc) |
| 1282 | - (skb ? skb->truesize : 0); |
| 1283 | |
| 1284 | if (avail > (rcvbuf >> ROOM_POW_OFF)) |
Willem de Bruijn | 9954729 | 2015-05-12 11:56:47 -0400 | [diff] [blame] | 1285 | return ROOM_NORMAL; |
| 1286 | else if (avail > 0) |
| 1287 | return ROOM_LOW; |
| 1288 | else |
| 1289 | return ROOM_NONE; |
| 1290 | } |
Willem de Bruijn | 77f65eb | 2013-03-19 10:18:11 +0000 | [diff] [blame] | 1291 | |
Willem de Bruijn | 9954729 | 2015-05-12 11:56:47 -0400 | [diff] [blame] | 1292 | if (po->tp_version == TPACKET_V3) { |
| 1293 | if (__tpacket_v3_has_room(po, ROOM_POW_OFF)) |
| 1294 | ret = ROOM_NORMAL; |
| 1295 | else if (__tpacket_v3_has_room(po, 0)) |
| 1296 | ret = ROOM_LOW; |
| 1297 | } else { |
| 1298 | if (__tpacket_has_room(po, ROOM_POW_OFF)) |
| 1299 | ret = ROOM_NORMAL; |
| 1300 | else if (__tpacket_has_room(po, 0)) |
| 1301 | ret = ROOM_LOW; |
| 1302 | } |
Willem de Bruijn | 2ccdbaa | 2015-05-12 11:56:48 -0400 | [diff] [blame] | 1303 | |
| 1304 | return ret; |
| 1305 | } |
| 1306 | |
| 1307 | static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb) |
| 1308 | { |
Eric Dumazet | 3a2bb84 | 2019-06-12 09:52:32 -0700 | [diff] [blame] | 1309 | int pressure, ret; |
Willem de Bruijn | 2ccdbaa | 2015-05-12 11:56:48 -0400 | [diff] [blame] | 1310 | |
Willem de Bruijn | 54d7c01 | 2015-05-14 15:25:02 -0400 | [diff] [blame] | 1311 | ret = __packet_rcv_has_room(po, skb); |
Eric Dumazet | 3a2bb84 | 2019-06-12 09:52:32 -0700 | [diff] [blame] | 1312 | pressure = ret != ROOM_NORMAL; |
| 1313 | |
| 1314 | if (READ_ONCE(po->pressure) != pressure) |
| 1315 | WRITE_ONCE(po->pressure, pressure); |
Willem de Bruijn | 77f65eb | 2013-03-19 10:18:11 +0000 | [diff] [blame] | 1316 | |
Willem de Bruijn | 9954729 | 2015-05-12 11:56:47 -0400 | [diff] [blame] | 1317 | return ret; |
Willem de Bruijn | 77f65eb | 2013-03-19 10:18:11 +0000 | [diff] [blame] | 1318 | } |
| 1319 | |
Eric Dumazet | 9bb6cd6 | 2019-06-12 09:52:33 -0700 | [diff] [blame] | 1320 | static void packet_rcv_try_clear_pressure(struct packet_sock *po) |
| 1321 | { |
| 1322 | if (READ_ONCE(po->pressure) && |
| 1323 | __packet_rcv_has_room(po, NULL) == ROOM_NORMAL) |
| 1324 | WRITE_ONCE(po->pressure, 0); |
| 1325 | } |
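|  |
|  | /*
|  |  * po->pressure is a lockless hint (hence the READ_ONCE/WRITE_ONCE
|  |  * pairs): it is set when a receive finds less than ROOM_NORMAL space
|  |  * and cleared here, from the read side, once the ring has drained.
|  |  * fanout_demux_rollover() skips sockets with pressure set without
|  |  * recomputing their room.
|  |  */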
| 1326 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1327 | static void packet_sock_destruct(struct sock *sk) |
| 1328 | { |
Richard Cochran | ed85b56 | 2010-04-07 22:41:28 +0000 | [diff] [blame] | 1329 | skb_queue_purge(&sk->sk_error_queue); |
| 1330 | |
Ilpo Järvinen | 547b792 | 2008-07-25 21:43:18 -0700 | [diff] [blame] | 1331 | WARN_ON(atomic_read(&sk->sk_rmem_alloc)); |
Reshetova, Elena | 14afee4 | 2017-06-30 13:08:00 +0300 | [diff] [blame] | 1332 | WARN_ON(refcount_read(&sk->sk_wmem_alloc)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1333 | |
| 1334 | if (!sock_flag(sk, SOCK_DEAD)) { |
Eric Dumazet | 40d4e3d | 2009-07-21 21:57:59 +0000 | [diff] [blame] | 1335 | pr_err("Attempt to release alive packet socket: %p\n", sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1336 | return; |
| 1337 | } |
| 1338 | |
Pavel Emelyanov | 17ab56a | 2007-11-10 21:38:48 -0800 | [diff] [blame] | 1339 | sk_refcnt_debug_dec(sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1340 | } |
| 1341 | |
Willem de Bruijn | 3b3a5b0 | 2015-05-12 11:56:49 -0400 | [diff] [blame] | 1342 | static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb) |
| 1343 | { |
Eric Dumazet | b756ad9 | 2019-11-08 05:07:46 -0800 | [diff] [blame] | 1344 | u32 *history = po->rollover->history; |
| 1345 | u32 victim, rxhash; |
Willem de Bruijn | 3b3a5b0 | 2015-05-12 11:56:49 -0400 | [diff] [blame] | 1346 | int i, count = 0; |
| 1347 | |
| 1348 | rxhash = skb_get_hash(skb); |
| 1349 | for (i = 0; i < ROLLOVER_HLEN; i++) |
Eric Dumazet | b756ad9 | 2019-11-08 05:07:46 -0800 | [diff] [blame] | 1350 | if (READ_ONCE(history[i]) == rxhash) |
Willem de Bruijn | 3b3a5b0 | 2015-05-12 11:56:49 -0400 | [diff] [blame] | 1351 | count++; |
| 1352 | |
Eric Dumazet | b756ad9 | 2019-11-08 05:07:46 -0800 | [diff] [blame] | 1353 | victim = prandom_u32() % ROLLOVER_HLEN; |
| 1354 | |
| 1355 | /* Avoid dirtying the cache line if possible */ |
| 1356 | if (READ_ONCE(history[victim]) != rxhash) |
| 1357 | WRITE_ONCE(history[victim], rxhash); |
| 1358 | |
Willem de Bruijn | 3b3a5b0 | 2015-05-12 11:56:49 -0400 | [diff] [blame] | 1359 | return count > (ROLLOVER_HLEN >> 1); |
| 1360 | } |
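|  |
|  | /*
|  |  * The rollover history acts as a small reservoir of recently seen
|  |  * rxhashes: each call overwrites a random slot, so the array holds a
|  |  * roughly uniform sample of recent traffic. A flow is "huge" when it
|  |  * fills more than half of that sample. Under ROOM_LOW only huge flows
|  |  * are rolled over: migrating the dominant flow relieves the pressure,
|  |  * while small flows keep their socket affinity.
|  |  */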
| 1361 | |
Willem de Bruijn | 77f65eb | 2013-03-19 10:18:11 +0000 | [diff] [blame] | 1362 | static unsigned int fanout_demux_hash(struct packet_fanout *f, |
| 1363 | struct sk_buff *skb, |
| 1364 | unsigned int num) |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 1365 | { |
David S. Miller | eb70db8 | 2016-07-01 16:07:50 -0400 | [diff] [blame] | 1366 | return reciprocal_scale(__skb_get_hash_symmetric(skb), num); |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 1367 | } |
| 1368 | |
Willem de Bruijn | 77f65eb | 2013-03-19 10:18:11 +0000 | [diff] [blame] | 1369 | static unsigned int fanout_demux_lb(struct packet_fanout *f, |
| 1370 | struct sk_buff *skb, |
| 1371 | unsigned int num) |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 1372 | { |
Willem de Bruijn | 468479e | 2015-06-17 15:59:34 -0400 | [diff] [blame] | 1373 | unsigned int val = atomic_inc_return(&f->rr_cur); |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 1374 | |
Willem de Bruijn | 468479e | 2015-06-17 15:59:34 -0400 | [diff] [blame] | 1375 | return val % num; |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 1376 | } |
| 1377 | |
Willem de Bruijn | 77f65eb | 2013-03-19 10:18:11 +0000 | [diff] [blame] | 1378 | static unsigned int fanout_demux_cpu(struct packet_fanout *f, |
| 1379 | struct sk_buff *skb, |
| 1380 | unsigned int num) |
David S. Miller | 95ec3eb | 2011-07-06 01:56:38 -0700 | [diff] [blame] | 1381 | { |
Willem de Bruijn | 77f65eb | 2013-03-19 10:18:11 +0000 | [diff] [blame] | 1382 | return smp_processor_id() % num; |
| 1383 | } |
David S. Miller | 95ec3eb | 2011-07-06 01:56:38 -0700 | [diff] [blame] | 1384 | |
Daniel Borkmann | 5df0ddf | 2013-08-28 22:13:09 +0200 | [diff] [blame] | 1385 | static unsigned int fanout_demux_rnd(struct packet_fanout *f, |
| 1386 | struct sk_buff *skb, |
| 1387 | unsigned int num) |
| 1388 | { |
Daniel Borkmann | f337db6 | 2014-01-22 02:29:39 +0100 | [diff] [blame] | 1389 | return prandom_u32_max(num); |
Daniel Borkmann | 5df0ddf | 2013-08-28 22:13:09 +0200 | [diff] [blame] | 1390 | } |
| 1391 | |
Willem de Bruijn | 77f65eb | 2013-03-19 10:18:11 +0000 | [diff] [blame] | 1392 | static unsigned int fanout_demux_rollover(struct packet_fanout *f, |
| 1393 | struct sk_buff *skb, |
Willem de Bruijn | ad377ca | 2015-05-12 11:56:45 -0400 | [diff] [blame] | 1394 | unsigned int idx, bool try_self, |
Willem de Bruijn | 77f65eb | 2013-03-19 10:18:11 +0000 | [diff] [blame] | 1395 | unsigned int num) |
| 1396 | { |
Willem de Bruijn | 4633c9e | 2015-05-17 19:44:02 -0400 | [diff] [blame] | 1397 | struct packet_sock *po, *po_next, *po_skip = NULL; |
Willem de Bruijn | a9b6391 | 2015-05-12 11:56:50 -0400 | [diff] [blame] | 1398 | unsigned int i, j, room = ROOM_NONE; |
Willem de Bruijn | 77f65eb | 2013-03-19 10:18:11 +0000 | [diff] [blame] | 1399 | |
Eric Dumazet | 94f633e | 2021-04-14 12:36:44 -0700 | [diff] [blame] | 1400 | po = pkt_sk(rcu_dereference(f->arr[idx])); |
Willem de Bruijn | 3b3a5b0 | 2015-05-12 11:56:49 -0400 | [diff] [blame] | 1401 | |
| 1402 | if (try_self) { |
| 1403 | room = packet_rcv_has_room(po, skb); |
| 1404 | if (room == ROOM_NORMAL || |
| 1405 | (room == ROOM_LOW && !fanout_flow_is_huge(po, skb))) |
| 1406 | return idx; |
Willem de Bruijn | 4633c9e | 2015-05-17 19:44:02 -0400 | [diff] [blame] | 1407 | po_skip = po; |
Willem de Bruijn | 3b3a5b0 | 2015-05-12 11:56:49 -0400 | [diff] [blame] | 1408 | } |
Willem de Bruijn | ad377ca | 2015-05-12 11:56:45 -0400 | [diff] [blame] | 1409 | |
Willem de Bruijn | 0648ab7 | 2015-05-12 11:56:46 -0400 | [diff] [blame] | 1410 | i = j = min_t(int, po->rollover->sock, num - 1); |
Willem de Bruijn | 77f65eb | 2013-03-19 10:18:11 +0000 | [diff] [blame] | 1411 | do { |
Eric Dumazet | 94f633e | 2021-04-14 12:36:44 -0700 | [diff] [blame] | 1412 | po_next = pkt_sk(rcu_dereference(f->arr[i])); |
Eric Dumazet | 3a2bb84 | 2019-06-12 09:52:32 -0700 | [diff] [blame] | 1413 | if (po_next != po_skip && !READ_ONCE(po_next->pressure) && |
Willem de Bruijn | 2ccdbaa | 2015-05-12 11:56:48 -0400 | [diff] [blame] | 1414 | packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) { |
Willem de Bruijn | 77f65eb | 2013-03-19 10:18:11 +0000 | [diff] [blame] | 1415 | if (i != j) |
Willem de Bruijn | 0648ab7 | 2015-05-12 11:56:46 -0400 | [diff] [blame] | 1416 | po->rollover->sock = i; |
Willem de Bruijn | a9b6391 | 2015-05-12 11:56:50 -0400 | [diff] [blame] | 1417 | atomic_long_inc(&po->rollover->num); |
| 1418 | if (room == ROOM_LOW) |
| 1419 | atomic_long_inc(&po->rollover->num_huge); |
Willem de Bruijn | 77f65eb | 2013-03-19 10:18:11 +0000 | [diff] [blame] | 1420 | return i; |
| 1421 | } |
Willem de Bruijn | ad377ca | 2015-05-12 11:56:45 -0400 | [diff] [blame] | 1422 | |
Willem de Bruijn | 77f65eb | 2013-03-19 10:18:11 +0000 | [diff] [blame] | 1423 | if (++i == num) |
| 1424 | i = 0; |
| 1425 | } while (i != j); |
| 1426 | |
Willem de Bruijn | a9b6391 | 2015-05-12 11:56:50 -0400 | [diff] [blame] | 1427 | atomic_long_inc(&po->rollover->num_failed); |
Willem de Bruijn | 77f65eb | 2013-03-19 10:18:11 +0000 | [diff] [blame] | 1428 | return idx; |
| 1429 | } |
| 1430 | |
Neil Horman | 2d36097 | 2014-01-22 16:01:44 -0500 | [diff] [blame] | 1431 | static unsigned int fanout_demux_qm(struct packet_fanout *f, |
| 1432 | struct sk_buff *skb, |
| 1433 | unsigned int num) |
| 1434 | { |
| 1435 | return skb_get_queue_mapping(skb) % num; |
| 1436 | } |
| 1437 | |
Willem de Bruijn | 47dceb8 | 2015-08-14 22:31:34 -0400 | [diff] [blame] | 1438 | static unsigned int fanout_demux_bpf(struct packet_fanout *f, |
| 1439 | struct sk_buff *skb, |
| 1440 | unsigned int num) |
| 1441 | { |
| 1442 | struct bpf_prog *prog; |
| 1443 | unsigned int ret = 0; |
| 1444 | |
| 1445 | rcu_read_lock(); |
| 1446 | prog = rcu_dereference(f->bpf_prog); |
| 1447 | if (prog) |
Alexei Starovoitov | ff936a0 | 2015-10-07 10:55:41 -0700 | [diff] [blame] | 1448 | ret = bpf_prog_run_clear_cb(prog, skb) % num; |
Willem de Bruijn | 47dceb8 | 2015-08-14 22:31:34 -0400 | [diff] [blame] | 1449 | rcu_read_unlock(); |
| 1450 | |
| 1451 | return ret; |
| 1452 | } |
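|  |
|  | /*
|  |  * The program runs under rcu_read_lock() and its return value is
|  |  * reduced modulo num, so any verdict maps to a valid group slot.
|  |  * bpf_prog_run_clear_cb() clears the BPF-visible skb->cb[] area
|  |  * first (when the program accesses it), so the filter never sees
|  |  * stale control-block data.
|  |  */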
| 1453 | |
Willem de Bruijn | 77f65eb | 2013-03-19 10:18:11 +0000 | [diff] [blame] | 1454 | static bool fanout_has_flag(struct packet_fanout *f, u16 flag) |
| 1455 | { |
| 1456 | return f->flags & (flag >> 8); |
David S. Miller | 95ec3eb | 2011-07-06 01:56:38 -0700 | [diff] [blame] | 1457 | } |
| 1458 | |
David S. Miller | 95ec3eb | 2011-07-06 01:56:38 -0700 | [diff] [blame] | 1459 | static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev, |
| 1460 | struct packet_type *pt, struct net_device *orig_dev) |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 1461 | { |
| 1462 | struct packet_fanout *f = pt->af_packet_priv; |
Eric Dumazet | f98f451 | 2015-06-16 07:59:11 -0700 | [diff] [blame] | 1463 | unsigned int num = READ_ONCE(f->num_members); |
Eric W. Biederman | 19bcf9f | 2015-10-09 13:44:54 -0500 | [diff] [blame] | 1464 | struct net *net = read_pnet(&f->net); |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 1465 | struct packet_sock *po; |
Willem de Bruijn | 77f65eb | 2013-03-19 10:18:11 +0000 | [diff] [blame] | 1466 | unsigned int idx; |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 1467 | |
Eric W. Biederman | 19bcf9f | 2015-10-09 13:44:54 -0500 | [diff] [blame] | 1468 | if (!net_eq(dev_net(dev), net) || !num) { |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 1469 | kfree_skb(skb); |
| 1470 | return 0; |
| 1471 | } |
| 1472 | |
Alexander Drozdov | 3f34b24 | 2015-02-20 08:24:27 +0300 | [diff] [blame] | 1473 | if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) { |
Eric W. Biederman | 19bcf9f | 2015-10-09 13:44:54 -0500 | [diff] [blame] | 1474 | skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET); |
Alexander Drozdov | 3f34b24 | 2015-02-20 08:24:27 +0300 | [diff] [blame] | 1475 | if (!skb) |
| 1476 | return 0; |
| 1477 | } |
David S. Miller | 95ec3eb | 2011-07-06 01:56:38 -0700 | [diff] [blame] | 1478 | switch (f->type) { |
| 1479 | case PACKET_FANOUT_HASH: |
| 1480 | default: |
Willem de Bruijn | 77f65eb | 2013-03-19 10:18:11 +0000 | [diff] [blame] | 1481 | idx = fanout_demux_hash(f, skb, num); |
David S. Miller | 95ec3eb | 2011-07-06 01:56:38 -0700 | [diff] [blame] | 1482 | break; |
| 1483 | case PACKET_FANOUT_LB: |
Willem de Bruijn | 77f65eb | 2013-03-19 10:18:11 +0000 | [diff] [blame] | 1484 | idx = fanout_demux_lb(f, skb, num); |
David S. Miller | 95ec3eb | 2011-07-06 01:56:38 -0700 | [diff] [blame] | 1485 | break; |
| 1486 | case PACKET_FANOUT_CPU: |
Willem de Bruijn | 77f65eb | 2013-03-19 10:18:11 +0000 | [diff] [blame] | 1487 | idx = fanout_demux_cpu(f, skb, num); |
| 1488 | break; |
Daniel Borkmann | 5df0ddf | 2013-08-28 22:13:09 +0200 | [diff] [blame] | 1489 | case PACKET_FANOUT_RND: |
| 1490 | idx = fanout_demux_rnd(f, skb, num); |
| 1491 | break; |
Neil Horman | 2d36097 | 2014-01-22 16:01:44 -0500 | [diff] [blame] | 1492 | case PACKET_FANOUT_QM: |
| 1493 | idx = fanout_demux_qm(f, skb, num); |
| 1494 | break; |
Willem de Bruijn | 77f65eb | 2013-03-19 10:18:11 +0000 | [diff] [blame] | 1495 | case PACKET_FANOUT_ROLLOVER: |
Willem de Bruijn | ad377ca | 2015-05-12 11:56:45 -0400 | [diff] [blame] | 1496 | idx = fanout_demux_rollover(f, skb, 0, false, num); |
David S. Miller | 95ec3eb | 2011-07-06 01:56:38 -0700 | [diff] [blame] | 1497 | break; |
Willem de Bruijn | 47dceb8 | 2015-08-14 22:31:34 -0400 | [diff] [blame] | 1498 | case PACKET_FANOUT_CBPF: |
Willem de Bruijn | f2e5209 | 2015-08-14 22:31:35 -0400 | [diff] [blame] | 1499 | case PACKET_FANOUT_EBPF: |
Willem de Bruijn | 47dceb8 | 2015-08-14 22:31:34 -0400 | [diff] [blame] | 1500 | idx = fanout_demux_bpf(f, skb, num); |
| 1501 | break; |
David S. Miller | 7736d33 | 2011-07-05 01:43:20 -0700 | [diff] [blame] | 1502 | } |
| 1503 | |
Willem de Bruijn | ad377ca | 2015-05-12 11:56:45 -0400 | [diff] [blame] | 1504 | if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER)) |
| 1505 | idx = fanout_demux_rollover(f, skb, idx, true, num); |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 1506 | |
Eric Dumazet | 94f633e | 2021-04-14 12:36:44 -0700 | [diff] [blame] | 1507 | po = pkt_sk(rcu_dereference(f->arr[idx])); |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 1508 | return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev); |
| 1509 | } |
| 1510 | |
Pavel Emelyanov | fff3321 | 2012-08-16 05:36:48 +0000 | [diff] [blame] | 1511 | DEFINE_MUTEX(fanout_mutex); |
| 1512 | EXPORT_SYMBOL_GPL(fanout_mutex); |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 1513 | static LIST_HEAD(fanout_list); |
Mike Maloney | 4a69a86 | 2017-04-21 10:56:11 -0400 | [diff] [blame] | 1514 | static u16 fanout_next_id; |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 1515 | |
| 1516 | static void __fanout_link(struct sock *sk, struct packet_sock *po) |
| 1517 | { |
| 1518 | struct packet_fanout *f = po->fanout; |
| 1519 | |
| 1520 | spin_lock(&f->lock); |
Eric Dumazet | 94f633e | 2021-04-14 12:36:44 -0700 | [diff] [blame] | 1521 | rcu_assign_pointer(f->arr[f->num_members], sk); |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 1522 | smp_wmb(); |
| 1523 | f->num_members++; |
Anoob Soman | 2bd624b | 2017-02-15 20:25:39 +0000 | [diff] [blame] | 1524 | if (f->num_members == 1) |
| 1525 | dev_add_pack(&f->prot_hook); |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 1526 | spin_unlock(&f->lock); |
| 1527 | } |
| 1528 | |
| 1529 | static void __fanout_unlink(struct sock *sk, struct packet_sock *po) |
| 1530 | { |
| 1531 | struct packet_fanout *f = po->fanout; |
| 1532 | int i; |
| 1533 | |
| 1534 | spin_lock(&f->lock); |
| 1535 | for (i = 0; i < f->num_members; i++) { |
Eric Dumazet | 94f633e | 2021-04-14 12:36:44 -0700 | [diff] [blame] | 1536 | if (rcu_dereference_protected(f->arr[i], |
| 1537 | lockdep_is_held(&f->lock)) == sk) |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 1538 | break; |
| 1539 | } |
| 1540 | BUG_ON(i >= f->num_members); |
Eric Dumazet | 94f633e | 2021-04-14 12:36:44 -0700 | [diff] [blame] | 1541 | rcu_assign_pointer(f->arr[i], |
| 1542 | rcu_dereference_protected(f->arr[f->num_members - 1], |
| 1543 | lockdep_is_held(&f->lock))); |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 1544 | f->num_members--; |
Anoob Soman | 2bd624b | 2017-02-15 20:25:39 +0000 | [diff] [blame] | 1545 | if (f->num_members == 0) |
| 1546 | __dev_remove_pack(&f->prot_hook); |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 1547 | spin_unlock(&f->lock); |
| 1548 | } |
| 1549 | |
Weilong Chen | d4dd8ae | 2013-12-23 11:31:38 +0800 | [diff] [blame] | 1550 | static bool match_fanout_group(struct packet_type *ptype, struct sock *sk) |
Eric Leblond | c0de08d | 2012-08-16 22:02:58 +0000 | [diff] [blame] | 1551 | { |
Eric Dumazet | 161642e | 2015-10-09 11:29:32 -0700 | [diff] [blame] | 1552 | if (sk->sk_family != PF_PACKET) |
| 1553 | return false; |
Eric Leblond | c0de08d | 2012-08-16 22:02:58 +0000 | [diff] [blame] | 1554 | |
Eric Dumazet | 161642e | 2015-10-09 11:29:32 -0700 | [diff] [blame] | 1555 | return ptype->af_packet_priv == pkt_sk(sk)->fanout; |
Eric Leblond | c0de08d | 2012-08-16 22:02:58 +0000 | [diff] [blame] | 1556 | } |
| 1557 | |
Willem de Bruijn | 47dceb8 | 2015-08-14 22:31:34 -0400 | [diff] [blame] | 1558 | static void fanout_init_data(struct packet_fanout *f) |
| 1559 | { |
| 1560 | switch (f->type) { |
| 1561 | case PACKET_FANOUT_LB: |
| 1562 | atomic_set(&f->rr_cur, 0); |
| 1563 | break; |
| 1564 | case PACKET_FANOUT_CBPF: |
Willem de Bruijn | f2e5209 | 2015-08-14 22:31:35 -0400 | [diff] [blame] | 1565 | case PACKET_FANOUT_EBPF: |
Willem de Bruijn | 47dceb8 | 2015-08-14 22:31:34 -0400 | [diff] [blame] | 1566 | RCU_INIT_POINTER(f->bpf_prog, NULL); |
| 1567 | break; |
| 1568 | } |
| 1569 | } |
| 1570 | |
| 1571 | static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new) |
| 1572 | { |
| 1573 | struct bpf_prog *old; |
| 1574 | |
| 1575 | spin_lock(&f->lock); |
| 1576 | old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock)); |
| 1577 | rcu_assign_pointer(f->bpf_prog, new); |
| 1578 | spin_unlock(&f->lock); |
| 1579 | |
| 1580 | if (old) { |
| 1581 | synchronize_net(); |
| 1582 | bpf_prog_destroy(old); |
| 1583 | } |
| 1584 | } |
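|  |
|  | /*
|  |  * Classic RCU publish-and-retire: the new program is published under
|  |  * f->lock with rcu_assign_pointer(), and the old one is destroyed
|  |  * only after synchronize_net() guarantees no CPU can still be
|  |  * running it from the demux path.
|  |  */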
| 1585 | |
Christoph Hellwig | b1ea9ff | 2020-07-23 08:08:47 +0200 | [diff] [blame] | 1586 | static int fanout_set_data_cbpf(struct packet_sock *po, sockptr_t data, |
Willem de Bruijn | 47dceb8 | 2015-08-14 22:31:34 -0400 | [diff] [blame] | 1587 | unsigned int len) |
| 1588 | { |
| 1589 | struct bpf_prog *new; |
| 1590 | struct sock_fprog fprog; |
| 1591 | int ret; |
| 1592 | |
| 1593 | if (sock_flag(&po->sk, SOCK_FILTER_LOCKED)) |
| 1594 | return -EPERM; |
Christoph Hellwig | 4d295e5 | 2020-07-17 08:23:13 +0200 | [diff] [blame] | 1595 | |
| 1596 | ret = copy_bpf_fprog_from_user(&fprog, data, len); |
| 1597 | if (ret) |
| 1598 | return ret; |
Willem de Bruijn | 47dceb8 | 2015-08-14 22:31:34 -0400 | [diff] [blame] | 1599 | |
Daniel Borkmann | bab1899 | 2015-10-02 15:17:33 +0200 | [diff] [blame] | 1600 | ret = bpf_prog_create_from_user(&new, &fprog, NULL, false); |
Willem de Bruijn | 47dceb8 | 2015-08-14 22:31:34 -0400 | [diff] [blame] | 1601 | if (ret) |
| 1602 | return ret; |
| 1603 | |
| 1604 | __fanout_set_data_bpf(po->fanout, new); |
| 1605 | return 0; |
| 1606 | } |
| 1607 | |
Christoph Hellwig | a7b75c5 | 2020-07-23 08:09:07 +0200 | [diff] [blame] | 1608 | static int fanout_set_data_ebpf(struct packet_sock *po, sockptr_t data, |
Willem de Bruijn | f2e5209 | 2015-08-14 22:31:35 -0400 | [diff] [blame] | 1609 | unsigned int len) |
| 1610 | { |
| 1611 | struct bpf_prog *new; |
| 1612 | u32 fd; |
| 1613 | |
| 1614 | if (sock_flag(&po->sk, SOCK_FILTER_LOCKED)) |
| 1615 | return -EPERM; |
| 1616 | if (len != sizeof(fd)) |
| 1617 | return -EINVAL; |
Christoph Hellwig | a7b75c5 | 2020-07-23 08:09:07 +0200 | [diff] [blame] | 1618 | if (copy_from_sockptr(&fd, data, len)) |
Willem de Bruijn | f2e5209 | 2015-08-14 22:31:35 -0400 | [diff] [blame] | 1619 | return -EFAULT; |
| 1620 | |
Daniel Borkmann | 113214b | 2016-06-30 17:24:44 +0200 | [diff] [blame] | 1621 | new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER); |
Willem de Bruijn | f2e5209 | 2015-08-14 22:31:35 -0400 | [diff] [blame] | 1622 | if (IS_ERR(new)) |
| 1623 | return PTR_ERR(new); |
Willem de Bruijn | f2e5209 | 2015-08-14 22:31:35 -0400 | [diff] [blame] | 1624 | |
| 1625 | __fanout_set_data_bpf(po->fanout, new); |
| 1626 | return 0; |
| 1627 | } |
| 1628 | |
Christoph Hellwig | a7b75c5 | 2020-07-23 08:09:07 +0200 | [diff] [blame] | 1629 | static int fanout_set_data(struct packet_sock *po, sockptr_t data, |
Willem de Bruijn | 47dceb8 | 2015-08-14 22:31:34 -0400 | [diff] [blame] | 1630 | unsigned int len) |
| 1631 | { |
| 1632 | switch (po->fanout->type) { |
| 1633 | case PACKET_FANOUT_CBPF: |
Christoph Hellwig | a7b75c5 | 2020-07-23 08:09:07 +0200 | [diff] [blame] | 1634 | return fanout_set_data_cbpf(po, data, len); |
Willem de Bruijn | f2e5209 | 2015-08-14 22:31:35 -0400 | [diff] [blame] | 1635 | case PACKET_FANOUT_EBPF: |
| 1636 | return fanout_set_data_ebpf(po, data, len); |
Willem de Bruijn | 47dceb8 | 2015-08-14 22:31:34 -0400 | [diff] [blame] | 1637 | default: |
| 1638 | return -EINVAL; |
zhong jiang | 07d53ae | 2018-08-04 19:41:41 +0800 | [diff] [blame] | 1639 | } |
Willem de Bruijn | 47dceb8 | 2015-08-14 22:31:34 -0400 | [diff] [blame] | 1640 | } |
| 1641 | |
| 1642 | static void fanout_release_data(struct packet_fanout *f) |
| 1643 | { |
| 1644 | switch (f->type) { |
| 1645 | case PACKET_FANOUT_CBPF: |
Willem de Bruijn | f2e5209 | 2015-08-14 22:31:35 -0400 | [diff] [blame] | 1646 | case PACKET_FANOUT_EBPF: |
Willem de Bruijn | 47dceb8 | 2015-08-14 22:31:34 -0400 | [diff] [blame] | 1647 | __fanout_set_data_bpf(f, NULL); |
zhong jiang | 07d53ae | 2018-08-04 19:41:41 +0800 | [diff] [blame] | 1648 | } |
Willem de Bruijn | 47dceb8 | 2015-08-14 22:31:34 -0400 | [diff] [blame] | 1649 | } |
| 1650 | |
Mike Maloney | 4a69a86 | 2017-04-21 10:56:11 -0400 | [diff] [blame] | 1651 | static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id) |
| 1652 | { |
| 1653 | struct packet_fanout *f; |
| 1654 | |
| 1655 | list_for_each_entry(f, &fanout_list, list) { |
| 1656 | if (f->id == candidate_id && |
| 1657 | read_pnet(&f->net) == sock_net(sk)) { |
| 1658 | return false; |
| 1659 | } |
| 1660 | } |
| 1661 | return true; |
| 1662 | } |
| 1663 | |
| 1664 | static bool fanout_find_new_id(struct sock *sk, u16 *new_id) |
| 1665 | { |
| 1666 | u16 id = fanout_next_id; |
| 1667 | |
| 1668 | do { |
| 1669 | if (__fanout_id_is_free(sk, id)) { |
| 1670 | *new_id = id; |
| 1671 | fanout_next_id = id + 1; |
| 1672 | return true; |
| 1673 | } |
| 1674 | |
| 1675 | id++; |
| 1676 | } while (id != fanout_next_id); |
| 1677 | |
| 1678 | return false; |
| 1679 | } |
| 1680 | |
Tanner Love | 9c661b0 | 2020-11-06 13:07:40 -0500 | [diff] [blame] | 1681 | static int fanout_add(struct sock *sk, struct fanout_args *args) |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 1682 | { |
Eric Dumazet | d199fab | 2017-02-14 09:03:51 -0800 | [diff] [blame] | 1683 | struct packet_rollover *rollover = NULL; |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 1684 | struct packet_sock *po = pkt_sk(sk); |
Tanner Love | 9c661b0 | 2020-11-06 13:07:40 -0500 | [diff] [blame] | 1685 | u16 type_flags = args->type_flags; |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 1686 | struct packet_fanout *f, *match; |
David S. Miller | 7736d33 | 2011-07-05 01:43:20 -0700 | [diff] [blame] | 1687 | u8 type = type_flags & 0xff; |
Willem de Bruijn | 77f65eb | 2013-03-19 10:18:11 +0000 | [diff] [blame] | 1688 | u8 flags = type_flags >> 8; |
Tanner Love | 9c661b0 | 2020-11-06 13:07:40 -0500 | [diff] [blame] | 1689 | u16 id = args->id; |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 1690 | int err; |
| 1691 | |
| 1692 | switch (type) { |
Willem de Bruijn | 77f65eb | 2013-03-19 10:18:11 +0000 | [diff] [blame] | 1693 | case PACKET_FANOUT_ROLLOVER: |
| 1694 | if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER) |
| 1695 | return -EINVAL; |
Gustavo A. R. Silva | 5af5a02 | 2020-11-20 12:38:20 -0600 | [diff] [blame] | 1696 | break; |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 1697 | case PACKET_FANOUT_HASH: |
| 1698 | case PACKET_FANOUT_LB: |
David S. Miller | 95ec3eb | 2011-07-06 01:56:38 -0700 | [diff] [blame] | 1699 | case PACKET_FANOUT_CPU: |
Daniel Borkmann | 5df0ddf | 2013-08-28 22:13:09 +0200 | [diff] [blame] | 1700 | case PACKET_FANOUT_RND: |
Neil Horman | 2d36097 | 2014-01-22 16:01:44 -0500 | [diff] [blame] | 1701 | case PACKET_FANOUT_QM: |
Willem de Bruijn | 47dceb8 | 2015-08-14 22:31:34 -0400 | [diff] [blame] | 1702 | case PACKET_FANOUT_CBPF: |
Willem de Bruijn | f2e5209 | 2015-08-14 22:31:35 -0400 | [diff] [blame] | 1703 | case PACKET_FANOUT_EBPF: |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 1704 | break; |
| 1705 | default: |
| 1706 | return -EINVAL; |
| 1707 | } |
| 1708 | |
Eric Dumazet | d199fab | 2017-02-14 09:03:51 -0800 | [diff] [blame] | 1709 | mutex_lock(&fanout_mutex); |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 1710 | |
Eric Dumazet | d199fab | 2017-02-14 09:03:51 -0800 | [diff] [blame] | 1711 | err = -EALREADY; |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 1712 | if (po->fanout) |
Eric Dumazet | d199fab | 2017-02-14 09:03:51 -0800 | [diff] [blame] | 1713 | goto out; |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 1714 | |
Willem de Bruijn | 4633c9e | 2015-05-17 19:44:02 -0400 | [diff] [blame] | 1715 | if (type == PACKET_FANOUT_ROLLOVER || |
| 1716 | (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) { |
Eric Dumazet | d199fab | 2017-02-14 09:03:51 -0800 | [diff] [blame] | 1717 | err = -ENOMEM; |
| 1718 | rollover = kzalloc(sizeof(*rollover), GFP_KERNEL); |
| 1719 | if (!rollover) |
| 1720 | goto out; |
| 1721 | atomic_long_set(&rollover->num, 0); |
| 1722 | atomic_long_set(&rollover->num_huge, 0); |
| 1723 | atomic_long_set(&rollover->num_failed, 0); |
Willem de Bruijn | 0648ab7 | 2015-05-12 11:56:46 -0400 | [diff] [blame] | 1724 | } |
| 1725 | |
Mike Maloney | 4a69a86 | 2017-04-21 10:56:11 -0400 | [diff] [blame] | 1726 | if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) { |
| 1727 | if (id != 0) { |
| 1728 | err = -EINVAL; |
| 1729 | goto out; |
| 1730 | } |
| 1731 | if (!fanout_find_new_id(sk, &id)) { |
| 1732 | err = -ENOMEM; |
| 1733 | goto out; |
| 1734 | } |
| 1735 | /* ephemeral flag for the first socket in the group: drop it */ |
| 1736 | flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8); |
| 1737 | } |
| 1738 | |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 1739 | match = NULL; |
| 1740 | list_for_each_entry(f, &fanout_list, list) { |
| 1741 | if (f->id == id && |
| 1742 | read_pnet(&f->net) == sock_net(sk)) { |
| 1743 | match = f; |
| 1744 | break; |
| 1745 | } |
| 1746 | } |
Eric Dumazet | afe62c6 | 2011-07-07 06:41:29 -0700 | [diff] [blame] | 1747 | err = -EINVAL; |
Tanner Love | 9c661b0 | 2020-11-06 13:07:40 -0500 | [diff] [blame] | 1748 | if (match) { |
| 1749 | if (match->flags != flags) |
| 1750 | goto out; |
| 1751 | if (args->max_num_members && |
| 1752 | args->max_num_members != match->max_num_members) |
| 1753 | goto out; |
| 1754 | } else { |
| 1755 | if (args->max_num_members > PACKET_FANOUT_MAX) |
| 1756 | goto out; |
| 1757 | if (!args->max_num_members) |
| 1758 | /* legacy PACKET_FANOUT_MAX */ |
| 1759 | args->max_num_members = 256; |
Eric Dumazet | afe62c6 | 2011-07-07 06:41:29 -0700 | [diff] [blame] | 1760 | err = -ENOMEM; |
Tanner Love | 9c661b0 | 2020-11-06 13:07:40 -0500 | [diff] [blame] | 1761 | match = kvzalloc(struct_size(match, arr, args->max_num_members), |
| 1762 | GFP_KERNEL); |
Eric Dumazet | afe62c6 | 2011-07-07 06:41:29 -0700 | [diff] [blame] | 1763 | if (!match) |
| 1764 | goto out; |
| 1765 | write_pnet(&match->net, sock_net(sk)); |
| 1766 | match->id = id; |
| 1767 | match->type = type; |
Willem de Bruijn | 77f65eb | 2013-03-19 10:18:11 +0000 | [diff] [blame] | 1768 | match->flags = flags; |
Eric Dumazet | afe62c6 | 2011-07-07 06:41:29 -0700 | [diff] [blame] | 1769 | INIT_LIST_HEAD(&match->list); |
| 1770 | spin_lock_init(&match->lock); |
Reshetova, Elena | fb5c2c1 | 2017-06-30 13:08:10 +0300 | [diff] [blame] | 1771 | refcount_set(&match->sk_ref, 0); |
Willem de Bruijn | 47dceb8 | 2015-08-14 22:31:34 -0400 | [diff] [blame] | 1772 | fanout_init_data(match); |
Eric Dumazet | afe62c6 | 2011-07-07 06:41:29 -0700 | [diff] [blame] | 1773 | match->prot_hook.type = po->prot_hook.type; |
| 1774 | match->prot_hook.dev = po->prot_hook.dev; |
| 1775 | match->prot_hook.func = packet_rcv_fanout; |
| 1776 | match->prot_hook.af_packet_priv = match; |
Eric Leblond | c0de08d | 2012-08-16 22:02:58 +0000 | [diff] [blame] | 1777 | match->prot_hook.id_match = match_fanout_group; |
Tanner Love | 9c661b0 | 2020-11-06 13:07:40 -0500 | [diff] [blame] | 1778 | match->max_num_members = args->max_num_members; |
Eric Dumazet | afe62c6 | 2011-07-07 06:41:29 -0700 | [diff] [blame] | 1779 | list_add(&match->list, &fanout_list); |
| 1780 | } |
| 1781 | err = -EINVAL; |
Willem de Bruijn | 008ba2a | 2017-09-14 17:14:41 -0400 | [diff] [blame] | 1782 | |
| 1783 | spin_lock(&po->bind_lock); |
| 1784 | if (po->running && |
| 1785 | match->type == type && |
Eric Dumazet | afe62c6 | 2011-07-07 06:41:29 -0700 | [diff] [blame] | 1786 | match->prot_hook.type == po->prot_hook.type && |
| 1787 | match->prot_hook.dev == po->prot_hook.dev) { |
| 1788 | err = -ENOSPC; |
Tanner Love | 9c661b0 | 2020-11-06 13:07:40 -0500 | [diff] [blame] | 1789 | if (refcount_read(&match->sk_ref) < match->max_num_members) { |
Eric Dumazet | afe62c6 | 2011-07-07 06:41:29 -0700 | [diff] [blame] | 1790 | __dev_remove_pack(&po->prot_hook); |
| 1791 | po->fanout = match; |
Mike Maloney | 57f015f | 2017-11-28 10:44:29 -0500 | [diff] [blame] | 1792 | po->rollover = rollover; |
| 1793 | rollover = NULL; |
Reshetova, Elena | fb5c2c1 | 2017-06-30 13:08:10 +0300 | [diff] [blame] | 1794 | refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1); |
Eric Dumazet | afe62c6 | 2011-07-07 06:41:29 -0700 | [diff] [blame] | 1795 | __fanout_link(sk, po); |
| 1796 | err = 0; |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 1797 | } |
| 1798 | } |
Willem de Bruijn | 008ba2a | 2017-09-14 17:14:41 -0400 | [diff] [blame] | 1799 | spin_unlock(&po->bind_lock); |
| 1800 | |
| 1801 | if (err && !refcount_read(&match->sk_ref)) { |
| 1802 | list_del(&match->list); |
Tanner Love | 9c661b0 | 2020-11-06 13:07:40 -0500 | [diff] [blame] | 1803 | kvfree(match); |
Willem de Bruijn | 008ba2a | 2017-09-14 17:14:41 -0400 | [diff] [blame] | 1804 | } |
| 1805 | |
Eric Dumazet | afe62c6 | 2011-07-07 06:41:29 -0700 | [diff] [blame] | 1806 | out: |
Mike Maloney | 57f015f | 2017-11-28 10:44:29 -0500 | [diff] [blame] | 1807 | kfree(rollover); |
Eric Dumazet | d199fab | 2017-02-14 09:03:51 -0800 | [diff] [blame] | 1808 | mutex_unlock(&fanout_mutex); |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 1809 | return err; |
| 1810 | } |
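|  |
|  | /*
|  |  * Illustrative user-space sketch (not part of this file): joining
|  |  * fanout group 7 with hash demux, so each flow consistently lands on
|  |  * one member socket:
|  |  *
|  |  *	int fd  = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
|  |  *	int arg = 7 | (PACKET_FANOUT_HASH << 16);
|  |  *
|  |  *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
|  |  *
|  |  * Running the same calls on further sockets with the same id joins
|  |  * the same group; fanout_add() above matches on (id, netns).
|  |  */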
| 1811 | |
Anoob Soman | 2bd624b | 2017-02-15 20:25:39 +0000 | [diff] [blame] | 1812 | /* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes |
| 1813 | * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout. |
| 1814 | * It is the responsibility of the caller to call fanout_release_data() and |
| 1815 | * free the returned packet_fanout (after synchronize_net()) |
| 1816 | */ |
| 1817 | static struct packet_fanout *fanout_release(struct sock *sk) |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 1818 | { |
| 1819 | struct packet_sock *po = pkt_sk(sk); |
| 1820 | struct packet_fanout *f; |
| 1821 | |
Pavel Emelyanov | fff3321 | 2012-08-16 05:36:48 +0000 | [diff] [blame] | 1822 | mutex_lock(&fanout_mutex); |
Eric Dumazet | d199fab | 2017-02-14 09:03:51 -0800 | [diff] [blame] | 1823 | f = po->fanout; |
| 1824 | if (f) { |
| 1825 | po->fanout = NULL; |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 1826 | |
Reshetova, Elena | fb5c2c1 | 2017-06-30 13:08:10 +0300 | [diff] [blame] | 1827 | if (refcount_dec_and_test(&f->sk_ref)) |
Eric Dumazet | d199fab | 2017-02-14 09:03:51 -0800 | [diff] [blame] | 1828 | list_del(&f->list); |
Anoob Soman | 2bd624b | 2017-02-15 20:25:39 +0000 | [diff] [blame] | 1829 | else |
| 1830 | f = NULL; |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 1831 | } |
| 1832 | mutex_unlock(&fanout_mutex); |
Anoob Soman | 2bd624b | 2017-02-15 20:25:39 +0000 | [diff] [blame] | 1833 | |
| 1834 | return f; |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 1835 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1836 | |
Daniel Borkmann | 3c70c13 | 2015-11-11 23:25:42 +0100 | [diff] [blame] | 1837 | static bool packet_extra_vlan_len_allowed(const struct net_device *dev, |
| 1838 | struct sk_buff *skb) |
| 1839 | { |
| 1840 | /* Earlier code assumed this would be a VLAN pkt, double-check |
| 1841 | * this now that we have the actual packet in hand. We can only |
| 1842 | * do this check on Ethernet devices. |
| 1843 | */ |
| 1844 | if (unlikely(dev->type != ARPHRD_ETHER)) |
| 1845 | return false; |
| 1846 | |
| 1847 | skb_reset_mac_header(skb); |
| 1848 | return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q)); |
| 1849 | } |
| 1850 | |
Eric Dumazet | 90ddc4f | 2005-12-22 12:49:22 -0800 | [diff] [blame] | 1851 | static const struct proto_ops packet_ops; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1852 | |
Eric Dumazet | 90ddc4f | 2005-12-22 12:49:22 -0800 | [diff] [blame] | 1853 | static const struct proto_ops packet_ops_spkt; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1854 | |
Eric Dumazet | 40d4e3d | 2009-07-21 21:57:59 +0000 | [diff] [blame] | 1855 | static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, |
| 1856 | struct packet_type *pt, struct net_device *orig_dev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1857 | { |
| 1858 | struct sock *sk; |
| 1859 | struct sockaddr_pkt *spkt; |
| 1860 | |
| 1861 | /* |
| 1862 | * When we registered the protocol we saved the socket in the data |
| 1863 | * field for just this event. |
| 1864 | */ |
| 1865 | |
| 1866 | sk = pt->af_packet_priv; |
YOSHIFUJI Hideaki | 1ce4f28 | 2007-02-09 23:25:10 +0900 | [diff] [blame] | 1867 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1868 | /* |
| 1869 | * Yank back the headers [hope the device set this |
| 1870 | * right or kerboom...] |
| 1871 | * |
| 1872 | * Incoming packets have ll header pulled, |
| 1873 | * push it back. |
| 1874 | * |
Arnaldo Carvalho de Melo | 98e399f | 2007-03-19 15:33:04 -0700 | [diff] [blame] | 1875 | * For outgoing ones skb->data == skb_mac_header(skb) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1876 | 	 *	so that this procedure is a no-op.
| 1877 | */ |
| 1878 | |
| 1879 | if (skb->pkt_type == PACKET_LOOPBACK) |
| 1880 | goto out; |
| 1881 | |
Octavian Purdila | 09ad9bc | 2009-11-25 15:14:13 -0800 | [diff] [blame] | 1882 | if (!net_eq(dev_net(dev), sock_net(sk))) |
Denis V. Lunev | d12d01d | 2007-11-19 22:28:35 -0800 | [diff] [blame] | 1883 | goto out; |
| 1884 | |
Eric Dumazet | 40d4e3d | 2009-07-21 21:57:59 +0000 | [diff] [blame] | 1885 | skb = skb_share_check(skb, GFP_ATOMIC); |
| 1886 | if (skb == NULL) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1887 | goto oom; |
| 1888 | |
| 1889 | /* drop any routing info */ |
Eric Dumazet | adf3090 | 2009-06-02 05:19:30 +0000 | [diff] [blame] | 1890 | skb_dst_drop(skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1891 | |
Phil Oester | 84531c2 | 2005-07-12 11:57:52 -0700 | [diff] [blame] | 1892 | /* drop conntrack reference */ |
Florian Westphal | 895b5c9 | 2019-09-29 20:54:03 +0200 | [diff] [blame] | 1893 | nf_reset_ct(skb); |
Phil Oester | 84531c2 | 2005-07-12 11:57:52 -0700 | [diff] [blame] | 1894 | |
Herbert Xu | ffbc611 | 2007-02-04 23:33:10 -0800 | [diff] [blame] | 1895 | spkt = &PACKET_SKB_CB(skb)->sa.pkt; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1896 | |
Arnaldo Carvalho de Melo | 98e399f | 2007-03-19 15:33:04 -0700 | [diff] [blame] | 1897 | skb_push(skb, skb->data - skb_mac_header(skb)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1898 | |
| 1899 | /* |
| 1900 | * The SOCK_PACKET socket receives _all_ frames. |
| 1901 | */ |
| 1902 | |
| 1903 | spkt->spkt_family = dev->type; |
| 1904 | strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device)); |
| 1905 | spkt->spkt_protocol = skb->protocol; |
| 1906 | |
| 1907 | /* |
| 1908 | * Charge the memory to the socket. This is done specifically |
| 1909 | * to prevent sockets using all the memory up. |
| 1910 | */ |
| 1911 | |
Eric Dumazet | 40d4e3d | 2009-07-21 21:57:59 +0000 | [diff] [blame] | 1912 | if (sock_queue_rcv_skb(sk, skb) == 0) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1913 | return 0; |
| 1914 | |
| 1915 | out: |
| 1916 | kfree_skb(skb); |
| 1917 | oom: |
| 1918 | return 0; |
| 1919 | } |
| 1920 | |
Maxim Mikityanskiy | 75c6577 | 2019-02-21 12:40:01 +0000 | [diff] [blame] | 1921 | static void packet_parse_headers(struct sk_buff *skb, struct socket *sock) |
| 1922 | { |
Yoshiki Komachi | 18bed89 | 2019-03-18 14:39:52 +0900 | [diff] [blame] | 1923 | if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) && |
| 1924 | sock->type == SOCK_RAW) { |
Maxim Mikityanskiy | 75c6577 | 2019-02-21 12:40:01 +0000 | [diff] [blame] | 1925 | skb_reset_mac_header(skb); |
| 1926 | skb->protocol = dev_parse_header_protocol(skb); |
| 1927 | } |
| 1928 | |
| 1929 | skb_probe_transport_header(skb); |
| 1930 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1931 | |
| 1932 | /* |
| 1933 | * Output a raw packet to a device layer. This bypasses all the other |
| 1934 |  *	protocol layers and you must therefore supply it with a complete frame.
| 1935 | */ |
YOSHIFUJI Hideaki | 1ce4f28 | 2007-02-09 23:25:10 +0900 | [diff] [blame] | 1936 | |
Ying Xue | 1b78414 | 2015-03-02 15:37:48 +0800 | [diff] [blame] | 1937 | static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg, |
| 1938 | size_t len) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1939 | { |
| 1940 | struct sock *sk = sock->sk; |
Steffen Hurrle | 342dfc3 | 2014-01-17 22:53:15 +0100 | [diff] [blame] | 1941 | DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name); |
Eric Dumazet | 1a35ca8 | 2009-12-15 05:47:03 +0000 | [diff] [blame] | 1942 | struct sk_buff *skb = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1943 | struct net_device *dev; |
Soheil Hassas Yeganeh | c14ac94 | 2016-04-02 23:08:12 -0400 | [diff] [blame] | 1944 | struct sockcm_cookie sockc; |
Eric Dumazet | 40d4e3d | 2009-07-21 21:57:59 +0000 | [diff] [blame] | 1945 | __be16 proto = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1946 | int err; |
Ben Greear | 3bdc0eb | 2012-02-11 15:39:30 +0000 | [diff] [blame] | 1947 | int extra_len = 0; |
YOSHIFUJI Hideaki | 1ce4f28 | 2007-02-09 23:25:10 +0900 | [diff] [blame] | 1948 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1949 | /* |
YOSHIFUJI Hideaki | 1ce4f28 | 2007-02-09 23:25:10 +0900 | [diff] [blame] | 1950 | * Get and verify the address. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1951 | */ |
| 1952 | |
Eric Dumazet | 40d4e3d | 2009-07-21 21:57:59 +0000 | [diff] [blame] | 1953 | if (saddr) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1954 | if (msg->msg_namelen < sizeof(struct sockaddr)) |
Eric Dumazet | 40d4e3d | 2009-07-21 21:57:59 +0000 | [diff] [blame] | 1955 | return -EINVAL; |
| 1956 | if (msg->msg_namelen == sizeof(struct sockaddr_pkt)) |
| 1957 | proto = saddr->spkt_protocol; |
| 1958 | } else |
| 1959 | return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1960 | |
| 1961 | /* |
YOSHIFUJI Hideaki | 1ce4f28 | 2007-02-09 23:25:10 +0900 | [diff] [blame] | 1962 | * Find the device first, so we can size-check against it. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1963 | */ |
| 1964 | |
danborkmann@iogearbox.net | de74e92 | 2012-06-10 08:59:28 +0000 | [diff] [blame] | 1965 | saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0; |
Eric Dumazet | 1a35ca8 | 2009-12-15 05:47:03 +0000 | [diff] [blame] | 1966 | retry: |
Eric Dumazet | 654d1f8 | 2009-11-02 10:43:32 +0100 | [diff] [blame] | 1967 | rcu_read_lock(); |
| 1968 | dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1969 | err = -ENODEV; |
| 1970 | if (dev == NULL) |
| 1971 | goto out_unlock; |
YOSHIFUJI Hideaki | 1ce4f28 | 2007-02-09 23:25:10 +0900 | [diff] [blame] | 1972 | |
David S. Miller | d5e76b0 | 2007-01-25 19:30:36 -0800 | [diff] [blame] | 1973 | err = -ENETDOWN; |
| 1974 | if (!(dev->flags & IFF_UP)) |
| 1975 | goto out_unlock; |
| 1976 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1977 | /* |
Eric Dumazet | 40d4e3d | 2009-07-21 21:57:59 +0000 | [diff] [blame] | 1978 | * You may not queue a frame bigger than the MTU. This is the lowest level |
| 1979 | * raw protocol and you must do your own fragmentation at this level. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1980 | */ |
YOSHIFUJI Hideaki | 1ce4f28 | 2007-02-09 23:25:10 +0900 | [diff] [blame] | 1981 | |
Ben Greear | 3bdc0eb | 2012-02-11 15:39:30 +0000 | [diff] [blame] | 1982 | if (unlikely(sock_flag(sk, SOCK_NOFCS))) { |
| 1983 | if (!netif_supports_nofcs(dev)) { |
| 1984 | err = -EPROTONOSUPPORT; |
| 1985 | goto out_unlock; |
| 1986 | } |
| 1987 | extra_len = 4; /* We're doing our own CRC */ |
| 1988 | } |
| 1989 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1990 | err = -EMSGSIZE; |
Ben Greear | 3bdc0eb | 2012-02-11 15:39:30 +0000 | [diff] [blame] | 1991 | if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1992 | goto out_unlock; |
| 1993 | |
Eric Dumazet | 1a35ca8 | 2009-12-15 05:47:03 +0000 | [diff] [blame] | 1994 | if (!skb) { |
| 1995 | size_t reserved = LL_RESERVED_SPACE(dev); |
Herbert Xu | 4ce4091 | 2011-11-18 02:20:05 +0000 | [diff] [blame] | 1996 | int tlen = dev->needed_tailroom; |
Eric Dumazet | 1a35ca8 | 2009-12-15 05:47:03 +0000 | [diff] [blame] | 1997 | unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1998 | |
Eric Dumazet | 1a35ca8 | 2009-12-15 05:47:03 +0000 | [diff] [blame] | 1999 | rcu_read_unlock(); |
Herbert Xu | 4ce4091 | 2011-11-18 02:20:05 +0000 | [diff] [blame] | 2000 | skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL); |
Eric Dumazet | 1a35ca8 | 2009-12-15 05:47:03 +0000 | [diff] [blame] | 2001 | if (skb == NULL) |
| 2002 | return -ENOBUFS; |
| 2003 | /* FIXME: Save some space for broken drivers that write a hard |
| 2004 | * header at transmission time by themselves. PPP is the notable |
| 2005 | * one here. This should really be fixed at the driver level. |
| 2006 | */ |
| 2007 | skb_reserve(skb, reserved); |
| 2008 | skb_reset_network_header(skb); |
YOSHIFUJI Hideaki | 1ce4f28 | 2007-02-09 23:25:10 +0900 | [diff] [blame] | 2009 | |
Eric Dumazet | 1a35ca8 | 2009-12-15 05:47:03 +0000 | [diff] [blame] | 2010 | /* Try to align data part correctly */ |
| 2011 | if (hhlen) { |
| 2012 | skb->data -= hhlen; |
| 2013 | skb->tail -= hhlen; |
| 2014 | if (len < hhlen) |
| 2015 | skb_reset_network_header(skb); |
| 2016 | } |
Al Viro | 6ce8e9c | 2014-04-06 21:25:44 -0400 | [diff] [blame] | 2017 | err = memcpy_from_msg(skb_put(skb, len), msg, len); |
Eric Dumazet | 1a35ca8 | 2009-12-15 05:47:03 +0000 | [diff] [blame] | 2018 | if (err) |
| 2019 | goto out_free; |
| 2020 | goto retry; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2021 | } |
| 2022 | |
Willem de Bruijn | 9ed988c | 2016-03-09 21:58:34 -0500 | [diff] [blame] | 2023 | if (!dev_validate_header(dev, skb->data, len)) { |
| 2024 | err = -EINVAL; |
| 2025 | goto out_unlock; |
| 2026 | } |
Daniel Borkmann | 3c70c13 | 2015-11-11 23:25:42 +0100 | [diff] [blame] | 2027 | if (len > (dev->mtu + dev->hard_header_len + extra_len) && |
| 2028 | !packet_extra_vlan_len_allowed(dev, skb)) { |
| 2029 | err = -EMSGSIZE; |
| 2030 | goto out_unlock; |
Ben Greear | 57f89bf | 2011-02-11 09:35:18 +0000 | [diff] [blame] | 2031 | } |
Eric Dumazet | 1a35ca8 | 2009-12-15 05:47:03 +0000 | [diff] [blame] | 2032 | |
Willem de Bruijn | 657a066 | 2018-07-06 10:12:56 -0400 | [diff] [blame] | 2033 | sockcm_init(&sockc, sk); |
Soheil Hassas Yeganeh | c14ac94 | 2016-04-02 23:08:12 -0400 | [diff] [blame] | 2034 | if (msg->msg_controllen) { |
| 2035 | err = sock_cmsg_send(sk, msg, &sockc); |
Soheil Hassas Yeganeh | f8e7718 | 2016-07-20 18:01:18 -0400 | [diff] [blame] | 2036 | if (unlikely(err)) |
Soheil Hassas Yeganeh | c14ac94 | 2016-04-02 23:08:12 -0400 | [diff] [blame] | 2037 | goto out_unlock; |
Soheil Hassas Yeganeh | c14ac94 | 2016-04-02 23:08:12 -0400 | [diff] [blame] | 2038 | } |
| 2039 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2040 | skb->protocol = proto; |
| 2041 | skb->dev = dev; |
| 2042 | skb->priority = sk->sk_priority; |
Eric Dumazet | 2d37a18 | 2009-10-01 19:14:46 +0000 | [diff] [blame] | 2043 | skb->mark = sk->sk_mark; |
Richard Cochran | 3d0ba8c | 2018-07-03 15:42:51 -0700 | [diff] [blame] | 2044 | skb->tstamp = sockc.transmit_time; |
Daniel Borkmann | bf84a010 | 2013-04-14 08:08:13 +0000 | [diff] [blame] | 2045 | |
Willem de Bruijn | 8f932f7 | 2018-12-17 12:24:00 -0500 | [diff] [blame] | 2046 | skb_setup_tx_timestamp(skb, sockc.tsflags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2047 | |
Ben Greear | 3bdc0eb | 2012-02-11 15:39:30 +0000 | [diff] [blame] | 2048 | if (unlikely(extra_len == 4)) |
| 2049 | skb->no_fcs = 1; |
| 2050 | |
Maxim Mikityanskiy | 75c6577 | 2019-02-21 12:40:01 +0000 | [diff] [blame] | 2051 | packet_parse_headers(skb, sock); |
Jason Wang | c1aad27 | 2013-03-25 20:19:57 +0000 | [diff] [blame] | 2052 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2053 | dev_queue_xmit(skb); |
Eric Dumazet | 654d1f8 | 2009-11-02 10:43:32 +0100 | [diff] [blame] | 2054 | rcu_read_unlock(); |
Eric Dumazet | 40d4e3d | 2009-07-21 21:57:59 +0000 | [diff] [blame] | 2055 | return len; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2056 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2057 | out_unlock: |
Eric Dumazet | 654d1f8 | 2009-11-02 10:43:32 +0100 | [diff] [blame] | 2058 | rcu_read_unlock(); |
Eric Dumazet | 1a35ca8 | 2009-12-15 05:47:03 +0000 | [diff] [blame] | 2059 | out_free: |
| 2060 | kfree_skb(skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2061 | return err; |
| 2062 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2063 | |
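| | /* Run the socket's attached BPF filter (if any) under rcu_read_lock(). |
| | * Returns the number of bytes of the packet to keep, or 0 to drop it; |
| | * "res" is the default returned when no filter is attached. |
| | */ |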
Alexei Starovoitov | ff936a0 | 2015-10-07 10:55:41 -0700 | [diff] [blame] | 2064 | static unsigned int run_filter(struct sk_buff *skb, |
| 2065 | const struct sock *sk, |
| 2066 | unsigned int res) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2067 | { |
| 2068 | struct sk_filter *filter; |
| 2069 | |
Eric Dumazet | 80f8f10 | 2011-01-18 07:46:52 +0000 | [diff] [blame] | 2070 | rcu_read_lock(); |
| 2071 | filter = rcu_dereference(sk->sk_filter); |
David S. Miller | dbcb585 | 2007-01-24 15:21:02 -0800 | [diff] [blame] | 2072 | if (filter != NULL) |
Alexei Starovoitov | ff936a0 | 2015-10-07 10:55:41 -0700 | [diff] [blame] | 2073 | res = bpf_prog_run_clear_cb(filter->prog, skb); |
Eric Dumazet | 80f8f10 | 2011-01-18 07:46:52 +0000 | [diff] [blame] | 2074 | rcu_read_unlock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2075 | |
David S. Miller | dbcb585 | 2007-01-24 15:21:02 -0800 | [diff] [blame] | 2076 | return res; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2077 | } |
| 2078 | |
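| | /* With PACKET_VNET_HDR enabled, recvmsg() prepends a virtio_net_hdr |
| | * describing the skb's offload state; this consumes sizeof(vnet_hdr) |
| | * bytes of the user buffer before the packet data is copied. |
| | */ |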
Willem de Bruijn | 16cc140 | 2016-02-03 18:02:14 -0500 | [diff] [blame] | 2079 | static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb, |
| 2080 | size_t *len) |
| 2081 | { |
| 2082 | struct virtio_net_hdr vnet_hdr; |
| 2083 | |
| 2084 | if (*len < sizeof(vnet_hdr)) |
| 2085 | return -EINVAL; |
| 2086 | *len -= sizeof(vnet_hdr); |
| 2087 | |
Willem de Bruijn | fd3a886 | 2018-06-06 11:23:01 -0400 | [diff] [blame] | 2088 | if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0)) |
Willem de Bruijn | 16cc140 | 2016-02-03 18:02:14 -0500 | [diff] [blame] | 2089 | return -EINVAL; |
| 2090 | |
| 2091 | return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr)); |
| 2092 | } |
| 2093 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2094 | /* |
Eric Dumazet | 62ab081 | 2010-12-06 20:50:09 +0000 | [diff] [blame] | 2095 | * This function does lazy skb cloning in the hope that most packets |
| 2096 | * are discarded by BPF. |
| 2097 | * |
| 2098 | * Note the tricky part: we DO mangle the shared skb! skb->data, skb->len |
| 2099 | * and skb->cb are mangled. This works because (and for as long as) |
| 2100 | * packets arriving here are owned by the current CPU. Output packets are |
| 2101 | * cloned by dev_queue_xmit_nit(), input packets are processed by net_bh |
Wang Hai | 0e4161d | 2021-03-24 14:19:31 +0800 | [diff] [blame] | 2102 | * sequentially, so if we return the skb to its original state on exit, |
Eric Dumazet | 62ab081 | 2010-12-06 20:50:09 +0000 | [diff] [blame] | 2103 | * we will not harm anyone. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2104 | */ |
| 2105 | |
Eric Dumazet | 40d4e3d | 2009-07-21 21:57:59 +0000 | [diff] [blame] | 2106 | static int packet_rcv(struct sk_buff *skb, struct net_device *dev, |
| 2107 | struct packet_type *pt, struct net_device *orig_dev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2108 | { |
| 2109 | struct sock *sk; |
| 2110 | struct sockaddr_ll *sll; |
| 2111 | struct packet_sock *po; |
Eric Dumazet | 40d4e3d | 2009-07-21 21:57:59 +0000 | [diff] [blame] | 2112 | u8 *skb_head = skb->data; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2113 | int skb_len = skb->len; |
David S. Miller | dbcb585 | 2007-01-24 15:21:02 -0800 | [diff] [blame] | 2114 | unsigned int snaplen, res; |
Weongyo Jeong | da37845 | 2016-04-14 14:10:04 -0700 | [diff] [blame] | 2115 | bool is_drop_n_account = false; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2116 | |
| 2117 | if (skb->pkt_type == PACKET_LOOPBACK) |
| 2118 | goto drop; |
| 2119 | |
| 2120 | sk = pt->af_packet_priv; |
| 2121 | po = pkt_sk(sk); |
| 2122 | |
Octavian Purdila | 09ad9bc | 2009-11-25 15:14:13 -0800 | [diff] [blame] | 2123 | if (!net_eq(dev_net(dev), sock_net(sk))) |
Denis V. Lunev | d12d01d | 2007-11-19 22:28:35 -0800 | [diff] [blame] | 2124 | goto drop; |
| 2125 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2126 | skb->dev = dev; |
| 2127 | |
Eyal Birger | d549699 | 2020-11-21 08:28:17 +0200 | [diff] [blame] | 2128 | if (dev_has_header(dev)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2129 | /* The device has an explicit notion of ll header, |
Eric Dumazet | 62ab081 | 2010-12-06 20:50:09 +0000 | [diff] [blame] | 2130 | * exported to higher levels. |
| 2131 | * |
| 2132 | * Otherwise, the device hides details of its frame |
| 2133 | * structure, so that the corresponding packet head is |
| 2134 | * never delivered to the user. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2135 | */ |
| 2136 | if (sk->sk_type != SOCK_DGRAM) |
Arnaldo Carvalho de Melo | 98e399f | 2007-03-19 15:33:04 -0700 | [diff] [blame] | 2137 | skb_push(skb, skb->data - skb_mac_header(skb)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2138 | else if (skb->pkt_type == PACKET_OUTGOING) { |
| 2139 | /* Special case: outgoing packets have ll header at head */ |
Arnaldo Carvalho de Melo | bbe735e | 2007-03-10 22:16:10 -0300 | [diff] [blame] | 2140 | skb_pull(skb, skb_network_offset(skb)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2141 | } |
| 2142 | } |
| 2143 | |
| 2144 | snaplen = skb->len; |
| 2145 | |
David S. Miller | dbcb585 | 2007-01-24 15:21:02 -0800 | [diff] [blame] | 2146 | res = run_filter(skb, sk, snaplen); |
| 2147 | if (!res) |
Dmitry Mishin | fda9ef5 | 2006-08-31 15:28:39 -0700 | [diff] [blame] | 2148 | goto drop_n_restore; |
David S. Miller | dbcb585 | 2007-01-24 15:21:02 -0800 | [diff] [blame] | 2149 | if (snaplen > res) |
| 2150 | snaplen = res; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2151 | |
Eric Dumazet | 0fd7bac | 2011-12-21 07:11:44 +0000 | [diff] [blame] | 2152 | if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2153 | goto drop_n_acct; |
| 2154 | |
| 2155 | if (skb_shared(skb)) { |
| 2156 | struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); |
| 2157 | if (nskb == NULL) |
| 2158 | goto drop_n_acct; |
| 2159 | |
| 2160 | if (skb_head != skb->data) { |
| 2161 | skb->data = skb_head; |
| 2162 | skb->len = skb_len; |
| 2163 | } |
Eric Dumazet | abc4e4f | 2012-04-19 02:24:42 +0000 | [diff] [blame] | 2164 | consume_skb(skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2165 | skb = nskb; |
| 2166 | } |
| 2167 | |
Eyal Birger | b4772ef | 2015-03-01 14:58:29 +0200 | [diff] [blame] | 2168 | sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8); |
Herbert Xu | ffbc611 | 2007-02-04 23:33:10 -0800 | [diff] [blame] | 2169 | |
| 2170 | sll = &PACKET_SKB_CB(skb)->sa.ll; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2171 | sll->sll_hatype = dev->type; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2172 | sll->sll_pkttype = skb->pkt_type; |
Peter P Waskiewicz Jr | 8032b46 | 2007-11-10 22:03:25 -0800 | [diff] [blame] | 2173 | if (unlikely(po->origdev)) |
Peter P. Waskiewicz Jr | 80feaac | 2007-04-20 16:05:39 -0700 | [diff] [blame] | 2174 | sll->sll_ifindex = orig_dev->ifindex; |
| 2175 | else |
| 2176 | sll->sll_ifindex = dev->ifindex; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2177 | |
Stephen Hemminger | b95cce3 | 2007-09-26 22:13:38 -0700 | [diff] [blame] | 2178 | sll->sll_halen = dev_parse_header(skb, sll->sll_addr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2179 | |
Eyal Birger | 2472d76 | 2015-03-01 14:58:28 +0200 | [diff] [blame] | 2180 | /* sll->sll_family and sll->sll_protocol are set in packet_recvmsg(). |
| 2181 | * Use their space for storing the original skb length. |
| 2182 | */ |
| 2183 | PACKET_SKB_CB(skb)->sa.origlen = skb->len; |
Herbert Xu | 8dc4194 | 2007-02-04 23:31:32 -0800 | [diff] [blame] | 2184 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2185 | if (pskb_trim(skb, snaplen)) |
| 2186 | goto drop_n_acct; |
| 2187 | |
| 2188 | skb_set_owner_r(skb, sk); |
| 2189 | skb->dev = NULL; |
Eric Dumazet | adf3090 | 2009-06-02 05:19:30 +0000 | [diff] [blame] | 2190 | skb_dst_drop(skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2191 | |
Phil Oester | 84531c2 | 2005-07-12 11:57:52 -0700 | [diff] [blame] | 2192 | /* drop conntrack reference */ |
Florian Westphal | 895b5c9 | 2019-09-29 20:54:03 +0200 | [diff] [blame] | 2193 | nf_reset_ct(skb); |
Phil Oester | 84531c2 | 2005-07-12 11:57:52 -0700 | [diff] [blame] | 2194 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2195 | spin_lock(&sk->sk_receive_queue.lock); |
Daniel Borkmann | ee80fbf | 2013-04-19 06:12:29 +0000 | [diff] [blame] | 2196 | po->stats.stats1.tp_packets++; |
Eyal Birger | 3bc3b96 | 2015-03-01 14:58:30 +0200 | [diff] [blame] | 2197 | sock_skb_set_dropcount(sk, skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2198 | __skb_queue_tail(&sk->sk_receive_queue, skb); |
| 2199 | spin_unlock(&sk->sk_receive_queue.lock); |
David S. Miller | 676d236 | 2014-04-11 16:15:36 -0400 | [diff] [blame] | 2200 | sk->sk_data_ready(sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2201 | return 0; |
| 2202 | |
| 2203 | drop_n_acct: |
Weongyo Jeong | da37845 | 2016-04-14 14:10:04 -0700 | [diff] [blame] | 2204 | is_drop_n_account = true; |
Eric Dumazet | 8e8e295 | 2019-06-12 09:52:30 -0700 | [diff] [blame] | 2205 | atomic_inc(&po->tp_drops); |
Willem de Bruijn | 7091fbd | 2011-09-30 10:38:28 +0000 | [diff] [blame] | 2206 | atomic_inc(&sk->sk_drops); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2207 | |
| 2208 | drop_n_restore: |
| 2209 | if (skb_head != skb->data && skb_shared(skb)) { |
| 2210 | skb->data = skb_head; |
| 2211 | skb->len = skb_len; |
| 2212 | } |
| 2213 | drop: |
Weongyo Jeong | da37845 | 2016-04-14 14:10:04 -0700 | [diff] [blame] | 2214 | if (!is_drop_n_account) |
| 2215 | consume_skb(skb); |
| 2216 | else |
| 2217 | kfree_skb(skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2218 | return 0; |
| 2219 | } |
| 2220 | |
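| | /* Ring-buffer receive path (PACKET_RX_RING): instead of queueing the |
| | * skb, copy the snapshot into the current frame of the mmap()ed ring, |
| | * fill in the tpacket header plus sockaddr_ll, and flip the slot status |
| | * so userspace can consume it without a syscall per packet. |
| | * |
| | * Illustrative userspace consumer sketch (assumes a TPACKET_V2 ring of |
| | * frame_nr frames of frame_size bytes, mmap()ed at "ring"): |
| | * |
| | *	struct tpacket2_hdr *hdr = (void *)(ring + i * frame_size); |
| | * |
| | *	while (hdr->tp_status & TP_STATUS_USER) { |
| | *		handle_frame((char *)hdr + hdr->tp_mac, hdr->tp_snaplen); |
| | *		hdr->tp_status = TP_STATUS_KERNEL; |
| | *		i = (i + 1) % frame_nr; |
| | *		hdr = (void *)(ring + i * frame_size); |
| | *	} |
| | */ |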
Eric Dumazet | 40d4e3d | 2009-07-21 21:57:59 +0000 | [diff] [blame] | 2221 | static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, |
| 2222 | struct packet_type *pt, struct net_device *orig_dev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2223 | { |
| 2224 | struct sock *sk; |
| 2225 | struct packet_sock *po; |
| 2226 | struct sockaddr_ll *sll; |
Daniel Borkmann | 184f489 | 2013-04-16 01:57:46 +0000 | [diff] [blame] | 2227 | union tpacket_uhdr h; |
Eric Dumazet | 40d4e3d | 2009-07-21 21:57:59 +0000 | [diff] [blame] | 2228 | u8 *skb_head = skb->data; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2229 | int skb_len = skb->len; |
David S. Miller | dbcb585 | 2007-01-24 15:21:02 -0800 | [diff] [blame] | 2230 | unsigned int snaplen, res; |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 2231 | unsigned long status = TP_STATUS_USER; |
Or Cohen | acf69c9 | 2020-09-03 21:05:28 -0700 | [diff] [blame] | 2232 | unsigned short macoff, hdrlen; |
| 2233 | unsigned int netoff; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2234 | struct sk_buff *copy_skb = NULL; |
Arnd Bergmann | d413fcb | 2017-11-27 10:09:24 +0100 | [diff] [blame] | 2235 | struct timespec64 ts; |
Daniel Borkmann | b9c32fb | 2013-04-23 00:39:31 +0000 | [diff] [blame] | 2236 | __u32 ts_status; |
Weongyo Jeong | da37845 | 2016-04-14 14:10:04 -0700 | [diff] [blame] | 2237 | bool is_drop_n_account = false; |
Willem de Bruijn | 61fad68 | 2020-03-13 12:18:09 -0400 | [diff] [blame] | 2238 | unsigned int slot_id = 0; |
Benjamin Poirier | edbd58b | 2017-08-28 14:29:41 -0400 | [diff] [blame] | 2239 | bool do_vnet = false; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2240 | |
Atzm Watanabe | 5184635 | 2013-12-17 22:53:32 +0900 | [diff] [blame] | 2241 | /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT. |
| 2242 | * We may add members to them up to the current aligned size without forcing |
| 2243 | * userspace to call getsockopt(..., PACKET_HDRLEN, ...). |
| 2244 | */ |
| 2245 | BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32); |
| 2246 | BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48); |
| 2247 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2248 | if (skb->pkt_type == PACKET_LOOPBACK) |
| 2249 | goto drop; |
| 2250 | |
| 2251 | sk = pt->af_packet_priv; |
| 2252 | po = pkt_sk(sk); |
| 2253 | |
Octavian Purdila | 09ad9bc | 2009-11-25 15:14:13 -0800 | [diff] [blame] | 2254 | if (!net_eq(dev_net(dev), sock_net(sk))) |
Denis V. Lunev | d12d01d | 2007-11-19 22:28:35 -0800 | [diff] [blame] | 2255 | goto drop; |
| 2256 | |
Eyal Birger | d549699 | 2020-11-21 08:28:17 +0200 | [diff] [blame] | 2257 | if (dev_has_header(dev)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2258 | if (sk->sk_type != SOCK_DGRAM) |
Arnaldo Carvalho de Melo | 98e399f | 2007-03-19 15:33:04 -0700 | [diff] [blame] | 2259 | skb_push(skb, skb->data - skb_mac_header(skb)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2260 | else if (skb->pkt_type == PACKET_OUTGOING) { |
| 2261 | /* Special case: outgoing packets have ll header at head */ |
Arnaldo Carvalho de Melo | bbe735e | 2007-03-10 22:16:10 -0300 | [diff] [blame] | 2262 | skb_pull(skb, skb_network_offset(skb)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2263 | } |
| 2264 | } |
| 2265 | |
| 2266 | snaplen = skb->len; |
| 2267 | |
David S. Miller | dbcb585 | 2007-01-24 15:21:02 -0800 | [diff] [blame] | 2268 | res = run_filter(skb, sk, snaplen); |
| 2269 | if (!res) |
Dmitry Mishin | fda9ef5 | 2006-08-31 15:28:39 -0700 | [diff] [blame] | 2270 | goto drop_n_restore; |
Alexander Drozdov | 68c2e5d | 2015-03-23 09:11:12 +0300 | [diff] [blame] | 2271 | |
Eric Dumazet | 2c51c62 | 2019-06-12 09:52:31 -0700 | [diff] [blame] | 2272 | /* If we are flooded, just give up */ |
| 2273 | if (__packet_rcv_has_room(po, skb) == ROOM_NONE) { |
| 2274 | atomic_inc(&po->tp_drops); |
| 2275 | goto drop_n_restore; |
| 2276 | } |
| 2277 | |
Alexander Drozdov | 68c2e5d | 2015-03-23 09:11:12 +0300 | [diff] [blame] | 2278 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
| 2279 | status |= TP_STATUS_CSUMNOTREADY; |
Alexander Drozdov | 682f048 | 2015-03-23 09:11:13 +0300 | [diff] [blame] | 2280 | else if (skb->pkt_type != PACKET_OUTGOING && |
| 2281 | (skb->ip_summed == CHECKSUM_COMPLETE || |
| 2282 | skb_csum_unnecessary(skb))) |
| 2283 | status |= TP_STATUS_CSUM_VALID; |
Alexander Drozdov | 68c2e5d | 2015-03-23 09:11:12 +0300 | [diff] [blame] | 2284 | |
David S. Miller | dbcb585 | 2007-01-24 15:21:02 -0800 | [diff] [blame] | 2285 | if (snaplen > res) |
| 2286 | snaplen = res; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2287 | |
| 2288 | if (sk->sk_type == SOCK_DGRAM) { |
Patrick McHardy | 8913336a | 2008-07-18 18:05:19 -0700 | [diff] [blame] | 2289 | macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 + |
| 2290 | po->tp_reserve; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2291 | } else { |
Eric Dumazet | 95c9617 | 2012-04-15 05:58:06 +0000 | [diff] [blame] | 2292 | unsigned int maclen = skb_network_offset(skb); |
Patrick McHardy | bbd6ef8 | 2008-07-14 22:50:15 -0700 | [diff] [blame] | 2293 | netoff = TPACKET_ALIGN(po->tp_hdrlen + |
Patrick McHardy | 8913336a | 2008-07-18 18:05:19 -0700 | [diff] [blame] | 2294 | (maclen < 16 ? 16 : maclen)) + |
Willem de Bruijn | 58d19b1 | 2016-02-03 18:02:15 -0500 | [diff] [blame] | 2295 | po->tp_reserve; |
Benjamin Poirier | edbd58b | 2017-08-28 14:29:41 -0400 | [diff] [blame] | 2296 | if (po->has_vnet_hdr) { |
Willem de Bruijn | 58d19b1 | 2016-02-03 18:02:15 -0500 | [diff] [blame] | 2297 | netoff += sizeof(struct virtio_net_hdr); |
Benjamin Poirier | edbd58b | 2017-08-28 14:29:41 -0400 | [diff] [blame] | 2298 | do_vnet = true; |
| 2299 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2300 | macoff = netoff - maclen; |
| 2301 | } |
Or Cohen | acf69c9 | 2020-09-03 21:05:28 -0700 | [diff] [blame] | 2302 | if (netoff > USHRT_MAX) { |
| 2303 | atomic_inc(&po->tp_drops); |
| 2304 | goto drop_n_restore; |
| 2305 | } |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 2306 | if (po->tp_version <= TPACKET_V2) { |
| 2307 | if (macoff + snaplen > po->rx_ring.frame_size) { |
| 2308 | if (po->copy_thresh && |
Eric Dumazet | 0fd7bac | 2011-12-21 07:11:44 +0000 | [diff] [blame] | 2309 | atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) { |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 2310 | if (skb_shared(skb)) { |
| 2311 | copy_skb = skb_clone(skb, GFP_ATOMIC); |
| 2312 | } else { |
| 2313 | copy_skb = skb_get(skb); |
| 2314 | skb_head = skb->data; |
| 2315 | } |
| 2316 | if (copy_skb) |
| 2317 | skb_set_owner_r(copy_skb, sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2318 | } |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 2319 | snaplen = po->rx_ring.frame_size - macoff; |
Benjamin Poirier | edbd58b | 2017-08-28 14:29:41 -0400 | [diff] [blame] | 2320 | if ((int)snaplen < 0) { |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 2321 | snaplen = 0; |
Benjamin Poirier | edbd58b | 2017-08-28 14:29:41 -0400 | [diff] [blame] | 2322 | do_vnet = false; |
| 2323 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2324 | } |
Eric Dumazet | dc80811 | 2014-08-15 09:16:04 -0700 | [diff] [blame] | 2325 | } else if (unlikely(macoff + snaplen > |
| 2326 | GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) { |
| 2327 | u32 nval; |
| 2328 | |
| 2329 | nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff; |
| 2330 | pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n", |
| 2331 | snaplen, nval, macoff); |
| 2332 | snaplen = nval; |
| 2333 | if (unlikely((int)snaplen < 0)) { |
| 2334 | snaplen = 0; |
| 2335 | macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len; |
Benjamin Poirier | edbd58b | 2017-08-28 14:29:41 -0400 | [diff] [blame] | 2336 | do_vnet = false; |
Eric Dumazet | dc80811 | 2014-08-15 09:16:04 -0700 | [diff] [blame] | 2337 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2338 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2339 | spin_lock(&sk->sk_receive_queue.lock); |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 2340 | h.raw = packet_current_rx_frame(po, skb, |
| 2341 | TP_STATUS_KERNEL, (macoff+snaplen)); |
Patrick McHardy | bbd6ef8 | 2008-07-14 22:50:15 -0700 | [diff] [blame] | 2342 | if (!h.raw) |
Willem de Bruijn | 58d19b1 | 2016-02-03 18:02:15 -0500 | [diff] [blame] | 2343 | goto drop_n_account; |
Willem de Bruijn | 46e4c42 | 2020-03-09 11:34:35 -0400 | [diff] [blame] | 2344 | |
Willem de Bruijn | 61fad68 | 2020-03-13 12:18:09 -0400 | [diff] [blame] | 2345 | if (po->tp_version <= TPACKET_V2) { |
| 2346 | slot_id = po->rx_ring.head; |
| 2347 | if (test_bit(slot_id, po->rx_ring.rx_owner_map)) |
| 2348 | goto drop_n_account; |
| 2349 | __set_bit(slot_id, po->rx_ring.rx_owner_map); |
| 2350 | } |
| 2351 | |
Willem de Bruijn | 46e4c42 | 2020-03-09 11:34:35 -0400 | [diff] [blame] | 2352 | if (do_vnet && |
| 2353 | virtio_net_hdr_from_skb(skb, h.raw + macoff - |
| 2354 | sizeof(struct virtio_net_hdr), |
John Ogness | 88fd1cb | 2020-08-13 21:45:25 +0206 | [diff] [blame] | 2355 | vio_le(), true, 0)) { |
| 2356 | if (po->tp_version == TPACKET_V3) |
| 2357 | prb_clear_blk_fill_status(&po->rx_ring); |
Willem de Bruijn | 46e4c42 | 2020-03-09 11:34:35 -0400 | [diff] [blame] | 2358 | goto drop_n_account; |
John Ogness | 88fd1cb | 2020-08-13 21:45:25 +0206 | [diff] [blame] | 2359 | } |
Willem de Bruijn | 46e4c42 | 2020-03-09 11:34:35 -0400 | [diff] [blame] | 2360 | |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 2361 | if (po->tp_version <= TPACKET_V2) { |
| 2362 | packet_increment_rx_head(po, &po->rx_ring); |
| 2363 | /* |
| 2364 | * LOSING will be reported until you read the stats, |
| 2365 | * because it's COR - Clear On Read. |
| 2366 | * Anyway, this is done for V1/V2 only, as V3 doesn't need it |
| 2367 | * at the packet level. |
| 2368 | */ |
Eric Dumazet | 8e8e295 | 2019-06-12 09:52:30 -0700 | [diff] [blame] | 2369 | if (atomic_read(&po->tp_drops)) |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 2370 | status |= TP_STATUS_LOSING; |
| 2371 | } |
Eric Dumazet | 945d015 | 2018-06-21 14:16:02 -0700 | [diff] [blame] | 2372 | |
Daniel Borkmann | ee80fbf | 2013-04-19 06:12:29 +0000 | [diff] [blame] | 2373 | po->stats.stats1.tp_packets++; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2374 | if (copy_skb) { |
| 2375 | status |= TP_STATUS_COPY; |
| 2376 | __skb_queue_tail(&sk->sk_receive_queue, copy_skb); |
| 2377 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2378 | spin_unlock(&sk->sk_receive_queue.lock); |
| 2379 | |
Patrick McHardy | bbd6ef8 | 2008-07-14 22:50:15 -0700 | [diff] [blame] | 2380 | skb_copy_bits(skb, 0, h.raw + macoff, snaplen); |
Daniel Borkmann | b9c32fb | 2013-04-23 00:39:31 +0000 | [diff] [blame] | 2381 | |
Richard Sanger | 171c3b1 | 2021-05-12 13:31:22 +1200 | [diff] [blame] | 2382 | /* Always timestamp; prefer an existing software timestamp taken |
| 2383 | * closer to the time of capture. |
| 2384 | */ |
| 2385 | ts_status = tpacket_get_timestamp(skb, &ts, |
| 2386 | po->tp_tstamp | SOF_TIMESTAMPING_SOFTWARE); |
| 2387 | if (!ts_status) |
Arnd Bergmann | d413fcb | 2017-11-27 10:09:24 +0100 | [diff] [blame] | 2388 | ktime_get_real_ts64(&ts); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2389 | |
Daniel Borkmann | b9c32fb | 2013-04-23 00:39:31 +0000 | [diff] [blame] | 2390 | status |= ts_status; |
| 2391 | |
Patrick McHardy | bbd6ef8 | 2008-07-14 22:50:15 -0700 | [diff] [blame] | 2392 | switch (po->tp_version) { |
| 2393 | case TPACKET_V1: |
| 2394 | h.h1->tp_len = skb->len; |
| 2395 | h.h1->tp_snaplen = snaplen; |
| 2396 | h.h1->tp_mac = macoff; |
| 2397 | h.h1->tp_net = netoff; |
Daniel Borkmann | 4b457bd | 2013-04-16 01:29:11 +0000 | [diff] [blame] | 2398 | h.h1->tp_sec = ts.tv_sec; |
| 2399 | h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC; |
Patrick McHardy | bbd6ef8 | 2008-07-14 22:50:15 -0700 | [diff] [blame] | 2400 | hdrlen = sizeof(*h.h1); |
| 2401 | break; |
| 2402 | case TPACKET_V2: |
| 2403 | h.h2->tp_len = skb->len; |
| 2404 | h.h2->tp_snaplen = snaplen; |
| 2405 | h.h2->tp_mac = macoff; |
| 2406 | h.h2->tp_net = netoff; |
Patrick McHardy | bbd6ef8 | 2008-07-14 22:50:15 -0700 | [diff] [blame] | 2407 | h.h2->tp_sec = ts.tv_sec; |
| 2408 | h.h2->tp_nsec = ts.tv_nsec; |
Jiri Pirko | df8a39d | 2015-01-13 17:13:44 +0100 | [diff] [blame] | 2409 | if (skb_vlan_tag_present(skb)) { |
| 2410 | h.h2->tp_vlan_tci = skb_vlan_tag_get(skb); |
Atzm Watanabe | a0cdfcf | 2013-12-17 22:53:40 +0900 | [diff] [blame] | 2411 | h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto); |
| 2412 | status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; |
Ben Greear | a3bcc23 | 2011-06-01 06:49:10 +0000 | [diff] [blame] | 2413 | } else { |
| 2414 | h.h2->tp_vlan_tci = 0; |
Atzm Watanabe | a0cdfcf | 2013-12-17 22:53:40 +0900 | [diff] [blame] | 2415 | h.h2->tp_vlan_tpid = 0; |
Ben Greear | a3bcc23 | 2011-06-01 06:49:10 +0000 | [diff] [blame] | 2416 | } |
Atzm Watanabe | e4d26f4 | 2013-12-17 22:53:36 +0900 | [diff] [blame] | 2417 | memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding)); |
Patrick McHardy | bbd6ef8 | 2008-07-14 22:50:15 -0700 | [diff] [blame] | 2418 | hdrlen = sizeof(*h.h2); |
| 2419 | break; |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 2420 | case TPACKET_V3: |
| 2421 | /* tp_nxt_offset,vlan are already populated above. |
| 2422 | * So DONT clear those fields here |
| 2423 | */ |
| 2424 | h.h3->tp_status |= status; |
| 2425 | h.h3->tp_len = skb->len; |
| 2426 | h.h3->tp_snaplen = snaplen; |
| 2427 | h.h3->tp_mac = macoff; |
| 2428 | h.h3->tp_net = netoff; |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 2429 | h.h3->tp_sec = ts.tv_sec; |
| 2430 | h.h3->tp_nsec = ts.tv_nsec; |
Atzm Watanabe | e4d26f4 | 2013-12-17 22:53:36 +0900 | [diff] [blame] | 2431 | memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding)); |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 2432 | hdrlen = sizeof(*h.h3); |
| 2433 | break; |
Patrick McHardy | bbd6ef8 | 2008-07-14 22:50:15 -0700 | [diff] [blame] | 2434 | default: |
| 2435 | BUG(); |
| 2436 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2437 | |
Patrick McHardy | bbd6ef8 | 2008-07-14 22:50:15 -0700 | [diff] [blame] | 2438 | sll = h.raw + TPACKET_ALIGN(hdrlen); |
Stephen Hemminger | b95cce3 | 2007-09-26 22:13:38 -0700 | [diff] [blame] | 2439 | sll->sll_halen = dev_parse_header(skb, sll->sll_addr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2440 | sll->sll_family = AF_PACKET; |
| 2441 | sll->sll_hatype = dev->type; |
| 2442 | sll->sll_protocol = skb->protocol; |
| 2443 | sll->sll_pkttype = skb->pkt_type; |
Peter P Waskiewicz Jr | 8032b46 | 2007-11-10 22:03:25 -0800 | [diff] [blame] | 2444 | if (unlikely(po->origdev)) |
Peter P. Waskiewicz Jr | 80feaac | 2007-04-20 16:05:39 -0700 | [diff] [blame] | 2445 | sll->sll_ifindex = orig_dev->ifindex; |
| 2446 | else |
| 2447 | sll->sll_ifindex = dev->ifindex; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2448 | |
Ralf Baechle | e16aa20 | 2006-12-07 00:11:33 -0800 | [diff] [blame] | 2449 | smp_mb(); |
Daniel Borkmann | f0d4eb2 | 2014-01-19 11:46:53 +0100 | [diff] [blame] | 2450 | |
Changli Gao | f6dafa9 | 2010-12-07 04:26:16 +0000 | [diff] [blame] | 2451 | #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 |
Daniel Borkmann | f0d4eb2 | 2014-01-19 11:46:53 +0100 | [diff] [blame] | 2452 | if (po->tp_version <= TPACKET_V2) { |
Changli Gao | 0af55bb | 2010-12-01 02:52:20 +0000 | [diff] [blame] | 2453 | u8 *start, *end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2454 | |
Daniel Borkmann | f0d4eb2 | 2014-01-19 11:46:53 +0100 | [diff] [blame] | 2455 | end = (u8 *) PAGE_ALIGN((unsigned long) h.raw + |
| 2456 | macoff + snaplen); |
| 2457 | |
| 2458 | for (start = h.raw; start < end; start += PAGE_SIZE) |
| 2459 | flush_dcache_page(pgv_to_page(start)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2460 | } |
Daniel Borkmann | f0d4eb2 | 2014-01-19 11:46:53 +0100 | [diff] [blame] | 2461 | smp_wmb(); |
Changli Gao | f6dafa9 | 2010-12-07 04:26:16 +0000 | [diff] [blame] | 2462 | #endif |
Daniel Borkmann | f0d4eb2 | 2014-01-19 11:46:53 +0100 | [diff] [blame] | 2463 | |
Dan Collins | da413ee | 2014-12-19 16:49:25 +1300 | [diff] [blame] | 2464 | if (po->tp_version <= TPACKET_V2) { |
Willem de Bruijn | 61fad68 | 2020-03-13 12:18:09 -0400 | [diff] [blame] | 2465 | spin_lock(&sk->sk_receive_queue.lock); |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 2466 | __packet_set_status(po, h.raw, status); |
Willem de Bruijn | 61fad68 | 2020-03-13 12:18:09 -0400 | [diff] [blame] | 2467 | __clear_bit(slot_id, po->rx_ring.rx_owner_map); |
| 2468 | spin_unlock(&sk->sk_receive_queue.lock); |
Dan Collins | da413ee | 2014-12-19 16:49:25 +1300 | [diff] [blame] | 2469 | sk->sk_data_ready(sk); |
John Ogness | 88fd1cb | 2020-08-13 21:45:25 +0206 | [diff] [blame] | 2470 | } else if (po->tp_version == TPACKET_V3) { |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 2471 | prb_clear_blk_fill_status(&po->rx_ring); |
Dan Collins | da413ee | 2014-12-19 16:49:25 +1300 | [diff] [blame] | 2472 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2473 | |
| 2474 | drop_n_restore: |
| 2475 | if (skb_head != skb->data && skb_shared(skb)) { |
| 2476 | skb->data = skb_head; |
| 2477 | skb->len = skb_len; |
| 2478 | } |
| 2479 | drop: |
Weongyo Jeong | da37845 | 2016-04-14 14:10:04 -0700 | [diff] [blame] | 2480 | if (!is_drop_n_account) |
| 2481 | consume_skb(skb); |
| 2482 | else |
| 2483 | kfree_skb(skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2484 | return 0; |
| 2485 | |
Willem de Bruijn | 58d19b1 | 2016-02-03 18:02:15 -0500 | [diff] [blame] | 2486 | drop_n_account: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2487 | spin_unlock(&sk->sk_receive_queue.lock); |
Eric Dumazet | 8e8e295 | 2019-06-12 09:52:30 -0700 | [diff] [blame] | 2488 | atomic_inc(&po->tp_drops); |
| 2489 | is_drop_n_account = true; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2490 | |
David S. Miller | 676d236 | 2014-04-11 16:15:36 -0400 | [diff] [blame] | 2491 | sk->sk_data_ready(sk); |
Wei Yongjun | acb5d75 | 2009-02-25 00:36:42 +0000 | [diff] [blame] | 2492 | kfree_skb(copy_skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2493 | goto drop_n_restore; |
| 2494 | } |
| 2495 | |
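| | /* Destructor for skbs built from the TX ring: hand the frame back to |
| | * userspace as TP_STATUS_AVAILABLE (plus timestamp status bits) and |
| | * wake a sender blocked in tpacket_snd() once nothing is pending. |
| | */ |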
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2496 | static void tpacket_destruct_skb(struct sk_buff *skb) |
| 2497 | { |
| 2498 | struct packet_sock *po = pkt_sk(skb->sk); |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2499 | |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2500 | if (likely(po->tx_ring.pg_vec)) { |
Daniel Borkmann | f0d4eb2 | 2014-01-19 11:46:53 +0100 | [diff] [blame] | 2501 | void *ph; |
Daniel Borkmann | b9c32fb | 2013-04-23 00:39:31 +0000 | [diff] [blame] | 2502 | __u32 ts; |
| 2503 | |
Willem de Bruijn | 5cd8d46 | 2018-11-20 13:00:18 -0500 | [diff] [blame] | 2504 | ph = skb_zcopy_get_nouarg(skb); |
Daniel Borkmann | b013840 | 2014-01-15 16:25:36 +0100 | [diff] [blame] | 2505 | packet_dec_pending(&po->tx_ring); |
Daniel Borkmann | b9c32fb | 2013-04-23 00:39:31 +0000 | [diff] [blame] | 2506 | |
| 2507 | ts = __packet_set_timestamp(po, ph, skb); |
| 2508 | __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts); |
Neil Horman | 89ed5b5 | 2019-06-25 17:57:49 -0400 | [diff] [blame] | 2509 | |
| 2510 | if (!packet_read_pending(&po->tx_ring)) |
| 2511 | complete(&po->skb_completion); |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2512 | } |
| 2513 | |
| 2514 | sock_wfree(skb); |
| 2515 | } |
| 2516 | |
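| | /* Sanity-check a user-supplied virtio_net_hdr: grow hdr_len so it |
| | * covers csum_start/csum_offset when checksum offload is requested, |
| | * and reject headers claiming more bytes than the packet carries. |
| | */ |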
Willem de Bruijn | 16cc140 | 2016-02-03 18:02:14 -0500 | [diff] [blame] | 2517 | static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len) |
| 2518 | { |
Willem de Bruijn | 16cc140 | 2016-02-03 18:02:14 -0500 | [diff] [blame] | 2519 | if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && |
| 2520 | (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) + |
| 2521 | __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 > |
| 2522 | __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len))) |
| 2523 | vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(), |
| 2524 | __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) + |
| 2525 | __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2); |
| 2526 | |
| 2527 | if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len) |
| 2528 | return -EINVAL; |
| 2529 | |
Willem de Bruijn | 16cc140 | 2016-02-03 18:02:14 -0500 | [diff] [blame] | 2530 | return 0; |
| 2531 | } |
| 2532 | |
| 2533 | static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len, |
| 2534 | struct virtio_net_hdr *vnet_hdr) |
| 2535 | { |
Willem de Bruijn | 16cc140 | 2016-02-03 18:02:14 -0500 | [diff] [blame] | 2536 | if (*len < sizeof(*vnet_hdr)) |
| 2537 | return -EINVAL; |
| 2538 | *len -= sizeof(*vnet_hdr); |
| 2539 | |
Al Viro | cbbd26b | 2016-11-01 22:09:04 -0400 | [diff] [blame] | 2540 | if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter)) |
Willem de Bruijn | 16cc140 | 2016-02-03 18:02:14 -0500 | [diff] [blame] | 2541 | return -EFAULT; |
| 2542 | |
| 2543 | return __packet_snd_vnet_parse(vnet_hdr, *len); |
| 2544 | } |
| 2545 | |
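| | /* Build an skb from one TX ring frame: the link-layer header is copied |
| | * (and validated) into the skb head, while the payload is attached page |
| | * by page as fragments referencing the mmap()ed ring itself. |
| | */ |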
Eric Dumazet | 40d4e3d | 2009-07-21 21:57:59 +0000 | [diff] [blame] | 2546 | static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, |
Willem de Bruijn | 8d39b4a | 2016-02-03 18:02:16 -0500 | [diff] [blame] | 2547 | void *frame, struct net_device *dev, void *data, int tp_len, |
Soheil Hassas Yeganeh | c14ac94 | 2016-04-02 23:08:12 -0400 | [diff] [blame] | 2548 | __be16 proto, unsigned char *addr, int hlen, int copylen, |
| 2549 | const struct sockcm_cookie *sockc) |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2550 | { |
Daniel Borkmann | 184f489 | 2013-04-16 01:57:46 +0000 | [diff] [blame] | 2551 | union tpacket_uhdr ph; |
Willem de Bruijn | 8d39b4a | 2016-02-03 18:02:16 -0500 | [diff] [blame] | 2552 | int to_write, offset, len, nr_frags, len_max; |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2553 | struct socket *sock = po->sk.sk_socket; |
| 2554 | struct page *page; |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2555 | int err; |
| 2556 | |
| 2557 | ph.raw = frame; |
| 2558 | |
| 2559 | skb->protocol = proto; |
| 2560 | skb->dev = dev; |
| 2561 | skb->priority = po->sk.sk_priority; |
Eric Dumazet | 2d37a18 | 2009-10-01 19:14:46 +0000 | [diff] [blame] | 2562 | skb->mark = po->sk.sk_mark; |
Richard Cochran | 3d0ba8c | 2018-07-03 15:42:51 -0700 | [diff] [blame] | 2563 | skb->tstamp = sockc->transmit_time; |
Willem de Bruijn | 8f932f7 | 2018-12-17 12:24:00 -0500 | [diff] [blame] | 2564 | skb_setup_tx_timestamp(skb, sockc->tsflags); |
Willem de Bruijn | 5cd8d46 | 2018-11-20 13:00:18 -0500 | [diff] [blame] | 2565 | skb_zcopy_set_nouarg(skb, ph.raw); |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2566 | |
Herbert Xu | ae64194 | 2011-11-18 02:20:04 +0000 | [diff] [blame] | 2567 | skb_reserve(skb, hlen); |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2568 | skb_reset_network_header(skb); |
Jason Wang | c1aad27 | 2013-03-25 20:19:57 +0000 | [diff] [blame] | 2569 | |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2570 | to_write = tp_len; |
| 2571 | |
| 2572 | if (sock->type == SOCK_DGRAM) { |
| 2573 | err = dev_hard_header(skb, dev, ntohs(proto), addr, |
| 2574 | NULL, tp_len); |
| 2575 | if (unlikely(err < 0)) |
| 2576 | return -EINVAL; |
Willem de Bruijn | 1d036d2 | 2016-02-03 18:02:17 -0500 | [diff] [blame] | 2577 | } else if (copylen) { |
Willem de Bruijn | 9ed988c | 2016-03-09 21:58:34 -0500 | [diff] [blame] | 2578 | int hdrlen = min_t(int, copylen, tp_len); |
| 2579 | |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2580 | skb_push(skb, dev->hard_header_len); |
Willem de Bruijn | 1d036d2 | 2016-02-03 18:02:17 -0500 | [diff] [blame] | 2581 | skb_put(skb, copylen - dev->hard_header_len); |
Willem de Bruijn | 9ed988c | 2016-03-09 21:58:34 -0500 | [diff] [blame] | 2582 | err = skb_store_bits(skb, 0, data, hdrlen); |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2583 | if (unlikely(err)) |
| 2584 | return err; |
Willem de Bruijn | 9ed988c | 2016-03-09 21:58:34 -0500 | [diff] [blame] | 2585 | if (!dev_validate_header(dev, skb->data, hdrlen)) |
| 2586 | return -EINVAL; |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2587 | |
Willem de Bruijn | 9ed988c | 2016-03-09 21:58:34 -0500 | [diff] [blame] | 2588 | data += hdrlen; |
| 2589 | to_write -= hdrlen; |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2590 | } |
| 2591 | |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2592 | offset = offset_in_page(data); |
| 2593 | len_max = PAGE_SIZE - offset; |
| 2594 | len = ((to_write > len_max) ? len_max : to_write); |
| 2595 | |
| 2596 | skb->data_len = to_write; |
| 2597 | skb->len += to_write; |
| 2598 | skb->truesize += to_write; |
Reshetova, Elena | 14afee4 | 2017-06-30 13:08:00 +0300 | [diff] [blame] | 2599 | refcount_add(to_write, &po->sk.sk_wmem_alloc); |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2600 | |
| 2601 | while (likely(to_write)) { |
| 2602 | nr_frags = skb_shinfo(skb)->nr_frags; |
| 2603 | |
| 2604 | if (unlikely(nr_frags >= MAX_SKB_FRAGS)) { |
Eric Dumazet | 40d4e3d | 2009-07-21 21:57:59 +0000 | [diff] [blame] | 2605 | pr_err("Packet exceeds the number of skb frags (%lu)\n", |
| 2606 | MAX_SKB_FRAGS); |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2607 | return -EFAULT; |
| 2608 | } |
| 2609 | |
Changli Gao | 0af55bb | 2010-12-01 02:52:20 +0000 | [diff] [blame] | 2610 | page = pgv_to_page(data); |
| 2611 | data += len; |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2612 | flush_dcache_page(page); |
| 2613 | get_page(page); |
Changli Gao | 0af55bb | 2010-12-01 02:52:20 +0000 | [diff] [blame] | 2614 | skb_fill_page_desc(skb, nr_frags, page, offset, len); |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2615 | to_write -= len; |
| 2616 | offset = 0; |
| 2617 | len_max = PAGE_SIZE; |
| 2618 | len = ((to_write > len_max) ? len_max : to_write); |
| 2619 | } |
| 2620 | |
Maxim Mikityanskiy | 75c6577 | 2019-02-21 12:40:01 +0000 | [diff] [blame] | 2621 | packet_parse_headers(skb, sock); |
Daniel Borkmann | efdfa2f | 2015-11-11 23:25:40 +0100 | [diff] [blame] | 2622 | |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2623 | return tp_len; |
| 2624 | } |
| 2625 | |
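| | /* Validate the header of a TX ring frame and locate the packet data |
| | * inside it: reject oversized frames and, with PACKET_TX_HAS_OFF set, |
| | * range-check the user-supplied tp_mac/tp_net data offset. |
| | */ |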
Willem de Bruijn | 8d39b4a | 2016-02-03 18:02:16 -0500 | [diff] [blame] | 2626 | static int tpacket_parse_header(struct packet_sock *po, void *frame, |
| 2627 | int size_max, void **data) |
| 2628 | { |
| 2629 | union tpacket_uhdr ph; |
| 2630 | int tp_len, off; |
| 2631 | |
| 2632 | ph.raw = frame; |
| 2633 | |
| 2634 | switch (po->tp_version) { |
Sowmini Varadhan | 7f953ab | 2017-01-03 06:31:47 -0800 | [diff] [blame] | 2635 | case TPACKET_V3: |
| 2636 | if (ph.h3->tp_next_offset != 0) { |
| 2637 | pr_warn_once("variable sized slot not supported\n"); |
| 2638 | return -EINVAL; |
| 2639 | } |
| 2640 | tp_len = ph.h3->tp_len; |
| 2641 | break; |
Willem de Bruijn | 8d39b4a | 2016-02-03 18:02:16 -0500 | [diff] [blame] | 2642 | case TPACKET_V2: |
| 2643 | tp_len = ph.h2->tp_len; |
| 2644 | break; |
| 2645 | default: |
| 2646 | tp_len = ph.h1->tp_len; |
| 2647 | break; |
| 2648 | } |
| 2649 | if (unlikely(tp_len > size_max)) { |
| 2650 | pr_err("packet size is too big (%d > %d)\n", tp_len, size_max); |
| 2651 | return -EMSGSIZE; |
| 2652 | } |
| 2653 | |
| 2654 | if (unlikely(po->tp_tx_has_off)) { |
| 2655 | int off_min, off_max; |
| 2656 | |
| 2657 | off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll); |
| 2658 | off_max = po->tx_ring.frame_size - tp_len; |
| 2659 | if (po->sk.sk_type == SOCK_DGRAM) { |
| 2660 | switch (po->tp_version) { |
Sowmini Varadhan | 7f953ab | 2017-01-03 06:31:47 -0800 | [diff] [blame] | 2661 | case TPACKET_V3: |
| 2662 | off = ph.h3->tp_net; |
| 2663 | break; |
Willem de Bruijn | 8d39b4a | 2016-02-03 18:02:16 -0500 | [diff] [blame] | 2664 | case TPACKET_V2: |
| 2665 | off = ph.h2->tp_net; |
| 2666 | break; |
| 2667 | default: |
| 2668 | off = ph.h1->tp_net; |
| 2669 | break; |
| 2670 | } |
| 2671 | } else { |
| 2672 | switch (po->tp_version) { |
Sowmini Varadhan | 7f953ab | 2017-01-03 06:31:47 -0800 | [diff] [blame] | 2673 | case TPACKET_V3: |
| 2674 | off = ph.h3->tp_mac; |
| 2675 | break; |
Willem de Bruijn | 8d39b4a | 2016-02-03 18:02:16 -0500 | [diff] [blame] | 2676 | case TPACKET_V2: |
| 2677 | off = ph.h2->tp_mac; |
| 2678 | break; |
| 2679 | default: |
| 2680 | off = ph.h1->tp_mac; |
| 2681 | break; |
| 2682 | } |
| 2683 | } |
| 2684 | if (unlikely((off < off_min) || (off_max < off))) |
| 2685 | return -EINVAL; |
| 2686 | } else { |
| 2687 | off = po->tp_hdrlen - sizeof(struct sockaddr_ll); |
| 2688 | } |
| 2689 | |
| 2690 | *data = frame + off; |
| 2691 | return tp_len; |
| 2692 | } |
| 2693 | |
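| | /* Main PACKET_TX_RING send loop: walk the TX ring, turn each frame in |
| | * TP_STATUS_SEND_REQUEST state into an skb and transmit it, marking the |
| | * frame TP_STATUS_SENDING meanwhile; unless MSG_DONTWAIT is set, block |
| | * on skb_completion until in-flight frames have completed. |
| | */ |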
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2694 | static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) |
| 2695 | { |
Neil Horman | 89ed5b5 | 2019-06-25 17:57:49 -0400 | [diff] [blame] | 2696 | struct sk_buff *skb = NULL; |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2697 | struct net_device *dev; |
Willem de Bruijn | 1d036d2 | 2016-02-03 18:02:17 -0500 | [diff] [blame] | 2698 | struct virtio_net_hdr *vnet_hdr = NULL; |
Soheil Hassas Yeganeh | c14ac94 | 2016-04-02 23:08:12 -0400 | [diff] [blame] | 2699 | struct sockcm_cookie sockc; |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2700 | __be16 proto; |
David S. Miller | 09effa6 | 2013-08-07 17:11:00 -0700 | [diff] [blame] | 2701 | int err, reserve = 0; |
Eric Dumazet | 40d4e3d | 2009-07-21 21:57:59 +0000 | [diff] [blame] | 2702 | void *ph; |
Steffen Hurrle | 342dfc3 | 2014-01-17 22:53:15 +0100 | [diff] [blame] | 2703 | DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name); |
Daniel Borkmann | 87a2fd2 | 2014-01-15 16:25:35 +0100 | [diff] [blame] | 2704 | bool need_wait = !(msg->msg_flags & MSG_DONTWAIT); |
Willem de Bruijn | 486efdc | 2019-04-29 11:53:18 -0400 | [diff] [blame] | 2705 | unsigned char *addr = NULL; |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2706 | int tp_len, size_max; |
Willem de Bruijn | 8d39b4a | 2016-02-03 18:02:16 -0500 | [diff] [blame] | 2707 | void *data; |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2708 | int len_sum = 0; |
danborkmann@iogearbox.net | 9e67030 | 2012-08-20 03:34:03 +0000 | [diff] [blame] | 2709 | int status = TP_STATUS_AVAILABLE; |
Willem de Bruijn | 1d036d2 | 2016-02-03 18:02:17 -0500 | [diff] [blame] | 2710 | int hlen, tlen, copylen = 0; |
Neil Horman | 89ed5b5 | 2019-06-25 17:57:49 -0400 | [diff] [blame] | 2711 | long timeo = 0; |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2712 | |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2713 | mutex_lock(&po->pg_vec_lock); |
| 2714 | |
Eric Dumazet | 32d3182 | 2019-08-14 02:11:57 -0700 | [diff] [blame] | 2715 | /* The packet_sendmsg() check on tx_ring.pg_vec was lockless, |
| 2716 | * so we need to confirm it under the protection of pg_vec_lock. |
| 2717 | */ |
| 2718 | if (unlikely(!po->tx_ring.pg_vec)) { |
| 2719 | err = -EBUSY; |
| 2720 | goto out; |
| 2721 | } |
Daniel Borkmann | 66e56cd | 2013-12-06 11:36:15 +0100 | [diff] [blame] | 2722 | if (likely(saddr == NULL)) { |
Daniel Borkmann | e40526c | 2013-11-21 16:50:58 +0100 | [diff] [blame] | 2723 | dev = packet_cached_dev_get(po); |
Eric Dumazet | c7d2ef5 | 2021-06-16 06:42:01 -0700 | [diff] [blame] | 2724 | proto = READ_ONCE(po->num); |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2725 | } else { |
| 2726 | err = -EINVAL; |
| 2727 | if (msg->msg_namelen < sizeof(struct sockaddr_ll)) |
| 2728 | goto out; |
| 2729 | if (msg->msg_namelen < (saddr->sll_halen |
| 2730 | + offsetof(struct sockaddr_ll, |
| 2731 | sll_addr))) |
| 2732 | goto out; |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2733 | proto = saddr->sll_protocol; |
Ben Greear | 827d978 | 2011-06-01 07:18:53 +0000 | [diff] [blame] | 2734 | dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); |
Willem de Bruijn | 486efdc | 2019-04-29 11:53:18 -0400 | [diff] [blame] | 2735 | if (po->sk.sk_socket->type == SOCK_DGRAM) { |
| 2736 | if (dev && msg->msg_namelen < dev->addr_len + |
| 2737 | offsetof(struct sockaddr_ll, sll_addr)) |
| 2738 | goto out_put; |
| 2739 | addr = saddr->sll_addr; |
| 2740 | } |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2741 | } |
| 2742 | |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2743 | err = -ENXIO; |
| 2744 | if (unlikely(dev == NULL)) |
| 2745 | goto out; |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2746 | err = -ENETDOWN; |
| 2747 | if (unlikely(!(dev->flags & IFF_UP))) |
| 2748 | goto out_put; |
| 2749 | |
Willem de Bruijn | 657a066 | 2018-07-06 10:12:56 -0400 | [diff] [blame] | 2750 | sockcm_init(&sockc, &po->sk); |
Douglas Caetano dos Santos | d19b183 | 2017-05-12 15:19:15 -0300 | [diff] [blame] | 2751 | if (msg->msg_controllen) { |
| 2752 | err = sock_cmsg_send(&po->sk, msg, &sockc); |
| 2753 | if (unlikely(err)) |
| 2754 | goto out_put; |
| 2755 | } |
| 2756 | |
Daniel Borkmann | 5cfb4c8 | 2015-11-11 23:25:44 +0100 | [diff] [blame] | 2757 | if (po->sk.sk_socket->type == SOCK_RAW) |
| 2758 | reserve = dev->hard_header_len; |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2759 | size_max = po->tx_ring.frame_size |
Gabor Gombas | b5dd884 | 2009-10-29 03:19:11 -0700 | [diff] [blame] | 2760 | - (po->tp_hdrlen - sizeof(struct sockaddr_ll)); |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2761 | |
Willem de Bruijn | 1d036d2 | 2016-02-03 18:02:17 -0500 | [diff] [blame] | 2762 | if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr) |
Daniel Borkmann | 5cfb4c8 | 2015-11-11 23:25:44 +0100 | [diff] [blame] | 2763 | size_max = dev->mtu + reserve + VLAN_HLEN; |
David S. Miller | 09effa6 | 2013-08-07 17:11:00 -0700 | [diff] [blame] | 2764 | |
Neil Horman | 89ed5b5 | 2019-06-25 17:57:49 -0400 | [diff] [blame] | 2765 | reinit_completion(&po->skb_completion); |
| 2766 | |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2767 | do { |
| 2768 | ph = packet_current_frame(po, &po->tx_ring, |
Daniel Borkmann | 87a2fd2 | 2014-01-15 16:25:35 +0100 | [diff] [blame] | 2769 | TP_STATUS_SEND_REQUEST); |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2770 | if (unlikely(ph == NULL)) { |
Neil Horman | 89ed5b5 | 2019-06-25 17:57:49 -0400 | [diff] [blame] | 2771 | if (need_wait && skb) { |
| 2772 | timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT); |
| 2773 | timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo); |
| 2774 | if (timeo <= 0) { |
| 2775 | err = !timeo ? -ETIMEDOUT : -ERESTARTSYS; |
| 2776 | goto out_put; |
| 2777 | } |
| 2778 | } |
| 2779 | /* check for additional frames */ |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2780 | continue; |
| 2781 | } |
| 2782 | |
Willem de Bruijn | 8d39b4a | 2016-02-03 18:02:16 -0500 | [diff] [blame] | 2783 | skb = NULL; |
| 2784 | tp_len = tpacket_parse_header(po, ph, size_max, &data); |
| 2785 | if (tp_len < 0) |
| 2786 | goto tpacket_error; |
| 2787 | |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2788 | status = TP_STATUS_SEND_REQUEST; |
Herbert Xu | ae64194 | 2011-11-18 02:20:04 +0000 | [diff] [blame] | 2789 | hlen = LL_RESERVED_SPACE(dev); |
| 2790 | tlen = dev->needed_tailroom; |
Willem de Bruijn | 1d036d2 | 2016-02-03 18:02:17 -0500 | [diff] [blame] | 2791 | if (po->has_vnet_hdr) { |
| 2792 | vnet_hdr = data; |
| 2793 | data += sizeof(*vnet_hdr); |
| 2794 | tp_len -= sizeof(*vnet_hdr); |
| 2795 | if (tp_len < 0 || |
| 2796 | __packet_snd_vnet_parse(vnet_hdr, tp_len)) { |
| 2797 | tp_len = -EINVAL; |
| 2798 | goto tpacket_error; |
| 2799 | } |
| 2800 | copylen = __virtio16_to_cpu(vio_le(), |
| 2801 | vnet_hdr->hdr_len); |
| 2802 | } |
Willem de Bruijn | 9ed988c | 2016-03-09 21:58:34 -0500 | [diff] [blame] | 2803 | copylen = max_t(int, copylen, dev->hard_header_len); |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2804 | skb = sock_alloc_send_skb(&po->sk, |
Willem de Bruijn | 1d036d2 | 2016-02-03 18:02:17 -0500 | [diff] [blame] | 2805 | hlen + tlen + sizeof(struct sockaddr_ll) + |
| 2806 | (copylen - dev->hard_header_len), |
Kretschmer, Mathias | fbf33a2 | 2015-05-08 15:44:37 +0200 | [diff] [blame] | 2807 | !need_wait, &err); |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2808 | |
Kretschmer, Mathias | fbf33a2 | 2015-05-08 15:44:37 +0200 | [diff] [blame] | 2809 | if (unlikely(skb == NULL)) { |
 | 2810 | /* we assume the socket was initially writable ... */
| 2811 | if (likely(len_sum > 0)) |
| 2812 | err = len_sum; |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2813 | goto out_status; |
Kretschmer, Mathias | fbf33a2 | 2015-05-08 15:44:37 +0200 | [diff] [blame] | 2814 | } |
Willem de Bruijn | 8d39b4a | 2016-02-03 18:02:16 -0500 | [diff] [blame] | 2815 | tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto, |
Soheil Hassas Yeganeh | c14ac94 | 2016-04-02 23:08:12 -0400 | [diff] [blame] | 2816 | addr, hlen, copylen, &sockc); |
Alexander Drozdov | dbd46ab | 2015-07-28 13:57:01 +0300 | [diff] [blame] | 2817 | if (likely(tp_len >= 0) && |
Daniel Borkmann | 5cfb4c8 | 2015-11-11 23:25:44 +0100 | [diff] [blame] | 2818 | tp_len > dev->mtu + reserve && |
Willem de Bruijn | 1d036d2 | 2016-02-03 18:02:17 -0500 | [diff] [blame] | 2819 | !po->has_vnet_hdr && |
Daniel Borkmann | 3c70c13 | 2015-11-11 23:25:42 +0100 | [diff] [blame] | 2820 | !packet_extra_vlan_len_allowed(dev, skb)) |
| 2821 | tp_len = -EMSGSIZE; |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2822 | |
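/*
 * Malformed frame: with PACKET_LOSS set, hand the slot back to
 * user space and move on; otherwise mark it TP_STATUS_WRONG_FORMAT
 * and abort the send loop.
 */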
| 2823 | if (unlikely(tp_len < 0)) { |
Willem de Bruijn | 8d39b4a | 2016-02-03 18:02:16 -0500 | [diff] [blame] | 2824 | tpacket_error: |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2825 | if (po->tp_loss) { |
| 2826 | __packet_set_status(po, ph, |
| 2827 | TP_STATUS_AVAILABLE); |
| 2828 | packet_increment_head(&po->tx_ring); |
| 2829 | kfree_skb(skb); |
| 2830 | continue; |
| 2831 | } else { |
| 2832 | status = TP_STATUS_WRONG_FORMAT; |
| 2833 | err = tp_len; |
| 2834 | goto out_status; |
| 2835 | } |
| 2836 | } |
| 2837 | |
Jianfeng Tan | 9d2f67e | 2018-09-29 15:41:27 +0000 | [diff] [blame] | 2838 | if (po->has_vnet_hdr) { |
| 2839 | if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) { |
| 2840 | tp_len = -EINVAL; |
| 2841 | goto tpacket_error; |
| 2842 | } |
| 2843 | virtio_net_hdr_set_proto(skb, vnet_hdr); |
Willem de Bruijn | 1d036d2 | 2016-02-03 18:02:17 -0500 | [diff] [blame] | 2844 | } |
| 2845 | |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2846 | skb->destructor = tpacket_destruct_skb; |
| 2847 | __packet_set_status(po, ph, TP_STATUS_SENDING); |
Daniel Borkmann | b013840 | 2014-01-15 16:25:36 +0100 | [diff] [blame] | 2848 | packet_inc_pending(&po->tx_ring); |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2849 | |
| 2850 | status = TP_STATUS_SEND_REQUEST; |
Daniel Borkmann | d346a3f | 2013-12-06 11:36:17 +0100 | [diff] [blame] | 2851 | err = po->xmit(skb); |
Jarek Poplawski | eb70df1 | 2010-01-10 22:04:19 +0000 | [diff] [blame] | 2852 | if (unlikely(err > 0)) { |
| 2853 | err = net_xmit_errno(err); |
| 2854 | if (err && __packet_get_status(po, ph) == |
| 2855 | TP_STATUS_AVAILABLE) { |
| 2856 | /* skb was destructed already */ |
| 2857 | skb = NULL; |
| 2858 | goto out_status; |
| 2859 | } |
| 2860 | /* |
| 2861 | * skb was dropped but not destructed yet; |
| 2862 | * let's treat it like congestion or err < 0 |
| 2863 | */ |
| 2864 | err = 0; |
| 2865 | } |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2866 | packet_increment_head(&po->tx_ring); |
| 2867 | len_sum += tp_len; |
Daniel Borkmann | b013840 | 2014-01-15 16:25:36 +0100 | [diff] [blame] | 2868 | } while (likely((ph != NULL) || |
 | 2869 | /* Note: packet_read_pending() might be slow if we have
 | 2870 |  * to call it, since it reads a per-CPU variable, but on
 | 2871 |  * the fast path we already short-circuit the loop with
 | 2872 |  * the first condition and luckily never have to go down
 | 2873 |  * that path anyway.
 | 2874 |  */
| 2875 | (need_wait && packet_read_pending(&po->tx_ring)))); |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2876 | |
| 2877 | err = len_sum; |
| 2878 | goto out_put; |
| 2879 | |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2880 | out_status: |
| 2881 | __packet_set_status(po, ph, status); |
| 2882 | kfree_skb(skb); |
| 2883 | out_put: |
Daniel Borkmann | e40526c | 2013-11-21 16:50:58 +0100 | [diff] [blame] | 2884 | dev_put(dev); |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 2885 | out: |
| 2886 | mutex_unlock(&po->pg_vec_lock); |
| 2887 | return err; |
| 2888 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2889 | |
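/*
 * Allocate an skb for the non-ring transmit path.  Small payloads
 * stay fully linear; larger ones get 'linear' bytes of linear data
 * with the remainder in paged fragments.
 */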
Olof Johansson | eea49cc9 | 2011-11-02 11:00:49 +0000 | [diff] [blame] | 2890 | static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad, |
| 2891 | size_t reserve, size_t len, |
| 2892 | size_t linear, int noblock, |
| 2893 | int *err) |
Sridhar Samudrala | bfd5f4a | 2010-02-04 20:24:10 -0800 | [diff] [blame] | 2894 | { |
| 2895 | struct sk_buff *skb; |
| 2896 | |
| 2897 | /* Under a page? Don't bother with paged skb. */ |
| 2898 | if (prepad + len < PAGE_SIZE || !linear) |
| 2899 | linear = len; |
| 2900 | |
| 2901 | skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, |
Eric Dumazet | 28d6427 | 2013-08-08 14:38:47 -0700 | [diff] [blame] | 2902 | err, 0); |
Sridhar Samudrala | bfd5f4a | 2010-02-04 20:24:10 -0800 | [diff] [blame] | 2903 | if (!skb) |
| 2904 | return NULL; |
| 2905 | |
| 2906 | skb_reserve(skb, reserve); |
| 2907 | skb_put(skb, linear); |
| 2908 | skb->data_len = len - linear; |
| 2909 | skb->len += len - linear; |
| 2910 | |
| 2911 | return skb; |
| 2912 | } |
| 2913 | |
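/*
 * Transmit path for sockets without a TX ring: build a single skb
 * per sendmsg() call, copy the payload in from user space and hand
 * the result to the device via po->xmit().
 */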
Daniel Borkmann | d346a3f | 2013-12-06 11:36:17 +0100 | [diff] [blame] | 2914 | static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2915 | { |
| 2916 | struct sock *sk = sock->sk; |
Steffen Hurrle | 342dfc3 | 2014-01-17 22:53:15 +0100 | [diff] [blame] | 2917 | DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2918 | struct sk_buff *skb; |
| 2919 | struct net_device *dev; |
Al Viro | 0e11c91 | 2006-11-08 00:26:29 -0800 | [diff] [blame] | 2920 | __be16 proto; |
Willem de Bruijn | 486efdc | 2019-04-29 11:53:18 -0400 | [diff] [blame] | 2921 | unsigned char *addr = NULL; |
Ben Greear | 827d978 | 2011-06-01 07:18:53 +0000 | [diff] [blame] | 2922 | int err, reserve = 0; |
Edward Jee | c7d39e3 | 2015-10-08 14:56:49 -0700 | [diff] [blame] | 2923 | struct sockcm_cookie sockc; |
Sridhar Samudrala | bfd5f4a | 2010-02-04 20:24:10 -0800 | [diff] [blame] | 2924 | struct virtio_net_hdr vnet_hdr = { 0 }; |
| 2925 | int offset = 0; |
Sridhar Samudrala | bfd5f4a | 2010-02-04 20:24:10 -0800 | [diff] [blame] | 2926 | struct packet_sock *po = pkt_sk(sk); |
Willem de Bruijn | da7c956 | 2017-09-26 12:20:17 -0400 | [diff] [blame] | 2927 | bool has_vnet_hdr = false; |
Willem de Bruijn | 57031eb | 2017-02-07 15:57:21 -0500 | [diff] [blame] | 2928 | int hlen, tlen, linear; |
Ben Greear | 3bdc0eb | 2012-02-11 15:39:30 +0000 | [diff] [blame] | 2929 | int extra_len = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2930 | |
| 2931 | /* |
YOSHIFUJI Hideaki | 1ce4f28 | 2007-02-09 23:25:10 +0900 | [diff] [blame] | 2932 | * Get and verify the address. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2933 | */ |
YOSHIFUJI Hideaki | 1ce4f28 | 2007-02-09 23:25:10 +0900 | [diff] [blame] | 2934 | |
Daniel Borkmann | 66e56cd | 2013-12-06 11:36:15 +0100 | [diff] [blame] | 2935 | if (likely(saddr == NULL)) { |
Daniel Borkmann | e40526c | 2013-11-21 16:50:58 +0100 | [diff] [blame] | 2936 | dev = packet_cached_dev_get(po); |
Eric Dumazet | c7d2ef5 | 2021-06-16 06:42:01 -0700 | [diff] [blame] | 2937 | proto = READ_ONCE(po->num); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2938 | } else { |
| 2939 | err = -EINVAL; |
| 2940 | if (msg->msg_namelen < sizeof(struct sockaddr_ll)) |
| 2941 | goto out; |
Eric W. Biederman | 0fb375f | 2005-09-21 00:11:37 -0700 | [diff] [blame] | 2942 | if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr))) |
| 2943 | goto out; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2944 | proto = saddr->sll_protocol; |
Ben Greear | 827d978 | 2011-06-01 07:18:53 +0000 | [diff] [blame] | 2945 | dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex); |
Willem de Bruijn | 486efdc | 2019-04-29 11:53:18 -0400 | [diff] [blame] | 2946 | if (sock->type == SOCK_DGRAM) { |
| 2947 | if (dev && msg->msg_namelen < dev->addr_len + |
| 2948 | offsetof(struct sockaddr_ll, sll_addr)) |
| 2949 | goto out_unlock; |
| 2950 | addr = saddr->sll_addr; |
| 2951 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2952 | } |
| 2953 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2954 | err = -ENXIO; |
Daniel Borkmann | e40526c | 2013-11-21 16:50:58 +0100 | [diff] [blame] | 2955 | if (unlikely(dev == NULL)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2956 | goto out_unlock; |
Daniel Borkmann | e40526c | 2013-11-21 16:50:58 +0100 | [diff] [blame] | 2957 | err = -ENETDOWN; |
| 2958 | if (unlikely(!(dev->flags & IFF_UP))) |
| 2959 | goto out_unlock; |
| 2960 | |
Willem de Bruijn | 657a066 | 2018-07-06 10:12:56 -0400 | [diff] [blame] | 2961 | sockcm_init(&sockc, sk); |
Edward Jee | c7d39e3 | 2015-10-08 14:56:49 -0700 | [diff] [blame] | 2962 | sockc.mark = sk->sk_mark; |
| 2963 | if (msg->msg_controllen) { |
| 2964 | err = sock_cmsg_send(sk, msg, &sockc); |
| 2965 | if (unlikely(err)) |
| 2966 | goto out_unlock; |
| 2967 | } |
| 2968 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2969 | if (sock->type == SOCK_RAW) |
| 2970 | reserve = dev->hard_header_len; |
Sridhar Samudrala | bfd5f4a | 2010-02-04 20:24:10 -0800 | [diff] [blame] | 2971 | if (po->has_vnet_hdr) { |
Willem de Bruijn | 16cc140 | 2016-02-03 18:02:14 -0500 | [diff] [blame] | 2972 | err = packet_snd_vnet_parse(msg, &len, &vnet_hdr); |
| 2973 | if (err) |
Sridhar Samudrala | bfd5f4a | 2010-02-04 20:24:10 -0800 | [diff] [blame] | 2974 | goto out_unlock; |
Willem de Bruijn | da7c956 | 2017-09-26 12:20:17 -0400 | [diff] [blame] | 2975 | has_vnet_hdr = true; |
Sridhar Samudrala | bfd5f4a | 2010-02-04 20:24:10 -0800 | [diff] [blame] | 2976 | } |
| 2977 | |
Ben Greear | 3bdc0eb | 2012-02-11 15:39:30 +0000 | [diff] [blame] | 2978 | if (unlikely(sock_flag(sk, SOCK_NOFCS))) { |
| 2979 | if (!netif_supports_nofcs(dev)) { |
| 2980 | err = -EPROTONOSUPPORT; |
| 2981 | goto out_unlock; |
| 2982 | } |
| 2983 | extra_len = 4; /* We're doing our own CRC */ |
| 2984 | } |
| 2985 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2986 | err = -EMSGSIZE; |
Willem de Bruijn | 16cc140 | 2016-02-03 18:02:14 -0500 | [diff] [blame] | 2987 | if (!vnet_hdr.gso_type && |
| 2988 | (len > dev->mtu + reserve + VLAN_HLEN + extra_len)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2989 | goto out_unlock; |
| 2990 | |
Sridhar Samudrala | bfd5f4a | 2010-02-04 20:24:10 -0800 | [diff] [blame] | 2991 | err = -ENOBUFS; |
Herbert Xu | ae64194 | 2011-11-18 02:20:04 +0000 | [diff] [blame] | 2992 | hlen = LL_RESERVED_SPACE(dev); |
| 2993 | tlen = dev->needed_tailroom; |
Willem de Bruijn | 57031eb | 2017-02-07 15:57:21 -0500 | [diff] [blame] | 2994 | linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len); |
| 2995 | linear = max(linear, min_t(int, len, dev->hard_header_len)); |
| 2996 | skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear, |
Sridhar Samudrala | bfd5f4a | 2010-02-04 20:24:10 -0800 | [diff] [blame] | 2997 | msg->msg_flags & MSG_DONTWAIT, &err); |
Eric Dumazet | 40d4e3d | 2009-07-21 21:57:59 +0000 | [diff] [blame] | 2998 | if (skb == NULL) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2999 | goto out_unlock; |
| 3000 | |
Willem de Bruijn | b84bbaf | 2018-05-11 13:24:25 -0400 | [diff] [blame] | 3001 | skb_reset_network_header(skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3002 | |
Stephen Hemminger | 0c4e858 | 2007-10-09 01:36:32 -0700 | [diff] [blame] | 3003 | err = -EINVAL; |
Willem de Bruijn | 9c70776 | 2014-11-19 13:10:16 -0500 | [diff] [blame] | 3004 | if (sock->type == SOCK_DGRAM) { |
| 3005 | offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len); |
Christoph Jaeger | 46d2cfb | 2015-01-11 13:01:16 -0500 | [diff] [blame] | 3006 | if (unlikely(offset < 0)) |
Willem de Bruijn | 9c70776 | 2014-11-19 13:10:16 -0500 | [diff] [blame] | 3007 | goto out_free; |
Willem de Bruijn | b84bbaf | 2018-05-11 13:24:25 -0400 | [diff] [blame] | 3008 | } else if (reserve) { |
Willem de Bruijn | 9aad13b | 2018-05-24 18:10:30 -0400 | [diff] [blame] | 3009 | skb_reserve(skb, -reserve); |
Nicolas Dichtel | 88a8121 | 2019-01-17 11:27:22 +0100 | [diff] [blame] | 3010 | if (len < reserve + sizeof(struct ipv6hdr) && |
| 3011 | dev->min_header_len != dev->hard_header_len) |
Willem de Bruijn | 993675a | 2018-07-11 12:00:45 -0400 | [diff] [blame] | 3012 | skb_reset_network_header(skb); |
Willem de Bruijn | 9c70776 | 2014-11-19 13:10:16 -0500 | [diff] [blame] | 3013 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3014 | |
| 3015 | /* Returns -EFAULT on error */ |
Al Viro | c0371da | 2014-11-24 10:42:55 -0500 | [diff] [blame] | 3016 | err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3017 | if (err) |
| 3018 | goto out_free; |
Daniel Borkmann | bf84a010 | 2013-04-14 08:08:13 +0000 | [diff] [blame] | 3019 | |
Willem de Bruijn | 9ed988c | 2016-03-09 21:58:34 -0500 | [diff] [blame] | 3020 | if (sock->type == SOCK_RAW && |
| 3021 | !dev_validate_header(dev, skb->data, len)) { |
| 3022 | err = -EINVAL; |
| 3023 | goto out_free; |
| 3024 | } |
| 3025 | |
Willem de Bruijn | 8f932f7 | 2018-12-17 12:24:00 -0500 | [diff] [blame] | 3026 | skb_setup_tx_timestamp(skb, sockc.tsflags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3027 | |
Willem de Bruijn | 16cc140 | 2016-02-03 18:02:14 -0500 | [diff] [blame] | 3028 | if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) && |
Daniel Borkmann | 3c70c13 | 2015-11-11 23:25:42 +0100 | [diff] [blame] | 3029 | !packet_extra_vlan_len_allowed(dev, skb)) { |
| 3030 | err = -EMSGSIZE; |
| 3031 | goto out_free; |
Ben Greear | 57f89bf | 2011-02-11 09:35:18 +0000 | [diff] [blame] | 3032 | } |
| 3033 | |
David S. Miller | 09effa6 | 2013-08-07 17:11:00 -0700 | [diff] [blame] | 3034 | skb->protocol = proto; |
| 3035 | skb->dev = dev; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3036 | skb->priority = sk->sk_priority; |
Edward Jee | c7d39e3 | 2015-10-08 14:56:49 -0700 | [diff] [blame] | 3037 | skb->mark = sockc.mark; |
Richard Cochran | 3d0ba8c | 2018-07-03 15:42:51 -0700 | [diff] [blame] | 3038 | skb->tstamp = sockc.transmit_time; |
Daniel Borkmann | 0fd5d57 | 2014-02-16 15:55:22 +0100 | [diff] [blame] | 3039 | |
Willem de Bruijn | da7c956 | 2017-09-26 12:20:17 -0400 | [diff] [blame] | 3040 | if (has_vnet_hdr) { |
Jarno Rajahalme | db60eb5f | 2016-11-18 15:40:41 -0800 | [diff] [blame] | 3041 | err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le()); |
Willem de Bruijn | 16cc140 | 2016-02-03 18:02:14 -0500 | [diff] [blame] | 3042 | if (err) |
| 3043 | goto out_free; |
| 3044 | len += sizeof(vnet_hdr); |
Jianfeng Tan | 9d2f67e | 2018-09-29 15:41:27 +0000 | [diff] [blame] | 3045 | virtio_net_hdr_set_proto(skb, &vnet_hdr); |
Sridhar Samudrala | bfd5f4a | 2010-02-04 20:24:10 -0800 | [diff] [blame] | 3046 | } |
| 3047 | |
Maxim Mikityanskiy | 75c6577 | 2019-02-21 12:40:01 +0000 | [diff] [blame] | 3048 | packet_parse_headers(skb, sock); |
Daniel Borkmann | 8fd6c80 | 2015-11-11 23:25:41 +0100 | [diff] [blame] | 3049 | |
Ben Greear | 3bdc0eb | 2012-02-11 15:39:30 +0000 | [diff] [blame] | 3050 | if (unlikely(extra_len == 4)) |
| 3051 | skb->no_fcs = 1; |
| 3052 | |
Daniel Borkmann | d346a3f | 2013-12-06 11:36:17 +0100 | [diff] [blame] | 3053 | err = po->xmit(skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3054 | if (err > 0 && (err = net_xmit_errno(err)) != 0) |
| 3055 | goto out_unlock; |
| 3056 | |
Daniel Borkmann | e40526c | 2013-11-21 16:50:58 +0100 | [diff] [blame] | 3057 | dev_put(dev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3058 | |
Eric Dumazet | 40d4e3d | 2009-07-21 21:57:59 +0000 | [diff] [blame] | 3059 | return len; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3060 | |
| 3061 | out_free: |
| 3062 | kfree_skb(skb); |
| 3063 | out_unlock: |
Yajun Deng | 1160dfa | 2021-08-05 19:55:27 +0800 | [diff] [blame] | 3064 | dev_put(dev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3065 | out: |
| 3066 | return err; |
| 3067 | } |
| 3068 | |
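/*
 * sendmsg() entry point: use tpacket_snd() when a TX ring is
 * mapped, otherwise fall back to the plain packet_snd() path.
 */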
Ying Xue | 1b78414 | 2015-03-02 15:37:48 +0800 | [diff] [blame] | 3069 | static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 3070 | { |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 3071 | struct sock *sk = sock->sk; |
| 3072 | struct packet_sock *po = pkt_sk(sk); |
Daniel Borkmann | d346a3f | 2013-12-06 11:36:17 +0100 | [diff] [blame] | 3073 | |
Eric Dumazet | d1b5bee | 2021-06-10 09:00:12 -0700 | [diff] [blame] | 3074 | /* Reading tx_ring.pg_vec without holding pg_vec_lock is racy. |
| 3075 | * tpacket_snd() will redo the check safely. |
| 3076 | */ |
| 3077 | if (data_race(po->tx_ring.pg_vec)) |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 3078 | return tpacket_snd(po, msg); |
Eric Dumazet | d1b5bee | 2021-06-10 09:00:12 -0700 | [diff] [blame] | 3079 | |
| 3080 | return packet_snd(sock, msg, len); |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 3081 | } |
| 3082 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3083 | /* |
| 3084 | * Close a PACKET socket. This is fairly simple. We immediately go |
| 3085 | * to 'closed' state and remove our protocol entry in the device list. |
| 3086 | */ |
| 3087 | |
| 3088 | static int packet_release(struct socket *sock) |
| 3089 | { |
| 3090 | struct sock *sk = sock->sk; |
| 3091 | struct packet_sock *po; |
Anoob Soman | 2bd624b | 2017-02-15 20:25:39 +0000 | [diff] [blame] | 3092 | struct packet_fanout *f; |
Denis V. Lunev | d12d01d | 2007-11-19 22:28:35 -0800 | [diff] [blame] | 3093 | struct net *net; |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 3094 | union tpacket_req_u req_u; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3095 | |
| 3096 | if (!sk) |
| 3097 | return 0; |
| 3098 | |
YOSHIFUJI Hideaki | 3b1e0a6 | 2008-03-26 02:26:21 +0900 | [diff] [blame] | 3099 | net = sock_net(sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3100 | po = pkt_sk(sk); |
| 3101 | |
Pavel Emelyanov | 0fa7fa9 | 2012-08-21 01:06:47 +0000 | [diff] [blame] | 3102 | mutex_lock(&net->packet.sklist_lock); |
stephen hemminger | 808f511 | 2010-02-22 07:57:18 +0000 | [diff] [blame] | 3103 | sk_del_node_init_rcu(sk); |
Pavel Emelyanov | 0fa7fa9 | 2012-08-21 01:06:47 +0000 | [diff] [blame] | 3104 | mutex_unlock(&net->packet.sklist_lock); |
| 3105 | |
Eric Dumazet | 920de80 | 2008-11-24 00:09:29 -0800 | [diff] [blame] | 3106 | sock_prot_inuse_add(net, sk->sk_prot, -1); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3107 | |
stephen hemminger | 808f511 | 2010-02-22 07:57:18 +0000 | [diff] [blame] | 3108 | spin_lock(&po->bind_lock); |
David S. Miller | ce06b03 | 2011-07-04 01:44:29 -0700 | [diff] [blame] | 3109 | unregister_prot_hook(sk, false); |
Daniel Borkmann | 66e56cd | 2013-12-06 11:36:15 +0100 | [diff] [blame] | 3110 | packet_cached_dev_reset(po); |
| 3111 | |
Ben Greear | 160ff18 | 2011-06-01 07:18:52 +0000 | [diff] [blame] | 3112 | if (po->prot_hook.dev) { |
Eric Dumazet | f1d9268 | 2021-12-14 07:09:33 -0800 | [diff] [blame] | 3113 | dev_put_track(po->prot_hook.dev, &po->prot_hook.dev_tracker); |
Ben Greear | 160ff18 | 2011-06-01 07:18:52 +0000 | [diff] [blame] | 3114 | po->prot_hook.dev = NULL; |
| 3115 | } |
stephen hemminger | 808f511 | 2010-02-22 07:57:18 +0000 | [diff] [blame] | 3116 | spin_unlock(&po->bind_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3117 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3118 | packet_flush_mclist(sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3119 | |
Eric Dumazet | 5171b37 | 2018-04-15 17:52:04 -0700 | [diff] [blame] | 3120 | lock_sock(sk); |
Phil Sutter | 9665d5d | 2013-02-01 07:21:41 +0000 | [diff] [blame] | 3121 | if (po->rx_ring.pg_vec) { |
| 3122 | memset(&req_u, 0, sizeof(req_u)); |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 3123 | packet_set_ring(sk, &req_u, 1, 0); |
Phil Sutter | 9665d5d | 2013-02-01 07:21:41 +0000 | [diff] [blame] | 3124 | } |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 3125 | |
Phil Sutter | 9665d5d | 2013-02-01 07:21:41 +0000 | [diff] [blame] | 3126 | if (po->tx_ring.pg_vec) { |
| 3127 | memset(&req_u, 0, sizeof(req_u)); |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 3128 | packet_set_ring(sk, &req_u, 1, 1); |
Phil Sutter | 9665d5d | 2013-02-01 07:21:41 +0000 | [diff] [blame] | 3129 | } |
Eric Dumazet | 5171b37 | 2018-04-15 17:52:04 -0700 | [diff] [blame] | 3130 | release_sock(sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3131 | |
Anoob Soman | 2bd624b | 2017-02-15 20:25:39 +0000 | [diff] [blame] | 3132 | f = fanout_release(sk); |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 3133 | |
stephen hemminger | 808f511 | 2010-02-22 07:57:18 +0000 | [diff] [blame] | 3134 | synchronize_net(); |
Anoob Soman | 2bd624b | 2017-02-15 20:25:39 +0000 | [diff] [blame] | 3135 | |
Willem de Bruijn | afa0925 | 2019-05-31 12:37:23 -0400 | [diff] [blame] | 3136 | kfree(po->rollover); |
Anoob Soman | 2bd624b | 2017-02-15 20:25:39 +0000 | [diff] [blame] | 3137 | if (f) { |
| 3138 | fanout_release_data(f); |
Tanner Love | 9c661b0 | 2020-11-06 13:07:40 -0500 | [diff] [blame] | 3139 | kvfree(f); |
Anoob Soman | 2bd624b | 2017-02-15 20:25:39 +0000 | [diff] [blame] | 3140 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3141 | /* |
| 3142 | * Now the socket is dead. No more input will appear. |
| 3143 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3144 | sock_orphan(sk); |
| 3145 | sock->sk = NULL; |
| 3146 | |
| 3147 | /* Purge queues */ |
| 3148 | |
| 3149 | skb_queue_purge(&sk->sk_receive_queue); |
Daniel Borkmann | b013840 | 2014-01-15 16:25:36 +0100 | [diff] [blame] | 3150 | packet_free_pending(po); |
Pavel Emelyanov | 17ab56a | 2007-11-10 21:38:48 -0800 | [diff] [blame] | 3151 | sk_refcnt_debug_release(sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3152 | |
| 3153 | sock_put(sk); |
| 3154 | return 0; |
| 3155 | } |
| 3156 | |
| 3157 | /* |
| 3158 | * Attach a packet hook. |
| 3159 | */ |
| 3160 | |
Francesco Ruggeri | 30f7ea1 | 2015-11-05 08:16:14 -0800 | [diff] [blame] | 3161 | static int packet_do_bind(struct sock *sk, const char *name, int ifindex, |
| 3162 | __be16 proto) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3163 | { |
| 3164 | struct packet_sock *po = pkt_sk(sk); |
Francesco Ruggeri | 30f7ea1 | 2015-11-05 08:16:14 -0800 | [diff] [blame] | 3165 | struct net_device *dev = NULL; |
Francesco Ruggeri | 30f7ea1 | 2015-11-05 08:16:14 -0800 | [diff] [blame] | 3166 | bool unlisted = false; |
Eric Dumazet | bf44077 | 2022-01-07 10:39:53 -0800 | [diff] [blame] | 3167 | bool need_rehook; |
| 3168 | int ret = 0; |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 3169 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3170 | lock_sock(sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3171 | spin_lock(&po->bind_lock); |
Francesco Ruggeri | 30f7ea1 | 2015-11-05 08:16:14 -0800 | [diff] [blame] | 3172 | rcu_read_lock(); |
| 3173 | |
Willem de Bruijn | 4971613 | 2017-09-26 12:19:37 -0400 | [diff] [blame] | 3174 | if (po->fanout) { |
| 3175 | ret = -EINVAL; |
| 3176 | goto out_unlock; |
| 3177 | } |
| 3178 | |
Francesco Ruggeri | 30f7ea1 | 2015-11-05 08:16:14 -0800 | [diff] [blame] | 3179 | if (name) { |
| 3180 | dev = dev_get_by_name_rcu(sock_net(sk), name); |
| 3181 | if (!dev) { |
| 3182 | ret = -ENODEV; |
| 3183 | goto out_unlock; |
| 3184 | } |
| 3185 | } else if (ifindex) { |
| 3186 | dev = dev_get_by_index_rcu(sock_net(sk), ifindex); |
| 3187 | if (!dev) { |
| 3188 | ret = -ENODEV; |
| 3189 | goto out_unlock; |
| 3190 | } |
| 3191 | } |
| 3192 | |
Eric Dumazet | bf44077 | 2022-01-07 10:39:53 -0800 | [diff] [blame] | 3193 | need_rehook = po->prot_hook.type != proto || po->prot_hook.dev != dev; |
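/* Only tear the hook down and re-register it if the protocol or
 * the bound device actually changed.
 */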
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3194 | |
Daniel Borkmann | 902fefb | 2014-01-15 16:25:34 +0100 | [diff] [blame] | 3195 | if (need_rehook) { |
Eric Dumazet | bf44077 | 2022-01-07 10:39:53 -0800 | [diff] [blame] | 3196 | dev_hold(dev); |
Francesco Ruggeri | 30f7ea1 | 2015-11-05 08:16:14 -0800 | [diff] [blame] | 3197 | if (po->running) { |
| 3198 | rcu_read_unlock(); |
Eric Dumazet | 15fe076 | 2017-11-28 08:03:30 -0800 | [diff] [blame] | 3199 | /* prevents packet_notifier() from calling |
| 3200 | * register_prot_hook() |
| 3201 | */ |
Eric Dumazet | c7d2ef5 | 2021-06-16 06:42:01 -0700 | [diff] [blame] | 3202 | WRITE_ONCE(po->num, 0); |
Francesco Ruggeri | 30f7ea1 | 2015-11-05 08:16:14 -0800 | [diff] [blame] | 3203 | __unregister_prot_hook(sk, true); |
| 3204 | rcu_read_lock(); |
Francesco Ruggeri | 30f7ea1 | 2015-11-05 08:16:14 -0800 | [diff] [blame] | 3205 | if (dev) |
| 3206 | unlisted = !dev_get_by_index_rcu(sock_net(sk), |
| 3207 | dev->ifindex); |
| 3208 | } |
Daniel Borkmann | 66e56cd | 2013-12-06 11:36:15 +0100 | [diff] [blame] | 3209 | |
Eric Dumazet | 15fe076 | 2017-11-28 08:03:30 -0800 | [diff] [blame] | 3210 | BUG_ON(po->running); |
Eric Dumazet | c7d2ef5 | 2021-06-16 06:42:01 -0700 | [diff] [blame] | 3211 | WRITE_ONCE(po->num, proto); |
Daniel Borkmann | 902fefb | 2014-01-15 16:25:34 +0100 | [diff] [blame] | 3212 | po->prot_hook.type = proto; |
Daniel Borkmann | 902fefb | 2014-01-15 16:25:34 +0100 | [diff] [blame] | 3213 | |
Eric Dumazet | bf44077 | 2022-01-07 10:39:53 -0800 | [diff] [blame] | 3214 | dev_put_track(po->prot_hook.dev, &po->prot_hook.dev_tracker); |
Eric Dumazet | f1d9268 | 2021-12-14 07:09:33 -0800 | [diff] [blame] | 3215 | |
Francesco Ruggeri | 30f7ea1 | 2015-11-05 08:16:14 -0800 | [diff] [blame] | 3216 | if (unlikely(unlisted)) { |
Francesco Ruggeri | 30f7ea1 | 2015-11-05 08:16:14 -0800 | [diff] [blame] | 3217 | po->prot_hook.dev = NULL; |
Eric Dumazet | e032f7c | 2021-06-16 06:42:02 -0700 | [diff] [blame] | 3218 | WRITE_ONCE(po->ifindex, -1); |
Francesco Ruggeri | 30f7ea1 | 2015-11-05 08:16:14 -0800 | [diff] [blame] | 3219 | packet_cached_dev_reset(po); |
| 3220 | } else { |
Eric Dumazet | bf44077 | 2022-01-07 10:39:53 -0800 | [diff] [blame] | 3221 | dev_hold_track(dev, &po->prot_hook.dev_tracker, |
| 3222 | GFP_ATOMIC); |
Francesco Ruggeri | 30f7ea1 | 2015-11-05 08:16:14 -0800 | [diff] [blame] | 3223 | po->prot_hook.dev = dev; |
Eric Dumazet | e032f7c | 2021-06-16 06:42:02 -0700 | [diff] [blame] | 3224 | WRITE_ONCE(po->ifindex, dev ? dev->ifindex : 0); |
Francesco Ruggeri | 30f7ea1 | 2015-11-05 08:16:14 -0800 | [diff] [blame] | 3225 | packet_cached_dev_assign(po, dev); |
| 3226 | } |
Eric Dumazet | bf44077 | 2022-01-07 10:39:53 -0800 | [diff] [blame] | 3227 | dev_put(dev); |
Daniel Borkmann | 902fefb | 2014-01-15 16:25:34 +0100 | [diff] [blame] | 3228 | } |
| 3229 | |
| 3230 | if (proto == 0 || !need_rehook) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3231 | goto out_unlock; |
| 3232 | |
Francesco Ruggeri | 30f7ea1 | 2015-11-05 08:16:14 -0800 | [diff] [blame] | 3233 | if (!unlisted && (!dev || (dev->flags & IFF_UP))) { |
David S. Miller | ce06b03 | 2011-07-04 01:44:29 -0700 | [diff] [blame] | 3234 | register_prot_hook(sk); |
Urs Thuermann | be85d4a | 2007-11-12 21:05:20 -0800 | [diff] [blame] | 3235 | } else { |
| 3236 | sk->sk_err = ENETDOWN; |
| 3237 | if (!sock_flag(sk, SOCK_DEAD)) |
Alexander Aring | e3ae236 | 2021-06-27 18:48:21 -0400 | [diff] [blame] | 3238 | sk_error_report(sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3239 | } |
| 3240 | |
| 3241 | out_unlock: |
Francesco Ruggeri | 30f7ea1 | 2015-11-05 08:16:14 -0800 | [diff] [blame] | 3242 | rcu_read_unlock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3243 | spin_unlock(&po->bind_lock); |
| 3244 | release_sock(sk); |
Francesco Ruggeri | 30f7ea1 | 2015-11-05 08:16:14 -0800 | [diff] [blame] | 3245 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3246 | } |
| 3247 | |
| 3248 | /* |
| 3249 | * Bind a packet socket to a device |
| 3250 | */ |
| 3251 | |
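/*
 * For SOCK_PACKET sockets the address is a plain struct sockaddr
 * carrying the device name in sa_data, e.g. from user space
 * (illustrative only):
 *
 *	struct sockaddr sa = { .sa_family = AF_PACKET };
 *	strncpy(sa.sa_data, "eth0", sizeof(sa.sa_data));
 *	bind(fd, &sa, sizeof(sa));
 */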
Eric Dumazet | 40d4e3d | 2009-07-21 21:57:59 +0000 | [diff] [blame] | 3252 | static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, |
| 3253 | int addr_len) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3254 | { |
Eric Dumazet | 40d4e3d | 2009-07-21 21:57:59 +0000 | [diff] [blame] | 3255 | struct sock *sk = sock->sk; |
Alexander Potapenko | 540e289 | 2017-03-01 12:57:20 +0100 | [diff] [blame] | 3256 | char name[sizeof(uaddr->sa_data) + 1]; |
YOSHIFUJI Hideaki | 1ce4f28 | 2007-02-09 23:25:10 +0900 | [diff] [blame] | 3257 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3258 | /* |
| 3259 | * Check legality |
| 3260 | */ |
YOSHIFUJI Hideaki | 1ce4f28 | 2007-02-09 23:25:10 +0900 | [diff] [blame] | 3261 | |
Kris Katterjohn | 8ae55f0 | 2006-01-23 16:28:02 -0800 | [diff] [blame] | 3262 | if (addr_len != sizeof(struct sockaddr)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3263 | return -EINVAL; |
Alexander Potapenko | 540e289 | 2017-03-01 12:57:20 +0100 | [diff] [blame] | 3264 | /* uaddr->sa_data comes from user space; it is not guaranteed
 | 3265 | * to be zero-terminated.
| 3266 | */ |
| 3267 | memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data)); |
| 3268 | name[sizeof(uaddr->sa_data)] = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3269 | |
Francesco Ruggeri | 30f7ea1 | 2015-11-05 08:16:14 -0800 | [diff] [blame] | 3270 | return packet_do_bind(sk, name, 0, pkt_sk(sk)->num); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3271 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3272 | |
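/*
 * SOCK_RAW and SOCK_DGRAM sockets bind with a struct sockaddr_ll
 * instead, e.g. from user space (illustrative only):
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_ALL),
 *		.sll_ifindex  = if_nametoindex("eth0"),
 *	};
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 */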
| 3273 | static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) |
| 3274 | { |
Eric Dumazet | 40d4e3d | 2009-07-21 21:57:59 +0000 | [diff] [blame] | 3275 | struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr; |
| 3276 | struct sock *sk = sock->sk; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3277 | |
| 3278 | /* |
| 3279 | * Check legality |
| 3280 | */ |
YOSHIFUJI Hideaki | 1ce4f28 | 2007-02-09 23:25:10 +0900 | [diff] [blame] | 3281 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3282 | if (addr_len < sizeof(struct sockaddr_ll)) |
| 3283 | return -EINVAL; |
| 3284 | if (sll->sll_family != AF_PACKET) |
| 3285 | return -EINVAL; |
| 3286 | |
Francesco Ruggeri | 30f7ea1 | 2015-11-05 08:16:14 -0800 | [diff] [blame] | 3287 | return packet_do_bind(sk, NULL, sll->sll_ifindex, |
| 3288 | sll->sll_protocol ? : pkt_sk(sk)->num); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3289 | } |
| 3290 | |
| 3291 | static struct proto packet_proto = { |
| 3292 | .name = "PACKET", |
| 3293 | .owner = THIS_MODULE, |
| 3294 | .obj_size = sizeof(struct packet_sock), |
| 3295 | }; |
| 3296 | |
| 3297 | /* |
YOSHIFUJI Hideaki | 1ce4f28 | 2007-02-09 23:25:10 +0900 | [diff] [blame] | 3298 | * Create a packet socket.
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3299 | */ |
| 3300 | |
Eric Paris | 3f378b6 | 2009-11-05 22:18:14 -0800 | [diff] [blame] | 3301 | static int packet_create(struct net *net, struct socket *sock, int protocol, |
| 3302 | int kern) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3303 | { |
| 3304 | struct sock *sk; |
| 3305 | struct packet_sock *po; |
Al Viro | 0e11c91 | 2006-11-08 00:26:29 -0800 | [diff] [blame] | 3306 | __be16 proto = (__force __be16)protocol; /* weird, but documented */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3307 | int err; |
| 3308 | |
Eric W. Biederman | df008c9 | 2012-11-16 03:03:07 +0000 | [diff] [blame] | 3309 | if (!ns_capable(net->user_ns, CAP_NET_RAW)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3310 | return -EPERM; |
David S. Miller | be02097 | 2007-05-29 13:16:31 -0700 | [diff] [blame] | 3311 | if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW && |
| 3312 | sock->type != SOCK_PACKET) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3313 | return -ESOCKTNOSUPPORT; |
| 3314 | |
| 3315 | sock->state = SS_UNCONNECTED; |
| 3316 | |
| 3317 | err = -ENOBUFS; |
Eric W. Biederman | 11aa9c2 | 2015-05-08 21:09:13 -0500 | [diff] [blame] | 3318 | sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3319 | if (sk == NULL) |
| 3320 | goto out; |
| 3321 | |
| 3322 | sock->ops = &packet_ops; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3323 | if (sock->type == SOCK_PACKET) |
| 3324 | sock->ops = &packet_ops_spkt; |
David S. Miller | be02097 | 2007-05-29 13:16:31 -0700 | [diff] [blame] | 3325 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3326 | sock_init_data(sock, sk); |
| 3327 | |
| 3328 | po = pkt_sk(sk); |
Neil Horman | 89ed5b5 | 2019-06-25 17:57:49 -0400 | [diff] [blame] | 3329 | init_completion(&po->skb_completion); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3330 | sk->sk_family = PF_PACKET; |
Al Viro | 0e11c91 | 2006-11-08 00:26:29 -0800 | [diff] [blame] | 3331 | po->num = proto; |
Daniel Borkmann | d346a3f | 2013-12-06 11:36:17 +0100 | [diff] [blame] | 3332 | po->xmit = dev_queue_xmit; |
Daniel Borkmann | 66e56cd | 2013-12-06 11:36:15 +0100 | [diff] [blame] | 3333 | |
Daniel Borkmann | b013840 | 2014-01-15 16:25:36 +0100 | [diff] [blame] | 3334 | err = packet_alloc_pending(po); |
| 3335 | if (err) |
| 3336 | goto out2; |
| 3337 | |
Daniel Borkmann | 66e56cd | 2013-12-06 11:36:15 +0100 | [diff] [blame] | 3338 | packet_cached_dev_reset(po); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3339 | |
| 3340 | sk->sk_destruct = packet_sock_destruct; |
Pavel Emelyanov | 17ab56a | 2007-11-10 21:38:48 -0800 | [diff] [blame] | 3341 | sk_refcnt_debug_inc(sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3342 | |
| 3343 | /* |
| 3344 | * Attach a protocol block |
| 3345 | */ |
| 3346 | |
| 3347 | spin_lock_init(&po->bind_lock); |
Herbert Xu | 905db44 | 2009-01-30 14:12:06 -0800 | [diff] [blame] | 3348 | mutex_init(&po->pg_vec_lock); |
Willem de Bruijn | 0648ab7 | 2015-05-12 11:56:46 -0400 | [diff] [blame] | 3349 | po->rollover = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3350 | po->prot_hook.func = packet_rcv; |
David S. Miller | be02097 | 2007-05-29 13:16:31 -0700 | [diff] [blame] | 3351 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3352 | if (sock->type == SOCK_PACKET) |
| 3353 | po->prot_hook.func = packet_rcv_spkt; |
David S. Miller | be02097 | 2007-05-29 13:16:31 -0700 | [diff] [blame] | 3354 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3355 | po->prot_hook.af_packet_priv = sk; |
| 3356 | |
Al Viro | 0e11c91 | 2006-11-08 00:26:29 -0800 | [diff] [blame] | 3357 | if (proto) { |
| 3358 | po->prot_hook.type = proto; |
Willem de Bruijn | a6361f0 | 2018-04-23 17:37:03 -0400 | [diff] [blame] | 3359 | __register_prot_hook(sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3360 | } |
| 3361 | |
Pavel Emelyanov | 0fa7fa9 | 2012-08-21 01:06:47 +0000 | [diff] [blame] | 3362 | mutex_lock(&net->packet.sklist_lock); |
Maxime Chevallier | a4dc6a4 | 2019-03-16 14:41:30 +0100 | [diff] [blame] | 3363 | sk_add_node_tail_rcu(sk, &net->packet.sklist); |
Pavel Emelyanov | 0fa7fa9 | 2012-08-21 01:06:47 +0000 | [diff] [blame] | 3364 | mutex_unlock(&net->packet.sklist_lock); |
| 3365 | |
Eric Dumazet | 3680453 | 2008-11-19 14:25:35 -0800 | [diff] [blame] | 3366 | sock_prot_inuse_add(net, &packet_proto, 1); |
stephen hemminger | 808f511 | 2010-02-22 07:57:18 +0000 | [diff] [blame] | 3367 | |
Eric Dumazet | 40d4e3d | 2009-07-21 21:57:59 +0000 | [diff] [blame] | 3368 | return 0; |
Daniel Borkmann | b013840 | 2014-01-15 16:25:36 +0100 | [diff] [blame] | 3369 | out2: |
| 3370 | sk_free(sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3371 | out: |
| 3372 | return err; |
| 3373 | } |
| 3374 | |
| 3375 | /* |
| 3376 | * Pull a packet from our receive queue and hand it to the user. |
| 3377 | * If necessary we block. |
| 3378 | */ |
| 3379 | |
Ying Xue | 1b78414 | 2015-03-02 15:37:48 +0800 | [diff] [blame] | 3380 | static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, |
| 3381 | int flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3382 | { |
| 3383 | struct sock *sk = sock->sk; |
| 3384 | struct sk_buff *skb; |
| 3385 | int copied, err; |
Sridhar Samudrala | bfd5f4a | 2010-02-04 20:24:10 -0800 | [diff] [blame] | 3386 | int vnet_hdr_len = 0; |
Eyal Birger | 2472d76 | 2015-03-01 14:58:28 +0200 | [diff] [blame] | 3387 | unsigned int origlen = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3388 | |
| 3389 | err = -EINVAL; |
Richard Cochran | ed85b56 | 2010-04-07 22:41:28 +0000 | [diff] [blame] | 3390 | if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3391 | goto out; |
| 3392 | |
| 3393 | #if 0 |
| 3394 | /* What error should we return now? EUNATTACH? */ |
| 3395 | if (pkt_sk(sk)->ifindex < 0) |
| 3396 | return -ENODEV; |
| 3397 | #endif |
| 3398 | |
Richard Cochran | ed85b56 | 2010-04-07 22:41:28 +0000 | [diff] [blame] | 3399 | if (flags & MSG_ERRQUEUE) { |
Richard Cochran | cb820f8 | 2013-07-19 19:40:09 +0200 | [diff] [blame] | 3400 | err = sock_recv_errqueue(sk, msg, len, |
| 3401 | SOL_PACKET, PACKET_TX_TIMESTAMP); |
Richard Cochran | ed85b56 | 2010-04-07 22:41:28 +0000 | [diff] [blame] | 3402 | goto out; |
| 3403 | } |
| 3404 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3405 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3406 | * Call the generic datagram receiver. This handles all sorts |
| 3407 | * of horrible races and re-entrancy so we can forget about it |
| 3408 | * in the protocol layers. |
| 3409 | * |
 | 3410 | * It will return ENETDOWN if the device has just gone
 | 3411 | * down, but then it will block.
| 3412 | */ |
| 3413 | |
Eric Dumazet | 40d4e3d | 2009-07-21 21:57:59 +0000 | [diff] [blame] | 3414 | skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3415 | |
| 3416 | /* |
YOSHIFUJI Hideaki | 1ce4f28 | 2007-02-09 23:25:10 +0900 | [diff] [blame] | 3417 | * If an error occurred, return it. skb_recv_datagram()
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3418 | * handles the blocking, so we never see it and need not
 | 3419 | * worry about blocking retries.
| 3420 | */ |
| 3421 | |
Kris Katterjohn | 8ae55f0 | 2006-01-23 16:28:02 -0800 | [diff] [blame] | 3422 | if (skb == NULL) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3423 | goto out; |
| 3424 | |
Eric Dumazet | 9bb6cd6 | 2019-06-12 09:52:33 -0700 | [diff] [blame] | 3425 | packet_rcv_try_clear_pressure(pkt_sk(sk)); |
Willem de Bruijn | 2ccdbaa | 2015-05-12 11:56:48 -0400 | [diff] [blame] | 3426 | |
Sridhar Samudrala | bfd5f4a | 2010-02-04 20:24:10 -0800 | [diff] [blame] | 3427 | if (pkt_sk(sk)->has_vnet_hdr) { |
Willem de Bruijn | 16cc140 | 2016-02-03 18:02:14 -0500 | [diff] [blame] | 3428 | err = packet_rcv_vnet(msg, skb, &len); |
| 3429 | if (err) |
Sridhar Samudrala | bfd5f4a | 2010-02-04 20:24:10 -0800 | [diff] [blame] | 3430 | goto out_free; |
Willem de Bruijn | 16cc140 | 2016-02-03 18:02:14 -0500 | [diff] [blame] | 3431 | vnet_hdr_len = sizeof(struct virtio_net_hdr); |
Sridhar Samudrala | bfd5f4a | 2010-02-04 20:24:10 -0800 | [diff] [blame] | 3432 | } |
| 3433 | |
Hannes Frederic Sowa | f3d3342 | 2013-11-21 03:14:22 +0100 | [diff] [blame] | 3434 | /* You lose any data beyond the buffer you gave. If that
 | 3435 | * worries a user program, it can always ask the device for
 | 3436 | * its MTU anyway.
Eric W. Biederman | 0fb375f | 2005-09-21 00:11:37 -0700 | [diff] [blame] | 3437 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3438 | copied = skb->len; |
Eric Dumazet | 40d4e3d | 2009-07-21 21:57:59 +0000 | [diff] [blame] | 3439 | if (copied > len) { |
| 3440 | copied = len; |
| 3441 | msg->msg_flags |= MSG_TRUNC; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3442 | } |
| 3443 | |
David S. Miller | 51f3d02 | 2014-11-05 16:46:40 -0500 | [diff] [blame] | 3444 | err = skb_copy_datagram_msg(skb, 0, msg, copied); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3445 | if (err) |
| 3446 | goto out_free; |
| 3447 | |
Eyal Birger | 2472d76 | 2015-03-01 14:58:28 +0200 | [diff] [blame] | 3448 | if (sock->type != SOCK_PACKET) { |
| 3449 | struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; |
| 3450 | |
| 3451 | /* Original length was stored in sockaddr_ll fields */ |
| 3452 | origlen = PACKET_SKB_CB(skb)->sa.origlen; |
| 3453 | sll->sll_family = AF_PACKET; |
| 3454 | sll->sll_protocol = skb->protocol; |
| 3455 | } |
| 3456 | |
Neil Horman | 3b88578 | 2009-10-12 13:26:31 -0700 | [diff] [blame] | 3457 | sock_recv_ts_and_drops(msg, sk, skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3458 | |
Hannes Frederic Sowa | f3d3342 | 2013-11-21 03:14:22 +0100 | [diff] [blame] | 3459 | if (msg->msg_name) { |
Willem de Bruijn | b2cf86e | 2019-04-29 11:46:55 -0400 | [diff] [blame] | 3460 | int copy_len; |
| 3461 | |
Hannes Frederic Sowa | f3d3342 | 2013-11-21 03:14:22 +0100 | [diff] [blame] | 3462 | /* If the address length field is there to be filled |
| 3463 | * in, we fill it in now. |
| 3464 | */ |
| 3465 | if (sock->type == SOCK_PACKET) { |
Steffen Hurrle | 342dfc3 | 2014-01-17 22:53:15 +0100 | [diff] [blame] | 3466 | __sockaddr_check_size(sizeof(struct sockaddr_pkt)); |
Hannes Frederic Sowa | f3d3342 | 2013-11-21 03:14:22 +0100 | [diff] [blame] | 3467 | msg->msg_namelen = sizeof(struct sockaddr_pkt); |
Willem de Bruijn | b2cf86e | 2019-04-29 11:46:55 -0400 | [diff] [blame] | 3468 | copy_len = msg->msg_namelen; |
Hannes Frederic Sowa | f3d3342 | 2013-11-21 03:14:22 +0100 | [diff] [blame] | 3469 | } else { |
| 3470 | struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; |
Eyal Birger | 2472d76 | 2015-03-01 14:58:28 +0200 | [diff] [blame] | 3471 | |
Hannes Frederic Sowa | f3d3342 | 2013-11-21 03:14:22 +0100 | [diff] [blame] | 3472 | msg->msg_namelen = sll->sll_halen + |
| 3473 | offsetof(struct sockaddr_ll, sll_addr); |
Willem de Bruijn | b2cf86e | 2019-04-29 11:46:55 -0400 | [diff] [blame] | 3474 | copy_len = msg->msg_namelen; |
| 3475 | if (msg->msg_namelen < sizeof(struct sockaddr_ll)) { |
| 3476 | memset(msg->msg_name + |
| 3477 | offsetof(struct sockaddr_ll, sll_addr), |
| 3478 | 0, sizeof(sll->sll_addr)); |
| 3479 | msg->msg_namelen = sizeof(struct sockaddr_ll); |
| 3480 | } |
Hannes Frederic Sowa | f3d3342 | 2013-11-21 03:14:22 +0100 | [diff] [blame] | 3481 | } |
Willem de Bruijn | b2cf86e | 2019-04-29 11:46:55 -0400 | [diff] [blame] | 3482 | memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len); |
Hannes Frederic Sowa | f3d3342 | 2013-11-21 03:14:22 +0100 | [diff] [blame] | 3483 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3484 | |
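/*
 * If PACKET_AUXDATA is enabled, report packet metadata (status,
 * original and snapped lengths, offsets and any VLAN tag) to the
 * receiver in a PACKET_AUXDATA control message.
 */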
Herbert Xu | 8dc4194 | 2007-02-04 23:31:32 -0800 | [diff] [blame] | 3485 | if (pkt_sk(sk)->auxdata) { |
Herbert Xu | ffbc611 | 2007-02-04 23:33:10 -0800 | [diff] [blame] | 3486 | struct tpacket_auxdata aux; |
| 3487 | |
| 3488 | aux.tp_status = TP_STATUS_USER; |
| 3489 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
| 3490 | aux.tp_status |= TP_STATUS_CSUMNOTREADY; |
Alexander Drozdov | 682f048 | 2015-03-23 09:11:13 +0300 | [diff] [blame] | 3491 | else if (skb->pkt_type != PACKET_OUTGOING && |
| 3492 | (skb->ip_summed == CHECKSUM_COMPLETE || |
| 3493 | skb_csum_unnecessary(skb))) |
| 3494 | aux.tp_status |= TP_STATUS_CSUM_VALID; |
| 3495 | |
Eyal Birger | 2472d76 | 2015-03-01 14:58:28 +0200 | [diff] [blame] | 3496 | aux.tp_len = origlen; |
Herbert Xu | ffbc611 | 2007-02-04 23:33:10 -0800 | [diff] [blame] | 3497 | aux.tp_snaplen = skb->len; |
| 3498 | aux.tp_mac = 0; |
Arnaldo Carvalho de Melo | bbe735e | 2007-03-10 22:16:10 -0300 | [diff] [blame] | 3499 | aux.tp_net = skb_network_offset(skb); |
Jiri Pirko | df8a39d | 2015-01-13 17:13:44 +0100 | [diff] [blame] | 3500 | if (skb_vlan_tag_present(skb)) { |
| 3501 | aux.tp_vlan_tci = skb_vlan_tag_get(skb); |
Atzm Watanabe | a0cdfcf | 2013-12-17 22:53:40 +0900 | [diff] [blame] | 3502 | aux.tp_vlan_tpid = ntohs(skb->vlan_proto); |
| 3503 | aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; |
Ben Greear | a3bcc23 | 2011-06-01 06:49:10 +0000 | [diff] [blame] | 3504 | } else { |
| 3505 | aux.tp_vlan_tci = 0; |
Atzm Watanabe | a0cdfcf | 2013-12-17 22:53:40 +0900 | [diff] [blame] | 3506 | aux.tp_vlan_tpid = 0; |
Ben Greear | a3bcc23 | 2011-06-01 06:49:10 +0000 | [diff] [blame] | 3507 | } |
Herbert Xu | ffbc611 | 2007-02-04 23:33:10 -0800 | [diff] [blame] | 3508 | put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux); |
Herbert Xu | 8dc4194 | 2007-02-04 23:31:32 -0800 | [diff] [blame] | 3509 | } |
| 3510 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3511 | /* |
| 3512 | * Free or return the buffer as appropriate. Again this |
| 3513 | * hides all the races and re-entrancy issues from us. |
| 3514 | */ |
Sridhar Samudrala | bfd5f4a | 2010-02-04 20:24:10 -0800 | [diff] [blame] | 3515 | err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3516 | |
| 3517 | out_free: |
| 3518 | skb_free_datagram(sk, skb); |
| 3519 | out: |
| 3520 | return err; |
| 3521 | } |
| 3522 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3523 | static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr, |
Denys Vlasenko | 9b2c45d | 2018-02-12 20:00:20 +0100 | [diff] [blame] | 3524 | int peer) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3525 | { |
| 3526 | struct net_device *dev; |
| 3527 | struct sock *sk = sock->sk; |
| 3528 | |
| 3529 | if (peer) |
| 3530 | return -EOPNOTSUPP; |
| 3531 | |
| 3532 | uaddr->sa_family = AF_PACKET; |
Daniel Borkmann | 2dc85bf | 2013-06-12 16:02:27 +0200 | [diff] [blame] | 3533 | memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data)); |
Eric Dumazet | 654d1f8 | 2009-11-02 10:43:32 +0100 | [diff] [blame] | 3534 | rcu_read_lock(); |
Eric Dumazet | e032f7c | 2021-06-16 06:42:02 -0700 | [diff] [blame] | 3535 | dev = dev_get_by_index_rcu(sock_net(sk), READ_ONCE(pkt_sk(sk)->ifindex)); |
Eric Dumazet | 654d1f8 | 2009-11-02 10:43:32 +0100 | [diff] [blame] | 3536 | if (dev) |
Daniel Borkmann | 2dc85bf | 2013-06-12 16:02:27 +0200 | [diff] [blame] | 3537 | strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data)); |
Eric Dumazet | 654d1f8 | 2009-11-02 10:43:32 +0100 | [diff] [blame] | 3538 | rcu_read_unlock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3539 | |
Denys Vlasenko | 9b2c45d | 2018-02-12 20:00:20 +0100 | [diff] [blame] | 3540 | return sizeof(*uaddr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3541 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3542 | |
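/*
 * getsockname() for SOCK_RAW/SOCK_DGRAM sockets: report the bound
 * ifindex and protocol, plus the device type and hardware address
 * while the device still exists.
 */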
| 3543 | static int packet_getname(struct socket *sock, struct sockaddr *uaddr, |
Denys Vlasenko | 9b2c45d | 2018-02-12 20:00:20 +0100 | [diff] [blame] | 3544 | int peer) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3545 | { |
| 3546 | struct net_device *dev; |
| 3547 | struct sock *sk = sock->sk; |
| 3548 | struct packet_sock *po = pkt_sk(sk); |
Cyrill Gorcunov | 13cfa97 | 2009-11-08 05:51:19 +0000 | [diff] [blame] | 3549 | DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr); |
Eric Dumazet | e032f7c | 2021-06-16 06:42:02 -0700 | [diff] [blame] | 3550 | int ifindex; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3551 | |
| 3552 | if (peer) |
| 3553 | return -EOPNOTSUPP; |
| 3554 | |
Eric Dumazet | e032f7c | 2021-06-16 06:42:02 -0700 | [diff] [blame] | 3555 | ifindex = READ_ONCE(po->ifindex); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3556 | sll->sll_family = AF_PACKET; |
Eric Dumazet | e032f7c | 2021-06-16 06:42:02 -0700 | [diff] [blame] | 3557 | sll->sll_ifindex = ifindex; |
Eric Dumazet | c7d2ef5 | 2021-06-16 06:42:01 -0700 | [diff] [blame] | 3558 | sll->sll_protocol = READ_ONCE(po->num); |
Vasiliy Kulikov | 6728664 | 2010-11-10 12:09:10 -0800 | [diff] [blame] | 3559 | sll->sll_pkttype = 0; |
Eric Dumazet | 654d1f8 | 2009-11-02 10:43:32 +0100 | [diff] [blame] | 3560 | rcu_read_lock(); |
Eric Dumazet | e032f7c | 2021-06-16 06:42:02 -0700 | [diff] [blame] | 3561 | dev = dev_get_by_index_rcu(sock_net(sk), ifindex); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3562 | if (dev) { |
| 3563 | sll->sll_hatype = dev->type; |
| 3564 | sll->sll_halen = dev->addr_len; |
| 3565 | memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3566 | } else { |
| 3567 | sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */ |
| 3568 | sll->sll_halen = 0; |
| 3569 | } |
Eric Dumazet | 654d1f8 | 2009-11-02 10:43:32 +0100 | [diff] [blame] | 3570 | rcu_read_unlock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3571 | |
Denys Vlasenko | 9b2c45d | 2018-02-12 20:00:20 +0100 | [diff] [blame] | 3572 | return offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3573 | } |
| 3574 | |
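/*
 * Apply (what > 0) or revert (what < 0) one membership request on
 * the device: a multicast or unicast filter entry, promiscuous
 * mode, or allmulti mode.
 */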
Wang Chen | 2aeb0b8 | 2008-07-14 20:49:46 -0700 | [diff] [blame] | 3575 | static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, |
| 3576 | int what) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3577 | { |
| 3578 | switch (i->type) { |
| 3579 | case PACKET_MR_MULTICAST: |
Jiri Pirko | 1162563 | 2010-03-02 20:40:01 +0000 | [diff] [blame] | 3580 | if (i->alen != dev->addr_len) |
| 3581 | return -EINVAL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3582 | if (what > 0) |
Jiri Pirko | 22bedad3 | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 3583 | return dev_mc_add(dev, i->addr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3584 | else |
Jiri Pirko | 22bedad3 | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 3585 | return dev_mc_del(dev, i->addr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3586 | break; |
| 3587 | case PACKET_MR_PROMISC: |
Wang Chen | 2aeb0b8 | 2008-07-14 20:49:46 -0700 | [diff] [blame] | 3588 | return dev_set_promiscuity(dev, what); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3589 | case PACKET_MR_ALLMULTI: |
Wang Chen | 2aeb0b8 | 2008-07-14 20:49:46 -0700 | [diff] [blame] | 3590 | return dev_set_allmulti(dev, what); |
Eric W. Biederman | d95ed92 | 2009-05-19 18:27:17 +0000 | [diff] [blame] | 3591 | case PACKET_MR_UNICAST: |
Jiri Pirko | 1162563 | 2010-03-02 20:40:01 +0000 | [diff] [blame] | 3592 | if (i->alen != dev->addr_len) |
| 3593 | return -EINVAL; |
Eric W. Biederman | d95ed92 | 2009-05-19 18:27:17 +0000 | [diff] [blame] | 3594 | if (what > 0) |
Jiri Pirko | a748ee2 | 2010-04-01 21:22:09 +0000 | [diff] [blame] | 3595 | return dev_uc_add(dev, i->addr); |
Eric W. Biederman | d95ed92 | 2009-05-19 18:27:17 +0000 | [diff] [blame] | 3596 | else |
Jiri Pirko | a748ee2 | 2010-04-01 21:22:09 +0000 | [diff] [blame] | 3597 | return dev_uc_del(dev, i->addr); |
Eric W. Biederman | d95ed92 | 2009-05-19 18:27:17 +0000 | [diff] [blame] | 3598 | break; |
Eric Dumazet | 40d4e3d | 2009-07-21 21:57:59 +0000 | [diff] [blame] | 3599 | default: |
| 3600 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3601 | } |
Wang Chen | 2aeb0b8 | 2008-07-14 20:49:46 -0700 | [diff] [blame] | 3602 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3603 | } |
| 3604 | |
Francesco Ruggeri | 82f1709 | 2015-03-09 11:51:04 -0700 | [diff] [blame] | 3605 | static void packet_dev_mclist_delete(struct net_device *dev, |
| 3606 | struct packet_mclist **mlp) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3607 | { |
Francesco Ruggeri | 82f1709 | 2015-03-09 11:51:04 -0700 | [diff] [blame] | 3608 | struct packet_mclist *ml; |
| 3609 | |
| 3610 | while ((ml = *mlp) != NULL) { |
| 3611 | if (ml->ifindex == dev->ifindex) { |
| 3612 | packet_dev_mc(dev, ml, -1); |
| 3613 | *mlp = ml->next; |
| 3614 | kfree(ml); |
| 3615 | } else |
| 3616 | mlp = &ml->next; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3617 | } |
| 3618 | } |
| 3619 | |
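/*
 * PACKET_ADD_MEMBERSHIP: under rtnl, either bump the refcount of a
 * matching entry on po->mclist or link in a new one and program it
 * into the device.
 */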
Eric W. Biederman | 0fb375f | 2005-09-21 00:11:37 -0700 | [diff] [blame] | 3620 | static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3621 | { |
| 3622 | struct packet_sock *po = pkt_sk(sk); |
| 3623 | struct packet_mclist *ml, *i; |
| 3624 | struct net_device *dev; |
| 3625 | int err; |
| 3626 | |
| 3627 | rtnl_lock(); |
| 3628 | |
| 3629 | err = -ENODEV; |
YOSHIFUJI Hideaki | 3b1e0a6 | 2008-03-26 02:26:21 +0900 | [diff] [blame] | 3630 | dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3631 | if (!dev) |
| 3632 | goto done; |
| 3633 | |
| 3634 | err = -EINVAL; |
Jiri Pirko | 1162563 | 2010-03-02 20:40:01 +0000 | [diff] [blame] | 3635 | if (mreq->mr_alen > dev->addr_len) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3636 | goto done; |
| 3637 | |
| 3638 | err = -ENOBUFS; |
Kris Katterjohn | 8b3a700 | 2006-01-11 15:56:43 -0800 | [diff] [blame] | 3639 | i = kmalloc(sizeof(*i), GFP_KERNEL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3640 | if (i == NULL) |
| 3641 | goto done; |
| 3642 | |
| 3643 | err = 0; |
| 3644 | for (ml = po->mclist; ml; ml = ml->next) { |
| 3645 | if (ml->ifindex == mreq->mr_ifindex && |
| 3646 | ml->type == mreq->mr_type && |
| 3647 | ml->alen == mreq->mr_alen && |
| 3648 | memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { |
| 3649 | ml->count++; |
| 3650 | /* Free the new element ... */ |
| 3651 | kfree(i); |
| 3652 | goto done; |
| 3653 | } |
| 3654 | } |
| 3655 | |
| 3656 | i->type = mreq->mr_type; |
| 3657 | i->ifindex = mreq->mr_ifindex; |
| 3658 | i->alen = mreq->mr_alen; |
| 3659 | memcpy(i->addr, mreq->mr_address, i->alen); |
Mathias Krause | 309cf37 | 2016-04-10 12:52:28 +0200 | [diff] [blame] | 3660 | memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3661 | i->count = 1; |
| 3662 | i->next = po->mclist; |
| 3663 | po->mclist = i; |
Wang Chen | 2aeb0b8 | 2008-07-14 20:49:46 -0700 | [diff] [blame] | 3664 | err = packet_dev_mc(dev, i, 1); |
| 3665 | if (err) { |
| 3666 | po->mclist = i->next; |
| 3667 | kfree(i); |
| 3668 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3669 | |
| 3670 | done: |
| 3671 | rtnl_unlock(); |
| 3672 | return err; |
| 3673 | } |
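/*
 * Illustrative userspace usage (a sketch, not part of this file):
 * membership is driven through setsockopt() on an AF_PACKET socket.
 * Assuming a socket `fd` and an interface named "eth0":
 *
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *	#include <net/if.h>
 *
 *	struct packet_mreq mreq = {0};
 *
 *	mreq.mr_ifindex = if_nametoindex("eth0");
 *	mreq.mr_type = PACKET_MR_PROMISC;	// no address needed
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *		   &mreq, sizeof(mreq));
 *
 * packet_mc_add() refcounts duplicate requests, so a matching
 * PACKET_DROP_MEMBERSHIP undoes the device state only on the last drop.
 */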
| 3674 | |
Eric W. Biederman | 0fb375f | 2005-09-21 00:11:37 -0700 | [diff] [blame] | 3675 | static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3676 | { |
| 3677 | struct packet_mclist *ml, **mlp; |
| 3678 | |
| 3679 | rtnl_lock(); |
| 3680 | |
| 3681 | for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) { |
| 3682 | if (ml->ifindex == mreq->mr_ifindex && |
| 3683 | ml->type == mreq->mr_type && |
| 3684 | ml->alen == mreq->mr_alen && |
| 3685 | memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { |
| 3686 | if (--ml->count == 0) { |
| 3687 | struct net_device *dev; |
| 3688 | *mlp = ml->next; |
Eric Dumazet | ad959e7 | 2009-10-16 06:38:46 +0000 | [diff] [blame] | 3689 | dev = __dev_get_by_index(sock_net(sk), ml->ifindex); |
| 3690 | if (dev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3691 | packet_dev_mc(dev, ml, -1); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3692 | kfree(ml); |
| 3693 | } |
Francesco Ruggeri | 82f1709 | 2015-03-09 11:51:04 -0700 | [diff] [blame] | 3694 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3695 | } |
| 3696 | } |
| 3697 | rtnl_unlock(); |
Francesco Ruggeri | 82f1709 | 2015-03-09 11:51:04 -0700 | [diff] [blame] | 3698 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3699 | } |
| 3700 | |
| 3701 | static void packet_flush_mclist(struct sock *sk) |
| 3702 | { |
| 3703 | struct packet_sock *po = pkt_sk(sk); |
| 3704 | struct packet_mclist *ml; |
| 3705 | |
| 3706 | if (!po->mclist) |
| 3707 | return; |
| 3708 | |
| 3709 | rtnl_lock(); |
| 3710 | while ((ml = po->mclist) != NULL) { |
| 3711 | struct net_device *dev; |
| 3712 | |
| 3713 | po->mclist = ml->next; |
Eric Dumazet | ad959e7 | 2009-10-16 06:38:46 +0000 | [diff] [blame] | 3714 | dev = __dev_get_by_index(sock_net(sk), ml->ifindex); |
| 3715 | if (dev != NULL) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3716 | packet_dev_mc(dev, ml, -1); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3717 | kfree(ml); |
| 3718 | } |
| 3719 | rtnl_unlock(); |
| 3720 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3721 | |
| 3722 | static int |
Christoph Hellwig | a7b75c5 | 2020-07-23 08:09:07 +0200 | [diff] [blame] | 3723 | packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, |
| 3724 | unsigned int optlen) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3725 | { |
| 3726 | struct sock *sk = sock->sk; |
Herbert Xu | 8dc4194 | 2007-02-04 23:31:32 -0800 | [diff] [blame] | 3727 | struct packet_sock *po = pkt_sk(sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3728 | int ret; |
| 3729 | |
| 3730 | if (level != SOL_PACKET) |
| 3731 | return -ENOPROTOOPT; |
| 3732 | |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 3733 | switch (optname) { |
YOSHIFUJI Hideaki | 1ce4f28 | 2007-02-09 23:25:10 +0900 | [diff] [blame] | 3734 | case PACKET_ADD_MEMBERSHIP: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3735 | case PACKET_DROP_MEMBERSHIP: |
| 3736 | { |
Eric W. Biederman | 0fb375f | 2005-09-21 00:11:37 -0700 | [diff] [blame] | 3737 | struct packet_mreq_max mreq; |
| 3738 | int len = optlen; |
| 3739 | memset(&mreq, 0, sizeof(mreq)); |
| 3740 | if (len < sizeof(struct packet_mreq)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3741 | return -EINVAL; |
Eric W. Biederman | 0fb375f | 2005-09-21 00:11:37 -0700 | [diff] [blame] | 3742 | if (len > sizeof(mreq)) |
| 3743 | len = sizeof(mreq); |
Christoph Hellwig | a7b75c5 | 2020-07-23 08:09:07 +0200 | [diff] [blame] | 3744 | if (copy_from_sockptr(&mreq, optval, len)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3745 | return -EFAULT; |
Eric W. Biederman | 0fb375f | 2005-09-21 00:11:37 -0700 | [diff] [blame] | 3746 | if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address))) |
| 3747 | return -EINVAL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3748 | if (optname == PACKET_ADD_MEMBERSHIP) |
| 3749 | ret = packet_mc_add(sk, &mreq); |
| 3750 | else |
| 3751 | ret = packet_mc_drop(sk, &mreq); |
| 3752 | return ret; |
| 3753 | } |
David S. Miller | a2efcfa | 2007-05-29 13:12:50 -0700 | [diff] [blame] | 3754 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3755 | case PACKET_RX_RING: |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 3756 | case PACKET_TX_RING: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3757 | { |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 3758 | union tpacket_req_u req_u; |
| 3759 | int len; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3760 | |
Eric Dumazet | 5171b37 | 2018-04-15 17:52:04 -0700 | [diff] [blame] | 3761 | lock_sock(sk); |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 3762 | switch (po->tp_version) { |
| 3763 | case TPACKET_V1: |
| 3764 | case TPACKET_V2: |
| 3765 | len = sizeof(req_u.req); |
| 3766 | break; |
| 3767 | case TPACKET_V3: |
| 3768 | default: |
| 3769 | len = sizeof(req_u.req3); |
| 3770 | break; |
| 3771 | } |
Eric Dumazet | 5171b37 | 2018-04-15 17:52:04 -0700 | [diff] [blame] | 3772 | if (optlen < len) { |
| 3773 | ret = -EINVAL; |
| 3774 | } else { |
Christoph Hellwig | a7b75c5 | 2020-07-23 08:09:07 +0200 | [diff] [blame] | 3775 | if (copy_from_sockptr(&req_u.req, optval, len)) |
Eric Dumazet | 5171b37 | 2018-04-15 17:52:04 -0700 | [diff] [blame] | 3776 | ret = -EFAULT; |
| 3777 | else |
| 3778 | ret = packet_set_ring(sk, &req_u, 0, |
| 3779 | optname == PACKET_TX_RING); |
| 3780 | } |
| 3781 | release_sock(sk); |
| 3782 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3783 | } |
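/*
 * Illustrative ring setup (a sketch, not part of this file). Note the
 * ordering: PACKET_VERSION must be chosen before the ring is created,
 * because the PACKET_VERSION handler below returns -EBUSY once a
 * pg_vec is allocated. Sizes here assume 4 KiB pages:
 *
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,		// page-aligned
 *		.tp_block_nr   = 64,
 *		.tp_frame_size = 2048,		// multiple of TPACKET_ALIGNMENT
 *		.tp_frame_nr   = 64 * (4096 / 2048),
 *	};
 *	int ver = TPACKET_V2;
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *
 * The size and alignment rules are enforced in packet_set_ring() below.
 */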
| 3784 | case PACKET_COPY_THRESH: |
| 3785 | { |
| 3786 | int val; |
| 3787 | |
Eric Dumazet | 40d4e3d | 2009-07-21 21:57:59 +0000 | [diff] [blame] | 3788 | if (optlen != sizeof(val)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3789 | return -EINVAL; |
Christoph Hellwig | a7b75c5 | 2020-07-23 08:09:07 +0200 | [diff] [blame] | 3790 | if (copy_from_sockptr(&val, optval, sizeof(val))) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3791 | return -EFAULT; |
| 3792 | |
| 3793 | pkt_sk(sk)->copy_thresh = val; |
| 3794 | return 0; |
| 3795 | } |
Patrick McHardy | bbd6ef8 | 2008-07-14 22:50:15 -0700 | [diff] [blame] | 3796 | case PACKET_VERSION: |
| 3797 | { |
| 3798 | int val; |
| 3799 | |
| 3800 | if (optlen != sizeof(val)) |
| 3801 | return -EINVAL; |
Christoph Hellwig | a7b75c5 | 2020-07-23 08:09:07 +0200 | [diff] [blame] | 3802 | if (copy_from_sockptr(&val, optval, sizeof(val))) |
Patrick McHardy | bbd6ef8 | 2008-07-14 22:50:15 -0700 | [diff] [blame] | 3803 | return -EFAULT; |
| 3804 | switch (val) { |
| 3805 | case TPACKET_V1: |
| 3806 | case TPACKET_V2: |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 3807 | case TPACKET_V3: |
Philip Pettersson | 84ac726 | 2016-11-30 14:55:36 -0800 | [diff] [blame] | 3808 | break; |
Patrick McHardy | bbd6ef8 | 2008-07-14 22:50:15 -0700 | [diff] [blame] | 3809 | default: |
| 3810 | return -EINVAL; |
| 3811 | } |
Philip Pettersson | 84ac726 | 2016-11-30 14:55:36 -0800 | [diff] [blame] | 3812 | lock_sock(sk); |
| 3813 | if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { |
| 3814 | ret = -EBUSY; |
| 3815 | } else { |
| 3816 | po->tp_version = val; |
| 3817 | ret = 0; |
| 3818 | } |
| 3819 | release_sock(sk); |
| 3820 | return ret; |
Patrick McHardy | bbd6ef8 | 2008-07-14 22:50:15 -0700 | [diff] [blame] | 3821 | } |
Patrick McHardy | 8913336a | 2008-07-18 18:05:19 -0700 | [diff] [blame] | 3822 | case PACKET_RESERVE: |
| 3823 | { |
| 3824 | unsigned int val; |
| 3825 | |
| 3826 | if (optlen != sizeof(val)) |
| 3827 | return -EINVAL; |
Christoph Hellwig | a7b75c5 | 2020-07-23 08:09:07 +0200 | [diff] [blame] | 3828 | if (copy_from_sockptr(&val, optval, sizeof(val))) |
Patrick McHardy | 8913336a | 2008-07-18 18:05:19 -0700 | [diff] [blame] | 3829 | return -EFAULT; |
Andrey Konovalov | bcc5364 | 2017-03-29 16:11:22 +0200 | [diff] [blame] | 3830 | if (val > INT_MAX) |
| 3831 | return -EINVAL; |
Willem de Bruijn | c27927e | 2017-08-10 12:41:58 -0400 | [diff] [blame] | 3832 | lock_sock(sk); |
| 3833 | if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { |
| 3834 | ret = -EBUSY; |
| 3835 | } else { |
| 3836 | po->tp_reserve = val; |
| 3837 | ret = 0; |
| 3838 | } |
| 3839 | release_sock(sk); |
| 3840 | return ret; |
Patrick McHardy | 8913336a | 2008-07-18 18:05:19 -0700 | [diff] [blame] | 3841 | } |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 3842 | case PACKET_LOSS: |
| 3843 | { |
| 3844 | unsigned int val; |
| 3845 | |
| 3846 | if (optlen != sizeof(val)) |
| 3847 | return -EINVAL; |
Christoph Hellwig | a7b75c5 | 2020-07-23 08:09:07 +0200 | [diff] [blame] | 3848 | if (copy_from_sockptr(&val, optval, sizeof(val))) |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 3849 | return -EFAULT; |
Willem de Bruijn | a6361f0 | 2018-04-23 17:37:03 -0400 | [diff] [blame] | 3850 | |
| 3851 | lock_sock(sk); |
| 3852 | if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { |
| 3853 | ret = -EBUSY; |
| 3854 | } else { |
| 3855 | po->tp_loss = !!val; |
| 3856 | ret = 0; |
| 3857 | } |
| 3858 | release_sock(sk); |
| 3859 | return ret; |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 3860 | } |
Herbert Xu | 8dc4194 | 2007-02-04 23:31:32 -0800 | [diff] [blame] | 3861 | case PACKET_AUXDATA: |
| 3862 | { |
| 3863 | int val; |
| 3864 | |
| 3865 | if (optlen < sizeof(val)) |
| 3866 | return -EINVAL; |
Christoph Hellwig | a7b75c5 | 2020-07-23 08:09:07 +0200 | [diff] [blame] | 3867 | if (copy_from_sockptr(&val, optval, sizeof(val))) |
Herbert Xu | 8dc4194 | 2007-02-04 23:31:32 -0800 | [diff] [blame] | 3868 | return -EFAULT; |
| 3869 | |
Willem de Bruijn | a6361f0 | 2018-04-23 17:37:03 -0400 | [diff] [blame] | 3870 | lock_sock(sk); |
Herbert Xu | 8dc4194 | 2007-02-04 23:31:32 -0800 | [diff] [blame] | 3871 | po->auxdata = !!val; |
Willem de Bruijn | a6361f0 | 2018-04-23 17:37:03 -0400 | [diff] [blame] | 3872 | release_sock(sk); |
Herbert Xu | 8dc4194 | 2007-02-04 23:31:32 -0800 | [diff] [blame] | 3873 | return 0; |
| 3874 | } |
Peter P. Waskiewicz Jr | 80feaac | 2007-04-20 16:05:39 -0700 | [diff] [blame] | 3875 | case PACKET_ORIGDEV: |
| 3876 | { |
| 3877 | int val; |
| 3878 | |
| 3879 | if (optlen < sizeof(val)) |
| 3880 | return -EINVAL; |
Christoph Hellwig | a7b75c5 | 2020-07-23 08:09:07 +0200 | [diff] [blame] | 3881 | if (copy_from_sockptr(&val, optval, sizeof(val))) |
Peter P. Waskiewicz Jr | 80feaac | 2007-04-20 16:05:39 -0700 | [diff] [blame] | 3882 | return -EFAULT; |
| 3883 | |
Willem de Bruijn | a6361f0 | 2018-04-23 17:37:03 -0400 | [diff] [blame] | 3884 | lock_sock(sk); |
Peter P. Waskiewicz Jr | 80feaac | 2007-04-20 16:05:39 -0700 | [diff] [blame] | 3885 | po->origdev = !!val; |
Willem de Bruijn | a6361f0 | 2018-04-23 17:37:03 -0400 | [diff] [blame] | 3886 | release_sock(sk); |
Peter P. Waskiewicz Jr | 80feaac | 2007-04-20 16:05:39 -0700 | [diff] [blame] | 3887 | return 0; |
| 3888 | } |
Sridhar Samudrala | bfd5f4a | 2010-02-04 20:24:10 -0800 | [diff] [blame] | 3889 | case PACKET_VNET_HDR: |
| 3890 | { |
| 3891 | int val; |
| 3892 | |
| 3893 | if (sock->type != SOCK_RAW) |
| 3894 | return -EINVAL; |
Sridhar Samudrala | bfd5f4a | 2010-02-04 20:24:10 -0800 | [diff] [blame] | 3895 | if (optlen < sizeof(val)) |
| 3896 | return -EINVAL; |
Christoph Hellwig | a7b75c5 | 2020-07-23 08:09:07 +0200 | [diff] [blame] | 3897 | if (copy_from_sockptr(&val, optval, sizeof(val))) |
Sridhar Samudrala | bfd5f4a | 2010-02-04 20:24:10 -0800 | [diff] [blame] | 3898 | return -EFAULT; |
| 3899 | |
Willem de Bruijn | a6361f0 | 2018-04-23 17:37:03 -0400 | [diff] [blame] | 3900 | lock_sock(sk); |
| 3901 | if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { |
| 3902 | ret = -EBUSY; |
| 3903 | } else { |
| 3904 | po->has_vnet_hdr = !!val; |
| 3905 | ret = 0; |
| 3906 | } |
| 3907 | release_sock(sk); |
| 3908 | return ret; |
Sridhar Samudrala | bfd5f4a | 2010-02-04 20:24:10 -0800 | [diff] [blame] | 3909 | } |
Scott McMillan | 614f60f | 2010-06-02 05:53:56 -0700 | [diff] [blame] | 3910 | case PACKET_TIMESTAMP: |
| 3911 | { |
| 3912 | int val; |
| 3913 | |
| 3914 | if (optlen != sizeof(val)) |
| 3915 | return -EINVAL; |
Christoph Hellwig | a7b75c5 | 2020-07-23 08:09:07 +0200 | [diff] [blame] | 3916 | if (copy_from_sockptr(&val, optval, sizeof(val))) |
Scott McMillan | 614f60f | 2010-06-02 05:53:56 -0700 | [diff] [blame] | 3917 | return -EFAULT; |
| 3918 | |
| 3919 | po->tp_tstamp = val; |
| 3920 | return 0; |
| 3921 | } |
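/*
 * Illustrative timestamp request (a sketch): tp_tstamp takes
 * SOF_TIMESTAMPING_* flags from <linux/net_tstamp.h>, e.g.
 *
 *	int val = SOF_TIMESTAMPING_RAW_HARDWARE;
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP, &val, sizeof(val));
 */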
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 3922 | case PACKET_FANOUT: |
| 3923 | { |
Tanner Love | 9c661b0 | 2020-11-06 13:07:40 -0500 | [diff] [blame] | 3924 | struct fanout_args args = { 0 }; |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 3925 | |
Tanner Love | 9c661b0 | 2020-11-06 13:07:40 -0500 | [diff] [blame] | 3926 | if (optlen != sizeof(int) && optlen != sizeof(args)) |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 3927 | return -EINVAL; |
Tanner Love | 9c661b0 | 2020-11-06 13:07:40 -0500 | [diff] [blame] | 3928 | if (copy_from_sockptr(&args, optval, optlen)) |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 3929 | return -EFAULT; |
| 3930 | |
Tanner Love | 9c661b0 | 2020-11-06 13:07:40 -0500 | [diff] [blame] | 3931 | return fanout_add(sk, &args); |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 3932 | } |
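/*
 * Illustrative fanout join (a sketch): for the plain-int form, the
 * low 16 bits carry the group id and the next bits the mode, the same
 * encoding packet_getsockopt() reproduces below.
 *
 *	int arg = 7 | (PACKET_FANOUT_CPU << 16);	// join group 7
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
 *
 * Sockets joining the same group id share incoming packets according
 * to the chosen mode.
 */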
Willem de Bruijn | 47dceb8 | 2015-08-14 22:31:34 -0400 | [diff] [blame] | 3933 | case PACKET_FANOUT_DATA: |
| 3934 | { |
| 3935 | if (!po->fanout) |
| 3936 | return -EINVAL; |
| 3937 | |
| 3938 | return fanout_set_data(po, optval, optlen); |
| 3939 | } |
Vincent Whitchurch | fa788d9 | 2018-09-03 16:23:36 +0200 | [diff] [blame] | 3940 | case PACKET_IGNORE_OUTGOING: |
| 3941 | { |
| 3942 | int val; |
| 3943 | |
| 3944 | if (optlen != sizeof(val)) |
| 3945 | return -EINVAL; |
Christoph Hellwig | a7b75c5 | 2020-07-23 08:09:07 +0200 | [diff] [blame] | 3946 | if (copy_from_sockptr(&val, optval, sizeof(val))) |
Vincent Whitchurch | fa788d9 | 2018-09-03 16:23:36 +0200 | [diff] [blame] | 3947 | return -EFAULT; |
| 3948 | if (val < 0 || val > 1) |
| 3949 | return -EINVAL; |
| 3950 | |
| 3951 | po->prot_hook.ignore_outgoing = !!val; |
| 3952 | return 0; |
| 3953 | } |
Paul Chavent | 5920cd3a | 2012-11-06 23:10:47 +0000 | [diff] [blame] | 3954 | case PACKET_TX_HAS_OFF: |
| 3955 | { |
| 3956 | unsigned int val; |
| 3957 | |
| 3958 | if (optlen != sizeof(val)) |
| 3959 | return -EINVAL; |
Christoph Hellwig | a7b75c5 | 2020-07-23 08:09:07 +0200 | [diff] [blame] | 3960 | if (copy_from_sockptr(&val, optval, sizeof(val))) |
Paul Chavent | 5920cd3a | 2012-11-06 23:10:47 +0000 | [diff] [blame] | 3961 | return -EFAULT; |
Willem de Bruijn | a6361f0 | 2018-04-23 17:37:03 -0400 | [diff] [blame] | 3962 | |
| 3963 | lock_sock(sk); |
Jiapeng Chong | 25c55b3 | 2021-05-17 18:15:25 +0800 | [diff] [blame] | 3964 | if (!po->rx_ring.pg_vec && !po->tx_ring.pg_vec) |
Willem de Bruijn | a6361f0 | 2018-04-23 17:37:03 -0400 | [diff] [blame] | 3965 | po->tp_tx_has_off = !!val; |
Jiapeng Chong | 25c55b3 | 2021-05-17 18:15:25 +0800 | [diff] [blame] | 3966 | |
Willem de Bruijn | a6361f0 | 2018-04-23 17:37:03 -0400 | [diff] [blame] | 3967 | release_sock(sk); |
Paul Chavent | 5920cd3a | 2012-11-06 23:10:47 +0000 | [diff] [blame] | 3968 | return 0; |
| 3969 | } |
Daniel Borkmann | d346a3f | 2013-12-06 11:36:17 +0100 | [diff] [blame] | 3970 | case PACKET_QDISC_BYPASS: |
| 3971 | { |
| 3972 | int val; |
| 3973 | |
| 3974 | if (optlen != sizeof(val)) |
| 3975 | return -EINVAL; |
Christoph Hellwig | a7b75c5 | 2020-07-23 08:09:07 +0200 | [diff] [blame] | 3976 | if (copy_from_sockptr(&val, optval, sizeof(val))) |
Daniel Borkmann | d346a3f | 2013-12-06 11:36:17 +0100 | [diff] [blame] | 3977 | return -EFAULT; |
| 3978 | |
| 3979 | po->xmit = val ? packet_direct_xmit : dev_queue_xmit; |
| 3980 | return 0; |
| 3981 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3982 | default: |
| 3983 | return -ENOPROTOOPT; |
| 3984 | } |
| 3985 | } |
| 3986 | |
| 3987 | static int packet_getsockopt(struct socket *sock, int level, int optname, |
| 3988 | char __user *optval, int __user *optlen) |
| 3989 | { |
| 3990 | int len; |
Eric Dumazet | c06fff6 | 2012-04-19 21:56:11 +0000 | [diff] [blame] | 3991 | int val, lv = sizeof(val); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3992 | struct sock *sk = sock->sk; |
| 3993 | struct packet_sock *po = pkt_sk(sk); |
Eric Dumazet | c06fff6 | 2012-04-19 21:56:11 +0000 | [diff] [blame] | 3994 | void *data = &val; |
Daniel Borkmann | ee80fbf | 2013-04-19 06:12:29 +0000 | [diff] [blame] | 3995 | union tpacket_stats_u st; |
Willem de Bruijn | a9b6391 | 2015-05-12 11:56:50 -0400 | [diff] [blame] | 3996 | struct tpacket_rollover_stats rstats; |
Eric Dumazet | 8e8e295 | 2019-06-12 09:52:30 -0700 | [diff] [blame] | 3997 | int drops; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3998 | |
| 3999 | if (level != SOL_PACKET) |
| 4000 | return -ENOPROTOOPT; |
| 4001 | |
Kris Katterjohn | 8ae55f0 | 2006-01-23 16:28:02 -0800 | [diff] [blame] | 4002 | if (get_user(len, optlen)) |
| 4003 | return -EFAULT; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4004 | |
| 4005 | if (len < 0) |
| 4006 | return -EINVAL; |
YOSHIFUJI Hideaki | 1ce4f28 | 2007-02-09 23:25:10 +0900 | [diff] [blame] | 4007 | |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 4008 | switch (optname) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4009 | case PACKET_STATISTICS: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4010 | spin_lock_bh(&sk->sk_receive_queue.lock); |
Daniel Borkmann | ee80fbf | 2013-04-19 06:12:29 +0000 | [diff] [blame] | 4011 | memcpy(&st, &po->stats, sizeof(st)); |
| 4012 | memset(&po->stats, 0, sizeof(po->stats)); |
| 4013 | spin_unlock_bh(&sk->sk_receive_queue.lock); |
Eric Dumazet | 8e8e295 | 2019-06-12 09:52:30 -0700 | [diff] [blame] | 4014 | drops = atomic_xchg(&po->tp_drops, 0); |
Daniel Borkmann | ee80fbf | 2013-04-19 06:12:29 +0000 | [diff] [blame] | 4015 | |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 4016 | if (po->tp_version == TPACKET_V3) { |
Eric Dumazet | c06fff6 | 2012-04-19 21:56:11 +0000 | [diff] [blame] | 4017 | lv = sizeof(struct tpacket_stats_v3); |
Eric Dumazet | 8e8e295 | 2019-06-12 09:52:30 -0700 | [diff] [blame] | 4018 | st.stats3.tp_drops = drops; |
| 4019 | st.stats3.tp_packets += drops; |
Daniel Borkmann | ee80fbf | 2013-04-19 06:12:29 +0000 | [diff] [blame] | 4020 | data = &st.stats3; |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 4021 | } else { |
Eric Dumazet | c06fff6 | 2012-04-19 21:56:11 +0000 | [diff] [blame] | 4022 | lv = sizeof(struct tpacket_stats); |
Eric Dumazet | 8e8e295 | 2019-06-12 09:52:30 -0700 | [diff] [blame] | 4023 | st.stats1.tp_drops = drops; |
| 4024 | st.stats1.tp_packets += drops; |
Daniel Borkmann | ee80fbf | 2013-04-19 06:12:29 +0000 | [diff] [blame] | 4025 | data = &st.stats1; |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 4026 | } |
Daniel Borkmann | ee80fbf | 2013-04-19 06:12:29 +0000 | [diff] [blame] | 4027 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4028 | break; |
Herbert Xu | 8dc4194 | 2007-02-04 23:31:32 -0800 | [diff] [blame] | 4029 | case PACKET_AUXDATA: |
Herbert Xu | 8dc4194 | 2007-02-04 23:31:32 -0800 | [diff] [blame] | 4030 | val = po->auxdata; |
Herbert Xu | 8dc4194 | 2007-02-04 23:31:32 -0800 | [diff] [blame] | 4031 | break; |
Peter P. Waskiewicz Jr | 80feaac | 2007-04-20 16:05:39 -0700 | [diff] [blame] | 4032 | case PACKET_ORIGDEV: |
Peter P. Waskiewicz Jr | 80feaac | 2007-04-20 16:05:39 -0700 | [diff] [blame] | 4033 | val = po->origdev; |
Peter P. Waskiewicz Jr | 80feaac | 2007-04-20 16:05:39 -0700 | [diff] [blame] | 4034 | break; |
Sridhar Samudrala | bfd5f4a | 2010-02-04 20:24:10 -0800 | [diff] [blame] | 4035 | case PACKET_VNET_HDR: |
Sridhar Samudrala | bfd5f4a | 2010-02-04 20:24:10 -0800 | [diff] [blame] | 4036 | val = po->has_vnet_hdr; |
Sridhar Samudrala | bfd5f4a | 2010-02-04 20:24:10 -0800 | [diff] [blame] | 4037 | break; |
Patrick McHardy | bbd6ef8 | 2008-07-14 22:50:15 -0700 | [diff] [blame] | 4038 | case PACKET_VERSION: |
Patrick McHardy | bbd6ef8 | 2008-07-14 22:50:15 -0700 | [diff] [blame] | 4039 | val = po->tp_version; |
Patrick McHardy | bbd6ef8 | 2008-07-14 22:50:15 -0700 | [diff] [blame] | 4040 | break; |
| 4041 | case PACKET_HDRLEN: |
| 4042 | if (len > sizeof(int)) |
| 4043 | len = sizeof(int); |
Alexander Potapenko | fd2c83b | 2017-04-25 18:51:46 +0200 | [diff] [blame] | 4044 | if (len < sizeof(int)) |
| 4045 | return -EINVAL; |
Patrick McHardy | bbd6ef8 | 2008-07-14 22:50:15 -0700 | [diff] [blame] | 4046 | if (copy_from_user(&val, optval, len)) |
| 4047 | return -EFAULT; |
| 4048 | switch (val) { |
| 4049 | case TPACKET_V1: |
| 4050 | val = sizeof(struct tpacket_hdr); |
| 4051 | break; |
| 4052 | case TPACKET_V2: |
| 4053 | val = sizeof(struct tpacket2_hdr); |
| 4054 | break; |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 4055 | case TPACKET_V3: |
| 4056 | val = sizeof(struct tpacket3_hdr); |
| 4057 | break; |
Patrick McHardy | bbd6ef8 | 2008-07-14 22:50:15 -0700 | [diff] [blame] | 4058 | default: |
| 4059 | return -EINVAL; |
| 4060 | } |
Patrick McHardy | bbd6ef8 | 2008-07-14 22:50:15 -0700 | [diff] [blame] | 4061 | break; |
Patrick McHardy | 8913336a | 2008-07-18 18:05:19 -0700 | [diff] [blame] | 4062 | case PACKET_RESERVE: |
Patrick McHardy | 8913336a | 2008-07-18 18:05:19 -0700 | [diff] [blame] | 4063 | val = po->tp_reserve; |
Patrick McHardy | 8913336a | 2008-07-18 18:05:19 -0700 | [diff] [blame] | 4064 | break; |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 4065 | case PACKET_LOSS: |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 4066 | val = po->tp_loss; |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 4067 | break; |
Scott McMillan | 614f60f | 2010-06-02 05:53:56 -0700 | [diff] [blame] | 4068 | case PACKET_TIMESTAMP: |
Scott McMillan | 614f60f | 2010-06-02 05:53:56 -0700 | [diff] [blame] | 4069 | val = po->tp_tstamp; |
Scott McMillan | 614f60f | 2010-06-02 05:53:56 -0700 | [diff] [blame] | 4070 | break; |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 4071 | case PACKET_FANOUT: |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 4072 | val = (po->fanout ? |
| 4073 | ((u32)po->fanout->id | |
Willem de Bruijn | 77f65eb | 2013-03-19 10:18:11 +0000 | [diff] [blame] | 4074 | ((u32)po->fanout->type << 16) | |
| 4075 | ((u32)po->fanout->flags << 24)) : |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 4076 | 0); |
David S. Miller | dc99f60 | 2011-07-05 01:45:05 -0700 | [diff] [blame] | 4077 | break; |
Vincent Whitchurch | fa788d9 | 2018-09-03 16:23:36 +0200 | [diff] [blame] | 4078 | case PACKET_IGNORE_OUTGOING: |
| 4079 | val = po->prot_hook.ignore_outgoing; |
| 4080 | break; |
Willem de Bruijn | a9b6391 | 2015-05-12 11:56:50 -0400 | [diff] [blame] | 4081 | case PACKET_ROLLOVER_STATS: |
Mike Maloney | 57f015f | 2017-11-28 10:44:29 -0500 | [diff] [blame] | 4082 | if (!po->rollover) |
Willem de Bruijn | a9b6391 | 2015-05-12 11:56:50 -0400 | [diff] [blame] | 4083 | return -EINVAL; |
Mike Maloney | 57f015f | 2017-11-28 10:44:29 -0500 | [diff] [blame] | 4084 | rstats.tp_all = atomic_long_read(&po->rollover->num); |
| 4085 | rstats.tp_huge = atomic_long_read(&po->rollover->num_huge); |
| 4086 | rstats.tp_failed = atomic_long_read(&po->rollover->num_failed); |
| 4087 | data = &rstats; |
| 4088 | lv = sizeof(rstats); |
Willem de Bruijn | a9b6391 | 2015-05-12 11:56:50 -0400 | [diff] [blame] | 4089 | break; |
Paul Chavent | 5920cd3a | 2012-11-06 23:10:47 +0000 | [diff] [blame] | 4090 | case PACKET_TX_HAS_OFF: |
| 4091 | val = po->tp_tx_has_off; |
| 4092 | break; |
Daniel Borkmann | d346a3f | 2013-12-06 11:36:17 +0100 | [diff] [blame] | 4093 | case PACKET_QDISC_BYPASS: |
| 4094 | val = packet_use_direct_xmit(po); |
| 4095 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4096 | default: |
| 4097 | return -ENOPROTOOPT; |
| 4098 | } |
| 4099 | |
Eric Dumazet | c06fff6 | 2012-04-19 21:56:11 +0000 | [diff] [blame] | 4100 | if (len > lv) |
| 4101 | len = lv; |
Kris Katterjohn | 8ae55f0 | 2006-01-23 16:28:02 -0800 | [diff] [blame] | 4102 | if (put_user(len, optlen)) |
| 4103 | return -EFAULT; |
Herbert Xu | 8dc4194 | 2007-02-04 23:31:32 -0800 | [diff] [blame] | 4104 | if (copy_to_user(optval, data, len)) |
| 4105 | return -EFAULT; |
Kris Katterjohn | 8ae55f0 | 2006-01-23 16:28:02 -0800 | [diff] [blame] | 4106 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4107 | } |
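/*
 * Illustrative read-out (a sketch): PACKET_STATISTICS is
 * read-and-reset, and since tp_drops is folded into tp_packets above,
 * tp_packets counts received plus dropped frames since the last call.
 *
 *	struct tpacket_stats st;
 *	socklen_t len = sizeof(st);
 *
 *	getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len);
 */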
| 4108 | |
Jiri Pirko | 351638e | 2013-05-28 01:30:21 +0000 | [diff] [blame] | 4109 | static int packet_notifier(struct notifier_block *this, |
| 4110 | unsigned long msg, void *ptr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4111 | { |
| 4112 | struct sock *sk; |
Jiri Pirko | 351638e | 2013-05-28 01:30:21 +0000 | [diff] [blame] | 4113 | struct net_device *dev = netdev_notifier_info_to_dev(ptr); |
YOSHIFUJI Hideaki | c346dca | 2008-03-25 21:47:49 +0900 | [diff] [blame] | 4114 | struct net *net = dev_net(dev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4115 | |
stephen hemminger | 808f511 | 2010-02-22 07:57:18 +0000 | [diff] [blame] | 4116 | rcu_read_lock(); |
Sasha Levin | b67bfe0 | 2013-02-27 17:06:00 -0800 | [diff] [blame] | 4117 | sk_for_each_rcu(sk, &net->packet.sklist) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4118 | struct packet_sock *po = pkt_sk(sk); |
| 4119 | |
| 4120 | switch (msg) { |
| 4121 | case NETDEV_UNREGISTER: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4122 | if (po->mclist) |
Francesco Ruggeri | 82f1709 | 2015-03-09 11:51:04 -0700 | [diff] [blame] | 4123 | packet_dev_mclist_delete(dev, &po->mclist); |
Gustavo A. R. Silva | df561f66 | 2020-08-23 17:36:59 -0500 | [diff] [blame] | 4124 | fallthrough; |
David S. Miller | a2efcfa | 2007-05-29 13:12:50 -0700 | [diff] [blame] | 4125 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4126 | case NETDEV_DOWN: |
| 4127 | if (dev->ifindex == po->ifindex) { |
| 4128 | spin_lock(&po->bind_lock); |
| 4129 | if (po->running) { |
David S. Miller | ce06b03 | 2011-07-04 01:44:29 -0700 | [diff] [blame] | 4130 | __unregister_prot_hook(sk, false); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4131 | sk->sk_err = ENETDOWN; |
| 4132 | if (!sock_flag(sk, SOCK_DEAD)) |
Alexander Aring | e3ae236 | 2021-06-27 18:48:21 -0400 | [diff] [blame] | 4133 | sk_error_report(sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4134 | } |
| 4135 | if (msg == NETDEV_UNREGISTER) { |
Daniel Borkmann | 66e56cd | 2013-12-06 11:36:15 +0100 | [diff] [blame] | 4136 | packet_cached_dev_reset(po); |
Eric Dumazet | e032f7c | 2021-06-16 06:42:02 -0700 | [diff] [blame] | 4137 | WRITE_ONCE(po->ifindex, -1); |
Eric Dumazet | f1d9268 | 2021-12-14 07:09:33 -0800 | [diff] [blame] | 4138 | dev_put_track(po->prot_hook.dev, |
| 4139 | &po->prot_hook.dev_tracker); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4140 | po->prot_hook.dev = NULL; |
| 4141 | } |
| 4142 | spin_unlock(&po->bind_lock); |
| 4143 | } |
| 4144 | break; |
| 4145 | case NETDEV_UP: |
stephen hemminger | 808f511 | 2010-02-22 07:57:18 +0000 | [diff] [blame] | 4146 | if (dev->ifindex == po->ifindex) { |
| 4147 | spin_lock(&po->bind_lock); |
David S. Miller | ce06b03 | 2011-07-04 01:44:29 -0700 | [diff] [blame] | 4148 | if (po->num) |
| 4149 | register_prot_hook(sk); |
stephen hemminger | 808f511 | 2010-02-22 07:57:18 +0000 | [diff] [blame] | 4150 | spin_unlock(&po->bind_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4151 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4152 | break; |
| 4153 | } |
| 4154 | } |
stephen hemminger | 808f511 | 2010-02-22 07:57:18 +0000 | [diff] [blame] | 4155 | rcu_read_unlock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4156 | return NOTIFY_DONE; |
| 4157 | } |
| 4158 | |
| 4159 | |
| 4160 | static int packet_ioctl(struct socket *sock, unsigned int cmd, |
| 4161 | unsigned long arg) |
| 4162 | { |
| 4163 | struct sock *sk = sock->sk; |
| 4164 | |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 4165 | switch (cmd) { |
Eric Dumazet | 40d4e3d | 2009-07-21 21:57:59 +0000 | [diff] [blame] | 4166 | case SIOCOUTQ: |
| 4167 | { |
| 4168 | int amount = sk_wmem_alloc_get(sk); |
Eric Dumazet | 31e6d36 | 2009-06-17 19:05:41 -0700 | [diff] [blame] | 4169 | |
Eric Dumazet | 40d4e3d | 2009-07-21 21:57:59 +0000 | [diff] [blame] | 4170 | return put_user(amount, (int __user *)arg); |
| 4171 | } |
| 4172 | case SIOCINQ: |
| 4173 | { |
| 4174 | struct sk_buff *skb; |
| 4175 | int amount = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4176 | |
Eric Dumazet | 40d4e3d | 2009-07-21 21:57:59 +0000 | [diff] [blame] | 4177 | spin_lock_bh(&sk->sk_receive_queue.lock); |
| 4178 | skb = skb_peek(&sk->sk_receive_queue); |
| 4179 | if (skb) |
| 4180 | amount = skb->len; |
| 4181 | spin_unlock_bh(&sk->sk_receive_queue.lock); |
| 4182 | return put_user(amount, (int __user *)arg); |
| 4183 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4184 | #ifdef CONFIG_INET |
Eric Dumazet | 40d4e3d | 2009-07-21 21:57:59 +0000 | [diff] [blame] | 4185 | case SIOCADDRT: |
| 4186 | case SIOCDELRT: |
| 4187 | case SIOCDARP: |
| 4188 | case SIOCGARP: |
| 4189 | case SIOCSARP: |
| 4190 | case SIOCGIFADDR: |
| 4191 | case SIOCSIFADDR: |
| 4192 | case SIOCGIFBRDADDR: |
| 4193 | case SIOCSIFBRDADDR: |
| 4194 | case SIOCGIFNETMASK: |
| 4195 | case SIOCSIFNETMASK: |
| 4196 | case SIOCGIFDSTADDR: |
| 4197 | case SIOCSIFDSTADDR: |
| 4198 | case SIOCSIFFLAGS: |
Eric Dumazet | 40d4e3d | 2009-07-21 21:57:59 +0000 | [diff] [blame] | 4199 | return inet_dgram_ops.ioctl(sock, cmd, arg); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4200 | #endif |
| 4201 | |
Eric Dumazet | 40d4e3d | 2009-07-21 21:57:59 +0000 | [diff] [blame] | 4202 | default: |
| 4203 | return -ENOIOCTLCMD; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4204 | } |
| 4205 | return 0; |
| 4206 | } |
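/*
 * Illustrative use (a sketch): SIOCINQ reports the length of the next
 * queued packet and SIOCOUTQ the bytes still pending in the write
 * queue, both returned through the integer argument:
 *
 *	int n = 0;
 *
 *	ioctl(fd, SIOCINQ, &n);
 */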
| 4207 | |
Linus Torvalds | a11e1d4 | 2018-06-28 09:43:44 -0700 | [diff] [blame] | 4208 | static __poll_t packet_poll(struct file *file, struct socket *sock, |
| 4209 | poll_table *wait) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4210 | { |
| 4211 | struct sock *sk = sock->sk; |
| 4212 | struct packet_sock *po = pkt_sk(sk); |
Linus Torvalds | a11e1d4 | 2018-06-28 09:43:44 -0700 | [diff] [blame] | 4213 | __poll_t mask = datagram_poll(file, sock, wait); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4214 | |
| 4215 | spin_lock_bh(&sk->sk_receive_queue.lock); |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 4216 | if (po->rx_ring.pg_vec) { |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 4217 | if (!packet_previous_rx_frame(po, &po->rx_ring, |
| 4218 | TP_STATUS_KERNEL)) |
Linus Torvalds | a9a0884 | 2018-02-11 14:34:03 -0800 | [diff] [blame] | 4219 | mask |= EPOLLIN | EPOLLRDNORM; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4220 | } |
Eric Dumazet | 9bb6cd6 | 2019-06-12 09:52:33 -0700 | [diff] [blame] | 4221 | packet_rcv_try_clear_pressure(po); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4222 | spin_unlock_bh(&sk->sk_receive_queue.lock); |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 4223 | spin_lock_bh(&sk->sk_write_queue.lock); |
| 4224 | if (po->tx_ring.pg_vec) { |
| 4225 | if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE)) |
Linus Torvalds | a9a0884 | 2018-02-11 14:34:03 -0800 | [diff] [blame] | 4226 | mask |= EPOLLOUT | EPOLLWRNORM; |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 4227 | } |
| 4228 | spin_unlock_bh(&sk->sk_write_queue.lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4229 | return mask; |
| 4230 | } |
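/*
 * Illustrative consumer loop (a sketch): with an RX ring mapped,
 * userspace waits for POLLIN and then walks the frames the kernel
 * has marked TP_STATUS_USER, releasing each slot back to the kernel
 * by storing TP_STATUS_KERNEL (TPACKET_V2 frame headers assumed):
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLRDNORM };
 *	// hdr: struct tpacket2_hdr * pointing at the current ring slot
 *
 *	poll(&pfd, 1, -1);
 *	while (hdr->tp_status & TP_STATUS_USER) {
 *		// consume the frame, advance hdr to the next slot, then
 *		hdr->tp_status = TP_STATUS_KERNEL;
 *	}
 */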
| 4231 | |
| 4232 | |
| 4233 | /* Dirty? Well, I still did not learn better way to account |
| 4234 | * for user mmaps. |
| 4235 | */ |
| 4236 | |
| 4237 | static void packet_mm_open(struct vm_area_struct *vma) |
| 4238 | { |
| 4239 | struct file *file = vma->vm_file; |
Eric Dumazet | 40d4e3d | 2009-07-21 21:57:59 +0000 | [diff] [blame] | 4240 | struct socket *sock = file->private_data; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4241 | struct sock *sk = sock->sk; |
YOSHIFUJI Hideaki | 1ce4f28 | 2007-02-09 23:25:10 +0900 | [diff] [blame] | 4242 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4243 | if (sk) |
| 4244 | atomic_inc(&pkt_sk(sk)->mapped); |
| 4245 | } |
| 4246 | |
| 4247 | static void packet_mm_close(struct vm_area_struct *vma) |
| 4248 | { |
| 4249 | struct file *file = vma->vm_file; |
Eric Dumazet | 40d4e3d | 2009-07-21 21:57:59 +0000 | [diff] [blame] | 4250 | struct socket *sock = file->private_data; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4251 | struct sock *sk = sock->sk; |
YOSHIFUJI Hideaki | 1ce4f28 | 2007-02-09 23:25:10 +0900 | [diff] [blame] | 4252 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4253 | if (sk) |
| 4254 | atomic_dec(&pkt_sk(sk)->mapped); |
| 4255 | } |
| 4256 | |
Alexey Dobriyan | f0f37e2f | 2009-09-27 22:29:37 +0400 | [diff] [blame] | 4257 | static const struct vm_operations_struct packet_mmap_ops = { |
Eric Dumazet | 40d4e3d | 2009-07-21 21:57:59 +0000 | [diff] [blame] | 4258 | .open = packet_mm_open, |
| 4259 | .close = packet_mm_close, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4260 | }; |
| 4261 | |
Eric Dumazet | 3a7ad06 | 2018-08-29 11:50:12 -0700 | [diff] [blame] | 4262 | static void free_pg_vec(struct pgv *pg_vec, unsigned int order, |
| 4263 | unsigned int len) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4264 | { |
| 4265 | int i; |
| 4266 | |
David S. Miller | 4ebf0ae | 2005-12-06 16:38:35 -0800 | [diff] [blame] | 4267 | for (i = 0; i < len; i++) { |
Neil Horman | 0e3125c | 2010-11-16 10:26:47 -0800 | [diff] [blame] | 4268 | if (likely(pg_vec[i].buffer)) { |
Eric Dumazet | 3a7ad06 | 2018-08-29 11:50:12 -0700 | [diff] [blame] | 4269 | if (is_vmalloc_addr(pg_vec[i].buffer)) |
| 4270 | vfree(pg_vec[i].buffer); |
| 4271 | else |
| 4272 | free_pages((unsigned long)pg_vec[i].buffer, |
| 4273 | order); |
Neil Horman | 0e3125c | 2010-11-16 10:26:47 -0800 | [diff] [blame] | 4274 | pg_vec[i].buffer = NULL; |
| 4275 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4276 | } |
| 4277 | kfree(pg_vec); |
| 4278 | } |
| 4279 | |
Eric Dumazet | 3a7ad06 | 2018-08-29 11:50:12 -0700 | [diff] [blame] | 4280 | static char *alloc_one_pg_vec_page(unsigned long order) |
David S. Miller | 4ebf0ae | 2005-12-06 16:38:35 -0800 | [diff] [blame] | 4281 | { |
Daniel Borkmann | f0d4eb2 | 2014-01-19 11:46:53 +0100 | [diff] [blame] | 4282 | char *buffer; |
Eric Dumazet | 3a7ad06 | 2018-08-29 11:50:12 -0700 | [diff] [blame] | 4283 | gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | |
| 4284 | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY; |
Eric Dumazet | 719bfea | 2009-04-15 03:39:52 -0700 | [diff] [blame] | 4285 | |
Eric Dumazet | 3a7ad06 | 2018-08-29 11:50:12 -0700 | [diff] [blame] | 4286 | buffer = (char *) __get_free_pages(gfp_flags, order); |
Neil Horman | 0e3125c | 2010-11-16 10:26:47 -0800 | [diff] [blame] | 4287 | if (buffer) |
| 4288 | return buffer; |
| 4289 | |
Eric Dumazet | 3a7ad06 | 2018-08-29 11:50:12 -0700 | [diff] [blame] | 4290 | /* __get_free_pages failed, fall back to vmalloc */ |
| 4291 | buffer = vzalloc(array_size((1 << order), PAGE_SIZE)); |
| 4292 | if (buffer) |
| 4293 | return buffer; |
Neil Horman | 0e3125c | 2010-11-16 10:26:47 -0800 | [diff] [blame] | 4294 | |
Eric Dumazet | 3a7ad06 | 2018-08-29 11:50:12 -0700 | [diff] [blame] | 4295 | /* vmalloc failed, let's dig into swap here */
| 4296 | gfp_flags &= ~__GFP_NORETRY; |
| 4297 | buffer = (char *) __get_free_pages(gfp_flags, order); |
| 4298 | if (buffer) |
| 4299 | return buffer; |
| 4300 | |
| 4301 | /* complete and utter failure */ |
| 4302 | return NULL; |
David S. Miller | 4ebf0ae | 2005-12-06 16:38:35 -0800 | [diff] [blame] | 4303 | } |
| 4304 | |
Eric Dumazet | 3a7ad06 | 2018-08-29 11:50:12 -0700 | [diff] [blame] | 4305 | static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order) |
David S. Miller | 4ebf0ae | 2005-12-06 16:38:35 -0800 | [diff] [blame] | 4306 | { |
| 4307 | unsigned int block_nr = req->tp_block_nr; |
Neil Horman | 0e3125c | 2010-11-16 10:26:47 -0800 | [diff] [blame] | 4308 | struct pgv *pg_vec; |
David S. Miller | 4ebf0ae | 2005-12-06 16:38:35 -0800 | [diff] [blame] | 4309 | int i; |
| 4310 | |
Christoph Paasch | 398f013 | 2019-03-18 23:14:52 -0700 | [diff] [blame] | 4311 | pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN); |
David S. Miller | 4ebf0ae | 2005-12-06 16:38:35 -0800 | [diff] [blame] | 4312 | if (unlikely(!pg_vec)) |
| 4313 | goto out; |
| 4314 | |
| 4315 | for (i = 0; i < block_nr; i++) { |
Eric Dumazet | 3a7ad06 | 2018-08-29 11:50:12 -0700 | [diff] [blame] | 4316 | pg_vec[i].buffer = alloc_one_pg_vec_page(order); |
Neil Horman | 0e3125c | 2010-11-16 10:26:47 -0800 | [diff] [blame] | 4317 | if (unlikely(!pg_vec[i].buffer)) |
David S. Miller | 4ebf0ae | 2005-12-06 16:38:35 -0800 | [diff] [blame] | 4318 | goto out_free_pgvec; |
| 4319 | } |
| 4320 | |
| 4321 | out: |
| 4322 | return pg_vec; |
| 4323 | |
| 4324 | out_free_pgvec: |
Eric Dumazet | 3a7ad06 | 2018-08-29 11:50:12 -0700 | [diff] [blame] | 4325 | free_pg_vec(pg_vec, order, block_nr); |
David S. Miller | 4ebf0ae | 2005-12-06 16:38:35 -0800 | [diff] [blame] | 4326 | pg_vec = NULL; |
| 4327 | goto out; |
| 4328 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4329 | |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 4330 | static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 4331 | int closing, int tx_ring) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4332 | { |
Neil Horman | 0e3125c | 2010-11-16 10:26:47 -0800 | [diff] [blame] | 4333 | struct pgv *pg_vec = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4334 | struct packet_sock *po = pkt_sk(sk); |
Willem de Bruijn | 61fad68 | 2020-03-13 12:18:09 -0400 | [diff] [blame] | 4335 | unsigned long *rx_owner_map = NULL; |
Eric Dumazet | 3a7ad06 | 2018-08-29 11:50:12 -0700 | [diff] [blame] | 4336 | int was_running, order = 0; |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 4337 | struct packet_ring_buffer *rb; |
| 4338 | struct sk_buff_head *rb_queue; |
Al Viro | 0e11c91 | 2006-11-08 00:26:29 -0800 | [diff] [blame] | 4339 | __be16 num; |
Colin Ian King | 2a6d6c3 | 2020-07-01 16:04:33 +0100 | [diff] [blame] | 4340 | int err; |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 4341 | /* Alias req_u->req as req to keep code churn minimal */
| 4342 | struct tpacket_req *req = &req_u->req; |
| 4343 | |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 4344 | rb = tx_ring ? &po->tx_ring : &po->rx_ring; |
| 4345 | rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; |
| 4346 | |
| 4347 | err = -EBUSY; |
| 4348 | if (!closing) { |
| 4349 | if (atomic_read(&po->mapped)) |
| 4350 | goto out; |
Daniel Borkmann | b013840 | 2014-01-15 16:25:36 +0100 | [diff] [blame] | 4351 | if (packet_read_pending(rb)) |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 4352 | goto out; |
| 4353 | } |
YOSHIFUJI Hideaki | 1ce4f28 | 2007-02-09 23:25:10 +0900 | [diff] [blame] | 4354 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4355 | if (req->tp_block_nr) { |
Willem de Bruijn | 4576cd4 | 2018-08-06 10:38:34 -0400 | [diff] [blame] | 4356 | unsigned int min_frame_size; |
| 4357 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4358 | /* Sanity tests and some calculations */ |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 4359 | err = -EBUSY; |
| 4360 | if (unlikely(rb->pg_vec)) |
| 4361 | goto out; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4362 | |
Patrick McHardy | bbd6ef8 | 2008-07-14 22:50:15 -0700 | [diff] [blame] | 4363 | switch (po->tp_version) { |
| 4364 | case TPACKET_V1: |
| 4365 | po->tp_hdrlen = TPACKET_HDRLEN; |
| 4366 | break; |
| 4367 | case TPACKET_V2: |
| 4368 | po->tp_hdrlen = TPACKET2_HDRLEN; |
| 4369 | break; |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 4370 | case TPACKET_V3: |
| 4371 | po->tp_hdrlen = TPACKET3_HDRLEN; |
| 4372 | break; |
Patrick McHardy | bbd6ef8 | 2008-07-14 22:50:15 -0700 | [diff] [blame] | 4373 | } |
| 4374 | |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 4375 | err = -EINVAL; |
David S. Miller | 4ebf0ae | 2005-12-06 16:38:35 -0800 | [diff] [blame] | 4376 | if (unlikely((int)req->tp_block_size <= 0)) |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 4377 | goto out; |
Tobias Klauser | 90836b6 | 2015-11-17 10:40:21 +0100 | [diff] [blame] | 4378 | if (unlikely(!PAGE_ALIGNED(req->tp_block_size))) |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 4379 | goto out; |
Willem de Bruijn | 4576cd4 | 2018-08-06 10:38:34 -0400 | [diff] [blame] | 4380 | min_frame_size = po->tp_hdrlen + po->tp_reserve; |
Eric Dumazet | dc80811 | 2014-08-15 09:16:04 -0700 | [diff] [blame] | 4381 | if (po->tp_version >= TPACKET_V3 && |
Willem de Bruijn | 4576cd4 | 2018-08-06 10:38:34 -0400 | [diff] [blame] | 4382 | req->tp_block_size < |
| 4383 | BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size) |
Eric Dumazet | dc80811 | 2014-08-15 09:16:04 -0700 | [diff] [blame] | 4384 | goto out; |
Willem de Bruijn | 4576cd4 | 2018-08-06 10:38:34 -0400 | [diff] [blame] | 4385 | if (unlikely(req->tp_frame_size < min_frame_size)) |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 4386 | goto out; |
David S. Miller | 4ebf0ae | 2005-12-06 16:38:35 -0800 | [diff] [blame] | 4387 | if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1))) |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 4388 | goto out; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4389 | |
Tobias Klauser | 4194b49 | 2015-11-17 10:38:36 +0100 | [diff] [blame] | 4390 | rb->frames_per_block = req->tp_block_size / req->tp_frame_size; |
| 4391 | if (unlikely(rb->frames_per_block == 0)) |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 4392 | goto out; |
Kal Conley | fc62814 | 2019-02-10 09:57:11 +0100 | [diff] [blame] | 4393 | if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr)) |
Andrey Konovalov | 8f8d28e | 2017-03-29 16:11:21 +0200 | [diff] [blame] | 4394 | goto out; |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 4395 | if (unlikely((rb->frames_per_block * req->tp_block_nr) != |
| 4396 | req->tp_frame_nr)) |
| 4397 | goto out; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4398 | |
| 4399 | err = -ENOMEM; |
Eric Dumazet | 3a7ad06 | 2018-08-29 11:50:12 -0700 | [diff] [blame] | 4400 | order = get_order(req->tp_block_size); |
| 4401 | pg_vec = alloc_pg_vec(req, order); |
David S. Miller | 4ebf0ae | 2005-12-06 16:38:35 -0800 | [diff] [blame] | 4402 | if (unlikely(!pg_vec)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4403 | goto out; |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 4404 | switch (po->tp_version) { |
| 4405 | case TPACKET_V3: |
Sowmini Varadhan | 7f953ab | 2017-01-03 06:31:47 -0800 | [diff] [blame] | 4406 | /* Block transmit is not supported yet */ |
| 4407 | if (!tx_ring) { |
Maninder Singh | e8e85cc | 2015-06-22 12:39:16 +0530 | [diff] [blame] | 4408 | init_prb_bdqc(po, rb, pg_vec, req_u); |
Sowmini Varadhan | 7f953ab | 2017-01-03 06:31:47 -0800 | [diff] [blame] | 4409 | } else { |
| 4410 | struct tpacket_req3 *req3 = &req_u->req3; |
| 4411 | |
| 4412 | if (req3->tp_retire_blk_tov || |
| 4413 | req3->tp_sizeof_priv || |
| 4414 | req3->tp_feature_req_word) { |
| 4415 | err = -EINVAL; |
Eric Dumazet | 55655e3 | 2019-06-24 02:38:20 -0700 | [diff] [blame] | 4416 | goto out_free_pg_vec; |
Sowmini Varadhan | 7f953ab | 2017-01-03 06:31:47 -0800 | [diff] [blame] | 4417 | } |
| 4418 | } |
Dan Carpenter | d7cf0c3 | 2014-02-18 15:20:51 +0300 | [diff] [blame] | 4419 | break; |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 4420 | default: |
Willem de Bruijn | 61fad68 | 2020-03-13 12:18:09 -0400 | [diff] [blame] | 4421 | if (!tx_ring) { |
| 4422 | rx_owner_map = bitmap_alloc(req->tp_frame_nr, |
| 4423 | GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO); |
| 4424 | if (!rx_owner_map) |
| 4425 | goto out_free_pg_vec; |
| 4426 | } |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 4427 | break; |
| 4428 | } |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 4429 | } |
| 4430 | /* Done */ |
| 4431 | else { |
| 4432 | err = -EINVAL; |
David S. Miller | 4ebf0ae | 2005-12-06 16:38:35 -0800 | [diff] [blame] | 4433 | if (unlikely(req->tp_frame_nr)) |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 4434 | goto out; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4435 | } |
| 4436 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4437 | |
| 4438 | /* Detach socket from network */ |
| 4439 | spin_lock(&po->bind_lock); |
| 4440 | was_running = po->running; |
| 4441 | num = po->num; |
| 4442 | if (was_running) { |
Eric Dumazet | c7d2ef5 | 2021-06-16 06:42:01 -0700 | [diff] [blame] | 4443 | WRITE_ONCE(po->num, 0); |
David S. Miller | ce06b03 | 2011-07-04 01:44:29 -0700 | [diff] [blame] | 4444 | __unregister_prot_hook(sk, false); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4445 | } |
| 4446 | spin_unlock(&po->bind_lock); |
YOSHIFUJI Hideaki | 1ce4f28 | 2007-02-09 23:25:10 +0900 | [diff] [blame] | 4447 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4448 | synchronize_net(); |
| 4449 | |
| 4450 | err = -EBUSY; |
Herbert Xu | 905db44 | 2009-01-30 14:12:06 -0800 | [diff] [blame] | 4451 | mutex_lock(&po->pg_vec_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4452 | if (closing || atomic_read(&po->mapped) == 0) { |
| 4453 | err = 0; |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 4454 | spin_lock_bh(&rb_queue->lock); |
Changli Gao | c053fd9 | 2010-12-10 16:02:20 -0800 | [diff] [blame] | 4455 | swap(rb->pg_vec, pg_vec); |
Willem de Bruijn | 61fad68 | 2020-03-13 12:18:09 -0400 | [diff] [blame] | 4456 | if (po->tp_version <= TPACKET_V2) |
| 4457 | swap(rb->rx_owner_map, rx_owner_map); |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 4458 | rb->frame_max = (req->tp_frame_nr - 1); |
| 4459 | rb->head = 0; |
| 4460 | rb->frame_size = req->tp_frame_size; |
| 4461 | spin_unlock_bh(&rb_queue->lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4462 | |
Eric Dumazet | 3a7ad06 | 2018-08-29 11:50:12 -0700 | [diff] [blame] | 4463 | swap(rb->pg_vec_order, order); |
Changli Gao | c053fd9 | 2010-12-10 16:02:20 -0800 | [diff] [blame] | 4464 | swap(rb->pg_vec_len, req->tp_block_nr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4465 | |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 4466 | rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE; |
| 4467 | po->prot_hook.func = (po->rx_ring.pg_vec) ? |
| 4468 | tpacket_rcv : packet_rcv; |
| 4469 | skb_queue_purge(rb_queue); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4470 | if (atomic_read(&po->mapped)) |
Eric Dumazet | 40d4e3d | 2009-07-21 21:57:59 +0000 | [diff] [blame] | 4471 | pr_err("packet_mmap: vma is busy: %d\n", |
| 4472 | atomic_read(&po->mapped)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4473 | } |
Herbert Xu | 905db44 | 2009-01-30 14:12:06 -0800 | [diff] [blame] | 4474 | mutex_unlock(&po->pg_vec_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4475 | |
| 4476 | spin_lock(&po->bind_lock); |
David S. Miller | ce06b03 | 2011-07-04 01:44:29 -0700 | [diff] [blame] | 4477 | if (was_running) { |
Eric Dumazet | c7d2ef5 | 2021-06-16 06:42:01 -0700 | [diff] [blame] | 4478 | WRITE_ONCE(po->num, num); |
David S. Miller | ce06b03 | 2011-07-04 01:44:29 -0700 | [diff] [blame] | 4479 | register_prot_hook(sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4480 | } |
| 4481 | spin_unlock(&po->bind_lock); |
WANG Cong | c800aaf | 2017-07-24 10:07:32 -0700 | [diff] [blame] | 4482 | if (pg_vec && (po->tp_version > TPACKET_V2)) { |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 4483 | /* Because we don't support block-based V3 on tx-ring */ |
| 4484 | if (!tx_ring) |
Tobias Klauser | 73d0fcf | 2015-07-28 14:21:26 +0200 | [diff] [blame] | 4485 | prb_shutdown_retire_blk_timer(po, rb_queue); |
chetan loke | f6fb8f1 | 2011-08-19 10:18:16 +0000 | [diff] [blame] | 4486 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4487 | |
Eric Dumazet | 55655e3 | 2019-06-24 02:38:20 -0700 | [diff] [blame] | 4488 | out_free_pg_vec: |
Willem de Bruijn | ec6af09 | 2021-12-15 09:39:37 -0500 | [diff] [blame] | 4489 | if (pg_vec) { |
| 4490 | bitmap_free(rx_owner_map); |
Eric Dumazet | 3a7ad06 | 2018-08-29 11:50:12 -0700 | [diff] [blame] | 4491 | free_pg_vec(pg_vec, order, req->tp_block_nr); |
Willem de Bruijn | ec6af09 | 2021-12-15 09:39:37 -0500 | [diff] [blame] | 4492 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4493 | out: |
| 4494 | return err; |
| 4495 | } |
| 4496 | |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 4497 | static int packet_mmap(struct file *file, struct socket *sock, |
| 4498 | struct vm_area_struct *vma) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4499 | { |
| 4500 | struct sock *sk = sock->sk; |
| 4501 | struct packet_sock *po = pkt_sk(sk); |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 4502 | unsigned long size, expected_size; |
| 4503 | struct packet_ring_buffer *rb; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4504 | unsigned long start; |
| 4505 | int err = -EINVAL; |
| 4506 | int i; |
| 4507 | |
| 4508 | if (vma->vm_pgoff) |
| 4509 | return -EINVAL; |
| 4510 | |
Herbert Xu | 905db44 | 2009-01-30 14:12:06 -0800 | [diff] [blame] | 4511 | mutex_lock(&po->pg_vec_lock); |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 4512 | |
| 4513 | expected_size = 0; |
| 4514 | for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) { |
| 4515 | if (rb->pg_vec) { |
| 4516 | expected_size += rb->pg_vec_len |
| 4517 | * rb->pg_vec_pages |
| 4518 | * PAGE_SIZE; |
| 4519 | } |
| 4520 | } |
| 4521 | |
| 4522 | if (expected_size == 0) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4523 | goto out; |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 4524 | |
| 4525 | size = vma->vm_end - vma->vm_start; |
| 4526 | if (size != expected_size) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4527 | goto out; |
| 4528 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4529 | start = vma->vm_start; |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 4530 | for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) { |
| 4531 | if (rb->pg_vec == NULL) |
| 4532 | continue; |
David S. Miller | 4ebf0ae | 2005-12-06 16:38:35 -0800 | [diff] [blame] | 4533 | |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 4534 | for (i = 0; i < rb->pg_vec_len; i++) { |
Neil Horman | 0e3125c | 2010-11-16 10:26:47 -0800 | [diff] [blame] | 4535 | struct page *page; |
| 4536 | void *kaddr = rb->pg_vec[i].buffer; |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 4537 | int pg_num; |
| 4538 | |
Changli Gao | c56b4d9 | 2010-12-01 02:52:57 +0000 | [diff] [blame] | 4539 | for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) { |
| 4540 | page = pgv_to_page(kaddr); |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 4541 | err = vm_insert_page(vma, start, page); |
| 4542 | if (unlikely(err)) |
| 4543 | goto out; |
| 4544 | start += PAGE_SIZE; |
Neil Horman | 0e3125c | 2010-11-16 10:26:47 -0800 | [diff] [blame] | 4545 | kaddr += PAGE_SIZE; |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 4546 | } |
David S. Miller | 4ebf0ae | 2005-12-06 16:38:35 -0800 | [diff] [blame] | 4547 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4548 | } |
Johann Baudy | 69e3c75 | 2009-05-18 22:11:22 -0700 | [diff] [blame] | 4549 | |
David S. Miller | 4ebf0ae | 2005-12-06 16:38:35 -0800 | [diff] [blame] | 4550 | atomic_inc(&po->mapped); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4551 | vma->vm_ops = &packet_mmap_ops; |
| 4552 | err = 0; |
| 4553 | |
| 4554 | out: |
Herbert Xu | 905db44 | 2009-01-30 14:12:06 -0800 | [diff] [blame] | 4555 | mutex_unlock(&po->pg_vec_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4556 | return err; |
| 4557 | } |
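/*
 * Illustrative mapping (a sketch): both rings are exposed through one
 * mmap() at offset 0, sized exactly as expected_size is computed
 * above (rx ring first, then tx ring). With hypothetical req_rx and
 * req_tx being the tpacket_req structures used to create the rings:
 *
 *	size_t sz = (size_t)req_rx.tp_block_size * req_rx.tp_block_nr +
 *		    (size_t)req_tx.tp_block_size * req_tx.tp_block_nr;
 *	void *ring = mmap(NULL, sz, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 */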
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4558 | |
Eric Dumazet | 90ddc4f | 2005-12-22 12:49:22 -0800 | [diff] [blame] | 4559 | static const struct proto_ops packet_ops_spkt = { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4560 | .family = PF_PACKET, |
| 4561 | .owner = THIS_MODULE, |
| 4562 | .release = packet_release, |
| 4563 | .bind = packet_bind_spkt, |
| 4564 | .connect = sock_no_connect, |
| 4565 | .socketpair = sock_no_socketpair, |
| 4566 | .accept = sock_no_accept, |
| 4567 | .getname = packet_getname_spkt, |
Linus Torvalds | a11e1d4 | 2018-06-28 09:43:44 -0700 | [diff] [blame] | 4568 | .poll = datagram_poll, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4569 | .ioctl = packet_ioctl, |
Arnd Bergmann | c7cbdbf | 2019-04-17 22:51:48 +0200 | [diff] [blame] | 4570 | .gettstamp = sock_gettstamp, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4571 | .listen = sock_no_listen, |
| 4572 | .shutdown = sock_no_shutdown, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4573 | .sendmsg = packet_sendmsg_spkt, |
| 4574 | .recvmsg = packet_recvmsg, |
| 4575 | .mmap = sock_no_mmap, |
| 4576 | .sendpage = sock_no_sendpage, |
| 4577 | }; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4578 | |
static const struct proto_ops packet_ops = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname,
	.poll =		packet_poll,
	.ioctl =	packet_ioctl,
	.gettstamp =	sock_gettstamp,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	packet_setsockopt,
	.getsockopt =	packet_getsockopt,
	.sendmsg =	packet_sendmsg,
	.recvmsg =	packet_recvmsg,
	.mmap =		packet_mmap,
	.sendpage =	sock_no_sendpage,
};

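/*
 * Illustrative userspace sketch (not from this file) of the create and
 * bind paths served by packet_ops; "eth0" is a placeholder interface name
 * and error handling is elided.  Needs <sys/socket.h>, <arpa/inet.h>,
 * <net/if.h>, <string.h>, <linux/if_packet.h> and <linux/if_ether.h>.
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	struct sockaddr_ll sll;
 *
 *	memset(&sll, 0, sizeof(sll));
 *	sll.sll_family   = AF_PACKET;
 *	sll.sll_protocol = htons(ETH_P_ALL);
 *	sll.sll_ifindex  = if_nametoindex("eth0");
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 */
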
static const struct net_proto_family packet_family_ops = {
	.family =	PF_PACKET,
	.create =	packet_create,
	.owner	=	THIS_MODULE,
};

static struct notifier_block packet_netdev_notifier = {
	.notifier_call =	packet_notifier,
};

#ifdef CONFIG_PROC_FS

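/*
 * /proc/net/packet walks the per-netns socket list under RCU: ->start
 * takes rcu_read_lock(), ->stop drops it, and ->next advances through
 * net->packet.sklist without taking the sklist mutex.
 */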
static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct net *net = seq_file_net(seq);

	rcu_read_lock();
	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
}

static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
}

static void packet_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

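/*
 * One line per socket: kernel address, refcount, socket type, protocol
 * number, bound ifindex, running flag, receive-queue memory, owning uid
 * and inode, matching the header printed for SEQ_START_TOKEN.
 */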
static int packet_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq,
			   "%*sRefCnt Type Proto Iface R Rmem User Inode\n",
			   IS_ENABLED(CONFIG_64BIT) ? -17 : -9, "sk");
	else {
		struct sock *s = sk_entry(v);
		const struct packet_sock *po = pkt_sk(s);

		seq_printf(seq,
			   "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
			   s,
			   refcount_read(&s->sk_refcnt),
			   s->sk_type,
			   ntohs(READ_ONCE(po->num)),
			   READ_ONCE(po->ifindex),
			   po->running,
			   atomic_read(&s->sk_rmem_alloc),
			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
			   sock_i_ino(s));
	}

	return 0;
}

static const struct seq_operations packet_seq_ops = {
	.start	= packet_seq_start,
	.next	= packet_seq_next,
	.stop	= packet_seq_stop,
	.show	= packet_seq_show,
};
#endif

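/*
 * Per-network-namespace state: each netns gets its own packet socket
 * list and, with procfs enabled, its own /proc/net/packet entry.
 */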
static int __net_init packet_net_init(struct net *net)
{
	mutex_init(&net->packet.sklist_lock);
	INIT_HLIST_HEAD(&net->packet.sklist);

#ifdef CONFIG_PROC_FS
	if (!proc_create_net("packet", 0, net->proc_net, &packet_seq_ops,
			sizeof(struct seq_net_private)))
		return -ENOMEM;
#endif /* CONFIG_PROC_FS */

	return 0;
}

static void __net_exit packet_net_exit(struct net *net)
{
	remove_proc_entry("packet", net->proc_net);
	WARN_ON_ONCE(!hlist_empty(&net->packet.sklist));
}

static struct pernet_operations packet_net_ops = {
	.init = packet_net_init,
	.exit = packet_net_exit,
};
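/* Tear everything down in the reverse order of packet_init(). */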
static void __exit packet_exit(void)
{
	unregister_netdevice_notifier(&packet_netdev_notifier);
	unregister_pernet_subsys(&packet_net_ops);
	sock_unregister(PF_PACKET);
	proto_unregister(&packet_proto);
}

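/*
 * Register, in order, the proto, the PF_PACKET family, the per-netns
 * hooks and the netdevice notifier; on failure, unwind the earlier
 * registrations in reverse.
 */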
static int __init packet_init(void)
{
	int rc;

	rc = proto_register(&packet_proto, 0);
	if (rc)
		goto out;
	rc = sock_register(&packet_family_ops);
	if (rc)
		goto out_proto;
	rc = register_pernet_subsys(&packet_net_ops);
	if (rc)
		goto out_sock;
	rc = register_netdevice_notifier(&packet_netdev_notifier);
	if (rc)
		goto out_pernet;

	return 0;

out_pernet:
	unregister_pernet_subsys(&packet_net_ops);
out_sock:
	sock_unregister(PF_PACKET);
out_proto:
	proto_unregister(&packet_proto);
out:
	return rc;
}

module_init(packet_init);
module_exit(packet_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);