/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
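
/*
 * Illustrative sketch of the locking rule in the NOTE above (not part of
 * this file's code): the lockless __skb_queue_tail() relies on the caller
 * holding the queue lock, while skb_queue_tail() takes it internally, so
 *
 *	spin_lock_irqsave(&list->lock, flags);
 *	__skb_queue_tail(list, skb);
 *	spin_unlock_irqrestore(&list->lock, flags);
 *
 * is equivalent to a plain skb_queue_tail(list, skb).
 */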

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>

#include <linux/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>

struct kmem_cache *skbuff_head_cache __ro_after_init;
static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
#ifdef CONFIG_SKB_EXTENSIONS
static struct kmem_cache *skbuff_ext_cache __ro_after_init;
#endif
int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);

/**
 *	skb_panic - private function for out-of-line support
 *	@skb:	buffer
 *	@sz:	size
 *	@addr:	address
 *	@msg:	skb_over_panic or skb_under_panic
 *
 *	Out-of-line support for skb_put() and skb_push().
 *	Called via the wrapper skb_over_panic() or skb_under_panic().
 *	Keep out of line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
		      const char msg[])
{
	pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
		 msg, addr, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If it is and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free.
 */
#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
	 __kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)

static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
			       unsigned long ip, bool *pfmemalloc)
{
	void *obj;
	bool ret_pfmemalloc = false;

	/*
	 * Try a regular allocation; when that fails and we're not entitled
	 * to the reserves, fail.
	 */
	obj = kmalloc_node_track_caller(size,
					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					node);
	if (obj || !(gfp_pfmemalloc_allowed(flags)))
		goto out;

	/* Try again but now we are using pfmemalloc reserves */
	ret_pfmemalloc = true;
	obj = kmalloc_node_track_caller(size, flags, node);

out:
	if (pfmemalloc)
		*pfmemalloc = ret_pfmemalloc;

	return obj;
}
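
/*
 * Usage sketch for the wrapper above (hypothetical caller, mirroring what
 * __alloc_skb() does below): the returned pfmemalloc flag is propagated
 * into the skb so SOCK_MEMALLOC sockets may later consume the reserves.
 *
 *	bool pfmemalloc;
 *	void *data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
 *
 *	if (data)
 *		skb->pfmemalloc = pfmemalloc;
 */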

/* Allocate a new skbuff. We do this ourselves so we can fill in a few
 * 'private' fields and also do memory statistics to find all the
 * [BEEP] leaks.
 *
 */

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *		instead of head cache and allocate a cloned (child) skb.
 *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *		allocations in case the data is required for writeback
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of at least size bytes. The object has a reference count
 *	of one. The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int flags, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;
	bool pfmemalloc;

	cache = (flags & SKB_ALLOC_FCLONE)
		? skbuff_fclone_cache : skbuff_head_cache;

	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_DATA_ALIGN(size);
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
	if (!data)
		goto nodata;
	/* kmalloc(size) might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	size = SKB_WITH_OVERHEAD(ksize(data));
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	/* Account for allocated memory : skb + skb->head */
	skb->truesize = SKB_TRUESIZE(size);
	skb->pfmemalloc = pfmemalloc;
	refcount_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	if (flags & SKB_ALLOC_FCLONE) {
		struct sk_buff_fclones *fclones;

		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		skb->fclone = SKB_FCLONE_ORIG;
		refcount_set(&fclones->fclone_ref, 1);

		fclones->skb2.fclone = SKB_FCLONE_CLONE;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);
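
/*
 * Illustrative call (hypothetical @len): most users go through the
 * alloc_skb()/alloc_skb_fclone() wrappers in <linux/skbuff.h>, which
 * supply the flags and NUMA node for them.
 *
 *	struct sk_buff *skb = __alloc_skb(len, GFP_ATOMIC, 0, NUMA_NO_NODE);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, NET_SKB_PAD);	// carve headroom out of the tailroom
 */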

/**
 * __build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc() only if
 * @frag_size is 0, otherwise data should come from the page allocator
 * or vmalloc().
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 *  Before IO, driver allocates only data buffer where NIC put incoming frame
 *  Driver should add room at head (NET_SKB_PAD) and
 *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
 *  After IO, driver calls build_skb(), to allocate sk_buff and populate it
 *  before giving packet to stack.
 *  RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	unsigned int size = frag_size ? : ksize(data);

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (!skb)
		return NULL;

	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = SKB_TRUESIZE(size);
	refcount_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	return skb;
}

/* build_skb() is a wrapper over __build_skb() that specifically
 * takes care of skb->head and skb->pfmemalloc.
 * This means that if @frag_size is not zero, then @data must be backed
 * by a page fragment, not kmalloc() or vmalloc()
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __build_skb(data, frag_size);

	if (skb && frag_size) {
		skb->head_frag = 1;
		if (page_is_pfmemalloc(virt_to_head_page(data)))
			skb->pfmemalloc = 1;
	}
	return skb;
}
EXPORT_SYMBOL(build_skb);
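
/*
 * Sketch of the driver pattern described in the __build_skb() notes
 * (hypothetical truesize/frame_len): the fragment is allocated before DMA
 * and only turned into an skb once the frame has arrived.
 *
 *	data = napi_alloc_frag(truesize);  // NET_SKB_PAD + len + shinfo room
 *	...                                // NIC DMAs the frame into data
 *	skb = build_skb(data, truesize);
 *	if (!skb) {
 *		skb_free_frag(data);
 *		return NULL;
 *	}
 *	skb_reserve(skb, NET_SKB_PAD);
 *	skb_put(skb, frame_len);
 */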
| 325 | |
Jesper Dangaard Brouer | 795bb1c | 2016-02-08 13:14:59 +0100 | [diff] [blame] | 326 | #define NAPI_SKB_CACHE_SIZE 64 |
| 327 | |
| 328 | struct napi_alloc_cache { |
| 329 | struct page_frag_cache page; |
Alexey Dobriyan | e0d7924 | 2016-11-19 03:47:56 +0300 | [diff] [blame] | 330 | unsigned int skb_count; |
Jesper Dangaard Brouer | 795bb1c | 2016-02-08 13:14:59 +0100 | [diff] [blame] | 331 | void *skb_cache[NAPI_SKB_CACHE_SIZE]; |
| 332 | }; |
| 333 | |
Alexander Duyck | b63ae8c | 2015-05-06 21:11:57 -0700 | [diff] [blame] | 334 | static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache); |
Jesper Dangaard Brouer | 795bb1c | 2016-02-08 13:14:59 +0100 | [diff] [blame] | 335 | static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache); |
Alexander Duyck | ffde732 | 2014-12-09 19:40:42 -0800 | [diff] [blame] | 336 | |
| 337 | static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) |
| 338 | { |
Alexander Duyck | b63ae8c | 2015-05-06 21:11:57 -0700 | [diff] [blame] | 339 | struct page_frag_cache *nc; |
Alexander Duyck | ffde732 | 2014-12-09 19:40:42 -0800 | [diff] [blame] | 340 | unsigned long flags; |
| 341 | void *data; |
| 342 | |
| 343 | local_irq_save(flags); |
Alexander Duyck | 9451980 | 2015-05-06 21:11:40 -0700 | [diff] [blame] | 344 | nc = this_cpu_ptr(&netdev_alloc_cache); |
Alexander Duyck | 8c2dd3e | 2017-01-10 16:58:06 -0800 | [diff] [blame] | 345 | data = page_frag_alloc(nc, fragsz, gfp_mask); |
Eric Dumazet | 6f53261 | 2012-05-18 05:12:12 +0000 | [diff] [blame] | 346 | local_irq_restore(flags); |
| 347 | return data; |
| 348 | } |
Mel Gorman | c93bdd0 | 2012-07-31 16:44:19 -0700 | [diff] [blame] | 349 | |
| 350 | /** |
| 351 | * netdev_alloc_frag - allocate a page fragment |
| 352 | * @fragsz: fragment size |
| 353 | * |
| 354 | * Allocates a frag from a page for receive buffer. |
| 355 | * Uses GFP_ATOMIC allocations. |
| 356 | */ |
| 357 | void *netdev_alloc_frag(unsigned int fragsz) |
| 358 | { |
Mel Gorman | 453f85d | 2017-11-15 17:38:03 -0800 | [diff] [blame] | 359 | return __netdev_alloc_frag(fragsz, GFP_ATOMIC); |
Mel Gorman | c93bdd0 | 2012-07-31 16:44:19 -0700 | [diff] [blame] | 360 | } |
Eric Dumazet | 6f53261 | 2012-05-18 05:12:12 +0000 | [diff] [blame] | 361 | EXPORT_SYMBOL(netdev_alloc_frag); |
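
/*
 * Illustrative fragment sizing (hypothetical rx_len; an approximation of
 * what __netdev_alloc_skb() computes below): callers must leave room for
 * the shared info when the fragment will back an skb head.
 *
 *	unsigned int fragsz = SKB_DATA_ALIGN(NET_SKB_PAD + rx_len) +
 *			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	void *data = netdev_alloc_frag(fragsz);
 */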

static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	return page_frag_alloc(&nc->page, fragsz, gfp_mask);
}

void *napi_alloc_frag(unsigned int fragsz)
{
	return __napi_alloc_frag(fragsz, GFP_ATOMIC);
}
EXPORT_SYMBOL(napi_alloc_frag);

/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@len: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has NET_SKB_PAD headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
				   gfp_t gfp_mask)
{
	struct page_frag_cache *nc;
	unsigned long flags;
	struct sk_buff *skb;
	bool pfmemalloc;
	void *data;

	len += NET_SKB_PAD;

	if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	len = SKB_DATA_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	local_irq_save(flags);

	nc = this_cpu_ptr(&netdev_alloc_cache);
	data = page_frag_alloc(nc, len, gfp_mask);
	pfmemalloc = nc->pfmemalloc;

	local_irq_restore(flags);

	if (unlikely(!data))
		return NULL;

	skb = __build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	/* use OR instead of assignment to avoid clearing of bits in mask */
	if (pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD);
	skb->dev = dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);
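
/*
 * Typical driver call (sketch, hypothetical names): netdev_alloc_skb() in
 * <linux/skbuff.h> wraps this function with GFP_ATOMIC.
 *
 *	skb = netdev_alloc_skb(dev, pkt_len);
 *	if (unlikely(!skb)) {
 *		dev->stats.rx_dropped++;
 *		return;
 *	}
 */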

/**
 *	__napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
 *	@napi: napi instance this buffer was allocated for
 *	@len: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
 *
 *	Allocate a new sk_buff for use in NAPI receive. This buffer will
 *	attempt to allocate the head from a special reserved region used
 *	only for NAPI Rx allocation. By doing this we can save several
 *	CPU cycles by avoiding having to disable and re-enable IRQs.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
				 gfp_t gfp_mask)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
	struct sk_buff *skb;
	void *data;

	len += NET_SKB_PAD + NET_IP_ALIGN;

	if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	len = SKB_DATA_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	data = page_frag_alloc(&nc->page, len, gfp_mask);
	if (unlikely(!data))
		return NULL;

	skb = __build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	/* use OR instead of assignment to avoid clearing of bits in mask */
	if (nc->page.pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	skb->dev = napi->dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__napi_alloc_skb);
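
/*
 * Sketch of NAPI poll usage (hypothetical rxq): napi_alloc_skb() in
 * <linux/skbuff.h> wraps this function with GFP_ATOMIC, and is meant to
 * be called only from the softirq context the @napi instance runs in.
 *
 *	skb = napi_alloc_skb(&rxq->napi, pkt_len);
 *	if (!skb)
 *		break;		// out of memory, retry on the next poll
 */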

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	skb_frag_size_add(frag, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_coalesce_rx_frag);
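
/*
 * Illustrative distinction between the two helpers above (hypothetical
 * page/off/size): skb_add_rx_frag() fills a new fragment slot, while
 * skb_coalesce_rx_frag() grows an existing one when data lands
 * contiguously in the same page.
 *
 *	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, off, size,
 *			PAGE_SIZE);
 */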

static void skb_drop_list(struct sk_buff **listp)
{
	kfree_skb_list(*listp);
	*listp = NULL;
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_free_head(struct sk_buff *skb)
{
	unsigned char *head = skb->head;

	if (skb->head_frag)
		skb_free_frag(head);
	else
		kfree(head);
}

static void skb_release_data(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int i;

	if (skb->cloned &&
	    atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			      &shinfo->dataref))
		return;

	for (i = 0; i < shinfo->nr_frags; i++)
		__skb_frag_unref(&shinfo->frags[i]);

	if (shinfo->frag_list)
		kfree_skb_list(shinfo->frag_list);

	skb_zcopy_clear(skb, true);
	skb_free_head(skb);
}

/*
 *	Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff_fclones *fclones;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		return;

	case SKB_FCLONE_ORIG:
		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		/* We usually free the clone (TX completion) before the
		 * original skb. This test would have no chance to be true
		 * for the clone, while here, branch prediction will be good.
		 */
		if (refcount_read(&fclones->fclone_ref) == 1)
			goto fastpath;
		break;

	default: /* SKB_FCLONE_CLONE */
		fclones = container_of(skb, struct sk_buff_fclones, skb2);
		break;
	}
	if (!refcount_dec_and_test(&fclones->fclone_ref))
		return;
fastpath:
	kmem_cache_free(skbuff_fclone_cache, fclones);
}

void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb_nfct(skb));
#endif
	skb_ext_put(skb);
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	if (likely(skb->head))
		skb_release_data(skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (!skb_unref(skb))
		return;

	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

void kfree_skb_list(struct sk_buff *segs)
{
	while (segs) {
		struct sk_buff *next = segs->next;

		kfree_skb(segs);
		segs = next;
	}
}
EXPORT_SYMBOL(kfree_skb_list);

/**
 *	skb_tx_error - report an sk_buff xmit error
 *	@skb: buffer that triggered an error
 *
 *	Report xmit error if a device callback is tracking this skb.
 *	skb must be freed afterwards.
 */
void skb_tx_error(struct sk_buff *skb)
{
	skb_zcopy_clear(skb, true);
}
EXPORT_SYMBOL(skb_tx_error);

/**
 *	consume_skb - free an skbuff
 *	@skb: buffer to free
 *
 *	Drop a ref to the buffer and free it if the usage count has hit zero.
 *	Functions identically to kfree_skb, but kfree_skb assumes that the
 *	frame is being dropped after a failure and notes that in its
 *	tracepoint, which feeds the drop monitor.
 */
void consume_skb(struct sk_buff *skb)
{
	if (!skb_unref(skb))
		return;

	trace_consume_skb(skb);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
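
/*
 * The distinction above only matters for tracing and drop monitoring;
 * a sketch of the convention (hypothetical TX completion handler):
 *
 *	if (unlikely(tx_error))
 *		kfree_skb(skb);		// counted as a drop
 *	else
 *		consume_skb(skb);	// normal end of life
 */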

/**
 *	__consume_stateless_skb - free an skbuff, assuming it is stateless
 *	@skb: buffer to free
 *
 *	Like consume_skb(), but this variant assumes that this is the last
 *	skb reference and all the head states have been already dropped.
 */
void __consume_stateless_skb(struct sk_buff *skb)
{
	trace_consume_skb(skb);
	skb_release_data(skb);
	kfree_skbmem(skb);
}

void __kfree_skb_flush(void)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	/* flush skb_cache if containing objects */
	if (nc->skb_count) {
		kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
				     nc->skb_cache);
		nc->skb_count = 0;
	}
}

static inline void _kfree_skb_defer(struct sk_buff *skb)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	/* drop skb->head and call any destructors for packet */
	skb_release_all(skb);

	/* record skb to CPU local list */
	nc->skb_cache[nc->skb_count++] = skb;

#ifdef CONFIG_SLUB
	/* SLUB writes into objects when freeing */
	prefetchw(skb);
#endif

	/* flush skb_cache if it is filled */
	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
		kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_SIZE,
				     nc->skb_cache);
		nc->skb_count = 0;
	}
}
void __kfree_skb_defer(struct sk_buff *skb)
{
	_kfree_skb_defer(skb);
}

void napi_consume_skb(struct sk_buff *skb, int budget)
{
	if (unlikely(!skb))
		return;

	/* Zero budget indicates a non-NAPI context called us, like netpoll */
	if (unlikely(!budget)) {
		dev_consume_skb_any(skb);
		return;
	}

	if (!skb_unref(skb))
		return;

	/* if reaching here SKB is ready to free */
	trace_consume_skb(skb);

	/* if SKB is a clone, don't handle this case */
	if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
		__kfree_skb(skb);
		return;
	}

	_kfree_skb_defer(skb);
}
EXPORT_SYMBOL(napi_consume_skb);

/* Make sure a field is enclosed inside headers_start/headers_end section */
#define CHECK_SKB_FIELD(field) \
	BUILD_BUG_ON(offsetof(struct sk_buff, field) <		\
		     offsetof(struct sk_buff, headers_start));	\
	BUILD_BUG_ON(offsetof(struct sk_buff, field) >		\
		     offsetof(struct sk_buff, headers_end));	\

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp = old->tstamp;
	/* We do not copy old->sk */
	new->dev = old->dev;
	memcpy(new->cb, old->cb, sizeof(old->cb));
	skb_dst_copy(new, old);
	__skb_ext_copy(new, old);
	__nf_copy(new, old, false);

	/* Note : this field could be in headers_start/headers_end section
	 * It is not yet because we do not want to have a 16 bit hole
	 */
	new->queue_mapping = old->queue_mapping;

	memcpy(&new->headers_start, &old->headers_start,
	       offsetof(struct sk_buff, headers_end) -
	       offsetof(struct sk_buff, headers_start));
	CHECK_SKB_FIELD(protocol);
	CHECK_SKB_FIELD(csum);
	CHECK_SKB_FIELD(hash);
	CHECK_SKB_FIELD(priority);
	CHECK_SKB_FIELD(skb_iif);
	CHECK_SKB_FIELD(vlan_proto);
	CHECK_SKB_FIELD(vlan_tci);
	CHECK_SKB_FIELD(transport_header);
	CHECK_SKB_FIELD(network_header);
	CHECK_SKB_FIELD(mac_header);
	CHECK_SKB_FIELD(inner_protocol);
	CHECK_SKB_FIELD(inner_transport_header);
	CHECK_SKB_FIELD(inner_network_header);
	CHECK_SKB_FIELD(inner_mac_header);
	CHECK_SKB_FIELD(mark);
#ifdef CONFIG_NETWORK_SECMARK
	CHECK_SKB_FIELD(secmark);
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	CHECK_SKB_FIELD(napi_id);
#endif
#ifdef CONFIG_XPS
	CHECK_SKB_FIELD(sender_cpu);
#endif
#ifdef CONFIG_NET_SCHED
	CHECK_SKB_FIELD(tc_index);
#endif

}

/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->peeked = 0;
	C(pfmemalloc);
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(head_frag);
	C(data);
	C(truesize);
	refcount_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 *	skb_morph - morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);
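
/*
 * Usage sketch (hypothetical @dst/@src): after the call @dst is a clone
 * backed by @src's data, exactly as if it came from skb_clone(); @dst's
 * old state has been released. The caller still holds its own reference
 * on @src and can drop the shell once it is no longer needed.
 *
 *	skb_morph(dst, src);
 *	consume_skb(src);	// data stays alive via dst's dataref
 */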
| 892 | |
Sowmini Varadhan | 6f89dbc | 2018-02-15 10:49:32 -0800 | [diff] [blame] | 893 | int mm_account_pinned_pages(struct mmpin *mmp, size_t size) |
Willem de Bruijn | a91dbff | 2017-08-03 16:29:43 -0400 | [diff] [blame] | 894 | { |
| 895 | unsigned long max_pg, num_pg, new_pg, old_pg; |
| 896 | struct user_struct *user; |
| 897 | |
| 898 | if (capable(CAP_IPC_LOCK) || !size) |
| 899 | return 0; |
| 900 | |
| 901 | num_pg = (size >> PAGE_SHIFT) + 2; /* worst case */ |
| 902 | max_pg = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; |
| 903 | user = mmp->user ? : current_user(); |
| 904 | |
| 905 | do { |
| 906 | old_pg = atomic_long_read(&user->locked_vm); |
| 907 | new_pg = old_pg + num_pg; |
| 908 | if (new_pg > max_pg) |
| 909 | return -ENOBUFS; |
| 910 | } while (atomic_long_cmpxchg(&user->locked_vm, old_pg, new_pg) != |
| 911 | old_pg); |
| 912 | |
| 913 | if (!mmp->user) { |
| 914 | mmp->user = get_uid(user); |
| 915 | mmp->num_pg = num_pg; |
| 916 | } else { |
| 917 | mmp->num_pg += num_pg; |
| 918 | } |
| 919 | |
| 920 | return 0; |
| 921 | } |
Sowmini Varadhan | 6f89dbc | 2018-02-15 10:49:32 -0800 | [diff] [blame] | 922 | EXPORT_SYMBOL_GPL(mm_account_pinned_pages); |
Willem de Bruijn | a91dbff | 2017-08-03 16:29:43 -0400 | [diff] [blame] | 923 | |
Sowmini Varadhan | 6f89dbc | 2018-02-15 10:49:32 -0800 | [diff] [blame] | 924 | void mm_unaccount_pinned_pages(struct mmpin *mmp) |
Willem de Bruijn | a91dbff | 2017-08-03 16:29:43 -0400 | [diff] [blame] | 925 | { |
| 926 | if (mmp->user) { |
| 927 | atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm); |
| 928 | free_uid(mmp->user); |
| 929 | } |
| 930 | } |
Sowmini Varadhan | 6f89dbc | 2018-02-15 10:49:32 -0800 | [diff] [blame] | 931 | EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages); |
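/*
 * Illustrative sketch (editor's addition): the intended pairing of the two
 * accounting helpers above. A caller embeds a struct mmpin, charges pages
 * against RLIMIT_MEMLOCK before pinning user memory, and releases the
 * charge when done. The example_ names are hypothetical.
 */
struct example_pin_ctx {
	struct mmpin mmp;
};

static int example_charge(struct example_pin_ctx *ctx, size_t bytes)
{
	ctx->mmp.user = NULL;	/* first call latches current_user() */
	ctx->mmp.num_pg = 0;
	return mm_account_pinned_pages(&ctx->mmp, bytes);
}

static void example_release(struct example_pin_ctx *ctx)
{
	mm_unaccount_pinned_pages(&ctx->mmp);
}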
Willem de Bruijn | a91dbff | 2017-08-03 16:29:43 -0400 | [diff] [blame] | 932 | |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 933 | struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size) |
| 934 | { |
| 935 | struct ubuf_info *uarg; |
| 936 | struct sk_buff *skb; |
| 937 | |
| 938 | WARN_ON_ONCE(!in_task()); |
| 939 | |
| 940 | skb = sock_omalloc(sk, 0, GFP_KERNEL); |
| 941 | if (!skb) |
| 942 | return NULL; |
| 943 | |
| 944 | BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb)); |
| 945 | uarg = (void *)skb->cb; |
Willem de Bruijn | a91dbff | 2017-08-03 16:29:43 -0400 | [diff] [blame] | 946 | uarg->mmp.user = NULL; |
| 947 | |
| 948 | if (mm_account_pinned_pages(&uarg->mmp, size)) { |
| 949 | kfree_skb(skb); |
| 950 | return NULL; |
| 951 | } |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 952 | |
| 953 | uarg->callback = sock_zerocopy_callback; |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 954 | uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1; |
| 955 | uarg->len = 1; |
| 956 | uarg->bytelen = size; |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 957 | uarg->zerocopy = 1; |
Eric Dumazet | c1d1b43 | 2017-08-31 16:48:22 -0700 | [diff] [blame] | 958 | refcount_set(&uarg->refcnt, 1); |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 959 | sock_hold(sk); |
| 960 | |
| 961 | return uarg; |
| 962 | } |
| 963 | EXPORT_SYMBOL_GPL(sock_zerocopy_alloc); |
| 964 | |
| 965 | static inline struct sk_buff *skb_from_uarg(struct ubuf_info *uarg) |
| 966 | { |
| 967 | return container_of((void *)uarg, struct sk_buff, cb); |
| 968 | } |
| 969 | |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 970 | struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size, |
| 971 | struct ubuf_info *uarg) |
| 972 | { |
| 973 | if (uarg) { |
| 974 | const u32 byte_limit = 1 << 19; /* limit to a few TSO packets */ |
| 975 | u32 bytelen, next; |
| 976 | |
| 977 | /* realloc only when socket is locked (TCP, UDP cork), |
| 978 | * so uarg->len and sk_zckey access is serialized |
| 979 | */ |
| 980 | if (!sock_owned_by_user(sk)) { |
| 981 | WARN_ON_ONCE(1); |
| 982 | return NULL; |
| 983 | } |
| 984 | |
| 985 | bytelen = uarg->bytelen + size; |
| 986 | if (uarg->len == USHRT_MAX - 1 || bytelen > byte_limit) { |
| 987 | /* TCP can create new skb to attach new uarg */ |
| 988 | if (sk->sk_type == SOCK_STREAM) |
| 989 | goto new_alloc; |
| 990 | return NULL; |
| 991 | } |
| 992 | |
| 993 | next = (u32)atomic_read(&sk->sk_zckey); |
| 994 | if ((u32)(uarg->id + uarg->len) == next) { |
Willem de Bruijn | a91dbff | 2017-08-03 16:29:43 -0400 | [diff] [blame] | 995 | if (mm_account_pinned_pages(&uarg->mmp, size)) |
| 996 | return NULL; |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 997 | uarg->len++; |
| 998 | uarg->bytelen = bytelen; |
| 999 | atomic_set(&sk->sk_zckey, ++next); |
Eric Dumazet | db5bce3 | 2017-08-31 16:48:21 -0700 | [diff] [blame] | 1000 | sock_zerocopy_get(uarg); |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1001 | return uarg; |
| 1002 | } |
| 1003 | } |
| 1004 | |
| 1005 | new_alloc: |
| 1006 | return sock_zerocopy_alloc(sk, size); |
| 1007 | } |
| 1008 | EXPORT_SYMBOL_GPL(sock_zerocopy_realloc); |
| 1009 | |
| 1010 | static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len) |
| 1011 | { |
| 1012 | struct sock_exterr_skb *serr = SKB_EXT_ERR(skb); |
| 1013 | u32 old_lo, old_hi; |
| 1014 | u64 sum_len; |
| 1015 | |
| 1016 | old_lo = serr->ee.ee_info; |
| 1017 | old_hi = serr->ee.ee_data; |
| 1018 | sum_len = old_hi - old_lo + 1ULL + len; |
| 1019 | |
| 1020 | if (sum_len >= (1ULL << 32)) |
| 1021 | return false; |
| 1022 | |
| 1023 | if (lo != old_hi + 1) |
| 1024 | return false; |
| 1025 | |
| 1026 | serr->ee.ee_data += len; |
| 1027 | return true; |
| 1028 | } |
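/*
 * Worked example (editor's addition): if the tail notification on the error
 * queue covers zerocopy ids 5..9 (ee_info == 5, ee_data == 9), a completion
 * with lo == 10 and len == 3 is contiguous (10 == 9 + 1), so the helper
 * above extends the range to 5..12 by setting ee_data to 12 and returns
 * true. A completion starting at 11 would leave a gap, so it returns false
 * and a separate notification skb is queued instead.
 */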
| 1029 | |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1030 | void sock_zerocopy_callback(struct ubuf_info *uarg, bool success) |
| 1031 | { |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1032 | struct sk_buff *tail, *skb = skb_from_uarg(uarg); |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1033 | struct sock_exterr_skb *serr; |
| 1034 | struct sock *sk = skb->sk; |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1035 | struct sk_buff_head *q; |
| 1036 | unsigned long flags; |
| 1037 | u32 lo, hi; |
| 1038 | u16 len; |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1039 | |
Willem de Bruijn | ccaffff | 2017-08-09 19:09:43 -0400 | [diff] [blame] | 1040 | mm_unaccount_pinned_pages(&uarg->mmp); |
| 1041 | |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1042 | /* if !len, there was only 1 call, and it was aborted, |
| 1043 | * so do not queue a completion notification |
| 1044 | */ |
| 1045 | if (!uarg->len || sock_flag(sk, SOCK_DEAD)) |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1046 | goto release; |
| 1047 | |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1048 | len = uarg->len; |
| 1049 | lo = uarg->id; |
| 1050 | hi = uarg->id + len - 1; |
| 1051 | |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1052 | serr = SKB_EXT_ERR(skb); |
| 1053 | memset(serr, 0, sizeof(*serr)); |
| 1054 | serr->ee.ee_errno = 0; |
| 1055 | serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY; |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1056 | serr->ee.ee_data = hi; |
| 1057 | serr->ee.ee_info = lo; |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1058 | if (!success) |
| 1059 | serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED; |
| 1060 | |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1061 | q = &sk->sk_error_queue; |
| 1062 | spin_lock_irqsave(&q->lock, flags); |
| 1063 | tail = skb_peek_tail(q); |
| 1064 | if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY || |
| 1065 | !skb_zerocopy_notify_extend(tail, lo, len)) { |
| 1066 | __skb_queue_tail(q, skb); |
| 1067 | skb = NULL; |
| 1068 | } |
| 1069 | spin_unlock_irqrestore(&q->lock, flags); |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1070 | |
| 1071 | sk->sk_error_report(sk); |
| 1072 | |
| 1073 | release: |
| 1074 | consume_skb(skb); |
| 1075 | sock_put(sk); |
| 1076 | } |
| 1077 | EXPORT_SYMBOL_GPL(sock_zerocopy_callback); |
| 1078 | |
| 1079 | void sock_zerocopy_put(struct ubuf_info *uarg) |
| 1080 | { |
Eric Dumazet | c1d1b43 | 2017-08-31 16:48:22 -0700 | [diff] [blame] | 1081 | if (uarg && refcount_dec_and_test(&uarg->refcnt)) { |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1082 | if (uarg->callback) |
| 1083 | uarg->callback(uarg, uarg->zerocopy); |
| 1084 | else |
| 1085 | consume_skb(skb_from_uarg(uarg)); |
| 1086 | } |
| 1087 | } |
| 1088 | EXPORT_SYMBOL_GPL(sock_zerocopy_put); |
| 1089 | |
Willem de Bruijn | 52900d2 | 2018-11-30 15:32:40 -0500 | [diff] [blame] | 1090 | void sock_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref) |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1091 | { |
| 1092 | if (uarg) { |
| 1093 | struct sock *sk = skb_from_uarg(uarg)->sk; |
| 1094 | |
| 1095 | atomic_dec(&sk->sk_zckey); |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1096 | uarg->len--; |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1097 | |
Willem de Bruijn | 52900d2 | 2018-11-30 15:32:40 -0500 | [diff] [blame] | 1098 | if (have_uref) |
| 1099 | sock_zerocopy_put(uarg); |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1100 | } |
| 1101 | } |
| 1102 | EXPORT_SYMBOL_GPL(sock_zerocopy_put_abort); |
| 1103 | |
| 1104 | extern int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb, |
| 1105 | struct iov_iter *from, size_t length); |
| 1106 | |
Willem de Bruijn | b5947e5 | 2018-11-30 15:32:39 -0500 | [diff] [blame] | 1107 | int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len) |
| 1108 | { |
| 1109 | return __zerocopy_sg_from_iter(skb->sk, skb, &msg->msg_iter, len); |
| 1110 | } |
| 1111 | EXPORT_SYMBOL_GPL(skb_zerocopy_iter_dgram); |
| 1112 | |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1113 | int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, |
| 1114 | struct msghdr *msg, int len, |
| 1115 | struct ubuf_info *uarg) |
| 1116 | { |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1117 | struct ubuf_info *orig_uarg = skb_zcopy(skb); |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1118 | struct iov_iter orig_iter = msg->msg_iter; |
| 1119 | int err, orig_len = skb->len; |
| 1120 | |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1121 | /* An skb can only point to one uarg. This edge case happens when |
| 1122 | * TCP appends to an skb, but zerocopy_realloc triggered a new alloc. |
| 1123 | */ |
| 1124 | if (orig_uarg && uarg != orig_uarg) |
| 1125 | return -EEXIST; |
| 1126 | |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1127 | err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len); |
| 1128 | if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) { |
Willem de Bruijn | 54d43117 | 2017-10-19 12:40:39 -0400 | [diff] [blame] | 1129 | struct sock *save_sk = skb->sk; |
| 1130 | |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1131 | /* Streams do not free skb on error. Reset to prev state. */ |
| 1132 | msg->msg_iter = orig_iter; |
Willem de Bruijn | 54d43117 | 2017-10-19 12:40:39 -0400 | [diff] [blame] | 1133 | skb->sk = sk; |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1134 | ___pskb_trim(skb, orig_len); |
Willem de Bruijn | 54d43117 | 2017-10-19 12:40:39 -0400 | [diff] [blame] | 1135 | skb->sk = save_sk; |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1136 | return err; |
| 1137 | } |
| 1138 | |
Willem de Bruijn | 52900d2 | 2018-11-30 15:32:40 -0500 | [diff] [blame] | 1139 | skb_zcopy_set(skb, uarg, NULL); |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1140 | return skb->len - orig_len; |
| 1141 | } |
| 1142 | EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream); |
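/*
 * Illustrative sketch (editor's addition): the rough shape of a MSG_ZEROCOPY
 * stream send built on the helpers above, loosely modelled on the TCP
 * sendmsg path. The socket lock must be held (sock_zerocopy_realloc()
 * insists on it); skb allocation, queuing and the exact reference-count
 * discipline of real callers are simplified, and example_sendmsg_zc() is a
 * hypothetical name.
 */
static int example_sendmsg_zc(struct sock *sk, struct sk_buff *skb,
			      struct msghdr *msg, int len)
{
	struct ubuf_info *uarg;
	int copied;

	/* extend the skb's existing uarg if possible, else allocate anew */
	uarg = sock_zerocopy_realloc(sk, len, skb_zcopy(skb));
	if (!uarg)
		return -ENOBUFS;

	copied = skb_zerocopy_iter_stream(sk, skb, msg, len, uarg);
	if (copied < 0) {
		sock_zerocopy_put_abort(uarg, true);
		return copied;
	}

	sock_zerocopy_put(uarg);	/* drop the caller's reference */
	return copied;
}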
| 1143 | |
Willem de Bruijn | 1f8b977 | 2017-08-03 16:29:41 -0400 | [diff] [blame] | 1144 | static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig, |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1145 | gfp_t gfp_mask) |
| 1146 | { |
| 1147 | if (skb_zcopy(orig)) { |
| 1148 | if (skb_zcopy(nskb)) { |
| 1149 | /* !gfp_mask callers are verified to !skb_zcopy(nskb) */ |
| 1150 | if (!gfp_mask) { |
| 1151 | WARN_ON_ONCE(1); |
| 1152 | return -ENOMEM; |
| 1153 | } |
| 1154 | if (skb_uarg(nskb) == skb_uarg(orig)) |
| 1155 | return 0; |
| 1156 | if (skb_copy_ubufs(nskb, GFP_ATOMIC)) |
| 1157 | return -EIO; |
| 1158 | } |
Willem de Bruijn | 52900d2 | 2018-11-30 15:32:40 -0500 | [diff] [blame] | 1159 | skb_zcopy_set(nskb, skb_uarg(orig), NULL); |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1160 | } |
| 1161 | return 0; |
| 1162 | } |
| 1163 | |
Ben Hutchings | 2c53040 | 2012-07-10 10:55:09 +0000 | [diff] [blame] | 1164 | /** |
| 1165 | * skb_copy_ubufs - copy userspace skb frags buffers to kernel |
Michael S. Tsirkin | 48c8301 | 2011-08-31 08:03:29 +0000 | [diff] [blame] | 1166 | * @skb: the skb to modify |
| 1167 | * @gfp_mask: allocation priority |
| 1168 | * |
| 1169 | * This must be called on an SKBTX_DEV_ZEROCOPY skb. |
| 1170 | * It will copy all frags into kernel memory and drop the reference |
| 1171 | * to userspace pages. |
| 1172 | * |
| 1173 | * If this function is called from an interrupt, @gfp_mask must be |
| 1174 | * %GFP_ATOMIC. |
| 1175 | * |
| 1176 | * Returns 0 on success or a negative error code on failure |
| 1177 | * to allocate kernel memory to copy to. |
| 1178 | */ |
| 1179 | int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1180 | { |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1181 | int num_frags = skb_shinfo(skb)->nr_frags; |
| 1182 | struct page *page, *head = NULL; |
Willem de Bruijn | 3ece782 | 2017-08-03 16:29:38 -0400 | [diff] [blame] | 1183 | int i, new_frags; |
| 1184 | u32 d_off; |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1185 | |
Willem de Bruijn | 3ece782 | 2017-08-03 16:29:38 -0400 | [diff] [blame] | 1186 | if (skb_shared(skb) || skb_unclone(skb, gfp_mask)) |
| 1187 | return -EINVAL; |
| 1188 | |
Willem de Bruijn | f72c4ac | 2017-12-28 12:38:13 -0500 | [diff] [blame] | 1189 | if (!num_frags) |
| 1190 | goto release; |
| 1191 | |
Willem de Bruijn | 3ece782 | 2017-08-03 16:29:38 -0400 | [diff] [blame] | 1192 | new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT; |
| 1193 | for (i = 0; i < new_frags; i++) { |
Krishna Kumar | 02756ed | 2012-07-17 02:05:29 +0000 | [diff] [blame] | 1194 | page = alloc_page(gfp_mask); |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1195 | if (!page) { |
| 1196 | while (head) { |
Sunghan Suh | 40dadff | 2013-07-12 16:17:23 +0900 | [diff] [blame] | 1197 | struct page *next = (struct page *)page_private(head); |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1198 | put_page(head); |
| 1199 | head = next; |
| 1200 | } |
| 1201 | return -ENOMEM; |
| 1202 | } |
Willem de Bruijn | 3ece782 | 2017-08-03 16:29:38 -0400 | [diff] [blame] | 1203 | set_page_private(page, (unsigned long)head); |
| 1204 | head = page; |
| 1205 | } |
| 1206 | |
| 1207 | page = head; |
| 1208 | d_off = 0; |
| 1209 | for (i = 0; i < num_frags; i++) { |
| 1210 | skb_frag_t *f = &skb_shinfo(skb)->frags[i]; |
| 1211 | u32 p_off, p_len, copied; |
| 1212 | struct page *p; |
| 1213 | u8 *vaddr; |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 1214 | |
| 1215 | skb_frag_foreach_page(f, f->page_offset, skb_frag_size(f), |
| 1216 | p, p_off, p_len, copied) { |
Willem de Bruijn | 3ece782 | 2017-08-03 16:29:38 -0400 | [diff] [blame] | 1217 | u32 copy, done = 0; |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 1218 | vaddr = kmap_atomic(p); |
Willem de Bruijn | 3ece782 | 2017-08-03 16:29:38 -0400 | [diff] [blame] | 1219 | |
| 1220 | while (done < p_len) { |
| 1221 | if (d_off == PAGE_SIZE) { |
| 1222 | d_off = 0; |
| 1223 | page = (struct page *)page_private(page); |
| 1224 | } |
| 1225 | copy = min_t(u32, PAGE_SIZE - d_off, p_len - done); |
| 1226 | memcpy(page_address(page) + d_off, |
| 1227 | vaddr + p_off + done, copy); |
| 1228 | done += copy; |
| 1229 | d_off += copy; |
| 1230 | } |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 1231 | kunmap_atomic(vaddr); |
| 1232 | } |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1233 | } |
| 1234 | |
| 1235 | /* skb frags release userspace buffers */ |
Krishna Kumar | 02756ed | 2012-07-17 02:05:29 +0000 | [diff] [blame] | 1236 | for (i = 0; i < num_frags; i++) |
Ian Campbell | a8605c6 | 2011-10-19 23:01:49 +0000 | [diff] [blame] | 1237 | skb_frag_unref(skb, i); |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1238 | |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1239 | /* skb frags point to kernel buffers */ |
Willem de Bruijn | 3ece782 | 2017-08-03 16:29:38 -0400 | [diff] [blame] | 1240 | for (i = 0; i < new_frags - 1; i++) { |
| 1241 | __skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE); |
Sunghan Suh | 40dadff | 2013-07-12 16:17:23 +0900 | [diff] [blame] | 1242 | head = (struct page *)page_private(head); |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1243 | } |
Willem de Bruijn | 3ece782 | 2017-08-03 16:29:38 -0400 | [diff] [blame] | 1244 | __skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off); |
| 1245 | skb_shinfo(skb)->nr_frags = new_frags; |
Michael S. Tsirkin | 48c8301 | 2011-08-31 08:03:29 +0000 | [diff] [blame] | 1246 | |
Willem de Bruijn | b90ddd5 | 2017-12-20 17:37:50 -0500 | [diff] [blame] | 1247 | release: |
Willem de Bruijn | 1f8b977 | 2017-08-03 16:29:41 -0400 | [diff] [blame] | 1248 | skb_zcopy_clear(skb, false); |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1249 | return 0; |
| 1250 | } |
Michael S. Tsirkin | dcc0fb7 | 2012-07-20 09:23:20 +0000 | [diff] [blame] | 1251 | EXPORT_SYMBOL_GPL(skb_copy_ubufs); |
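/*
 * Illustrative sketch (editor's addition): callers rarely invoke
 * skb_copy_ubufs() directly; the usual entry points are the
 * skb_orphan_frags()/skb_orphan_frags_rx() inlines in skbuff.h, which fall
 * through to it only when the skb still references userspace pages that
 * may not be kept. A minimal direct use might look like:
 */
static int example_detach_user_pages(struct sk_buff *skb)
{
	if (!skb_zcopy(skb))
		return 0;	/* no userspace frags to copy */
	return skb_copy_ubufs(skb, GFP_ATOMIC);
}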
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1252 | |
Herbert Xu | e0053ec | 2007-10-14 00:37:52 -0700 | [diff] [blame] | 1253 | /** |
| 1254 | * skb_clone - duplicate an sk_buff |
| 1255 | * @skb: buffer to clone |
| 1256 | * @gfp_mask: allocation priority |
| 1257 | * |
| 1258 | * Duplicate an &sk_buff. The new one is not owned by a socket. Both |
| 1259 | * copies share the same packet data but not structure. The new |
| 1260 | * buffer has a reference count of 1. If the allocation fails, the |
| 1261 | * function returns %NULL; otherwise the new buffer is returned. |
| 1262 | * |
| 1263 | * If this function is called from an interrupt, @gfp_mask must be |
| 1264 | * %GFP_ATOMIC. |
| 1265 | */ |
| 1266 | |
| 1267 | struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) |
| 1268 | { |
Eric Dumazet | d0bf4a9 | 2014-09-29 13:29:15 -0700 | [diff] [blame] | 1269 | struct sk_buff_fclones *fclones = container_of(skb, |
| 1270 | struct sk_buff_fclones, |
| 1271 | skb1); |
Eric Dumazet | 6ffe75eb | 2014-12-03 17:04:39 -0800 | [diff] [blame] | 1272 | struct sk_buff *n; |
Herbert Xu | e0053ec | 2007-10-14 00:37:52 -0700 | [diff] [blame] | 1273 | |
Michael S. Tsirkin | 70008aa | 2012-07-20 09:23:10 +0000 | [diff] [blame] | 1274 | if (skb_orphan_frags(skb, gfp_mask)) |
| 1275 | return NULL; |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1276 | |
Herbert Xu | e0053ec | 2007-10-14 00:37:52 -0700 | [diff] [blame] | 1277 | if (skb->fclone == SKB_FCLONE_ORIG && |
Reshetova, Elena | 2638595 | 2017-06-30 13:07:59 +0300 | [diff] [blame] | 1278 | refcount_read(&fclones->fclone_ref) == 1) { |
Eric Dumazet | 6ffe75eb | 2014-12-03 17:04:39 -0800 | [diff] [blame] | 1279 | n = &fclones->skb2; |
Reshetova, Elena | 2638595 | 2017-06-30 13:07:59 +0300 | [diff] [blame] | 1280 | refcount_set(&fclones->fclone_ref, 2); |
Herbert Xu | e0053ec | 2007-10-14 00:37:52 -0700 | [diff] [blame] | 1281 | } else { |
Mel Gorman | c93bdd0 | 2012-07-31 16:44:19 -0700 | [diff] [blame] | 1282 | if (skb_pfmemalloc(skb)) |
| 1283 | gfp_mask |= __GFP_MEMALLOC; |
| 1284 | |
Herbert Xu | e0053ec | 2007-10-14 00:37:52 -0700 | [diff] [blame] | 1285 | n = kmem_cache_alloc(skbuff_head_cache, gfp_mask); |
| 1286 | if (!n) |
| 1287 | return NULL; |
Vegard Nossum | fe55f6d | 2008-08-30 12:16:35 +0200 | [diff] [blame] | 1288 | |
Herbert Xu | e0053ec | 2007-10-14 00:37:52 -0700 | [diff] [blame] | 1289 | n->fclone = SKB_FCLONE_UNAVAILABLE; |
| 1290 | } |
| 1291 | |
| 1292 | return __skb_clone(n, skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1293 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1294 | EXPORT_SYMBOL(skb_clone); |
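/*
 * Illustrative sketch (editor's addition): cloning so a second consumer can
 * see the packet while the original continues down the stack. Only the
 * struct sk_buff is duplicated; the data is shared, so neither copy may be
 * written without a private copy (see skb_copy()/pskb_copy() below).
 */
static void example_mirror(struct sk_buff *skb)
{
	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

	if (!clone)
		return;		/* mirroring is best-effort */
	/* hand 'clone' to the second consumer here */
	consume_skb(clone);	/* placeholder for a hypothetical consumer */
}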
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1295 | |
Toshiaki Makita | b0768a8 | 2018-08-03 16:58:09 +0900 | [diff] [blame] | 1296 | void skb_headers_offset_update(struct sk_buff *skb, int off) |
Pravin B Shelar | f5b1729 | 2013-03-07 13:21:40 +0000 | [diff] [blame] | 1297 | { |
Eric Dumazet | 030737b | 2013-10-19 11:42:54 -0700 | [diff] [blame] | 1298 | /* Only adjust this if it actually is csum_start rather than csum */ |
| 1299 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
| 1300 | skb->csum_start += off; |
Pravin B Shelar | f5b1729 | 2013-03-07 13:21:40 +0000 | [diff] [blame] | 1301 | /* {transport,network,mac}_header and tail are relative to skb->head */ |
| 1302 | skb->transport_header += off; |
| 1303 | skb->network_header += off; |
| 1304 | if (skb_mac_header_was_set(skb)) |
| 1305 | skb->mac_header += off; |
| 1306 | skb->inner_transport_header += off; |
| 1307 | skb->inner_network_header += off; |
Pravin B Shelar | aefbd2b | 2013-03-07 13:21:46 +0000 | [diff] [blame] | 1308 | skb->inner_mac_header += off; |
Pravin B Shelar | f5b1729 | 2013-03-07 13:21:40 +0000 | [diff] [blame] | 1309 | } |
Toshiaki Makita | b0768a8 | 2018-08-03 16:58:09 +0900 | [diff] [blame] | 1310 | EXPORT_SYMBOL(skb_headers_offset_update); |
Pravin B Shelar | f5b1729 | 2013-03-07 13:21:40 +0000 | [diff] [blame] | 1311 | |
Ilya Lesokhin | 08303c1 | 2018-04-30 10:16:11 +0300 | [diff] [blame] | 1312 | void skb_copy_header(struct sk_buff *new, const struct sk_buff *old) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1313 | { |
Herbert Xu | dec1881 | 2007-10-14 00:37:30 -0700 | [diff] [blame] | 1314 | __copy_skb_header(new, old); |
| 1315 | |
Herbert Xu | 7967168 | 2006-06-22 02:40:14 -0700 | [diff] [blame] | 1316 | skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size; |
| 1317 | skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; |
| 1318 | skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1319 | } |
Ilya Lesokhin | 08303c1 | 2018-04-30 10:16:11 +0300 | [diff] [blame] | 1320 | EXPORT_SYMBOL(skb_copy_header); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1321 | |
Mel Gorman | c93bdd0 | 2012-07-31 16:44:19 -0700 | [diff] [blame] | 1322 | static inline int skb_alloc_rx_flag(const struct sk_buff *skb) |
| 1323 | { |
| 1324 | if (skb_pfmemalloc(skb)) |
| 1325 | return SKB_ALLOC_RX; |
| 1326 | return 0; |
| 1327 | } |
| 1328 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1329 | /** |
| 1330 | * skb_copy - create private copy of an sk_buff |
| 1331 | * @skb: buffer to copy |
| 1332 | * @gfp_mask: allocation priority |
| 1333 | * |
| 1334 | * Make a copy of both an &sk_buff and its data. This is used when the |
| 1335 | * caller wishes to modify the data and needs a private copy of the |
| 1336 | * data to alter. Returns %NULL on failure or the pointer to the buffer |
| 1337 | * on success. The returned buffer has a reference count of 1. |
| 1338 | * |
| 1339 | * As a by-product, this function converts a non-linear &sk_buff into a |
| 1340 | * linear one, so that the &sk_buff becomes completely private and the |
| 1341 | * caller is allowed to modify all the data of the returned buffer. This |
| 1342 | * means that this function is not recommended for use when only the |
| 1343 | * header is going to be modified. Use pskb_copy() instead. |
| 1344 | */ |
| 1345 | |
Al Viro | dd0fc66 | 2005-10-07 07:46:04 +0100 | [diff] [blame] | 1346 | struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1347 | { |
Eric Dumazet | 6602ceb | 2010-09-01 05:25:10 +0000 | [diff] [blame] | 1348 | int headerlen = skb_headroom(skb); |
Alexander Duyck | ec47ea8 | 2012-05-04 14:26:56 +0000 | [diff] [blame] | 1349 | unsigned int size = skb_end_offset(skb) + skb->data_len; |
Mel Gorman | c93bdd0 | 2012-07-31 16:44:19 -0700 | [diff] [blame] | 1350 | struct sk_buff *n = __alloc_skb(size, gfp_mask, |
| 1351 | skb_alloc_rx_flag(skb), NUMA_NO_NODE); |
Eric Dumazet | 6602ceb | 2010-09-01 05:25:10 +0000 | [diff] [blame] | 1352 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1353 | if (!n) |
| 1354 | return NULL; |
| 1355 | |
| 1356 | /* Set the data pointer */ |
| 1357 | skb_reserve(n, headerlen); |
| 1358 | /* Set the tail pointer and length */ |
| 1359 | skb_put(n, skb->len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1360 | |
Tim Hansen | 9f77fad | 2017-10-09 11:37:59 -0400 | [diff] [blame] | 1361 | BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1362 | |
Ilya Lesokhin | 08303c1 | 2018-04-30 10:16:11 +0300 | [diff] [blame] | 1363 | skb_copy_header(n, skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1364 | return n; |
| 1365 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1366 | EXPORT_SYMBOL(skb_copy); |
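/*
 * Illustrative sketch (editor's addition): taking a fully private, writable
 * copy before rewriting payload bytes. The result is linearized, which
 * makes this the heavyweight option; prefer pskb_copy() below when only
 * headers change. example_scrub() is a hypothetical transformation.
 */
static struct sk_buff *example_scrub(struct sk_buff *skb)
{
	struct sk_buff *copy = skb_copy(skb, GFP_ATOMIC);

	if (copy)
		memset(copy->data, 0, skb_headlen(copy));	/* safe: private */
	return copy;
}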
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1367 | |
| 1368 | /** |
Octavian Purdila | bad93e9 | 2014-06-12 01:36:26 +0300 | [diff] [blame] | 1369 | * __pskb_copy_fclone - create copy of an sk_buff with private head. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1370 | * @skb: buffer to copy |
Eric Dumazet | 117632e | 2011-12-03 21:39:53 +0000 | [diff] [blame] | 1371 | * @headroom: headroom of new skb |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1372 | * @gfp_mask: allocation priority |
Octavian Purdila | bad93e9 | 2014-06-12 01:36:26 +0300 | [diff] [blame] | 1373 | * @fclone: if true allocate the copy of the skb from the fclone |
| 1374 | * cache instead of the head cache; it is recommended to set this |
| 1375 | * to true for the cases where the copy will likely be cloned |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1376 | * |
| 1377 | * Make a copy of both an &sk_buff and part of its data, located |
| 1378 | * in its header. Fragmented data remains shared. This is used when |
| 1379 | * the caller wishes to modify only the header of the &sk_buff and needs |
| 1380 | * a private copy of the header to alter. Returns %NULL on failure |
| 1381 | * or the pointer to the buffer on success. |
| 1382 | * The returned buffer has a reference count of 1. |
| 1383 | */ |
| 1384 | |
Octavian Purdila | bad93e9 | 2014-06-12 01:36:26 +0300 | [diff] [blame] | 1385 | struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, |
| 1386 | gfp_t gfp_mask, bool fclone) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1387 | { |
Eric Dumazet | 117632e | 2011-12-03 21:39:53 +0000 | [diff] [blame] | 1388 | unsigned int size = skb_headlen(skb) + headroom; |
Octavian Purdila | bad93e9 | 2014-06-12 01:36:26 +0300 | [diff] [blame] | 1389 | int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0); |
| 1390 | struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE); |
Eric Dumazet | 6602ceb | 2010-09-01 05:25:10 +0000 | [diff] [blame] | 1391 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1392 | if (!n) |
| 1393 | goto out; |
| 1394 | |
| 1395 | /* Set the data pointer */ |
Eric Dumazet | 117632e | 2011-12-03 21:39:53 +0000 | [diff] [blame] | 1396 | skb_reserve(n, headroom); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1397 | /* Set the tail pointer and length */ |
| 1398 | skb_put(n, skb_headlen(skb)); |
| 1399 | /* Copy the bytes */ |
Arnaldo Carvalho de Melo | d626f62 | 2007-03-27 18:55:52 -0300 | [diff] [blame] | 1400 | skb_copy_from_linear_data(skb, n->data, n->len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1401 | |
Herbert Xu | 25f484a | 2006-11-07 14:57:15 -0800 | [diff] [blame] | 1402 | n->truesize += skb->data_len; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1403 | n->data_len = skb->data_len; |
| 1404 | n->len = skb->len; |
| 1405 | |
| 1406 | if (skb_shinfo(skb)->nr_frags) { |
| 1407 | int i; |
| 1408 | |
Willem de Bruijn | 1f8b977 | 2017-08-03 16:29:41 -0400 | [diff] [blame] | 1409 | if (skb_orphan_frags(skb, gfp_mask) || |
| 1410 | skb_zerocopy_clone(n, skb, gfp_mask)) { |
Michael S. Tsirkin | 70008aa | 2012-07-20 09:23:10 +0000 | [diff] [blame] | 1411 | kfree_skb(n); |
| 1412 | n = NULL; |
| 1413 | goto out; |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1414 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1415 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
| 1416 | skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 1417 | skb_frag_ref(skb, i); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1418 | } |
| 1419 | skb_shinfo(n)->nr_frags = i; |
| 1420 | } |
| 1421 | |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 1422 | if (skb_has_frag_list(skb)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1423 | skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; |
| 1424 | skb_clone_fraglist(n); |
| 1425 | } |
| 1426 | |
Ilya Lesokhin | 08303c1 | 2018-04-30 10:16:11 +0300 | [diff] [blame] | 1427 | skb_copy_header(n, skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1428 | out: |
| 1429 | return n; |
| 1430 | } |
Octavian Purdila | bad93e9 | 2014-06-12 01:36:26 +0300 | [diff] [blame] | 1431 | EXPORT_SYMBOL(__pskb_copy_fclone); |
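/*
 * Illustrative sketch (editor's addition): the common entry point is the
 * pskb_copy() inline from skbuff.h, which wraps __pskb_copy_fclone() with
 * the current headroom and fclone == false. Typical use is header
 * rewriting where the shared paged data is left untouched:
 */
static struct sk_buff *example_rewrite_header(struct sk_buff *skb,
					      const void *new_hdr,
					      unsigned int hdrlen)
{
	struct sk_buff *copy = pskb_copy(skb, GFP_ATOMIC);

	/* only the linear header is private; frags remain shared */
	if (copy && skb_headlen(copy) >= hdrlen)
		memcpy(copy->data, new_hdr, hdrlen);
	return copy;
}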
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1432 | |
| 1433 | /** |
| 1434 | * pskb_expand_head - reallocate header of &sk_buff |
| 1435 | * @skb: buffer to reallocate |
| 1436 | * @nhead: room to add at head |
| 1437 | * @ntail: room to add at tail |
| 1438 | * @gfp_mask: allocation priority |
| 1439 | * |
Mathias Krause | bc32383 | 2013-11-07 14:18:26 +0100 | [diff] [blame] | 1440 | * Expands (or creates an identical copy, if @nhead and @ntail are zero) the |
| 1441 | * header of @skb. The &sk_buff itself is not changed and MUST have a |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1442 | * reference count of 1. Returns zero on success, or a negative error code |
| 1443 | * if expansion failed. In the latter case, the &sk_buff is not changed. |
| 1444 | * |
| 1445 | * All the pointers pointing into skb header may change and must be |
| 1446 | * reloaded after call to this function. |
| 1447 | */ |
| 1448 | |
Victor Fusco | 86a76ca | 2005-07-08 14:57:47 -0700 | [diff] [blame] | 1449 | int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, |
Al Viro | dd0fc66 | 2005-10-07 07:46:04 +0100 | [diff] [blame] | 1450 | gfp_t gfp_mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1451 | { |
Eric Dumazet | 158f323 | 2017-01-27 07:11:27 -0800 | [diff] [blame] | 1452 | int i, osize = skb_end_offset(skb); |
| 1453 | int size = osize + nhead + ntail; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1454 | long off; |
Eric Dumazet | 158f323 | 2017-01-27 07:11:27 -0800 | [diff] [blame] | 1455 | u8 *data; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1456 | |
Herbert Xu | 4edd87a | 2008-10-01 07:09:38 -0700 | [diff] [blame] | 1457 | BUG_ON(nhead < 0); |
| 1458 | |
Tim Hansen | 9f77fad | 2017-10-09 11:37:59 -0400 | [diff] [blame] | 1459 | BUG_ON(skb_shared(skb)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1460 | |
| 1461 | size = SKB_DATA_ALIGN(size); |
| 1462 | |
Mel Gorman | c93bdd0 | 2012-07-31 16:44:19 -0700 | [diff] [blame] | 1463 | if (skb_pfmemalloc(skb)) |
| 1464 | gfp_mask |= __GFP_MEMALLOC; |
| 1465 | data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), |
| 1466 | gfp_mask, NUMA_NO_NODE, NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1467 | if (!data) |
| 1468 | goto nodata; |
Eric Dumazet | 87151b8 | 2012-04-10 20:08:39 +0000 | [diff] [blame] | 1469 | size = SKB_WITH_OVERHEAD(ksize(data)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1470 | |
| 1471 | /* Copy only real data... and, alas, header. This should be |
Eric Dumazet | 6602ceb | 2010-09-01 05:25:10 +0000 | [diff] [blame] | 1472 | * optimized for the case when the header is empty. |
| 1473 | */ |
| 1474 | memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head); |
| 1475 | |
| 1476 | memcpy((struct skb_shared_info *)(data + size), |
| 1477 | skb_shinfo(skb), |
Eric Dumazet | fed6638 | 2010-07-22 19:09:08 +0000 | [diff] [blame] | 1478 | offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1479 | |
Alexander Duyck | 3e24591 | 2012-05-04 14:26:51 +0000 | [diff] [blame] | 1480 | /* |
| 1481 | * if shinfo is shared we must drop the old head gracefully, but if it |
| 1482 | * is not we can just drop the old head and keep the existing refcount |
| 1483 | * as-is, since all we did was relocate the values |
| 1484 | */ |
| 1485 | if (skb_cloned(skb)) { |
Michael S. Tsirkin | 70008aa | 2012-07-20 09:23:10 +0000 | [diff] [blame] | 1486 | if (skb_orphan_frags(skb, gfp_mask)) |
| 1487 | goto nofrags; |
Willem de Bruijn | 1f8b977 | 2017-08-03 16:29:41 -0400 | [diff] [blame] | 1488 | if (skb_zcopy(skb)) |
Eric Dumazet | c1d1b43 | 2017-08-31 16:48:22 -0700 | [diff] [blame] | 1489 | refcount_inc(&skb_uarg(skb)->refcnt); |
Eric Dumazet | 1fd6304 | 2010-09-02 23:09:32 +0000 | [diff] [blame] | 1490 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 1491 | skb_frag_ref(skb, i); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1492 | |
Eric Dumazet | 1fd6304 | 2010-09-02 23:09:32 +0000 | [diff] [blame] | 1493 | if (skb_has_frag_list(skb)) |
| 1494 | skb_clone_fraglist(skb); |
| 1495 | |
| 1496 | skb_release_data(skb); |
Alexander Duyck | 3e24591 | 2012-05-04 14:26:51 +0000 | [diff] [blame] | 1497 | } else { |
| 1498 | skb_free_head(skb); |
Eric Dumazet | 1fd6304 | 2010-09-02 23:09:32 +0000 | [diff] [blame] | 1499 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1500 | off = (data + nhead) - skb->head; |
| 1501 | |
| 1502 | skb->head = data; |
Eric Dumazet | d3836f2 | 2012-04-27 00:33:38 +0000 | [diff] [blame] | 1503 | skb->head_frag = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1504 | skb->data += off; |
Arnaldo Carvalho de Melo | 4305b54 | 2007-04-19 20:43:29 -0700 | [diff] [blame] | 1505 | #ifdef NET_SKBUFF_DATA_USES_OFFSET |
| 1506 | skb->end = size; |
Patrick McHardy | 56eb888 | 2007-04-09 11:45:04 -0700 | [diff] [blame] | 1507 | off = nhead; |
Arnaldo Carvalho de Melo | 4305b54 | 2007-04-19 20:43:29 -0700 | [diff] [blame] | 1508 | #else |
| 1509 | skb->end = skb->head + size; |
Patrick McHardy | 56eb888 | 2007-04-09 11:45:04 -0700 | [diff] [blame] | 1510 | #endif |
Arnaldo Carvalho de Melo | 27a884d | 2007-04-19 20:29:13 -0700 | [diff] [blame] | 1511 | skb->tail += off; |
Peter Pan(潘卫平) | b41abb4 | 2013-06-06 21:27:21 +0800 | [diff] [blame] | 1512 | skb_headers_offset_update(skb, nhead); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1513 | skb->cloned = 0; |
Patrick McHardy | 334a813 | 2007-06-25 04:35:20 -0700 | [diff] [blame] | 1514 | skb->hdr_len = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1515 | skb->nohdr = 0; |
| 1516 | atomic_set(&skb_shinfo(skb)->dataref, 1); |
Eric Dumazet | 158f323 | 2017-01-27 07:11:27 -0800 | [diff] [blame] | 1517 | |
Daniel Borkmann | de8f3a8 | 2017-09-25 02:25:51 +0200 | [diff] [blame] | 1518 | skb_metadata_clear(skb); |
| 1519 | |
Eric Dumazet | 158f323 | 2017-01-27 07:11:27 -0800 | [diff] [blame] | 1520 | /* It is not generally safe to change skb->truesize. |
| 1521 | * For the moment, we only really care about the rx path, or |
| 1522 | * when the skb is orphaned (not attached to a socket). |
| 1523 | */ |
| 1524 | if (!skb->sk || skb->destructor == sock_edemux) |
| 1525 | skb->truesize += size - osize; |
| 1526 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1527 | return 0; |
| 1528 | |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1529 | nofrags: |
| 1530 | kfree(data); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1531 | nodata: |
| 1532 | return -ENOMEM; |
| 1533 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1534 | EXPORT_SYMBOL(pskb_expand_head); |
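/*
 * Illustrative sketch (editor's addition): guaranteeing headroom before a
 * header push. This is essentially what the skb_cow()/skb_cow_head()
 * inlines in skbuff.h do; remember that every cached pointer into the old
 * header must be reloaded after a successful call, as documented above.
 */
static int example_ensure_headroom(struct sk_buff *skb, unsigned int needed)
{
	int delta = needed - skb_headroom(skb);

	if (delta <= 0 && !skb_cloned(skb))
		return 0;	/* enough room and a private head already */
	return pskb_expand_head(skb, delta > 0 ? SKB_DATA_ALIGN(delta) : 0,
				0, GFP_ATOMIC);
}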
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1535 | |
| 1536 | /* Make a private copy of the skb with a writable head and some headroom */ |
| 1537 | |
| 1538 | struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) |
| 1539 | { |
| 1540 | struct sk_buff *skb2; |
| 1541 | int delta = headroom - skb_headroom(skb); |
| 1542 | |
| 1543 | if (delta <= 0) |
| 1544 | skb2 = pskb_copy(skb, GFP_ATOMIC); |
| 1545 | else { |
| 1546 | skb2 = skb_clone(skb, GFP_ATOMIC); |
| 1547 | if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0, |
| 1548 | GFP_ATOMIC)) { |
| 1549 | kfree_skb(skb2); |
| 1550 | skb2 = NULL; |
| 1551 | } |
| 1552 | } |
| 1553 | return skb2; |
| 1554 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1555 | EXPORT_SYMBOL(skb_realloc_headroom); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1556 | |
| 1557 | /** |
| 1558 | * skb_copy_expand - copy and expand sk_buff |
| 1559 | * @skb: buffer to copy |
| 1560 | * @newheadroom: new free bytes at head |
| 1561 | * @newtailroom: new free bytes at tail |
| 1562 | * @gfp_mask: allocation priority |
| 1563 | * |
| 1564 | * Make a copy of both an &sk_buff and its data and while doing so |
| 1565 | * allocate additional space. |
| 1566 | * |
| 1567 | * This is used when the caller wishes to modify the data and needs a |
| 1568 | * private copy of the data to alter as well as more space for new fields. |
| 1569 | * Returns %NULL on failure or the pointer to the buffer |
| 1570 | * on success. The returned buffer has a reference count of 1. |
| 1571 | * |
| 1572 | * You must pass %GFP_ATOMIC as the allocation priority if this function |
| 1573 | * is called from an interrupt. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1574 | */ |
| 1575 | struct sk_buff *skb_copy_expand(const struct sk_buff *skb, |
Victor Fusco | 86a76ca | 2005-07-08 14:57:47 -0700 | [diff] [blame] | 1576 | int newheadroom, int newtailroom, |
Al Viro | dd0fc66 | 2005-10-07 07:46:04 +0100 | [diff] [blame] | 1577 | gfp_t gfp_mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1578 | { |
| 1579 | /* |
| 1580 | * Allocate the copy buffer |
| 1581 | */ |
Mel Gorman | c93bdd0 | 2012-07-31 16:44:19 -0700 | [diff] [blame] | 1582 | struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom, |
| 1583 | gfp_mask, skb_alloc_rx_flag(skb), |
| 1584 | NUMA_NO_NODE); |
Patrick McHardy | efd1e8d | 2007-04-10 18:30:09 -0700 | [diff] [blame] | 1585 | int oldheadroom = skb_headroom(skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1586 | int head_copy_len, head_copy_off; |
| 1587 | |
| 1588 | if (!n) |
| 1589 | return NULL; |
| 1590 | |
| 1591 | skb_reserve(n, newheadroom); |
| 1592 | |
| 1593 | /* Set the tail pointer and length */ |
| 1594 | skb_put(n, skb->len); |
| 1595 | |
Patrick McHardy | efd1e8d | 2007-04-10 18:30:09 -0700 | [diff] [blame] | 1596 | head_copy_len = oldheadroom; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1597 | head_copy_off = 0; |
| 1598 | if (newheadroom <= head_copy_len) |
| 1599 | head_copy_len = newheadroom; |
| 1600 | else |
| 1601 | head_copy_off = newheadroom - head_copy_len; |
| 1602 | |
| 1603 | /* Copy the linear header and data. */ |
Tim Hansen | 9f77fad | 2017-10-09 11:37:59 -0400 | [diff] [blame] | 1604 | BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, |
| 1605 | skb->len + head_copy_len)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1606 | |
Ilya Lesokhin | 08303c1 | 2018-04-30 10:16:11 +0300 | [diff] [blame] | 1607 | skb_copy_header(n, skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1608 | |
Eric Dumazet | 030737b | 2013-10-19 11:42:54 -0700 | [diff] [blame] | 1609 | skb_headers_offset_update(n, newheadroom - oldheadroom); |
Patrick McHardy | efd1e8d | 2007-04-10 18:30:09 -0700 | [diff] [blame] | 1610 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1611 | return n; |
| 1612 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1613 | EXPORT_SYMBOL(skb_copy_expand); |
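/*
 * Illustrative sketch (editor's addition): copying while reserving extra
 * headroom for an encapsulation header to be pushed later.
 * EXAMPLE_TUN_HLEN is a hypothetical constant.
 */
#define EXAMPLE_TUN_HLEN 32

static struct sk_buff *example_copy_for_encap(struct sk_buff *skb)
{
	return skb_copy_expand(skb, skb_headroom(skb) + EXAMPLE_TUN_HLEN,
			       skb_tailroom(skb), GFP_ATOMIC);
}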
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1614 | |
| 1615 | /** |
Florian Fainelli | cd0a137 | 2017-08-22 15:12:14 -0700 | [diff] [blame] | 1616 | * __skb_pad - zero pad the tail of an skb |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1617 | * @skb: buffer to pad |
| 1618 | * @pad: space to pad |
Florian Fainelli | cd0a137 | 2017-08-22 15:12:14 -0700 | [diff] [blame] | 1619 | * @free_on_error: free buffer on error |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1620 | * |
| 1621 | * Ensure that a buffer is followed by a padding area that is zero |
| 1622 | * filled. Used by network drivers which may DMA or transfer data |
| 1623 | * beyond the buffer end onto the wire. |
| 1624 | * |
Florian Fainelli | cd0a137 | 2017-08-22 15:12:14 -0700 | [diff] [blame] | 1625 | * May return an error in out-of-memory cases. The skb is freed on error |
| 1626 | * if @free_on_error is true. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1627 | */ |
YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 1628 | |
Florian Fainelli | cd0a137 | 2017-08-22 15:12:14 -0700 | [diff] [blame] | 1629 | int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1630 | { |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1631 | int err; |
| 1632 | int ntail; |
YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 1633 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1634 | /* If the skbuff is non linear tailroom is always zero.. */ |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1635 | if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1636 | memset(skb->data+skb->len, 0, pad); |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1637 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1638 | } |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1639 | |
Arnaldo Carvalho de Melo | 4305b54 | 2007-04-19 20:43:29 -0700 | [diff] [blame] | 1640 | ntail = skb->data_len + pad - (skb->end - skb->tail); |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1641 | if (likely(skb_cloned(skb) || ntail > 0)) { |
| 1642 | err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); |
| 1643 | if (unlikely(err)) |
| 1644 | goto free_skb; |
| 1645 | } |
| 1646 | |
| 1647 | /* FIXME: The use of this function with non-linear skb's really needs |
| 1648 | * to be audited. |
| 1649 | */ |
| 1650 | err = skb_linearize(skb); |
| 1651 | if (unlikely(err)) |
| 1652 | goto free_skb; |
| 1653 | |
| 1654 | memset(skb->data + skb->len, 0, pad); |
| 1655 | return 0; |
| 1656 | |
| 1657 | free_skb: |
Florian Fainelli | cd0a137 | 2017-08-22 15:12:14 -0700 | [diff] [blame] | 1658 | if (free_on_error) |
| 1659 | kfree_skb(skb); |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1660 | return err; |
YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 1661 | } |
Florian Fainelli | cd0a137 | 2017-08-22 15:12:14 -0700 | [diff] [blame] | 1662 | EXPORT_SYMBOL(__skb_pad); |
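/*
 * Illustrative sketch (editor's addition): padding a short Ethernet frame
 * to the 60-byte minimum before handing it to hardware. The skb_pad() and
 * skb_put_padto() inlines in skbuff.h wrap __skb_pad(); ETH_ZLEN comes from
 * <linux/if_ether.h>.
 */
static int example_pad_eth(struct sk_buff *skb)
{
	if (skb->len >= ETH_ZLEN)
		return 0;
	/* zero-fills the tail, extends skb->len, frees the skb on error */
	return skb_put_padto(skb, ETH_ZLEN);
}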
YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 1663 | |
Ilpo Järvinen | 0dde3e1 | 2008-03-27 17:43:41 -0700 | [diff] [blame] | 1664 | /** |
Mathias Krause | 0c7ddf3 | 2013-11-07 14:18:24 +0100 | [diff] [blame] | 1665 | * pskb_put - add data to the tail of a potentially fragmented buffer |
| 1666 | * @skb: start of the buffer to use |
| 1667 | * @tail: tail fragment of the buffer to use |
| 1668 | * @len: amount of data to add |
| 1669 | * |
| 1670 | * This function extends the used data area of the potentially |
| 1671 | * fragmented buffer. @tail must be the last fragment of @skb -- or |
| 1672 | * @skb itself. If this would exceed the total buffer size the kernel |
| 1673 | * will panic. A pointer to the first byte of the extra data is |
| 1674 | * returned. |
| 1675 | */ |
| 1676 | |
Johannes Berg | 4df864c | 2017-06-16 14:29:21 +0200 | [diff] [blame] | 1677 | void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) |
Mathias Krause | 0c7ddf3 | 2013-11-07 14:18:24 +0100 | [diff] [blame] | 1678 | { |
| 1679 | if (tail != skb) { |
| 1680 | skb->data_len += len; |
| 1681 | skb->len += len; |
| 1682 | } |
| 1683 | return skb_put(tail, len); |
| 1684 | } |
| 1685 | EXPORT_SYMBOL_GPL(pskb_put); |
| 1686 | |
| 1687 | /** |
Ilpo Järvinen | 0dde3e1 | 2008-03-27 17:43:41 -0700 | [diff] [blame] | 1688 | * skb_put - add data to a buffer |
| 1689 | * @skb: buffer to use |
| 1690 | * @len: amount of data to add |
| 1691 | * |
| 1692 | * This function extends the used data area of the buffer. If this would |
| 1693 | * exceed the total buffer size the kernel will panic. A pointer to the |
| 1694 | * first byte of the extra data is returned. |
| 1695 | */ |
Johannes Berg | 4df864c | 2017-06-16 14:29:21 +0200 | [diff] [blame] | 1696 | void *skb_put(struct sk_buff *skb, unsigned int len) |
Ilpo Järvinen | 0dde3e1 | 2008-03-27 17:43:41 -0700 | [diff] [blame] | 1697 | { |
Johannes Berg | 4df864c | 2017-06-16 14:29:21 +0200 | [diff] [blame] | 1698 | void *tmp = skb_tail_pointer(skb); |
Ilpo Järvinen | 0dde3e1 | 2008-03-27 17:43:41 -0700 | [diff] [blame] | 1699 | SKB_LINEAR_ASSERT(skb); |
| 1700 | skb->tail += len; |
| 1701 | skb->len += len; |
| 1702 | if (unlikely(skb->tail > skb->end)) |
| 1703 | skb_over_panic(skb, len, __builtin_return_address(0)); |
| 1704 | return tmp; |
| 1705 | } |
| 1706 | EXPORT_SYMBOL(skb_put); |
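/*
 * Illustrative sketch (editor's addition): the canonical build pattern --
 * reserve header space up front, then append the payload with skb_put().
 * example_build() is a hypothetical name.
 */
static struct sk_buff *example_build(const void *payload, unsigned int plen,
				     unsigned int hlen)
{
	struct sk_buff *skb = alloc_skb(hlen + plen, GFP_ATOMIC);

	if (!skb)
		return NULL;
	skb_reserve(skb, hlen);			/* headroom for later pushes */
	memcpy(skb_put(skb, plen), payload, plen);
	return skb;
}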
| 1707 | |
Ilpo Järvinen | 6be8ac2 | 2008-03-27 17:47:24 -0700 | [diff] [blame] | 1708 | /** |
Ilpo Järvinen | c2aa270 | 2008-03-27 17:52:40 -0700 | [diff] [blame] | 1709 | * skb_push - add data to the start of a buffer |
| 1710 | * @skb: buffer to use |
| 1711 | * @len: amount of data to add |
| 1712 | * |
| 1713 | * This function extends the used data area of the buffer at the buffer |
| 1714 | * start. If this would exceed the total buffer headroom the kernel will |
| 1715 | * panic. A pointer to the first byte of the extra data is returned. |
| 1716 | */ |
Johannes Berg | d58ff35 | 2017-06-16 14:29:23 +0200 | [diff] [blame] | 1717 | void *skb_push(struct sk_buff *skb, unsigned int len) |
Ilpo Järvinen | c2aa270 | 2008-03-27 17:52:40 -0700 | [diff] [blame] | 1718 | { |
| 1719 | skb->data -= len; |
| 1720 | skb->len += len; |
Ganesh Goudar | 9aba2f8 | 2018-08-02 15:34:52 +0530 | [diff] [blame] | 1721 | if (unlikely(skb->data < skb->head)) |
Ilpo Järvinen | c2aa270 | 2008-03-27 17:52:40 -0700 | [diff] [blame] | 1722 | skb_under_panic(skb, len, __builtin_return_address(0)); |
| 1723 | return skb->data; |
| 1724 | } |
| 1725 | EXPORT_SYMBOL(skb_push); |
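/*
 * Illustrative sketch (editor's addition): prepending a header into the
 * headroom reserved at allocation time. struct example_hdr is hypothetical.
 */
struct example_hdr {
	__be16 proto;
	__be16 len;
};

static void example_push_hdr(struct sk_buff *skb, __be16 proto)
{
	struct example_hdr *h = skb_push(skb, sizeof(*h));

	h->proto = proto;
	h->len = htons((u16)skb->len);
}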
| 1726 | |
| 1727 | /** |
Ilpo Järvinen | 6be8ac2 | 2008-03-27 17:47:24 -0700 | [diff] [blame] | 1728 | * skb_pull - remove data from the start of a buffer |
| 1729 | * @skb: buffer to use |
| 1730 | * @len: amount of data to remove |
| 1731 | * |
| 1732 | * This function removes data from the start of a buffer, returning |
| 1733 | * the memory to the headroom. A pointer to the next data in the buffer |
| 1734 | * is returned. Once the data has been pulled, future pushes will overwrite |
| 1735 | * the old data. |
| 1736 | */ |
Johannes Berg | af72868 | 2017-06-16 14:29:22 +0200 | [diff] [blame] | 1737 | void *skb_pull(struct sk_buff *skb, unsigned int len) |
Ilpo Järvinen | 6be8ac2 | 2008-03-27 17:47:24 -0700 | [diff] [blame] | 1738 | { |
David S. Miller | 47d2964 | 2010-05-02 02:21:44 -0700 | [diff] [blame] | 1739 | return skb_pull_inline(skb, len); |
Ilpo Järvinen | 6be8ac2 | 2008-03-27 17:47:24 -0700 | [diff] [blame] | 1740 | } |
| 1741 | EXPORT_SYMBOL(skb_pull); |
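/*
 * Illustrative sketch (editor's addition): consuming a header on receive,
 * using the hypothetical struct example_hdr sketched after skb_push()
 * above. The bytes must be in the linear area first, hence the
 * pskb_may_pull() check.
 */
static int example_parse_hdr(struct sk_buff *skb)
{
	const struct example_hdr *h;

	if (!pskb_may_pull(skb, sizeof(*h)))
		return -EINVAL;
	h = (const struct example_hdr *)skb->data;
	skb_pull(skb, sizeof(*h));	/* advance past the header */
	return ntohs(h->len);
}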
| 1742 | |
Ilpo Järvinen | 419ae74 | 2008-03-27 17:54:01 -0700 | [diff] [blame] | 1743 | /** |
| 1744 | * skb_trim - remove end from a buffer |
| 1745 | * @skb: buffer to alter |
| 1746 | * @len: new length |
| 1747 | * |
| 1748 | * Cut the length of a buffer down by removing data from the tail. If |
| 1749 | * the buffer is already under the length specified, it is not modified. |
| 1750 | * The skb must be linear. |
| 1751 | */ |
| 1752 | void skb_trim(struct sk_buff *skb, unsigned int len) |
| 1753 | { |
| 1754 | if (skb->len > len) |
| 1755 | __skb_trim(skb, len); |
| 1756 | } |
| 1757 | EXPORT_SYMBOL(skb_trim); |
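/*
 * Illustrative sketch (editor's addition): dropping trailing padding once
 * the true payload length is known. skb_trim() is for linear skbs; use
 * pskb_trim() when fragments may be present.
 */
static void example_strip_padding(struct sk_buff *skb, unsigned int real_len)
{
	skb_trim(skb, real_len);	/* no-op if already short enough */
}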
| 1758 | |
Herbert Xu | 3cc0e87 | 2006-06-09 16:13:38 -0700 | [diff] [blame] | 1759 | /* Trims skb to length len. It can change skb pointers. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1760 | */ |
| 1761 | |
Herbert Xu | 3cc0e87 | 2006-06-09 16:13:38 -0700 | [diff] [blame] | 1762 | int ___pskb_trim(struct sk_buff *skb, unsigned int len) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1763 | { |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1764 | struct sk_buff **fragp; |
| 1765 | struct sk_buff *frag; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1766 | int offset = skb_headlen(skb); |
| 1767 | int nfrags = skb_shinfo(skb)->nr_frags; |
| 1768 | int i; |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1769 | int err; |
| 1770 | |
| 1771 | if (skb_cloned(skb) && |
| 1772 | unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) |
| 1773 | return err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1774 | |
Herbert Xu | f4d26fb | 2006-07-30 20:20:28 -0700 | [diff] [blame] | 1775 | i = 0; |
| 1776 | if (offset >= len) |
| 1777 | goto drop_pages; |
| 1778 | |
| 1779 | for (; i < nfrags; i++) { |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 1780 | int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1781 | |
| 1782 | if (end < len) { |
| 1783 | offset = end; |
| 1784 | continue; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1785 | } |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1786 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 1787 | skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1788 | |
Herbert Xu | f4d26fb | 2006-07-30 20:20:28 -0700 | [diff] [blame] | 1789 | drop_pages: |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1790 | skb_shinfo(skb)->nr_frags = i; |
| 1791 | |
| 1792 | for (; i < nfrags; i++) |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 1793 | skb_frag_unref(skb, i); |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1794 | |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 1795 | if (skb_has_frag_list(skb)) |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1796 | skb_drop_fraglist(skb); |
Herbert Xu | f4d26fb | 2006-07-30 20:20:28 -0700 | [diff] [blame] | 1797 | goto done; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1798 | } |
| 1799 | |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1800 | for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); |
| 1801 | fragp = &frag->next) { |
| 1802 | int end = offset + frag->len; |
| 1803 | |
| 1804 | if (skb_shared(frag)) { |
| 1805 | struct sk_buff *nfrag; |
| 1806 | |
| 1807 | nfrag = skb_clone(frag, GFP_ATOMIC); |
| 1808 | if (unlikely(!nfrag)) |
| 1809 | return -ENOMEM; |
| 1810 | |
| 1811 | nfrag->next = frag->next; |
Eric Dumazet | 85bb2a6 | 2012-04-19 02:24:53 +0000 | [diff] [blame] | 1812 | consume_skb(frag); |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1813 | frag = nfrag; |
| 1814 | *fragp = frag; |
| 1815 | } |
| 1816 | |
| 1817 | if (end < len) { |
| 1818 | offset = end; |
| 1819 | continue; |
| 1820 | } |
| 1821 | |
| 1822 | if (end > len && |
| 1823 | unlikely((err = pskb_trim(frag, len - offset)))) |
| 1824 | return err; |
| 1825 | |
| 1826 | if (frag->next) |
| 1827 | skb_drop_list(&frag->next); |
| 1828 | break; |
| 1829 | } |
| 1830 | |
Herbert Xu | f4d26fb | 2006-07-30 20:20:28 -0700 | [diff] [blame] | 1831 | done: |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1832 | if (len > skb_headlen(skb)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1833 | skb->data_len -= skb->len - len; |
| 1834 | skb->len = len; |
| 1835 | } else { |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1836 | skb->len = len; |
| 1837 | skb->data_len = 0; |
Arnaldo Carvalho de Melo | 27a884d | 2007-04-19 20:29:13 -0700 | [diff] [blame] | 1838 | skb_set_tail_pointer(skb, len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1839 | } |
| 1840 | |
Eric Dumazet | c21b48c | 2017-04-26 09:07:46 -0700 | [diff] [blame] | 1841 | if (!skb->sk || skb->destructor == sock_edemux) |
| 1842 | skb_condense(skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1843 | return 0; |
| 1844 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1845 | EXPORT_SYMBOL(___pskb_trim); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1846 | |
Eric Dumazet | 88078d9 | 2018-04-18 11:43:15 -0700 | [diff] [blame] | 1847 | /* Note : use pskb_trim_rcsum() instead of calling this directly |
| 1848 | */ |
| 1849 | int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len) |
| 1850 | { |
| 1851 | if (skb->ip_summed == CHECKSUM_COMPLETE) { |
| 1852 | int delta = skb->len - len; |
| 1853 | |
Dimitris Michailidis | d55bef50 | 2018-10-19 17:07:13 -0700 | [diff] [blame] | 1854 | skb->csum = csum_block_sub(skb->csum, |
| 1855 | skb_checksum(skb, len, delta, 0), |
| 1856 | len); |
Eric Dumazet | 88078d9 | 2018-04-18 11:43:15 -0700 | [diff] [blame] | 1857 | } |
| 1858 | return __pskb_trim(skb, len); |
| 1859 | } |
| 1860 | EXPORT_SYMBOL(pskb_trim_rcsum_slow); |
| 1861 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1862 | /** |
| 1863 | * __pskb_pull_tail - advance tail of skb header |
| 1864 | * @skb: buffer to reallocate |
| 1865 | * @delta: number of bytes to advance tail |
| 1866 | * |
| 1867 | * The function makes sense only on a fragmented &sk_buff; |
| 1868 | * it expands the header, moving its tail forward and copying necessary |
| 1869 | * data from the fragmented part. |
| 1870 | * |
| 1871 | * &sk_buff MUST have reference count of 1. |
| 1872 | * |
| 1873 | * Returns %NULL (and the &sk_buff does not change) if the pull failed, |
| 1874 | * or the value of the new tail of the skb on success. |
| 1875 | * |
| 1876 | * All the pointers pointing into skb header may change and must be |
| 1877 | * reloaded after call to this function. |
| 1878 | */ |
| 1879 | |
| 1880 | /* Moves the tail of the skb head forward, copying data from the |
| 1881 | * fragmented part when necessary. |
| 1882 | * 1. It may fail due to a memory allocation failure. |
| 1883 | * 2. It may change skb pointers. |
| 1884 | * |
| 1885 | * It is pretty complicated. Luckily, it is called only in exceptional cases. |
| 1886 | */ |
Johannes Berg | af72868 | 2017-06-16 14:29:22 +0200 | [diff] [blame] | 1887 | void *__pskb_pull_tail(struct sk_buff *skb, int delta) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1888 | { |
| 1889 | /* If the skb has too little free space at the tail, allocate a new |
| 1890 | * buffer plus 128 bytes for future expansion. If there is enough |
| 1891 | * room at the tail, reallocate without expansion only if the skb is cloned. |
| 1892 | */ |
Arnaldo Carvalho de Melo | 4305b54 | 2007-04-19 20:43:29 -0700 | [diff] [blame] | 1893 | int i, k, eat = (skb->tail + delta) - skb->end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1894 | |
| 1895 | if (eat > 0 || skb_cloned(skb)) { |
| 1896 | if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, |
| 1897 | GFP_ATOMIC)) |
| 1898 | return NULL; |
| 1899 | } |
| 1900 | |
Tim Hansen | 9f77fad | 2017-10-09 11:37:59 -0400 | [diff] [blame] | 1901 | BUG_ON(skb_copy_bits(skb, skb_headlen(skb), |
| 1902 | skb_tail_pointer(skb), delta)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1903 | |
| 1904 | /* Optimization: no fragments, so there is no reason to pre-estimate |
| 1905 | * the size of the pulled pages. Superb. |
| 1906 | */ |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 1907 | if (!skb_has_frag_list(skb)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1908 | goto pull_pages; |
| 1909 | |
| 1910 | /* Estimate size of pulled pages. */ |
| 1911 | eat = delta; |
| 1912 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 1913 | int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); |
| 1914 | |
| 1915 | if (size >= eat) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1916 | goto pull_pages; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 1917 | eat -= size; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1918 | } |
| 1919 | |
| 1920 | /* If we need to update the frag list, we are in trouble. |
Wenhua Shi | 09001b0 | 2017-10-14 18:51:36 +0200 | [diff] [blame] | 1921 | * Certainly, it is possible to add an offset to the skb data, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1922 | * but given that pulling is expected to be a very rare |
| 1923 | * operation, it is worth fighting against further bloating of |
| 1924 | * the skb head and crucifying ourselves here instead. |
| 1925 | * Pure masochism, indeed. 8)8) |
| 1926 | */ |
| 1927 | if (eat) { |
| 1928 | struct sk_buff *list = skb_shinfo(skb)->frag_list; |
| 1929 | struct sk_buff *clone = NULL; |
| 1930 | struct sk_buff *insp = NULL; |
| 1931 | |
| 1932 | do { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1933 | if (list->len <= eat) { |
| 1934 | /* Eaten as whole. */ |
| 1935 | eat -= list->len; |
| 1936 | list = list->next; |
| 1937 | insp = list; |
| 1938 | } else { |
| 1939 | /* Eaten partially. */ |
| 1940 | |
| 1941 | if (skb_shared(list)) { |
| 1942 | /* Sucks! We need to fork the list. :-( */ |
| 1943 | clone = skb_clone(list, GFP_ATOMIC); |
| 1944 | if (!clone) |
| 1945 | return NULL; |
| 1946 | insp = list->next; |
| 1947 | list = clone; |
| 1948 | } else { |
| 1949 | /* This may be pulled without |
| 1950 | * problems. */ |
| 1951 | insp = list; |
| 1952 | } |
| 1953 | if (!pskb_pull(list, eat)) { |
Wei Yongjun | f3fbbe0 | 2009-02-25 00:37:32 +0000 | [diff] [blame] | 1954 | kfree_skb(clone); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1955 | return NULL; |
| 1956 | } |
| 1957 | break; |
| 1958 | } |
| 1959 | } while (eat); |
| 1960 | |
| 1961 | /* Free pulled out fragments. */ |
| 1962 | while ((list = skb_shinfo(skb)->frag_list) != insp) { |
| 1963 | skb_shinfo(skb)->frag_list = list->next; |
| 1964 | kfree_skb(list); |
| 1965 | } |
| 1966 | /* And insert new clone at head. */ |
| 1967 | if (clone) { |
| 1968 | clone->next = list; |
| 1969 | skb_shinfo(skb)->frag_list = clone; |
| 1970 | } |
| 1971 | } |
| 1972 | /* Success! Now we may commit changes to skb data. */ |
| 1973 | |
| 1974 | pull_pages: |
| 1975 | eat = delta; |
| 1976 | k = 0; |
| 1977 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 1978 | int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); |
| 1979 | |
| 1980 | if (size <= eat) { |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 1981 | skb_frag_unref(skb, i); |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 1982 | eat -= size; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1983 | } else { |
| 1984 | skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i]; |
| 1985 | if (eat) { |
| 1986 | skb_shinfo(skb)->frags[k].page_offset += eat; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 1987 | skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat); |
linzhang | 3ccc6c6 | 2017-07-17 17:25:02 +0800 | [diff] [blame] | 1988 | if (!i) |
| 1989 | goto end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1990 | eat = 0; |
| 1991 | } |
| 1992 | k++; |
| 1993 | } |
| 1994 | } |
| 1995 | skb_shinfo(skb)->nr_frags = k; |
| 1996 | |
linzhang | 3ccc6c6 | 2017-07-17 17:25:02 +0800 | [diff] [blame] | 1997 | end: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1998 | skb->tail += delta; |
| 1999 | skb->data_len -= delta; |
| 2000 | |
Willem de Bruijn | 1f8b977 | 2017-08-03 16:29:41 -0400 | [diff] [blame] | 2001 | if (!skb->data_len) |
| 2002 | skb_zcopy_clear(skb, false); |
| 2003 | |
Arnaldo Carvalho de Melo | 27a884d | 2007-04-19 20:29:13 -0700 | [diff] [blame] | 2004 | return skb_tail_pointer(skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2005 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2006 | EXPORT_SYMBOL(__pskb_pull_tail); |
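
/* A minimal usage sketch (hypothetical caller, not from this file):
 * callers normally reach __pskb_pull_tail() through pskb_may_pull(),
 * which linearizes just enough header bytes. The IPv4 peek below is an
 * illustrative assumption, not code from skbuff.c.
 */
#include <linux/errno.h>
#include <linux/ip.h>
#include <linux/skbuff.h>

static int example_parse_ipv4(struct sk_buff *skb)
{
        const struct iphdr *iph;

        /* Ensure a minimal IPv4 header is linear; this may invoke
         * __pskb_pull_tail() and reallocate the head, so pointers into
         * the skb taken earlier must be reloaded afterwards.
         */
        if (!pskb_may_pull(skb, sizeof(*iph)))
                return -EINVAL;

        iph = (const struct iphdr *)skb->data;
        return iph->protocol;
}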
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2007 | |
Eric Dumazet | 22019b1 | 2011-07-29 18:37:31 +0000 | [diff] [blame] | 2008 | /** |
| 2009 | * skb_copy_bits - copy bits from skb to kernel buffer |
| 2010 | * @skb: source skb |
| 2011 | * @offset: offset in source |
| 2012 | * @to: destination buffer |
| 2013 | * @len: number of bytes to copy |
| 2014 | * |
| 2015 | * Copy the specified number of bytes from the source skb to the |
| 2016 | * destination buffer. |
| 2017 | * |
| 2018 | * CAUTION: |
| 2019 | * If this function's prototype is ever changed, |
| 2020 | * check the arch/{*}/net/{*}.S files, |
| 2021 | * since it is called from BPF assembly code. |
| 2022 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2023 | int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) |
| 2024 | { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2025 | int start = skb_headlen(skb); |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2026 | struct sk_buff *frag_iter; |
| 2027 | int i, copy; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2028 | |
| 2029 | if (offset > (int)skb->len - len) |
| 2030 | goto fault; |
| 2031 | |
| 2032 | /* Copy header. */ |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2033 | if ((copy = start - offset) > 0) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2034 | if (copy > len) |
| 2035 | copy = len; |
Arnaldo Carvalho de Melo | d626f62 | 2007-03-27 18:55:52 -0300 | [diff] [blame] | 2036 | skb_copy_from_linear_data_offset(skb, offset, to, copy); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2037 | if ((len -= copy) == 0) |
| 2038 | return 0; |
| 2039 | offset += copy; |
| 2040 | to += copy; |
| 2041 | } |
| 2042 | |
| 2043 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2044 | int end; |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 2045 | skb_frag_t *f = &skb_shinfo(skb)->frags[i]; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2046 | |
Ilpo Järvinen | 547b792 | 2008-07-25 21:43:18 -0700 | [diff] [blame] | 2047 | WARN_ON(start > offset + len); |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2048 | |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 2049 | end = start + skb_frag_size(f); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2050 | if ((copy = end - offset) > 0) { |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 2051 | u32 p_off, p_len, copied; |
| 2052 | struct page *p; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2053 | u8 *vaddr; |
| 2054 | |
| 2055 | if (copy > len) |
| 2056 | copy = len; |
| 2057 | |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 2058 | skb_frag_foreach_page(f, |
| 2059 | f->page_offset + offset - start, |
| 2060 | copy, p, p_off, p_len, copied) { |
| 2061 | vaddr = kmap_atomic(p); |
| 2062 | memcpy(to + copied, vaddr + p_off, p_len); |
| 2063 | kunmap_atomic(vaddr); |
| 2064 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2065 | |
| 2066 | if ((len -= copy) == 0) |
| 2067 | return 0; |
| 2068 | offset += copy; |
| 2069 | to += copy; |
| 2070 | } |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2071 | start = end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2072 | } |
| 2073 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2074 | skb_walk_frags(skb, frag_iter) { |
| 2075 | int end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2076 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2077 | WARN_ON(start > offset + len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2078 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2079 | end = start + frag_iter->len; |
| 2080 | if ((copy = end - offset) > 0) { |
| 2081 | if (copy > len) |
| 2082 | copy = len; |
| 2083 | if (skb_copy_bits(frag_iter, offset - start, to, copy)) |
| 2084 | goto fault; |
| 2085 | if ((len -= copy) == 0) |
| 2086 | return 0; |
| 2087 | offset += copy; |
| 2088 | to += copy; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2089 | } |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2090 | start = end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2091 | } |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 2092 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2093 | if (!len) |
| 2094 | return 0; |
| 2095 | |
| 2096 | fault: |
| 2097 | return -EFAULT; |
| 2098 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2099 | EXPORT_SYMBOL(skb_copy_bits); |
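
/* A minimal usage sketch (hypothetical caller, not from this file):
 * peeking at the first bytes of a possibly non-linear skb; the 64-byte
 * cap is an arbitrary assumption for the example.
 */
#include <linux/kernel.h>
#include <linux/skbuff.h>

static int example_peek(const struct sk_buff *skb, void *buf)
{
        int len = min_t(int, 64, skb->len);

        /* Works no matter how the data is split across head and frags. */
        return skb_copy_bits(skb, 0, buf, len);
}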
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2100 | |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2101 | /* |
| 2102 | * Callback from splice_to_pipe(), used to release pages remaining in |
| 2103 | * the spd in case we errored out while filling the pipe. |
| 2104 | */ |
| 2105 | static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) |
| 2106 | { |
Jarek Poplawski | 8b9d372 | 2009-01-19 17:03:56 -0800 | [diff] [blame] | 2107 | put_page(spd->pages[i]); |
| 2108 | } |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2109 | |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2110 | static struct page *linear_to_page(struct page *page, unsigned int *len, |
| 2111 | unsigned int *offset, |
Eric Dumazet | 18aafc6 | 2013-01-11 14:46:37 +0000 | [diff] [blame] | 2112 | struct sock *sk) |
Jarek Poplawski | 8b9d372 | 2009-01-19 17:03:56 -0800 | [diff] [blame] | 2113 | { |
Eric Dumazet | 5640f76 | 2012-09-23 23:04:42 +0000 | [diff] [blame] | 2114 | struct page_frag *pfrag = sk_page_frag(sk); |
Jarek Poplawski | 8b9d372 | 2009-01-19 17:03:56 -0800 | [diff] [blame] | 2115 | |
Eric Dumazet | 5640f76 | 2012-09-23 23:04:42 +0000 | [diff] [blame] | 2116 | if (!sk_page_frag_refill(sk, pfrag)) |
| 2117 | return NULL; |
Jarek Poplawski | 4fb6699 | 2009-02-01 00:41:42 -0800 | [diff] [blame] | 2118 | |
Eric Dumazet | 5640f76 | 2012-09-23 23:04:42 +0000 | [diff] [blame] | 2119 | *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset); |
Jarek Poplawski | 4fb6699 | 2009-02-01 00:41:42 -0800 | [diff] [blame] | 2120 | |
Eric Dumazet | 5640f76 | 2012-09-23 23:04:42 +0000 | [diff] [blame] | 2121 | memcpy(page_address(pfrag->page) + pfrag->offset, |
| 2122 | page_address(page) + *offset, *len); |
| 2123 | *offset = pfrag->offset; |
| 2124 | pfrag->offset += *len; |
Jarek Poplawski | 4fb6699 | 2009-02-01 00:41:42 -0800 | [diff] [blame] | 2125 | |
Eric Dumazet | 5640f76 | 2012-09-23 23:04:42 +0000 | [diff] [blame] | 2126 | return pfrag->page; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2127 | } |
| 2128 | |
Eric Dumazet | 41c73a0 | 2012-04-22 12:26:16 +0000 | [diff] [blame] | 2129 | static bool spd_can_coalesce(const struct splice_pipe_desc *spd, |
| 2130 | struct page *page, |
| 2131 | unsigned int offset) |
| 2132 | { |
| 2133 | return spd->nr_pages && |
| 2134 | spd->pages[spd->nr_pages - 1] == page && |
| 2135 | (spd->partial[spd->nr_pages - 1].offset + |
| 2136 | spd->partial[spd->nr_pages - 1].len == offset); |
| 2137 | } |
| 2138 | |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2139 | /* |
| 2140 | * Fill page/offset/length into spd if it can hold more pages. |
| 2141 | */ |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2142 | static bool spd_fill_page(struct splice_pipe_desc *spd, |
| 2143 | struct pipe_inode_info *pipe, struct page *page, |
| 2144 | unsigned int *len, unsigned int offset, |
Eric Dumazet | 18aafc6 | 2013-01-11 14:46:37 +0000 | [diff] [blame] | 2145 | bool linear, |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2146 | struct sock *sk) |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2147 | { |
Eric Dumazet | 41c73a0 | 2012-04-22 12:26:16 +0000 | [diff] [blame] | 2148 | if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2149 | return true; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2150 | |
Jarek Poplawski | 8b9d372 | 2009-01-19 17:03:56 -0800 | [diff] [blame] | 2151 | if (linear) { |
Eric Dumazet | 18aafc6 | 2013-01-11 14:46:37 +0000 | [diff] [blame] | 2152 | page = linear_to_page(page, len, &offset, sk); |
Jarek Poplawski | 8b9d372 | 2009-01-19 17:03:56 -0800 | [diff] [blame] | 2153 | if (!page) |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2154 | return true; |
Eric Dumazet | 41c73a0 | 2012-04-22 12:26:16 +0000 | [diff] [blame] | 2155 | } |
| 2156 | if (spd_can_coalesce(spd, page, offset)) { |
| 2157 | spd->partial[spd->nr_pages - 1].len += *len; |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2158 | return false; |
Eric Dumazet | 41c73a0 | 2012-04-22 12:26:16 +0000 | [diff] [blame] | 2159 | } |
| 2160 | get_page(page); |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2161 | spd->pages[spd->nr_pages] = page; |
Jarek Poplawski | 4fb6699 | 2009-02-01 00:41:42 -0800 | [diff] [blame] | 2162 | spd->partial[spd->nr_pages].len = *len; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2163 | spd->partial[spd->nr_pages].offset = offset; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2164 | spd->nr_pages++; |
Jarek Poplawski | 8b9d372 | 2009-01-19 17:03:56 -0800 | [diff] [blame] | 2165 | |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2166 | return false; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2167 | } |
| 2168 | |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2169 | static bool __splice_segment(struct page *page, unsigned int poff, |
| 2170 | unsigned int plen, unsigned int *off, |
Eric Dumazet | 18aafc6 | 2013-01-11 14:46:37 +0000 | [diff] [blame] | 2171 | unsigned int *len, |
Eric Dumazet | d7ccf7c | 2012-04-23 23:35:04 -0400 | [diff] [blame] | 2172 | struct splice_pipe_desc *spd, bool linear, |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2173 | struct sock *sk, |
| 2174 | struct pipe_inode_info *pipe) |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 2175 | { |
| 2176 | if (!*len) |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2177 | return true; |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 2178 | |
| 2179 | /* skip this segment if already processed */ |
| 2180 | if (*off >= plen) { |
| 2181 | *off -= plen; |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2182 | return false; |
Octavian Purdila | db43a28 | 2008-06-27 17:27:21 -0700 | [diff] [blame] | 2183 | } |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2184 | |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 2185 | /* ignore any bits we already processed */ |
Eric Dumazet | 9ca1b22 | 2013-01-05 21:31:18 +0000 | [diff] [blame] | 2186 | poff += *off; |
| 2187 | plen -= *off; |
| 2188 | *off = 0; |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 2189 | |
Eric Dumazet | 18aafc6 | 2013-01-11 14:46:37 +0000 | [diff] [blame] | 2190 | do { |
| 2191 | unsigned int flen = min(*len, plen); |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 2192 | |
Eric Dumazet | 18aafc6 | 2013-01-11 14:46:37 +0000 | [diff] [blame] | 2193 | if (spd_fill_page(spd, pipe, page, &flen, poff, |
| 2194 | linear, sk)) |
| 2195 | return true; |
| 2196 | poff += flen; |
| 2197 | plen -= flen; |
| 2198 | *len -= flen; |
| 2199 | } while (*len && plen); |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 2200 | |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2201 | return false; |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 2202 | } |
| 2203 | |
| 2204 | /* |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2205 | * Map linear and fragment data from the skb to spd. It reports true if the |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 2206 | * pipe is full or if we already spliced the requested length. |
| 2207 | */ |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2208 | static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, |
| 2209 | unsigned int *offset, unsigned int *len, |
| 2210 | struct splice_pipe_desc *spd, struct sock *sk) |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 2211 | { |
| 2212 | int seg; |
Tom Herbert | fa9835e | 2016-03-07 14:11:04 -0800 | [diff] [blame] | 2213 | struct sk_buff *iter; |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 2214 | |
Eric Dumazet | 1d0c0b3 | 2012-04-27 02:10:03 +0000 | [diff] [blame] | 2215 | /* map the linear part: |
Alexander Duyck | 2996d31 | 2012-05-02 18:18:42 +0000 | [diff] [blame] | 2216 | * If skb->head_frag is set, this 'linear' part is backed by a |
| 2217 | * fragment, and if the head is not shared with any clones then |
| 2218 | * we can avoid a copy since we own the head portion of this page. |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2219 | */ |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 2220 | if (__splice_segment(virt_to_page(skb->data), |
| 2221 | (unsigned long) skb->data & (PAGE_SIZE - 1), |
| 2222 | skb_headlen(skb), |
Eric Dumazet | 18aafc6 | 2013-01-11 14:46:37 +0000 | [diff] [blame] | 2223 | offset, len, spd, |
Alexander Duyck | 3a7c1ee4 | 2012-05-03 01:09:42 +0000 | [diff] [blame] | 2224 | skb_head_is_locked(skb), |
Eric Dumazet | 1d0c0b3 | 2012-04-27 02:10:03 +0000 | [diff] [blame] | 2225 | sk, pipe)) |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2226 | return true; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2227 | |
| 2228 | /* |
| 2229 | * then map the fragments |
| 2230 | */ |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2231 | for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { |
| 2232 | const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; |
| 2233 | |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 2234 | if (__splice_segment(skb_frag_page(f), |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2235 | f->page_offset, skb_frag_size(f), |
Eric Dumazet | 18aafc6 | 2013-01-11 14:46:37 +0000 | [diff] [blame] | 2236 | offset, len, spd, false, sk, pipe)) |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2237 | return true; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2238 | } |
| 2239 | |
Tom Herbert | fa9835e | 2016-03-07 14:11:04 -0800 | [diff] [blame] | 2240 | skb_walk_frags(skb, iter) { |
| 2241 | if (*offset >= iter->len) { |
| 2242 | *offset -= iter->len; |
| 2243 | continue; |
| 2244 | } |
| 2245 | /* __skb_splice_bits() only fails if the output has no room |
| 2246 | * left, so no point in going over the frag_list for the error |
| 2247 | * case. |
| 2248 | */ |
| 2249 | if (__skb_splice_bits(iter, pipe, offset, len, spd, sk)) |
| 2250 | return true; |
| 2251 | } |
| 2252 | |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2253 | return false; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2254 | } |
| 2255 | |
| 2256 | /* |
| 2257 | * Map data from the skb to a pipe. Should handle the linear part, |
Tom Herbert | fa9835e | 2016-03-07 14:11:04 -0800 | [diff] [blame] | 2258 | * the fragments, and the frag list. |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2259 | */ |
Hannes Frederic Sowa | a60e3cc | 2015-05-21 17:00:00 +0200 | [diff] [blame] | 2260 | int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2261 | struct pipe_inode_info *pipe, unsigned int tlen, |
Al Viro | 2586926 | 2016-09-17 21:02:10 -0400 | [diff] [blame] | 2262 | unsigned int flags) |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2263 | { |
Eric Dumazet | 41c73a0 | 2012-04-22 12:26:16 +0000 | [diff] [blame] | 2264 | struct partial_page partial[MAX_SKB_FRAGS]; |
| 2265 | struct page *pages[MAX_SKB_FRAGS]; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2266 | struct splice_pipe_desc spd = { |
| 2267 | .pages = pages, |
| 2268 | .partial = partial, |
Eric Dumazet | 047fe36 | 2012-06-12 15:24:40 +0200 | [diff] [blame] | 2269 | .nr_pages_max = MAX_SKB_FRAGS, |
Miklos Szeredi | 28a625c | 2014-01-22 19:36:57 +0100 | [diff] [blame] | 2270 | .ops = &nosteal_pipe_buf_ops, |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2271 | .spd_release = sock_spd_release, |
| 2272 | }; |
Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 2273 | int ret = 0; |
| 2274 | |
Tom Herbert | fa9835e | 2016-03-07 14:11:04 -0800 | [diff] [blame] | 2275 | __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk); |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2276 | |
Hannes Frederic Sowa | a60e3cc | 2015-05-21 17:00:00 +0200 | [diff] [blame] | 2277 | if (spd.nr_pages) |
Al Viro | 2586926 | 2016-09-17 21:02:10 -0400 | [diff] [blame] | 2278 | ret = splice_to_pipe(pipe, &spd); |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2279 | |
Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 2280 | return ret; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2281 | } |
Hannes Frederic Sowa | 2b51457 | 2015-05-21 17:00:01 +0200 | [diff] [blame] | 2282 | EXPORT_SYMBOL_GPL(skb_splice_bits); |
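
/* A minimal usage sketch (hypothetical caller, not from this file):
 * roughly how a protocol's ->splice_read() path might feed one skb into
 * a pipe. The zero offset/flags and the omitted receive-queue walk are
 * assumptions for the example.
 */
#include <linux/skbuff.h>
#include <net/sock.h>

static int example_splice_one(struct sock *sk, struct sk_buff *skb,
                              struct pipe_inode_info *pipe,
                              unsigned int len)
{
        /* Returns the number of bytes spliced, or a negative errno. */
        return skb_splice_bits(skb, sk, 0, pipe, len, 0);
}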
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2283 | |
Tom Herbert | 20bf50d | 2017-07-28 16:22:42 -0700 | [diff] [blame] | 2284 | /* Send skb data on a socket. Socket must be locked. */ |
| 2285 | int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, |
| 2286 | int len) |
| 2287 | { |
| 2288 | unsigned int orig_len = len; |
| 2289 | struct sk_buff *head = skb; |
| 2290 | unsigned short fragidx; |
| 2291 | int slen, ret; |
| 2292 | |
| 2293 | do_frag_list: |
| 2294 | |
| 2295 | /* Deal with head data */ |
| 2296 | while (offset < skb_headlen(skb) && len) { |
| 2297 | struct kvec kv; |
| 2298 | struct msghdr msg; |
| 2299 | |
| 2300 | slen = min_t(int, len, skb_headlen(skb) - offset); |
| 2301 | kv.iov_base = skb->data + offset; |
John Fastabend | db5980d | 2017-08-15 22:31:34 -0700 | [diff] [blame] | 2302 | kv.iov_len = slen; |
Tom Herbert | 20bf50d | 2017-07-28 16:22:42 -0700 | [diff] [blame] | 2303 | memset(&msg, 0, sizeof(msg)); |
| 2304 | |
| 2305 | ret = kernel_sendmsg_locked(sk, &msg, &kv, 1, slen); |
| 2306 | if (ret <= 0) |
| 2307 | goto error; |
| 2308 | |
| 2309 | offset += ret; |
| 2310 | len -= ret; |
| 2311 | } |
| 2312 | |
| 2313 | /* Was all the data in the skb head? */ |
| 2314 | if (!len) |
| 2315 | goto out; |
| 2316 | |
| 2317 | /* Make offset relative to start of frags */ |
| 2318 | offset -= skb_headlen(skb); |
| 2319 | |
| 2320 | /* Find where we are in frag list */ |
| 2321 | for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { |
| 2322 | skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; |
| 2323 | |
| 2324 | if (offset < frag->size) |
| 2325 | break; |
| 2326 | |
| 2327 | offset -= frag->size; |
| 2328 | } |
| 2329 | |
| 2330 | for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { |
| 2331 | skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; |
| 2332 | |
| 2333 | slen = min_t(size_t, len, frag->size - offset); |
| 2334 | |
| 2335 | while (slen) { |
| 2336 | ret = kernel_sendpage_locked(sk, frag->page.p, |
| 2337 | frag->page_offset + offset, |
| 2338 | slen, MSG_DONTWAIT); |
| 2339 | if (ret <= 0) |
| 2340 | goto error; |
| 2341 | |
| 2342 | len -= ret; |
| 2343 | offset += ret; |
| 2344 | slen -= ret; |
| 2345 | } |
| 2346 | |
| 2347 | offset = 0; |
| 2348 | } |
| 2349 | |
| 2350 | if (len) { |
| 2351 | /* Process any frag lists */ |
| 2352 | |
| 2353 | if (skb == head) { |
| 2354 | if (skb_has_frag_list(skb)) { |
| 2355 | skb = skb_shinfo(skb)->frag_list; |
| 2356 | goto do_frag_list; |
| 2357 | } |
| 2358 | } else if (skb->next) { |
| 2359 | skb = skb->next; |
| 2360 | goto do_frag_list; |
| 2361 | } |
| 2362 | } |
| 2363 | |
| 2364 | out: |
| 2365 | return orig_len - len; |
| 2366 | |
| 2367 | error: |
| 2368 | return orig_len == len ? ret : orig_len - len; |
| 2369 | } |
| 2370 | EXPORT_SYMBOL_GPL(skb_send_sock_locked); |
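
/* A minimal usage sketch (hypothetical caller, not from this file):
 * the caller owns the socket lock, as the comment above requires.
 */
#include <linux/skbuff.h>
#include <net/sock.h>

static int example_send(struct sock *sk, struct sk_buff *skb)
{
        int sent;

        lock_sock(sk);
        sent = skb_send_sock_locked(sk, skb, 0, skb->len);
        release_sock(sk);

        /* Bytes actually sent, or a negative errno if nothing went out. */
        return sent;
}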
| 2371 | |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2372 | /** |
| 2373 | * skb_store_bits - store bits from kernel buffer to skb |
| 2374 | * @skb: destination buffer |
| 2375 | * @offset: offset in destination |
| 2376 | * @from: source buffer |
| 2377 | * @len: number of bytes to copy |
| 2378 | * |
| 2379 | * Copy the specified number of bytes from the source buffer to the |
| 2380 | * destination skb. This function handles all the messy bits of |
| 2381 | * traversing fragment lists and such. |
| 2382 | */ |
| 2383 | |
Stephen Hemminger | 0c6fcc8 | 2007-04-20 16:40:01 -0700 | [diff] [blame] | 2384 | int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2385 | { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2386 | int start = skb_headlen(skb); |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2387 | struct sk_buff *frag_iter; |
| 2388 | int i, copy; |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2389 | |
| 2390 | if (offset > (int)skb->len - len) |
| 2391 | goto fault; |
| 2392 | |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2393 | if ((copy = start - offset) > 0) { |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2394 | if (copy > len) |
| 2395 | copy = len; |
Arnaldo Carvalho de Melo | 27d7ff4 | 2007-03-31 11:55:19 -0300 | [diff] [blame] | 2396 | skb_copy_to_linear_data_offset(skb, offset, from, copy); |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2397 | if ((len -= copy) == 0) |
| 2398 | return 0; |
| 2399 | offset += copy; |
| 2400 | from += copy; |
| 2401 | } |
| 2402 | |
| 2403 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
| 2404 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2405 | int end; |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2406 | |
Ilpo Järvinen | 547b792 | 2008-07-25 21:43:18 -0700 | [diff] [blame] | 2407 | WARN_ON(start > offset + len); |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2408 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2409 | end = start + skb_frag_size(frag); |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2410 | if ((copy = end - offset) > 0) { |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 2411 | u32 p_off, p_len, copied; |
| 2412 | struct page *p; |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2413 | u8 *vaddr; |
| 2414 | |
| 2415 | if (copy > len) |
| 2416 | copy = len; |
| 2417 | |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 2418 | skb_frag_foreach_page(frag, |
| 2419 | frag->page_offset + offset - start, |
| 2420 | copy, p, p_off, p_len, copied) { |
| 2421 | vaddr = kmap_atomic(p); |
| 2422 | memcpy(vaddr + p_off, from + copied, p_len); |
| 2423 | kunmap_atomic(vaddr); |
| 2424 | } |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2425 | |
| 2426 | if ((len -= copy) == 0) |
| 2427 | return 0; |
| 2428 | offset += copy; |
| 2429 | from += copy; |
| 2430 | } |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2431 | start = end; |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2432 | } |
| 2433 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2434 | skb_walk_frags(skb, frag_iter) { |
| 2435 | int end; |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2436 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2437 | WARN_ON(start > offset + len); |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2438 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2439 | end = start + frag_iter->len; |
| 2440 | if ((copy = end - offset) > 0) { |
| 2441 | if (copy > len) |
| 2442 | copy = len; |
| 2443 | if (skb_store_bits(frag_iter, offset - start, |
| 2444 | from, copy)) |
| 2445 | goto fault; |
| 2446 | if ((len -= copy) == 0) |
| 2447 | return 0; |
| 2448 | offset += copy; |
| 2449 | from += copy; |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2450 | } |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2451 | start = end; |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2452 | } |
| 2453 | if (!len) |
| 2454 | return 0; |
| 2455 | |
| 2456 | fault: |
| 2457 | return -EFAULT; |
| 2458 | } |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2459 | EXPORT_SYMBOL(skb_store_bits); |
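
/* A minimal usage sketch (hypothetical caller, not from this file):
 * patching a single byte at an arbitrary offset. Unlike writing through
 * skb->data, this works even when the byte lives in a page fragment.
 */
#include <linux/skbuff.h>

static int example_patch_byte(struct sk_buff *skb, int offset, u8 val)
{
        /* Returns -EFAULT if the offset falls outside the skb. */
        return skb_store_bits(skb, offset, &val, sizeof(val));
}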
| 2460 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2461 | /* Checksum skb data. */ |
Daniel Borkmann | 2817a33 | 2013-10-30 11:50:51 +0100 | [diff] [blame] | 2462 | __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, |
| 2463 | __wsum csum, const struct skb_checksum_ops *ops) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2464 | { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2465 | int start = skb_headlen(skb); |
| 2466 | int i, copy = start - offset; |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2467 | struct sk_buff *frag_iter; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2468 | int pos = 0; |
| 2469 | |
| 2470 | /* Checksum header. */ |
| 2471 | if (copy > 0) { |
| 2472 | if (copy > len) |
| 2473 | copy = len; |
Daniel Borkmann | 2817a33 | 2013-10-30 11:50:51 +0100 | [diff] [blame] | 2474 | csum = ops->update(skb->data + offset, copy, csum); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2475 | if ((len -= copy) == 0) |
| 2476 | return csum; |
| 2477 | offset += copy; |
| 2478 | pos = copy; |
| 2479 | } |
| 2480 | |
| 2481 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2482 | int end; |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 2483 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2484 | |
Ilpo Järvinen | 547b792 | 2008-07-25 21:43:18 -0700 | [diff] [blame] | 2485 | WARN_ON(start > offset + len); |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2486 | |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 2487 | end = start + skb_frag_size(frag); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2488 | if ((copy = end - offset) > 0) { |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 2489 | u32 p_off, p_len, copied; |
| 2490 | struct page *p; |
Al Viro | 44bb936 | 2006-11-14 21:36:14 -0800 | [diff] [blame] | 2491 | __wsum csum2; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2492 | u8 *vaddr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2493 | |
| 2494 | if (copy > len) |
| 2495 | copy = len; |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 2496 | |
| 2497 | skb_frag_foreach_page(frag, |
| 2498 | frag->page_offset + offset - start, |
| 2499 | copy, p, p_off, p_len, copied) { |
| 2500 | vaddr = kmap_atomic(p); |
| 2501 | csum2 = ops->update(vaddr + p_off, p_len, 0); |
| 2502 | kunmap_atomic(vaddr); |
| 2503 | csum = ops->combine(csum, csum2, pos, p_len); |
| 2504 | pos += p_len; |
| 2505 | } |
| 2506 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2507 | if (!(len -= copy)) |
| 2508 | return csum; |
| 2509 | offset += copy; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2510 | } |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2511 | start = end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2512 | } |
| 2513 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2514 | skb_walk_frags(skb, frag_iter) { |
| 2515 | int end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2516 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2517 | WARN_ON(start > offset + len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2518 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2519 | end = start + frag_iter->len; |
| 2520 | if ((copy = end - offset) > 0) { |
| 2521 | __wsum csum2; |
| 2522 | if (copy > len) |
| 2523 | copy = len; |
Daniel Borkmann | 2817a33 | 2013-10-30 11:50:51 +0100 | [diff] [blame] | 2524 | csum2 = __skb_checksum(frag_iter, offset - start, |
| 2525 | copy, 0, ops); |
| 2526 | csum = ops->combine(csum, csum2, pos, copy); |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2527 | if ((len -= copy) == 0) |
| 2528 | return csum; |
| 2529 | offset += copy; |
| 2530 | pos += copy; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2531 | } |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2532 | start = end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2533 | } |
Kris Katterjohn | 09a6266 | 2006-01-08 22:24:28 -0800 | [diff] [blame] | 2534 | BUG_ON(len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2535 | |
| 2536 | return csum; |
| 2537 | } |
Daniel Borkmann | 2817a33 | 2013-10-30 11:50:51 +0100 | [diff] [blame] | 2538 | EXPORT_SYMBOL(__skb_checksum); |
| 2539 | |
| 2540 | __wsum skb_checksum(const struct sk_buff *skb, int offset, |
| 2541 | int len, __wsum csum) |
| 2542 | { |
| 2543 | const struct skb_checksum_ops ops = { |
Daniel Borkmann | cea80ea | 2013-11-04 17:10:25 +0100 | [diff] [blame] | 2544 | .update = csum_partial_ext, |
Daniel Borkmann | 2817a33 | 2013-10-30 11:50:51 +0100 | [diff] [blame] | 2545 | .combine = csum_block_add_ext, |
| 2546 | }; |
| 2547 | |
| 2548 | return __skb_checksum(skb, offset, len, csum, &ops); |
| 2549 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2550 | EXPORT_SYMBOL(skb_checksum); |
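
/* A minimal usage sketch (hypothetical caller, not from this file):
 * checksumming a whole skb and folding the 32-bit accumulator down to
 * the 16-bit one's complement form carried in protocol headers.
 */
#include <linux/skbuff.h>
#include <net/checksum.h>

static __sum16 example_full_csum(const struct sk_buff *skb)
{
        __wsum csum = skb_checksum(skb, 0, skb->len, 0);

        return csum_fold(csum);
}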
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2551 | |
| 2552 | /* Both of above in one bottle. */ |
| 2553 | |
Al Viro | 81d7766 | 2006-11-14 21:37:33 -0800 | [diff] [blame] | 2554 | __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, |
| 2555 | u8 *to, int len, __wsum csum) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2556 | { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2557 | int start = skb_headlen(skb); |
| 2558 | int i, copy = start - offset; |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2559 | struct sk_buff *frag_iter; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2560 | int pos = 0; |
| 2561 | |
| 2562 | /* Copy header. */ |
| 2563 | if (copy > 0) { |
| 2564 | if (copy > len) |
| 2565 | copy = len; |
| 2566 | csum = csum_partial_copy_nocheck(skb->data + offset, to, |
| 2567 | copy, csum); |
| 2568 | if ((len -= copy) == 0) |
| 2569 | return csum; |
| 2570 | offset += copy; |
| 2571 | to += copy; |
| 2572 | pos = copy; |
| 2573 | } |
| 2574 | |
| 2575 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2576 | int end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2577 | |
Ilpo Järvinen | 547b792 | 2008-07-25 21:43:18 -0700 | [diff] [blame] | 2578 | WARN_ON(start > offset + len); |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2579 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2580 | end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2581 | if ((copy = end - offset) > 0) { |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 2582 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
| 2583 | u32 p_off, p_len, copied; |
| 2584 | struct page *p; |
Al Viro | 5084205 | 2006-11-14 21:36:34 -0800 | [diff] [blame] | 2585 | __wsum csum2; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2586 | u8 *vaddr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2587 | |
| 2588 | if (copy > len) |
| 2589 | copy = len; |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 2590 | |
| 2591 | skb_frag_foreach_page(frag, |
| 2592 | frag->page_offset + offset - start, |
| 2593 | copy, p, p_off, p_len, copied) { |
| 2594 | vaddr = kmap_atomic(p); |
| 2595 | csum2 = csum_partial_copy_nocheck(vaddr + p_off, |
| 2596 | to + copied, |
| 2597 | p_len, 0); |
| 2598 | kunmap_atomic(vaddr); |
| 2599 | csum = csum_block_add(csum, csum2, pos); |
| 2600 | pos += p_len; |
| 2601 | } |
| 2602 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2603 | if (!(len -= copy)) |
| 2604 | return csum; |
| 2605 | offset += copy; |
| 2606 | to += copy; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2607 | } |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2608 | start = end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2609 | } |
| 2610 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2611 | skb_walk_frags(skb, frag_iter) { |
| 2612 | __wsum csum2; |
| 2613 | int end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2614 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2615 | WARN_ON(start > offset + len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2616 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2617 | end = start + frag_iter->len; |
| 2618 | if ((copy = end - offset) > 0) { |
| 2619 | if (copy > len) |
| 2620 | copy = len; |
| 2621 | csum2 = skb_copy_and_csum_bits(frag_iter, |
| 2622 | offset - start, |
| 2623 | to, copy, 0); |
| 2624 | csum = csum_block_add(csum, csum2, pos); |
| 2625 | if ((len -= copy) == 0) |
| 2626 | return csum; |
| 2627 | offset += copy; |
| 2628 | to += copy; |
| 2629 | pos += copy; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2630 | } |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2631 | start = end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2632 | } |
Kris Katterjohn | 09a6266 | 2006-01-08 22:24:28 -0800 | [diff] [blame] | 2633 | BUG_ON(len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2634 | return csum; |
| 2635 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2636 | EXPORT_SYMBOL(skb_copy_and_csum_bits); |
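
/* A minimal usage sketch (hypothetical caller, not from this file):
 * copying a queued skb into a flat buffer while accumulating the
 * checksum in a single pass.
 */
#include <linux/skbuff.h>

static __wsum example_copy_and_csum(const struct sk_buff *skb, u8 *to)
{
        /* Copies skb->len bytes to 'to' and returns their checksum. */
        return skb_copy_and_csum_bits(skb, 0, to, skb->len, 0);
}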
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2637 | |
Cong Wang | 49f8e83 | 2018-11-08 14:05:42 -0800 | [diff] [blame] | 2638 | __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len) |
| 2639 | { |
| 2640 | __sum16 sum; |
| 2641 | |
| 2642 | sum = csum_fold(skb_checksum(skb, 0, len, skb->csum)); |
Cong Wang | 1464193 | 2018-11-26 09:31:26 -0800 | [diff] [blame] | 2643 | /* See comments in __skb_checksum_complete(). */ |
Cong Wang | 49f8e83 | 2018-11-08 14:05:42 -0800 | [diff] [blame] | 2644 | if (likely(!sum)) { |
| 2645 | if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && |
| 2646 | !skb->csum_complete_sw) |
Cong Wang | 7fe50ac | 2018-11-12 14:47:18 -0800 | [diff] [blame] | 2647 | netdev_rx_csum_fault(skb->dev, skb); |
Cong Wang | 49f8e83 | 2018-11-08 14:05:42 -0800 | [diff] [blame] | 2648 | } |
| 2649 | if (!skb_shared(skb)) |
| 2650 | skb->csum_valid = !sum; |
| 2651 | return sum; |
| 2652 | } |
| 2653 | EXPORT_SYMBOL(__skb_checksum_complete_head); |
| 2654 | |
Cong Wang | 1464193 | 2018-11-26 09:31:26 -0800 | [diff] [blame] | 2655 | /* This function assumes skb->csum already holds the pseudo header's |
| 2656 | * checksum, which has been changed from the hardware checksum, for |
| 2657 | * example by __skb_checksum_validate_complete(). Also, for the |
| 2658 | * CHECKSUM_COMPLETE case, the original skb->csum must have failed validation. |
| 2659 | * |
| 2660 | * It returns non-zero if the recomputed checksum is still invalid, otherwise |
| 2661 | * zero. The new checksum is stored back into skb->csum unless the skb is |
| 2662 | * shared. |
| 2663 | */ |
Cong Wang | 49f8e83 | 2018-11-08 14:05:42 -0800 | [diff] [blame] | 2664 | __sum16 __skb_checksum_complete(struct sk_buff *skb) |
| 2665 | { |
| 2666 | __wsum csum; |
| 2667 | __sum16 sum; |
| 2668 | |
| 2669 | csum = skb_checksum(skb, 0, skb->len, 0); |
| 2670 | |
Cong Wang | 49f8e83 | 2018-11-08 14:05:42 -0800 | [diff] [blame] | 2671 | sum = csum_fold(csum_add(skb->csum, csum)); |
Cong Wang | 1464193 | 2018-11-26 09:31:26 -0800 | [diff] [blame] | 2672 | /* This check is inverted, because we already know the hardware |
| 2673 | * checksum was invalid before calling this function. So, if the |
| 2674 | * re-computed checksum is valid instead, then we have a mismatch |
| 2675 | * between the original skb->csum and skb_checksum(). This means either |
| 2676 | * the original hardware checksum is incorrect or we screwed up skb->csum |
| 2677 | * when moving skb->data around. |
| 2678 | */ |
Cong Wang | 49f8e83 | 2018-11-08 14:05:42 -0800 | [diff] [blame] | 2679 | if (likely(!sum)) { |
| 2680 | if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && |
| 2681 | !skb->csum_complete_sw) |
Cong Wang | 7fe50ac | 2018-11-12 14:47:18 -0800 | [diff] [blame] | 2682 | netdev_rx_csum_fault(skb->dev, skb); |
Cong Wang | 49f8e83 | 2018-11-08 14:05:42 -0800 | [diff] [blame] | 2683 | } |
| 2684 | |
| 2685 | if (!skb_shared(skb)) { |
| 2686 | /* Save full packet checksum */ |
| 2687 | skb->csum = csum; |
| 2688 | skb->ip_summed = CHECKSUM_COMPLETE; |
| 2689 | skb->csum_complete_sw = 1; |
| 2690 | skb->csum_valid = !sum; |
| 2691 | } |
| 2692 | |
| 2693 | return sum; |
| 2694 | } |
| 2695 | EXPORT_SYMBOL(__skb_checksum_complete); |
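
/* A minimal usage sketch (hypothetical caller, not from this file):
 * protocol code normally reaches __skb_checksum_complete() through the
 * skb_checksum_complete() helper, which short-circuits when hardware
 * has already vouched for the checksum.
 */
#include <linux/skbuff.h>

static bool example_csum_ok(struct sk_buff *skb)
{
        /* Zero means the packet checksum validated. */
        return skb_checksum_complete(skb) == 0;
}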
| 2696 | |
Davide Caratti | 9617813 | 2017-05-18 15:44:37 +0200 | [diff] [blame] | 2697 | static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum) |
| 2698 | { |
| 2699 | net_warn_ratelimited( |
| 2700 | "%s: attempt to compute crc32c without libcrc32c.ko\n", |
| 2701 | __func__); |
| 2702 | return 0; |
| 2703 | } |
| 2704 | |
| 2705 | static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2, |
| 2706 | int offset, int len) |
| 2707 | { |
| 2708 | net_warn_ratelimited( |
| 2709 | "%s: attempt to compute crc32c without libcrc32c.ko\n", |
| 2710 | __func__); |
| 2711 | return 0; |
| 2712 | } |
| 2713 | |
| 2714 | static const struct skb_checksum_ops default_crc32c_ops = { |
| 2715 | .update = warn_crc32c_csum_update, |
| 2716 | .combine = warn_crc32c_csum_combine, |
| 2717 | }; |
| 2718 | |
| 2719 | const struct skb_checksum_ops *crc32c_csum_stub __read_mostly = |
| 2720 | &default_crc32c_ops; |
| 2721 | EXPORT_SYMBOL(crc32c_csum_stub); |
| 2722 | |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 2723 | /** |
| 2724 | * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy() |
| 2725 | * @from: source buffer |
| 2726 | * |
| 2727 | * Calculates the amount of linear headroom needed in the 'to' skb passed |
| 2728 | * into skb_zerocopy(). |
| 2729 | */ |
| 2730 | unsigned int |
| 2731 | skb_zerocopy_headlen(const struct sk_buff *from) |
| 2732 | { |
| 2733 | unsigned int hlen = 0; |
| 2734 | |
| 2735 | if (!from->head_frag || |
| 2736 | skb_headlen(from) < L1_CACHE_BYTES || |
| 2737 | skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) |
| 2738 | hlen = skb_headlen(from); |
| 2739 | |
| 2740 | if (skb_has_frag_list(from)) |
| 2741 | hlen = from->len; |
| 2742 | |
| 2743 | return hlen; |
| 2744 | } |
| 2745 | EXPORT_SYMBOL_GPL(skb_zerocopy_headlen); |
| 2746 | |
| 2747 | /** |
| 2748 | * skb_zerocopy - Zero copy skb to skb |
| 2749 | * @to: destination buffer |
Masanari Iida | 7fceb4d | 2014-01-29 01:05:28 +0900 | [diff] [blame] | 2750 | * @from: source buffer |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 2751 | * @len: number of bytes to copy from source buffer |
| 2752 | * @hlen: size of linear headroom in destination buffer |
| 2753 | * |
| 2754 | * Copies up to @len bytes from @from to @to by creating references |
| 2755 | * to the frags in the source buffer. |
| 2756 | * |
| 2757 | * The @hlen, as calculated by skb_zerocopy_headlen(), specifies the |
| 2758 | * headroom in the @to buffer. |
Zoltan Kiss | 36d5fe6 | 2014-03-26 22:37:45 +0000 | [diff] [blame] | 2759 | * |
| 2760 | * Return value: |
| 2761 | * 0: everything is OK |
| 2762 | * -ENOMEM: couldn't orphan frags of @from due to lack of memory |
| 2763 | * -EFAULT: skb_copy_bits() found some problem with skb geometry |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 2764 | */ |
Zoltan Kiss | 36d5fe6 | 2014-03-26 22:37:45 +0000 | [diff] [blame] | 2765 | int |
| 2766 | skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen) |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 2767 | { |
| 2768 | int i, j = 0; |
| 2769 | int plen = 0; /* length of skb->head fragment */ |
Zoltan Kiss | 36d5fe6 | 2014-03-26 22:37:45 +0000 | [diff] [blame] | 2770 | int ret; |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 2771 | struct page *page; |
| 2772 | unsigned int offset; |
| 2773 | |
| 2774 | BUG_ON(!from->head_frag && !hlen); |
| 2775 | |
| 2776 | /* don't bother with small payloads */ |
Zoltan Kiss | 36d5fe6 | 2014-03-26 22:37:45 +0000 | [diff] [blame] | 2777 | if (len <= skb_tailroom(to)) |
| 2778 | return skb_copy_bits(from, 0, skb_put(to, len), len); |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 2779 | |
| 2780 | if (hlen) { |
Zoltan Kiss | 36d5fe6 | 2014-03-26 22:37:45 +0000 | [diff] [blame] | 2781 | ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen); |
| 2782 | if (unlikely(ret)) |
| 2783 | return ret; |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 2784 | len -= hlen; |
| 2785 | } else { |
| 2786 | plen = min_t(int, skb_headlen(from), len); |
| 2787 | if (plen) { |
| 2788 | page = virt_to_head_page(from->head); |
| 2789 | offset = from->data - (unsigned char *)page_address(page); |
| 2790 | __skb_fill_page_desc(to, 0, page, offset, plen); |
| 2791 | get_page(page); |
| 2792 | j = 1; |
| 2793 | len -= plen; |
| 2794 | } |
| 2795 | } |
| 2796 | |
| 2797 | to->truesize += len + plen; |
| 2798 | to->len += len + plen; |
| 2799 | to->data_len += len + plen; |
| 2800 | |
Zoltan Kiss | 36d5fe6 | 2014-03-26 22:37:45 +0000 | [diff] [blame] | 2801 | if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) { |
| 2802 | skb_tx_error(from); |
| 2803 | return -ENOMEM; |
| 2804 | } |
Willem de Bruijn | 1f8b977 | 2017-08-03 16:29:41 -0400 | [diff] [blame] | 2805 | skb_zerocopy_clone(to, from, GFP_ATOMIC); |
Zoltan Kiss | 36d5fe6 | 2014-03-26 22:37:45 +0000 | [diff] [blame] | 2806 | |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 2807 | for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { |
| 2808 | if (!len) |
| 2809 | break; |
| 2810 | skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i]; |
| 2811 | skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len); |
| 2812 | len -= skb_shinfo(to)->frags[j].size; |
| 2813 | skb_frag_ref(to, j); |
| 2814 | j++; |
| 2815 | } |
| 2816 | skb_shinfo(to)->nr_frags = j; |
Zoltan Kiss | 36d5fe6 | 2014-03-26 22:37:45 +0000 | [diff] [blame] | 2817 | |
| 2818 | return 0; |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 2819 | } |
| 2820 | EXPORT_SYMBOL_GPL(skb_zerocopy); |
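
/* A minimal usage sketch (hypothetical caller, not from this file):
 * the usual pairing with skb_zerocopy_headlen() when sizing the
 * destination skb. The GFP_ATOMIC flag is an assumption for the example.
 */
#include <linux/skbuff.h>

static struct sk_buff *example_zerocopy(struct sk_buff *from)
{
        unsigned int hlen = skb_zerocopy_headlen(from);
        struct sk_buff *to = alloc_skb(hlen, GFP_ATOMIC);

        if (!to)
                return NULL;

        if (skb_zerocopy(to, from, from->len, hlen)) {
                kfree_skb(to);
                return NULL;
        }
        return to;
}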
| 2821 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2822 | void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) |
| 2823 | { |
Al Viro | d3bc23e | 2006-11-14 21:24:49 -0800 | [diff] [blame] | 2824 | __wsum csum; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2825 | long csstart; |
| 2826 | |
Patrick McHardy | 84fa793 | 2006-08-29 16:44:56 -0700 | [diff] [blame] | 2827 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
Michał Mirosław | 55508d6 | 2010-12-14 15:24:08 +0000 | [diff] [blame] | 2828 | csstart = skb_checksum_start_offset(skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2829 | else |
| 2830 | csstart = skb_headlen(skb); |
| 2831 | |
Kris Katterjohn | 09a6266 | 2006-01-08 22:24:28 -0800 | [diff] [blame] | 2832 | BUG_ON(csstart > skb_headlen(skb)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2833 | |
Arnaldo Carvalho de Melo | d626f62 | 2007-03-27 18:55:52 -0300 | [diff] [blame] | 2834 | skb_copy_from_linear_data(skb, to, csstart); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2835 | |
| 2836 | csum = 0; |
| 2837 | if (csstart != skb->len) |
| 2838 | csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, |
| 2839 | skb->len - csstart, 0); |
| 2840 | |
Patrick McHardy | 84fa793 | 2006-08-29 16:44:56 -0700 | [diff] [blame] | 2841 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
Al Viro | ff1dcad | 2006-11-20 18:07:29 -0800 | [diff] [blame] | 2842 | long csstuff = csstart + skb->csum_offset; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2843 | |
Al Viro | d3bc23e | 2006-11-14 21:24:49 -0800 | [diff] [blame] | 2844 | *((__sum16 *)(to + csstuff)) = csum_fold(csum); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2845 | } |
| 2846 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2847 | EXPORT_SYMBOL(skb_copy_and_csum_dev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2848 | |
| 2849 | /** |
| 2850 | * skb_dequeue - remove from the head of the queue |
| 2851 | * @list: list to dequeue from |
| 2852 | * |
| 2853 | * Remove the head of the list. The list lock is taken so the function |
| 2854 | * may be used safely with other locking list functions. The head item is |
| 2855 | * returned or %NULL if the list is empty. |
| 2856 | */ |
| 2857 | |
| 2858 | struct sk_buff *skb_dequeue(struct sk_buff_head *list) |
| 2859 | { |
| 2860 | unsigned long flags; |
| 2861 | struct sk_buff *result; |
| 2862 | |
| 2863 | spin_lock_irqsave(&list->lock, flags); |
| 2864 | result = __skb_dequeue(list); |
| 2865 | spin_unlock_irqrestore(&list->lock, flags); |
| 2866 | return result; |
| 2867 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2868 | EXPORT_SYMBOL(skb_dequeue); |
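
/* A minimal usage sketch (hypothetical caller, not from this file):
 * a typical consumer loop draining a queue that was set up with
 * skb_queue_head_init(); example_handle() is a made-up callback that
 * consumes (and eventually frees) each buffer.
 */
#include <linux/skbuff.h>

static void example_drain(struct sk_buff_head *q,
                          void (*example_handle)(struct sk_buff *))
{
        struct sk_buff *skb;

        /* skb_dequeue() returns NULL once the queue is empty. */
        while ((skb = skb_dequeue(q)) != NULL)
                example_handle(skb);
}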
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2869 | |
| 2870 | /** |
| 2871 | * skb_dequeue_tail - remove from the tail of the queue |
| 2872 | * @list: list to dequeue from |
| 2873 | * |
| 2874 | * Remove the tail of the list. The list lock is taken so the function |
| 2875 | * may be used safely with other locking list functions. The tail item is |
| 2876 | * returned or %NULL if the list is empty. |
| 2877 | */ |
| 2878 | struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list) |
| 2879 | { |
| 2880 | unsigned long flags; |
| 2881 | struct sk_buff *result; |
| 2882 | |
| 2883 | spin_lock_irqsave(&list->lock, flags); |
| 2884 | result = __skb_dequeue_tail(list); |
| 2885 | spin_unlock_irqrestore(&list->lock, flags); |
| 2886 | return result; |
| 2887 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2888 | EXPORT_SYMBOL(skb_dequeue_tail); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2889 | |
| 2890 | /** |
| 2891 | * skb_queue_purge - empty a list |
| 2892 | * @list: list to empty |
| 2893 | * |
| 2894 | * Delete all buffers on an &sk_buff list. Each buffer is removed from |
| 2895 | * the list and one reference dropped. This function takes the list |
| 2896 | * lock and is atomic with respect to other list locking functions. |
| 2897 | */ |
| 2898 | void skb_queue_purge(struct sk_buff_head *list) |
| 2899 | { |
| 2900 | struct sk_buff *skb; |
| 2901 | while ((skb = skb_dequeue(list)) != NULL) |
| 2902 | kfree_skb(skb); |
| 2903 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2904 | EXPORT_SYMBOL(skb_queue_purge); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2905 | |
| 2906 | /** |
Yaogong Wang | 9f5afea | 2016-09-07 14:49:28 -0700 | [diff] [blame] | 2907 | * skb_rbtree_purge - empty a skb rbtree |
| 2908 | * @root: root of the rbtree to empty |
Peter Oskolkov | 385114d | 2018-08-02 23:34:38 +0000 | [diff] [blame] | 2909 | * Return value: the sum of truesizes of all purged skbs. |
Yaogong Wang | 9f5afea | 2016-09-07 14:49:28 -0700 | [diff] [blame] | 2910 | * |
| 2911 | * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from
| 2912 | * the rbtree and one reference dropped. This function does not take
| 2913 | * any lock. Synchronization should be handled by the caller (e.g., TCP |
| 2914 | * out-of-order queue is protected by the socket lock). |
| 2915 | */ |
Peter Oskolkov | 385114d | 2018-08-02 23:34:38 +0000 | [diff] [blame] | 2916 | unsigned int skb_rbtree_purge(struct rb_root *root) |
Yaogong Wang | 9f5afea | 2016-09-07 14:49:28 -0700 | [diff] [blame] | 2917 | { |
Eric Dumazet | 7c90584 | 2017-09-23 12:39:12 -0700 | [diff] [blame] | 2918 | struct rb_node *p = rb_first(root); |
Peter Oskolkov | 385114d | 2018-08-02 23:34:38 +0000 | [diff] [blame] | 2919 | unsigned int sum = 0; |
Yaogong Wang | 9f5afea | 2016-09-07 14:49:28 -0700 | [diff] [blame] | 2920 | |
Eric Dumazet | 7c90584 | 2017-09-23 12:39:12 -0700 | [diff] [blame] | 2921 | while (p) { |
| 2922 | struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode); |
| 2923 | |
| 2924 | p = rb_next(p); |
| 2925 | rb_erase(&skb->rbnode, root); |
Peter Oskolkov | 385114d | 2018-08-02 23:34:38 +0000 | [diff] [blame] | 2926 | sum += skb->truesize; |
Yaogong Wang | 9f5afea | 2016-09-07 14:49:28 -0700 | [diff] [blame] | 2927 | kfree_skb(skb); |
Eric Dumazet | 7c90584 | 2017-09-23 12:39:12 -0700 | [diff] [blame] | 2928 | } |
Peter Oskolkov | 385114d | 2018-08-02 23:34:38 +0000 | [diff] [blame] | 2929 | return sum; |
Yaogong Wang | 9f5afea | 2016-09-07 14:49:28 -0700 | [diff] [blame] | 2930 | } |
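
/* Editorial usage sketch: feed the truesize total returned by
 * skb_rbtree_purge() back into socket memory accounting, roughly as a
 * TCP out-of-order queue teardown would.  The caller must already hold
 * whatever lock protects the rbtree, since the purge itself takes none.
 */
static void example_ofo_purge(struct sock *sk, struct rb_root *root)
{
	unsigned int freed = skb_rbtree_purge(root);

	sk_mem_uncharge(sk, freed);
}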
| 2931 | |
| 2932 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2933 | * skb_queue_head - queue a buffer at the list head |
| 2934 | * @list: list to use |
| 2935 | * @newsk: buffer to queue |
| 2936 | * |
| 2937 | * Queue a buffer at the start of the list. This function takes the
| 2938 | * list lock and can be used safely with other locking &sk_buff
| 2939 | * functions.
| 2940 | * |
| 2941 | * A buffer cannot be placed on two lists at the same time. |
| 2942 | */ |
| 2943 | void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) |
| 2944 | { |
| 2945 | unsigned long flags; |
| 2946 | |
| 2947 | spin_lock_irqsave(&list->lock, flags); |
| 2948 | __skb_queue_head(list, newsk); |
| 2949 | spin_unlock_irqrestore(&list->lock, flags); |
| 2950 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2951 | EXPORT_SYMBOL(skb_queue_head); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2952 | |
| 2953 | /** |
| 2954 | * skb_queue_tail - queue a buffer at the list tail |
| 2955 | * @list: list to use |
| 2956 | * @newsk: buffer to queue |
| 2957 | * |
| 2958 | * Queue a buffer at the tail of the list. This function takes the
| 2959 | * list lock and can be used safely with other locking &sk_buff
| 2960 | * functions.
| 2961 | * |
| 2962 | * A buffer cannot be placed on two lists at the same time. |
| 2963 | */ |
| 2964 | void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) |
| 2965 | { |
| 2966 | unsigned long flags; |
| 2967 | |
| 2968 | spin_lock_irqsave(&list->lock, flags); |
| 2969 | __skb_queue_tail(list, newsk); |
| 2970 | spin_unlock_irqrestore(&list->lock, flags); |
| 2971 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2972 | EXPORT_SYMBOL(skb_queue_tail); |
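
/* Editorial usage sketch: because skb_queue_tail()/skb_dequeue() use
 * spin_lock_irqsave(), one context (even a hard IRQ) can enqueue while
 * another drains, with no additional synchronization.
 */
static void example_producer(struct sk_buff_head *q, struct sk_buff *skb)
{
	skb_queue_tail(q, skb);		/* safe from IRQ context */
}

static void example_consumer(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(q)) != NULL)
		kfree_skb(skb);		/* stand-in for real processing */
}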
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2973 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2974 | /** |
| 2975 | * skb_unlink - remove a buffer from a list |
| 2976 | * @skb: buffer to remove |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2977 | * @list: list to use |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2978 | * |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2979 | * Remove a packet from a list. The list lock is taken and this
| 2980 | * function is atomic with respect to other list locked calls.
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2981 | * |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2982 | * You must know what list the SKB is on. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2983 | */ |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2984 | void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2985 | { |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2986 | unsigned long flags; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2987 | |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2988 | spin_lock_irqsave(&list->lock, flags); |
| 2989 | __skb_unlink(skb, list); |
| 2990 | spin_unlock_irqrestore(&list->lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2991 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2992 | EXPORT_SYMBOL(skb_unlink); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2993 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2994 | /** |
| 2995 | * skb_append - append a buffer |
| 2996 | * @old: buffer to insert after |
| 2997 | * @newsk: buffer to insert |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2998 | * @list: list to use |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2999 | * |
| 3000 | * Place a packet after a given packet in a list. The list locks are taken |
| 3001 | * and this function is atomic with respect to other list locked calls. |
| 3002 | * A buffer cannot be placed on two lists at the same time. |
| 3003 | */ |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 3004 | void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3005 | { |
| 3006 | unsigned long flags; |
| 3007 | |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 3008 | spin_lock_irqsave(&list->lock, flags); |
Gerrit Renker | 7de6c03 | 2008-04-14 00:05:09 -0700 | [diff] [blame] | 3009 | __skb_queue_after(list, old, newsk); |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 3010 | spin_unlock_irqrestore(&list->lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3011 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3012 | EXPORT_SYMBOL(skb_append); |
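
/* Editorial usage sketch: splice an out-of-order buffer in directly
 * behind the element it follows, e.g. while reassembling a sequenced
 * stream kept on an &sk_buff_head.  @newsk must not be on another list.
 */
static void example_insert_after(struct sk_buff_head *q,
				 struct sk_buff *anchor,
				 struct sk_buff *newsk)
{
	skb_append(anchor, newsk, q);
}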
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3013 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3014 | static inline void skb_split_inside_header(struct sk_buff *skb, |
| 3015 | struct sk_buff* skb1, |
| 3016 | const u32 len, const int pos) |
| 3017 | { |
| 3018 | int i; |
| 3019 | |
Arnaldo Carvalho de Melo | d626f62 | 2007-03-27 18:55:52 -0300 | [diff] [blame] | 3020 | skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), |
| 3021 | pos - len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3022 | /* And move data appendix as is. */ |
| 3023 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) |
| 3024 | skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; |
| 3025 | |
| 3026 | skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; |
| 3027 | skb_shinfo(skb)->nr_frags = 0; |
| 3028 | skb1->data_len = skb->data_len; |
| 3029 | skb1->len += skb1->data_len; |
| 3030 | skb->data_len = 0; |
| 3031 | skb->len = len; |
Arnaldo Carvalho de Melo | 27a884d | 2007-04-19 20:29:13 -0700 | [diff] [blame] | 3032 | skb_set_tail_pointer(skb, len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3033 | } |
| 3034 | |
| 3035 | static inline void skb_split_no_header(struct sk_buff *skb, |
| 3036 | struct sk_buff* skb1, |
| 3037 | const u32 len, int pos) |
| 3038 | { |
| 3039 | int i, k = 0; |
| 3040 | const int nfrags = skb_shinfo(skb)->nr_frags; |
| 3041 | |
| 3042 | skb_shinfo(skb)->nr_frags = 0; |
| 3043 | skb1->len = skb1->data_len = skb->len - len; |
| 3044 | skb->len = len; |
| 3045 | skb->data_len = len - pos; |
| 3046 | |
| 3047 | for (i = 0; i < nfrags; i++) { |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 3048 | int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3049 | |
| 3050 | if (pos + size > len) { |
| 3051 | skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; |
| 3052 | |
| 3053 | if (pos < len) { |
| 3054 | /* Split frag. |
| 3055 | * We have two options in this case:
| 3056 | * 1. Move the whole frag to the second
| 3057 | * part, if possible. F.e. this is
| 3058 | * mandatory for TUX, where splitting
| 3059 | * is expensive.
| 3060 | * 2. Split accurately at the boundary, which is what we do here.
| 3061 | */ |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 3062 | skb_frag_ref(skb, i); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3063 | skb_shinfo(skb1)->frags[0].page_offset += len - pos; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 3064 | skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos); |
| 3065 | skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3066 | skb_shinfo(skb)->nr_frags++; |
| 3067 | } |
| 3068 | k++; |
| 3069 | } else |
| 3070 | skb_shinfo(skb)->nr_frags++; |
| 3071 | pos += size; |
| 3072 | } |
| 3073 | skb_shinfo(skb1)->nr_frags = k; |
| 3074 | } |
| 3075 | |
| 3076 | /** |
| 3077 | * skb_split - Split fragmented skb into two parts at length len.
| 3078 | * @skb: the buffer to split |
| 3079 | * @skb1: the buffer to receive the second part |
| 3080 | * @len: new length for skb |
| 3081 | */ |
| 3082 | void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) |
| 3083 | { |
| 3084 | int pos = skb_headlen(skb); |
| 3085 | |
Willem de Bruijn | fff8803 | 2017-06-08 11:35:03 -0400 | [diff] [blame] | 3086 | skb_shinfo(skb1)->tx_flags |= skb_shinfo(skb)->tx_flags & |
| 3087 | SKBTX_SHARED_FRAG; |
Willem de Bruijn | 1f8b977 | 2017-08-03 16:29:41 -0400 | [diff] [blame] | 3088 | skb_zerocopy_clone(skb1, skb, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3089 | if (len < pos) /* Split line is inside header. */ |
| 3090 | skb_split_inside_header(skb, skb1, len, pos); |
| 3091 | else /* Second chunk has no header, nothing to copy. */ |
| 3092 | skb_split_no_header(skb, skb1, len, pos); |
| 3093 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3094 | EXPORT_SYMBOL(skb_split); |
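
/* Editorial usage sketch, loosely modelled on TCP-style fragmentation:
 * carve everything past @len bytes off into a fresh skb.  The second
 * buffer only needs tailroom for the part of the linear header that
 * lies beyond the split point, so headlen is always enough.
 */
static struct sk_buff *example_split(struct sk_buff *skb, u32 len)
{
	struct sk_buff *nskb = alloc_skb(skb_headlen(skb), GFP_ATOMIC);

	if (!nskb)
		return NULL;

	skb_split(skb, nskb, len);	/* skb keeps the first len bytes */
	return nskb;			/* nskb holds the remainder */
}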
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3095 | |
Ilpo Järvinen | 9f782db | 2008-11-25 13:57:01 -0800 | [diff] [blame] | 3096 | /* Shifting from/to a cloned skb is a no-go. |
| 3097 | * |
| 3098 | * Caller cannot keep skb_shinfo related pointers past calling here! |
| 3099 | */ |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3100 | static int skb_prepare_for_shift(struct sk_buff *skb) |
| 3101 | { |
Ilpo Järvinen | 0ace285 | 2008-11-24 21:30:21 -0800 | [diff] [blame] | 3102 | return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3103 | } |
| 3104 | |
| 3105 | /** |
| 3106 | * skb_shift - Shifts paged data partially from skb to another |
| 3107 | * @tgt: buffer into which tail data gets added |
| 3108 | * @skb: buffer from which the paged data comes
| 3109 | * @shiftlen: shift up to this many bytes |
| 3110 | * |
| 3111 | * Attempts to shift up to shiftlen worth of bytes, which may be less than |
Feng King | 20e994a | 2011-11-21 01:47:11 +0000 | [diff] [blame] | 3112 | * the length of the skb, from skb to tgt. Returns the number of bytes shifted.
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3113 | * It's up to the caller to free skb if everything was shifted.
| 3114 | * |
| 3115 | * If @tgt runs out of frags, the whole operation is aborted. |
| 3116 | * |
| 3117 | * The skb may contain nothing but paged data, while tgt is allowed
| 3118 | * to contain non-paged (linear) data as well.
| 3119 | * |
| 3120 | * TODO: a full-sized shift could be optimized, but that would need a
| 3121 | * specialized skb freer to handle frags without up-to-date nr_frags.
| 3122 | */ |
| 3123 | int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) |
| 3124 | { |
| 3125 | int from, to, merge, todo; |
| 3126 | struct skb_frag_struct *fragfrom, *fragto; |
| 3127 | |
| 3128 | BUG_ON(shiftlen > skb->len); |
Eric Dumazet | f8071cd | 2016-11-15 12:51:50 -0800 | [diff] [blame] | 3129 | |
| 3130 | if (skb_headlen(skb)) |
| 3131 | return 0; |
Willem de Bruijn | 1f8b977 | 2017-08-03 16:29:41 -0400 | [diff] [blame] | 3132 | if (skb_zcopy(tgt) || skb_zcopy(skb)) |
| 3133 | return 0; |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3134 | |
| 3135 | todo = shiftlen; |
| 3136 | from = 0; |
| 3137 | to = skb_shinfo(tgt)->nr_frags; |
| 3138 | fragfrom = &skb_shinfo(skb)->frags[from]; |
| 3139 | |
| 3140 | /* Actual merge is delayed until the point when we know we can |
| 3141 | * commit all, so that we don't have to undo partial changes |
| 3142 | */ |
| 3143 | if (!to || |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 3144 | !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom), |
| 3145 | fragfrom->page_offset)) { |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3146 | merge = -1; |
| 3147 | } else { |
| 3148 | merge = to - 1; |
| 3149 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 3150 | todo -= skb_frag_size(fragfrom); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3151 | if (todo < 0) { |
| 3152 | if (skb_prepare_for_shift(skb) || |
| 3153 | skb_prepare_for_shift(tgt)) |
| 3154 | return 0; |
| 3155 | |
Ilpo Järvinen | 9f782db | 2008-11-25 13:57:01 -0800 | [diff] [blame] | 3156 | /* All previous frag pointers might be stale! */ |
| 3157 | fragfrom = &skb_shinfo(skb)->frags[from]; |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3158 | fragto = &skb_shinfo(tgt)->frags[merge]; |
| 3159 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 3160 | skb_frag_size_add(fragto, shiftlen); |
| 3161 | skb_frag_size_sub(fragfrom, shiftlen); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3162 | fragfrom->page_offset += shiftlen; |
| 3163 | |
| 3164 | goto onlymerged; |
| 3165 | } |
| 3166 | |
| 3167 | from++; |
| 3168 | } |
| 3169 | |
| 3170 | /* Skip full, not-fitting skb to avoid expensive operations */ |
| 3171 | if ((shiftlen == skb->len) && |
| 3172 | (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) |
| 3173 | return 0; |
| 3174 | |
| 3175 | if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) |
| 3176 | return 0; |
| 3177 | |
| 3178 | while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { |
| 3179 | if (to == MAX_SKB_FRAGS) |
| 3180 | return 0; |
| 3181 | |
| 3182 | fragfrom = &skb_shinfo(skb)->frags[from]; |
| 3183 | fragto = &skb_shinfo(tgt)->frags[to]; |
| 3184 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 3185 | if (todo >= skb_frag_size(fragfrom)) { |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3186 | *fragto = *fragfrom; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 3187 | todo -= skb_frag_size(fragfrom); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3188 | from++; |
| 3189 | to++; |
| 3190 | |
| 3191 | } else { |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 3192 | __skb_frag_ref(fragfrom); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3193 | fragto->page = fragfrom->page; |
| 3194 | fragto->page_offset = fragfrom->page_offset; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 3195 | skb_frag_size_set(fragto, todo); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3196 | |
| 3197 | fragfrom->page_offset += todo; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 3198 | skb_frag_size_sub(fragfrom, todo); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3199 | todo = 0; |
| 3200 | |
| 3201 | to++; |
| 3202 | break; |
| 3203 | } |
| 3204 | } |
| 3205 | |
| 3206 | /* Ready to "commit" this state change to tgt */ |
| 3207 | skb_shinfo(tgt)->nr_frags = to; |
| 3208 | |
| 3209 | if (merge >= 0) { |
| 3210 | fragfrom = &skb_shinfo(skb)->frags[0]; |
| 3211 | fragto = &skb_shinfo(tgt)->frags[merge]; |
| 3212 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 3213 | skb_frag_size_add(fragto, skb_frag_size(fragfrom)); |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 3214 | __skb_frag_unref(fragfrom); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3215 | } |
| 3216 | |
| 3217 | /* Reposition in the original skb */ |
| 3218 | to = 0; |
| 3219 | while (from < skb_shinfo(skb)->nr_frags) |
| 3220 | skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; |
| 3221 | skb_shinfo(skb)->nr_frags = to; |
| 3222 | |
| 3223 | BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); |
| 3224 | |
| 3225 | onlymerged: |
| 3226 | /* Most likely the tgt won't ever need its checksum anymore; skb, on
| 3227 | * the other hand, might need it if it has to be retransmitted.
| 3228 | */
| 3229 | tgt->ip_summed = CHECKSUM_PARTIAL; |
| 3230 | skb->ip_summed = CHECKSUM_PARTIAL; |
| 3231 | |
| 3232 | /* Yak, is it really working this way? Some helper please? */ |
| 3233 | skb->len -= shiftlen; |
| 3234 | skb->data_len -= shiftlen; |
| 3235 | skb->truesize -= shiftlen; |
| 3236 | tgt->len += shiftlen; |
| 3237 | tgt->data_len += shiftlen; |
| 3238 | tgt->truesize += shiftlen; |
| 3239 | |
| 3240 | return shiftlen; |
| 3241 | } |
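
/* Editorial usage sketch, in the spirit of the TCP retransmit-queue
 * collapsing this helper was written for: shift all of @skb's paged
 * data into @prev and free @skb once it is empty.  Note skb_shift()
 * leaves the emptied skb on whatever list it was on.
 */
static void example_collapse(struct sk_buff *prev, struct sk_buff *skb,
			     struct sk_buff_head *q)
{
	int shifted = skb_shift(prev, skb, skb->len);

	if (shifted == skb->len) {
		skb_unlink(skb, q);
		kfree_skb(skb);
	}
}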
| 3242 | |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3243 | /** |
| 3244 | * skb_prepare_seq_read - Prepare a sequential read of skb data |
| 3245 | * @skb: the buffer to read |
| 3246 | * @from: lower offset of data to be read |
| 3247 | * @to: upper offset of data to be read |
| 3248 | * @st: state variable |
| 3249 | * |
| 3250 | * Initializes the specified state variable. Must be called before |
| 3251 | * invoking skb_seq_read() for the first time. |
| 3252 | */ |
| 3253 | void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, |
| 3254 | unsigned int to, struct skb_seq_state *st) |
| 3255 | { |
| 3256 | st->lower_offset = from; |
| 3257 | st->upper_offset = to; |
| 3258 | st->root_skb = st->cur_skb = skb; |
| 3259 | st->frag_idx = st->stepped_offset = 0; |
| 3260 | st->frag_data = NULL; |
| 3261 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3262 | EXPORT_SYMBOL(skb_prepare_seq_read); |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3263 | |
| 3264 | /** |
| 3265 | * skb_seq_read - Sequentially read skb data |
| 3266 | * @consumed: number of bytes consumed by the caller so far |
| 3267 | * @data: destination pointer for data to be returned |
| 3268 | * @st: state variable |
| 3269 | * |
Mathias Krause | bc32383 | 2013-11-07 14:18:26 +0100 | [diff] [blame] | 3270 | * Reads a block of skb data at @consumed relative to the |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3271 | * lower offset specified to skb_prepare_seq_read(). Assigns |
Mathias Krause | bc32383 | 2013-11-07 14:18:26 +0100 | [diff] [blame] | 3272 | * the head of the data block to @data and returns the length |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3273 | * of the block or 0 if the end of the skb data or the upper |
| 3274 | * offset has been reached. |
| 3275 | * |
| 3276 | * The caller is not required to consume all of the data |
Mathias Krause | bc32383 | 2013-11-07 14:18:26 +0100 | [diff] [blame] | 3277 | * returned, i.e. @consumed is typically set to the number |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3278 | * of bytes already consumed and the next call to |
| 3279 | * skb_seq_read() will return the remaining part of the block. |
| 3280 | * |
Lucas De Marchi | 25985ed | 2011-03-30 22:57:33 -0300 | [diff] [blame] | 3281 | * Note 1: The size of each block of data returned can be arbitrary;
Masanari Iida | e793c0f | 2014-09-04 23:44:36 +0900 | [diff] [blame] | 3282 | * this limitation is the cost of zerocopy sequential
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3283 | * reads of potentially non-linear data.
| 3284 | * |
Randy Dunlap | bc2cda1 | 2008-02-13 15:03:25 -0800 | [diff] [blame] | 3285 | * Note 2: Fragment lists within fragments are not implemented |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3286 | * at the moment, state->root_skb could be replaced with |
| 3287 | * a stack for this purpose. |
| 3288 | */ |
| 3289 | unsigned int skb_seq_read(unsigned int consumed, const u8 **data, |
| 3290 | struct skb_seq_state *st) |
| 3291 | { |
| 3292 | unsigned int block_limit, abs_offset = consumed + st->lower_offset; |
| 3293 | skb_frag_t *frag; |
| 3294 | |
Wedson Almeida Filho | aeb193e | 2013-06-23 23:33:48 -0700 | [diff] [blame] | 3295 | if (unlikely(abs_offset >= st->upper_offset)) { |
| 3296 | if (st->frag_data) { |
| 3297 | kunmap_atomic(st->frag_data); |
| 3298 | st->frag_data = NULL; |
| 3299 | } |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3300 | return 0; |
Wedson Almeida Filho | aeb193e | 2013-06-23 23:33:48 -0700 | [diff] [blame] | 3301 | } |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3302 | |
| 3303 | next_skb: |
Herbert Xu | 95e3b24 | 2009-01-29 16:07:52 -0800 | [diff] [blame] | 3304 | block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3305 | |
Thomas Chenault | 995b337 | 2009-05-18 21:43:27 -0700 | [diff] [blame] | 3306 | if (abs_offset < block_limit && !st->frag_data) { |
Herbert Xu | 95e3b24 | 2009-01-29 16:07:52 -0800 | [diff] [blame] | 3307 | *data = st->cur_skb->data + (abs_offset - st->stepped_offset); |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3308 | return block_limit - abs_offset; |
| 3309 | } |
| 3310 | |
| 3311 | if (st->frag_idx == 0 && !st->frag_data) |
| 3312 | st->stepped_offset += skb_headlen(st->cur_skb); |
| 3313 | |
| 3314 | while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { |
| 3315 | frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 3316 | block_limit = skb_frag_size(frag) + st->stepped_offset; |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3317 | |
| 3318 | if (abs_offset < block_limit) { |
| 3319 | if (!st->frag_data) |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 3320 | st->frag_data = kmap_atomic(skb_frag_page(frag)); |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3321 | |
| 3322 | *data = (u8 *) st->frag_data + frag->page_offset + |
| 3323 | (abs_offset - st->stepped_offset); |
| 3324 | |
| 3325 | return block_limit - abs_offset; |
| 3326 | } |
| 3327 | |
| 3328 | if (st->frag_data) { |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 3329 | kunmap_atomic(st->frag_data); |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3330 | st->frag_data = NULL; |
| 3331 | } |
| 3332 | |
| 3333 | st->frag_idx++; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 3334 | st->stepped_offset += skb_frag_size(frag); |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3335 | } |
| 3336 | |
Olaf Kirch | 5b5a60d | 2007-06-23 23:11:52 -0700 | [diff] [blame] | 3337 | if (st->frag_data) { |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 3338 | kunmap_atomic(st->frag_data); |
Olaf Kirch | 5b5a60d | 2007-06-23 23:11:52 -0700 | [diff] [blame] | 3339 | st->frag_data = NULL; |
| 3340 | } |
| 3341 | |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 3342 | if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) { |
Shyam Iyer | 71b3346 | 2009-01-29 16:12:42 -0800 | [diff] [blame] | 3343 | st->cur_skb = skb_shinfo(st->root_skb)->frag_list; |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3344 | st->frag_idx = 0; |
| 3345 | goto next_skb; |
Shyam Iyer | 71b3346 | 2009-01-29 16:12:42 -0800 | [diff] [blame] | 3346 | } else if (st->cur_skb->next) { |
| 3347 | st->cur_skb = st->cur_skb->next; |
Herbert Xu | 95e3b24 | 2009-01-29 16:07:52 -0800 | [diff] [blame] | 3348 | st->frag_idx = 0; |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3349 | goto next_skb; |
| 3350 | } |
| 3351 | |
| 3352 | return 0; |
| 3353 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3354 | EXPORT_SYMBOL(skb_seq_read); |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3355 | |
| 3356 | /** |
| 3357 | * skb_abort_seq_read - Abort a sequential read of skb data |
| 3358 | * @st: state variable |
| 3359 | * |
| 3360 | * Must be called if the sequential read is abandoned before
| 3361 | * skb_seq_read() has returned 0, so that any mapped fragment is released.
| 3362 | */ |
| 3363 | void skb_abort_seq_read(struct skb_seq_state *st) |
| 3364 | { |
| 3365 | if (st->frag_data) |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 3366 | kunmap_atomic(st->frag_data); |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3367 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3368 | EXPORT_SYMBOL(skb_abort_seq_read); |
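
/* Editorial usage sketch: walk a possibly non-linear skb with the
 * sequential read API.  The loop runs until skb_seq_read() returns 0,
 * so no skb_abort_seq_read() is needed; an early exit from the loop
 * would require it, to drop the last fragment mapping.
 */
static void example_seq_dump(struct sk_buff *skb)
{
	struct skb_seq_state st;
	unsigned int consumed = 0, len;
	const u8 *data;

	skb_prepare_seq_read(skb, 0, skb->len, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		print_hex_dump_bytes("skb: ", DUMP_PREFIX_OFFSET, data, len);
		consumed += len;
	}
}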
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3369 | |
Thomas Graf | 3fc7e8a | 2005-06-23 21:00:17 -0700 | [diff] [blame] | 3370 | #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) |
| 3371 | |
| 3372 | static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, |
| 3373 | struct ts_config *conf, |
| 3374 | struct ts_state *state) |
| 3375 | { |
| 3376 | return skb_seq_read(offset, text, TS_SKB_CB(state)); |
| 3377 | } |
| 3378 | |
| 3379 | static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) |
| 3380 | { |
| 3381 | skb_abort_seq_read(TS_SKB_CB(state)); |
| 3382 | } |
| 3383 | |
| 3384 | /** |
| 3385 | * skb_find_text - Find a text pattern in skb data |
| 3386 | * @skb: the buffer to look in |
| 3387 | * @from: search offset |
| 3388 | * @to: search limit |
| 3389 | * @config: textsearch configuration |
Thomas Graf | 3fc7e8a | 2005-06-23 21:00:17 -0700 | [diff] [blame] | 3390 | * |
| 3391 | * Finds a pattern in the skb data according to the specified |
| 3392 | * textsearch configuration. Use textsearch_next() to retrieve |
| 3393 | * subsequent occurrences of the pattern. Returns the offset |
| 3394 | * to the first occurrence or UINT_MAX if no match was found. |
| 3395 | */ |
| 3396 | unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, |
Bojan Prtvar | 059a244 | 2015-02-22 11:46:35 +0100 | [diff] [blame] | 3397 | unsigned int to, struct ts_config *config) |
Thomas Graf | 3fc7e8a | 2005-06-23 21:00:17 -0700 | [diff] [blame] | 3398 | { |
Bojan Prtvar | 059a244 | 2015-02-22 11:46:35 +0100 | [diff] [blame] | 3399 | struct ts_state state; |
Phil Oester | f72b948 | 2006-06-26 00:00:57 -0700 | [diff] [blame] | 3400 | unsigned int ret; |
| 3401 | |
Thomas Graf | 3fc7e8a | 2005-06-23 21:00:17 -0700 | [diff] [blame] | 3402 | config->get_next_block = skb_ts_get_next_block; |
| 3403 | config->finish = skb_ts_finish; |
| 3404 | |
Bojan Prtvar | 059a244 | 2015-02-22 11:46:35 +0100 | [diff] [blame] | 3405 | skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state)); |
Thomas Graf | 3fc7e8a | 2005-06-23 21:00:17 -0700 | [diff] [blame] | 3406 | |
Bojan Prtvar | 059a244 | 2015-02-22 11:46:35 +0100 | [diff] [blame] | 3407 | ret = textsearch_find(config, &state); |
Phil Oester | f72b948 | 2006-06-26 00:00:57 -0700 | [diff] [blame] | 3408 | return (ret <= to - from ? ret : UINT_MAX); |
Thomas Graf | 3fc7e8a | 2005-06-23 21:00:17 -0700 | [diff] [blame] | 3409 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3410 | EXPORT_SYMBOL(skb_find_text); |
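
/* Editorial usage sketch, mirroring how the netfilter string match
 * drives this API: prepare a textsearch config, scan the skb, and
 * treat UINT_MAX as "no match".
 */
static bool example_contains(struct sk_buff *skb, const char *pattern)
{
	struct ts_config *conf;
	unsigned int pos;

	conf = textsearch_prepare("kmp", pattern, strlen(pattern),
				  GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return false;

	pos = skb_find_text(skb, 0, skb->len, conf);
	textsearch_destroy(conf);
	return pos != UINT_MAX;
}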
Thomas Graf | 3fc7e8a | 2005-06-23 21:00:17 -0700 | [diff] [blame] | 3411 | |
Hannes Frederic Sowa | be12a1f | 2015-05-21 16:59:58 +0200 | [diff] [blame] | 3412 | int skb_append_pagefrags(struct sk_buff *skb, struct page *page, |
| 3413 | int offset, size_t size) |
| 3414 | { |
| 3415 | int i = skb_shinfo(skb)->nr_frags; |
| 3416 | |
| 3417 | if (skb_can_coalesce(skb, i, page, offset)) { |
| 3418 | skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size); |
| 3419 | } else if (i < MAX_SKB_FRAGS) { |
| 3420 | get_page(page); |
| 3421 | skb_fill_page_desc(skb, i, page, offset, size); |
| 3422 | } else { |
| 3423 | return -EMSGSIZE; |
| 3424 | } |
| 3425 | |
| 3426 | return 0; |
| 3427 | } |
| 3428 | EXPORT_SYMBOL_GPL(skb_append_pagefrags); |
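
/* Editorial usage sketch: this helper only attaches the page fragment
 * (or coalesces it with the previous one); the length accounting is
 * deliberately left to the caller, as below.
 */
static int example_add_page(struct sk_buff *skb, struct page *page,
			    int offset, size_t size)
{
	int err = skb_append_pagefrags(skb, page, offset, size);

	if (err)
		return err;

	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;
	return 0;
}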
| 3429 | |
Herbert Xu | cbb042f | 2006-03-20 22:43:56 -0800 | [diff] [blame] | 3430 | /** |
| 3431 | * skb_pull_rcsum - pull skb and update receive checksum |
| 3432 | * @skb: buffer to update |
Herbert Xu | cbb042f | 2006-03-20 22:43:56 -0800 | [diff] [blame] | 3433 | * @len: length of data pulled |
| 3434 | * |
| 3435 | * This function performs an skb_pull on the packet and updates |
Urs Thuermann | fee54fa | 2008-02-12 22:03:25 -0800 | [diff] [blame] | 3436 | * the CHECKSUM_COMPLETE checksum. It should be used in
Patrick McHardy | 84fa793 | 2006-08-29 16:44:56 -0700 | [diff] [blame] | 3437 | * receive path processing instead of skb_pull() unless you know
| 3438 | * that the checksum difference is zero (e.g., a valid IP header) |
| 3439 | * or you are setting ip_summed to CHECKSUM_NONE. |
Herbert Xu | cbb042f | 2006-03-20 22:43:56 -0800 | [diff] [blame] | 3440 | */ |
Johannes Berg | af72868 | 2017-06-16 14:29:22 +0200 | [diff] [blame] | 3441 | void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) |
Herbert Xu | cbb042f | 2006-03-20 22:43:56 -0800 | [diff] [blame] | 3442 | { |
Pravin B Shelar | 31b33df | 2015-09-28 17:24:25 -0700 | [diff] [blame] | 3443 | unsigned char *data = skb->data; |
| 3444 | |
Herbert Xu | cbb042f | 2006-03-20 22:43:56 -0800 | [diff] [blame] | 3445 | BUG_ON(len > skb->len); |
Pravin B Shelar | 31b33df | 2015-09-28 17:24:25 -0700 | [diff] [blame] | 3446 | __skb_pull(skb, len); |
| 3447 | skb_postpull_rcsum(skb, data, len); |
| 3448 | return skb->data; |
Herbert Xu | cbb042f | 2006-03-20 22:43:56 -0800 | [diff] [blame] | 3449 | } |
Arnaldo Carvalho de Melo | f94691a | 2006-03-20 22:47:55 -0800 | [diff] [blame] | 3450 | EXPORT_SYMBOL_GPL(skb_pull_rcsum); |
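
/* Editorial usage sketch: strip a hypothetical 4-byte encapsulation
 * header on receive.  Using skb_pull_rcsum() instead of skb_pull()
 * keeps a CHECKSUM_COMPLETE value consistent with the shorter packet.
 */
static void example_strip_header(struct sk_buff *skb)
{
	if (pskb_may_pull(skb, 4))
		skb_pull_rcsum(skb, 4);
}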
| 3451 | |
Yonghong Song | 13acc94 | 2018-03-21 16:31:03 -0700 | [diff] [blame] | 3452 | static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb) |
| 3453 | { |
| 3454 | skb_frag_t head_frag; |
| 3455 | struct page *page; |
| 3456 | |
| 3457 | page = virt_to_head_page(frag_skb->head); |
| 3458 | head_frag.page.p = page; |
| 3459 | head_frag.page_offset = frag_skb->data - |
| 3460 | (unsigned char *)page_address(page); |
| 3461 | head_frag.size = skb_headlen(frag_skb); |
| 3462 | return head_frag; |
| 3463 | } |
| 3464 | |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3465 | /** |
| 3466 | * skb_segment - Perform protocol segmentation on skb. |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 3467 | * @head_skb: buffer to segment |
Herbert Xu | 576a30e | 2006-06-27 13:22:38 -0700 | [diff] [blame] | 3468 | * @features: features for the output path (see dev->features) |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3469 | * |
| 3470 | * This function performs segmentation on the given skb. It returns |
Ben Hutchings | 4c821d7 | 2008-04-13 21:52:48 -0700 | [diff] [blame] | 3471 | * a pointer to the first in a list of new skbs for the segments. |
| 3472 | * In case of error it returns ERR_PTR(err). |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3473 | */ |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 3474 | struct sk_buff *skb_segment(struct sk_buff *head_skb, |
| 3475 | netdev_features_t features) |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3476 | { |
| 3477 | struct sk_buff *segs = NULL; |
| 3478 | struct sk_buff *tail = NULL; |
Michael S. Tsirkin | 1a4ceda | 2014-03-10 19:27:59 +0200 | [diff] [blame] | 3479 | struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list; |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 3480 | skb_frag_t *frag = skb_shinfo(head_skb)->frags; |
| 3481 | unsigned int mss = skb_shinfo(head_skb)->gso_size; |
| 3482 | unsigned int doffset = head_skb->data - skb_mac_header(head_skb); |
Michael S. Tsirkin | 1fd819e | 2014-03-10 19:28:08 +0200 | [diff] [blame] | 3483 | struct sk_buff *frag_skb = head_skb; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3484 | unsigned int offset = doffset; |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 3485 | unsigned int tnl_hlen = skb_tnl_header_len(head_skb); |
Alexander Duyck | 802ab55 | 2016-04-10 21:45:03 -0400 | [diff] [blame] | 3486 | unsigned int partial_segs = 0; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3487 | unsigned int headroom; |
Alexander Duyck | 802ab55 | 2016-04-10 21:45:03 -0400 | [diff] [blame] | 3488 | unsigned int len = head_skb->len; |
Pravin B Shelar | ec5f061 | 2013-03-07 09:28:01 +0000 | [diff] [blame] | 3489 | __be16 proto; |
Alexander Duyck | 36c9838 | 2016-05-02 09:38:18 -0700 | [diff] [blame] | 3490 | bool csum, sg; |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 3491 | int nfrags = skb_shinfo(head_skb)->nr_frags; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3492 | int err = -ENOMEM; |
| 3493 | int i = 0; |
| 3494 | int pos; |
Vlad Yasevich | 53d6471 | 2014-03-27 17:26:18 -0400 | [diff] [blame] | 3495 | int dummy; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3496 | |
Wei-Chun Chao | 5882a07 | 2014-06-08 23:48:54 -0700 | [diff] [blame] | 3497 | __skb_push(head_skb, doffset); |
Vlad Yasevich | 53d6471 | 2014-03-27 17:26:18 -0400 | [diff] [blame] | 3498 | proto = skb_network_protocol(head_skb, &dummy); |
Pravin B Shelar | ec5f061 | 2013-03-07 09:28:01 +0000 | [diff] [blame] | 3499 | if (unlikely(!proto)) |
| 3500 | return ERR_PTR(-EINVAL); |
| 3501 | |
Alexander Duyck | 36c9838 | 2016-05-02 09:38:18 -0700 | [diff] [blame] | 3502 | sg = !!(features & NETIF_F_SG); |
Alexander Duyck | f245d07 | 2016-02-05 15:28:26 -0800 | [diff] [blame] | 3503 | csum = !!can_checksum_protocol(features, proto); |
Tom Herbert | 7e2b10c | 2014-06-04 17:20:02 -0700 | [diff] [blame] | 3504 | |
Steffen Klassert | 07b26c9 | 2016-09-19 12:58:47 +0200 | [diff] [blame] | 3505 | if (sg && csum && (mss != GSO_BY_FRAGS)) { |
| 3506 | if (!(features & NETIF_F_GSO_PARTIAL)) { |
| 3507 | struct sk_buff *iter; |
Ilan Tayari | 43170c4 | 2017-04-19 21:26:07 +0300 | [diff] [blame] | 3508 | unsigned int frag_len; |
Steffen Klassert | 07b26c9 | 2016-09-19 12:58:47 +0200 | [diff] [blame] | 3509 | |
| 3510 | if (!list_skb || |
| 3511 | !net_gso_ok(features, skb_shinfo(head_skb)->gso_type)) |
| 3512 | goto normal; |
| 3513 | |
Ilan Tayari | 43170c4 | 2017-04-19 21:26:07 +0300 | [diff] [blame] | 3514 | /* If we get here then all the required |
| 3515 | * GSO features except frag_list are supported. |
| 3516 | * Try to split the SKB into multiple GSO SKBs
| 3517 | * with no frag_list. |
| 3518 | * Currently we can do that only when the buffers don't |
| 3519 | * have a linear part and all the buffers except |
| 3520 | * the last are of the same length. |
Steffen Klassert | 07b26c9 | 2016-09-19 12:58:47 +0200 | [diff] [blame] | 3521 | */ |
Ilan Tayari | 43170c4 | 2017-04-19 21:26:07 +0300 | [diff] [blame] | 3522 | frag_len = list_skb->len; |
Steffen Klassert | 07b26c9 | 2016-09-19 12:58:47 +0200 | [diff] [blame] | 3523 | skb_walk_frags(head_skb, iter) { |
Ilan Tayari | 43170c4 | 2017-04-19 21:26:07 +0300 | [diff] [blame] | 3524 | if (frag_len != iter->len && iter->next) |
| 3525 | goto normal; |
Ilan Tayari | eaffadb | 2017-04-08 02:07:08 +0300 | [diff] [blame] | 3526 | if (skb_headlen(iter) && !iter->head_frag) |
Steffen Klassert | 07b26c9 | 2016-09-19 12:58:47 +0200 | [diff] [blame] | 3527 | goto normal; |
| 3528 | |
| 3529 | len -= iter->len; |
| 3530 | } |
Ilan Tayari | 43170c4 | 2017-04-19 21:26:07 +0300 | [diff] [blame] | 3531 | |
| 3532 | if (len != frag_len) |
| 3533 | goto normal; |
Steffen Klassert | 07b26c9 | 2016-09-19 12:58:47 +0200 | [diff] [blame] | 3534 | } |
| 3535 | |
| 3536 | /* GSO partial only requires that we trim off any excess that |
| 3537 | * doesn't fit into an MSS sized block, so take care of that |
| 3538 | * now. |
| 3539 | */ |
Alexander Duyck | 802ab55 | 2016-04-10 21:45:03 -0400 | [diff] [blame] | 3540 | partial_segs = len / mss; |
Alexander Duyck | d7fb5a8 | 2016-05-02 09:38:12 -0700 | [diff] [blame] | 3541 | if (partial_segs > 1) |
| 3542 | mss *= partial_segs; |
| 3543 | else |
| 3544 | partial_segs = 0; |
Alexander Duyck | 802ab55 | 2016-04-10 21:45:03 -0400 | [diff] [blame] | 3545 | } |
| 3546 | |
Steffen Klassert | 07b26c9 | 2016-09-19 12:58:47 +0200 | [diff] [blame] | 3547 | normal: |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 3548 | headroom = skb_headroom(head_skb); |
| 3549 | pos = skb_headlen(head_skb); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3550 | |
| 3551 | do { |
| 3552 | struct sk_buff *nskb; |
Michael S. Tsirkin | 8cb1990 | 2014-03-10 18:29:04 +0200 | [diff] [blame] | 3553 | skb_frag_t *nskb_frag; |
Herbert Xu | c8884ed | 2006-10-29 15:59:41 -0800 | [diff] [blame] | 3554 | int hsize; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3555 | int size; |
| 3556 | |
Marcelo Ricardo Leitner | 3953c46 | 2016-06-02 15:05:40 -0300 | [diff] [blame] | 3557 | if (unlikely(mss == GSO_BY_FRAGS)) { |
| 3558 | len = list_skb->len; |
| 3559 | } else { |
| 3560 | len = head_skb->len - offset; |
| 3561 | if (len > mss) |
| 3562 | len = mss; |
| 3563 | } |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3564 | |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 3565 | hsize = skb_headlen(head_skb) - offset; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3566 | if (hsize < 0) |
| 3567 | hsize = 0; |
Herbert Xu | c8884ed | 2006-10-29 15:59:41 -0800 | [diff] [blame] | 3568 | if (hsize > len || !sg) |
| 3569 | hsize = len; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3570 | |
Michael S. Tsirkin | 1a4ceda | 2014-03-10 19:27:59 +0200 | [diff] [blame] | 3571 | if (!hsize && i >= nfrags && skb_headlen(list_skb) && |
| 3572 | (skb_headlen(list_skb) == len || sg)) { |
| 3573 | BUG_ON(skb_headlen(list_skb) > len); |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 3574 | |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 3575 | i = 0; |
Michael S. Tsirkin | 1a4ceda | 2014-03-10 19:27:59 +0200 | [diff] [blame] | 3576 | nfrags = skb_shinfo(list_skb)->nr_frags; |
| 3577 | frag = skb_shinfo(list_skb)->frags; |
Michael S. Tsirkin | 1fd819e | 2014-03-10 19:28:08 +0200 | [diff] [blame] | 3578 | frag_skb = list_skb; |
Michael S. Tsirkin | 1a4ceda | 2014-03-10 19:27:59 +0200 | [diff] [blame] | 3579 | pos += skb_headlen(list_skb); |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 3580 | |
| 3581 | while (pos < offset + len) { |
| 3582 | BUG_ON(i >= nfrags); |
| 3583 | |
Michael S. Tsirkin | 4e1beba | 2014-03-10 18:29:14 +0200 | [diff] [blame] | 3584 | size = skb_frag_size(frag); |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 3585 | if (pos + size > offset + len) |
| 3586 | break; |
| 3587 | |
| 3588 | i++; |
| 3589 | pos += size; |
Michael S. Tsirkin | 4e1beba | 2014-03-10 18:29:14 +0200 | [diff] [blame] | 3590 | frag++; |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 3591 | } |
| 3592 | |
Michael S. Tsirkin | 1a4ceda | 2014-03-10 19:27:59 +0200 | [diff] [blame] | 3593 | nskb = skb_clone(list_skb, GFP_ATOMIC); |
| 3594 | list_skb = list_skb->next; |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 3595 | |
| 3596 | if (unlikely(!nskb)) |
| 3597 | goto err; |
| 3598 | |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 3599 | if (unlikely(pskb_trim(nskb, len))) { |
| 3600 | kfree_skb(nskb); |
| 3601 | goto err; |
| 3602 | } |
| 3603 | |
Alexander Duyck | ec47ea8 | 2012-05-04 14:26:56 +0000 | [diff] [blame] | 3604 | hsize = skb_end_offset(nskb); |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 3605 | if (skb_cow_head(nskb, doffset + headroom)) { |
| 3606 | kfree_skb(nskb); |
| 3607 | goto err; |
| 3608 | } |
| 3609 | |
Alexander Duyck | ec47ea8 | 2012-05-04 14:26:56 +0000 | [diff] [blame] | 3610 | nskb->truesize += skb_end_offset(nskb) - hsize; |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 3611 | skb_release_head_state(nskb); |
| 3612 | __skb_push(nskb, doffset); |
| 3613 | } else { |
Mel Gorman | c93bdd0 | 2012-07-31 16:44:19 -0700 | [diff] [blame] | 3614 | nskb = __alloc_skb(hsize + doffset + headroom, |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 3615 | GFP_ATOMIC, skb_alloc_rx_flag(head_skb), |
Mel Gorman | c93bdd0 | 2012-07-31 16:44:19 -0700 | [diff] [blame] | 3616 | NUMA_NO_NODE); |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 3617 | |
| 3618 | if (unlikely(!nskb)) |
| 3619 | goto err; |
| 3620 | |
| 3621 | skb_reserve(nskb, headroom); |
| 3622 | __skb_put(nskb, doffset); |
| 3623 | } |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3624 | |
| 3625 | if (segs) |
| 3626 | tail->next = nskb; |
| 3627 | else |
| 3628 | segs = nskb; |
| 3629 | tail = nskb; |
| 3630 | |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 3631 | __copy_skb_header(nskb, head_skb); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3632 | |
Eric Dumazet | 030737b | 2013-10-19 11:42:54 -0700 | [diff] [blame] | 3633 | skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom); |
Vlad Yasevich | fcdfe3a | 2014-07-31 10:33:06 -0400 | [diff] [blame] | 3634 | skb_reset_mac_len(nskb); |
Pravin B Shelar | 68c3316 | 2013-02-14 14:02:41 +0000 | [diff] [blame] | 3635 | |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 3636 | skb_copy_from_linear_data_offset(head_skb, -tnl_hlen, |
Pravin B Shelar | 68c3316 | 2013-02-14 14:02:41 +0000 | [diff] [blame] | 3637 | nskb->data - tnl_hlen, |
| 3638 | doffset + tnl_hlen); |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 3639 | |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 3640 | if (nskb->len == len + doffset) |
Simon Horman | 1cdbcb7 | 2013-05-19 15:46:49 +0000 | [diff] [blame] | 3641 | goto perform_csum_check; |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 3642 | |
Alexander Duyck | 7fbeffe | 2016-02-05 15:27:43 -0800 | [diff] [blame] | 3643 | if (!sg) { |
| 3644 | if (!nskb->remcsum_offload) |
| 3645 | nskb->ip_summed = CHECKSUM_NONE; |
Alexander Duyck | 7644345 | 2016-02-05 15:27:37 -0800 | [diff] [blame] | 3646 | SKB_GSO_CB(nskb)->csum = |
| 3647 | skb_copy_and_csum_bits(head_skb, offset, |
| 3648 | skb_put(nskb, len), |
| 3649 | len, 0); |
Tom Herbert | 7e2b10c | 2014-06-04 17:20:02 -0700 | [diff] [blame] | 3650 | SKB_GSO_CB(nskb)->csum_start = |
Alexander Duyck | 7644345 | 2016-02-05 15:27:37 -0800 | [diff] [blame] | 3651 | skb_headroom(nskb) + doffset; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3652 | continue; |
| 3653 | } |
| 3654 | |
Michael S. Tsirkin | 8cb1990 | 2014-03-10 18:29:04 +0200 | [diff] [blame] | 3655 | nskb_frag = skb_shinfo(nskb)->frags; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3656 | |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 3657 | skb_copy_from_linear_data_offset(head_skb, offset, |
Arnaldo Carvalho de Melo | d626f62 | 2007-03-27 18:55:52 -0300 | [diff] [blame] | 3658 | skb_put(nskb, hsize), hsize); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3659 | |
Willem de Bruijn | fff8803 | 2017-06-08 11:35:03 -0400 | [diff] [blame] | 3660 | skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags & |
| 3661 | SKBTX_SHARED_FRAG; |
Eric Dumazet | cef401d | 2013-01-25 20:34:37 +0000 | [diff] [blame] | 3662 | |
Willem de Bruijn | bf5c25d | 2017-12-22 19:00:17 -0500 | [diff] [blame] | 3663 | if (skb_orphan_frags(frag_skb, GFP_ATOMIC) || |
| 3664 | skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC)) |
| 3665 | goto err; |
| 3666 | |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 3667 | while (pos < offset + len) { |
| 3668 | if (i >= nfrags) { |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 3669 | i = 0; |
Michael S. Tsirkin | 1a4ceda | 2014-03-10 19:27:59 +0200 | [diff] [blame] | 3670 | nfrags = skb_shinfo(list_skb)->nr_frags; |
| 3671 | frag = skb_shinfo(list_skb)->frags; |
Michael S. Tsirkin | 1fd819e | 2014-03-10 19:28:08 +0200 | [diff] [blame] | 3672 | frag_skb = list_skb; |
Yonghong Song | 13acc94 | 2018-03-21 16:31:03 -0700 | [diff] [blame] | 3673 | if (!skb_headlen(list_skb)) { |
| 3674 | BUG_ON(!nfrags); |
| 3675 | } else { |
| 3676 | BUG_ON(!list_skb->head_frag); |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 3677 | |
Yonghong Song | 13acc94 | 2018-03-21 16:31:03 -0700 | [diff] [blame] | 3678 | /* to make room for head_frag. */ |
| 3679 | i--; |
| 3680 | frag--; |
| 3681 | } |
Willem de Bruijn | bf5c25d | 2017-12-22 19:00:17 -0500 | [diff] [blame] | 3682 | if (skb_orphan_frags(frag_skb, GFP_ATOMIC) || |
| 3683 | skb_zerocopy_clone(nskb, frag_skb, |
| 3684 | GFP_ATOMIC)) |
| 3685 | goto err; |
| 3686 | |
Michael S. Tsirkin | 1a4ceda | 2014-03-10 19:27:59 +0200 | [diff] [blame] | 3687 | list_skb = list_skb->next; |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 3688 | } |
| 3689 | |
| 3690 | if (unlikely(skb_shinfo(nskb)->nr_frags >= |
| 3691 | MAX_SKB_FRAGS)) { |
| 3692 | net_warn_ratelimited( |
| 3693 | "skb_segment: too many frags: %u %u\n", |
| 3694 | pos, mss); |
Eric Dumazet | ff907a1 | 2018-07-19 16:04:38 -0700 | [diff] [blame] | 3695 | err = -EINVAL; |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 3696 | goto err; |
| 3697 | } |
| 3698 | |
Yonghong Song | 13acc94 | 2018-03-21 16:31:03 -0700 | [diff] [blame] | 3699 | *nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag; |
Michael S. Tsirkin | 8cb1990 | 2014-03-10 18:29:04 +0200 | [diff] [blame] | 3700 | __skb_frag_ref(nskb_frag); |
| 3701 | size = skb_frag_size(nskb_frag); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3702 | |
| 3703 | if (pos < offset) { |
Michael S. Tsirkin | 8cb1990 | 2014-03-10 18:29:04 +0200 | [diff] [blame] | 3704 | nskb_frag->page_offset += offset - pos; |
| 3705 | skb_frag_size_sub(nskb_frag, offset - pos); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3706 | } |
| 3707 | |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 3708 | skb_shinfo(nskb)->nr_frags++; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3709 | |
| 3710 | if (pos + size <= offset + len) { |
| 3711 | i++; |
Michael S. Tsirkin | 4e1beba | 2014-03-10 18:29:14 +0200 | [diff] [blame] | 3712 | frag++; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3713 | pos += size; |
| 3714 | } else { |
Michael S. Tsirkin | 8cb1990 | 2014-03-10 18:29:04 +0200 | [diff] [blame] | 3715 | skb_frag_size_sub(nskb_frag, pos + size - (offset + len)); |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 3716 | goto skip_fraglist; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3717 | } |
| 3718 | |
Michael S. Tsirkin | 8cb1990 | 2014-03-10 18:29:04 +0200 | [diff] [blame] | 3719 | nskb_frag++; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3720 | } |
| 3721 | |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 3722 | skip_fraglist: |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3723 | nskb->data_len = len - hsize; |
| 3724 | nskb->len += nskb->data_len; |
| 3725 | nskb->truesize += nskb->data_len; |
Pravin B Shelar | ec5f061 | 2013-03-07 09:28:01 +0000 | [diff] [blame] | 3726 | |
Simon Horman | 1cdbcb7 | 2013-05-19 15:46:49 +0000 | [diff] [blame] | 3727 | perform_csum_check: |
Alexander Duyck | 7fbeffe | 2016-02-05 15:27:43 -0800 | [diff] [blame] | 3728 | if (!csum) { |
Eric Dumazet | ff907a1 | 2018-07-19 16:04:38 -0700 | [diff] [blame] | 3729 | if (skb_has_shared_frag(nskb) && |
| 3730 | __skb_linearize(nskb)) |
| 3731 | goto err; |
| 3732 | |
Alexander Duyck | 7fbeffe | 2016-02-05 15:27:43 -0800 | [diff] [blame] | 3733 | if (!nskb->remcsum_offload) |
| 3734 | nskb->ip_summed = CHECKSUM_NONE; |
Alexander Duyck | 7644345 | 2016-02-05 15:27:37 -0800 | [diff] [blame] | 3735 | SKB_GSO_CB(nskb)->csum = |
| 3736 | skb_checksum(nskb, doffset, |
| 3737 | nskb->len - doffset, 0); |
Tom Herbert | 7e2b10c | 2014-06-04 17:20:02 -0700 | [diff] [blame] | 3738 | SKB_GSO_CB(nskb)->csum_start = |
Alexander Duyck | 7644345 | 2016-02-05 15:27:37 -0800 | [diff] [blame] | 3739 | skb_headroom(nskb) + doffset; |
Pravin B Shelar | ec5f061 | 2013-03-07 09:28:01 +0000 | [diff] [blame] | 3740 | } |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 3741 | } while ((offset += len) < head_skb->len); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3742 | |
Eric Dumazet | bec3cfd | 2014-10-03 20:59:19 -0700 | [diff] [blame] | 3743 | /* Some callers want to get the end of the list. |
| 3744 | * Put it in segs->prev to avoid walking the list. |
| 3745 | * (see validate_xmit_skb_list() for example) |
| 3746 | */ |
| 3747 | segs->prev = tail; |
Toshiaki Makita | 432c856 | 2014-10-27 10:30:51 -0700 | [diff] [blame] | 3748 | |
Alexander Duyck | 802ab55 | 2016-04-10 21:45:03 -0400 | [diff] [blame] | 3749 | if (partial_segs) { |
Steffen Klassert | 07b26c9 | 2016-09-19 12:58:47 +0200 | [diff] [blame] | 3750 | struct sk_buff *iter; |
Alexander Duyck | 802ab55 | 2016-04-10 21:45:03 -0400 | [diff] [blame] | 3751 | int type = skb_shinfo(head_skb)->gso_type; |
Steffen Klassert | 07b26c9 | 2016-09-19 12:58:47 +0200 | [diff] [blame] | 3752 | unsigned short gso_size = skb_shinfo(head_skb)->gso_size; |
Alexander Duyck | 802ab55 | 2016-04-10 21:45:03 -0400 | [diff] [blame] | 3753 | |
| 3754 | /* Update type to add partial and then remove dodgy if set */ |
Steffen Klassert | 07b26c9 | 2016-09-19 12:58:47 +0200 | [diff] [blame] | 3755 | type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL; |
Alexander Duyck | 802ab55 | 2016-04-10 21:45:03 -0400 | [diff] [blame] | 3756 | type &= ~SKB_GSO_DODGY; |
| 3757 | |
| 3758 | /* Update GSO info and prepare to start updating headers on |
| 3759 | * our way back down the stack of protocols. |
| 3760 | */ |
Steffen Klassert | 07b26c9 | 2016-09-19 12:58:47 +0200 | [diff] [blame] | 3761 | for (iter = segs; iter; iter = iter->next) { |
| 3762 | skb_shinfo(iter)->gso_size = gso_size; |
| 3763 | skb_shinfo(iter)->gso_segs = partial_segs; |
| 3764 | skb_shinfo(iter)->gso_type = type; |
| 3765 | SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset; |
| 3766 | } |
| 3767 | |
| 3768 | if (tail->len - doffset <= gso_size) |
| 3769 | skb_shinfo(tail)->gso_size = 0; |
| 3770 | else if (tail != segs) |
| 3771 | skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size); |
Alexander Duyck | 802ab55 | 2016-04-10 21:45:03 -0400 | [diff] [blame] | 3772 | } |
| 3773 | |
Toshiaki Makita | 432c856 | 2014-10-27 10:30:51 -0700 | [diff] [blame] | 3774 | /* The following permits correct backpressure for protocols
| 3775 | * using skb_set_owner_w().
| 3776 | * The idea is to transfer ownership from head_skb to the last segment.
| 3777 | */ |
| 3778 | if (head_skb->destructor == sock_wfree) { |
| 3779 | swap(tail->truesize, head_skb->truesize); |
| 3780 | swap(tail->destructor, head_skb->destructor); |
| 3781 | swap(tail->sk, head_skb->sk); |
| 3782 | } |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3783 | return segs; |
| 3784 | |
| 3785 | err: |
Eric Dumazet | 289dccb | 2013-12-20 14:29:08 -0800 | [diff] [blame] | 3786 | kfree_skb_list(segs); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3787 | return ERR_PTR(err); |
| 3788 | } |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3789 | EXPORT_SYMBOL_GPL(skb_segment); |
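
/* Editorial usage sketch: software GSO in the style of this function's
 * callers: segment, release the original, walk the resulting list.
 * Masking out NETIF_F_SG merely forces fully linear segments for
 * illustration.
 */
static int example_soft_gso(struct sk_buff *skb, netdev_features_t features)
{
	struct sk_buff *segs, *seg, *next;

	segs = skb_segment(skb, features & ~NETIF_F_SG);
	if (IS_ERR(segs))
		return PTR_ERR(segs);

	consume_skb(skb);	/* the segments now carry the data */
	for (seg = segs; seg; seg = next) {
		next = seg->next;
		seg->next = NULL;
		kfree_skb(seg);	/* stand-in for real transmit */
	}
	return 0;
}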
| 3790 | |
David Miller | d4546c2 | 2018-06-24 14:13:49 +0900 | [diff] [blame] | 3791 | int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb) |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 3792 | { |
Eric Dumazet | 8a29111 | 2013-10-08 09:02:23 -0700 | [diff] [blame] | 3793 | struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb); |
Herbert Xu | 67147ba | 2009-05-26 18:50:22 +0000 | [diff] [blame] | 3794 | unsigned int offset = skb_gro_offset(skb); |
| 3795 | unsigned int headlen = skb_headlen(skb); |
Eric Dumazet | 8a29111 | 2013-10-08 09:02:23 -0700 | [diff] [blame] | 3796 | unsigned int len = skb_gro_len(skb); |
Eric Dumazet | 715dc1f | 2012-05-02 23:33:21 +0000 | [diff] [blame] | 3797 | unsigned int delta_truesize; |
David Miller | d4546c2 | 2018-06-24 14:13:49 +0900 | [diff] [blame] | 3798 | struct sk_buff *lp; |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 3799 | |
Eric Dumazet | 8a29111 | 2013-10-08 09:02:23 -0700 | [diff] [blame] | 3800 | if (unlikely(p->len + len >= 65536)) |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 3801 | return -E2BIG; |
| 3802 | |
Eric Dumazet | 29e9824 | 2014-05-16 11:34:37 -0700 | [diff] [blame] | 3803 | lp = NAPI_GRO_CB(p)->last; |
Eric Dumazet | 8a29111 | 2013-10-08 09:02:23 -0700 | [diff] [blame] | 3804 | pinfo = skb_shinfo(lp); |
| 3805 | |
| 3806 | if (headlen <= offset) { |
Herbert Xu | 42da699 | 2009-05-26 18:50:19 +0000 | [diff] [blame] | 3807 | skb_frag_t *frag; |
Herbert Xu | 66e92fc | 2009-05-26 18:50:32 +0000 | [diff] [blame] | 3808 | skb_frag_t *frag2; |
Herbert Xu | 9aaa156 | 2009-05-26 18:50:33 +0000 | [diff] [blame] | 3809 | int i = skbinfo->nr_frags; |
| 3810 | int nr_frags = pinfo->nr_frags + i; |
Herbert Xu | 42da699 | 2009-05-26 18:50:19 +0000 | [diff] [blame] | 3811 | |
Herbert Xu | 66e92fc | 2009-05-26 18:50:32 +0000 | [diff] [blame] | 3812 | if (nr_frags > MAX_SKB_FRAGS) |
Eric Dumazet | 8a29111 | 2013-10-08 09:02:23 -0700 | [diff] [blame] | 3813 | goto merge; |
Herbert Xu | 81705ad | 2009-01-29 14:19:51 +0000 | [diff] [blame] | 3814 | |
Eric Dumazet | 8a29111 | 2013-10-08 09:02:23 -0700 | [diff] [blame] | 3815 | offset -= headlen; |
Herbert Xu | 9aaa156 | 2009-05-26 18:50:33 +0000 | [diff] [blame] | 3816 | pinfo->nr_frags = nr_frags; |
| 3817 | skbinfo->nr_frags = 0; |
Herbert Xu | f557206 | 2009-01-14 20:40:03 -0800 | [diff] [blame] | 3818 | |
Herbert Xu | 9aaa156 | 2009-05-26 18:50:33 +0000 | [diff] [blame] | 3819 | frag = pinfo->frags + nr_frags; |
| 3820 | frag2 = skbinfo->frags + i; |
Herbert Xu | 66e92fc | 2009-05-26 18:50:32 +0000 | [diff] [blame] | 3821 | do { |
| 3822 | *--frag = *--frag2; |
| 3823 | } while (--i); |
| 3824 | |
| 3825 | frag->page_offset += offset; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 3826 | skb_frag_size_sub(frag, offset); |
Herbert Xu | 66e92fc | 2009-05-26 18:50:32 +0000 | [diff] [blame] | 3827 | |
Eric Dumazet | 715dc1f | 2012-05-02 23:33:21 +0000 | [diff] [blame] | 3828 | /* all fragments truesize : remove (head size + sk_buff) */ |
Alexander Duyck | ec47ea8 | 2012-05-04 14:26:56 +0000 | [diff] [blame] | 3829 | delta_truesize = skb->truesize - |
| 3830 | SKB_TRUESIZE(skb_end_offset(skb)); |
Eric Dumazet | 715dc1f | 2012-05-02 23:33:21 +0000 | [diff] [blame] | 3831 | |
Herbert Xu | f557206 | 2009-01-14 20:40:03 -0800 | [diff] [blame] | 3832 | skb->truesize -= skb->data_len; |
| 3833 | skb->len -= skb->data_len; |
| 3834 | skb->data_len = 0; |
| 3835 | |
Eric Dumazet | 715dc1f | 2012-05-02 23:33:21 +0000 | [diff] [blame] | 3836 | NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE; |
Herbert Xu | 5d38a07 | 2009-01-04 16:13:40 -0800 | [diff] [blame] | 3837 | goto done; |
Eric Dumazet | d7e8883 | 2012-04-30 08:10:34 +0000 | [diff] [blame] | 3838 | } else if (skb->head_frag) { |
| 3839 | int nr_frags = pinfo->nr_frags; |
| 3840 | skb_frag_t *frag = pinfo->frags + nr_frags; |
| 3841 | struct page *page = virt_to_head_page(skb->head); |
| 3842 | unsigned int first_size = headlen - offset; |
| 3843 | unsigned int first_offset; |
| 3844 | |
| 3845 | if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS) |
Eric Dumazet | 8a29111 | 2013-10-08 09:02:23 -0700 | [diff] [blame] | 3846 | goto merge; |
Eric Dumazet | d7e8883 | 2012-04-30 08:10:34 +0000 | [diff] [blame] | 3847 | |
| 3848 | first_offset = skb->data - |
| 3849 | (unsigned char *)page_address(page) + |
| 3850 | offset; |
| 3851 | |
| 3852 | pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags; |
| 3853 | |
| 3854 | frag->page.p = page; |
| 3855 | frag->page_offset = first_offset; |
| 3856 | skb_frag_size_set(frag, first_size); |
| 3857 | |
| 3858 | memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags); |
| 3859 | /* We don't need to clear skbinfo->nr_frags here */
| 3860 | |
Eric Dumazet | 715dc1f | 2012-05-02 23:33:21 +0000 | [diff] [blame] | 3861 | delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); |
Eric Dumazet | d7e8883 | 2012-04-30 08:10:34 +0000 | [diff] [blame] | 3862 | NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; |
| 3863 | goto done; |
Eric Dumazet | 8a29111 | 2013-10-08 09:02:23 -0700 | [diff] [blame] | 3864 | } |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 3865 | |
| 3866 | merge: |
Eric Dumazet | 715dc1f | 2012-05-02 23:33:21 +0000 | [diff] [blame] | 3867 | delta_truesize = skb->truesize; |
Herbert Xu | 67147ba | 2009-05-26 18:50:22 +0000 | [diff] [blame] | 3868 | if (offset > headlen) { |
Michal Schmidt | d1dc7ab | 2011-01-24 12:08:48 +0000 | [diff] [blame] | 3869 | unsigned int eat = offset - headlen; |
| 3870 | |
| 3871 | skbinfo->frags[0].page_offset += eat; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 3872 | skb_frag_size_sub(&skbinfo->frags[0], eat); |
Michal Schmidt | d1dc7ab | 2011-01-24 12:08:48 +0000 | [diff] [blame] | 3873 | skb->data_len -= eat; |
| 3874 | skb->len -= eat; |
Herbert Xu | 67147ba | 2009-05-26 18:50:22 +0000 | [diff] [blame] | 3875 | offset = headlen; |
Herbert Xu | 5603502 | 2009-02-05 21:26:52 -0800 | [diff] [blame] | 3876 | } |
| 3877 | |
Herbert Xu | 67147ba | 2009-05-26 18:50:22 +0000 | [diff] [blame] | 3878 | __skb_pull(skb, offset); |
Herbert Xu | 5603502 | 2009-02-05 21:26:52 -0800 | [diff] [blame] | 3879 | |
Eric Dumazet | 29e9824 | 2014-05-16 11:34:37 -0700 | [diff] [blame] | 3880 | if (NAPI_GRO_CB(p)->last == p) |
Eric Dumazet | 8a29111 | 2013-10-08 09:02:23 -0700 | [diff] [blame] | 3881 | skb_shinfo(p)->frag_list = skb; |
| 3882 | else |
| 3883 | NAPI_GRO_CB(p)->last->next = skb; |
Eric Dumazet | c3c7c25 | 2012-12-06 13:54:59 +0000 | [diff] [blame] | 3884 | NAPI_GRO_CB(p)->last = skb; |
Eric Dumazet | f4a775d | 2014-09-22 16:29:32 -0700 | [diff] [blame] | 3885 | __skb_header_release(skb); |
Eric Dumazet | 8a29111 | 2013-10-08 09:02:23 -0700 | [diff] [blame] | 3886 | lp = p; |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 3887 | |
Herbert Xu | 5d38a07 | 2009-01-04 16:13:40 -0800 | [diff] [blame] | 3888 | done: |
| 3889 | NAPI_GRO_CB(p)->count++; |
Herbert Xu | 37fe473 | 2009-01-17 19:48:13 +0000 | [diff] [blame] | 3890 | p->data_len += len; |
Eric Dumazet | 715dc1f | 2012-05-02 23:33:21 +0000 | [diff] [blame] | 3891 | p->truesize += delta_truesize; |
Herbert Xu | 37fe473 | 2009-01-17 19:48:13 +0000 | [diff] [blame] | 3892 | p->len += len; |
Eric Dumazet | 8a29111 | 2013-10-08 09:02:23 -0700 | [diff] [blame] | 3893 | if (lp != p) { |
| 3894 | lp->data_len += len; |
| 3895 | lp->truesize += delta_truesize; |
| 3896 | lp->len += len; |
| 3897 | } |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 3898 | NAPI_GRO_CB(skb)->same_flow = 1; |
| 3899 | return 0; |
| 3900 | } |
Marcelo Ricardo Leitner | 57c05650 | 2016-06-02 15:05:39 -0300 | [diff] [blame] | 3901 | EXPORT_SYMBOL_GPL(skb_gro_receive); |
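
/* Usage sketch, assuming a hypothetical ->gro_receive() callback (the names
 * 'head' and 'flow_matches' are illustrative, not part of this file):
 *
 *	list_for_each_entry(p, head, list) {
 *		if (!NAPI_GRO_CB(p)->same_flow)
 *			continue;
 *		if (flow_matches(p, skb) && !skb_gro_receive(p, skb))
 *			return NULL;
 *	}
 *
 * A zero return means the skb was absorbed into p's frags or frag_list and
 * must no longer be used by the caller; -E2BIG means the merged packet
 * would reach 64KB and p should be flushed instead.
 */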
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 3902 | |
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 3903 | #ifdef CONFIG_SKB_EXTENSIONS |
| 3904 | #define SKB_EXT_ALIGN_VALUE 8 |
| 3905 | #define SKB_EXT_CHUNKSIZEOF(x) (ALIGN((sizeof(x)), SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE) |
| 3906 | |
| 3907 | static const u8 skb_ext_type_len[] = { |
| 3908 | #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) |
| 3909 | [SKB_EXT_BRIDGE_NF] = SKB_EXT_CHUNKSIZEOF(struct nf_bridge_info), |
| 3910 | #endif |
Florian Westphal | 4165079 | 2018-12-18 17:15:27 +0100 | [diff] [blame] | 3911 | #ifdef CONFIG_XFRM |
| 3912 | [SKB_EXT_SEC_PATH] = SKB_EXT_CHUNKSIZEOF(struct sec_path), |
| 3913 | #endif |
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 3914 | }; |
| 3915 | |
| 3916 | static __always_inline unsigned int skb_ext_total_length(void) |
| 3917 | { |
| 3918 | return SKB_EXT_CHUNKSIZEOF(struct skb_ext) + |
| 3919 | #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) |
| 3920 | skb_ext_type_len[SKB_EXT_BRIDGE_NF] + |
| 3921 | #endif |
Florian Westphal | 4165079 | 2018-12-18 17:15:27 +0100 | [diff] [blame] | 3922 | #ifdef CONFIG_XFRM |
| 3923 | skb_ext_type_len[SKB_EXT_SEC_PATH] + |
| 3924 | #endif |
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 3925 | 0; |
| 3926 | } |
| 3927 | |
| 3928 | static void skb_extensions_init(void) |
| 3929 | { |
| 3930 | BUILD_BUG_ON(SKB_EXT_NUM >= 8); |
| 3931 | BUILD_BUG_ON(skb_ext_total_length() > 255); |
| 3932 | |
| 3933 | skbuff_ext_cache = kmem_cache_create("skbuff_ext_cache", |
| 3934 | SKB_EXT_ALIGN_VALUE * skb_ext_total_length(), |
| 3935 | 0, |
| 3936 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, |
| 3937 | NULL); |
| 3938 | } |
| 3939 | #else |
| 3940 | static void skb_extensions_init(void) {} |
| 3941 | #endif |
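
/* A worked example of the sizing above (illustrative; the exact numbers
 * depend on which extensions the configuration enables): with
 * SKB_EXT_ALIGN_VALUE == 8, a 28-byte extension occupies
 * ALIGN(28, 8) / 8 = 4 chunks. skb_ext_total_length() adds up the chunks
 * for struct skb_ext itself plus every enabled extension, so each object in
 * the cache created by skb_extensions_init() is
 * 8 * skb_ext_total_length() bytes, i.e. one allocation big enough to hold
 * all extensions of one skb at once.
 */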
| 3942 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3943 | void __init skb_init(void) |
| 3944 | { |
Kees Cook | 79a8a64 | 2018-02-07 17:44:38 -0800 | [diff] [blame] | 3945 | skbuff_head_cache = kmem_cache_create_usercopy("skbuff_head_cache", |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3946 | sizeof(struct sk_buff), |
| 3947 | 0, |
Alexey Dobriyan | e5d679f33 | 2006-08-26 19:25:52 -0700 | [diff] [blame] | 3948 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, |
Kees Cook | 79a8a64 | 2018-02-07 17:44:38 -0800 | [diff] [blame] | 3949 | offsetof(struct sk_buff, cb), |
| 3950 | sizeof_field(struct sk_buff, cb), |
Paul Mundt | 20c2df8 | 2007-07-20 10:11:58 +0900 | [diff] [blame] | 3951 | NULL); |
David S. Miller | d179cd1 | 2005-08-17 14:57:30 -0700 | [diff] [blame] | 3952 | skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", |
Eric Dumazet | d0bf4a9 | 2014-09-29 13:29:15 -0700 | [diff] [blame] | 3953 | sizeof(struct sk_buff_fclones), |
David S. Miller | d179cd1 | 2005-08-17 14:57:30 -0700 | [diff] [blame] | 3954 | 0, |
Alexey Dobriyan | e5d679f33 | 2006-08-26 19:25:52 -0700 | [diff] [blame] | 3955 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, |
Paul Mundt | 20c2df8 | 2007-07-20 10:11:58 +0900 | [diff] [blame] | 3956 | NULL); |
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 3957 | skb_extensions_init(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3958 | } |
| 3959 | |
David S. Miller | 51c739d | 2007-10-30 21:29:29 -0700 | [diff] [blame] | 3960 | static int |
Jason A. Donenfeld | 48a1df6 | 2017-06-04 04:16:22 +0200 | [diff] [blame] | 3961 | __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len, |
| 3962 | unsigned int recursion_level) |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3963 | { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 3964 | int start = skb_headlen(skb); |
| 3965 | int i, copy = start - offset; |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 3966 | struct sk_buff *frag_iter; |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3967 | int elt = 0; |
| 3968 | |
Jason A. Donenfeld | 48a1df6 | 2017-06-04 04:16:22 +0200 | [diff] [blame] | 3969 | if (unlikely(recursion_level >= 24)) |
| 3970 | return -EMSGSIZE; |
| 3971 | |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3972 | if (copy > 0) { |
| 3973 | if (copy > len) |
| 3974 | copy = len; |
Jens Axboe | 642f14903 | 2007-10-24 11:20:47 +0200 | [diff] [blame] | 3975 | sg_set_buf(sg, skb->data + offset, copy); |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3976 | elt++; |
| 3977 | if ((len -= copy) == 0) |
| 3978 | return elt; |
| 3979 | offset += copy; |
| 3980 | } |
| 3981 | |
| 3982 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 3983 | int end; |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3984 | |
Ilpo Järvinen | 547b792 | 2008-07-25 21:43:18 -0700 | [diff] [blame] | 3985 | WARN_ON(start > offset + len); |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 3986 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 3987 | end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3988 | if ((copy = end - offset) > 0) { |
| 3989 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
Jason A. Donenfeld | 48a1df6 | 2017-06-04 04:16:22 +0200 | [diff] [blame] | 3990 | if (unlikely(elt && sg_is_last(&sg[elt - 1]))) |
| 3991 | return -EMSGSIZE; |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3992 | |
| 3993 | if (copy > len) |
| 3994 | copy = len; |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 3995 | sg_set_page(&sg[elt], skb_frag_page(frag), copy, |
Jens Axboe | 642f14903 | 2007-10-24 11:20:47 +0200 | [diff] [blame] | 3996 | frag->page_offset+offset-start); |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3997 | elt++; |
| 3998 | if (!(len -= copy)) |
| 3999 | return elt; |
| 4000 | offset += copy; |
| 4001 | } |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 4002 | start = end; |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4003 | } |
| 4004 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 4005 | skb_walk_frags(skb, frag_iter) { |
Jason A. Donenfeld | 48a1df6 | 2017-06-04 04:16:22 +0200 | [diff] [blame] | 4006 | int end, ret; |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4007 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 4008 | WARN_ON(start > offset + len); |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4009 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 4010 | end = start + frag_iter->len; |
| 4011 | if ((copy = end - offset) > 0) { |
Jason A. Donenfeld | 48a1df6 | 2017-06-04 04:16:22 +0200 | [diff] [blame] | 4012 | if (unlikely(elt && sg_is_last(&sg[elt - 1]))) |
| 4013 | return -EMSGSIZE; |
| 4014 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 4015 | if (copy > len) |
| 4016 | copy = len; |
Jason A. Donenfeld | 48a1df6 | 2017-06-04 04:16:22 +0200 | [diff] [blame] | 4017 | ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start, |
| 4018 | copy, recursion_level + 1); |
| 4019 | if (unlikely(ret < 0)) |
| 4020 | return ret; |
| 4021 | elt += ret; |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 4022 | if ((len -= copy) == 0) |
| 4023 | return elt; |
| 4024 | offset += copy; |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4025 | } |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 4026 | start = end; |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4027 | } |
| 4028 | BUG_ON(len); |
| 4029 | return elt; |
| 4030 | } |
| 4031 | |
Jason A. Donenfeld | 48a1df6 | 2017-06-04 04:16:22 +0200 | [diff] [blame] | 4032 | /** |
| 4033 | * skb_to_sgvec - Fill a scatter-gather list from a socket buffer |
| 4034 | * @skb: Socket buffer containing the buffers to be mapped |
| 4035 | * @sg: The scatter-gather list to map into |
| 4036 | * @offset: The offset into the buffer's contents to start mapping |
| 4037 | * @len: Length of buffer space to be mapped |
| 4038 | * |
| 4039 | * Fill the specified scatter-gather list with mappings/pointers into a |
| 4040 | * region of the buffer space attached to a socket buffer. Returns either |
| 4041 | * the number of scatterlist items used, or -EMSGSIZE if the contents |
| 4042 | * could not fit. |
| 4043 | */ |
| 4044 | int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) |
| 4045 | { |
| 4046 | int nsg = __skb_to_sgvec(skb, sg, offset, len, 0); |
| 4047 | |
| 4048 | if (nsg <= 0) |
| 4049 | return nsg; |
| 4050 | |
| 4051 | sg_mark_end(&sg[nsg - 1]); |
| 4052 | |
| 4053 | return nsg; |
| 4054 | } |
| 4055 | EXPORT_SYMBOL_GPL(skb_to_sgvec); |
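
/* Usage sketch, assuming a caller that wants to hand the whole skb to a
 * scatterlist consumer (the sg table size and 'consume_sg' are illustrative):
 *
 *	struct scatterlist sg[MAX_SKB_FRAGS + 1];
 *	int nsg;
 *
 *	sg_init_table(sg, ARRAY_SIZE(sg));
 *	nsg = skb_to_sgvec(skb, sg, 0, skb->len);
 *	if (nsg < 0)
 *		return nsg;
 *	consume_sg(sg, nsg);
 *
 * The table must be large enough for the linear head, every page fragment
 * and any frag_list skbs; otherwise -EMSGSIZE is returned.
 */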
| 4056 | |
Fan Du | 25a91d8 | 2014-01-18 09:54:23 +0800 | [diff] [blame] | 4057 | /* Compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the given
| 4058 | * sglist without marking the sg which contains the last skb data as the end.
| 4059 | * So the caller can manipulate the sg list at will when padding new data after
| 4060 | * the first call, without calling sg_unmark_end to expand the sg list.
| 4061 | * |
| 4062 | * Scenario to use skb_to_sgvec_nomark: |
| 4063 | * 1. sg_init_table |
| 4064 | * 2. skb_to_sgvec_nomark(payload1) |
| 4065 | * 3. skb_to_sgvec_nomark(payload2) |
| 4066 | * |
| 4067 | * This is equivalent to: |
| 4068 | * 1. sg_init_table |
| 4069 | * 2. skb_to_sgvec(payload1) |
| 4070 | * 3. sg_unmark_end |
| 4071 | * 4. skb_to_sgvec(payload2) |
| 4072 | * |
| 4073 | * When conditionally mapping multiple payloads, skb_to_sgvec_nomark
| 4074 | * is preferable.
| 4075 | */ |
| 4076 | int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, |
| 4077 | int offset, int len) |
| 4078 | { |
Jason A. Donenfeld | 48a1df6 | 2017-06-04 04:16:22 +0200 | [diff] [blame] | 4079 | return __skb_to_sgvec(skb, sg, offset, len, 0); |
Fan Du | 25a91d8 | 2014-01-18 09:54:23 +0800 | [diff] [blame] | 4080 | } |
| 4081 | EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark); |
| 4082 | |
David S. Miller | 51c739d | 2007-10-30 21:29:29 -0700 | [diff] [blame] | 4083 | |
David S. Miller | 51c739d | 2007-10-30 21:29:29 -0700 | [diff] [blame] | 4084 | |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4085 | /** |
| 4086 | * skb_cow_data - Check that a socket buffer's data buffers are writable |
| 4087 | * @skb: The socket buffer to check. |
| 4088 | * @tailbits: Amount of trailing space to be added |
| 4089 | * @trailer: Returned pointer to the skb where the @tailbits space begins |
| 4090 | * |
| 4091 | * Make sure that the data buffers attached to a socket buffer are |
| 4092 | * writable. If they are not, private copies are made of the data buffers |
| 4093 | * and the socket buffer is set to use these instead. |
| 4094 | * |
| 4095 | * If @tailbits is given, make sure that there is space to write @tailbits |
| 4096 | * bytes of data beyond current end of socket buffer. @trailer will be |
| 4097 | * set to point to the skb in which this space begins. |
| 4098 | * |
| 4099 | * The number of scatterlist elements required to completely map the |
| 4100 | * COW'd and extended socket buffer will be returned. |
| 4101 | */ |
| 4102 | int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) |
| 4103 | { |
| 4104 | int copyflag; |
| 4105 | int elt; |
| 4106 | struct sk_buff *skb1, **skb_p; |
| 4107 | |
| 4108 | /* If skb is cloned or its head is paged, reallocate |
| 4109 | * head pulling out all the pages (pages are considered not writable |
| 4110 | * at the moment even if they are anonymous). |
| 4111 | */ |
| 4112 | if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && |
| 4113 | __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL) |
| 4114 | return -ENOMEM; |
| 4115 | |
| 4116 | /* Easy case. Most of packets will go this way. */ |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 4117 | if (!skb_has_frag_list(skb)) { |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4118 | /* A little trouble: not enough space for the trailer.
| 4119 | * This should not happen when the stack is tuned to generate
| 4120 | * good frames. On a miss we reallocate and reserve even more
| 4121 | * space; 128 bytes is fair. */
| 4122 | |
| 4123 | if (skb_tailroom(skb) < tailbits && |
| 4124 | pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) |
| 4125 | return -ENOMEM; |
| 4126 | |
| 4127 | /* Voila! */ |
| 4128 | *trailer = skb; |
| 4129 | return 1; |
| 4130 | } |
| 4131 | |
| 4132 | /* Misery. We are in trouble, going to mince the fragments... */
| 4133 | |
| 4134 | elt = 1; |
| 4135 | skb_p = &skb_shinfo(skb)->frag_list; |
| 4136 | copyflag = 0; |
| 4137 | |
| 4138 | while ((skb1 = *skb_p) != NULL) { |
| 4139 | int ntail = 0; |
| 4140 | |
| 4141 | /* The fragment was partially pulled by someone;
| 4142 | * this can happen on input. Copy it and everything
| 4143 | * after it. */
| 4144 | |
| 4145 | if (skb_shared(skb1)) |
| 4146 | copyflag = 1; |
| 4147 | |
| 4148 | /* If the skb is the last, worry about trailer. */ |
| 4149 | |
| 4150 | if (skb1->next == NULL && tailbits) { |
| 4151 | if (skb_shinfo(skb1)->nr_frags || |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 4152 | skb_has_frag_list(skb1) || |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4153 | skb_tailroom(skb1) < tailbits) |
| 4154 | ntail = tailbits + 128; |
| 4155 | } |
| 4156 | |
| 4157 | if (copyflag || |
| 4158 | skb_cloned(skb1) || |
| 4159 | ntail || |
| 4160 | skb_shinfo(skb1)->nr_frags || |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 4161 | skb_has_frag_list(skb1)) { |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4162 | struct sk_buff *skb2; |
| 4163 | |
| 4164 | /* Fuck, we are miserable poor guys... */ |
| 4165 | if (ntail == 0) |
| 4166 | skb2 = skb_copy(skb1, GFP_ATOMIC); |
| 4167 | else |
| 4168 | skb2 = skb_copy_expand(skb1, |
| 4169 | skb_headroom(skb1), |
| 4170 | ntail, |
| 4171 | GFP_ATOMIC); |
| 4172 | if (unlikely(skb2 == NULL)) |
| 4173 | return -ENOMEM; |
| 4174 | |
| 4175 | if (skb1->sk) |
| 4176 | skb_set_owner_w(skb2, skb1->sk); |
| 4177 | |
| 4178 | /* Looking around. Are we still alive? |
| 4179 | * OK, link the new skb and drop the old one. */
| 4180 | |
| 4181 | skb2->next = skb1->next; |
| 4182 | *skb_p = skb2; |
| 4183 | kfree_skb(skb1); |
| 4184 | skb1 = skb2; |
| 4185 | } |
| 4186 | elt++; |
| 4187 | *trailer = skb1; |
| 4188 | skb_p = &skb1->next; |
| 4189 | } |
| 4190 | |
| 4191 | return elt; |
| 4192 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 4193 | EXPORT_SYMBOL_GPL(skb_cow_data); |
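
/* Usage sketch: transforms such as IPsec ESP pair skb_cow_data() with
 * skb_to_sgvec(). A caller that needs room for 'tailen' trailer bytes might
 * do roughly the following (a sketch; see the esp4/esp6 output paths for
 * real callers):
 *
 *	struct sk_buff *trailer;
 *	int nfrags = skb_cow_data(skb, tailen, &trailer);
 *
 *	if (nfrags < 0)
 *		return nfrags;
 *	pskb_put(skb, trailer, tailen);
 *
 * On return, nfrags is an upper bound on the scatterlist entries needed to
 * map the now-writable buffer.
 */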
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4194 | |
Eric Dumazet | b1faf56 | 2010-05-31 23:44:05 -0700 | [diff] [blame] | 4195 | static void sock_rmem_free(struct sk_buff *skb) |
| 4196 | { |
| 4197 | struct sock *sk = skb->sk; |
| 4198 | |
| 4199 | atomic_sub(skb->truesize, &sk->sk_rmem_alloc); |
| 4200 | } |
| 4201 | |
Soheil Hassas Yeganeh | 8605330a | 2017-03-18 17:02:59 -0400 | [diff] [blame] | 4202 | static void skb_set_err_queue(struct sk_buff *skb) |
| 4203 | { |
| 4204 | /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING. |
| 4205 | * So, it is safe to (mis)use it to mark skbs on the error queue. |
| 4206 | */ |
| 4207 | skb->pkt_type = PACKET_OUTGOING; |
| 4208 | BUILD_BUG_ON(PACKET_OUTGOING == 0); |
| 4209 | } |
| 4210 | |
Eric Dumazet | b1faf56 | 2010-05-31 23:44:05 -0700 | [diff] [blame] | 4211 | /* |
| 4212 | * Note: We don't mem-charge error packets (no sk_forward_alloc changes)
| 4213 | */ |
| 4214 | int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) |
| 4215 | { |
| 4216 | if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= |
Eric Dumazet | 95c9617 | 2012-04-15 05:58:06 +0000 | [diff] [blame] | 4217 | (unsigned int)sk->sk_rcvbuf) |
Eric Dumazet | b1faf56 | 2010-05-31 23:44:05 -0700 | [diff] [blame] | 4218 | return -ENOMEM; |
| 4219 | |
| 4220 | skb_orphan(skb); |
| 4221 | skb->sk = sk; |
| 4222 | skb->destructor = sock_rmem_free; |
| 4223 | atomic_add(skb->truesize, &sk->sk_rmem_alloc); |
Soheil Hassas Yeganeh | 8605330a | 2017-03-18 17:02:59 -0400 | [diff] [blame] | 4224 | skb_set_err_queue(skb); |
Eric Dumazet | b1faf56 | 2010-05-31 23:44:05 -0700 | [diff] [blame] | 4225 | |
Eric Dumazet | abb57ea | 2011-05-18 02:21:31 -0400 | [diff] [blame] | 4226 | /* Before exiting the RCU section, make sure dst is refcounted */
| 4227 | skb_dst_force(skb); |
| 4228 | |
Eric Dumazet | b1faf56 | 2010-05-31 23:44:05 -0700 | [diff] [blame] | 4229 | skb_queue_tail(&sk->sk_error_queue, skb); |
| 4230 | if (!sock_flag(sk, SOCK_DEAD)) |
Vinicius Costa Gomes | 6e5d58f | 2018-03-14 13:32:09 -0700 | [diff] [blame] | 4231 | sk->sk_error_report(sk); |
Eric Dumazet | b1faf56 | 2010-05-31 23:44:05 -0700 | [diff] [blame] | 4232 | return 0; |
| 4233 | } |
| 4234 | EXPORT_SYMBOL(sock_queue_err_skb); |
| 4235 | |
Soheil Hassas Yeganeh | 83a1a1a | 2016-11-30 14:01:08 -0500 | [diff] [blame] | 4236 | static bool is_icmp_err_skb(const struct sk_buff *skb) |
| 4237 | { |
| 4238 | return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP || |
| 4239 | SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6); |
| 4240 | } |
| 4241 | |
Willem de Bruijn | 364a9e9 | 2014-08-31 21:30:27 -0400 | [diff] [blame] | 4242 | struct sk_buff *sock_dequeue_err_skb(struct sock *sk) |
| 4243 | { |
| 4244 | struct sk_buff_head *q = &sk->sk_error_queue; |
Soheil Hassas Yeganeh | 83a1a1a | 2016-11-30 14:01:08 -0500 | [diff] [blame] | 4245 | struct sk_buff *skb, *skb_next = NULL; |
| 4246 | bool icmp_next = false; |
Eric Dumazet | 997d5c3 | 2015-02-18 05:47:55 -0800 | [diff] [blame] | 4247 | unsigned long flags; |
Willem de Bruijn | 364a9e9 | 2014-08-31 21:30:27 -0400 | [diff] [blame] | 4248 | |
Eric Dumazet | 997d5c3 | 2015-02-18 05:47:55 -0800 | [diff] [blame] | 4249 | spin_lock_irqsave(&q->lock, flags); |
Willem de Bruijn | 364a9e9 | 2014-08-31 21:30:27 -0400 | [diff] [blame] | 4250 | skb = __skb_dequeue(q); |
Soheil Hassas Yeganeh | 38b2579 | 2017-06-02 12:38:22 -0400 | [diff] [blame] | 4251 | if (skb && (skb_next = skb_peek(q))) { |
Soheil Hassas Yeganeh | 83a1a1a | 2016-11-30 14:01:08 -0500 | [diff] [blame] | 4252 | icmp_next = is_icmp_err_skb(skb_next); |
Soheil Hassas Yeganeh | 38b2579 | 2017-06-02 12:38:22 -0400 | [diff] [blame] | 4253 | if (icmp_next) |
| 4254 | sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_origin; |
| 4255 | } |
Eric Dumazet | 997d5c3 | 2015-02-18 05:47:55 -0800 | [diff] [blame] | 4256 | spin_unlock_irqrestore(&q->lock, flags); |
Willem de Bruijn | 364a9e9 | 2014-08-31 21:30:27 -0400 | [diff] [blame] | 4257 | |
Soheil Hassas Yeganeh | 83a1a1a | 2016-11-30 14:01:08 -0500 | [diff] [blame] | 4258 | if (is_icmp_err_skb(skb) && !icmp_next) |
| 4259 | sk->sk_err = 0; |
| 4260 | |
| 4261 | if (skb_next) |
Willem de Bruijn | 364a9e9 | 2014-08-31 21:30:27 -0400 | [diff] [blame] | 4262 | sk->sk_error_report(sk); |
| 4263 | |
| 4264 | return skb; |
| 4265 | } |
| 4266 | EXPORT_SYMBOL(sock_dequeue_err_skb); |
| 4267 | |
Alexander Duyck | cab41c4 | 2014-09-10 18:05:26 -0400 | [diff] [blame] | 4268 | /** |
| 4269 | * skb_clone_sk - create clone of skb, and take reference to socket |
| 4270 | * @skb: the skb to clone |
| 4271 | * |
| 4272 | * This function creates a clone of a buffer that holds a reference on |
| 4273 | * sk_refcnt. Buffers created via this function are meant to be |
| 4274 | * returned using sock_queue_err_skb, or free via kfree_skb. |
| 4275 | * returned using sock_queue_err_skb, or freed via kfree_skb.
| 4276 | * When passing buffers allocated with this function to sock_queue_err_skb |
| 4277 | * it is necessary to wrap the call with sock_hold/sock_put in order to |
| 4278 | * prevent the socket from being released prior to being enqueued on |
| 4279 | * the sk_error_queue. |
| 4280 | */ |
Alexander Duyck | 62bccb8 | 2014-09-04 13:31:35 -0400 | [diff] [blame] | 4281 | struct sk_buff *skb_clone_sk(struct sk_buff *skb) |
| 4282 | { |
| 4283 | struct sock *sk = skb->sk; |
| 4284 | struct sk_buff *clone; |
| 4285 | |
Reshetova, Elena | 41c6d65 | 2017-06-30 13:08:01 +0300 | [diff] [blame] | 4286 | if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt)) |
Alexander Duyck | 62bccb8 | 2014-09-04 13:31:35 -0400 | [diff] [blame] | 4287 | return NULL; |
| 4288 | |
| 4289 | clone = skb_clone(skb, GFP_ATOMIC); |
| 4290 | if (!clone) { |
| 4291 | sock_put(sk); |
| 4292 | return NULL; |
| 4293 | } |
| 4294 | |
| 4295 | clone->sk = sk; |
| 4296 | clone->destructor = sock_efree; |
| 4297 | |
| 4298 | return clone; |
| 4299 | } |
| 4300 | EXPORT_SYMBOL(skb_clone_sk); |
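
/* Usage sketch of the sock_hold()/sock_put() pattern the comment above
 * prescribes (hypothetical caller):
 *
 *	struct sock *sk;
 *
 *	clone = skb_clone_sk(skb);
 *	if (!clone)
 *		return;
 *	sk = clone->sk;
 *	sock_hold(sk);
 *	if (sock_queue_err_skb(sk, clone))
 *		kfree_skb(clone);
 *	sock_put(sk);
 */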
| 4301 | |
Alexander Duyck | 37846ef | 2014-09-04 13:31:10 -0400 | [diff] [blame] | 4302 | static void __skb_complete_tx_timestamp(struct sk_buff *skb, |
| 4303 | struct sock *sk, |
Soheil Hassas Yeganeh | 4ef1b28 | 2017-03-18 17:03:00 -0400 | [diff] [blame] | 4304 | int tstype, |
| 4305 | bool opt_stats) |
Patrick Ohly | ac45f60 | 2009-02-12 05:03:37 +0000 | [diff] [blame] | 4306 | { |
Patrick Ohly | ac45f60 | 2009-02-12 05:03:37 +0000 | [diff] [blame] | 4307 | struct sock_exterr_skb *serr; |
Patrick Ohly | ac45f60 | 2009-02-12 05:03:37 +0000 | [diff] [blame] | 4308 | int err; |
| 4309 | |
Soheil Hassas Yeganeh | 4ef1b28 | 2017-03-18 17:03:00 -0400 | [diff] [blame] | 4310 | BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb)); |
| 4311 | |
Patrick Ohly | ac45f60 | 2009-02-12 05:03:37 +0000 | [diff] [blame] | 4312 | serr = SKB_EXT_ERR(skb); |
| 4313 | memset(serr, 0, sizeof(*serr)); |
| 4314 | serr->ee.ee_errno = ENOMSG; |
| 4315 | serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; |
Willem de Bruijn | e7fd288 | 2014-08-04 22:11:48 -0400 | [diff] [blame] | 4316 | serr->ee.ee_info = tstype; |
Soheil Hassas Yeganeh | 4ef1b28 | 2017-03-18 17:03:00 -0400 | [diff] [blame] | 4317 | serr->opt_stats = opt_stats; |
Willem de Bruijn | 1862d62 | 2017-04-12 19:24:35 -0400 | [diff] [blame] | 4318 | serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0; |
Willem de Bruijn | 4ed2d76 | 2014-08-04 22:11:49 -0400 | [diff] [blame] | 4319 | if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) { |
Willem de Bruijn | 09c2d25 | 2014-08-04 22:11:47 -0400 | [diff] [blame] | 4320 | serr->ee.ee_data = skb_shinfo(skb)->tskey; |
WANG Cong | ac5cc97 | 2015-12-16 23:39:04 -0800 | [diff] [blame] | 4321 | if (sk->sk_protocol == IPPROTO_TCP && |
| 4322 | sk->sk_type == SOCK_STREAM) |
Willem de Bruijn | 4ed2d76 | 2014-08-04 22:11:49 -0400 | [diff] [blame] | 4323 | serr->ee.ee_data -= sk->sk_tskey; |
| 4324 | } |
Eric Dumazet | 2903037 | 2010-05-29 00:20:48 -0700 | [diff] [blame] | 4325 | |
Patrick Ohly | ac45f60 | 2009-02-12 05:03:37 +0000 | [diff] [blame] | 4326 | err = sock_queue_err_skb(sk, skb); |
Eric Dumazet | 2903037 | 2010-05-29 00:20:48 -0700 | [diff] [blame] | 4327 | |
Patrick Ohly | ac45f60 | 2009-02-12 05:03:37 +0000 | [diff] [blame] | 4328 | if (err) |
| 4329 | kfree_skb(skb); |
| 4330 | } |
Alexander Duyck | 37846ef | 2014-09-04 13:31:10 -0400 | [diff] [blame] | 4331 | |
Willem de Bruijn | b245be1 | 2015-01-30 13:29:32 -0500 | [diff] [blame] | 4332 | static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly) |
| 4333 | { |
| 4334 | bool ret; |
| 4335 | |
| 4336 | if (likely(sysctl_tstamp_allow_data || tsonly)) |
| 4337 | return true; |
| 4338 | |
| 4339 | read_lock_bh(&sk->sk_callback_lock); |
| 4340 | ret = sk->sk_socket && sk->sk_socket->file && |
| 4341 | file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW); |
| 4342 | read_unlock_bh(&sk->sk_callback_lock); |
| 4343 | return ret; |
| 4344 | } |
| 4345 | |
Alexander Duyck | 37846ef | 2014-09-04 13:31:10 -0400 | [diff] [blame] | 4346 | void skb_complete_tx_timestamp(struct sk_buff *skb, |
| 4347 | struct skb_shared_hwtstamps *hwtstamps) |
| 4348 | { |
| 4349 | struct sock *sk = skb->sk; |
| 4350 | |
Willem de Bruijn | b245be1 | 2015-01-30 13:29:32 -0500 | [diff] [blame] | 4351 | if (!skb_may_tx_timestamp(sk, false)) |
Willem de Bruijn | 35b99df | 2017-12-13 14:41:06 -0500 | [diff] [blame] | 4352 | goto err; |
Willem de Bruijn | b245be1 | 2015-01-30 13:29:32 -0500 | [diff] [blame] | 4353 | |
Eric Dumazet | 9ac25fc | 2017-03-03 21:01:03 -0800 | [diff] [blame] | 4354 | /* Take a reference to prevent skb_orphan() from freeing the socket, |
| 4355 | * but only if the socket refcount is not zero. |
| 4356 | */ |
Reshetova, Elena | 41c6d65 | 2017-06-30 13:08:01 +0300 | [diff] [blame] | 4357 | if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { |
Eric Dumazet | 9ac25fc | 2017-03-03 21:01:03 -0800 | [diff] [blame] | 4358 | *skb_hwtstamps(skb) = *hwtstamps; |
Soheil Hassas Yeganeh | 4ef1b28 | 2017-03-18 17:03:00 -0400 | [diff] [blame] | 4359 | __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false); |
Eric Dumazet | 9ac25fc | 2017-03-03 21:01:03 -0800 | [diff] [blame] | 4360 | sock_put(sk); |
Willem de Bruijn | 35b99df | 2017-12-13 14:41:06 -0500 | [diff] [blame] | 4361 | return; |
Eric Dumazet | 9ac25fc | 2017-03-03 21:01:03 -0800 | [diff] [blame] | 4362 | } |
Willem de Bruijn | 35b99df | 2017-12-13 14:41:06 -0500 | [diff] [blame] | 4363 | |
| 4364 | err: |
| 4365 | kfree_skb(skb); |
Alexander Duyck | 37846ef | 2014-09-04 13:31:10 -0400 | [diff] [blame] | 4366 | } |
| 4367 | EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); |
| 4368 | |
| 4369 | void __skb_tstamp_tx(struct sk_buff *orig_skb, |
| 4370 | struct skb_shared_hwtstamps *hwtstamps, |
| 4371 | struct sock *sk, int tstype) |
| 4372 | { |
| 4373 | struct sk_buff *skb; |
Soheil Hassas Yeganeh | 4ef1b28 | 2017-03-18 17:03:00 -0400 | [diff] [blame] | 4374 | bool tsonly, opt_stats = false; |
Alexander Duyck | 37846ef | 2014-09-04 13:31:10 -0400 | [diff] [blame] | 4375 | |
Willem de Bruijn | 3a8dd97 | 2015-03-11 15:43:55 -0400 | [diff] [blame] | 4376 | if (!sk) |
| 4377 | return; |
| 4378 | |
Miroslav Lichvar | b50a5c7 | 2017-05-19 17:52:40 +0200 | [diff] [blame] | 4379 | if (!hwtstamps && !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) && |
| 4380 | skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS) |
| 4381 | return; |
| 4382 | |
Willem de Bruijn | 3a8dd97 | 2015-03-11 15:43:55 -0400 | [diff] [blame] | 4383 | tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY; |
| 4384 | if (!skb_may_tx_timestamp(sk, tsonly)) |
Alexander Duyck | 37846ef | 2014-09-04 13:31:10 -0400 | [diff] [blame] | 4385 | return; |
| 4386 | |
Francis Yan | 1c88580 | 2016-11-27 23:07:18 -0800 | [diff] [blame] | 4387 | if (tsonly) { |
| 4388 | #ifdef CONFIG_INET |
| 4389 | if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) && |
| 4390 | sk->sk_protocol == IPPROTO_TCP && |
Soheil Hassas Yeganeh | 4ef1b28 | 2017-03-18 17:03:00 -0400 | [diff] [blame] | 4391 | sk->sk_type == SOCK_STREAM) { |
Francis Yan | 1c88580 | 2016-11-27 23:07:18 -0800 | [diff] [blame] | 4392 | skb = tcp_get_timestamping_opt_stats(sk); |
Soheil Hassas Yeganeh | 4ef1b28 | 2017-03-18 17:03:00 -0400 | [diff] [blame] | 4393 | opt_stats = true; |
| 4394 | } else |
Francis Yan | 1c88580 | 2016-11-27 23:07:18 -0800 | [diff] [blame] | 4395 | #endif |
| 4396 | skb = alloc_skb(0, GFP_ATOMIC); |
| 4397 | } else { |
Willem de Bruijn | 49ca0d8 | 2015-01-30 13:29:31 -0500 | [diff] [blame] | 4398 | skb = skb_clone(orig_skb, GFP_ATOMIC); |
Francis Yan | 1c88580 | 2016-11-27 23:07:18 -0800 | [diff] [blame] | 4399 | } |
Alexander Duyck | 37846ef | 2014-09-04 13:31:10 -0400 | [diff] [blame] | 4400 | if (!skb) |
| 4401 | return; |
| 4402 | |
Willem de Bruijn | 49ca0d8 | 2015-01-30 13:29:31 -0500 | [diff] [blame] | 4403 | if (tsonly) { |
Willem de Bruijn | fff8803 | 2017-06-08 11:35:03 -0400 | [diff] [blame] | 4404 | skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags & |
| 4405 | SKBTX_ANY_TSTAMP; |
Willem de Bruijn | 49ca0d8 | 2015-01-30 13:29:31 -0500 | [diff] [blame] | 4406 | skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; |
| 4407 | } |
| 4408 | |
| 4409 | if (hwtstamps) |
| 4410 | *skb_hwtstamps(skb) = *hwtstamps; |
| 4411 | else |
| 4412 | skb->tstamp = ktime_get_real(); |
| 4413 | |
Soheil Hassas Yeganeh | 4ef1b28 | 2017-03-18 17:03:00 -0400 | [diff] [blame] | 4414 | __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats); |
Alexander Duyck | 37846ef | 2014-09-04 13:31:10 -0400 | [diff] [blame] | 4415 | } |
Willem de Bruijn | e7fd288 | 2014-08-04 22:11:48 -0400 | [diff] [blame] | 4416 | EXPORT_SYMBOL_GPL(__skb_tstamp_tx); |
| 4417 | |
| 4418 | void skb_tstamp_tx(struct sk_buff *orig_skb, |
| 4419 | struct skb_shared_hwtstamps *hwtstamps) |
| 4420 | { |
| 4421 | return __skb_tstamp_tx(orig_skb, hwtstamps, orig_skb->sk, |
| 4422 | SCM_TSTAMP_SND); |
| 4423 | } |
Patrick Ohly | ac45f60 | 2009-02-12 05:03:37 +0000 | [diff] [blame] | 4424 | EXPORT_SYMBOL_GPL(skb_tstamp_tx); |
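
/* Usage sketch, assuming a driver that flagged the skb with
 * SKBTX_IN_PROGRESS and later read back a hardware TX timestamp 'hw_ns'
 * (illustrative, simplified):
 *
 *	struct skb_shared_hwtstamps hwts = {
 *		.hwtstamp = ns_to_ktime(hw_ns),
 *	};
 *
 *	skb_tstamp_tx(skb, &hwts);
 *	dev_kfree_skb_any(skb);
 *
 * Passing NULL for hwtstamps records a software timestamp instead
 * (ktime_get_real() above).
 */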
| 4425 | |
Johannes Berg | 6e3e939 | 2011-11-09 10:15:42 +0100 | [diff] [blame] | 4426 | void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) |
| 4427 | { |
| 4428 | struct sock *sk = skb->sk; |
| 4429 | struct sock_exterr_skb *serr; |
Eric Dumazet | dd4f107 | 2017-03-03 21:01:02 -0800 | [diff] [blame] | 4430 | int err = 1; |
Johannes Berg | 6e3e939 | 2011-11-09 10:15:42 +0100 | [diff] [blame] | 4431 | |
| 4432 | skb->wifi_acked_valid = 1; |
| 4433 | skb->wifi_acked = acked; |
| 4434 | |
| 4435 | serr = SKB_EXT_ERR(skb); |
| 4436 | memset(serr, 0, sizeof(*serr)); |
| 4437 | serr->ee.ee_errno = ENOMSG; |
| 4438 | serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; |
| 4439 | |
Eric Dumazet | dd4f107 | 2017-03-03 21:01:02 -0800 | [diff] [blame] | 4440 | /* Take a reference to prevent skb_orphan() from freeing the socket, |
| 4441 | * but only if the socket refcount is not zero. |
| 4442 | */ |
Reshetova, Elena | 41c6d65 | 2017-06-30 13:08:01 +0300 | [diff] [blame] | 4443 | if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { |
Eric Dumazet | dd4f107 | 2017-03-03 21:01:02 -0800 | [diff] [blame] | 4444 | err = sock_queue_err_skb(sk, skb); |
| 4445 | sock_put(sk); |
| 4446 | } |
Johannes Berg | 6e3e939 | 2011-11-09 10:15:42 +0100 | [diff] [blame] | 4447 | if (err) |
| 4448 | kfree_skb(skb); |
| 4449 | } |
| 4450 | EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); |
| 4451 | |
Rusty Russell | f35d9d8 | 2008-02-04 23:49:54 -0500 | [diff] [blame] | 4452 | /** |
| 4453 | * skb_partial_csum_set - set up and verify partial csum values for packet |
| 4454 | * @skb: the skb to set |
| 4455 | * @start: the number of bytes after skb->data to start checksumming. |
| 4456 | * @off: the offset from start to place the checksum. |
| 4457 | * |
| 4458 | * For untrusted partially-checksummed packets, we need to make sure the values |
| 4459 | * for skb->csum_start and skb->csum_offset are valid so we don't oops. |
| 4460 | * |
| 4461 | * This function checks and sets those values and skb->ip_summed: if this |
| 4462 | * returns false you should drop the packet. |
| 4463 | */ |
| 4464 | bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) |
| 4465 | { |
Eric Dumazet | 52b5d6f | 2018-10-10 06:59:35 -0700 | [diff] [blame] | 4466 | u32 csum_end = (u32)start + (u32)off + sizeof(__sum16); |
| 4467 | u32 csum_start = skb_headroom(skb) + (u32)start; |
| 4468 | |
| 4469 | if (unlikely(csum_start > U16_MAX || csum_end > skb_headlen(skb))) { |
| 4470 | net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n", |
| 4471 | start, off, skb_headroom(skb), skb_headlen(skb)); |
Rusty Russell | f35d9d8 | 2008-02-04 23:49:54 -0500 | [diff] [blame] | 4472 | return false; |
| 4473 | } |
| 4474 | skb->ip_summed = CHECKSUM_PARTIAL; |
Eric Dumazet | 52b5d6f | 2018-10-10 06:59:35 -0700 | [diff] [blame] | 4475 | skb->csum_start = csum_start; |
Rusty Russell | f35d9d8 | 2008-02-04 23:49:54 -0500 | [diff] [blame] | 4476 | skb->csum_offset = off; |
Jason Wang | e5d5dec | 2013-03-26 23:11:20 +0000 | [diff] [blame] | 4477 | skb_set_transport_header(skb, start); |
Rusty Russell | f35d9d8 | 2008-02-04 23:49:54 -0500 | [diff] [blame] | 4478 | return true; |
| 4479 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 4480 | EXPORT_SYMBOL_GPL(skb_partial_csum_set); |
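
/* Usage sketch: virtio-net-style code validating untrusted checksum
 * metadata uses this roughly as follows (simplified; 'hdr' and its fields
 * are illustrative):
 *
 *	u16 start = le16_to_cpu(hdr->csum_start);
 *	u16 off = le16_to_cpu(hdr->csum_offset);
 *
 *	if (!skb_partial_csum_set(skb, start, off))
 *		return -EINVAL;
 *
 * On success, skb->ip_summed is CHECKSUM_PARTIAL and the checksum will be
 * filled in at csum_start + csum_offset by the device or by
 * skb_checksum_help().
 */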
Rusty Russell | f35d9d8 | 2008-02-04 23:49:54 -0500 | [diff] [blame] | 4481 | |
Paul Durrant | ed1f50c | 2014-01-09 10:02:46 +0000 | [diff] [blame] | 4482 | static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, |
| 4483 | unsigned int max) |
| 4484 | { |
| 4485 | if (skb_headlen(skb) >= len) |
| 4486 | return 0; |
| 4487 | |
| 4488 | /* If we need to pull up, then pull up to the max so we
| 4489 | * won't need to do it again.
| 4490 | */ |
| 4491 | if (max > skb->len) |
| 4492 | max = skb->len; |
| 4493 | |
| 4494 | if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) |
| 4495 | return -ENOMEM; |
| 4496 | |
| 4497 | if (skb_headlen(skb) < len) |
| 4498 | return -EPROTO; |
| 4499 | |
| 4500 | return 0; |
| 4501 | } |
| 4502 | |
Jan Beulich | f9708b4 | 2014-03-11 13:56:05 +0000 | [diff] [blame] | 4503 | #define MAX_TCP_HDR_LEN (15 * 4) |
| 4504 | |
| 4505 | static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb, |
| 4506 | typeof(IPPROTO_IP) proto, |
| 4507 | unsigned int off) |
| 4508 | { |
| 4509 | switch (proto) { |
| 4510 | int err; |
| 4511 | |
| 4512 | case IPPROTO_TCP: |
| 4513 | err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr), |
| 4514 | off + MAX_TCP_HDR_LEN); |
| 4515 | if (!err && !skb_partial_csum_set(skb, off, |
| 4516 | offsetof(struct tcphdr, |
| 4517 | check))) |
| 4518 | err = -EPROTO; |
| 4519 | return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; |
| 4520 | |
| 4521 | case IPPROTO_UDP: |
| 4522 | err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr), |
| 4523 | off + sizeof(struct udphdr)); |
| 4524 | if (!err && !skb_partial_csum_set(skb, off, |
| 4525 | offsetof(struct udphdr, |
| 4526 | check))) |
| 4527 | err = -EPROTO; |
| 4528 | return err ? ERR_PTR(err) : &udp_hdr(skb)->check; |
| 4529 | } |
| 4530 | |
| 4531 | return ERR_PTR(-EPROTO); |
| 4532 | } |
| 4533 | |
Paul Durrant | ed1f50c | 2014-01-09 10:02:46 +0000 | [diff] [blame] | 4534 | /* This value should be large enough to cover a tagged ethernet header plus |
| 4535 | * maximally sized IP and TCP or UDP headers. |
| 4536 | */ |
| 4537 | #define MAX_IP_HDR_LEN 128 |
| 4538 | |
Jan Beulich | f9708b4 | 2014-03-11 13:56:05 +0000 | [diff] [blame] | 4539 | static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate) |
Paul Durrant | ed1f50c | 2014-01-09 10:02:46 +0000 | [diff] [blame] | 4540 | { |
| 4541 | unsigned int off; |
| 4542 | bool fragment; |
Jan Beulich | f9708b4 | 2014-03-11 13:56:05 +0000 | [diff] [blame] | 4543 | __sum16 *csum; |
Paul Durrant | ed1f50c | 2014-01-09 10:02:46 +0000 | [diff] [blame] | 4544 | int err; |
| 4545 | |
| 4546 | fragment = false; |
| 4547 | |
| 4548 | err = skb_maybe_pull_tail(skb, |
| 4549 | sizeof(struct iphdr), |
| 4550 | MAX_IP_HDR_LEN); |
| 4551 | if (err < 0) |
| 4552 | goto out; |
| 4553 | |
| 4554 | if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF)) |
| 4555 | fragment = true; |
| 4556 | |
| 4557 | off = ip_hdrlen(skb); |
| 4558 | |
| 4559 | err = -EPROTO; |
| 4560 | |
| 4561 | if (fragment) |
| 4562 | goto out; |
| 4563 | |
Jan Beulich | f9708b4 | 2014-03-11 13:56:05 +0000 | [diff] [blame] | 4564 | csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off); |
| 4565 | if (IS_ERR(csum)) |
| 4566 | return PTR_ERR(csum); |
Paul Durrant | ed1f50c | 2014-01-09 10:02:46 +0000 | [diff] [blame] | 4567 | |
Jan Beulich | f9708b4 | 2014-03-11 13:56:05 +0000 | [diff] [blame] | 4568 | if (recalculate) |
| 4569 | *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, |
| 4570 | ip_hdr(skb)->daddr, |
| 4571 | skb->len - off, |
| 4572 | ip_hdr(skb)->protocol, 0); |
Paul Durrant | ed1f50c | 2014-01-09 10:02:46 +0000 | [diff] [blame] | 4573 | err = 0; |
| 4574 | |
| 4575 | out: |
| 4576 | return err; |
| 4577 | } |
| 4578 | |
| 4579 | /* This value should be large enough to cover a tagged ethernet header plus |
| 4580 | * an IPv6 header, all options, and a maximal TCP or UDP header. |
| 4581 | */ |
| 4582 | #define MAX_IPV6_HDR_LEN 256 |
| 4583 | |
| 4584 | #define OPT_HDR(type, skb, off) \ |
| 4585 | (type *)(skb_network_header(skb) + (off)) |
| 4586 | |
| 4587 | static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) |
| 4588 | { |
| 4589 | int err; |
| 4590 | u8 nexthdr; |
| 4591 | unsigned int off; |
| 4592 | unsigned int len; |
| 4593 | bool fragment; |
| 4594 | bool done; |
Jan Beulich | f9708b4 | 2014-03-11 13:56:05 +0000 | [diff] [blame] | 4595 | __sum16 *csum; |
Paul Durrant | ed1f50c | 2014-01-09 10:02:46 +0000 | [diff] [blame] | 4596 | |
| 4597 | fragment = false; |
| 4598 | done = false; |
| 4599 | |
| 4600 | off = sizeof(struct ipv6hdr); |
| 4601 | |
| 4602 | err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); |
| 4603 | if (err < 0) |
| 4604 | goto out; |
| 4605 | |
| 4606 | nexthdr = ipv6_hdr(skb)->nexthdr; |
| 4607 | |
| 4608 | len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); |
| 4609 | while (off <= len && !done) { |
| 4610 | switch (nexthdr) { |
| 4611 | case IPPROTO_DSTOPTS: |
| 4612 | case IPPROTO_HOPOPTS: |
| 4613 | case IPPROTO_ROUTING: { |
| 4614 | struct ipv6_opt_hdr *hp; |
| 4615 | |
| 4616 | err = skb_maybe_pull_tail(skb, |
| 4617 | off + |
| 4618 | sizeof(struct ipv6_opt_hdr), |
| 4619 | MAX_IPV6_HDR_LEN); |
| 4620 | if (err < 0) |
| 4621 | goto out; |
| 4622 | |
| 4623 | hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); |
| 4624 | nexthdr = hp->nexthdr; |
| 4625 | off += ipv6_optlen(hp); |
| 4626 | break; |
| 4627 | } |
| 4628 | case IPPROTO_AH: { |
| 4629 | struct ip_auth_hdr *hp; |
| 4630 | |
| 4631 | err = skb_maybe_pull_tail(skb, |
| 4632 | off + |
| 4633 | sizeof(struct ip_auth_hdr), |
| 4634 | MAX_IPV6_HDR_LEN); |
| 4635 | if (err < 0) |
| 4636 | goto out; |
| 4637 | |
| 4638 | hp = OPT_HDR(struct ip_auth_hdr, skb, off); |
| 4639 | nexthdr = hp->nexthdr; |
| 4640 | off += ipv6_authlen(hp); |
| 4641 | break; |
| 4642 | } |
| 4643 | case IPPROTO_FRAGMENT: { |
| 4644 | struct frag_hdr *hp; |
| 4645 | |
| 4646 | err = skb_maybe_pull_tail(skb, |
| 4647 | off + |
| 4648 | sizeof(struct frag_hdr), |
| 4649 | MAX_IPV6_HDR_LEN); |
| 4650 | if (err < 0) |
| 4651 | goto out; |
| 4652 | |
| 4653 | hp = OPT_HDR(struct frag_hdr, skb, off); |
| 4654 | |
| 4655 | if (hp->frag_off & htons(IP6_OFFSET | IP6_MF)) |
| 4656 | fragment = true; |
| 4657 | |
| 4658 | nexthdr = hp->nexthdr; |
| 4659 | off += sizeof(struct frag_hdr); |
| 4660 | break; |
| 4661 | } |
| 4662 | default: |
| 4663 | done = true; |
| 4664 | break; |
| 4665 | } |
| 4666 | } |
| 4667 | |
| 4668 | err = -EPROTO; |
| 4669 | |
| 4670 | if (!done || fragment) |
| 4671 | goto out; |
| 4672 | |
Jan Beulich | f9708b4 | 2014-03-11 13:56:05 +0000 | [diff] [blame] | 4673 | csum = skb_checksum_setup_ip(skb, nexthdr, off); |
| 4674 | if (IS_ERR(csum)) |
| 4675 | return PTR_ERR(csum); |
Paul Durrant | ed1f50c | 2014-01-09 10:02:46 +0000 | [diff] [blame] | 4676 | |
Jan Beulich | f9708b4 | 2014-03-11 13:56:05 +0000 | [diff] [blame] | 4677 | if (recalculate) |
| 4678 | *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, |
| 4679 | &ipv6_hdr(skb)->daddr, |
| 4680 | skb->len - off, nexthdr, 0); |
Paul Durrant | ed1f50c | 2014-01-09 10:02:46 +0000 | [diff] [blame] | 4681 | err = 0; |
| 4682 | |
| 4683 | out: |
| 4684 | return err; |
| 4685 | } |
| 4686 | |
| 4687 | /** |
| 4688 | * skb_checksum_setup - set up partial checksum offset |
| 4689 | * @skb: the skb to set up |
| 4690 | * @recalculate: if true the pseudo-header checksum will be recalculated |
| 4691 | */ |
| 4692 | int skb_checksum_setup(struct sk_buff *skb, bool recalculate) |
| 4693 | { |
| 4694 | int err; |
| 4695 | |
| 4696 | switch (skb->protocol) { |
| 4697 | case htons(ETH_P_IP): |
Jan Beulich | f9708b4 | 2014-03-11 13:56:05 +0000 | [diff] [blame] | 4698 | err = skb_checksum_setup_ipv4(skb, recalculate); |
Paul Durrant | ed1f50c | 2014-01-09 10:02:46 +0000 | [diff] [blame] | 4699 | break; |
| 4700 | |
| 4701 | case htons(ETH_P_IPV6): |
| 4702 | err = skb_checksum_setup_ipv6(skb, recalculate); |
| 4703 | break; |
| 4704 | |
| 4705 | default: |
| 4706 | err = -EPROTO; |
| 4707 | break; |
| 4708 | } |
| 4709 | |
| 4710 | return err; |
| 4711 | } |
| 4712 | EXPORT_SYMBOL(skb_checksum_setup); |
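
/* Usage sketch (hypothetical guest-networking backend): after pulling a
 * partially-checksummed packet from an untrusted frontend, a caller might
 * do:
 *
 *	err = skb_checksum_setup(skb, true);
 *	if (err)
 *		goto drop;
 *
 * Passing @recalculate as true rewrites the pseudo-header checksum, which
 * matters when the sender cannot be trusted to have seeded it correctly.
 */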
| 4713 | |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 4714 | /** |
| 4715 | * skb_checksum_maybe_trim - maybe trims the given skb |
| 4716 | * @skb: the skb to check |
| 4717 | * @transport_len: the data length beyond the network header |
| 4718 | * |
| 4719 | * Checks whether the given skb has data beyond the given transport length. |
| 4720 | * If so, returns a cloned skb trimmed to this transport length. |
| 4721 | * Otherwise returns the provided skb. Returns NULL in error cases |
| 4722 | * (e.g. transport_len exceeds skb length or out-of-memory). |
| 4723 | * |
Linus Lüssing | a516993 | 2015-08-13 05:54:07 +0200 | [diff] [blame] | 4724 | * Caller needs to set the skb transport header and free any returned skb if it |
| 4725 | * differs from the provided skb. |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 4726 | */ |
| 4727 | static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb, |
| 4728 | unsigned int transport_len) |
| 4729 | { |
| 4730 | struct sk_buff *skb_chk; |
| 4731 | unsigned int len = skb_transport_offset(skb) + transport_len; |
| 4732 | int ret; |
| 4733 | |
Linus Lüssing | a516993 | 2015-08-13 05:54:07 +0200 | [diff] [blame] | 4734 | if (skb->len < len) |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 4735 | return NULL; |
Linus Lüssing | a516993 | 2015-08-13 05:54:07 +0200 | [diff] [blame] | 4736 | else if (skb->len == len) |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 4737 | return skb; |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 4738 | |
| 4739 | skb_chk = skb_clone(skb, GFP_ATOMIC); |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 4740 | if (!skb_chk) |
| 4741 | return NULL; |
| 4742 | |
| 4743 | ret = pskb_trim_rcsum(skb_chk, len); |
| 4744 | if (ret) { |
| 4745 | kfree_skb(skb_chk); |
| 4746 | return NULL; |
| 4747 | } |
| 4748 | |
| 4749 | return skb_chk; |
| 4750 | } |
| 4751 | |
| 4752 | /** |
| 4753 | * skb_checksum_trimmed - validate checksum of an skb |
| 4754 | * @skb: the skb to check |
| 4755 | * @transport_len: the data length beyond the network header |
| 4756 | * @skb_chkf: checksum function to use |
| 4757 | * |
| 4758 | * Applies the given checksum function skb_chkf to the provided skb. |
| 4759 | * Returns a checked and maybe trimmed skb. Returns NULL on error. |
| 4760 | * |
| 4761 | * If the skb has data beyond the given transport length, then a |
| 4762 | * trimmed & cloned skb is checked and returned. |
| 4763 | * |
Linus Lüssing | a516993 | 2015-08-13 05:54:07 +0200 | [diff] [blame] | 4764 | * Caller needs to set the skb transport header and free any returned skb if it |
| 4765 | * differs from the provided skb. |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 4766 | */ |
| 4767 | struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, |
| 4768 | unsigned int transport_len, |
| 4769 | __sum16(*skb_chkf)(struct sk_buff *skb)) |
| 4770 | { |
| 4771 | struct sk_buff *skb_chk; |
| 4772 | unsigned int offset = skb_transport_offset(skb); |
Linus Lüssing | fcba67c | 2015-05-05 00:19:35 +0200 | [diff] [blame] | 4773 | __sum16 ret; |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 4774 | |
| 4775 | skb_chk = skb_checksum_maybe_trim(skb, transport_len); |
| 4776 | if (!skb_chk) |
Linus Lüssing | a516993 | 2015-08-13 05:54:07 +0200 | [diff] [blame] | 4777 | goto err; |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 4778 | |
Linus Lüssing | a516993 | 2015-08-13 05:54:07 +0200 | [diff] [blame] | 4779 | if (!pskb_may_pull(skb_chk, offset)) |
| 4780 | goto err; |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 4781 | |
Linus Lüssing | 9b36881 | 2016-02-24 04:21:42 +0100 | [diff] [blame] | 4782 | skb_pull_rcsum(skb_chk, offset); |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 4783 | ret = skb_chkf(skb_chk); |
Linus Lüssing | 9b36881 | 2016-02-24 04:21:42 +0100 | [diff] [blame] | 4784 | skb_push_rcsum(skb_chk, offset); |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 4785 | |
Linus Lüssing | a516993 | 2015-08-13 05:54:07 +0200 | [diff] [blame] | 4786 | if (ret) |
| 4787 | goto err; |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 4788 | |
| 4789 | return skb_chk; |
Linus Lüssing | a516993 | 2015-08-13 05:54:07 +0200 | [diff] [blame] | 4790 | |
| 4791 | err: |
| 4792 | if (skb_chk && skb_chk != skb) |
| 4793 | kfree_skb(skb_chk); |
| 4794 | |
| 4795 | return NULL; |
| 4796 | |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 4797 | } |
| 4798 | EXPORT_SYMBOL(skb_checksum_trimmed); |
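
/* Usage sketch ('validate_csum' stands in for a real checksum routine, such
 * as the IGMP/MLD validators that call this helper):
 *
 *	skb_chk = skb_checksum_trimmed(skb, transport_len, validate_csum);
 *	if (!skb_chk)
 *		return -EINVAL;
 *	... parse the validated payload via skb_chk ...
 *	if (skb_chk != skb)
 *		kfree_skb(skb_chk);
 */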
| 4799 | |
Ben Hutchings | 4497b07 | 2008-06-19 16:22:28 -0700 | [diff] [blame] | 4800 | void __skb_warn_lro_forwarding(const struct sk_buff *skb) |
| 4801 | { |
Joe Perches | e87cc47 | 2012-05-13 21:56:26 +0000 | [diff] [blame] | 4802 | net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n", |
| 4803 | skb->dev->name); |
Ben Hutchings | 4497b07 | 2008-06-19 16:22:28 -0700 | [diff] [blame] | 4804 | } |
Ben Hutchings | 4497b07 | 2008-06-19 16:22:28 -0700 | [diff] [blame] | 4805 | EXPORT_SYMBOL(__skb_warn_lro_forwarding); |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 4806 | |
| 4807 | void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) |
| 4808 | { |
Eric Dumazet | 3d861f6 | 2012-10-22 09:03:40 +0000 | [diff] [blame] | 4809 | if (head_stolen) { |
| 4810 | skb_release_head_state(skb); |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 4811 | kmem_cache_free(skbuff_head_cache, skb); |
Eric Dumazet | 3d861f6 | 2012-10-22 09:03:40 +0000 | [diff] [blame] | 4812 | } else { |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 4813 | __kfree_skb(skb); |
Eric Dumazet | 3d861f6 | 2012-10-22 09:03:40 +0000 | [diff] [blame] | 4814 | } |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 4815 | } |
| 4816 | EXPORT_SYMBOL(kfree_skb_partial); |
| 4817 | |
| 4818 | /** |
| 4819 | * skb_try_coalesce - try to merge skb to prior one |
| 4820 | * @to: prior buffer |
| 4821 | * @from: buffer to add |
| 4822 | * @fragstolen: pointer to boolean |
Randy Dunlap | c6c4b97 | 2012-06-08 14:01:44 +0000 | [diff] [blame] | 4823 | * @delta_truesize: how much more was allocated than was requested |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 4824 | */ |
| 4825 | bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, |
| 4826 | bool *fragstolen, int *delta_truesize) |
| 4827 | { |
Eric Dumazet | c818fa9 | 2017-10-04 10:48:35 -0700 | [diff] [blame] | 4828 | struct skb_shared_info *to_shinfo, *from_shinfo; |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 4829 | int i, delta, len = from->len; |
| 4830 | |
| 4831 | *fragstolen = false; |
| 4832 | |
| 4833 | if (skb_cloned(to)) |
| 4834 | return false; |
| 4835 | |
| 4836 | if (len <= skb_tailroom(to)) { |
Eric Dumazet | e93a043 | 2014-09-15 04:19:52 -0700 | [diff] [blame] | 4837 | if (len) |
| 4838 | BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 4839 | *delta_truesize = 0; |
| 4840 | return true; |
| 4841 | } |
| 4842 | |
Eric Dumazet | c818fa9 | 2017-10-04 10:48:35 -0700 | [diff] [blame] | 4843 | to_shinfo = skb_shinfo(to); |
| 4844 | from_shinfo = skb_shinfo(from); |
| 4845 | if (to_shinfo->frag_list || from_shinfo->frag_list) |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 4846 | return false; |
Willem de Bruijn | 1f8b977 | 2017-08-03 16:29:41 -0400 | [diff] [blame] | 4847 | if (skb_zcopy(to) || skb_zcopy(from)) |
| 4848 | return false; |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 4849 | |
| 4850 | if (skb_headlen(from) != 0) { |
| 4851 | struct page *page; |
| 4852 | unsigned int offset; |
| 4853 | |
Eric Dumazet | c818fa9 | 2017-10-04 10:48:35 -0700 | [diff] [blame] | 4854 | if (to_shinfo->nr_frags + |
| 4855 | from_shinfo->nr_frags >= MAX_SKB_FRAGS) |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 4856 | return false; |
| 4857 | |
| 4858 | if (skb_head_is_locked(from)) |
| 4859 | return false; |
| 4860 | |
| 4861 | delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); |
| 4862 | |
| 4863 | page = virt_to_head_page(from->head); |
| 4864 | offset = from->data - (unsigned char *)page_address(page); |
| 4865 | |
Eric Dumazet | c818fa9 | 2017-10-04 10:48:35 -0700 | [diff] [blame] | 4866 | skb_fill_page_desc(to, to_shinfo->nr_frags, |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 4867 | page, offset, skb_headlen(from)); |
| 4868 | *fragstolen = true; |
| 4869 | } else { |
Eric Dumazet | c818fa9 | 2017-10-04 10:48:35 -0700 | [diff] [blame] | 4870 | if (to_shinfo->nr_frags + |
| 4871 | from_shinfo->nr_frags > MAX_SKB_FRAGS) |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 4872 | return false; |
| 4873 | |
Weiping Pan | f4b549a | 2012-09-28 20:15:30 +0000 | [diff] [blame] | 4874 | delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from)); |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 4875 | } |
| 4876 | |
| 4877 | WARN_ON_ONCE(delta < len); |
| 4878 | |
Eric Dumazet | c818fa9 | 2017-10-04 10:48:35 -0700 | [diff] [blame] | 4879 | memcpy(to_shinfo->frags + to_shinfo->nr_frags, |
| 4880 | from_shinfo->frags, |
| 4881 | from_shinfo->nr_frags * sizeof(skb_frag_t)); |
| 4882 | to_shinfo->nr_frags += from_shinfo->nr_frags; |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 4883 | |
| 4884 | if (!skb_cloned(from)) |
Eric Dumazet | c818fa9 | 2017-10-04 10:48:35 -0700 | [diff] [blame] | 4885 | from_shinfo->nr_frags = 0; |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 4886 | |
Li RongQing | 8ea853f | 2012-09-18 16:53:21 +0000 | [diff] [blame] | 4887 | /* If the skb is not cloned, this does nothing,
| 4888 | * since we set nr_frags to 0. |
| 4889 | */ |
Eric Dumazet | c818fa9 | 2017-10-04 10:48:35 -0700 | [diff] [blame] | 4890 | for (i = 0; i < from_shinfo->nr_frags; i++) |
| 4891 | __skb_frag_ref(&from_shinfo->frags[i]); |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 4892 | |
| 4893 | to->truesize += delta; |
| 4894 | to->len += len; |
| 4895 | to->data_len += len; |
| 4896 | |
| 4897 | *delta_truesize = delta; |
| 4898 | return true; |
| 4899 | } |
| 4900 | EXPORT_SYMBOL(skb_try_coalesce); |
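
/* Usage sketch: receive-queue code (TCP's coalescing path is the classic
 * user) combines this with kfree_skb_partial() above, roughly as follows
 * ('account_truesize' and 'enqueue' are illustrative):
 *
 *	bool fragstolen;
 *	int delta;
 *
 *	if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
 *		account_truesize(sk, delta);
 *		kfree_skb_partial(skb, fragstolen);
 *	} else {
 *		enqueue(sk, skb);
 *	}
 */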
Nicolas Dichtel | 621e84d | 2013-06-26 16:11:27 +0200 | [diff] [blame] | 4901 | |
| 4902 | /** |
Nicolas Dichtel | 8b27f27 | 2013-09-02 15:34:56 +0200 | [diff] [blame] | 4903 | * skb_scrub_packet - scrub an skb |
Nicolas Dichtel | 621e84d | 2013-06-26 16:11:27 +0200 | [diff] [blame] | 4904 | * |
| 4905 | * @skb: buffer to clean |
Nicolas Dichtel | 8b27f27 | 2013-09-02 15:34:56 +0200 | [diff] [blame] | 4906 | * @xnet: packet is crossing netns |
Nicolas Dichtel | 621e84d | 2013-06-26 16:11:27 +0200 | [diff] [blame] | 4907 | * |
Nicolas Dichtel | 8b27f27 | 2013-09-02 15:34:56 +0200 | [diff] [blame] | 4908 | * skb_scrub_packet can be used after encapsulating or decapsulating a packet
| 4909 | * into/from a tunnel. Some information has to be cleared during these
| 4910 | * operations.
| 4911 | * skb_scrub_packet can also be used to clean a skb before injecting it into
| 4912 | * another namespace (@xnet == true). We have to clear all information in the
| 4913 | * skb that could impact namespace isolation. |
Nicolas Dichtel | 621e84d | 2013-06-26 16:11:27 +0200 | [diff] [blame] | 4914 | */ |
Nicolas Dichtel | 8b27f27 | 2013-09-02 15:34:56 +0200 | [diff] [blame] | 4915 | void skb_scrub_packet(struct sk_buff *skb, bool xnet) |
Nicolas Dichtel | 621e84d | 2013-06-26 16:11:27 +0200 | [diff] [blame] | 4916 | { |
Nicolas Dichtel | 621e84d | 2013-06-26 16:11:27 +0200 | [diff] [blame] | 4917 | skb->pkt_type = PACKET_HOST; |
| 4918 | skb->skb_iif = 0; |
WANG Cong | 60ff746 | 2014-05-04 16:39:18 -0700 | [diff] [blame] | 4919 | skb->ignore_df = 0; |
Nicolas Dichtel | 621e84d | 2013-06-26 16:11:27 +0200 | [diff] [blame] | 4920 | skb_dst_drop(skb); |
Nicolas Dichtel | 621e84d | 2013-06-26 16:11:27 +0200 | [diff] [blame] | 4921 | secpath_reset(skb); |
| 4922 | nf_reset(skb); |
| 4923 | nf_reset_trace(skb); |
Herbert Xu | 213dd74 | 2015-04-16 09:03:27 +0800 | [diff] [blame] | 4924 | |
Petr Machata | 6f9a506 | 2018-11-19 16:11:07 +0000 | [diff] [blame] | 4925 | #ifdef CONFIG_NET_SWITCHDEV |
| 4926 | skb->offload_fwd_mark = 0; |
Ido Schimmel | 875e893 | 2018-12-04 08:15:10 +0000 | [diff] [blame] | 4927 | skb->offload_l3_fwd_mark = 0; |
Petr Machata | 6f9a506 | 2018-11-19 16:11:07 +0000 | [diff] [blame] | 4928 | #endif |
| 4929 | |
Herbert Xu | 213dd74 | 2015-04-16 09:03:27 +0800 | [diff] [blame] | 4930 | if (!xnet) |
| 4931 | return; |
| 4932 | |
Ye Yin | 2b5ec1a | 2017-10-26 16:57:05 +0800 | [diff] [blame] | 4933 | ipvs_reset(skb); |
Herbert Xu | 213dd74 | 2015-04-16 09:03:27 +0800 | [diff] [blame] | 4934 | skb->mark = 0; |
Jesus Sanchez-Palencia | c47d8c2 | 2018-07-03 15:42:47 -0700 | [diff] [blame] | 4935 | skb->tstamp = 0; |
Nicolas Dichtel | 621e84d | 2013-06-26 16:11:27 +0200 | [diff] [blame] | 4936 | } |
| 4937 | EXPORT_SYMBOL_GPL(skb_scrub_packet); |
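/* Illustrative sketch: a tunnel or virtual-device receive path scrubs the
 * skb when it crosses a netns boundary; dev_net()/net_eq() decide @xnet.
 * The function name is hypothetical.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>

static void example_cross_netns_rx(struct sk_buff *skb, struct net *src_net,
				   struct net_device *dst_dev)
{
	/* Marks, tstamp, conntrack state etc. are cleared only when the
	 * packet actually changes namespace (xnet == true).
	 */
	skb_scrub_packet(skb, !net_eq(src_net, dev_net(dst_dev)));
	skb->dev = dst_dev;
	netif_rx(skb);
}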
Florian Westphal | de960aa | 2014-01-26 10:58:16 +0100 | [diff] [blame] | 4938 | |
| 4939 | /** |
| 4940 | * skb_gso_transport_seglen - Return length of individual segments of a gso packet |
| 4941 | * |
| 4942 | * @skb: GSO skb |
| 4943 | * |
| 4944 | * skb_gso_transport_seglen is used to determine the real size of the |
| 4945 | * individual segments, including Layer4 headers (TCP/UDP). |
| 4946 | * |
| 4947 | * The MAC/L2 or network (IP, IPv6) headers are not accounted for. |
| 4948 | */ |
Daniel Axtens | a4a7771 | 2018-03-01 17:13:40 +1100 | [diff] [blame] | 4949 | static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) |
Florian Westphal | de960aa | 2014-01-26 10:58:16 +0100 | [diff] [blame] | 4950 | { |
| 4951 | const struct skb_shared_info *shinfo = skb_shinfo(skb); |
Florian Westphal | f993bc2 | 2014-10-20 13:49:18 +0200 | [diff] [blame] | 4952 | unsigned int thlen = 0; |
Florian Westphal | de960aa | 2014-01-26 10:58:16 +0100 | [diff] [blame] | 4953 | |
Florian Westphal | f993bc2 | 2014-10-20 13:49:18 +0200 | [diff] [blame] | 4954 | if (skb->encapsulation) { |
| 4955 | thlen = skb_inner_transport_header(skb) - |
| 4956 | skb_transport_header(skb); |
Florian Westphal | 6d39d58 | 2014-04-09 10:28:50 +0200 | [diff] [blame] | 4957 | |
Florian Westphal | f993bc2 | 2014-10-20 13:49:18 +0200 | [diff] [blame] | 4958 | if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) |
| 4959 | thlen += inner_tcp_hdrlen(skb); |
| 4960 | } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) { |
| 4961 | thlen = tcp_hdrlen(skb); |
Daniel Axtens | 1dd27cd | 2018-03-09 14:06:09 +1100 | [diff] [blame] | 4962 | } else if (unlikely(skb_is_gso_sctp(skb))) { |
Marcelo Ricardo Leitner | 90017ac | 2016-06-02 15:05:43 -0300 | [diff] [blame] | 4963 | thlen = sizeof(struct sctphdr); |
Willem de Bruijn | ee80d1e | 2018-04-26 13:42:16 -0400 | [diff] [blame] | 4964 | } else if (shinfo->gso_type & SKB_GSO_UDP_L4) { |
| 4965 | thlen = sizeof(struct udphdr); |
Florian Westphal | f993bc2 | 2014-10-20 13:49:18 +0200 | [diff] [blame] | 4966 | } |
Florian Westphal | 6d39d58 | 2014-04-09 10:28:50 +0200 | [diff] [blame] | 4967 | /* UFO sets gso_size to the size of the fragmentation |
| 4968 | * payload, i.e. the size of the L4 (UDP) header is already |
| 4969 | * accounted for. |
| 4970 | */ |
Florian Westphal | f993bc2 | 2014-10-20 13:49:18 +0200 | [diff] [blame] | 4971 | return thlen + shinfo->gso_size; |
Florian Westphal | de960aa | 2014-01-26 10:58:16 +0100 | [diff] [blame] | 4972 | } |
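/* Worked example: for a plain (non-encapsulated) TCPv4 GSO skb carrying a
 * 20-byte TCP header and a gso_size of 1448, the above returns
 * 20 + 1448 = 1468 bytes per segment.
 */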
Daniel Axtens | a4a7771 | 2018-03-01 17:13:40 +1100 | [diff] [blame] | 4973 | |
| 4974 | /** |
| 4975 | * skb_gso_network_seglen - Return length of individual segments of a gso packet |
| 4976 | * |
| 4977 | * @skb: GSO skb |
| 4978 | * |
| 4979 | * skb_gso_network_seglen is used to determine the real size of the |
| 4980 | * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP). |
| 4981 | * |
| 4982 | * The MAC/L2 header is not accounted for. |
| 4983 | */ |
| 4984 | static unsigned int skb_gso_network_seglen(const struct sk_buff *skb) |
| 4985 | { |
| 4986 | unsigned int hdr_len = skb_transport_header(skb) - |
| 4987 | skb_network_header(skb); |
| 4988 | |
| 4989 | return hdr_len + skb_gso_transport_seglen(skb); |
| 4990 | } |
| 4991 | |
| 4992 | /** |
| 4993 | * skb_gso_mac_seglen - Return length of individual segments of a gso packet |
| 4994 | * |
| 4995 | * @skb: GSO skb |
| 4996 | * |
| 4997 | * skb_gso_mac_seglen is used to determine the real size of the |
| 4998 | * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4 |
| 4999 | * headers (TCP/UDP). |
| 5000 | */ |
| 5001 | static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb) |
| 5002 | { |
| 5003 | unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb); |
| 5004 | |
| 5005 | return hdr_len + skb_gso_transport_seglen(skb); |
| 5006 | } |
Vlad Yasevich | 0d5501c | 2014-08-08 14:42:13 -0400 | [diff] [blame] | 5007 | |
Marcelo Ricardo Leitner | ae7ef81 | 2016-06-02 15:05:41 -0300 | [diff] [blame] | 5008 | /** |
Daniel Axtens | 2b16f04 | 2018-01-31 14:15:33 +1100 | [diff] [blame] | 5009 | * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS |
| 5010 | * |
| 5011 | * There are a couple of instances where we have a GSO skb, and we |
| 5012 | * want to determine what size it would be after it is segmented. |
| 5013 | * |
| 5014 | * We might want to check: |
| 5015 | * - L3+L4+payload size (e.g. IP forwarding) |
| 5016 | * - L2+L3+L4+payload size (e.g. sanity check before passing to driver) |
| 5017 | * |
| 5018 | * This is a helper to do that correctly considering GSO_BY_FRAGS. |
| 5019 | * |
Mathieu Malaterre | 49682bf | 2018-10-31 13:16:58 +0100 | [diff] [blame] | 5020 | * @skb: GSO skb |
| 5021 | * |
Daniel Axtens | 2b16f04 | 2018-01-31 14:15:33 +1100 | [diff] [blame] | 5022 | * @seg_len: The segmented length (from skb_gso_*_seglen). In the |
| 5023 | * GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS]. |
| 5024 | * |
| 5025 | * @max_len: The maximum permissible length. |
| 5026 | * |
| 5027 | * Returns true if the segmented length <= max length. |
| 5028 | */ |
| 5029 | static inline bool skb_gso_size_check(const struct sk_buff *skb, |
| 5030 | unsigned int seg_len, |
| 5031 | unsigned int max_len) { |
| 5032 | const struct skb_shared_info *shinfo = skb_shinfo(skb); |
| 5033 | const struct sk_buff *iter; |
| 5034 | |
| 5035 | if (shinfo->gso_size != GSO_BY_FRAGS) |
| 5036 | return seg_len <= max_len; |
| 5037 | |
| 5038 | /* Undo this so we can re-use header sizes */ |
| 5039 | seg_len -= GSO_BY_FRAGS; |
| 5040 | |
| 5041 | skb_walk_frags(skb, iter) { |
| 5042 | if (seg_len + skb_headlen(iter) > max_len) |
| 5043 | return false; |
| 5044 | } |
| 5045 | |
| 5046 | return true; |
| 5047 | } |
| 5048 | |
| 5049 | /** |
Daniel Axtens | 779b793 | 2018-03-01 17:13:37 +1100 | [diff] [blame] | 5050 | * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU? |
Marcelo Ricardo Leitner | ae7ef81 | 2016-06-02 15:05:41 -0300 | [diff] [blame] | 5051 | * |
| 5052 | * @skb: GSO skb |
David S. Miller | 76f21b9 | 2016-06-03 22:56:28 -0700 | [diff] [blame] | 5053 | * @mtu: MTU to validate against |
Marcelo Ricardo Leitner | ae7ef81 | 2016-06-02 15:05:41 -0300 | [diff] [blame] | 5054 | * |
Daniel Axtens | 779b793 | 2018-03-01 17:13:37 +1100 | [diff] [blame] | 5055 | * skb_gso_validate_network_len validates if a given skb will fit a |
| 5056 | * wanted MTU once split. It considers L3 headers, L4 headers, and the |
| 5057 | * payload. |
Marcelo Ricardo Leitner | ae7ef81 | 2016-06-02 15:05:41 -0300 | [diff] [blame] | 5058 | */ |
Daniel Axtens | 779b793 | 2018-03-01 17:13:37 +1100 | [diff] [blame] | 5059 | bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu) |
Marcelo Ricardo Leitner | ae7ef81 | 2016-06-02 15:05:41 -0300 | [diff] [blame] | 5060 | { |
Daniel Axtens | 2b16f04 | 2018-01-31 14:15:33 +1100 | [diff] [blame] | 5061 | return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu); |
Marcelo Ricardo Leitner | ae7ef81 | 2016-06-02 15:05:41 -0300 | [diff] [blame] | 5062 | } |
Daniel Axtens | 779b793 | 2018-03-01 17:13:37 +1100 | [diff] [blame] | 5063 | EXPORT_SYMBOL_GPL(skb_gso_validate_network_len); |
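/* Illustrative sketch: the IPv4 forwarding path uses this helper roughly as
 * below before fragmenting or signalling "frag needed" (compare
 * ip_exceeds_mtu() in net/ipv4/ip_forward.c); DF-bit handling is omitted.
 */
#include <linux/skbuff.h>

static bool example_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	/* A GSO skb fits if every resulting segment fits the network MTU. */
	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
		return false;

	return true;
}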
Marcelo Ricardo Leitner | ae7ef81 | 2016-06-02 15:05:41 -0300 | [diff] [blame] | 5064 | |
Daniel Axtens | 2b16f04 | 2018-01-31 14:15:33 +1100 | [diff] [blame] | 5065 | /** |
| 5066 | * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length? |
| 5067 | * |
| 5068 | * @skb: GSO skb |
| 5069 | * @len: length to validate against |
| 5070 | * |
| 5071 | * skb_gso_validate_mac_len validates if a given skb will fit a wanted |
| 5072 | * length once split, including L2, L3 and L4 headers and the payload. |
| 5073 | */ |
| 5074 | bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len) |
| 5075 | { |
| 5076 | return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len); |
| 5077 | } |
| 5078 | EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len); |
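/* Example: a driver or qdisc with a hard MAC-level frame limit (say 9216
 * bytes) could accept a GSO skb only if
 * skb_gso_validate_mac_len(skb, 9216) holds; sch_tbf uses it this way.
 */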
| 5079 | |
Vlad Yasevich | 0d5501c | 2014-08-08 14:42:13 -0400 | [diff] [blame] | 5080 | static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) |
| 5081 | { |
Toshiaki Makita | 4bbb3e0 | 2018-03-13 14:51:27 +0900 | [diff] [blame] | 5082 | int mac_len; |
| 5083 | |
Vlad Yasevich | 0d5501c | 2014-08-08 14:42:13 -0400 | [diff] [blame] | 5084 | if (skb_cow(skb, skb_headroom(skb)) < 0) { |
| 5085 | kfree_skb(skb); |
| 5086 | return NULL; |
| 5087 | } |
| 5088 | |
Toshiaki Makita | 4bbb3e0 | 2018-03-13 14:51:27 +0900 | [diff] [blame] | 5089 | mac_len = skb->data - skb_mac_header(skb); |
Toshiaki Makita | ae47457 | 2018-03-29 19:05:29 +0900 | [diff] [blame] | 5090 | if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) { |
| 5091 | memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), |
| 5092 | mac_len - VLAN_HLEN - ETH_TLEN); |
| 5093 | } |
Vlad Yasevich | 0d5501c | 2014-08-08 14:42:13 -0400 | [diff] [blame] | 5094 | skb->mac_header += VLAN_HLEN; |
| 5095 | return skb; |
| 5096 | } |
| 5097 | |
| 5098 | struct sk_buff *skb_vlan_untag(struct sk_buff *skb) |
| 5099 | { |
| 5100 | struct vlan_hdr *vhdr; |
| 5101 | u16 vlan_tci; |
| 5102 | |
Jiri Pirko | df8a39d | 2015-01-13 17:13:44 +0100 | [diff] [blame] | 5103 | if (unlikely(skb_vlan_tag_present(skb))) { |
Vlad Yasevich | 0d5501c | 2014-08-08 14:42:13 -0400 | [diff] [blame] | 5104 | /* vlan_tci is already set up, so leave this for another time */ |
| 5105 | return skb; |
| 5106 | } |
| 5107 | |
| 5108 | skb = skb_share_check(skb, GFP_ATOMIC); |
| 5109 | if (unlikely(!skb)) |
| 5110 | goto err_free; |
| 5111 | |
| 5112 | if (unlikely(!pskb_may_pull(skb, VLAN_HLEN))) |
| 5113 | goto err_free; |
| 5114 | |
| 5115 | vhdr = (struct vlan_hdr *)skb->data; |
| 5116 | vlan_tci = ntohs(vhdr->h_vlan_TCI); |
| 5117 | __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); |
| 5118 | |
| 5119 | skb_pull_rcsum(skb, VLAN_HLEN); |
| 5120 | vlan_set_encap_proto(skb, vhdr); |
| 5121 | |
| 5122 | skb = skb_reorder_vlan_header(skb); |
| 5123 | if (unlikely(!skb)) |
| 5124 | goto err_free; |
| 5125 | |
| 5126 | skb_reset_network_header(skb); |
| 5127 | skb_reset_transport_header(skb); |
| 5128 | skb_reset_mac_len(skb); |
| 5129 | |
| 5130 | return skb; |
| 5131 | |
| 5132 | err_free: |
| 5133 | kfree_skb(skb); |
| 5134 | return NULL; |
| 5135 | } |
| 5136 | EXPORT_SYMBOL(skb_vlan_untag); |
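/* Illustrative sketch: an rx handler untags in software when the NIC did
 * not strip the 802.1Q header, as __netif_receive_skb_core() does. The
 * function name is hypothetical.
 */
#include <linux/if_vlan.h>
#include <linux/skbuff.h>

static struct sk_buff *example_maybe_untag(struct sk_buff *skb)
{
	if (eth_type_vlan(skb->protocol) && !skb_vlan_tag_present(skb))
		skb = skb_vlan_untag(skb);	/* NULL on error; skb freed */
	return skb;
}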
Eric Dumazet | 2e4e441 | 2014-09-17 04:49:49 -0700 | [diff] [blame] | 5137 | |
Jiri Pirko | e219512 | 2014-11-19 14:05:01 +0100 | [diff] [blame] | 5138 | int skb_ensure_writable(struct sk_buff *skb, int write_len) |
| 5139 | { |
| 5140 | if (!pskb_may_pull(skb, write_len)) |
| 5141 | return -ENOMEM; |
| 5142 | |
| 5143 | if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) |
| 5144 | return 0; |
| 5145 | |
| 5146 | return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); |
| 5147 | } |
| 5148 | EXPORT_SYMBOL(skb_ensure_writable); |
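/* Illustrative sketch: any header rewrite wants skb_ensure_writable() first
 * so the bytes are pulled into the linear area and not shared with clones.
 * Rewriting the IPv4 DS field, for instance (function name hypothetical):
 */
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <net/dsfield.h>

static int example_set_dsfield(struct sk_buff *skb, u8 dsfield)
{
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				       sizeof(struct iphdr));
	if (err)
		return err;

	/* Updates the IPv4 header checksum as well. */
	ipv4_change_dsfield(ip_hdr(skb), 0, dsfield);
	return 0;
}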
| 5149 | |
Shmulik Ladkani | bfca4c5 | 2016-09-19 19:11:09 +0300 | [diff] [blame] | 5150 | /* Remove the VLAN header from the packet and update the csum accordingly. |
| 5151 | * Expects an skb without a hwaccel tag, with the VLAN tag in the payload. |
| 5152 | */ |
| 5153 | int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci) |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5154 | { |
| 5155 | struct vlan_hdr *vhdr; |
Shmulik Ladkani | b6a7920 | 2016-09-29 12:10:41 +0300 | [diff] [blame] | 5156 | int offset = skb->data - skb_mac_header(skb); |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5157 | int err; |
| 5158 | |
Shmulik Ladkani | b6a7920 | 2016-09-29 12:10:41 +0300 | [diff] [blame] | 5159 | if (WARN_ONCE(offset, |
| 5160 | "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n", |
| 5161 | offset)) { |
| 5162 | return -EINVAL; |
| 5163 | } |
| 5164 | |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5165 | err = skb_ensure_writable(skb, VLAN_ETH_HLEN); |
| 5166 | if (unlikely(err)) |
Shmulik Ladkani | b6a7920 | 2016-09-29 12:10:41 +0300 | [diff] [blame] | 5167 | return err; |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5168 | |
| 5169 | skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); |
| 5170 | |
| 5171 | vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); |
| 5172 | *vlan_tci = ntohs(vhdr->h_vlan_TCI); |
| 5173 | |
| 5174 | memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN); |
| 5175 | __skb_pull(skb, VLAN_HLEN); |
| 5176 | |
| 5177 | vlan_set_encap_proto(skb, vhdr); |
| 5178 | skb->mac_header += VLAN_HLEN; |
| 5179 | |
| 5180 | if (skb_network_offset(skb) < ETH_HLEN) |
| 5181 | skb_set_network_header(skb, ETH_HLEN); |
| 5182 | |
| 5183 | skb_reset_mac_len(skb); |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5184 | |
| 5185 | return err; |
| 5186 | } |
Shmulik Ladkani | bfca4c5 | 2016-09-19 19:11:09 +0300 | [diff] [blame] | 5187 | EXPORT_SYMBOL(__skb_vlan_pop); |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5188 | |
Shmulik Ladkani | b6a7920 | 2016-09-29 12:10:41 +0300 | [diff] [blame] | 5189 | /* Pop a vlan tag either from hwaccel or from payload. |
| 5190 | * Expects skb->data at mac header. |
| 5191 | */ |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5192 | int skb_vlan_pop(struct sk_buff *skb) |
| 5193 | { |
| 5194 | u16 vlan_tci; |
| 5195 | __be16 vlan_proto; |
| 5196 | int err; |
| 5197 | |
Jiri Pirko | df8a39d | 2015-01-13 17:13:44 +0100 | [diff] [blame] | 5198 | if (likely(skb_vlan_tag_present(skb))) { |
Michał Mirosław | b1817524 | 2018-11-09 00:18:02 +0100 | [diff] [blame] | 5199 | __vlan_hwaccel_clear_tag(skb); |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5200 | } else { |
Shmulik Ladkani | ecf4ee4 | 2016-09-20 12:48:37 +0300 | [diff] [blame] | 5201 | if (unlikely(!eth_type_vlan(skb->protocol))) |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5202 | return 0; |
| 5203 | |
| 5204 | err = __skb_vlan_pop(skb, &vlan_tci); |
| 5205 | if (err) |
| 5206 | return err; |
| 5207 | } |
| 5208 | /* move next vlan tag to hw accel tag */ |
Shmulik Ladkani | ecf4ee4 | 2016-09-20 12:48:37 +0300 | [diff] [blame] | 5209 | if (likely(!eth_type_vlan(skb->protocol))) |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5210 | return 0; |
| 5211 | |
| 5212 | vlan_proto = skb->protocol; |
| 5213 | err = __skb_vlan_pop(skb, &vlan_tci); |
| 5214 | if (unlikely(err)) |
| 5215 | return err; |
| 5216 | |
| 5217 | __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); |
| 5218 | return 0; |
| 5219 | } |
| 5220 | EXPORT_SYMBOL(skb_vlan_pop); |
| 5221 | |
Shmulik Ladkani | b6a7920 | 2016-09-29 12:10:41 +0300 | [diff] [blame] | 5222 | /* Push a vlan tag either into hwaccel or into payload (if hwaccel tag present). |
| 5223 | * Expects skb->data at mac header. |
| 5224 | */ |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5225 | int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) |
| 5226 | { |
Jiri Pirko | df8a39d | 2015-01-13 17:13:44 +0100 | [diff] [blame] | 5227 | if (skb_vlan_tag_present(skb)) { |
Shmulik Ladkani | b6a7920 | 2016-09-29 12:10:41 +0300 | [diff] [blame] | 5228 | int offset = skb->data - skb_mac_header(skb); |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5229 | int err; |
| 5230 | |
Shmulik Ladkani | b6a7920 | 2016-09-29 12:10:41 +0300 | [diff] [blame] | 5231 | if (WARN_ONCE(offset, |
| 5232 | "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n", |
| 5233 | offset)) { |
| 5234 | return -EINVAL; |
| 5235 | } |
| 5236 | |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5237 | err = __vlan_insert_tag(skb, skb->vlan_proto, |
Jiri Pirko | df8a39d | 2015-01-13 17:13:44 +0100 | [diff] [blame] | 5238 | skb_vlan_tag_get(skb)); |
Shmulik Ladkani | b6a7920 | 2016-09-29 12:10:41 +0300 | [diff] [blame] | 5239 | if (err) |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5240 | return err; |
Daniel Borkmann | 9241e2d | 2016-04-16 02:27:58 +0200 | [diff] [blame] | 5241 | |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5242 | skb->protocol = skb->vlan_proto; |
| 5243 | skb->mac_len += VLAN_HLEN; |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5244 | |
Daniel Borkmann | 6b83d28 | 2016-02-20 00:29:30 +0100 | [diff] [blame] | 5245 | skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5246 | } |
| 5247 | __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); |
| 5248 | return 0; |
| 5249 | } |
| 5250 | EXPORT_SYMBOL(skb_vlan_push); |
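/* Illustrative sketch: Open vSwitch-style actions combine the two helpers;
 * both expect skb->data at the mac header. The function name is
 * hypothetical.
 */
#include <linux/if_vlan.h>
#include <linux/skbuff.h>

static int example_rewrite_vlan(struct sk_buff *skb, u16 new_vid)
{
	int err;

	err = skb_vlan_pop(skb);	/* drop the outermost tag, if any */
	if (err)
		return err;

	/* Re-tag with the new VLAN ID, priority 0, 802.1Q TPID. */
	return skb_vlan_push(skb, htons(ETH_P_8021Q), new_vid);
}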
| 5251 | |
Eric Dumazet | 2e4e441 | 2014-09-17 04:49:49 -0700 | [diff] [blame] | 5252 | /** |
| 5253 | * alloc_skb_with_frags - allocate skb with page frags |
| 5254 | * |
Masanari Iida | de3f0d0 | 2014-10-09 12:58:08 +0900 | [diff] [blame] | 5255 | * @header_len: size of linear part |
| 5256 | * @data_len: needed length in frags |
| 5257 | * @max_page_order: max page order desired. |
| 5258 | * @errcode: pointer to error code if any |
| 5259 | * @gfp_mask: allocation mask |
Eric Dumazet | 2e4e441 | 2014-09-17 04:49:49 -0700 | [diff] [blame] | 5260 | * |
| 5261 | * This can be used to allocate a paged skb, given a maximal order for frags. |
| 5262 | */ |
| 5263 | struct sk_buff *alloc_skb_with_frags(unsigned long header_len, |
| 5264 | unsigned long data_len, |
| 5265 | int max_page_order, |
| 5266 | int *errcode, |
| 5267 | gfp_t gfp_mask) |
| 5268 | { |
| 5269 | int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT; |
| 5270 | unsigned long chunk; |
| 5271 | struct sk_buff *skb; |
| 5272 | struct page *page; |
Eric Dumazet | 2e4e441 | 2014-09-17 04:49:49 -0700 | [diff] [blame] | 5273 | int i; |
| 5274 | |
| 5275 | *errcode = -EMSGSIZE; |
| 5276 | /* Note this test could be relaxed, if we succeed in allocating |
| 5277 | * high order pages... |
| 5278 | */ |
| 5279 | if (npages > MAX_SKB_FRAGS) |
| 5280 | return NULL; |
| 5281 | |
Eric Dumazet | 2e4e441 | 2014-09-17 04:49:49 -0700 | [diff] [blame] | 5282 | *errcode = -ENOBUFS; |
David Rientjes | f8c468e | 2019-01-02 13:01:43 -0800 | [diff] [blame^] | 5283 | skb = alloc_skb(header_len, gfp_mask); |
Eric Dumazet | 2e4e441 | 2014-09-17 04:49:49 -0700 | [diff] [blame] | 5284 | if (!skb) |
| 5285 | return NULL; |
| 5286 | |
| 5287 | skb->truesize += npages << PAGE_SHIFT; |
| 5288 | |
| 5289 | for (i = 0; npages > 0; i++) { |
| 5290 | int order = max_page_order; |
| 5291 | |
| 5292 | while (order) { |
| 5293 | if (npages >= 1 << order) { |
Mel Gorman | d0164ad | 2015-11-06 16:28:21 -0800 | [diff] [blame] | 5294 | page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) | |
Eric Dumazet | 2e4e441 | 2014-09-17 04:49:49 -0700 | [diff] [blame] | 5295 | __GFP_COMP | |
Michal Hocko | d14b56f | 2018-06-28 17:53:06 +0200 | [diff] [blame] | 5296 | __GFP_NOWARN, |
Eric Dumazet | 2e4e441 | 2014-09-17 04:49:49 -0700 | [diff] [blame] | 5297 | order); |
| 5298 | if (page) |
| 5299 | goto fill_page; |
| 5300 | /* Do not retry other high order allocations */ |
| 5301 | order = 1; |
| 5302 | max_page_order = 0; |
| 5303 | } |
| 5304 | order--; |
| 5305 | } |
| 5306 | page = alloc_page(gfp_mask); |
| 5307 | if (!page) |
| 5308 | goto failure; |
| 5309 | fill_page: |
| 5310 | chunk = min_t(unsigned long, data_len, |
| 5311 | PAGE_SIZE << order); |
| 5312 | skb_fill_page_desc(skb, i, page, 0, chunk); |
| 5313 | data_len -= chunk; |
| 5314 | npages -= 1 << order; |
| 5315 | } |
| 5316 | return skb; |
| 5317 | |
| 5318 | failure: |
| 5319 | kfree_skb(skb); |
| 5320 | return NULL; |
| 5321 | } |
| 5322 | EXPORT_SYMBOL(alloc_skb_with_frags); |
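/* Illustrative sketch: building a mostly-paged skb the way
 * sock_alloc_send_pskb() does, here with 128 linear bytes, 32 KB of frag
 * data and at most order-3 pages. The function name is hypothetical.
 */
#include <linux/err.h>
#include <linux/skbuff.h>

static struct sk_buff *example_alloc_paged(void)
{
	struct sk_buff *skb;
	int errcode;

	skb = alloc_skb_with_frags(128, 32 * 1024, PAGE_ALLOC_COSTLY_ORDER,
				   &errcode, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(errcode);
	return skb;
}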
Sowmini Varadhan | 6fa01cc | 2016-04-22 18:36:35 -0700 | [diff] [blame] | 5323 | |
| 5324 | /* carve out the first off bytes from skb when off < headlen */ |
| 5325 | static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off, |
| 5326 | const int headlen, gfp_t gfp_mask) |
| 5327 | { |
| 5328 | int i; |
| 5329 | int size = skb_end_offset(skb); |
| 5330 | int new_hlen = headlen - off; |
| 5331 | u8 *data; |
Sowmini Varadhan | 6fa01cc | 2016-04-22 18:36:35 -0700 | [diff] [blame] | 5332 | |
| 5333 | size = SKB_DATA_ALIGN(size); |
| 5334 | |
| 5335 | if (skb_pfmemalloc(skb)) |
| 5336 | gfp_mask |= __GFP_MEMALLOC; |
| 5337 | data = kmalloc_reserve(size + |
| 5338 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), |
| 5339 | gfp_mask, NUMA_NO_NODE, NULL); |
| 5340 | if (!data) |
| 5341 | return -ENOMEM; |
| 5342 | |
| 5343 | size = SKB_WITH_OVERHEAD(ksize(data)); |
| 5344 | |
| 5345 | /* Copy real data, and all frags */ |
| 5346 | skb_copy_from_linear_data_offset(skb, off, data, new_hlen); |
| 5347 | skb->len -= off; |
| 5348 | |
| 5349 | memcpy((struct skb_shared_info *)(data + size), |
| 5350 | skb_shinfo(skb), |
| 5351 | offsetof(struct skb_shared_info, |
| 5352 | frags[skb_shinfo(skb)->nr_frags])); |
| 5353 | if (skb_cloned(skb)) { |
| 5354 | /* drop the old head gracefully */ |
| 5355 | if (skb_orphan_frags(skb, gfp_mask)) { |
| 5356 | kfree(data); |
| 5357 | return -ENOMEM; |
| 5358 | } |
| 5359 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) |
| 5360 | skb_frag_ref(skb, i); |
| 5361 | if (skb_has_frag_list(skb)) |
| 5362 | skb_clone_fraglist(skb); |
| 5363 | skb_release_data(skb); |
| 5364 | } else { |
| 5365 | /* we can reuse the existing refcount - all we did was |
| 5366 | * relocate values |
| 5367 | */ |
| 5368 | skb_free_head(skb); |
| 5369 | } |
| 5370 | |
Sowmini Varadhan | 6fa01cc | 2016-04-22 18:36:35 -0700 | [diff] [blame] | 5371 | skb->head = data; |
| 5372 | skb->data = data; |
| 5373 | skb->head_frag = 0; |
| 5374 | #ifdef NET_SKBUFF_DATA_USES_OFFSET |
| 5375 | skb->end = size; |
Sowmini Varadhan | 6fa01cc | 2016-04-22 18:36:35 -0700 | [diff] [blame] | 5376 | #else |
| 5377 | skb->end = skb->head + size; |
| 5378 | #endif |
| 5379 | skb_set_tail_pointer(skb, skb_headlen(skb)); |
| 5380 | skb_headers_offset_update(skb, 0); |
| 5381 | skb->cloned = 0; |
| 5382 | skb->hdr_len = 0; |
| 5383 | skb->nohdr = 0; |
| 5384 | atomic_set(&skb_shinfo(skb)->dataref, 1); |
| 5385 | |
| 5386 | return 0; |
| 5387 | } |
| 5388 | |
| 5389 | static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp); |
| 5390 | |
| 5391 | /* carve out the first eat bytes from skb's frag_list. May recurse into |
| 5392 | * pskb_carve() |
| 5393 | */ |
| 5394 | static int pskb_carve_frag_list(struct sk_buff *skb, |
| 5395 | struct skb_shared_info *shinfo, int eat, |
| 5396 | gfp_t gfp_mask) |
| 5397 | { |
| 5398 | struct sk_buff *list = shinfo->frag_list; |
| 5399 | struct sk_buff *clone = NULL; |
| 5400 | struct sk_buff *insp = NULL; |
| 5401 | |
| 5402 | do { |
| 5403 | if (!list) { |
| 5404 | pr_err("Not enough bytes to eat. Want %d\n", eat); |
| 5405 | return -EFAULT; |
| 5406 | } |
| 5407 | if (list->len <= eat) { |
| 5408 | /* Eaten as whole. */ |
| 5409 | eat -= list->len; |
| 5410 | list = list->next; |
| 5411 | insp = list; |
| 5412 | } else { |
| 5413 | /* Eaten partially. */ |
| 5414 | if (skb_shared(list)) { |
| 5415 | clone = skb_clone(list, gfp_mask); |
| 5416 | if (!clone) |
| 5417 | return -ENOMEM; |
| 5418 | insp = list->next; |
| 5419 | list = clone; |
| 5420 | } else { |
| 5421 | /* This may be pulled without problems. */ |
| 5422 | insp = list; |
| 5423 | } |
| 5424 | if (pskb_carve(list, eat, gfp_mask) < 0) { |
| 5425 | kfree_skb(clone); |
| 5426 | return -ENOMEM; |
| 5427 | } |
| 5428 | break; |
| 5429 | } |
| 5430 | } while (eat); |
| 5431 | |
| 5432 | /* Free pulled out fragments. */ |
| 5433 | while ((list = shinfo->frag_list) != insp) { |
| 5434 | shinfo->frag_list = list->next; |
| 5435 | kfree_skb(list); |
| 5436 | } |
| 5437 | /* And insert new clone at head. */ |
| 5438 | if (clone) { |
| 5439 | clone->next = list; |
| 5440 | shinfo->frag_list = clone; |
| 5441 | } |
| 5442 | return 0; |
| 5443 | } |
| 5444 | |
| 5445 | /* carve off the first off bytes from skb. The split line (off) is in the |
| 5446 | * non-linear part of the skb |
| 5447 | */ |
| 5448 | static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off, |
| 5449 | int pos, gfp_t gfp_mask) |
| 5450 | { |
| 5451 | int i, k = 0; |
| 5452 | int size = skb_end_offset(skb); |
| 5453 | u8 *data; |
| 5454 | const int nfrags = skb_shinfo(skb)->nr_frags; |
| 5455 | struct skb_shared_info *shinfo; |
Sowmini Varadhan | 6fa01cc | 2016-04-22 18:36:35 -0700 | [diff] [blame] | 5456 | |
| 5457 | size = SKB_DATA_ALIGN(size); |
| 5458 | |
| 5459 | if (skb_pfmemalloc(skb)) |
| 5460 | gfp_mask |= __GFP_MEMALLOC; |
| 5461 | data = kmalloc_reserve(size + |
| 5462 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), |
| 5463 | gfp_mask, NUMA_NO_NODE, NULL); |
| 5464 | if (!data) |
| 5465 | return -ENOMEM; |
| 5466 | |
| 5467 | size = SKB_WITH_OVERHEAD(ksize(data)); |
| 5468 | |
| 5469 | memcpy((struct skb_shared_info *)(data + size), |
| 5470 | skb_shinfo(skb), offsetof(struct skb_shared_info, |
| 5471 | frags[skb_shinfo(skb)->nr_frags])); |
| 5472 | if (skb_orphan_frags(skb, gfp_mask)) { |
| 5473 | kfree(data); |
| 5474 | return -ENOMEM; |
| 5475 | } |
| 5476 | shinfo = (struct skb_shared_info *)(data + size); |
| 5477 | for (i = 0; i < nfrags; i++) { |
| 5478 | int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]); |
| 5479 | |
| 5480 | if (pos + fsize > off) { |
| 5481 | shinfo->frags[k] = skb_shinfo(skb)->frags[i]; |
| 5482 | |
| 5483 | if (pos < off) { |
| 5484 | /* Split frag. |
| 5485 | * We have two variants in this case: |
| 5486 | * 1. Move the whole frag to the second |
| 5487 | * part, if possible. E.g. this |
| 5488 | * approach is mandatory for TUX, |
| 5489 | * where splitting is expensive. |
| 5490 | * 2. Split accurately. This is what we do. |
| 5491 | */ |
| 5492 | shinfo->frags[0].page_offset += off - pos; |
| 5493 | skb_frag_size_sub(&shinfo->frags[0], off - pos); |
| 5494 | } |
| 5495 | skb_frag_ref(skb, i); |
| 5496 | k++; |
| 5497 | } |
| 5498 | pos += fsize; |
| 5499 | } |
| 5500 | shinfo->nr_frags = k; |
| 5501 | if (skb_has_frag_list(skb)) |
| 5502 | skb_clone_fraglist(skb); |
| 5503 | |
| 5504 | if (k == 0) { |
| 5505 | /* split line is in frag list */ |
| 5506 | pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask); |
| 5507 | } |
| 5508 | skb_release_data(skb); |
| 5509 | |
Sowmini Varadhan | 6fa01cc | 2016-04-22 18:36:35 -0700 | [diff] [blame] | 5510 | skb->head = data; |
| 5511 | skb->head_frag = 0; |
| 5512 | skb->data = data; |
| 5513 | #ifdef NET_SKBUFF_DATA_USES_OFFSET |
| 5514 | skb->end = size; |
Sowmini Varadhan | 6fa01cc | 2016-04-22 18:36:35 -0700 | [diff] [blame] | 5515 | #else |
| 5516 | skb->end = skb->head + size; |
| 5517 | #endif |
| 5518 | skb_reset_tail_pointer(skb); |
| 5519 | skb_headers_offset_update(skb, 0); |
| 5520 | skb->cloned = 0; |
| 5521 | skb->hdr_len = 0; |
| 5522 | skb->nohdr = 0; |
| 5523 | skb->len -= off; |
| 5524 | skb->data_len = skb->len; |
| 5525 | atomic_set(&skb_shinfo(skb)->dataref, 1); |
| 5526 | return 0; |
| 5527 | } |
| 5528 | |
| 5529 | /* remove len bytes from the beginning of the skb */ |
| 5530 | static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp) |
| 5531 | { |
| 5532 | int headlen = skb_headlen(skb); |
| 5533 | |
| 5534 | if (len < headlen) |
| 5535 | return pskb_carve_inside_header(skb, len, headlen, gfp); |
| 5536 | else |
| 5537 | return pskb_carve_inside_nonlinear(skb, len, headlen, gfp); |
| 5538 | } |
| 5539 | |
| 5540 | /* Extract to_copy bytes starting at off from skb, and return this in |
| 5541 | * a new skb |
| 5542 | */ |
| 5543 | struct sk_buff *pskb_extract(struct sk_buff *skb, int off, |
| 5544 | int to_copy, gfp_t gfp) |
| 5545 | { |
| 5546 | struct sk_buff *clone = skb_clone(skb, gfp); |
| 5547 | |
| 5548 | if (!clone) |
| 5549 | return NULL; |
| 5550 | |
| 5551 | if (pskb_carve(clone, off, gfp) < 0 || |
| 5552 | pskb_trim(clone, to_copy)) { |
| 5553 | kfree_skb(clone); |
| 5554 | return NULL; |
| 5555 | } |
| 5556 | return clone; |
| 5557 | } |
| 5558 | EXPORT_SYMBOL(pskb_extract); |
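/* Illustrative sketch: clone-and-carve a byte range out of a received skb
 * without copying payload, as the RDS receive path does. Offsets below are
 * arbitrary.
 */
#include <linux/skbuff.h>

static struct sk_buff *example_extract_range(struct sk_buff *skb)
{
	/* 256 bytes starting 64 bytes in; frags are referenced, not copied,
	 * and the original skb is left untouched.
	 */
	return pskb_extract(skb, 64, 256, GFP_ATOMIC);
}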
Eric Dumazet | c8c8b12 | 2016-12-07 09:19:33 -0800 | [diff] [blame] | 5559 | |
| 5560 | /** |
| 5561 | * skb_condense - try to get rid of fragments/frag_list if possible |
| 5562 | * @skb: buffer |
| 5563 | * |
| 5564 | * Can be used to save memory before skb is added to a busy queue. |
| 5565 | * If packet has bytes in frags and enough tail room in skb->head, |
| 5566 | * pull all of them, so that we can free the frags right now and adjust |
| 5567 | * truesize. |
| 5568 | * Notes: |
| 5569 | * We do not reallocate skb->head, thus this cannot fail. |
| 5570 | * Caller must re-evaluate skb->truesize if needed. |
| 5571 | */ |
| 5572 | void skb_condense(struct sk_buff *skb) |
| 5573 | { |
Eric Dumazet | 3174fed | 2016-12-09 08:02:05 -0800 | [diff] [blame] | 5574 | if (skb->data_len) { |
| 5575 | if (skb->data_len > skb->end - skb->tail || |
| 5576 | skb_cloned(skb)) |
| 5577 | return; |
Eric Dumazet | c8c8b12 | 2016-12-07 09:19:33 -0800 | [diff] [blame] | 5578 | |
Eric Dumazet | 3174fed | 2016-12-09 08:02:05 -0800 | [diff] [blame] | 5579 | /* Nice, we can free page frag(s) right now */ |
| 5580 | __pskb_pull_tail(skb, skb->data_len); |
| 5581 | } |
| 5582 | /* At this point, skb->truesize might be overestimated, |
| 5583 | * because skb had a fragment, and fragments do not tell |
| 5584 | * their truesize. |
| 5585 | * When we pulled its content into skb->head, fragment |
| 5586 | * was freed, but __pskb_pull_tail() could not possibly |
| 5587 | * adjust skb->truesize, not knowing the frag truesize. |
Eric Dumazet | c8c8b12 | 2016-12-07 09:19:33 -0800 | [diff] [blame] | 5588 | */ |
| 5589 | skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); |
| 5590 | } |
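/* Illustrative sketch: condense before parking an skb on a long-lived
 * queue and then re-read truesize, as tcp_add_backlog() does. The function
 * name is hypothetical.
 */
#include <linux/skbuff.h>

static void example_park_skb(struct sk_buff_head *q, struct sk_buff *skb)
{
	skb_condense(skb);	/* may pull frag data into skb->head */
	/* skb->truesize may have shrunk; account with the new value. */
	skb_queue_tail(q, skb);
}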
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 5591 | |
| 5592 | #ifdef CONFIG_SKB_EXTENSIONS |
| 5593 | static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id) |
| 5594 | { |
| 5595 | return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE); |
| 5596 | } |
| 5597 | |
| 5598 | static struct skb_ext *skb_ext_alloc(void) |
| 5599 | { |
| 5600 | struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC); |
| 5601 | |
| 5602 | if (new) { |
| 5603 | memset(new->offset, 0, sizeof(new->offset)); |
| 5604 | refcount_set(&new->refcnt, 1); |
| 5605 | } |
| 5606 | |
| 5607 | return new; |
| 5608 | } |
| 5609 | |
Florian Westphal | 4165079 | 2018-12-18 17:15:27 +0100 | [diff] [blame] | 5610 | static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old, |
| 5611 | unsigned int old_active) |
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 5612 | { |
| 5613 | struct skb_ext *new; |
| 5614 | |
| 5615 | if (refcount_read(&old->refcnt) == 1) |
| 5616 | return old; |
| 5617 | |
| 5618 | new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC); |
| 5619 | if (!new) |
| 5620 | return NULL; |
| 5621 | |
| 5622 | memcpy(new, old, old->chunks * SKB_EXT_ALIGN_VALUE); |
| 5623 | refcount_set(&new->refcnt, 1); |
| 5624 | |
Florian Westphal | 4165079 | 2018-12-18 17:15:27 +0100 | [diff] [blame] | 5625 | #ifdef CONFIG_XFRM |
| 5626 | if (old_active & (1 << SKB_EXT_SEC_PATH)) { |
| 5627 | struct sec_path *sp = skb_ext_get_ptr(old, SKB_EXT_SEC_PATH); |
| 5628 | unsigned int i; |
| 5629 | |
| 5630 | for (i = 0; i < sp->len; i++) |
| 5631 | xfrm_state_hold(sp->xvec[i]); |
| 5632 | } |
| 5633 | #endif |
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 5634 | __skb_ext_put(old); |
| 5635 | return new; |
| 5636 | } |
| 5637 | |
| 5638 | /** |
| 5639 | * skb_ext_add - allocate space for given extension, COW if needed |
| 5640 | * @skb: buffer |
| 5641 | * @id: extension to allocate space for |
| 5642 | * |
| 5643 | * Allocates enough space for the given extension. |
| 5644 | * If the extension is already present, a pointer to that extension |
| 5645 | * is returned. |
| 5646 | * |
| 5647 | * If the skb was cloned, COW applies and the returned memory can be |
| 5648 | * modified without changing the extension space of cloned buffers. |
| 5649 | * |
| 5650 | * Returns pointer to the extension or NULL on allocation failure. |
| 5651 | */ |
| 5652 | void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id) |
| 5653 | { |
| 5654 | struct skb_ext *new, *old = NULL; |
| 5655 | unsigned int newlen, newoff; |
| 5656 | |
| 5657 | if (skb->active_extensions) { |
| 5658 | old = skb->extensions; |
| 5659 | |
Florian Westphal | 4165079 | 2018-12-18 17:15:27 +0100 | [diff] [blame] | 5660 | new = skb_ext_maybe_cow(old, skb->active_extensions); |
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 5661 | if (!new) |
| 5662 | return NULL; |
| 5663 | |
Paolo Abeni | 682ec85 | 2018-12-21 19:03:15 +0100 | [diff] [blame] | 5664 | if (__skb_ext_exist(new, id)) |
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 5665 | goto set_active; |
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 5666 | |
Paolo Abeni | e94e50b | 2018-12-21 19:03:13 +0100 | [diff] [blame] | 5667 | newoff = new->chunks; |
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 5668 | } else { |
| 5669 | newoff = SKB_EXT_CHUNKSIZEOF(*new); |
| 5670 | |
| 5671 | new = skb_ext_alloc(); |
| 5672 | if (!new) |
| 5673 | return NULL; |
| 5674 | } |
| 5675 | |
| 5676 | newlen = newoff + skb_ext_type_len[id]; |
| 5677 | new->chunks = newlen; |
| 5678 | new->offset[id] = newoff; |
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 5679 | set_active: |
Paolo Abeni | 682ec85 | 2018-12-21 19:03:15 +0100 | [diff] [blame] | 5680 | skb->extensions = new; |
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 5681 | skb->active_extensions |= 1 << id; |
| 5682 | return skb_ext_get_ptr(new, id); |
| 5683 | } |
| 5684 | EXPORT_SYMBOL(skb_ext_add); |
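/* Illustrative sketch: extension space returned by skb_ext_add() is not
 * zeroed when freshly allocated, so new users initialize it themselves
 * (compare secpath_set() in net/xfrm). Assumes CONFIG_XFRM and
 * CONFIG_SKB_EXTENSIONS; the function name is hypothetical.
 */
#include <linux/skbuff.h>
#include <linux/string.h>
#include <net/xfrm.h>

static struct sec_path *example_add_secpath(struct sk_buff *skb)
{
	bool existed = skb_ext_exist(skb, SKB_EXT_SEC_PATH);
	struct sec_path *sp = skb_ext_add(skb, SKB_EXT_SEC_PATH);

	if (!sp)
		return NULL;	/* allocation failure */

	if (!existed)
		memset(sp, 0, sizeof(*sp));	/* fresh space is uninitialized */
	return sp;
}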
| 5685 | |
Florian Westphal | 4165079 | 2018-12-18 17:15:27 +0100 | [diff] [blame] | 5686 | #ifdef CONFIG_XFRM |
| 5687 | static void skb_ext_put_sp(struct sec_path *sp) |
| 5688 | { |
| 5689 | unsigned int i; |
| 5690 | |
| 5691 | for (i = 0; i < sp->len; i++) |
| 5692 | xfrm_state_put(sp->xvec[i]); |
| 5693 | } |
| 5694 | #endif |
| 5695 | |
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 5696 | void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id) |
| 5697 | { |
| 5698 | struct skb_ext *ext = skb->extensions; |
| 5699 | |
| 5700 | skb->active_extensions &= ~(1 << id); |
| 5701 | if (skb->active_extensions == 0) { |
| 5702 | skb->extensions = NULL; |
| 5703 | __skb_ext_put(ext); |
Florian Westphal | 4165079 | 2018-12-18 17:15:27 +0100 | [diff] [blame] | 5704 | #ifdef CONFIG_XFRM |
| 5705 | } else if (id == SKB_EXT_SEC_PATH && |
| 5706 | refcount_read(&ext->refcnt) == 1) { |
| 5707 | struct sec_path *sp = skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH); |
| 5708 | |
| 5709 | skb_ext_put_sp(sp); |
| 5710 | sp->len = 0; |
| 5711 | #endif |
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 5712 | } |
| 5713 | } |
| 5714 | EXPORT_SYMBOL(__skb_ext_del); |
| 5715 | |
| 5716 | void __skb_ext_put(struct skb_ext *ext) |
| 5717 | { |
| 5718 | /* If this is the last clone, nothing can increment |
| 5719 | * it after the check passes. Avoids one atomic op. |
| 5720 | */ |
| 5721 | if (refcount_read(&ext->refcnt) == 1) |
| 5722 | goto free_now; |
| 5723 | |
| 5724 | if (!refcount_dec_and_test(&ext->refcnt)) |
| 5725 | return; |
| 5726 | free_now: |
Florian Westphal | 4165079 | 2018-12-18 17:15:27 +0100 | [diff] [blame] | 5727 | #ifdef CONFIG_XFRM |
| 5728 | if (__skb_ext_exist(ext, SKB_EXT_SEC_PATH)) |
| 5729 | skb_ext_put_sp(skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH)); |
| 5730 | #endif |
| 5731 | |
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 5732 | kmem_cache_free(skbuff_ext_cache, ext); |
| 5733 | } |
| 5734 | EXPORT_SYMBOL(__skb_ext_put); |
| 5735 | #endif /* CONFIG_SKB_EXTENSIONS */ |