// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>
#include <linux/mpls.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/mpls.h>
#include <net/mptcp.h>

#include <linux/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>
#include <linux/indirect_call_wrapper.h>

#include "datagram.h"

struct kmem_cache *skbuff_head_cache __ro_after_init;
static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
#ifdef CONFIG_SKB_EXTENSIONS
static struct kmem_cache *skbuff_ext_cache __ro_after_init;
#endif
int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);

/**
 *	skb_panic - private function for out-of-line support
 *	@skb:	buffer
 *	@sz:	size
 *	@addr:	address
 *	@msg:	skb_over_panic or skb_under_panic
 *
 *	Out-of-line support for skb_put() and skb_push().
 *	Called via the wrapper skb_over_panic() or skb_under_panic().
 *	Keep out of line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
		      const char msg[])
{
	pr_emerg("%s: text:%px len:%d put:%d head:%px data:%px tail:%#lx end:%#lx dev:%s\n",
		 msg, addr, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If it is and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free
 */
#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
	__kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)

static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
			       unsigned long ip, bool *pfmemalloc)
{
	void *obj;
	bool ret_pfmemalloc = false;

	/*
	 * Try a regular allocation, when that fails and we're not entitled
	 * to the reserves, fail.
	 */
	obj = kmalloc_node_track_caller(size,
					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					node);
	if (obj || !(gfp_pfmemalloc_allowed(flags)))
		goto out;

	/* Try again but now we are using pfmemalloc reserves */
	ret_pfmemalloc = true;
	obj = kmalloc_node_track_caller(size, flags, node);

out:
	if (pfmemalloc)
		*pfmemalloc = ret_pfmemalloc;

	return obj;
}

/* Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	[BEEP] leaks.
 *
 */

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *		instead of head cache and allocate a cloned (child) skb.
 *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *		allocations in case the data is required for writeback
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of at least size bytes. The object has a reference count
 *	of one. The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int flags, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;
	bool pfmemalloc;

	cache = (flags & SKB_ALLOC_FCLONE)
		? skbuff_fclone_cache : skbuff_head_cache;

	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_DATA_ALIGN(size);
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
	if (!data)
		goto nodata;
	/* kmalloc(size) might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	size = SKB_WITH_OVERHEAD(ksize(data));
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	/* Account for allocated memory : skb + skb->head */
	skb->truesize = SKB_TRUESIZE(size);
	skb->pfmemalloc = pfmemalloc;
	refcount_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	if (flags & SKB_ALLOC_FCLONE) {
		struct sk_buff_fclones *fclones;

		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		skb->fclone = SKB_FCLONE_ORIG;
		refcount_set(&fclones->fclone_ref, 1);

		fclones->skb2.fclone = SKB_FCLONE_CLONE;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);
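
/* Example (illustrative sketch, not taken from a particular caller;
 * "hlen", "dlen" and "payload" are hypothetical): alloc_skb() is the
 * common wrapper around this function. Callers typically reserve their
 * headroom right after allocation and then fill the linear area:
 *
 *	struct sk_buff *skb = alloc_skb(hlen + dlen, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hlen);
 *	skb_put_data(skb, payload, dlen);
 */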

/* Caller must provide SKB that is memset cleared */
static struct sk_buff *__build_skb_around(struct sk_buff *skb,
					  void *data, unsigned int frag_size)
{
	struct skb_shared_info *shinfo;
	unsigned int size = frag_size ? : ksize(data);

	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* Assumes caller memset cleared SKB */
	skb->truesize = SKB_TRUESIZE(size);
	refcount_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	return skb;
}

/**
 * __build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc() only if
 * @frag_size is 0, otherwise data should come from the page allocator
 * or vmalloc()
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 *  Before IO, driver allocates only data buffer where NIC put incoming frame
 *  Driver should add room at head (NET_SKB_PAD) and
 *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
 *  After IO, driver calls build_skb(), to allocate sk_buff and populate it
 *  before giving packet to stack.
 *  RX rings only contains data buffers, not full skbs.
 */
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb;

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));

	return __build_skb_around(skb, data, frag_size);
}

/* build_skb() is wrapper over __build_skb(), that specifically
 * takes care of skb->head and skb->pfmemalloc
 * This means that if @frag_size is not zero, then @data must be backed
 * by a page fragment, not kmalloc() or vmalloc()
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __build_skb(data, frag_size);

	if (skb && frag_size) {
		skb->head_frag = 1;
		if (page_is_pfmemalloc(virt_to_head_page(data)))
			skb->pfmemalloc = 1;
	}
	return skb;
}
EXPORT_SYMBOL(build_skb);
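
/* Example (illustrative sketch; "len" and "buf" are hypothetical driver
 * state): a page-fragment RX path sizes the buffer so skb_shared_info
 * fits behind the frame, then wraps the received data without copying:
 *
 *	unsigned int truesize = SKB_DATA_ALIGN(NET_SKB_PAD + len) +
 *				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	void *buf = napi_alloc_frag(truesize);
 *
 *	... hardware writes the frame at buf + NET_SKB_PAD ...
 *
 *	skb = build_skb(buf, truesize);
 *	if (likely(skb)) {
 *		skb_reserve(skb, NET_SKB_PAD);
 *		skb_put(skb, len);
 *	}
 */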

/**
 * build_skb_around - build a network buffer around provided skb
 * @skb: sk_buff provide by caller, must be memset cleared
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 */
struct sk_buff *build_skb_around(struct sk_buff *skb,
				 void *data, unsigned int frag_size)
{
	if (unlikely(!skb))
		return NULL;

	skb = __build_skb_around(skb, data, frag_size);

	if (skb && frag_size) {
		skb->head_frag = 1;
		if (page_is_pfmemalloc(virt_to_head_page(data)))
			skb->pfmemalloc = 1;
	}
	return skb;
}
EXPORT_SYMBOL(build_skb_around);

#define NAPI_SKB_CACHE_SIZE	64

struct napi_alloc_cache {
	struct page_frag_cache page;
	unsigned int skb_count;
	void *skb_cache[NAPI_SKB_CACHE_SIZE];
};

static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);

static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	return page_frag_alloc(&nc->page, fragsz, gfp_mask);
}

void *napi_alloc_frag(unsigned int fragsz)
{
	fragsz = SKB_DATA_ALIGN(fragsz);

	return __napi_alloc_frag(fragsz, GFP_ATOMIC);
}
EXPORT_SYMBOL(napi_alloc_frag);

/**
 * netdev_alloc_frag - allocate a page fragment
 * @fragsz: fragment size
 *
 * Allocates a frag from a page for receive buffer.
 * Uses GFP_ATOMIC allocations.
 */
void *netdev_alloc_frag(unsigned int fragsz)
{
	struct page_frag_cache *nc;
	void *data;

	fragsz = SKB_DATA_ALIGN(fragsz);
	if (in_irq() || irqs_disabled()) {
		nc = this_cpu_ptr(&netdev_alloc_cache);
		data = page_frag_alloc(nc, fragsz, GFP_ATOMIC);
	} else {
		local_bh_disable();
		data = __napi_alloc_frag(fragsz, GFP_ATOMIC);
		local_bh_enable();
	}
	return data;
}
EXPORT_SYMBOL(netdev_alloc_frag);
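
/* Example (illustrative sketch; the ring structure is hypothetical):
 * refilling RX buffers from a context where the NAPI-only
 * napi_alloc_frag() would not be safe:
 *
 *	void *buf = netdev_alloc_frag(truesize);
 *
 *	if (unlikely(!buf))
 *		return -ENOMEM;
 *	rx_ring->bufs[i] = buf;
 *
 * The fragment is handed to build_skb() once a frame has landed in it.
 */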

/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@len: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has NET_SKB_PAD headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
				   gfp_t gfp_mask)
{
	struct page_frag_cache *nc;
	struct sk_buff *skb;
	bool pfmemalloc;
	void *data;

	len += NET_SKB_PAD;

	if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	len = SKB_DATA_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	if (in_irq() || irqs_disabled()) {
		nc = this_cpu_ptr(&netdev_alloc_cache);
		data = page_frag_alloc(nc, len, gfp_mask);
		pfmemalloc = nc->pfmemalloc;
	} else {
		local_bh_disable();
		nc = this_cpu_ptr(&napi_alloc_cache.page);
		data = page_frag_alloc(nc, len, gfp_mask);
		pfmemalloc = nc->pfmemalloc;
		local_bh_enable();
	}

	if (unlikely(!data))
		return NULL;

	skb = __build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	if (pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD);
	skb->dev = dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);
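
/* Example (illustrative sketch; "frame" and "pkt_len" are hypothetical):
 * netdev_alloc_skb() is the GFP_ATOMIC wrapper around this function, so
 * callers do not need to ask for the NET_SKB_PAD headroom themselves:
 *
 *	skb = netdev_alloc_skb(dev, pkt_len);
 *	if (unlikely(!skb))
 *		return;
 *	skb_put_data(skb, frame, pkt_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */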

/**
 *	__napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
 *	@napi: napi instance this buffer was allocated for
 *	@len: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
 *
 *	Allocate a new sk_buff for use in NAPI receive.  This buffer will
 *	attempt to allocate the head from a special reserved region used
 *	only for NAPI Rx allocation.  By doing this we can save several
 *	CPU cycles by avoiding having to disable and re-enable IRQs.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
				 gfp_t gfp_mask)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
	struct sk_buff *skb;
	void *data;

	len += NET_SKB_PAD + NET_IP_ALIGN;

	if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	len = SKB_DATA_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	data = page_frag_alloc(&nc->page, len, gfp_mask);
	if (unlikely(!data))
		return NULL;

	skb = __build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	if (nc->page.pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	skb->dev = napi->dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__napi_alloc_skb);
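
/* Example (illustrative sketch; "rx_buf" and "pkt_len" are hypothetical):
 * napi_alloc_skb() is the GFP_ATOMIC wrapper, meant for a driver's NAPI
 * poll routine where we already run in softirq context:
 *
 *	skb = napi_alloc_skb(napi, pkt_len);
 *	if (unlikely(!skb))
 *		break;
 *	skb_put_data(skb, rx_buf, pkt_len);
 *	skb->protocol = eth_type_trans(skb, napi->dev);
 *	napi_gro_receive(napi, skb);
 */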

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);
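
/* Example (illustrative sketch; "page", "offset" and "frag_len" are
 * hypothetical driver state): attaching a received page to an skb in a
 * header-split RX path. Note that truesize is the buffer footprint
 * (here a whole page), not the number of bytes actually used:
 *
 *	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
 *			offset, frag_len, PAGE_SIZE);
 */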

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	skb_frag_size_add(frag, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_coalesce_rx_frag);

static void skb_drop_list(struct sk_buff **listp)
{
	kfree_skb_list(*listp);
	*listp = NULL;
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_free_head(struct sk_buff *skb)
{
	unsigned char *head = skb->head;

	if (skb->head_frag)
		skb_free_frag(head);
	else
		kfree(head);
}

static void skb_release_data(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int i;

	if (skb->cloned &&
	    atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			      &shinfo->dataref))
		return;

	for (i = 0; i < shinfo->nr_frags; i++)
		__skb_frag_unref(&shinfo->frags[i]);

	if (shinfo->frag_list)
		kfree_skb_list(shinfo->frag_list);

	skb_zcopy_clear(skb, true);
	skb_free_head(skb);
}

/*
 *	Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff_fclones *fclones;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		return;

	case SKB_FCLONE_ORIG:
		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		/* We usually free the clone (TX completion) before original skb
		 * This test would have no chance to be true for the clone,
		 * while here, branch prediction will be good.
		 */
		if (refcount_read(&fclones->fclone_ref) == 1)
			goto fastpath;
		break;

	default: /* SKB_FCLONE_CLONE */
		fclones = container_of(skb, struct sk_buff_fclones, skb2);
		break;
	}
	if (!refcount_dec_and_test(&fclones->fclone_ref))
		return;
fastpath:
	kmem_cache_free(skbuff_fclone_cache, fclones);
}

void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb_nfct(skb));
#endif
	skb_ext_put(skb);
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	if (likely(skb->head))
		skb_release_data(skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (!skb_unref(skb))
		return;

	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

void kfree_skb_list(struct sk_buff *segs)
{
	while (segs) {
		struct sk_buff *next = segs->next;

		kfree_skb(segs);
		segs = next;
	}
}
EXPORT_SYMBOL(kfree_skb_list);

/* Dump skb information and contents.
 *
 * Must only be called from net_ratelimit()-ed paths.
 *
 * Dumps up to can_dump_full whole packets if full_pkt, headers otherwise.
 */
void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
{
	static atomic_t can_dump_full = ATOMIC_INIT(5);
	struct skb_shared_info *sh = skb_shinfo(skb);
	struct net_device *dev = skb->dev;
	struct sock *sk = skb->sk;
	struct sk_buff *list_skb;
	bool has_mac, has_trans;
	int headroom, tailroom;
	int i, len, seg_len;

	if (full_pkt)
		full_pkt = atomic_dec_if_positive(&can_dump_full) >= 0;

	if (full_pkt)
		len = skb->len;
	else
		len = min_t(int, skb->len, MAX_HEADER + 128);

	headroom = skb_headroom(skb);
	tailroom = skb_tailroom(skb);

	has_mac = skb_mac_header_was_set(skb);
	has_trans = skb_transport_header_was_set(skb);

	printk("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n"
	       "mac=(%d,%d) net=(%d,%d) trans=%d\n"
	       "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n"
	       "csum(0x%x ip_summed=%u complete_sw=%u valid=%u level=%u)\n"
	       "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n",
	       level, skb->len, headroom, skb_headlen(skb), tailroom,
	       has_mac ? skb->mac_header : -1,
	       has_mac ? skb_mac_header_len(skb) : -1,
	       skb->network_header,
	       has_trans ? skb_network_header_len(skb) : -1,
	       has_trans ? skb->transport_header : -1,
	       sh->tx_flags, sh->nr_frags,
	       sh->gso_size, sh->gso_type, sh->gso_segs,
	       skb->csum, skb->ip_summed, skb->csum_complete_sw,
	       skb->csum_valid, skb->csum_level,
	       skb->hash, skb->sw_hash, skb->l4_hash,
	       ntohs(skb->protocol), skb->pkt_type, skb->skb_iif);

	if (dev)
		printk("%sdev name=%s feat=0x%pNF\n",
		       level, dev->name, &dev->features);
	if (sk)
		printk("%ssk family=%hu type=%u proto=%u\n",
		       level, sk->sk_family, sk->sk_type, sk->sk_protocol);

	if (full_pkt && headroom)
		print_hex_dump(level, "skb headroom: ", DUMP_PREFIX_OFFSET,
			       16, 1, skb->head, headroom, false);

	seg_len = min_t(int, skb_headlen(skb), len);
	if (seg_len)
		print_hex_dump(level, "skb linear:   ", DUMP_PREFIX_OFFSET,
			       16, 1, skb->data, seg_len, false);
	len -= seg_len;

	if (full_pkt && tailroom)
		print_hex_dump(level, "skb tailroom: ", DUMP_PREFIX_OFFSET,
			       16, 1, skb_tail_pointer(skb), tailroom, false);

	for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 p_off, p_len, copied;
		struct page *p;
		u8 *vaddr;

		skb_frag_foreach_page(frag, skb_frag_off(frag),
				      skb_frag_size(frag), p, p_off, p_len,
				      copied) {
			seg_len = min_t(int, p_len, len);
			vaddr = kmap_atomic(p);
			print_hex_dump(level, "skb frag:     ",
				       DUMP_PREFIX_OFFSET,
				       16, 1, vaddr + p_off, seg_len, false);
			kunmap_atomic(vaddr);
			len -= seg_len;
			if (!len)
				break;
		}
	}

	if (full_pkt && skb_has_frag_list(skb)) {
		printk("skb fraglist:\n");
		skb_walk_frags(skb, list_skb)
			skb_dump(level, list_skb, true);
	}
}
EXPORT_SYMBOL(skb_dump);
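
/* Example (illustrative sketch): dumping a malformed packet from an
 * error path, honouring the rate-limit requirement above and printing
 * headers only:
 *
 *	if (net_ratelimit())
 *		skb_dump(KERN_ERR, skb, false);
 */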

/**
 *	skb_tx_error - report an sk_buff xmit error
 *	@skb: buffer that triggered an error
 *
 *	Report xmit error if a device callback is tracking this skb.
 *	skb must be freed afterwards.
 */
void skb_tx_error(struct sk_buff *skb)
{
	skb_zcopy_clear(skb, true);
}
EXPORT_SYMBOL(skb_tx_error);

/**
 *	consume_skb - free an skbuff
 *	@skb: buffer to free
 *
 *	Drop a ref to the buffer and free it if the usage count has hit zero
 *	Functions identically to kfree_skb, but kfree_skb assumes that the frame
 *	is being dropped after a failure and notes that
 */
void consume_skb(struct sk_buff *skb)
{
	if (!skb_unref(skb))
		return;

	trace_consume_skb(skb);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
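
/* Example (illustrative sketch): the split between kfree_skb() and
 * consume_skb() only affects tracing and drop monitoring; memory is
 * released the same way in both cases:
 *
 *	if (unlikely(drop))
 *		kfree_skb(skb);		packet was dropped
 *	else
 *		consume_skb(skb);	packet left the stack normally
 */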

/**
 *	consume_stateless_skb - free an skbuff, assuming it is stateless
 *	@skb: buffer to free
 *
 *	Alike consume_skb(), but this variant assumes that this is the last
 *	skb reference and all the head states have been already dropped
 */
void __consume_stateless_skb(struct sk_buff *skb)
{
	trace_consume_skb(skb);
	skb_release_data(skb);
	kfree_skbmem(skb);
}

void __kfree_skb_flush(void)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	/* flush skb_cache if containing objects */
	if (nc->skb_count) {
		kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
				     nc->skb_cache);
		nc->skb_count = 0;
	}
}

static inline void _kfree_skb_defer(struct sk_buff *skb)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	/* drop skb->head and call any destructors for packet */
	skb_release_all(skb);

	/* record skb to CPU local list */
	nc->skb_cache[nc->skb_count++] = skb;

#ifdef CONFIG_SLUB
	/* SLUB writes into objects when freeing */
	prefetchw(skb);
#endif

	/* flush skb_cache if it is filled */
	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
		kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_SIZE,
				     nc->skb_cache);
		nc->skb_count = 0;
	}
}
void __kfree_skb_defer(struct sk_buff *skb)
{
	_kfree_skb_defer(skb);
}

void napi_consume_skb(struct sk_buff *skb, int budget)
{
	if (unlikely(!skb))
		return;

	/* Zero budget indicate non-NAPI context called us, like netpoll */
	if (unlikely(!budget)) {
		dev_consume_skb_any(skb);
		return;
	}

	if (!skb_unref(skb))
		return;

	/* if reaching here SKB is ready to free */
	trace_consume_skb(skb);

	/* if SKB is a clone, don't handle this case */
	if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
		__kfree_skb(skb);
		return;
	}

	_kfree_skb_defer(skb);
}
EXPORT_SYMBOL(napi_consume_skb);
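
/* Example (illustrative sketch; "tx_buf" and next_completed_descriptor()
 * are hypothetical driver helpers): a TX completion handler running from
 * NAPI poll passes its budget through, so the per-CPU bulk-free cache is
 * only used in true NAPI context:
 *
 *	while (next_completed_descriptor(&tx_buf)) {
 *		napi_consume_skb(tx_buf->skb, budget);
 *		tx_buf->skb = NULL;
 *	}
 */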

/* Make sure a field is enclosed inside headers_start/headers_end section */
#define CHECK_SKB_FIELD(field) \
	BUILD_BUG_ON(offsetof(struct sk_buff, field) <		\
		     offsetof(struct sk_buff, headers_start));	\
	BUILD_BUG_ON(offsetof(struct sk_buff, field) >		\
		     offsetof(struct sk_buff, headers_end));	\

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp		= old->tstamp;
	/* We do not copy old->sk */
	new->dev		= old->dev;
	memcpy(new->cb, old->cb, sizeof(old->cb));
	skb_dst_copy(new, old);
	__skb_ext_copy(new, old);
	__nf_copy(new, old, false);

	/* Note : this field could be in headers_start/headers_end section
	 * It is not yet because we do not want to have a 16 bit hole
	 */
	new->queue_mapping = old->queue_mapping;

	memcpy(&new->headers_start, &old->headers_start,
	       offsetof(struct sk_buff, headers_end) -
	       offsetof(struct sk_buff, headers_start));
	CHECK_SKB_FIELD(protocol);
	CHECK_SKB_FIELD(csum);
	CHECK_SKB_FIELD(hash);
	CHECK_SKB_FIELD(priority);
	CHECK_SKB_FIELD(skb_iif);
	CHECK_SKB_FIELD(vlan_proto);
	CHECK_SKB_FIELD(vlan_tci);
	CHECK_SKB_FIELD(transport_header);
	CHECK_SKB_FIELD(network_header);
	CHECK_SKB_FIELD(mac_header);
	CHECK_SKB_FIELD(inner_protocol);
	CHECK_SKB_FIELD(inner_transport_header);
	CHECK_SKB_FIELD(inner_network_header);
	CHECK_SKB_FIELD(inner_mac_header);
	CHECK_SKB_FIELD(mark);
#ifdef CONFIG_NETWORK_SECMARK
	CHECK_SKB_FIELD(secmark);
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	CHECK_SKB_FIELD(napi_id);
#endif
#ifdef CONFIG_XPS
	CHECK_SKB_FIELD(sender_cpu);
#endif
#ifdef CONFIG_NET_SCHED
	CHECK_SKB_FIELD(tc_index);
#endif

}

/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->peeked = 0;
	C(pfmemalloc);
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(head_frag);
	C(data);
	C(truesize);
	refcount_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 * alloc_skb_for_msg() - allocate sk_buff to wrap frag list forming a msg
 * @first: first sk_buff of the msg
 */
struct sk_buff *alloc_skb_for_msg(struct sk_buff *first)
{
	struct sk_buff *n;

	n = alloc_skb(0, GFP_ATOMIC);
	if (!n)
		return NULL;

	n->len = first->len;
	n->data_len = first->len;
	n->truesize = first->truesize;

	skb_shinfo(n)->frag_list = first;

	__copy_skb_header(n, first);
	n->destructor = NULL;

	return n;
}
EXPORT_SYMBOL_GPL(alloc_skb_for_msg);

/**
 *	skb_morph	-	morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);

int mm_account_pinned_pages(struct mmpin *mmp, size_t size)
{
	unsigned long max_pg, num_pg, new_pg, old_pg;
	struct user_struct *user;

	if (capable(CAP_IPC_LOCK) || !size)
		return 0;

	num_pg = (size >> PAGE_SHIFT) + 2;	/* worst case */
	max_pg = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	user = mmp->user ? : current_user();

	do {
		old_pg = atomic_long_read(&user->locked_vm);
		new_pg = old_pg + num_pg;
		if (new_pg > max_pg)
			return -ENOBUFS;
	} while (atomic_long_cmpxchg(&user->locked_vm, old_pg, new_pg) !=
		 old_pg);

	if (!mmp->user) {
		mmp->user = get_uid(user);
		mmp->num_pg = num_pg;
	} else {
		mmp->num_pg += num_pg;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mm_account_pinned_pages);

void mm_unaccount_pinned_pages(struct mmpin *mmp)
{
	if (mmp->user) {
		atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm);
		free_uid(mmp->user);
	}
}
EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages);

struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size)
{
	struct ubuf_info *uarg;
	struct sk_buff *skb;

	WARN_ON_ONCE(!in_task());

	skb = sock_omalloc(sk, 0, GFP_KERNEL);
	if (!skb)
		return NULL;

	BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb));
	uarg = (void *)skb->cb;
	uarg->mmp.user = NULL;

	if (mm_account_pinned_pages(&uarg->mmp, size)) {
		kfree_skb(skb);
		return NULL;
	}

	uarg->callback = sock_zerocopy_callback;
	uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1;
	uarg->len = 1;
	uarg->bytelen = size;
	uarg->zerocopy = 1;
	refcount_set(&uarg->refcnt, 1);
	sock_hold(sk);

	return uarg;
}
EXPORT_SYMBOL_GPL(sock_zerocopy_alloc);

static inline struct sk_buff *skb_from_uarg(struct ubuf_info *uarg)
{
	return container_of((void *)uarg, struct sk_buff, cb);
}

struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
					struct ubuf_info *uarg)
{
	if (uarg) {
		const u32 byte_limit = 1 << 19;		/* limit to a few TSO */
		u32 bytelen, next;

		/* realloc only when socket is locked (TCP, UDP cork),
		 * so uarg->len and sk_zckey access is serialized
		 */
		if (!sock_owned_by_user(sk)) {
			WARN_ON_ONCE(1);
			return NULL;
		}

		bytelen = uarg->bytelen + size;
		if (uarg->len == USHRT_MAX - 1 || bytelen > byte_limit) {
			/* TCP can create new skb to attach new uarg */
			if (sk->sk_type == SOCK_STREAM)
				goto new_alloc;
			return NULL;
		}

		next = (u32)atomic_read(&sk->sk_zckey);
		if ((u32)(uarg->id + uarg->len) == next) {
			if (mm_account_pinned_pages(&uarg->mmp, size))
				return NULL;
			uarg->len++;
			uarg->bytelen = bytelen;
			atomic_set(&sk->sk_zckey, ++next);

			/* no extra ref when appending to datagram (MSG_MORE) */
			if (sk->sk_type == SOCK_STREAM)
				sock_zerocopy_get(uarg);

			return uarg;
		}
	}

new_alloc:
	return sock_zerocopy_alloc(sk, size);
}
EXPORT_SYMBOL_GPL(sock_zerocopy_realloc);

static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len)
{
	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
	u32 old_lo, old_hi;
	u64 sum_len;

	old_lo = serr->ee.ee_info;
	old_hi = serr->ee.ee_data;
	sum_len = old_hi - old_lo + 1ULL + len;

	if (sum_len >= (1ULL << 32))
		return false;

	if (lo != old_hi + 1)
		return false;

	serr->ee.ee_data += len;
	return true;
}

void sock_zerocopy_callback(struct ubuf_info *uarg, bool success)
{
	struct sk_buff *tail, *skb = skb_from_uarg(uarg);
	struct sock_exterr_skb *serr;
	struct sock *sk = skb->sk;
	struct sk_buff_head *q;
	unsigned long flags;
	u32 lo, hi;
	u16 len;

	mm_unaccount_pinned_pages(&uarg->mmp);

	/* if !len, there was only 1 call, and it was aborted
	 * so do not queue a completion notification
	 */
	if (!uarg->len || sock_flag(sk, SOCK_DEAD))
		goto release;

	len = uarg->len;
	lo = uarg->id;
	hi = uarg->id + len - 1;

	serr = SKB_EXT_ERR(skb);
	memset(serr, 0, sizeof(*serr));
	serr->ee.ee_errno = 0;
	serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY;
	serr->ee.ee_data = hi;
	serr->ee.ee_info = lo;
	if (!success)
		serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED;

	q = &sk->sk_error_queue;
	spin_lock_irqsave(&q->lock, flags);
	tail = skb_peek_tail(q);
	if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY ||
	    !skb_zerocopy_notify_extend(tail, lo, len)) {
		__skb_queue_tail(q, skb);
		skb = NULL;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	sk->sk_error_report(sk);

release:
	consume_skb(skb);
	sock_put(sk);
}
EXPORT_SYMBOL_GPL(sock_zerocopy_callback);

void sock_zerocopy_put(struct ubuf_info *uarg)
{
	if (uarg && refcount_dec_and_test(&uarg->refcnt)) {
		if (uarg->callback)
			uarg->callback(uarg, uarg->zerocopy);
		else
			consume_skb(skb_from_uarg(uarg));
	}
}
EXPORT_SYMBOL_GPL(sock_zerocopy_put);

void sock_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref)
{
	if (uarg) {
		struct sock *sk = skb_from_uarg(uarg)->sk;

		atomic_dec(&sk->sk_zckey);
		uarg->len--;

		if (have_uref)
			sock_zerocopy_put(uarg);
	}
}
EXPORT_SYMBOL_GPL(sock_zerocopy_put_abort);

int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len)
{
	return __zerocopy_sg_from_iter(skb->sk, skb, &msg->msg_iter, len);
}
EXPORT_SYMBOL_GPL(skb_zerocopy_iter_dgram);

int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
			     struct msghdr *msg, int len,
			     struct ubuf_info *uarg)
{
	struct ubuf_info *orig_uarg = skb_zcopy(skb);
	struct iov_iter orig_iter = msg->msg_iter;
	int err, orig_len = skb->len;

	/* An skb can only point to one uarg. This edge case happens when
	 * TCP appends to an skb, but zerocopy_realloc triggered a new alloc.
	 */
	if (orig_uarg && uarg != orig_uarg)
		return -EEXIST;

	err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len);
	if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
		struct sock *save_sk = skb->sk;

		/* Streams do not free skb on error. Reset to prev state. */
		msg->msg_iter = orig_iter;
		skb->sk = sk;
		___pskb_trim(skb, orig_len);
		skb->sk = save_sk;
		return err;
	}

	skb_zcopy_set(skb, uarg, NULL);
	return skb->len - orig_len;
}
EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream);
1305
Willem de Bruijn1f8b9772017-08-03 16:29:41 -04001306static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig,
Willem de Bruijn52267792017-08-03 16:29:39 -04001307 gfp_t gfp_mask)
1308{
1309 if (skb_zcopy(orig)) {
1310 if (skb_zcopy(nskb)) {
1311			/* callers passing !gfp_mask are verified to have !skb_zcopy(nskb) */
1312 if (!gfp_mask) {
1313 WARN_ON_ONCE(1);
1314 return -ENOMEM;
1315 }
1316 if (skb_uarg(nskb) == skb_uarg(orig))
1317 return 0;
1318 if (skb_copy_ubufs(nskb, GFP_ATOMIC))
1319 return -EIO;
1320 }
Willem de Bruijn52900d22018-11-30 15:32:40 -05001321 skb_zcopy_set(nskb, skb_uarg(orig), NULL);
Willem de Bruijn52267792017-08-03 16:29:39 -04001322 }
1323 return 0;
1324}
1325
Ben Hutchings2c530402012-07-10 10:55:09 +00001326/**
1327 * skb_copy_ubufs - copy userspace skb frag buffers to kernel
Michael S. Tsirkin48c83012011-08-31 08:03:29 +00001328 * @skb: the skb to modify
1329 * @gfp_mask: allocation priority
1330 *
1331 * This must be called on an skb that carries SKBTX_DEV_ZEROCOPY.
1332 * It copies all frags into kernel memory and drops the reference
1333 * to the userspace pages.
1334 *
1335 * If this function is called from an interrupt, @gfp_mask must be
1336 * %GFP_ATOMIC.
1337 *
1338 * Returns 0 on success or a negative error code on failure
1339 * to allocate kernel memory to copy to.
1340 */
1341int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
Shirley Maa6686f22011-07-06 12:22:12 +00001342{
Shirley Maa6686f22011-07-06 12:22:12 +00001343 int num_frags = skb_shinfo(skb)->nr_frags;
1344 struct page *page, *head = NULL;
Willem de Bruijn3ece7822017-08-03 16:29:38 -04001345 int i, new_frags;
1346 u32 d_off;
Shirley Maa6686f22011-07-06 12:22:12 +00001347
Willem de Bruijn3ece7822017-08-03 16:29:38 -04001348 if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
1349 return -EINVAL;
1350
Willem de Bruijnf72c4ac2017-12-28 12:38:13 -05001351 if (!num_frags)
1352 goto release;
1353
Willem de Bruijn3ece7822017-08-03 16:29:38 -04001354 new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT;
1355 for (i = 0; i < new_frags; i++) {
Krishna Kumar02756ed2012-07-17 02:05:29 +00001356 page = alloc_page(gfp_mask);
Shirley Maa6686f22011-07-06 12:22:12 +00001357 if (!page) {
1358 while (head) {
Sunghan Suh40dadff2013-07-12 16:17:23 +09001359 struct page *next = (struct page *)page_private(head);
Shirley Maa6686f22011-07-06 12:22:12 +00001360 put_page(head);
1361 head = next;
1362 }
1363 return -ENOMEM;
1364 }
Willem de Bruijn3ece7822017-08-03 16:29:38 -04001365 set_page_private(page, (unsigned long)head);
1366 head = page;
1367 }
1368
1369 page = head;
1370 d_off = 0;
1371 for (i = 0; i < num_frags; i++) {
1372 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
1373 u32 p_off, p_len, copied;
1374 struct page *p;
1375 u8 *vaddr;
Willem de Bruijnc613c202017-07-31 08:15:47 -04001376
Jonathan Lemonb54c9d52019-07-30 07:40:33 -07001377 skb_frag_foreach_page(f, skb_frag_off(f), skb_frag_size(f),
Willem de Bruijnc613c202017-07-31 08:15:47 -04001378 p, p_off, p_len, copied) {
Willem de Bruijn3ece7822017-08-03 16:29:38 -04001379 u32 copy, done = 0;
Willem de Bruijnc613c202017-07-31 08:15:47 -04001380 vaddr = kmap_atomic(p);
Willem de Bruijn3ece7822017-08-03 16:29:38 -04001381
1382 while (done < p_len) {
1383 if (d_off == PAGE_SIZE) {
1384 d_off = 0;
1385 page = (struct page *)page_private(page);
1386 }
1387 copy = min_t(u32, PAGE_SIZE - d_off, p_len - done);
1388 memcpy(page_address(page) + d_off,
1389 vaddr + p_off + done, copy);
1390 done += copy;
1391 d_off += copy;
1392 }
Willem de Bruijnc613c202017-07-31 08:15:47 -04001393 kunmap_atomic(vaddr);
1394 }
Shirley Maa6686f22011-07-06 12:22:12 +00001395 }
1396
1397 /* skb frags release userspace buffers */
Krishna Kumar02756ed2012-07-17 02:05:29 +00001398 for (i = 0; i < num_frags; i++)
Ian Campbella8605c62011-10-19 23:01:49 +00001399 skb_frag_unref(skb, i);
Shirley Maa6686f22011-07-06 12:22:12 +00001400
Shirley Maa6686f22011-07-06 12:22:12 +00001401 /* skb frags point to kernel buffers */
Willem de Bruijn3ece7822017-08-03 16:29:38 -04001402 for (i = 0; i < new_frags - 1; i++) {
1403 __skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE);
Sunghan Suh40dadff2013-07-12 16:17:23 +09001404 head = (struct page *)page_private(head);
Shirley Maa6686f22011-07-06 12:22:12 +00001405 }
Willem de Bruijn3ece7822017-08-03 16:29:38 -04001406 __skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
1407 skb_shinfo(skb)->nr_frags = new_frags;
Michael S. Tsirkin48c83012011-08-31 08:03:29 +00001408
Willem de Bruijnb90ddd52017-12-20 17:37:50 -05001409release:
Willem de Bruijn1f8b9772017-08-03 16:29:41 -04001410 skb_zcopy_clear(skb, false);
Shirley Maa6686f22011-07-06 12:22:12 +00001411 return 0;
1412}
Michael S. Tsirkindcc0fb72012-07-20 09:23:20 +00001413EXPORT_SYMBOL_GPL(skb_copy_ubufs);
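
/*
 * Example (editorial sketch, not part of the kernel source): receive paths
 * that may hold on to the frag pages indefinitely are expected to detach
 * them first via the skb_orphan_frags_rx() helper from <linux/skbuff.h>,
 * which ends up in skb_copy_ubufs() for zerocopy skbs.
 *
 *	if (skb_orphan_frags_rx(skb, GFP_ATOMIC))
 *		goto drop;		// could not detach the userspace pages
 */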
Shirley Maa6686f22011-07-06 12:22:12 +00001414
Herbert Xue0053ec2007-10-14 00:37:52 -07001415/**
1416 * skb_clone - duplicate an sk_buff
1417 * @skb: buffer to clone
1418 * @gfp_mask: allocation priority
1419 *
1420 * Duplicate an &sk_buff. The new one is not owned by a socket. Both
1421 * copies share the same packet data but not structure. The new
1422 * buffer has a reference count of 1. If the allocation fails the
1423 * function returns %NULL otherwise the new buffer is returned.
1424 *
1425 * If this function is called from an interrupt, @gfp_mask must be
1426 * %GFP_ATOMIC.
1427 */
1428
1429struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
1430{
Eric Dumazetd0bf4a92014-09-29 13:29:15 -07001431 struct sk_buff_fclones *fclones = container_of(skb,
1432 struct sk_buff_fclones,
1433 skb1);
Eric Dumazet6ffe75eb2014-12-03 17:04:39 -08001434 struct sk_buff *n;
Herbert Xue0053ec2007-10-14 00:37:52 -07001435
Michael S. Tsirkin70008aa2012-07-20 09:23:10 +00001436 if (skb_orphan_frags(skb, gfp_mask))
1437 return NULL;
Shirley Maa6686f22011-07-06 12:22:12 +00001438
Herbert Xue0053ec2007-10-14 00:37:52 -07001439 if (skb->fclone == SKB_FCLONE_ORIG &&
Reshetova, Elena26385952017-06-30 13:07:59 +03001440 refcount_read(&fclones->fclone_ref) == 1) {
Eric Dumazet6ffe75eb2014-12-03 17:04:39 -08001441 n = &fclones->skb2;
Reshetova, Elena26385952017-06-30 13:07:59 +03001442 refcount_set(&fclones->fclone_ref, 2);
Herbert Xue0053ec2007-10-14 00:37:52 -07001443 } else {
Mel Gormanc93bdd02012-07-31 16:44:19 -07001444 if (skb_pfmemalloc(skb))
1445 gfp_mask |= __GFP_MEMALLOC;
1446
Herbert Xue0053ec2007-10-14 00:37:52 -07001447 n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
1448 if (!n)
1449 return NULL;
Vegard Nossumfe55f6d2008-08-30 12:16:35 +02001450
Herbert Xue0053ec2007-10-14 00:37:52 -07001451 n->fclone = SKB_FCLONE_UNAVAILABLE;
1452 }
1453
1454 return __skb_clone(n, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001455}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08001456EXPORT_SYMBOL(skb_clone);
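
/*
 * Example (editorial sketch, not part of the kernel source): a typical use
 * of skb_clone() is handing the same packet data to a second consumer
 * without copying the payload.  deliver_to_tap() is a hypothetical helper.
 *
 *	static void mirror_to_tap(struct sk_buff *skb)
 *	{
 *		struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
 *
 *		if (!clone)
 *			return;			// original skb is untouched
 *		deliver_to_tap(clone);		// consumer frees it when done
 *	}
 */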
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457
Toshiaki Makitab0768a82018-08-03 16:58:09 +09001458void skb_headers_offset_update(struct sk_buff *skb, int off)
Pravin B Shelarf5b17292013-03-07 13:21:40 +00001459{
Eric Dumazet030737b2013-10-19 11:42:54 -07001460 /* Only adjust this if it actually is csum_start rather than csum */
1461 if (skb->ip_summed == CHECKSUM_PARTIAL)
1462 skb->csum_start += off;
Pravin B Shelarf5b17292013-03-07 13:21:40 +00001463 /* {transport,network,mac}_header and tail are relative to skb->head */
1464 skb->transport_header += off;
1465 skb->network_header += off;
1466 if (skb_mac_header_was_set(skb))
1467 skb->mac_header += off;
1468 skb->inner_transport_header += off;
1469 skb->inner_network_header += off;
Pravin B Shelaraefbd2b2013-03-07 13:21:46 +00001470 skb->inner_mac_header += off;
Pravin B Shelarf5b17292013-03-07 13:21:40 +00001471}
Toshiaki Makitab0768a82018-08-03 16:58:09 +09001472EXPORT_SYMBOL(skb_headers_offset_update);
Pravin B Shelarf5b17292013-03-07 13:21:40 +00001473
Ilya Lesokhin08303c12018-04-30 10:16:11 +03001474void skb_copy_header(struct sk_buff *new, const struct sk_buff *old)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001475{
Herbert Xudec18812007-10-14 00:37:30 -07001476 __copy_skb_header(new, old);
1477
Herbert Xu79671682006-06-22 02:40:14 -07001478 skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
1479 skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
1480 skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481}
Ilya Lesokhin08303c12018-04-30 10:16:11 +03001482EXPORT_SYMBOL(skb_copy_header);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001483
Mel Gormanc93bdd02012-07-31 16:44:19 -07001484static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
1485{
1486 if (skb_pfmemalloc(skb))
1487 return SKB_ALLOC_RX;
1488 return 0;
1489}
1490
Linus Torvalds1da177e2005-04-16 15:20:36 -07001491/**
1492 * skb_copy - create private copy of an sk_buff
1493 * @skb: buffer to copy
1494 * @gfp_mask: allocation priority
1495 *
1496 * Make a copy of both an &sk_buff and its data. This is used when the
1497 * caller wishes to modify the data and needs a private copy of the
1498 * data to alter. Returns %NULL on failure or the pointer to the buffer
1499 * on success. The returned buffer has a reference count of 1.
1500 *
1501 * As a by-product, this function converts a non-linear &sk_buff into a
1502 * linear one, so that the &sk_buff becomes completely private and the
1503 * caller may modify all the data of the returned buffer. This means the
1504 * function is not recommended when only the header is going to be
1505 * modified. Use pskb_copy() instead.
1506 */
1507
Al Virodd0fc662005-10-07 07:46:04 +01001508struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001509{
Eric Dumazet6602ceb2010-09-01 05:25:10 +00001510 int headerlen = skb_headroom(skb);
Alexander Duyckec47ea82012-05-04 14:26:56 +00001511 unsigned int size = skb_end_offset(skb) + skb->data_len;
Mel Gormanc93bdd02012-07-31 16:44:19 -07001512 struct sk_buff *n = __alloc_skb(size, gfp_mask,
1513 skb_alloc_rx_flag(skb), NUMA_NO_NODE);
Eric Dumazet6602ceb2010-09-01 05:25:10 +00001514
Linus Torvalds1da177e2005-04-16 15:20:36 -07001515 if (!n)
1516 return NULL;
1517
1518 /* Set the data pointer */
1519 skb_reserve(n, headerlen);
1520 /* Set the tail pointer and length */
1521 skb_put(n, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522
Tim Hansen9f77fad2017-10-09 11:37:59 -04001523 BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001524
Ilya Lesokhin08303c12018-04-30 10:16:11 +03001525 skb_copy_header(n, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526 return n;
1527}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08001528EXPORT_SYMBOL(skb_copy);
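
/*
 * Example (editorial sketch, not part of the kernel source): skb_copy() is
 * the heavyweight option; the whole packet becomes linear, private and
 * writable.  mangle_payload() is a hypothetical helper.
 *
 *	struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
 *
 *	if (!nskb)
 *		return -ENOMEM;
 *	mangle_payload(nskb->data, nskb->len);	// all data is now linear
 */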
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529
1530/**
Octavian Purdilabad93e92014-06-12 01:36:26 +03001531 * __pskb_copy_fclone - create copy of an sk_buff with private head.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001532 * @skb: buffer to copy
Eric Dumazet117632e2011-12-03 21:39:53 +00001533 * @headroom: headroom of new skb
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534 * @gfp_mask: allocation priority
Octavian Purdilabad93e92014-06-12 01:36:26 +03001535 * @fclone: if true allocate the copy of the skb from the fclone
1536 * cache instead of the head cache; it is recommended to set this
1537 * to true for the cases where the copy will likely be cloned
Linus Torvalds1da177e2005-04-16 15:20:36 -07001538 *
1539 * Make a copy of both an &sk_buff and part of its data, located
1540 * in the header. Fragmented data remains shared. This is used when
1541 * the caller wishes to modify only the header of the &sk_buff and needs
1542 * a private copy of the header to alter. Returns %NULL on failure
1543 * or the pointer to the buffer on success.
1544 * The returned buffer has a reference count of 1.
1545 */
1546
Octavian Purdilabad93e92014-06-12 01:36:26 +03001547struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
1548 gfp_t gfp_mask, bool fclone)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549{
Eric Dumazet117632e2011-12-03 21:39:53 +00001550 unsigned int size = skb_headlen(skb) + headroom;
Octavian Purdilabad93e92014-06-12 01:36:26 +03001551 int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
1552 struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);
Eric Dumazet6602ceb2010-09-01 05:25:10 +00001553
Linus Torvalds1da177e2005-04-16 15:20:36 -07001554 if (!n)
1555 goto out;
1556
1557 /* Set the data pointer */
Eric Dumazet117632e2011-12-03 21:39:53 +00001558 skb_reserve(n, headroom);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001559 /* Set the tail pointer and length */
1560 skb_put(n, skb_headlen(skb));
1561 /* Copy the bytes */
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03001562 skb_copy_from_linear_data(skb, n->data, n->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001563
Herbert Xu25f484a2006-11-07 14:57:15 -08001564 n->truesize += skb->data_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001565 n->data_len = skb->data_len;
1566 n->len = skb->len;
1567
1568 if (skb_shinfo(skb)->nr_frags) {
1569 int i;
1570
Willem de Bruijn1f8b9772017-08-03 16:29:41 -04001571 if (skb_orphan_frags(skb, gfp_mask) ||
1572 skb_zerocopy_clone(n, skb, gfp_mask)) {
Michael S. Tsirkin70008aa2012-07-20 09:23:10 +00001573 kfree_skb(n);
1574 n = NULL;
1575 goto out;
Shirley Maa6686f22011-07-06 12:22:12 +00001576 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001577 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1578 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
Ian Campbellea2ab692011-08-22 23:44:58 +00001579 skb_frag_ref(skb, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001580 }
1581 skb_shinfo(n)->nr_frags = i;
1582 }
1583
David S. Miller21dc3302010-08-23 00:13:46 -07001584 if (skb_has_frag_list(skb)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001585 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
1586 skb_clone_fraglist(n);
1587 }
1588
Ilya Lesokhin08303c12018-04-30 10:16:11 +03001589 skb_copy_header(n, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001590out:
1591 return n;
1592}
Octavian Purdilabad93e92014-06-12 01:36:26 +03001593EXPORT_SYMBOL(__pskb_copy_fclone);
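
/*
 * Example (editorial sketch, not part of the kernel source): when only the
 * headers will be rewritten, the cheaper pskb_copy() wrapper from
 * <linux/skbuff.h> (which ends up here) keeps the paged data shared.
 *
 *	struct sk_buff *nskb = pskb_copy(skb, GFP_ATOMIC);
 *
 *	if (!nskb)
 *		return -ENOMEM;
 *	// the linear header is private; the frags are still shared
 */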
Linus Torvalds1da177e2005-04-16 15:20:36 -07001594
1595/**
1596 * pskb_expand_head - reallocate header of &sk_buff
1597 * @skb: buffer to reallocate
1598 * @nhead: room to add at head
1599 * @ntail: room to add at tail
1600 * @gfp_mask: allocation priority
1601 *
Mathias Krausebc323832013-11-07 14:18:26 +01001602 * Expands (or creates an identical copy, if @nhead and @ntail are zero)
1603 * the header of @skb. The &sk_buff itself is not changed and MUST have a
Linus Torvalds1da177e2005-04-16 15:20:36 -07001604 * reference count of 1. Returns zero on success or a negative error code
1605 * if expansion failed. In the latter case, the &sk_buff is not changed.
1606 *
1607 * All the pointers pointing into skb header may change and must be
1608 * reloaded after call to this function.
1609 */
1610
Victor Fusco86a76ca2005-07-08 14:57:47 -07001611int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
Al Virodd0fc662005-10-07 07:46:04 +01001612 gfp_t gfp_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001613{
Eric Dumazet158f3232017-01-27 07:11:27 -08001614 int i, osize = skb_end_offset(skb);
1615 int size = osize + nhead + ntail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616 long off;
Eric Dumazet158f3232017-01-27 07:11:27 -08001617 u8 *data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001618
Herbert Xu4edd87a2008-10-01 07:09:38 -07001619 BUG_ON(nhead < 0);
1620
Tim Hansen9f77fad2017-10-09 11:37:59 -04001621 BUG_ON(skb_shared(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622
1623 size = SKB_DATA_ALIGN(size);
1624
Mel Gormanc93bdd02012-07-31 16:44:19 -07001625 if (skb_pfmemalloc(skb))
1626 gfp_mask |= __GFP_MEMALLOC;
1627 data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
1628 gfp_mask, NUMA_NO_NODE, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001629 if (!data)
1630 goto nodata;
Eric Dumazet87151b82012-04-10 20:08:39 +00001631 size = SKB_WITH_OVERHEAD(ksize(data));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001632
1633	/* Copy only real data... and, alas, the header. This should be
Eric Dumazet6602ceb2010-09-01 05:25:10 +00001634	 * optimized for the cases when the header is empty.
1635 */
1636 memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);
1637
1638 memcpy((struct skb_shared_info *)(data + size),
1639 skb_shinfo(skb),
Eric Dumazetfed66382010-07-22 19:09:08 +00001640 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001641
Alexander Duyck3e245912012-05-04 14:26:51 +00001642 /*
1643	 * If shinfo is shared we must drop the old head gracefully, but if it
1644	 * is not, we can just drop the old head and let the existing refcount
1645	 * be, since all we did was relocate the values.
1646 */
1647 if (skb_cloned(skb)) {
Michael S. Tsirkin70008aa2012-07-20 09:23:10 +00001648 if (skb_orphan_frags(skb, gfp_mask))
1649 goto nofrags;
Willem de Bruijn1f8b9772017-08-03 16:29:41 -04001650 if (skb_zcopy(skb))
Eric Dumazetc1d1b432017-08-31 16:48:22 -07001651 refcount_inc(&skb_uarg(skb)->refcnt);
Eric Dumazet1fd63042010-09-02 23:09:32 +00001652 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
Ian Campbellea2ab692011-08-22 23:44:58 +00001653 skb_frag_ref(skb, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001654
Eric Dumazet1fd63042010-09-02 23:09:32 +00001655 if (skb_has_frag_list(skb))
1656 skb_clone_fraglist(skb);
1657
1658 skb_release_data(skb);
Alexander Duyck3e245912012-05-04 14:26:51 +00001659 } else {
1660 skb_free_head(skb);
Eric Dumazet1fd63042010-09-02 23:09:32 +00001661 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662 off = (data + nhead) - skb->head;
1663
1664 skb->head = data;
Eric Dumazetd3836f22012-04-27 00:33:38 +00001665 skb->head_frag = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001666 skb->data += off;
Arnaldo Carvalho de Melo4305b542007-04-19 20:43:29 -07001667#ifdef NET_SKBUFF_DATA_USES_OFFSET
1668 skb->end = size;
Patrick McHardy56eb8882007-04-09 11:45:04 -07001669 off = nhead;
Arnaldo Carvalho de Melo4305b542007-04-19 20:43:29 -07001670#else
1671 skb->end = skb->head + size;
Patrick McHardy56eb8882007-04-09 11:45:04 -07001672#endif
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001673 skb->tail += off;
Peter Pan(潘卫平)b41abb42013-06-06 21:27:21 +08001674 skb_headers_offset_update(skb, nhead);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001675 skb->cloned = 0;
Patrick McHardy334a8132007-06-25 04:35:20 -07001676 skb->hdr_len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001677 skb->nohdr = 0;
1678 atomic_set(&skb_shinfo(skb)->dataref, 1);
Eric Dumazet158f3232017-01-27 07:11:27 -08001679
Daniel Borkmannde8f3a82017-09-25 02:25:51 +02001680 skb_metadata_clear(skb);
1681
Eric Dumazet158f3232017-01-27 07:11:27 -08001682 /* It is not generally safe to change skb->truesize.
1683 * For the moment, we really care of rx path, or
1684 * when skb is orphaned (not attached to a socket).
1685 */
1686 if (!skb->sk || skb->destructor == sock_edemux)
1687 skb->truesize += size - osize;
1688
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689 return 0;
1690
Shirley Maa6686f22011-07-06 12:22:12 +00001691nofrags:
1692 kfree(data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693nodata:
1694 return -ENOMEM;
1695}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08001696EXPORT_SYMBOL(pskb_expand_head);
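
/*
 * Example (editorial sketch, not part of the kernel source): a tunnel-style
 * transmit path might grow the headroom before pushing an outer header.
 * OUTER_HLEN is a hypothetical constant; real code usually goes through the
 * skb_cow_head() helper, which wraps this same pattern.
 *
 *	if (skb_headroom(skb) < OUTER_HLEN &&
 *	    pskb_expand_head(skb, OUTER_HLEN - skb_headroom(skb), 0,
 *			     GFP_ATOMIC))
 *		return -ENOMEM;
 *	// all pointers into the header must be re-read after expansion
 *	skb_push(skb, OUTER_HLEN);
 */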
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697
1698/* Make private copy of skb with writable head and some headroom */
1699
1700struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
1701{
1702 struct sk_buff *skb2;
1703 int delta = headroom - skb_headroom(skb);
1704
1705 if (delta <= 0)
1706 skb2 = pskb_copy(skb, GFP_ATOMIC);
1707 else {
1708 skb2 = skb_clone(skb, GFP_ATOMIC);
1709 if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
1710 GFP_ATOMIC)) {
1711 kfree_skb(skb2);
1712 skb2 = NULL;
1713 }
1714 }
1715 return skb2;
1716}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08001717EXPORT_SYMBOL(skb_realloc_headroom);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718
1719/**
1720 * skb_copy_expand - copy and expand sk_buff
1721 * @skb: buffer to copy
1722 * @newheadroom: new free bytes at head
1723 * @newtailroom: new free bytes at tail
1724 * @gfp_mask: allocation priority
1725 *
1726 * Make a copy of both an &sk_buff and its data and, while doing so,
1727 * allocate additional space.
1728 *
1729 * This is used when the caller wishes to modify the data and needs a
1730 * private copy of the data to alter as well as more space for new fields.
1731 * Returns %NULL on failure or the pointer to the buffer
1732 * on success. The returned buffer has a reference count of 1.
1733 *
1734 * You must pass %GFP_ATOMIC as the allocation priority if this function
1735 * is called from an interrupt.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001736 */
1737struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
Victor Fusco86a76ca2005-07-08 14:57:47 -07001738 int newheadroom, int newtailroom,
Al Virodd0fc662005-10-07 07:46:04 +01001739 gfp_t gfp_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001740{
1741 /*
1742 * Allocate the copy buffer
1743 */
Mel Gormanc93bdd02012-07-31 16:44:19 -07001744 struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
1745 gfp_mask, skb_alloc_rx_flag(skb),
1746 NUMA_NO_NODE);
Patrick McHardyefd1e8d2007-04-10 18:30:09 -07001747 int oldheadroom = skb_headroom(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748 int head_copy_len, head_copy_off;
1749
1750 if (!n)
1751 return NULL;
1752
1753 skb_reserve(n, newheadroom);
1754
1755 /* Set the tail pointer and length */
1756 skb_put(n, skb->len);
1757
Patrick McHardyefd1e8d2007-04-10 18:30:09 -07001758 head_copy_len = oldheadroom;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001759 head_copy_off = 0;
1760 if (newheadroom <= head_copy_len)
1761 head_copy_len = newheadroom;
1762 else
1763 head_copy_off = newheadroom - head_copy_len;
1764
1765 /* Copy the linear header and data. */
Tim Hansen9f77fad2017-10-09 11:37:59 -04001766 BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
1767 skb->len + head_copy_len));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001768
Ilya Lesokhin08303c12018-04-30 10:16:11 +03001769 skb_copy_header(n, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770
Eric Dumazet030737b2013-10-19 11:42:54 -07001771 skb_headers_offset_update(n, newheadroom - oldheadroom);
Patrick McHardyefd1e8d2007-04-10 18:30:09 -07001772
Linus Torvalds1da177e2005-04-16 15:20:36 -07001773 return n;
1774}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08001775EXPORT_SYMBOL(skb_copy_expand);
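
/*
 * Example (editorial sketch, not part of the kernel source): copy a packet
 * while reserving room for a new header and trailer in one step.  NEW_HLEN
 * and NEW_TLEN are hypothetical sizes.
 *
 *	struct sk_buff *nskb = skb_copy_expand(skb, NEW_HLEN, NEW_TLEN,
 *					       GFP_ATOMIC);
 *
 *	if (!nskb)
 *		return -ENOMEM;
 *	// nskb holds skb's data plus NEW_HLEN headroom and NEW_TLEN tailroom
 */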
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776
1777/**
Florian Fainellicd0a1372017-08-22 15:12:14 -07001778 * __skb_pad - zero pad the tail of an skb
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779 * @skb: buffer to pad
1780 * @pad: space to pad
Florian Fainellicd0a1372017-08-22 15:12:14 -07001781 * @free_on_error: free buffer on error
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782 *
1783 * Ensure that a buffer is followed by a padding area that is zero
1784 * filled. Used by network drivers which may DMA or transfer data
1785 * beyond the buffer end onto the wire.
1786 *
Florian Fainellicd0a1372017-08-22 15:12:14 -07001787 * May return an error in out-of-memory cases. The skb is freed on error
1788 * if @free_on_error is true.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001789 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001790
Florian Fainellicd0a1372017-08-22 15:12:14 -07001791int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001792{
Herbert Xu5b057c62006-06-23 02:06:41 -07001793 int err;
1794 int ntail;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001795
Linus Torvalds1da177e2005-04-16 15:20:36 -07001796	/* If the skbuff is non-linear, tailroom is always zero. */
Herbert Xu5b057c62006-06-23 02:06:41 -07001797 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798 memset(skb->data+skb->len, 0, pad);
Herbert Xu5b057c62006-06-23 02:06:41 -07001799 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001800 }
Herbert Xu5b057c62006-06-23 02:06:41 -07001801
Arnaldo Carvalho de Melo4305b542007-04-19 20:43:29 -07001802 ntail = skb->data_len + pad - (skb->end - skb->tail);
Herbert Xu5b057c62006-06-23 02:06:41 -07001803 if (likely(skb_cloned(skb) || ntail > 0)) {
1804 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
1805 if (unlikely(err))
1806 goto free_skb;
1807 }
1808
1809 /* FIXME: The use of this function with non-linear skb's really needs
1810 * to be audited.
1811 */
1812 err = skb_linearize(skb);
1813 if (unlikely(err))
1814 goto free_skb;
1815
1816 memset(skb->data + skb->len, 0, pad);
1817 return 0;
1818
1819free_skb:
Florian Fainellicd0a1372017-08-22 15:12:14 -07001820 if (free_on_error)
1821 kfree_skb(skb);
Herbert Xu5b057c62006-06-23 02:06:41 -07001822 return err;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001823}
Florian Fainellicd0a1372017-08-22 15:12:14 -07001824EXPORT_SYMBOL(__skb_pad);
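
/*
 * Example (editorial sketch, not part of the kernel source): drivers normally
 * reach this code through helpers such as skb_padto()/skb_put_padto() from
 * <linux/skbuff.h>, e.g. to pad runt frames before DMA.
 *
 *	// pad short frames up to the 60-byte Ethernet minimum
 *	if (skb_put_padto(skb, ETH_ZLEN))
 *		return NETDEV_TX_OK;	// the skb has already been freed
 */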
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001825
Ilpo Järvinen0dde3e12008-03-27 17:43:41 -07001826/**
Mathias Krause0c7ddf32013-11-07 14:18:24 +01001827 * pskb_put - add data to the tail of a potentially fragmented buffer
1828 * @skb: start of the buffer to use
1829 * @tail: tail fragment of the buffer to use
1830 * @len: amount of data to add
1831 *
1832 * This function extends the used data area of the potentially
1833 * fragmented buffer. @tail must be the last fragment of @skb -- or
1834 * @skb itself. If this would exceed the total buffer size the kernel
1835 * will panic. A pointer to the first byte of the extra data is
1836 * returned.
1837 */
1838
Johannes Berg4df864c2017-06-16 14:29:21 +02001839void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
Mathias Krause0c7ddf32013-11-07 14:18:24 +01001840{
1841 if (tail != skb) {
1842 skb->data_len += len;
1843 skb->len += len;
1844 }
1845 return skb_put(tail, len);
1846}
1847EXPORT_SYMBOL_GPL(pskb_put);
1848
1849/**
Ilpo Järvinen0dde3e12008-03-27 17:43:41 -07001850 * skb_put - add data to a buffer
1851 * @skb: buffer to use
1852 * @len: amount of data to add
1853 *
1854 * This function extends the used data area of the buffer. If this would
1855 * exceed the total buffer size the kernel will panic. A pointer to the
1856 * first byte of the extra data is returned.
1857 */
Johannes Berg4df864c2017-06-16 14:29:21 +02001858void *skb_put(struct sk_buff *skb, unsigned int len)
Ilpo Järvinen0dde3e12008-03-27 17:43:41 -07001859{
Johannes Berg4df864c2017-06-16 14:29:21 +02001860 void *tmp = skb_tail_pointer(skb);
Ilpo Järvinen0dde3e12008-03-27 17:43:41 -07001861 SKB_LINEAR_ASSERT(skb);
1862 skb->tail += len;
1863 skb->len += len;
1864 if (unlikely(skb->tail > skb->end))
1865 skb_over_panic(skb, len, __builtin_return_address(0));
1866 return tmp;
1867}
1868EXPORT_SYMBOL(skb_put);
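
/*
 * Example (editorial sketch, not part of the kernel source): building a small
 * frame from scratch with alloc_skb()/skb_reserve()/skb_put().  PAYLOAD_LEN
 * and the payload buffer are hypothetical.
 *
 *	struct sk_buff *skb = alloc_skb(ETH_HLEN + PAYLOAD_LEN, GFP_KERNEL);
 *	u8 *p;
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, ETH_HLEN);	// leave headroom for the header
 *	p = skb_put(skb, PAYLOAD_LEN);	// extend the used data area
 *	memcpy(p, payload, PAYLOAD_LEN);
 */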
1869
Ilpo Järvinen6be8ac22008-03-27 17:47:24 -07001870/**
Ilpo Järvinenc2aa2702008-03-27 17:52:40 -07001871 * skb_push - add data to the start of a buffer
1872 * @skb: buffer to use
1873 * @len: amount of data to add
1874 *
1875 * This function extends the used data area of the buffer at the buffer
1876 * start. If this would exceed the total buffer headroom the kernel will
1877 * panic. A pointer to the first byte of the extra data is returned.
1878 */
Johannes Bergd58ff352017-06-16 14:29:23 +02001879void *skb_push(struct sk_buff *skb, unsigned int len)
Ilpo Järvinenc2aa2702008-03-27 17:52:40 -07001880{
1881 skb->data -= len;
1882 skb->len += len;
Ganesh Goudar9aba2f82018-08-02 15:34:52 +05301883 if (unlikely(skb->data < skb->head))
Ilpo Järvinenc2aa2702008-03-27 17:52:40 -07001884 skb_under_panic(skb, len, __builtin_return_address(0));
1885 return skb->data;
1886}
1887EXPORT_SYMBOL(skb_push);
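
/*
 * Example (editorial sketch, not part of the kernel source): prepending an
 * Ethernet header into previously reserved headroom.  dest_mac and src_mac
 * are hypothetical arrays.
 *
 *	struct ethhdr *eth = skb_push(skb, ETH_HLEN);
 *
 *	memcpy(eth->h_dest, dest_mac, ETH_ALEN);
 *	memcpy(eth->h_source, src_mac, ETH_ALEN);
 *	eth->h_proto = htons(ETH_P_IP);
 *	skb_reset_mac_header(skb);
 */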
1888
1889/**
Ilpo Järvinen6be8ac22008-03-27 17:47:24 -07001890 * skb_pull - remove data from the start of a buffer
1891 * @skb: buffer to use
1892 * @len: amount of data to remove
1893 *
1894 * This function removes data from the start of a buffer, returning
1895 * the memory to the headroom. A pointer to the next data in the buffer
1896 * is returned. Once the data has been pulled future pushes will overwrite
1897 * the old data.
1898 */
Johannes Bergaf728682017-06-16 14:29:22 +02001899void *skb_pull(struct sk_buff *skb, unsigned int len)
Ilpo Järvinen6be8ac22008-03-27 17:47:24 -07001900{
David S. Miller47d29642010-05-02 02:21:44 -07001901 return skb_pull_inline(skb, len);
Ilpo Järvinen6be8ac22008-03-27 17:47:24 -07001902}
1903EXPORT_SYMBOL(skb_pull);
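
/*
 * Example (editorial sketch, not part of the kernel source): stripping the
 * link-layer header on receive before handing the packet up.
 *
 *	skb_pull(skb, ETH_HLEN);	// skb->data now points at the payload
 *	skb_reset_network_header(skb);
 */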
1904
Ilpo Järvinen419ae742008-03-27 17:54:01 -07001905/**
1906 * skb_trim - remove end from a buffer
1907 * @skb: buffer to alter
1908 * @len: new length
1909 *
1910 * Cut the length of a buffer down by removing data from the tail. If
1911 * the buffer is already under the length specified it is not modified.
1912 * The skb must be linear.
1913 */
1914void skb_trim(struct sk_buff *skb, unsigned int len)
1915{
1916 if (skb->len > len)
1917 __skb_trim(skb, len);
1918}
1919EXPORT_SYMBOL(skb_trim);
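
/*
 * Example (editorial sketch, not part of the kernel source): a driver whose
 * hardware appends the 4-byte FCS can chop it off a linear frame.
 *
 *	if (skb->len > ETH_FCS_LEN)
 *		skb_trim(skb, skb->len - ETH_FCS_LEN);
 */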
1920
Herbert Xu3cc0e872006-06-09 16:13:38 -07001921/* Trims skb to length len. It can change skb pointers.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001922 */
1923
Herbert Xu3cc0e872006-06-09 16:13:38 -07001924int ___pskb_trim(struct sk_buff *skb, unsigned int len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001925{
Herbert Xu27b437c2006-07-13 19:26:39 -07001926 struct sk_buff **fragp;
1927 struct sk_buff *frag;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001928 int offset = skb_headlen(skb);
1929 int nfrags = skb_shinfo(skb)->nr_frags;
1930 int i;
Herbert Xu27b437c2006-07-13 19:26:39 -07001931 int err;
1932
1933 if (skb_cloned(skb) &&
1934 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
1935 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001936
Herbert Xuf4d26fb2006-07-30 20:20:28 -07001937 i = 0;
1938 if (offset >= len)
1939 goto drop_pages;
1940
1941 for (; i < nfrags; i++) {
Eric Dumazet9e903e02011-10-18 21:00:24 +00001942 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);
Herbert Xu27b437c2006-07-13 19:26:39 -07001943
1944 if (end < len) {
1945 offset = end;
1946 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001947 }
Herbert Xu27b437c2006-07-13 19:26:39 -07001948
Eric Dumazet9e903e02011-10-18 21:00:24 +00001949 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);
Herbert Xu27b437c2006-07-13 19:26:39 -07001950
Herbert Xuf4d26fb2006-07-30 20:20:28 -07001951drop_pages:
Herbert Xu27b437c2006-07-13 19:26:39 -07001952 skb_shinfo(skb)->nr_frags = i;
1953
1954 for (; i < nfrags; i++)
Ian Campbellea2ab692011-08-22 23:44:58 +00001955 skb_frag_unref(skb, i);
Herbert Xu27b437c2006-07-13 19:26:39 -07001956
David S. Miller21dc3302010-08-23 00:13:46 -07001957 if (skb_has_frag_list(skb))
Herbert Xu27b437c2006-07-13 19:26:39 -07001958 skb_drop_fraglist(skb);
Herbert Xuf4d26fb2006-07-30 20:20:28 -07001959 goto done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960 }
1961
Herbert Xu27b437c2006-07-13 19:26:39 -07001962 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
1963 fragp = &frag->next) {
1964 int end = offset + frag->len;
1965
1966 if (skb_shared(frag)) {
1967 struct sk_buff *nfrag;
1968
1969 nfrag = skb_clone(frag, GFP_ATOMIC);
1970 if (unlikely(!nfrag))
1971 return -ENOMEM;
1972
1973 nfrag->next = frag->next;
Eric Dumazet85bb2a62012-04-19 02:24:53 +00001974 consume_skb(frag);
Herbert Xu27b437c2006-07-13 19:26:39 -07001975 frag = nfrag;
1976 *fragp = frag;
1977 }
1978
1979 if (end < len) {
1980 offset = end;
1981 continue;
1982 }
1983
1984 if (end > len &&
1985 unlikely((err = pskb_trim(frag, len - offset))))
1986 return err;
1987
1988 if (frag->next)
1989 skb_drop_list(&frag->next);
1990 break;
1991 }
1992
Herbert Xuf4d26fb2006-07-30 20:20:28 -07001993done:
Herbert Xu27b437c2006-07-13 19:26:39 -07001994 if (len > skb_headlen(skb)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001995 skb->data_len -= skb->len - len;
1996 skb->len = len;
1997 } else {
Herbert Xu27b437c2006-07-13 19:26:39 -07001998 skb->len = len;
1999 skb->data_len = 0;
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07002000 skb_set_tail_pointer(skb, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002001 }
2002
Eric Dumazetc21b48c2017-04-26 09:07:46 -07002003 if (!skb->sk || skb->destructor == sock_edemux)
2004 skb_condense(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002005 return 0;
2006}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08002007EXPORT_SYMBOL(___pskb_trim);
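
/*
 * Example (editorial sketch, not part of the kernel source): callers normally
 * use the pskb_trim() wrapper from <linux/skbuff.h>, which only falls back to
 * ___pskb_trim() when the skb actually has to shrink.  new_len is
 * hypothetical.
 *
 *	if (pskb_trim(skb, new_len))
 *		goto drop;		// could not trim (allocation failure)
 */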
Linus Torvalds1da177e2005-04-16 15:20:36 -07002008
Eric Dumazet88078d92018-04-18 11:43:15 -07002009/* Note : use pskb_trim_rcsum() instead of calling this directly
2010 */
2011int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
2012{
2013 if (skb->ip_summed == CHECKSUM_COMPLETE) {
2014 int delta = skb->len - len;
2015
Dimitris Michailidisd55bef502018-10-19 17:07:13 -07002016 skb->csum = csum_block_sub(skb->csum,
2017 skb_checksum(skb, len, delta, 0),
2018 len);
Eric Dumazet88078d92018-04-18 11:43:15 -07002019 }
2020 return __pskb_trim(skb, len);
2021}
2022EXPORT_SYMBOL(pskb_trim_rcsum_slow);
2023
Linus Torvalds1da177e2005-04-16 15:20:36 -07002024/**
2025 * __pskb_pull_tail - advance tail of skb header
2026 * @skb: buffer to reallocate
2027 * @delta: number of bytes to advance tail
2028 *
2029 * The function only makes sense on a fragmented &sk_buff:
2030 * it expands the header, moving its tail forward and copying the
2031 * necessary data from the fragmented part.
2032 *
2033 * &sk_buff MUST have reference count of 1.
2034 *
2035 * Returns %NULL (and &sk_buff does not change) if pull failed
2036 * or value of new tail of skb in the case of success.
2037 *
2038 * All the pointers pointing into skb header may change and must be
2039 * reloaded after call to this function.
2040 */
2041
2042/* Moves tail of skb head forward, copying data from fragmented part,
2043 * when it is necessary.
2044 * 1. It may fail due to malloc failure.
2045 * 2. It may change skb pointers.
2046 *
2047 * It is pretty complicated. Luckily, it is called only in exceptional cases.
2048 */
Johannes Bergaf728682017-06-16 14:29:22 +02002049void *__pskb_pull_tail(struct sk_buff *skb, int delta)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002050{
2051	/* If the skb does not have enough free space at the tail, get a new one
2052	 * plus 128 bytes for future expansions. If we have enough
2053	 * room at the tail, reallocate without expansion only if the skb is cloned.
2054 */
Arnaldo Carvalho de Melo4305b542007-04-19 20:43:29 -07002055 int i, k, eat = (skb->tail + delta) - skb->end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002056
2057 if (eat > 0 || skb_cloned(skb)) {
2058 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
2059 GFP_ATOMIC))
2060 return NULL;
2061 }
2062
Tim Hansen9f77fad2017-10-09 11:37:59 -04002063 BUG_ON(skb_copy_bits(skb, skb_headlen(skb),
2064 skb_tail_pointer(skb), delta));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065
2066	/* Optimization: no fragments, no reason to pre-estimate
2067	 * the size of the pulled pages. Superb.
2068 */
David S. Miller21dc3302010-08-23 00:13:46 -07002069 if (!skb_has_frag_list(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002070 goto pull_pages;
2071
2072 /* Estimate size of pulled pages. */
2073 eat = delta;
2074 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Eric Dumazet9e903e02011-10-18 21:00:24 +00002075 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2076
2077 if (size >= eat)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002078 goto pull_pages;
Eric Dumazet9e903e02011-10-18 21:00:24 +00002079 eat -= size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002080 }
2081
2082	/* If we need to update the frag list, we are in trouble.
Wenhua Shi09001b02017-10-14 18:51:36 +02002083	 * Certainly, it is possible to add an offset to the skb data,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002084	 * but taking into account that pulling is expected to
2085	 * be a very rare operation, it is worth fighting against
2086	 * further bloating of the skb head and crucifying ourselves here instead.
2087	 * Pure masochism, indeed. 8)8)
2088 */
2089 if (eat) {
2090 struct sk_buff *list = skb_shinfo(skb)->frag_list;
2091 struct sk_buff *clone = NULL;
2092 struct sk_buff *insp = NULL;
2093
2094 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002095 if (list->len <= eat) {
2096 /* Eaten as whole. */
2097 eat -= list->len;
2098 list = list->next;
2099 insp = list;
2100 } else {
2101 /* Eaten partially. */
2102
2103 if (skb_shared(list)) {
2104 /* Sucks! We need to fork list. :-( */
2105 clone = skb_clone(list, GFP_ATOMIC);
2106 if (!clone)
2107 return NULL;
2108 insp = list->next;
2109 list = clone;
2110 } else {
2111 /* This may be pulled without
2112 * problems. */
2113 insp = list;
2114 }
2115 if (!pskb_pull(list, eat)) {
Wei Yongjunf3fbbe02009-02-25 00:37:32 +00002116 kfree_skb(clone);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117 return NULL;
2118 }
2119 break;
2120 }
2121 } while (eat);
2122
2123 /* Free pulled out fragments. */
2124 while ((list = skb_shinfo(skb)->frag_list) != insp) {
2125 skb_shinfo(skb)->frag_list = list->next;
2126 kfree_skb(list);
2127 }
2128 /* And insert new clone at head. */
2129 if (clone) {
2130 clone->next = list;
2131 skb_shinfo(skb)->frag_list = clone;
2132 }
2133 }
2134 /* Success! Now we may commit changes to skb data. */
2135
2136pull_pages:
2137 eat = delta;
2138 k = 0;
2139 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Eric Dumazet9e903e02011-10-18 21:00:24 +00002140 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2141
2142 if (size <= eat) {
Ian Campbellea2ab692011-08-22 23:44:58 +00002143 skb_frag_unref(skb, i);
Eric Dumazet9e903e02011-10-18 21:00:24 +00002144 eat -= size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002145 } else {
Jonathan Lemonb54c9d52019-07-30 07:40:33 -07002146 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2147
2148 *frag = skb_shinfo(skb)->frags[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002149 if (eat) {
Jonathan Lemonb54c9d52019-07-30 07:40:33 -07002150 skb_frag_off_add(frag, eat);
2151 skb_frag_size_sub(frag, eat);
linzhang3ccc6c62017-07-17 17:25:02 +08002152 if (!i)
2153 goto end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002154 eat = 0;
2155 }
2156 k++;
2157 }
2158 }
2159 skb_shinfo(skb)->nr_frags = k;
2160
linzhang3ccc6c62017-07-17 17:25:02 +08002161end:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002162 skb->tail += delta;
2163 skb->data_len -= delta;
2164
Willem de Bruijn1f8b9772017-08-03 16:29:41 -04002165 if (!skb->data_len)
2166 skb_zcopy_clear(skb, false);
2167
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07002168 return skb_tail_pointer(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002169}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08002170EXPORT_SYMBOL(__pskb_pull_tail);
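
/*
 * Example (editorial sketch, not part of the kernel source): protocol code
 * rarely calls __pskb_pull_tail() directly; it uses pskb_may_pull() from
 * <linux/skbuff.h> to make sure a header sits in the linear area before
 * dereferencing it.  struct myproto_hdr is hypothetical.
 *
 *	struct myproto_hdr *hdr;
 *
 *	if (!pskb_may_pull(skb, sizeof(*hdr)))
 *		goto drop;			// truncated packet
 *	hdr = (struct myproto_hdr *)skb->data;	// safe: header is now linear
 */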
Linus Torvalds1da177e2005-04-16 15:20:36 -07002171
Eric Dumazet22019b12011-07-29 18:37:31 +00002172/**
2173 * skb_copy_bits - copy bits from skb to kernel buffer
2174 * @skb: source skb
2175 * @offset: offset in source
2176 * @to: destination buffer
2177 * @len: number of bytes to copy
2178 *
2179 * Copy the specified number of bytes from the source skb to the
2180 * destination buffer.
2181 *
2182 * CAUTION ! :
2183 * If its prototype is ever changed,
2184 * check arch/{*}/net/{*}.S files,
2185 * since it is called from BPF assembly code.
2186 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
2188{
David S. Miller1a028e52007-04-27 15:21:23 -07002189 int start = skb_headlen(skb);
David S. Millerfbb398a2009-06-09 00:18:59 -07002190 struct sk_buff *frag_iter;
2191 int i, copy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002192
2193 if (offset > (int)skb->len - len)
2194 goto fault;
2195
2196 /* Copy header. */
David S. Miller1a028e52007-04-27 15:21:23 -07002197 if ((copy = start - offset) > 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002198 if (copy > len)
2199 copy = len;
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03002200 skb_copy_from_linear_data_offset(skb, offset, to, copy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201 if ((len -= copy) == 0)
2202 return 0;
2203 offset += copy;
2204 to += copy;
2205 }
2206
2207 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
David S. Miller1a028e52007-04-27 15:21:23 -07002208 int end;
Eric Dumazet51c56b02012-04-05 11:35:15 +02002209 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002210
Ilpo Järvinen547b7922008-07-25 21:43:18 -07002211 WARN_ON(start > offset + len);
David S. Miller1a028e52007-04-27 15:21:23 -07002212
Eric Dumazet51c56b02012-04-05 11:35:15 +02002213 end = start + skb_frag_size(f);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002214 if ((copy = end - offset) > 0) {
Willem de Bruijnc613c202017-07-31 08:15:47 -04002215 u32 p_off, p_len, copied;
2216 struct page *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002217 u8 *vaddr;
2218
2219 if (copy > len)
2220 copy = len;
2221
Willem de Bruijnc613c202017-07-31 08:15:47 -04002222 skb_frag_foreach_page(f,
Jonathan Lemonb54c9d52019-07-30 07:40:33 -07002223 skb_frag_off(f) + offset - start,
Willem de Bruijnc613c202017-07-31 08:15:47 -04002224 copy, p, p_off, p_len, copied) {
2225 vaddr = kmap_atomic(p);
2226 memcpy(to + copied, vaddr + p_off, p_len);
2227 kunmap_atomic(vaddr);
2228 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229
2230 if ((len -= copy) == 0)
2231 return 0;
2232 offset += copy;
2233 to += copy;
2234 }
David S. Miller1a028e52007-04-27 15:21:23 -07002235 start = end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002236 }
2237
David S. Millerfbb398a2009-06-09 00:18:59 -07002238 skb_walk_frags(skb, frag_iter) {
2239 int end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002240
David S. Millerfbb398a2009-06-09 00:18:59 -07002241 WARN_ON(start > offset + len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002242
David S. Millerfbb398a2009-06-09 00:18:59 -07002243 end = start + frag_iter->len;
2244 if ((copy = end - offset) > 0) {
2245 if (copy > len)
2246 copy = len;
2247 if (skb_copy_bits(frag_iter, offset - start, to, copy))
2248 goto fault;
2249 if ((len -= copy) == 0)
2250 return 0;
2251 offset += copy;
2252 to += copy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002253 }
David S. Millerfbb398a2009-06-09 00:18:59 -07002254 start = end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002255 }
Shirley Maa6686f22011-07-06 12:22:12 +00002256
Linus Torvalds1da177e2005-04-16 15:20:36 -07002257 if (!len)
2258 return 0;
2259
2260fault:
2261 return -EFAULT;
2262}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08002263EXPORT_SYMBOL(skb_copy_bits);
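
/*
 * Example (editorial sketch, not part of the kernel source): copying a small
 * region out of a possibly non-linear skb into a flat buffer.  "offset" is
 * hypothetical.
 *
 *	u8 hdr[8];
 *
 *	if (skb_copy_bits(skb, offset, hdr, sizeof(hdr)))
 *		goto drop;		// offset/length out of range
 */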
Linus Torvalds1da177e2005-04-16 15:20:36 -07002264
Jens Axboe9c55e012007-11-06 23:30:13 -08002265/*
2266 * Callback from splice_to_pipe(), if we need to release some pages
2267 * at the end of the spd in case we errored out while filling the pipe.
2268 */
2269static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
2270{
Jarek Poplawski8b9d3722009-01-19 17:03:56 -08002271 put_page(spd->pages[i]);
2272}
Jens Axboe9c55e012007-11-06 23:30:13 -08002273
David S. Millera108d5f2012-04-23 23:06:11 -04002274static struct page *linear_to_page(struct page *page, unsigned int *len,
2275 unsigned int *offset,
Eric Dumazet18aafc62013-01-11 14:46:37 +00002276 struct sock *sk)
Jarek Poplawski8b9d3722009-01-19 17:03:56 -08002277{
Eric Dumazet5640f762012-09-23 23:04:42 +00002278 struct page_frag *pfrag = sk_page_frag(sk);
Jarek Poplawski8b9d3722009-01-19 17:03:56 -08002279
Eric Dumazet5640f762012-09-23 23:04:42 +00002280 if (!sk_page_frag_refill(sk, pfrag))
2281 return NULL;
Jarek Poplawski4fb66992009-02-01 00:41:42 -08002282
Eric Dumazet5640f762012-09-23 23:04:42 +00002283 *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);
Jarek Poplawski4fb66992009-02-01 00:41:42 -08002284
Eric Dumazet5640f762012-09-23 23:04:42 +00002285 memcpy(page_address(pfrag->page) + pfrag->offset,
2286 page_address(page) + *offset, *len);
2287 *offset = pfrag->offset;
2288 pfrag->offset += *len;
Jarek Poplawski4fb66992009-02-01 00:41:42 -08002289
Eric Dumazet5640f762012-09-23 23:04:42 +00002290 return pfrag->page;
Jens Axboe9c55e012007-11-06 23:30:13 -08002291}
2292
Eric Dumazet41c73a02012-04-22 12:26:16 +00002293static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
2294 struct page *page,
2295 unsigned int offset)
2296{
2297 return spd->nr_pages &&
2298 spd->pages[spd->nr_pages - 1] == page &&
2299 (spd->partial[spd->nr_pages - 1].offset +
2300 spd->partial[spd->nr_pages - 1].len == offset);
2301}
2302
Jens Axboe9c55e012007-11-06 23:30:13 -08002303/*
2304 * Fill page/offset/length into spd, if it can hold more pages.
2305 */
David S. Millera108d5f2012-04-23 23:06:11 -04002306static bool spd_fill_page(struct splice_pipe_desc *spd,
2307 struct pipe_inode_info *pipe, struct page *page,
2308 unsigned int *len, unsigned int offset,
Eric Dumazet18aafc62013-01-11 14:46:37 +00002309 bool linear,
David S. Millera108d5f2012-04-23 23:06:11 -04002310 struct sock *sk)
Jens Axboe9c55e012007-11-06 23:30:13 -08002311{
Eric Dumazet41c73a02012-04-22 12:26:16 +00002312 if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
David S. Millera108d5f2012-04-23 23:06:11 -04002313 return true;
Jens Axboe9c55e012007-11-06 23:30:13 -08002314
Jarek Poplawski8b9d3722009-01-19 17:03:56 -08002315 if (linear) {
Eric Dumazet18aafc62013-01-11 14:46:37 +00002316 page = linear_to_page(page, len, &offset, sk);
Jarek Poplawski8b9d3722009-01-19 17:03:56 -08002317 if (!page)
David S. Millera108d5f2012-04-23 23:06:11 -04002318 return true;
Eric Dumazet41c73a02012-04-22 12:26:16 +00002319 }
2320 if (spd_can_coalesce(spd, page, offset)) {
2321 spd->partial[spd->nr_pages - 1].len += *len;
David S. Millera108d5f2012-04-23 23:06:11 -04002322 return false;
Eric Dumazet41c73a02012-04-22 12:26:16 +00002323 }
2324 get_page(page);
Jens Axboe9c55e012007-11-06 23:30:13 -08002325 spd->pages[spd->nr_pages] = page;
Jarek Poplawski4fb66992009-02-01 00:41:42 -08002326 spd->partial[spd->nr_pages].len = *len;
Jens Axboe9c55e012007-11-06 23:30:13 -08002327 spd->partial[spd->nr_pages].offset = offset;
Jens Axboe9c55e012007-11-06 23:30:13 -08002328 spd->nr_pages++;
Jarek Poplawski8b9d3722009-01-19 17:03:56 -08002329
David S. Millera108d5f2012-04-23 23:06:11 -04002330 return false;
Jens Axboe9c55e012007-11-06 23:30:13 -08002331}
2332
David S. Millera108d5f2012-04-23 23:06:11 -04002333static bool __splice_segment(struct page *page, unsigned int poff,
2334 unsigned int plen, unsigned int *off,
Eric Dumazet18aafc62013-01-11 14:46:37 +00002335 unsigned int *len,
Eric Dumazetd7ccf7c2012-04-23 23:35:04 -04002336 struct splice_pipe_desc *spd, bool linear,
David S. Millera108d5f2012-04-23 23:06:11 -04002337 struct sock *sk,
2338 struct pipe_inode_info *pipe)
Octavian Purdila2870c432008-07-15 00:49:11 -07002339{
2340 if (!*len)
David S. Millera108d5f2012-04-23 23:06:11 -04002341 return true;
Octavian Purdila2870c432008-07-15 00:49:11 -07002342
2343 /* skip this segment if already processed */
2344 if (*off >= plen) {
2345 *off -= plen;
David S. Millera108d5f2012-04-23 23:06:11 -04002346 return false;
Octavian Purdiladb43a282008-06-27 17:27:21 -07002347 }
Jens Axboe9c55e012007-11-06 23:30:13 -08002348
Octavian Purdila2870c432008-07-15 00:49:11 -07002349 /* ignore any bits we already processed */
Eric Dumazet9ca1b222013-01-05 21:31:18 +00002350 poff += *off;
2351 plen -= *off;
2352 *off = 0;
Octavian Purdila2870c432008-07-15 00:49:11 -07002353
Eric Dumazet18aafc62013-01-11 14:46:37 +00002354 do {
2355 unsigned int flen = min(*len, plen);
Octavian Purdila2870c432008-07-15 00:49:11 -07002356
Eric Dumazet18aafc62013-01-11 14:46:37 +00002357 if (spd_fill_page(spd, pipe, page, &flen, poff,
2358 linear, sk))
2359 return true;
2360 poff += flen;
2361 plen -= flen;
2362 *len -= flen;
2363 } while (*len && plen);
Octavian Purdila2870c432008-07-15 00:49:11 -07002364
David S. Millera108d5f2012-04-23 23:06:11 -04002365 return false;
Octavian Purdila2870c432008-07-15 00:49:11 -07002366}
2367
2368/*
David S. Millera108d5f2012-04-23 23:06:11 -04002369 * Map linear and fragment data from the skb to spd. It reports true if the
Octavian Purdila2870c432008-07-15 00:49:11 -07002370 * pipe is full or if we already spliced the requested length.
2371 */
David S. Millera108d5f2012-04-23 23:06:11 -04002372static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
2373 unsigned int *offset, unsigned int *len,
2374 struct splice_pipe_desc *spd, struct sock *sk)
Octavian Purdila2870c432008-07-15 00:49:11 -07002375{
2376 int seg;
Tom Herbertfa9835e2016-03-07 14:11:04 -08002377 struct sk_buff *iter;
Octavian Purdila2870c432008-07-15 00:49:11 -07002378
Eric Dumazet1d0c0b32012-04-27 02:10:03 +00002379 /* map the linear part :
Alexander Duyck2996d312012-05-02 18:18:42 +00002380 * If skb->head_frag is set, this 'linear' part is backed by a
2381 * fragment, and if the head is not shared with any clones then
2382 * we can avoid a copy since we own the head portion of this page.
Jens Axboe9c55e012007-11-06 23:30:13 -08002383 */
Octavian Purdila2870c432008-07-15 00:49:11 -07002384 if (__splice_segment(virt_to_page(skb->data),
2385 (unsigned long) skb->data & (PAGE_SIZE - 1),
2386 skb_headlen(skb),
Eric Dumazet18aafc62013-01-11 14:46:37 +00002387 offset, len, spd,
Alexander Duyck3a7c1ee42012-05-03 01:09:42 +00002388 skb_head_is_locked(skb),
Eric Dumazet1d0c0b32012-04-27 02:10:03 +00002389 sk, pipe))
David S. Millera108d5f2012-04-23 23:06:11 -04002390 return true;
Jens Axboe9c55e012007-11-06 23:30:13 -08002391
2392 /*
2393 * then map the fragments
2394 */
Jens Axboe9c55e012007-11-06 23:30:13 -08002395 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
2396 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
2397
Ian Campbellea2ab692011-08-22 23:44:58 +00002398 if (__splice_segment(skb_frag_page(f),
Jonathan Lemonb54c9d52019-07-30 07:40:33 -07002399 skb_frag_off(f), skb_frag_size(f),
Eric Dumazet18aafc62013-01-11 14:46:37 +00002400 offset, len, spd, false, sk, pipe))
David S. Millera108d5f2012-04-23 23:06:11 -04002401 return true;
Jens Axboe9c55e012007-11-06 23:30:13 -08002402 }
2403
Tom Herbertfa9835e2016-03-07 14:11:04 -08002404 skb_walk_frags(skb, iter) {
2405 if (*offset >= iter->len) {
2406 *offset -= iter->len;
2407 continue;
2408 }
2409 /* __skb_splice_bits() only fails if the output has no room
2410 * left, so no point in going over the frag_list for the error
2411 * case.
2412 */
2413 if (__skb_splice_bits(iter, pipe, offset, len, spd, sk))
2414 return true;
2415 }
2416
David S. Millera108d5f2012-04-23 23:06:11 -04002417 return false;
Jens Axboe9c55e012007-11-06 23:30:13 -08002418}
2419
2420/*
2421 * Map data from the skb to a pipe. Should handle the linear part,
Tom Herbertfa9835e2016-03-07 14:11:04 -08002422 * the fragments, and the frag list.
Jens Axboe9c55e012007-11-06 23:30:13 -08002423 */
Hannes Frederic Sowaa60e3cc2015-05-21 17:00:00 +02002424int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
Jens Axboe9c55e012007-11-06 23:30:13 -08002425 struct pipe_inode_info *pipe, unsigned int tlen,
Al Viro25869262016-09-17 21:02:10 -04002426 unsigned int flags)
Jens Axboe9c55e012007-11-06 23:30:13 -08002427{
Eric Dumazet41c73a02012-04-22 12:26:16 +00002428 struct partial_page partial[MAX_SKB_FRAGS];
2429 struct page *pages[MAX_SKB_FRAGS];
Jens Axboe9c55e012007-11-06 23:30:13 -08002430 struct splice_pipe_desc spd = {
2431 .pages = pages,
2432 .partial = partial,
Eric Dumazet047fe362012-06-12 15:24:40 +02002433 .nr_pages_max = MAX_SKB_FRAGS,
Miklos Szeredi28a625c2014-01-22 19:36:57 +01002434 .ops = &nosteal_pipe_buf_ops,
Jens Axboe9c55e012007-11-06 23:30:13 -08002435 .spd_release = sock_spd_release,
2436 };
Jens Axboe35f3d142010-05-20 10:43:18 +02002437 int ret = 0;
2438
Tom Herbertfa9835e2016-03-07 14:11:04 -08002439 __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk);
Jens Axboe9c55e012007-11-06 23:30:13 -08002440
Hannes Frederic Sowaa60e3cc2015-05-21 17:00:00 +02002441 if (spd.nr_pages)
Al Viro25869262016-09-17 21:02:10 -04002442 ret = splice_to_pipe(pipe, &spd);
Jens Axboe9c55e012007-11-06 23:30:13 -08002443
Jens Axboe35f3d142010-05-20 10:43:18 +02002444 return ret;
Jens Axboe9c55e012007-11-06 23:30:13 -08002445}
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002446EXPORT_SYMBOL_GPL(skb_splice_bits);
Jens Axboe9c55e012007-11-06 23:30:13 -08002447
Tom Herbert20bf50d2017-07-28 16:22:42 -07002448/* Send skb data on a socket. Socket must be locked. */
2449int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
2450 int len)
2451{
2452 unsigned int orig_len = len;
2453 struct sk_buff *head = skb;
2454 unsigned short fragidx;
2455 int slen, ret;
2456
2457do_frag_list:
2458
2459 /* Deal with head data */
2460 while (offset < skb_headlen(skb) && len) {
2461 struct kvec kv;
2462 struct msghdr msg;
2463
2464 slen = min_t(int, len, skb_headlen(skb) - offset);
2465 kv.iov_base = skb->data + offset;
John Fastabenddb5980d2017-08-15 22:31:34 -07002466 kv.iov_len = slen;
Tom Herbert20bf50d2017-07-28 16:22:42 -07002467 memset(&msg, 0, sizeof(msg));
John Fastabendbd95e6782019-05-24 08:01:00 -07002468 msg.msg_flags = MSG_DONTWAIT;
Tom Herbert20bf50d2017-07-28 16:22:42 -07002469
2470 ret = kernel_sendmsg_locked(sk, &msg, &kv, 1, slen);
2471 if (ret <= 0)
2472 goto error;
2473
2474 offset += ret;
2475 len -= ret;
2476 }
2477
2478 /* All the data was skb head? */
2479 if (!len)
2480 goto out;
2481
2482 /* Make offset relative to start of frags */
2483 offset -= skb_headlen(skb);
2484
2485 /* Find where we are in frag list */
2486 for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
2487 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx];
2488
Matthew Wilcox (Oracle)d8e18a52019-07-22 20:08:26 -07002489 if (offset < skb_frag_size(frag))
Tom Herbert20bf50d2017-07-28 16:22:42 -07002490 break;
2491
Matthew Wilcox (Oracle)d8e18a52019-07-22 20:08:26 -07002492 offset -= skb_frag_size(frag);
Tom Herbert20bf50d2017-07-28 16:22:42 -07002493 }
2494
2495 for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
2496 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx];
2497
Matthew Wilcox (Oracle)d8e18a52019-07-22 20:08:26 -07002498 slen = min_t(size_t, len, skb_frag_size(frag) - offset);
Tom Herbert20bf50d2017-07-28 16:22:42 -07002499
2500 while (slen) {
Matthew Wilcox (Oracle)d8e18a52019-07-22 20:08:26 -07002501 ret = kernel_sendpage_locked(sk, skb_frag_page(frag),
Jonathan Lemonb54c9d52019-07-30 07:40:33 -07002502 skb_frag_off(frag) + offset,
Tom Herbert20bf50d2017-07-28 16:22:42 -07002503 slen, MSG_DONTWAIT);
2504 if (ret <= 0)
2505 goto error;
2506
2507 len -= ret;
2508 offset += ret;
2509 slen -= ret;
2510 }
2511
2512 offset = 0;
2513 }
2514
2515 if (len) {
2516 /* Process any frag lists */
2517
2518 if (skb == head) {
2519 if (skb_has_frag_list(skb)) {
2520 skb = skb_shinfo(skb)->frag_list;
2521 goto do_frag_list;
2522 }
2523 } else if (skb->next) {
2524 skb = skb->next;
2525 goto do_frag_list;
2526 }
2527 }
2528
2529out:
2530 return orig_len - len;
2531
2532error:
2533 return orig_len == len ? ret : orig_len - len;
2534}
2535EXPORT_SYMBOL_GPL(skb_send_sock_locked);
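
/*
 * Example (editorial sketch, not part of the kernel source): callers such as
 * kTLS/strparser-style code transmit an skb's bytes on a socket they already
 * own the lock for.
 *
 *	int ret;
 *
 *	lock_sock(sk);
 *	ret = skb_send_sock_locked(sk, skb, 0, skb->len);
 *	release_sock(sk);
 */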
2536
Herbert Xu357b40a2005-04-19 22:30:14 -07002537/**
2538 * skb_store_bits - store bits from kernel buffer to skb
2539 * @skb: destination buffer
2540 * @offset: offset in destination
2541 * @from: source buffer
2542 * @len: number of bytes to copy
2543 *
2544 * Copy the specified number of bytes from the source buffer to the
2545 * destination skb. This function handles all the messy bits of
2546 * traversing fragment lists and such.
2547 */
2548
Stephen Hemminger0c6fcc82007-04-20 16:40:01 -07002549int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
Herbert Xu357b40a2005-04-19 22:30:14 -07002550{
David S. Miller1a028e52007-04-27 15:21:23 -07002551 int start = skb_headlen(skb);
David S. Millerfbb398a2009-06-09 00:18:59 -07002552 struct sk_buff *frag_iter;
2553 int i, copy;
Herbert Xu357b40a2005-04-19 22:30:14 -07002554
2555 if (offset > (int)skb->len - len)
2556 goto fault;
2557
David S. Miller1a028e52007-04-27 15:21:23 -07002558 if ((copy = start - offset) > 0) {
Herbert Xu357b40a2005-04-19 22:30:14 -07002559 if (copy > len)
2560 copy = len;
Arnaldo Carvalho de Melo27d7ff42007-03-31 11:55:19 -03002561 skb_copy_to_linear_data_offset(skb, offset, from, copy);
Herbert Xu357b40a2005-04-19 22:30:14 -07002562 if ((len -= copy) == 0)
2563 return 0;
2564 offset += copy;
2565 from += copy;
2566 }
2567
2568 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2569 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
David S. Miller1a028e52007-04-27 15:21:23 -07002570 int end;
Herbert Xu357b40a2005-04-19 22:30:14 -07002571
Ilpo Järvinen547b7922008-07-25 21:43:18 -07002572 WARN_ON(start > offset + len);
David S. Miller1a028e52007-04-27 15:21:23 -07002573
Eric Dumazet9e903e02011-10-18 21:00:24 +00002574 end = start + skb_frag_size(frag);
Herbert Xu357b40a2005-04-19 22:30:14 -07002575 if ((copy = end - offset) > 0) {
Willem de Bruijnc613c202017-07-31 08:15:47 -04002576 u32 p_off, p_len, copied;
2577 struct page *p;
Herbert Xu357b40a2005-04-19 22:30:14 -07002578 u8 *vaddr;
2579
2580 if (copy > len)
2581 copy = len;
2582
Willem de Bruijnc613c202017-07-31 08:15:47 -04002583 skb_frag_foreach_page(frag,
Jonathan Lemonb54c9d52019-07-30 07:40:33 -07002584 skb_frag_off(frag) + offset - start,
Willem de Bruijnc613c202017-07-31 08:15:47 -04002585 copy, p, p_off, p_len, copied) {
2586 vaddr = kmap_atomic(p);
2587 memcpy(vaddr + p_off, from + copied, p_len);
2588 kunmap_atomic(vaddr);
2589 }
Herbert Xu357b40a2005-04-19 22:30:14 -07002590
2591 if ((len -= copy) == 0)
2592 return 0;
2593 offset += copy;
2594 from += copy;
2595 }
David S. Miller1a028e52007-04-27 15:21:23 -07002596 start = end;
Herbert Xu357b40a2005-04-19 22:30:14 -07002597 }
2598
David S. Millerfbb398a2009-06-09 00:18:59 -07002599 skb_walk_frags(skb, frag_iter) {
2600 int end;
Herbert Xu357b40a2005-04-19 22:30:14 -07002601
David S. Millerfbb398a2009-06-09 00:18:59 -07002602 WARN_ON(start > offset + len);
Herbert Xu357b40a2005-04-19 22:30:14 -07002603
David S. Millerfbb398a2009-06-09 00:18:59 -07002604 end = start + frag_iter->len;
2605 if ((copy = end - offset) > 0) {
2606 if (copy > len)
2607 copy = len;
2608 if (skb_store_bits(frag_iter, offset - start,
2609 from, copy))
2610 goto fault;
2611 if ((len -= copy) == 0)
2612 return 0;
2613 offset += copy;
2614 from += copy;
Herbert Xu357b40a2005-04-19 22:30:14 -07002615 }
David S. Millerfbb398a2009-06-09 00:18:59 -07002616 start = end;
Herbert Xu357b40a2005-04-19 22:30:14 -07002617 }
2618 if (!len)
2619 return 0;
2620
2621fault:
2622 return -EFAULT;
2623}
Herbert Xu357b40a2005-04-19 22:30:14 -07002624EXPORT_SYMBOL(skb_store_bits);
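
/* Example (illustrative sketch, not part of the original file): write a
 * caller-supplied pattern into an skb at a given offset.  skb_store_bits()
 * walks the linear area, the page frags and the frag list for us and
 * returns -EFAULT when offset + len does not fit in the buffer.  The
 * helper name and the caller-owned pattern buffer are hypothetical.
 */
static __maybe_unused int example_stamp_payload(struct sk_buff *skb, int offset,
						const void *pattern, int len)
{
	int err;

	err = skb_store_bits(skb, offset, pattern, len);
	if (err)
		pr_debug("stamp failed: offset %d + len %d exceeds skb->len %u\n",
			 offset, len, skb->len);
	return err;
}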
2625
Linus Torvalds1da177e2005-04-16 15:20:36 -07002626/* Checksum skb data. */
Daniel Borkmann2817a332013-10-30 11:50:51 +01002627__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
2628 __wsum csum, const struct skb_checksum_ops *ops)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002629{
David S. Miller1a028e52007-04-27 15:21:23 -07002630 int start = skb_headlen(skb);
2631 int i, copy = start - offset;
David S. Millerfbb398a2009-06-09 00:18:59 -07002632 struct sk_buff *frag_iter;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002633 int pos = 0;
2634
2635 /* Checksum header. */
2636 if (copy > 0) {
2637 if (copy > len)
2638 copy = len;
Matteo Croce2544af02019-05-29 17:13:48 +02002639 csum = INDIRECT_CALL_1(ops->update, csum_partial_ext,
2640 skb->data + offset, copy, csum);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002641 if ((len -= copy) == 0)
2642 return csum;
2643 offset += copy;
2644 pos = copy;
2645 }
2646
2647 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
David S. Miller1a028e52007-04-27 15:21:23 -07002648 int end;
Eric Dumazet51c56b02012-04-05 11:35:15 +02002649 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002650
Ilpo Järvinen547b7922008-07-25 21:43:18 -07002651 WARN_ON(start > offset + len);
David S. Miller1a028e52007-04-27 15:21:23 -07002652
Eric Dumazet51c56b02012-04-05 11:35:15 +02002653 end = start + skb_frag_size(frag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002654 if ((copy = end - offset) > 0) {
Willem de Bruijnc613c202017-07-31 08:15:47 -04002655 u32 p_off, p_len, copied;
2656 struct page *p;
Al Viro44bb9362006-11-14 21:36:14 -08002657 __wsum csum2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002658 u8 *vaddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002659
2660 if (copy > len)
2661 copy = len;
Willem de Bruijnc613c202017-07-31 08:15:47 -04002662
2663 skb_frag_foreach_page(frag,
Jonathan Lemonb54c9d52019-07-30 07:40:33 -07002664 skb_frag_off(frag) + offset - start,
Willem de Bruijnc613c202017-07-31 08:15:47 -04002665 copy, p, p_off, p_len, copied) {
2666 vaddr = kmap_atomic(p);
Matteo Croce2544af02019-05-29 17:13:48 +02002667 csum2 = INDIRECT_CALL_1(ops->update,
2668 csum_partial_ext,
2669 vaddr + p_off, p_len, 0);
Willem de Bruijnc613c202017-07-31 08:15:47 -04002670 kunmap_atomic(vaddr);
Matteo Croce2544af02019-05-29 17:13:48 +02002671 csum = INDIRECT_CALL_1(ops->combine,
2672 csum_block_add_ext, csum,
2673 csum2, pos, p_len);
Willem de Bruijnc613c202017-07-31 08:15:47 -04002674 pos += p_len;
2675 }
2676
Linus Torvalds1da177e2005-04-16 15:20:36 -07002677 if (!(len -= copy))
2678 return csum;
2679 offset += copy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002680 }
David S. Miller1a028e52007-04-27 15:21:23 -07002681 start = end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002682 }
2683
David S. Millerfbb398a2009-06-09 00:18:59 -07002684 skb_walk_frags(skb, frag_iter) {
2685 int end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002686
David S. Millerfbb398a2009-06-09 00:18:59 -07002687 WARN_ON(start > offset + len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002688
David S. Millerfbb398a2009-06-09 00:18:59 -07002689 end = start + frag_iter->len;
2690 if ((copy = end - offset) > 0) {
2691 __wsum csum2;
2692 if (copy > len)
2693 copy = len;
Daniel Borkmann2817a332013-10-30 11:50:51 +01002694 csum2 = __skb_checksum(frag_iter, offset - start,
2695 copy, 0, ops);
Matteo Croce2544af02019-05-29 17:13:48 +02002696 csum = INDIRECT_CALL_1(ops->combine, csum_block_add_ext,
2697 csum, csum2, pos, copy);
David S. Millerfbb398a2009-06-09 00:18:59 -07002698 if ((len -= copy) == 0)
2699 return csum;
2700 offset += copy;
2701 pos += copy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002702 }
David S. Millerfbb398a2009-06-09 00:18:59 -07002703 start = end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002704 }
Kris Katterjohn09a62662006-01-08 22:24:28 -08002705 BUG_ON(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002706
2707 return csum;
2708}
Daniel Borkmann2817a332013-10-30 11:50:51 +01002709EXPORT_SYMBOL(__skb_checksum);
2710
2711__wsum skb_checksum(const struct sk_buff *skb, int offset,
2712 int len, __wsum csum)
2713{
2714 const struct skb_checksum_ops ops = {
Daniel Borkmanncea80ea2013-11-04 17:10:25 +01002715 .update = csum_partial_ext,
Daniel Borkmann2817a332013-10-30 11:50:51 +01002716 .combine = csum_block_add_ext,
2717 };
2718
2719 return __skb_checksum(skb, offset, len, csum, &ops);
2720}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08002721EXPORT_SYMBOL(skb_checksum);
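
/* Example (illustrative sketch, not part of the original file): fold the
 * checksum of a whole packet down to 16 bits, e.g. to compare against a
 * checksum carried in a header.  The helper name is hypothetical.
 */
static __maybe_unused __sum16 example_fold_packet_csum(const struct sk_buff *skb)
{
	__wsum csum = skb_checksum(skb, 0, skb->len, 0);

	return csum_fold(csum);
}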
Linus Torvalds1da177e2005-04-16 15:20:36 -07002722
2723/* Both of above in one bottle. */
2724
Al Viro81d77662006-11-14 21:37:33 -08002725__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
2726 u8 *to, int len, __wsum csum)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002727{
David S. Miller1a028e52007-04-27 15:21:23 -07002728 int start = skb_headlen(skb);
2729 int i, copy = start - offset;
David S. Millerfbb398a2009-06-09 00:18:59 -07002730 struct sk_buff *frag_iter;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002731 int pos = 0;
2732
2733 /* Copy header. */
2734 if (copy > 0) {
2735 if (copy > len)
2736 copy = len;
2737 csum = csum_partial_copy_nocheck(skb->data + offset, to,
2738 copy, csum);
2739 if ((len -= copy) == 0)
2740 return csum;
2741 offset += copy;
2742 to += copy;
2743 pos = copy;
2744 }
2745
2746 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
David S. Miller1a028e52007-04-27 15:21:23 -07002747 int end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002748
Ilpo Järvinen547b7922008-07-25 21:43:18 -07002749 WARN_ON(start > offset + len);
David S. Miller1a028e52007-04-27 15:21:23 -07002750
Eric Dumazet9e903e02011-10-18 21:00:24 +00002751 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002752 if ((copy = end - offset) > 0) {
Willem de Bruijnc613c202017-07-31 08:15:47 -04002753 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2754 u32 p_off, p_len, copied;
2755 struct page *p;
Al Viro50842052006-11-14 21:36:34 -08002756 __wsum csum2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002757 u8 *vaddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002758
2759 if (copy > len)
2760 copy = len;
Willem de Bruijnc613c202017-07-31 08:15:47 -04002761
2762 skb_frag_foreach_page(frag,
Jonathan Lemonb54c9d52019-07-30 07:40:33 -07002763 skb_frag_off(frag) + offset - start,
Willem de Bruijnc613c202017-07-31 08:15:47 -04002764 copy, p, p_off, p_len, copied) {
2765 vaddr = kmap_atomic(p);
2766 csum2 = csum_partial_copy_nocheck(vaddr + p_off,
2767 to + copied,
2768 p_len, 0);
2769 kunmap_atomic(vaddr);
2770 csum = csum_block_add(csum, csum2, pos);
2771 pos += p_len;
2772 }
2773
Linus Torvalds1da177e2005-04-16 15:20:36 -07002774 if (!(len -= copy))
2775 return csum;
2776 offset += copy;
2777 to += copy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002778 }
David S. Miller1a028e52007-04-27 15:21:23 -07002779 start = end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002780 }
2781
David S. Millerfbb398a2009-06-09 00:18:59 -07002782 skb_walk_frags(skb, frag_iter) {
2783 __wsum csum2;
2784 int end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002785
David S. Millerfbb398a2009-06-09 00:18:59 -07002786 WARN_ON(start > offset + len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002787
David S. Millerfbb398a2009-06-09 00:18:59 -07002788 end = start + frag_iter->len;
2789 if ((copy = end - offset) > 0) {
2790 if (copy > len)
2791 copy = len;
2792 csum2 = skb_copy_and_csum_bits(frag_iter,
2793 offset - start,
2794 to, copy, 0);
2795 csum = csum_block_add(csum, csum2, pos);
2796 if ((len -= copy) == 0)
2797 return csum;
2798 offset += copy;
2799 to += copy;
2800 pos += copy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002801 }
David S. Millerfbb398a2009-06-09 00:18:59 -07002802 start = end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002803 }
Kris Katterjohn09a62662006-01-08 22:24:28 -08002804 BUG_ON(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805 return csum;
2806}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08002807EXPORT_SYMBOL(skb_copy_and_csum_bits);
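
/* Example (illustrative sketch, not part of the original file): flatten an
 * skb into a caller-provided buffer and obtain the checksum of the copied
 * bytes in the same pass, much as a driver without checksum offload would.
 * The helper name is hypothetical; the caller must provide at least
 * skb->len bytes at @to.
 */
static __maybe_unused __wsum example_copy_and_csum(const struct sk_buff *skb, u8 *to)
{
	return skb_copy_and_csum_bits(skb, 0, to, skb->len, 0);
}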
Linus Torvalds1da177e2005-04-16 15:20:36 -07002808
Cong Wang49f8e832018-11-08 14:05:42 -08002809__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
2810{
2811 __sum16 sum;
2812
2813 sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
Cong Wang14641932018-11-26 09:31:26 -08002814 /* See comments in __skb_checksum_complete(). */
Cong Wang49f8e832018-11-08 14:05:42 -08002815 if (likely(!sum)) {
2816 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
2817 !skb->csum_complete_sw)
Cong Wang7fe50ac2018-11-12 14:47:18 -08002818 netdev_rx_csum_fault(skb->dev, skb);
Cong Wang49f8e832018-11-08 14:05:42 -08002819 }
2820 if (!skb_shared(skb))
2821 skb->csum_valid = !sum;
2822 return sum;
2823}
2824EXPORT_SYMBOL(__skb_checksum_complete_head);
2825
Cong Wang14641932018-11-26 09:31:26 -08002826/* This function assumes skb->csum already holds pseudo header's checksum,
2827 * which has been changed from the hardware checksum, for example, by
2828 * __skb_checksum_validate_complete(). And, the original skb->csum must
2829 * have been validated unsuccessfully for CHECKSUM_COMPLETE case.
2830 *
2831 * It returns non-zero if the recomputed checksum is still invalid, otherwise
2832 * zero. The new checksum is stored back into skb->csum unless the skb is
2833 * shared.
2834 */
Cong Wang49f8e832018-11-08 14:05:42 -08002835__sum16 __skb_checksum_complete(struct sk_buff *skb)
2836{
2837 __wsum csum;
2838 __sum16 sum;
2839
2840 csum = skb_checksum(skb, 0, skb->len, 0);
2841
Cong Wang49f8e832018-11-08 14:05:42 -08002842 sum = csum_fold(csum_add(skb->csum, csum));
Cong Wang14641932018-11-26 09:31:26 -08002843 /* This check is inverted, because we already knew the hardware
2844 * checksum is invalid before calling this function. So, if the
2845 * re-computed checksum is valid instead, then we have a mismatch
2846 * between the original skb->csum and skb_checksum(). This means either
2847 * the original hardware checksum is incorrect or we screw up skb->csum
2848 * when moving skb->data around.
2849 */
Cong Wang49f8e832018-11-08 14:05:42 -08002850 if (likely(!sum)) {
2851 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
2852 !skb->csum_complete_sw)
Cong Wang7fe50ac2018-11-12 14:47:18 -08002853 netdev_rx_csum_fault(skb->dev, skb);
Cong Wang49f8e832018-11-08 14:05:42 -08002854 }
2855
2856 if (!skb_shared(skb)) {
2857 /* Save full packet checksum */
2858 skb->csum = csum;
2859 skb->ip_summed = CHECKSUM_COMPLETE;
2860 skb->csum_complete_sw = 1;
2861 skb->csum_valid = !sum;
2862 }
2863
2864 return sum;
2865}
2866EXPORT_SYMBOL(__skb_checksum_complete);
2867
Davide Caratti96178132017-05-18 15:44:37 +02002868static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum)
2869{
2870 net_warn_ratelimited(
2871 "%s: attempt to compute crc32c without libcrc32c.ko\n",
2872 __func__);
2873 return 0;
2874}
2875
2876static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2,
2877 int offset, int len)
2878{
2879 net_warn_ratelimited(
2880 "%s: attempt to compute crc32c without libcrc32c.ko\n",
2881 __func__);
2882 return 0;
2883}
2884
2885static const struct skb_checksum_ops default_crc32c_ops = {
2886 .update = warn_crc32c_csum_update,
2887 .combine = warn_crc32c_csum_combine,
2888};
2889
2890const struct skb_checksum_ops *crc32c_csum_stub __read_mostly =
2891 &default_crc32c_ops;
2892EXPORT_SYMBOL(crc32c_csum_stub);
2893
Thomas Grafaf2806f2013-12-13 15:22:17 +01002894 /**
2895 * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
2896 * @from: source buffer
2897 *
2898 * Calculates the amount of linear headroom needed in the 'to' skb passed
2899 * into skb_zerocopy().
2900 */
2901unsigned int
2902skb_zerocopy_headlen(const struct sk_buff *from)
2903{
2904 unsigned int hlen = 0;
2905
2906 if (!from->head_frag ||
2907 skb_headlen(from) < L1_CACHE_BYTES ||
2908 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
2909 hlen = skb_headlen(from);
2910
2911 if (skb_has_frag_list(from))
2912 hlen = from->len;
2913
2914 return hlen;
2915}
2916EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);
2917
2918/**
2919 * skb_zerocopy - Zero copy skb to skb
2920 * @to: destination buffer
Masanari Iida7fceb4d2014-01-29 01:05:28 +09002921 * @from: source buffer
Thomas Grafaf2806f2013-12-13 15:22:17 +01002922 * @len: number of bytes to copy from source buffer
2923 * @hlen: size of linear headroom in destination buffer
2924 *
2925 * Copies up to `len` bytes from `from` to `to` by creating references
2926 * to the frags in the source buffer.
2927 *
2928 * The `hlen` as calculated by skb_zerocopy_headlen() specifies the
2929 * headroom in the `to` buffer.
Zoltan Kiss36d5fe62014-03-26 22:37:45 +00002930 *
2931 * Return value:
2932 * 0: everything is OK
2933 * -ENOMEM: couldn't orphan frags of @from due to lack of memory
2934 * -EFAULT: skb_copy_bits() found some problem with skb geometry
Thomas Grafaf2806f2013-12-13 15:22:17 +01002935 */
Zoltan Kiss36d5fe62014-03-26 22:37:45 +00002936int
2937skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
Thomas Grafaf2806f2013-12-13 15:22:17 +01002938{
2939 int i, j = 0;
2940 int plen = 0; /* length of skb->head fragment */
Zoltan Kiss36d5fe62014-03-26 22:37:45 +00002941 int ret;
Thomas Grafaf2806f2013-12-13 15:22:17 +01002942 struct page *page;
2943 unsigned int offset;
2944
2945 BUG_ON(!from->head_frag && !hlen);
2946
 2947	/* don't bother with small payloads */

Zoltan Kiss36d5fe62014-03-26 22:37:45 +00002948 if (len <= skb_tailroom(to))
2949 return skb_copy_bits(from, 0, skb_put(to, len), len);
Thomas Grafaf2806f2013-12-13 15:22:17 +01002950
2951 if (hlen) {
Zoltan Kiss36d5fe62014-03-26 22:37:45 +00002952 ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
2953 if (unlikely(ret))
2954 return ret;
Thomas Grafaf2806f2013-12-13 15:22:17 +01002955 len -= hlen;
2956 } else {
2957 plen = min_t(int, skb_headlen(from), len);
2958 if (plen) {
2959 page = virt_to_head_page(from->head);
2960 offset = from->data - (unsigned char *)page_address(page);
2961 __skb_fill_page_desc(to, 0, page, offset, plen);
2962 get_page(page);
2963 j = 1;
2964 len -= plen;
2965 }
2966 }
2967
2968 to->truesize += len + plen;
2969 to->len += len + plen;
2970 to->data_len += len + plen;
2971
Zoltan Kiss36d5fe62014-03-26 22:37:45 +00002972 if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
2973 skb_tx_error(from);
2974 return -ENOMEM;
2975 }
Willem de Bruijn1f8b9772017-08-03 16:29:41 -04002976 skb_zerocopy_clone(to, from, GFP_ATOMIC);
Zoltan Kiss36d5fe62014-03-26 22:37:45 +00002977
Thomas Grafaf2806f2013-12-13 15:22:17 +01002978 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
Matthew Wilcox (Oracle)d8e18a52019-07-22 20:08:26 -07002979 int size;
2980
Thomas Grafaf2806f2013-12-13 15:22:17 +01002981 if (!len)
2982 break;
2983 skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
Matthew Wilcox (Oracle)d8e18a52019-07-22 20:08:26 -07002984 size = min_t(int, skb_frag_size(&skb_shinfo(to)->frags[j]),
2985 len);
2986 skb_frag_size_set(&skb_shinfo(to)->frags[j], size);
2987 len -= size;
Thomas Grafaf2806f2013-12-13 15:22:17 +01002988 skb_frag_ref(to, j);
2989 j++;
2990 }
2991 skb_shinfo(to)->nr_frags = j;
Zoltan Kiss36d5fe62014-03-26 22:37:45 +00002992
2993 return 0;
Thomas Grafaf2806f2013-12-13 15:22:17 +01002994}
2995EXPORT_SYMBOL_GPL(skb_zerocopy);
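
/* Example (illustrative sketch, not part of the original file): build a new
 * skb that references @from's payload without copying the page frags,
 * combining skb_zerocopy_headlen() and skb_zerocopy() the way callers such
 * as nfnetlink_queue do.  The helper name and GFP flags are hypothetical.
 */
static __maybe_unused struct sk_buff *example_zerocopy_payload(struct sk_buff *from)
{
	unsigned int hlen = skb_zerocopy_headlen(from);
	struct sk_buff *to;

	to = alloc_skb(hlen, GFP_ATOMIC);
	if (!to)
		return NULL;

	if (skb_zerocopy(to, from, from->len, hlen)) {
		kfree_skb(to);
		return NULL;
	}
	return to;
}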
2996
Linus Torvalds1da177e2005-04-16 15:20:36 -07002997void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
2998{
Al Virod3bc23e2006-11-14 21:24:49 -08002999 __wsum csum;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003000 long csstart;
3001
Patrick McHardy84fa7932006-08-29 16:44:56 -07003002 if (skb->ip_summed == CHECKSUM_PARTIAL)
Michał Mirosław55508d62010-12-14 15:24:08 +00003003 csstart = skb_checksum_start_offset(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003004 else
3005 csstart = skb_headlen(skb);
3006
Kris Katterjohn09a62662006-01-08 22:24:28 -08003007 BUG_ON(csstart > skb_headlen(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003008
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03003009 skb_copy_from_linear_data(skb, to, csstart);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003010
3011 csum = 0;
3012 if (csstart != skb->len)
3013 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
3014 skb->len - csstart, 0);
3015
Patrick McHardy84fa7932006-08-29 16:44:56 -07003016 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Al Viroff1dcad2006-11-20 18:07:29 -08003017 long csstuff = csstart + skb->csum_offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003018
Al Virod3bc23e2006-11-14 21:24:49 -08003019 *((__sum16 *)(to + csstuff)) = csum_fold(csum);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003020 }
3021}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08003022EXPORT_SYMBOL(skb_copy_and_csum_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003023
3024/**
3025 * skb_dequeue - remove from the head of the queue
3026 * @list: list to dequeue from
3027 *
3028 * Remove the head of the list. The list lock is taken so the function
3029 * may be used safely with other locking list functions. The head item is
3030 * returned or %NULL if the list is empty.
3031 */
3032
3033struct sk_buff *skb_dequeue(struct sk_buff_head *list)
3034{
3035 unsigned long flags;
3036 struct sk_buff *result;
3037
3038 spin_lock_irqsave(&list->lock, flags);
3039 result = __skb_dequeue(list);
3040 spin_unlock_irqrestore(&list->lock, flags);
3041 return result;
3042}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08003043EXPORT_SYMBOL(skb_dequeue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003044
3045/**
3046 * skb_dequeue_tail - remove from the tail of the queue
3047 * @list: list to dequeue from
3048 *
3049 * Remove the tail of the list. The list lock is taken so the function
3050 * may be used safely with other locking list functions. The tail item is
3051 * returned or %NULL if the list is empty.
3052 */
3053struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
3054{
3055 unsigned long flags;
3056 struct sk_buff *result;
3057
3058 spin_lock_irqsave(&list->lock, flags);
3059 result = __skb_dequeue_tail(list);
3060 spin_unlock_irqrestore(&list->lock, flags);
3061 return result;
3062}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08003063EXPORT_SYMBOL(skb_dequeue_tail);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003064
3065/**
3066 * skb_queue_purge - empty a list
3067 * @list: list to empty
3068 *
3069 * Delete all buffers on an &sk_buff list. Each buffer is removed from
3070 * the list and one reference dropped. This function takes the list
3071 * lock and is atomic with respect to other list locking functions.
3072 */
3073void skb_queue_purge(struct sk_buff_head *list)
3074{
3075 struct sk_buff *skb;
3076 while ((skb = skb_dequeue(list)) != NULL)
3077 kfree_skb(skb);
3078}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08003079EXPORT_SYMBOL(skb_queue_purge);
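
/* Example (illustrative sketch, not part of the original file): a typical
 * driver-private queue built from the helpers above - buffers are added
 * with skb_queue_tail(), consumed with skb_dequeue(), and anything still
 * queued is dropped with skb_queue_purge() on teardown.  The structure and
 * helper names are hypothetical.
 */
struct example_txq {
	struct sk_buff_head q;
};

static __maybe_unused void example_txq_init(struct example_txq *txq)
{
	skb_queue_head_init(&txq->q);
}

static __maybe_unused struct sk_buff *example_txq_pop(struct example_txq *txq)
{
	return skb_dequeue(&txq->q);	/* NULL when the queue is empty */
}

static __maybe_unused void example_txq_destroy(struct example_txq *txq)
{
	skb_queue_purge(&txq->q);	/* frees anything still queued */
}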
Linus Torvalds1da177e2005-04-16 15:20:36 -07003080
3081/**
Yaogong Wang9f5afea2016-09-07 14:49:28 -07003082 * skb_rbtree_purge - empty a skb rbtree
3083 * @root: root of the rbtree to empty
Peter Oskolkov385114d2018-08-02 23:34:38 +00003084 * Return value: the sum of truesizes of all purged skbs.
Yaogong Wang9f5afea2016-09-07 14:49:28 -07003085 *
3086 * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from
3087 * the list and one reference dropped. This function does not take
3088 * any lock. Synchronization should be handled by the caller (e.g., TCP
3089 * out-of-order queue is protected by the socket lock).
3090 */
Peter Oskolkov385114d2018-08-02 23:34:38 +00003091unsigned int skb_rbtree_purge(struct rb_root *root)
Yaogong Wang9f5afea2016-09-07 14:49:28 -07003092{
Eric Dumazet7c905842017-09-23 12:39:12 -07003093 struct rb_node *p = rb_first(root);
Peter Oskolkov385114d2018-08-02 23:34:38 +00003094 unsigned int sum = 0;
Yaogong Wang9f5afea2016-09-07 14:49:28 -07003095
Eric Dumazet7c905842017-09-23 12:39:12 -07003096 while (p) {
3097 struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
3098
3099 p = rb_next(p);
3100 rb_erase(&skb->rbnode, root);
Peter Oskolkov385114d2018-08-02 23:34:38 +00003101 sum += skb->truesize;
Yaogong Wang9f5afea2016-09-07 14:49:28 -07003102 kfree_skb(skb);
Eric Dumazet7c905842017-09-23 12:39:12 -07003103 }
Peter Oskolkov385114d2018-08-02 23:34:38 +00003104 return sum;
Yaogong Wang9f5afea2016-09-07 14:49:28 -07003105}
3106
3107/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07003108 * skb_queue_head - queue a buffer at the list head
3109 * @list: list to use
3110 * @newsk: buffer to queue
3111 *
3112 * Queue a buffer at the start of the list. This function takes the
 3113 * list lock and can be used safely with other locking &sk_buff
 3114 * functions.
3115 *
3116 * A buffer cannot be placed on two lists at the same time.
3117 */
3118void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
3119{
3120 unsigned long flags;
3121
3122 spin_lock_irqsave(&list->lock, flags);
3123 __skb_queue_head(list, newsk);
3124 spin_unlock_irqrestore(&list->lock, flags);
3125}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08003126EXPORT_SYMBOL(skb_queue_head);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003127
3128/**
3129 * skb_queue_tail - queue a buffer at the list tail
3130 * @list: list to use
3131 * @newsk: buffer to queue
3132 *
3133 * Queue a buffer at the tail of the list. This function takes the
 3134 * list lock and can be used safely with other locking &sk_buff
 3135 * functions.
3136 *
3137 * A buffer cannot be placed on two lists at the same time.
3138 */
3139void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
3140{
3141 unsigned long flags;
3142
3143 spin_lock_irqsave(&list->lock, flags);
3144 __skb_queue_tail(list, newsk);
3145 spin_unlock_irqrestore(&list->lock, flags);
3146}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08003147EXPORT_SYMBOL(skb_queue_tail);
David S. Miller8728b832005-08-09 19:25:21 -07003148
Linus Torvalds1da177e2005-04-16 15:20:36 -07003149/**
3150 * skb_unlink - remove a buffer from a list
3151 * @skb: buffer to remove
David S. Miller8728b832005-08-09 19:25:21 -07003152 * @list: list to use
Linus Torvalds1da177e2005-04-16 15:20:36 -07003153 *
David S. Miller8728b832005-08-09 19:25:21 -07003154 * Remove a packet from a list. The list locks are taken and this
 3155 * function is atomic with respect to other locked list calls.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003156 *
David S. Miller8728b832005-08-09 19:25:21 -07003157 * You must know what list the SKB is on.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003158 */
David S. Miller8728b832005-08-09 19:25:21 -07003159void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003160{
David S. Miller8728b832005-08-09 19:25:21 -07003161 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003162
David S. Miller8728b832005-08-09 19:25:21 -07003163 spin_lock_irqsave(&list->lock, flags);
3164 __skb_unlink(skb, list);
3165 spin_unlock_irqrestore(&list->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003166}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08003167EXPORT_SYMBOL(skb_unlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003168
Linus Torvalds1da177e2005-04-16 15:20:36 -07003169/**
3170 * skb_append - append a buffer
3171 * @old: buffer to insert after
3172 * @newsk: buffer to insert
David S. Miller8728b832005-08-09 19:25:21 -07003173 * @list: list to use
Linus Torvalds1da177e2005-04-16 15:20:36 -07003174 *
3175 * Place a packet after a given packet in a list. The list locks are taken
3176 * and this function is atomic with respect to other list locked calls.
3177 * A buffer cannot be placed on two lists at the same time.
3178 */
David S. Miller8728b832005-08-09 19:25:21 -07003179void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003180{
3181 unsigned long flags;
3182
David S. Miller8728b832005-08-09 19:25:21 -07003183 spin_lock_irqsave(&list->lock, flags);
Gerrit Renker7de6c032008-04-14 00:05:09 -07003184 __skb_queue_after(list, old, newsk);
David S. Miller8728b832005-08-09 19:25:21 -07003185 spin_unlock_irqrestore(&list->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003186}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08003187EXPORT_SYMBOL(skb_append);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003188
Linus Torvalds1da177e2005-04-16 15:20:36 -07003189static inline void skb_split_inside_header(struct sk_buff *skb,
3190 struct sk_buff* skb1,
3191 const u32 len, const int pos)
3192{
3193 int i;
3194
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03003195 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
3196 pos - len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003197 /* And move data appendix as is. */
3198 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
3199 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
3200
3201 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
3202 skb_shinfo(skb)->nr_frags = 0;
3203 skb1->data_len = skb->data_len;
3204 skb1->len += skb1->data_len;
3205 skb->data_len = 0;
3206 skb->len = len;
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07003207 skb_set_tail_pointer(skb, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003208}
3209
3210static inline void skb_split_no_header(struct sk_buff *skb,
3211 struct sk_buff* skb1,
3212 const u32 len, int pos)
3213{
3214 int i, k = 0;
3215 const int nfrags = skb_shinfo(skb)->nr_frags;
3216
3217 skb_shinfo(skb)->nr_frags = 0;
3218 skb1->len = skb1->data_len = skb->len - len;
3219 skb->len = len;
3220 skb->data_len = len - pos;
3221
3222 for (i = 0; i < nfrags; i++) {
Eric Dumazet9e903e02011-10-18 21:00:24 +00003223 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003224
3225 if (pos + size > len) {
3226 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
3227
3228 if (pos < len) {
3229 /* Split frag.
3230 * We have two variants in this case:
3231 * 1. Move all the frag to the second
3232 * part, if it is possible. F.e.
3233 * this approach is mandatory for TUX,
3234 * where splitting is expensive.
 3235 * 2. Split the frag accurately at the boundary, which is what we do here.
3236 */
Ian Campbellea2ab692011-08-22 23:44:58 +00003237 skb_frag_ref(skb, i);
Jonathan Lemonb54c9d52019-07-30 07:40:33 -07003238 skb_frag_off_add(&skb_shinfo(skb1)->frags[0], len - pos);
Eric Dumazet9e903e02011-10-18 21:00:24 +00003239 skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
3240 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003241 skb_shinfo(skb)->nr_frags++;
3242 }
3243 k++;
3244 } else
3245 skb_shinfo(skb)->nr_frags++;
3246 pos += size;
3247 }
3248 skb_shinfo(skb1)->nr_frags = k;
3249}
3250
3251/**
3252 * skb_split - Split fragmented skb to two parts at length len.
3253 * @skb: the buffer to split
3254 * @skb1: the buffer to receive the second part
3255 * @len: new length for skb
3256 */
3257void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
3258{
3259 int pos = skb_headlen(skb);
3260
Willem de Bruijnfff88032017-06-08 11:35:03 -04003261 skb_shinfo(skb1)->tx_flags |= skb_shinfo(skb)->tx_flags &
3262 SKBTX_SHARED_FRAG;
Willem de Bruijn1f8b9772017-08-03 16:29:41 -04003263 skb_zerocopy_clone(skb1, skb, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003264 if (len < pos) /* Split line is inside header. */
3265 skb_split_inside_header(skb, skb1, len, pos);
3266 else /* Second chunk has no header, nothing to copy. */
3267 skb_split_no_header(skb, skb1, len, pos);
3268}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08003269EXPORT_SYMBOL(skb_split);
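
/* Example (illustrative sketch, not part of the original file): split a
 * fully built skb into two at @len bytes of payload, roughly what TCP's
 * tcp_fragment() does when it must retransmit only part of a queued
 * segment.  Allocation sizing and the helper name are hypothetical.
 */
static __maybe_unused struct sk_buff *example_split_at(struct sk_buff *skb,
							unsigned int len)
{
	struct sk_buff *rest;

	if (len >= skb->len)
		return NULL;

	/* the tail skb needs linear room for whatever part of the
	 * header area ends up on its side of the split
	 */
	rest = alloc_skb(skb_headroom(skb) + skb_headlen(skb), GFP_ATOMIC);
	if (!rest)
		return NULL;

	skb_reserve(rest, skb_headroom(skb));
	skb_split(skb, rest, len);
	return rest;
}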
Linus Torvalds1da177e2005-04-16 15:20:36 -07003270
Ilpo Järvinen9f782db2008-11-25 13:57:01 -08003271/* Shifting from/to a cloned skb is a no-go.
3272 *
3273 * Caller cannot keep skb_shinfo related pointers past calling here!
3274 */
Ilpo Järvinen832d11c2008-11-24 21:20:15 -08003275static int skb_prepare_for_shift(struct sk_buff *skb)
3276{
Ilpo Järvinen0ace2852008-11-24 21:30:21 -08003277 return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
Ilpo Järvinen832d11c2008-11-24 21:20:15 -08003278}
3279
3280/**
3281 * skb_shift - Shifts paged data partially from skb to another
3282 * @tgt: buffer into which tail data gets added
3283 * @skb: buffer from which the paged data comes from
3284 * @shiftlen: shift up to this many bytes
3285 *
3286 * Attempts to shift up to shiftlen worth of bytes, which may be less than
Feng King20e994a2011-11-21 01:47:11 +00003287 * the length of the skb, from skb to tgt. Returns the number of bytes shifted.
Ilpo Järvinen832d11c2008-11-24 21:20:15 -08003288 * It's up to caller to free skb if everything was shifted.
3289 *
3290 * If @tgt runs out of frags, the whole operation is aborted.
3291 *
3292 * Skb cannot include anything else but paged data while tgt is allowed
3293 * to have non-paged data as well.
3294 *
3295 * TODO: full sized shift could be optimized but that would need
3296 * specialized skb free'er to handle frags without up-to-date nr_frags.
3297 */
3298int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
3299{
3300 int from, to, merge, todo;
Matthew Wilcox (Oracle)d8e18a52019-07-22 20:08:26 -07003301 skb_frag_t *fragfrom, *fragto;
Ilpo Järvinen832d11c2008-11-24 21:20:15 -08003302
3303 BUG_ON(shiftlen > skb->len);
Eric Dumazetf8071cd2016-11-15 12:51:50 -08003304
3305 if (skb_headlen(skb))
3306 return 0;
Willem de Bruijn1f8b9772017-08-03 16:29:41 -04003307 if (skb_zcopy(tgt) || skb_zcopy(skb))
3308 return 0;
Ilpo Järvinen832d11c2008-11-24 21:20:15 -08003309
3310 todo = shiftlen;
3311 from = 0;
3312 to = skb_shinfo(tgt)->nr_frags;
3313 fragfrom = &skb_shinfo(skb)->frags[from];
3314
3315 /* Actual merge is delayed until the point when we know we can
3316 * commit all, so that we don't have to undo partial changes
3317 */
3318 if (!to ||
Ian Campbellea2ab692011-08-22 23:44:58 +00003319 !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
Jonathan Lemonb54c9d52019-07-30 07:40:33 -07003320 skb_frag_off(fragfrom))) {
Ilpo Järvinen832d11c2008-11-24 21:20:15 -08003321 merge = -1;
3322 } else {
3323 merge = to - 1;
3324
Eric Dumazet9e903e02011-10-18 21:00:24 +00003325 todo -= skb_frag_size(fragfrom);
Ilpo Järvinen832d11c2008-11-24 21:20:15 -08003326 if (todo < 0) {
3327 if (skb_prepare_for_shift(skb) ||
3328 skb_prepare_for_shift(tgt))
3329 return 0;
3330
Ilpo Järvinen9f782db2008-11-25 13:57:01 -08003331 /* All previous frag pointers might be stale! */
3332 fragfrom = &skb_shinfo(skb)->frags[from];
Ilpo Järvinen832d11c2008-11-24 21:20:15 -08003333 fragto = &skb_shinfo(tgt)->frags[merge];
3334
Eric Dumazet9e903e02011-10-18 21:00:24 +00003335 skb_frag_size_add(fragto, shiftlen);
3336 skb_frag_size_sub(fragfrom, shiftlen);
Jonathan Lemonb54c9d52019-07-30 07:40:33 -07003337 skb_frag_off_add(fragfrom, shiftlen);
Ilpo Järvinen832d11c2008-11-24 21:20:15 -08003338
3339 goto onlymerged;
3340 }
3341
3342 from++;
3343 }
3344
3345 /* Skip full, not-fitting skb to avoid expensive operations */
3346 if ((shiftlen == skb->len) &&
3347 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
3348 return 0;
3349
3350 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
3351 return 0;
3352
3353 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
3354 if (to == MAX_SKB_FRAGS)
3355 return 0;
3356
3357 fragfrom = &skb_shinfo(skb)->frags[from];
3358 fragto = &skb_shinfo(tgt)->frags[to];
3359
Eric Dumazet9e903e02011-10-18 21:00:24 +00003360 if (todo >= skb_frag_size(fragfrom)) {
Ilpo Järvinen832d11c2008-11-24 21:20:15 -08003361 *fragto = *fragfrom;
Eric Dumazet9e903e02011-10-18 21:00:24 +00003362 todo -= skb_frag_size(fragfrom);
Ilpo Järvinen832d11c2008-11-24 21:20:15 -08003363 from++;
3364 to++;
3365
3366 } else {
Ian Campbellea2ab692011-08-22 23:44:58 +00003367 __skb_frag_ref(fragfrom);
Jonathan Lemonb54c9d52019-07-30 07:40:33 -07003368 skb_frag_page_copy(fragto, fragfrom);
3369 skb_frag_off_copy(fragto, fragfrom);
Eric Dumazet9e903e02011-10-18 21:00:24 +00003370 skb_frag_size_set(fragto, todo);
Ilpo Järvinen832d11c2008-11-24 21:20:15 -08003371
Jonathan Lemonb54c9d52019-07-30 07:40:33 -07003372 skb_frag_off_add(fragfrom, todo);
Eric Dumazet9e903e02011-10-18 21:00:24 +00003373 skb_frag_size_sub(fragfrom, todo);
Ilpo Järvinen832d11c2008-11-24 21:20:15 -08003374 todo = 0;
3375
3376 to++;
3377 break;
3378 }
3379 }
3380
3381 /* Ready to "commit" this state change to tgt */
3382 skb_shinfo(tgt)->nr_frags = to;
3383
3384 if (merge >= 0) {
3385 fragfrom = &skb_shinfo(skb)->frags[0];
3386 fragto = &skb_shinfo(tgt)->frags[merge];
3387
Eric Dumazet9e903e02011-10-18 21:00:24 +00003388 skb_frag_size_add(fragto, skb_frag_size(fragfrom));
Ian Campbellea2ab692011-08-22 23:44:58 +00003389 __skb_frag_unref(fragfrom);
Ilpo Järvinen832d11c2008-11-24 21:20:15 -08003390 }
3391
3392 /* Reposition in the original skb */
3393 to = 0;
3394 while (from < skb_shinfo(skb)->nr_frags)
3395 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
3396 skb_shinfo(skb)->nr_frags = to;
3397
3398 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
3399
3400onlymerged:
3401 /* Most likely the tgt won't ever need its checksum anymore, skb on
3402 * the other hand might need it if it needs to be resent
3403 */
3404 tgt->ip_summed = CHECKSUM_PARTIAL;
3405 skb->ip_summed = CHECKSUM_PARTIAL;
3406
3407 /* Yak, is it really working this way? Some helper please? */
3408 skb->len -= shiftlen;
3409 skb->data_len -= shiftlen;
3410 skb->truesize -= shiftlen;
3411 tgt->len += shiftlen;
3412 tgt->data_len += shiftlen;
3413 tgt->truesize += shiftlen;
3414
3415 return shiftlen;
3416}
3417
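/* Example (illustrative sketch, not part of the original file): try to move
 * all of @skb's paged data into @prev, as a coalescing pass might do, and
 * only report success when the full length was shifted so the caller knows
 * it may free @skb.  The helper name is hypothetical.
 */
static __maybe_unused bool example_coalesce_into_prev(struct sk_buff *prev,
						      struct sk_buff *skb)
{
	unsigned int want = skb->len;

	if (skb_headlen(skb))
		return false;	/* skb_shift() only moves paged data */

	return skb_shift(prev, skb, want) == want;
}
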
Thomas Graf677e90e2005-06-23 20:59:51 -07003418/**
3419 * skb_prepare_seq_read - Prepare a sequential read of skb data
3420 * @skb: the buffer to read
3421 * @from: lower offset of data to be read
3422 * @to: upper offset of data to be read
3423 * @st: state variable
3424 *
3425 * Initializes the specified state variable. Must be called before
3426 * invoking skb_seq_read() for the first time.
3427 */
3428void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
3429 unsigned int to, struct skb_seq_state *st)
3430{
3431 st->lower_offset = from;
3432 st->upper_offset = to;
3433 st->root_skb = st->cur_skb = skb;
3434 st->frag_idx = st->stepped_offset = 0;
3435 st->frag_data = NULL;
3436}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08003437EXPORT_SYMBOL(skb_prepare_seq_read);
Thomas Graf677e90e2005-06-23 20:59:51 -07003438
3439/**
3440 * skb_seq_read - Sequentially read skb data
3441 * @consumed: number of bytes consumed by the caller so far
3442 * @data: destination pointer for data to be returned
3443 * @st: state variable
3444 *
Mathias Krausebc323832013-11-07 14:18:26 +01003445 * Reads a block of skb data at @consumed relative to the
Thomas Graf677e90e2005-06-23 20:59:51 -07003446 * lower offset specified to skb_prepare_seq_read(). Assigns
Mathias Krausebc323832013-11-07 14:18:26 +01003447 * the head of the data block to @data and returns the length
Thomas Graf677e90e2005-06-23 20:59:51 -07003448 * of the block or 0 if the end of the skb data or the upper
3449 * offset has been reached.
3450 *
3451 * The caller is not required to consume all of the data
Mathias Krausebc323832013-11-07 14:18:26 +01003452 * returned, i.e. @consumed is typically set to the number
Thomas Graf677e90e2005-06-23 20:59:51 -07003453 * of bytes already consumed and the next call to
3454 * skb_seq_read() will return the remaining part of the block.
3455 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003456 * Note 1: The size of each block of data returned can be arbitrary;
Masanari Iidae793c0f2014-09-04 23:44:36 +09003457 * this limitation is the cost of zerocopy sequential
Thomas Graf677e90e2005-06-23 20:59:51 -07003458 * reads of potentially non-linear data.
3459 *
Randy Dunlapbc2cda12008-02-13 15:03:25 -08003460 * Note 2: Fragment lists within fragments are not implemented
Thomas Graf677e90e2005-06-23 20:59:51 -07003461 * at the moment, state->root_skb could be replaced with
3462 * a stack for this purpose.
3463 */
3464unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
3465 struct skb_seq_state *st)
3466{
3467 unsigned int block_limit, abs_offset = consumed + st->lower_offset;
3468 skb_frag_t *frag;
3469
Wedson Almeida Filhoaeb193e2013-06-23 23:33:48 -07003470 if (unlikely(abs_offset >= st->upper_offset)) {
3471 if (st->frag_data) {
3472 kunmap_atomic(st->frag_data);
3473 st->frag_data = NULL;
3474 }
Thomas Graf677e90e2005-06-23 20:59:51 -07003475 return 0;
Wedson Almeida Filhoaeb193e2013-06-23 23:33:48 -07003476 }
Thomas Graf677e90e2005-06-23 20:59:51 -07003477
3478next_skb:
Herbert Xu95e3b242009-01-29 16:07:52 -08003479 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
Thomas Graf677e90e2005-06-23 20:59:51 -07003480
Thomas Chenault995b3372009-05-18 21:43:27 -07003481 if (abs_offset < block_limit && !st->frag_data) {
Herbert Xu95e3b242009-01-29 16:07:52 -08003482 *data = st->cur_skb->data + (abs_offset - st->stepped_offset);
Thomas Graf677e90e2005-06-23 20:59:51 -07003483 return block_limit - abs_offset;
3484 }
3485
3486 if (st->frag_idx == 0 && !st->frag_data)
3487 st->stepped_offset += skb_headlen(st->cur_skb);
3488
3489 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
3490 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
Eric Dumazet9e903e02011-10-18 21:00:24 +00003491 block_limit = skb_frag_size(frag) + st->stepped_offset;
Thomas Graf677e90e2005-06-23 20:59:51 -07003492
3493 if (abs_offset < block_limit) {
3494 if (!st->frag_data)
Eric Dumazet51c56b02012-04-05 11:35:15 +02003495 st->frag_data = kmap_atomic(skb_frag_page(frag));
Thomas Graf677e90e2005-06-23 20:59:51 -07003496
Jonathan Lemonb54c9d52019-07-30 07:40:33 -07003497 *data = (u8 *) st->frag_data + skb_frag_off(frag) +
Thomas Graf677e90e2005-06-23 20:59:51 -07003498 (abs_offset - st->stepped_offset);
3499
3500 return block_limit - abs_offset;
3501 }
3502
3503 if (st->frag_data) {
Eric Dumazet51c56b02012-04-05 11:35:15 +02003504 kunmap_atomic(st->frag_data);
Thomas Graf677e90e2005-06-23 20:59:51 -07003505 st->frag_data = NULL;
3506 }
3507
3508 st->frag_idx++;
Eric Dumazet9e903e02011-10-18 21:00:24 +00003509 st->stepped_offset += skb_frag_size(frag);
Thomas Graf677e90e2005-06-23 20:59:51 -07003510 }
3511
Olaf Kirch5b5a60d2007-06-23 23:11:52 -07003512 if (st->frag_data) {
Eric Dumazet51c56b02012-04-05 11:35:15 +02003513 kunmap_atomic(st->frag_data);
Olaf Kirch5b5a60d2007-06-23 23:11:52 -07003514 st->frag_data = NULL;
3515 }
3516
David S. Miller21dc3302010-08-23 00:13:46 -07003517 if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
Shyam Iyer71b33462009-01-29 16:12:42 -08003518 st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
Thomas Graf677e90e2005-06-23 20:59:51 -07003519 st->frag_idx = 0;
3520 goto next_skb;
Shyam Iyer71b33462009-01-29 16:12:42 -08003521 } else if (st->cur_skb->next) {
3522 st->cur_skb = st->cur_skb->next;
Herbert Xu95e3b242009-01-29 16:07:52 -08003523 st->frag_idx = 0;
Thomas Graf677e90e2005-06-23 20:59:51 -07003524 goto next_skb;
3525 }
3526
3527 return 0;
3528}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08003529EXPORT_SYMBOL(skb_seq_read);
Thomas Graf677e90e2005-06-23 20:59:51 -07003530
3531/**
3532 * skb_abort_seq_read - Abort a sequential read of skb data
3533 * @st: state variable
3534 *
3535 * Must be called if skb_seq_read() was not called until it
3536 * returned 0.
3537 */
3538void skb_abort_seq_read(struct skb_seq_state *st)
3539{
3540 if (st->frag_data)
Eric Dumazet51c56b02012-04-05 11:35:15 +02003541 kunmap_atomic(st->frag_data);
Thomas Graf677e90e2005-06-23 20:59:51 -07003542}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08003543EXPORT_SYMBOL(skb_abort_seq_read);
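
/* Example (illustrative sketch, not part of the original file): scan an skb
 * for the first zero byte using the sequential read API above.  The read is
 * aborted explicitly because the loop may stop before skb_seq_read()
 * returns 0.  The helper name is hypothetical.
 */
static __maybe_unused int example_find_first_zero(struct sk_buff *skb)
{
	struct skb_seq_state st;
	unsigned int consumed = 0, len;
	const u8 *data;
	int ret = -1;

	skb_prepare_seq_read(skb, 0, skb->len, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		const u8 *p = memchr(data, 0, len);

		if (p) {
			ret = consumed + (p - data);
			break;
		}
		consumed += len;
	}
	skb_abort_seq_read(&st);
	return ret;
}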
Thomas Graf677e90e2005-06-23 20:59:51 -07003544
Thomas Graf3fc7e8a2005-06-23 21:00:17 -07003545#define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb))
3546
3547static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
3548 struct ts_config *conf,
3549 struct ts_state *state)
3550{
3551 return skb_seq_read(offset, text, TS_SKB_CB(state));
3552}
3553
3554static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
3555{
3556 skb_abort_seq_read(TS_SKB_CB(state));
3557}
3558
3559/**
3560 * skb_find_text - Find a text pattern in skb data
3561 * @skb: the buffer to look in
3562 * @from: search offset
3563 * @to: search limit
3564 * @config: textsearch configuration
Thomas Graf3fc7e8a2005-06-23 21:00:17 -07003565 *
3566 * Finds a pattern in the skb data according to the specified
3567 * textsearch configuration. Use textsearch_next() to retrieve
3568 * subsequent occurrences of the pattern. Returns the offset
3569 * to the first occurrence or UINT_MAX if no match was found.
3570 */
3571unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
Bojan Prtvar059a2442015-02-22 11:46:35 +01003572 unsigned int to, struct ts_config *config)
Thomas Graf3fc7e8a2005-06-23 21:00:17 -07003573{
Bojan Prtvar059a2442015-02-22 11:46:35 +01003574 struct ts_state state;
Phil Oesterf72b9482006-06-26 00:00:57 -07003575 unsigned int ret;
3576
Thomas Graf3fc7e8a2005-06-23 21:00:17 -07003577 config->get_next_block = skb_ts_get_next_block;
3578 config->finish = skb_ts_finish;
3579
Bojan Prtvar059a2442015-02-22 11:46:35 +01003580 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state));
Thomas Graf3fc7e8a2005-06-23 21:00:17 -07003581
Bojan Prtvar059a2442015-02-22 11:46:35 +01003582 ret = textsearch_find(config, &state);
Phil Oesterf72b9482006-06-26 00:00:57 -07003583 return (ret <= to - from ? ret : UINT_MAX);
Thomas Graf3fc7e8a2005-06-23 21:00:17 -07003584}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08003585EXPORT_SYMBOL(skb_find_text);
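
/* Example (illustrative sketch, not part of the original file): look for an
 * ASCII pattern anywhere in an skb, the way the xt_string match drives this
 * helper.  The textsearch algorithm, GFP flags and helper name are
 * hypothetical.
 */
static __maybe_unused bool example_skb_contains(struct sk_buff *skb,
						const char *pattern)
{
	struct ts_config *conf;
	unsigned int pos;

	conf = textsearch_prepare("kmp", pattern, strlen(pattern),
				  GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return false;

	pos = skb_find_text(skb, 0, skb->len, conf);
	textsearch_destroy(conf);

	return pos != UINT_MAX;
}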
Thomas Graf3fc7e8a2005-06-23 21:00:17 -07003586
Hannes Frederic Sowabe12a1f2015-05-21 16:59:58 +02003587int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
3588 int offset, size_t size)
3589{
3590 int i = skb_shinfo(skb)->nr_frags;
3591
3592 if (skb_can_coalesce(skb, i, page, offset)) {
3593 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
3594 } else if (i < MAX_SKB_FRAGS) {
3595 get_page(page);
3596 skb_fill_page_desc(skb, i, page, offset, size);
3597 } else {
3598 return -EMSGSIZE;
3599 }
3600
3601 return 0;
3602}
3603EXPORT_SYMBOL_GPL(skb_append_pagefrags);
3604
Herbert Xucbb042f2006-03-20 22:43:56 -08003605/**
3606 * skb_pull_rcsum - pull skb and update receive checksum
3607 * @skb: buffer to update
Herbert Xucbb042f2006-03-20 22:43:56 -08003608 * @len: length of data pulled
3609 *
3610 * This function performs an skb_pull on the packet and updates
Urs Thuermannfee54fa2008-02-12 22:03:25 -08003611 * the CHECKSUM_COMPLETE checksum. It should be used on
Patrick McHardy84fa7932006-08-29 16:44:56 -07003612 * receive path processing instead of skb_pull unless you know
3613 * that the checksum difference is zero (e.g., a valid IP header)
3614 * or you are setting ip_summed to CHECKSUM_NONE.
Herbert Xucbb042f2006-03-20 22:43:56 -08003615 */
Johannes Bergaf728682017-06-16 14:29:22 +02003616void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
Herbert Xucbb042f2006-03-20 22:43:56 -08003617{
Pravin B Shelar31b33df2015-09-28 17:24:25 -07003618 unsigned char *data = skb->data;
3619
Herbert Xucbb042f2006-03-20 22:43:56 -08003620 BUG_ON(len > skb->len);
Pravin B Shelar31b33df2015-09-28 17:24:25 -07003621 __skb_pull(skb, len);
3622 skb_postpull_rcsum(skb, data, len);
3623 return skb->data;
Herbert Xucbb042f2006-03-20 22:43:56 -08003624}
Arnaldo Carvalho de Melof94691a2006-03-20 22:47:55 -08003625EXPORT_SYMBOL_GPL(skb_pull_rcsum);
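
/* Example (illustrative sketch, not part of the original file): strip a
 * fixed-size outer header on the receive path while keeping a
 * CHECKSUM_COMPLETE value in skb->csum consistent, similar to what VLAN
 * untagging does with skb_pull_rcsum().  The helper name and header length
 * parameter are hypothetical.
 */
static __maybe_unused int example_strip_outer_header(struct sk_buff *skb,
						     unsigned int hdr_len)
{
	if (!pskb_may_pull(skb, hdr_len))
		return -EINVAL;

	skb_pull_rcsum(skb, hdr_len);
	skb_reset_network_header(skb);
	return 0;
}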
3626
Yonghong Song13acc942018-03-21 16:31:03 -07003627static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb)
3628{
3629 skb_frag_t head_frag;
3630 struct page *page;
3631
3632 page = virt_to_head_page(frag_skb->head);
Matthew Wilcox (Oracle)d8e18a52019-07-22 20:08:26 -07003633 __skb_frag_set_page(&head_frag, page);
Jonathan Lemonb54c9d52019-07-30 07:40:33 -07003634 skb_frag_off_set(&head_frag, frag_skb->data -
3635 (unsigned char *)page_address(page));
Matthew Wilcox (Oracle)d8e18a52019-07-22 20:08:26 -07003636 skb_frag_size_set(&head_frag, skb_headlen(frag_skb));
Yonghong Song13acc942018-03-21 16:31:03 -07003637 return head_frag;
3638}
3639
Steffen Klassert3a1296a2020-01-25 11:26:44 +01003640struct sk_buff *skb_segment_list(struct sk_buff *skb,
3641 netdev_features_t features,
3642 unsigned int offset)
3643{
3644 struct sk_buff *list_skb = skb_shinfo(skb)->frag_list;
3645 unsigned int tnl_hlen = skb_tnl_header_len(skb);
3646 unsigned int delta_truesize = 0;
3647 unsigned int delta_len = 0;
3648 struct sk_buff *tail = NULL;
3649 struct sk_buff *nskb;
3650
3651 skb_push(skb, -skb_network_offset(skb) + offset);
3652
3653 skb_shinfo(skb)->frag_list = NULL;
3654
3655 do {
3656 nskb = list_skb;
3657 list_skb = list_skb->next;
3658
3659 if (!tail)
3660 skb->next = nskb;
3661 else
3662 tail->next = nskb;
3663
3664 tail = nskb;
3665
3666 delta_len += nskb->len;
3667 delta_truesize += nskb->truesize;
3668
3669 skb_push(nskb, -skb_network_offset(nskb) + offset);
3670
Florian Westphalcf673ed2020-03-30 18:51:29 +02003671 skb_release_head_state(nskb);
Steffen Klassert3a1296a2020-01-25 11:26:44 +01003672 __copy_skb_header(nskb, skb);
3673
3674 skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb));
3675 skb_copy_from_linear_data_offset(skb, -tnl_hlen,
3676 nskb->data - tnl_hlen,
3677 offset + tnl_hlen);
3678
3679 if (skb_needs_linearize(nskb, features) &&
3680 __skb_linearize(nskb))
3681 goto err_linearize;
3682
3683 } while (list_skb);
3684
3685 skb->truesize = skb->truesize - delta_truesize;
3686 skb->data_len = skb->data_len - delta_len;
3687 skb->len = skb->len - delta_len;
3688
3689 skb_gso_reset(skb);
3690
3691 skb->prev = tail;
3692
3693 if (skb_needs_linearize(skb, features) &&
3694 __skb_linearize(skb))
3695 goto err_linearize;
3696
3697 skb_get(skb);
3698
3699 return skb;
3700
3701err_linearize:
3702 kfree_skb_list(skb->next);
3703 skb->next = NULL;
3704 return ERR_PTR(-ENOMEM);
3705}
3706EXPORT_SYMBOL_GPL(skb_segment_list);
3707
3708int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
3709{
3710 if (unlikely(p->len + skb->len >= 65536))
3711 return -E2BIG;
3712
3713 if (NAPI_GRO_CB(p)->last == p)
3714 skb_shinfo(p)->frag_list = skb;
3715 else
3716 NAPI_GRO_CB(p)->last->next = skb;
3717
3718 skb_pull(skb, skb_gro_offset(skb));
3719
3720 NAPI_GRO_CB(p)->last = skb;
3721 NAPI_GRO_CB(p)->count++;
3722 p->data_len += skb->len;
3723 p->truesize += skb->truesize;
3724 p->len += skb->len;
3725
3726 NAPI_GRO_CB(skb)->same_flow = 1;
3727
3728 return 0;
3729}
Steffen Klassert3a1296a2020-01-25 11:26:44 +01003730
Herbert Xuf4c50d92006-06-22 03:02:40 -07003731/**
3732 * skb_segment - Perform protocol segmentation on skb.
Michael S. Tsirkindf5771f2014-03-10 18:29:19 +02003733 * @head_skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07003734 * @features: features for the output path (see dev->features)
Herbert Xuf4c50d92006-06-22 03:02:40 -07003735 *
3736 * This function performs segmentation on the given skb. It returns
Ben Hutchings4c821d72008-04-13 21:52:48 -07003737 * a pointer to the first in a list of new skbs for the segments.
3738 * In case of error it returns ERR_PTR(err).
Herbert Xuf4c50d92006-06-22 03:02:40 -07003739 */
Michael S. Tsirkindf5771f2014-03-10 18:29:19 +02003740struct sk_buff *skb_segment(struct sk_buff *head_skb,
3741 netdev_features_t features)
Herbert Xuf4c50d92006-06-22 03:02:40 -07003742{
3743 struct sk_buff *segs = NULL;
3744 struct sk_buff *tail = NULL;
Michael S. Tsirkin1a4ceda2014-03-10 19:27:59 +02003745 struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
Michael S. Tsirkindf5771f2014-03-10 18:29:19 +02003746 skb_frag_t *frag = skb_shinfo(head_skb)->frags;
3747 unsigned int mss = skb_shinfo(head_skb)->gso_size;
3748 unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
Michael S. Tsirkin1fd819e2014-03-10 19:28:08 +02003749 struct sk_buff *frag_skb = head_skb;
Herbert Xuf4c50d92006-06-22 03:02:40 -07003750 unsigned int offset = doffset;
Michael S. Tsirkindf5771f2014-03-10 18:29:19 +02003751 unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
Alexander Duyck802ab552016-04-10 21:45:03 -04003752 unsigned int partial_segs = 0;
Herbert Xuf4c50d92006-06-22 03:02:40 -07003753 unsigned int headroom;
Alexander Duyck802ab552016-04-10 21:45:03 -04003754 unsigned int len = head_skb->len;
Pravin B Shelarec5f0612013-03-07 09:28:01 +00003755 __be16 proto;
Alexander Duyck36c98382016-05-02 09:38:18 -07003756 bool csum, sg;
Michael S. Tsirkindf5771f2014-03-10 18:29:19 +02003757 int nfrags = skb_shinfo(head_skb)->nr_frags;
Herbert Xuf4c50d92006-06-22 03:02:40 -07003758 int err = -ENOMEM;
3759 int i = 0;
3760 int pos;
3761
Shmulik Ladkani3dcbdb12019-09-06 12:23:50 +03003762 if (list_skb && !list_skb->head_frag && skb_headlen(list_skb) &&
3763 (skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY)) {
3764 /* gso_size is untrusted, and we have a frag_list with a linear
3765 * non head_frag head.
3766 *
3767 * (we assume checking the first list_skb member suffices;
 3768 * i.e. if any of the list_skb members has a non-head_frag
 3769 * head, then the first one does too).
3770 *
3771 * If head_skb's headlen does not fit requested gso_size, it
3772 * means that the frag_list members do NOT terminate on exact
3773 * gso_size boundaries. Hence we cannot perform skb_frag_t page
3774 * sharing. Therefore we must fallback to copying the frag_list
3775 * skbs; we do so by disabling SG.
3776 */
3777 if (mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb))
3778 features &= ~NETIF_F_SG;
3779 }
3780
Wei-Chun Chao5882a072014-06-08 23:48:54 -07003781 __skb_push(head_skb, doffset);
Miaohe Lin2f631132020-08-01 17:36:05 +08003782 proto = skb_network_protocol(head_skb, NULL);
Pravin B Shelarec5f0612013-03-07 09:28:01 +00003783 if (unlikely(!proto))
3784 return ERR_PTR(-EINVAL);
3785
Alexander Duyck36c98382016-05-02 09:38:18 -07003786 sg = !!(features & NETIF_F_SG);
Alexander Duyckf245d072016-02-05 15:28:26 -08003787 csum = !!can_checksum_protocol(features, proto);
Tom Herbert7e2b10c2014-06-04 17:20:02 -07003788
Steffen Klassert07b26c92016-09-19 12:58:47 +02003789 if (sg && csum && (mss != GSO_BY_FRAGS)) {
3790 if (!(features & NETIF_F_GSO_PARTIAL)) {
3791 struct sk_buff *iter;
Ilan Tayari43170c42017-04-19 21:26:07 +03003792 unsigned int frag_len;
Steffen Klassert07b26c92016-09-19 12:58:47 +02003793
3794 if (!list_skb ||
3795 !net_gso_ok(features, skb_shinfo(head_skb)->gso_type))
3796 goto normal;
3797
Ilan Tayari43170c42017-04-19 21:26:07 +03003798 /* If we get here then all the required
3799 * GSO features except frag_list are supported.
3800 * Try to split the SKB to multiple GSO SKBs
3801 * with no frag_list.
3802 * Currently we can do that only when the buffers don't
3803 * have a linear part and all the buffers except
3804 * the last are of the same length.
Steffen Klassert07b26c92016-09-19 12:58:47 +02003805 */
Ilan Tayari43170c42017-04-19 21:26:07 +03003806 frag_len = list_skb->len;
Steffen Klassert07b26c92016-09-19 12:58:47 +02003807 skb_walk_frags(head_skb, iter) {
Ilan Tayari43170c42017-04-19 21:26:07 +03003808 if (frag_len != iter->len && iter->next)
3809 goto normal;
Ilan Tayarieaffadb2017-04-08 02:07:08 +03003810 if (skb_headlen(iter) && !iter->head_frag)
Steffen Klassert07b26c92016-09-19 12:58:47 +02003811 goto normal;
3812
3813 len -= iter->len;
3814 }
Ilan Tayari43170c42017-04-19 21:26:07 +03003815
3816 if (len != frag_len)
3817 goto normal;
Steffen Klassert07b26c92016-09-19 12:58:47 +02003818 }
3819
3820 /* GSO partial only requires that we trim off any excess that
3821 * doesn't fit into an MSS sized block, so take care of that
3822 * now.
3823 */
Alexander Duyck802ab552016-04-10 21:45:03 -04003824 partial_segs = len / mss;
Alexander Duyckd7fb5a82016-05-02 09:38:12 -07003825 if (partial_segs > 1)
3826 mss *= partial_segs;
3827 else
3828 partial_segs = 0;
Alexander Duyck802ab552016-04-10 21:45:03 -04003829 }
3830
Steffen Klassert07b26c92016-09-19 12:58:47 +02003831normal:
Michael S. Tsirkindf5771f2014-03-10 18:29:19 +02003832 headroom = skb_headroom(head_skb);
3833 pos = skb_headlen(head_skb);
Herbert Xuf4c50d92006-06-22 03:02:40 -07003834
3835 do {
3836 struct sk_buff *nskb;
Michael S. Tsirkin8cb19902014-03-10 18:29:04 +02003837 skb_frag_t *nskb_frag;
Herbert Xuc8884ed2006-10-29 15:59:41 -08003838 int hsize;
Herbert Xuf4c50d92006-06-22 03:02:40 -07003839 int size;
3840
Marcelo Ricardo Leitner3953c462016-06-02 15:05:40 -03003841 if (unlikely(mss == GSO_BY_FRAGS)) {
3842 len = list_skb->len;
3843 } else {
3844 len = head_skb->len - offset;
3845 if (len > mss)
3846 len = mss;
3847 }
Herbert Xuf4c50d92006-06-22 03:02:40 -07003848
Michael S. Tsirkindf5771f2014-03-10 18:29:19 +02003849 hsize = skb_headlen(head_skb) - offset;
Herbert Xuf4c50d92006-06-22 03:02:40 -07003850 if (hsize < 0)
3851 hsize = 0;
Herbert Xuc8884ed2006-10-29 15:59:41 -08003852 if (hsize > len || !sg)
3853 hsize = len;
Herbert Xuf4c50d92006-06-22 03:02:40 -07003854
Michael S. Tsirkin1a4ceda2014-03-10 19:27:59 +02003855 if (!hsize && i >= nfrags && skb_headlen(list_skb) &&
3856 (skb_headlen(list_skb) == len || sg)) {
3857 BUG_ON(skb_headlen(list_skb) > len);
Herbert Xu89319d382008-12-15 23:26:06 -08003858
Herbert Xu9d8506c2013-11-21 11:10:04 -08003859 i = 0;
Michael S. Tsirkin1a4ceda2014-03-10 19:27:59 +02003860 nfrags = skb_shinfo(list_skb)->nr_frags;
3861 frag = skb_shinfo(list_skb)->frags;
Michael S. Tsirkin1fd819e2014-03-10 19:28:08 +02003862 frag_skb = list_skb;
Michael S. Tsirkin1a4ceda2014-03-10 19:27:59 +02003863 pos += skb_headlen(list_skb);
Herbert Xu9d8506c2013-11-21 11:10:04 -08003864
3865 while (pos < offset + len) {
3866 BUG_ON(i >= nfrags);
3867
Michael S. Tsirkin4e1beba2014-03-10 18:29:14 +02003868 size = skb_frag_size(frag);
Herbert Xu9d8506c2013-11-21 11:10:04 -08003869 if (pos + size > offset + len)
3870 break;
3871
3872 i++;
3873 pos += size;
Michael S. Tsirkin4e1beba2014-03-10 18:29:14 +02003874 frag++;
Herbert Xu9d8506c2013-11-21 11:10:04 -08003875 }
3876
Michael S. Tsirkin1a4ceda2014-03-10 19:27:59 +02003877 nskb = skb_clone(list_skb, GFP_ATOMIC);
3878 list_skb = list_skb->next;
Herbert Xu89319d382008-12-15 23:26:06 -08003879
3880 if (unlikely(!nskb))
3881 goto err;
3882
Herbert Xu9d8506c2013-11-21 11:10:04 -08003883 if (unlikely(pskb_trim(nskb, len))) {
3884 kfree_skb(nskb);
3885 goto err;
3886 }
3887
Alexander Duyckec47ea82012-05-04 14:26:56 +00003888 hsize = skb_end_offset(nskb);
Herbert Xu89319d382008-12-15 23:26:06 -08003889 if (skb_cow_head(nskb, doffset + headroom)) {
3890 kfree_skb(nskb);
3891 goto err;
3892 }
3893
Alexander Duyckec47ea82012-05-04 14:26:56 +00003894 nskb->truesize += skb_end_offset(nskb) - hsize;
Herbert Xu89319d382008-12-15 23:26:06 -08003895 skb_release_head_state(nskb);
3896 __skb_push(nskb, doffset);
3897 } else {
Mel Gormanc93bdd02012-07-31 16:44:19 -07003898 nskb = __alloc_skb(hsize + doffset + headroom,
Michael S. Tsirkindf5771f2014-03-10 18:29:19 +02003899 GFP_ATOMIC, skb_alloc_rx_flag(head_skb),
Mel Gormanc93bdd02012-07-31 16:44:19 -07003900 NUMA_NO_NODE);
Herbert Xu89319d382008-12-15 23:26:06 -08003901
3902 if (unlikely(!nskb))
3903 goto err;
3904
3905 skb_reserve(nskb, headroom);
3906 __skb_put(nskb, doffset);
3907 }
Herbert Xuf4c50d92006-06-22 03:02:40 -07003908
3909 if (segs)
3910 tail->next = nskb;
3911 else
3912 segs = nskb;
3913 tail = nskb;
3914
Michael S. Tsirkindf5771f2014-03-10 18:29:19 +02003915 __copy_skb_header(nskb, head_skb);
Herbert Xuf4c50d92006-06-22 03:02:40 -07003916
Eric Dumazet030737b2013-10-19 11:42:54 -07003917 skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
Vlad Yasevichfcdfe3a2014-07-31 10:33:06 -04003918 skb_reset_mac_len(nskb);
Pravin B Shelar68c33162013-02-14 14:02:41 +00003919
Michael S. Tsirkindf5771f2014-03-10 18:29:19 +02003920 skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
Pravin B Shelar68c33162013-02-14 14:02:41 +00003921 nskb->data - tnl_hlen,
3922 doffset + tnl_hlen);
Herbert Xu89319d382008-12-15 23:26:06 -08003923
Herbert Xu9d8506c2013-11-21 11:10:04 -08003924 if (nskb->len == len + doffset)
Simon Horman1cdbcb72013-05-19 15:46:49 +00003925 goto perform_csum_check;
Herbert Xu89319d382008-12-15 23:26:06 -08003926
Alexander Duyck7fbeffe2016-02-05 15:27:43 -08003927 if (!sg) {
Yadu Kishore1454c9f2020-03-17 14:08:38 +05303928 if (!csum) {
3929 if (!nskb->remcsum_offload)
3930 nskb->ip_summed = CHECKSUM_NONE;
3931 SKB_GSO_CB(nskb)->csum =
3932 skb_copy_and_csum_bits(head_skb, offset,
3933 skb_put(nskb,
3934 len),
3935 len, 0);
3936 SKB_GSO_CB(nskb)->csum_start =
3937 skb_headroom(nskb) + doffset;
3938 } else {
3939 skb_copy_bits(head_skb, offset,
3940 skb_put(nskb, len),
3941 len);
3942 }
Herbert Xuf4c50d92006-06-22 03:02:40 -07003943 continue;
3944 }
3945
Michael S. Tsirkin8cb19902014-03-10 18:29:04 +02003946 nskb_frag = skb_shinfo(nskb)->frags;
Herbert Xuf4c50d92006-06-22 03:02:40 -07003947
Michael S. Tsirkindf5771f2014-03-10 18:29:19 +02003948 skb_copy_from_linear_data_offset(head_skb, offset,
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03003949 skb_put(nskb, hsize), hsize);
Herbert Xuf4c50d92006-06-22 03:02:40 -07003950
Willem de Bruijnfff88032017-06-08 11:35:03 -04003951 skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags &
3952 SKBTX_SHARED_FRAG;
Eric Dumazetcef401d2013-01-25 20:34:37 +00003953
Willem de Bruijnbf5c25d2017-12-22 19:00:17 -05003954 if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
3955 skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
3956 goto err;
3957
Herbert Xu9d8506c2013-11-21 11:10:04 -08003958 while (pos < offset + len) {
3959 if (i >= nfrags) {
Herbert Xu9d8506c2013-11-21 11:10:04 -08003960 i = 0;
Michael S. Tsirkin1a4ceda2014-03-10 19:27:59 +02003961 nfrags = skb_shinfo(list_skb)->nr_frags;
3962 frag = skb_shinfo(list_skb)->frags;
Michael S. Tsirkin1fd819e2014-03-10 19:28:08 +02003963 frag_skb = list_skb;
Yonghong Song13acc942018-03-21 16:31:03 -07003964 if (!skb_headlen(list_skb)) {
3965 BUG_ON(!nfrags);
3966 } else {
3967 BUG_ON(!list_skb->head_frag);
Herbert Xu9d8506c2013-11-21 11:10:04 -08003968
Yonghong Song13acc942018-03-21 16:31:03 -07003969 /* to make room for head_frag. */
3970 i--;
3971 frag--;
3972 }
Willem de Bruijnbf5c25d2017-12-22 19:00:17 -05003973 if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
3974 skb_zerocopy_clone(nskb, frag_skb,
3975 GFP_ATOMIC))
3976 goto err;
3977
Michael S. Tsirkin1a4ceda2014-03-10 19:27:59 +02003978 list_skb = list_skb->next;
Herbert Xu9d8506c2013-11-21 11:10:04 -08003979 }
3980
3981 if (unlikely(skb_shinfo(nskb)->nr_frags >=
3982 MAX_SKB_FRAGS)) {
3983 net_warn_ratelimited(
3984 "skb_segment: too many frags: %u %u\n",
3985 pos, mss);
Eric Dumazetff907a12018-07-19 16:04:38 -07003986 err = -EINVAL;
Herbert Xu9d8506c2013-11-21 11:10:04 -08003987 goto err;
3988 }
3989
Yonghong Song13acc942018-03-21 16:31:03 -07003990 *nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag;
Michael S. Tsirkin8cb19902014-03-10 18:29:04 +02003991 __skb_frag_ref(nskb_frag);
3992 size = skb_frag_size(nskb_frag);
Herbert Xuf4c50d92006-06-22 03:02:40 -07003993
3994 if (pos < offset) {
Jonathan Lemonb54c9d52019-07-30 07:40:33 -07003995 skb_frag_off_add(nskb_frag, offset - pos);
Michael S. Tsirkin8cb19902014-03-10 18:29:04 +02003996 skb_frag_size_sub(nskb_frag, offset - pos);
Herbert Xuf4c50d92006-06-22 03:02:40 -07003997 }
3998
Herbert Xu89319d382008-12-15 23:26:06 -08003999 skb_shinfo(nskb)->nr_frags++;
Herbert Xuf4c50d92006-06-22 03:02:40 -07004000
4001 if (pos + size <= offset + len) {
4002 i++;
Michael S. Tsirkin4e1beba2014-03-10 18:29:14 +02004003 frag++;
Herbert Xuf4c50d92006-06-22 03:02:40 -07004004 pos += size;
4005 } else {
Michael S. Tsirkin8cb19902014-03-10 18:29:04 +02004006 skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
Herbert Xu89319d382008-12-15 23:26:06 -08004007 goto skip_fraglist;
Herbert Xuf4c50d92006-06-22 03:02:40 -07004008 }
4009
Michael S. Tsirkin8cb19902014-03-10 18:29:04 +02004010 nskb_frag++;
Herbert Xuf4c50d92006-06-22 03:02:40 -07004011 }
4012
Herbert Xu89319d382008-12-15 23:26:06 -08004013skip_fraglist:
Herbert Xuf4c50d92006-06-22 03:02:40 -07004014 nskb->data_len = len - hsize;
4015 nskb->len += nskb->data_len;
4016 nskb->truesize += nskb->data_len;
Pravin B Shelarec5f0612013-03-07 09:28:01 +00004017
Simon Horman1cdbcb72013-05-19 15:46:49 +00004018perform_csum_check:
Alexander Duyck7fbeffe2016-02-05 15:27:43 -08004019 if (!csum) {
Eric Dumazetff907a12018-07-19 16:04:38 -07004020 if (skb_has_shared_frag(nskb) &&
4021 __skb_linearize(nskb))
4022 goto err;
4023
Alexander Duyck7fbeffe2016-02-05 15:27:43 -08004024 if (!nskb->remcsum_offload)
4025 nskb->ip_summed = CHECKSUM_NONE;
Alexander Duyck76443452016-02-05 15:27:37 -08004026 SKB_GSO_CB(nskb)->csum =
4027 skb_checksum(nskb, doffset,
4028 nskb->len - doffset, 0);
Tom Herbert7e2b10c2014-06-04 17:20:02 -07004029 SKB_GSO_CB(nskb)->csum_start =
Alexander Duyck76443452016-02-05 15:27:37 -08004030 skb_headroom(nskb) + doffset;
Pravin B Shelarec5f0612013-03-07 09:28:01 +00004031 }
Michael S. Tsirkindf5771f2014-03-10 18:29:19 +02004032 } while ((offset += len) < head_skb->len);
Herbert Xuf4c50d92006-06-22 03:02:40 -07004033
Eric Dumazetbec3cfd2014-10-03 20:59:19 -07004034 /* Some callers want to get the end of the list.
4035 * Put it in segs->prev to avoid walking the list.
4036 * (see validate_xmit_skb_list() for example)
4037 */
4038 segs->prev = tail;
Toshiaki Makita432c8562014-10-27 10:30:51 -07004039
Alexander Duyck802ab552016-04-10 21:45:03 -04004040 if (partial_segs) {
Steffen Klassert07b26c92016-09-19 12:58:47 +02004041 struct sk_buff *iter;
Alexander Duyck802ab552016-04-10 21:45:03 -04004042 int type = skb_shinfo(head_skb)->gso_type;
Steffen Klassert07b26c92016-09-19 12:58:47 +02004043 unsigned short gso_size = skb_shinfo(head_skb)->gso_size;
Alexander Duyck802ab552016-04-10 21:45:03 -04004044
4045 /* Update type to add partial and then remove dodgy if set */
Steffen Klassert07b26c92016-09-19 12:58:47 +02004046 type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL;
Alexander Duyck802ab552016-04-10 21:45:03 -04004047 type &= ~SKB_GSO_DODGY;
4048
4049 /* Update GSO info and prepare to start updating headers on
4050 * our way back down the stack of protocols.
4051 */
Steffen Klassert07b26c92016-09-19 12:58:47 +02004052 for (iter = segs; iter; iter = iter->next) {
4053 skb_shinfo(iter)->gso_size = gso_size;
4054 skb_shinfo(iter)->gso_segs = partial_segs;
4055 skb_shinfo(iter)->gso_type = type;
4056 SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset;
4057 }
4058
4059 if (tail->len - doffset <= gso_size)
4060 skb_shinfo(tail)->gso_size = 0;
4061 else if (tail != segs)
4062 skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size);
Alexander Duyck802ab552016-04-10 21:45:03 -04004063 }
4064
Toshiaki Makita432c8562014-10-27 10:30:51 -07004065	/* The following permits correct backpressure for protocols
4066 * using skb_set_owner_w().
4067	 * The idea is to transfer ownership from head_skb to the last segment.
4068 */
4069 if (head_skb->destructor == sock_wfree) {
4070 swap(tail->truesize, head_skb->truesize);
4071 swap(tail->destructor, head_skb->destructor);
4072 swap(tail->sk, head_skb->sk);
4073 }
Herbert Xuf4c50d92006-06-22 03:02:40 -07004074 return segs;
4075
4076err:
Eric Dumazet289dccb2013-12-20 14:29:08 -08004077 kfree_skb_list(segs);
Herbert Xuf4c50d92006-06-22 03:02:40 -07004078 return ERR_PTR(err);
4079}
Herbert Xuf4c50d92006-06-22 03:02:40 -07004080EXPORT_SYMBOL_GPL(skb_segment);
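/* Illustrative sketch (not part of the original source): how a caller
 * typically consumes the segment list returned by skb_segment().  The
 * function name and the transmit hook below are hypothetical; real callers
 * reach this code through skb_gso_segment() and their own xmit path.
 *
 *	static int example_segment_and_xmit(struct sk_buff *skb,
 *					    netdev_features_t features)
 *	{
 *		struct sk_buff *segs, *nskb;
 *
 *		segs = skb_segment(skb, features);
 *		if (IS_ERR(segs))
 *			return PTR_ERR(segs);
 *
 *		consume_skb(skb);	// original head skb no longer needed
 *		while (segs) {
 *			nskb = segs;
 *			segs = segs->next;
 *			nskb->next = NULL;
 *			example_xmit_one(nskb);	// hypothetical transmit hook
 *		}
 *		return 0;
 *	}
 */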
4081
David Millerd4546c22018-06-24 14:13:49 +09004082int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
Herbert Xu71d93b32008-12-15 23:42:33 -08004083{
Eric Dumazet8a291112013-10-08 09:02:23 -07004084 struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
Herbert Xu67147ba2009-05-26 18:50:22 +00004085 unsigned int offset = skb_gro_offset(skb);
4086 unsigned int headlen = skb_headlen(skb);
Eric Dumazet8a291112013-10-08 09:02:23 -07004087 unsigned int len = skb_gro_len(skb);
Eric Dumazet715dc1f2012-05-02 23:33:21 +00004088 unsigned int delta_truesize;
David Millerd4546c22018-06-24 14:13:49 +09004089 struct sk_buff *lp;
Herbert Xu71d93b32008-12-15 23:42:33 -08004090
Steffen Klassert0ab03f32019-04-02 08:16:03 +02004091 if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush))
Herbert Xu71d93b32008-12-15 23:42:33 -08004092 return -E2BIG;
4093
Eric Dumazet29e98242014-05-16 11:34:37 -07004094 lp = NAPI_GRO_CB(p)->last;
Eric Dumazet8a291112013-10-08 09:02:23 -07004095 pinfo = skb_shinfo(lp);
4096
4097 if (headlen <= offset) {
Herbert Xu42da6992009-05-26 18:50:19 +00004098 skb_frag_t *frag;
Herbert Xu66e92fc2009-05-26 18:50:32 +00004099 skb_frag_t *frag2;
Herbert Xu9aaa1562009-05-26 18:50:33 +00004100 int i = skbinfo->nr_frags;
4101 int nr_frags = pinfo->nr_frags + i;
Herbert Xu42da6992009-05-26 18:50:19 +00004102
Herbert Xu66e92fc2009-05-26 18:50:32 +00004103 if (nr_frags > MAX_SKB_FRAGS)
Eric Dumazet8a291112013-10-08 09:02:23 -07004104 goto merge;
Herbert Xu81705ad2009-01-29 14:19:51 +00004105
Eric Dumazet8a291112013-10-08 09:02:23 -07004106 offset -= headlen;
Herbert Xu9aaa1562009-05-26 18:50:33 +00004107 pinfo->nr_frags = nr_frags;
4108 skbinfo->nr_frags = 0;
Herbert Xuf5572062009-01-14 20:40:03 -08004109
Herbert Xu9aaa1562009-05-26 18:50:33 +00004110 frag = pinfo->frags + nr_frags;
4111 frag2 = skbinfo->frags + i;
Herbert Xu66e92fc2009-05-26 18:50:32 +00004112 do {
4113 *--frag = *--frag2;
4114 } while (--i);
4115
Jonathan Lemonb54c9d52019-07-30 07:40:33 -07004116 skb_frag_off_add(frag, offset);
Eric Dumazet9e903e02011-10-18 21:00:24 +00004117 skb_frag_size_sub(frag, offset);
Herbert Xu66e92fc2009-05-26 18:50:32 +00004118
Eric Dumazet715dc1f2012-05-02 23:33:21 +00004119		/* all fragments' truesize: remove (head size + sk_buff) */
Alexander Duyckec47ea82012-05-04 14:26:56 +00004120 delta_truesize = skb->truesize -
4121 SKB_TRUESIZE(skb_end_offset(skb));
Eric Dumazet715dc1f2012-05-02 23:33:21 +00004122
Herbert Xuf5572062009-01-14 20:40:03 -08004123 skb->truesize -= skb->data_len;
4124 skb->len -= skb->data_len;
4125 skb->data_len = 0;
4126
Eric Dumazet715dc1f2012-05-02 23:33:21 +00004127 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
Herbert Xu5d38a072009-01-04 16:13:40 -08004128 goto done;
Eric Dumazetd7e88832012-04-30 08:10:34 +00004129 } else if (skb->head_frag) {
4130 int nr_frags = pinfo->nr_frags;
4131 skb_frag_t *frag = pinfo->frags + nr_frags;
4132 struct page *page = virt_to_head_page(skb->head);
4133 unsigned int first_size = headlen - offset;
4134 unsigned int first_offset;
4135
4136 if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
Eric Dumazet8a291112013-10-08 09:02:23 -07004137 goto merge;
Eric Dumazetd7e88832012-04-30 08:10:34 +00004138
4139 first_offset = skb->data -
4140 (unsigned char *)page_address(page) +
4141 offset;
4142
4143 pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;
4144
Matthew Wilcox (Oracle)d8e18a52019-07-22 20:08:26 -07004145 __skb_frag_set_page(frag, page);
Jonathan Lemonb54c9d52019-07-30 07:40:33 -07004146 skb_frag_off_set(frag, first_offset);
Eric Dumazetd7e88832012-04-30 08:10:34 +00004147 skb_frag_size_set(frag, first_size);
4148
4149 memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
4150		/* We don't need to clear skbinfo->nr_frags here */
4151
Eric Dumazet715dc1f2012-05-02 23:33:21 +00004152 delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
Eric Dumazetd7e88832012-04-30 08:10:34 +00004153 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
4154 goto done;
Eric Dumazet8a291112013-10-08 09:02:23 -07004155 }
Herbert Xu71d93b32008-12-15 23:42:33 -08004156
4157merge:
Eric Dumazet715dc1f2012-05-02 23:33:21 +00004158 delta_truesize = skb->truesize;
Herbert Xu67147ba2009-05-26 18:50:22 +00004159 if (offset > headlen) {
Michal Schmidtd1dc7ab2011-01-24 12:08:48 +00004160 unsigned int eat = offset - headlen;
4161
Jonathan Lemonb54c9d52019-07-30 07:40:33 -07004162 skb_frag_off_add(&skbinfo->frags[0], eat);
Eric Dumazet9e903e02011-10-18 21:00:24 +00004163 skb_frag_size_sub(&skbinfo->frags[0], eat);
Michal Schmidtd1dc7ab2011-01-24 12:08:48 +00004164 skb->data_len -= eat;
4165 skb->len -= eat;
Herbert Xu67147ba2009-05-26 18:50:22 +00004166 offset = headlen;
Herbert Xu56035022009-02-05 21:26:52 -08004167 }
4168
Herbert Xu67147ba2009-05-26 18:50:22 +00004169 __skb_pull(skb, offset);
Herbert Xu56035022009-02-05 21:26:52 -08004170
Eric Dumazet29e98242014-05-16 11:34:37 -07004171 if (NAPI_GRO_CB(p)->last == p)
Eric Dumazet8a291112013-10-08 09:02:23 -07004172 skb_shinfo(p)->frag_list = skb;
4173 else
4174 NAPI_GRO_CB(p)->last->next = skb;
Eric Dumazetc3c7c252012-12-06 13:54:59 +00004175 NAPI_GRO_CB(p)->last = skb;
Eric Dumazetf4a775d2014-09-22 16:29:32 -07004176 __skb_header_release(skb);
Eric Dumazet8a291112013-10-08 09:02:23 -07004177 lp = p;
Herbert Xu71d93b32008-12-15 23:42:33 -08004178
Herbert Xu5d38a072009-01-04 16:13:40 -08004179done:
4180 NAPI_GRO_CB(p)->count++;
Herbert Xu37fe4732009-01-17 19:48:13 +00004181 p->data_len += len;
Eric Dumazet715dc1f2012-05-02 23:33:21 +00004182 p->truesize += delta_truesize;
Herbert Xu37fe4732009-01-17 19:48:13 +00004183 p->len += len;
Eric Dumazet8a291112013-10-08 09:02:23 -07004184 if (lp != p) {
4185 lp->data_len += len;
4186 lp->truesize += delta_truesize;
4187 lp->len += len;
4188 }
Herbert Xu71d93b32008-12-15 23:42:33 -08004189 NAPI_GRO_CB(skb)->same_flow = 1;
4190 return 0;
4191}
Herbert Xu71d93b32008-12-15 23:42:33 -08004192
Florian Westphaldf5042f2018-12-18 17:15:16 +01004193#ifdef CONFIG_SKB_EXTENSIONS
4194#define SKB_EXT_ALIGN_VALUE 8
4195#define SKB_EXT_CHUNKSIZEOF(x) (ALIGN((sizeof(x)), SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE)
4196
4197static const u8 skb_ext_type_len[] = {
4198#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
4199 [SKB_EXT_BRIDGE_NF] = SKB_EXT_CHUNKSIZEOF(struct nf_bridge_info),
4200#endif
Florian Westphal41650792018-12-18 17:15:27 +01004201#ifdef CONFIG_XFRM
4202 [SKB_EXT_SEC_PATH] = SKB_EXT_CHUNKSIZEOF(struct sec_path),
4203#endif
Paul Blakey95a72332019-09-04 16:56:37 +03004204#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
4205 [TC_SKB_EXT] = SKB_EXT_CHUNKSIZEOF(struct tc_skb_ext),
4206#endif
Mat Martineau3ee17bc2020-01-09 07:59:19 -08004207#if IS_ENABLED(CONFIG_MPTCP)
4208 [SKB_EXT_MPTCP] = SKB_EXT_CHUNKSIZEOF(struct mptcp_ext),
4209#endif
Florian Westphaldf5042f2018-12-18 17:15:16 +01004210};
4211
4212static __always_inline unsigned int skb_ext_total_length(void)
4213{
4214 return SKB_EXT_CHUNKSIZEOF(struct skb_ext) +
4215#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
4216 skb_ext_type_len[SKB_EXT_BRIDGE_NF] +
4217#endif
Florian Westphal41650792018-12-18 17:15:27 +01004218#ifdef CONFIG_XFRM
4219 skb_ext_type_len[SKB_EXT_SEC_PATH] +
4220#endif
Paul Blakey95a72332019-09-04 16:56:37 +03004221#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
4222 skb_ext_type_len[TC_SKB_EXT] +
4223#endif
Mat Martineau3ee17bc2020-01-09 07:59:19 -08004224#if IS_ENABLED(CONFIG_MPTCP)
4225 skb_ext_type_len[SKB_EXT_MPTCP] +
4226#endif
Florian Westphaldf5042f2018-12-18 17:15:16 +01004227 0;
4228}
4229
4230static void skb_extensions_init(void)
4231{
4232 BUILD_BUG_ON(SKB_EXT_NUM >= 8);
4233 BUILD_BUG_ON(skb_ext_total_length() > 255);
4234
4235 skbuff_ext_cache = kmem_cache_create("skbuff_ext_cache",
4236 SKB_EXT_ALIGN_VALUE * skb_ext_total_length(),
4237 0,
4238 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
4239 NULL);
4240}
4241#else
4242static void skb_extensions_init(void) {}
4243#endif
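/* Illustrative sketch (not part of the original source): how the extension
 * space carved out of skbuff_ext_cache is typically consumed.  Assumes a
 * kernel built with CONFIG_MPTCP so that SKB_EXT_MPTCP exists; any
 * registered extension id follows the same add/find pattern.
 *
 *	struct mptcp_ext *ext;
 *
 *	ext = skb_ext_add(skb, SKB_EXT_MPTCP);	// attach (or reuse) extension
 *	if (!ext)
 *		return -ENOMEM;
 *	memset(ext, 0, sizeof(*ext));		// skb_ext_add() does not zero it
 *
 *	// later, possibly in another layer:
 *	ext = skb_ext_find(skb, SKB_EXT_MPTCP);	// NULL if not present
 */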
4244
Linus Torvalds1da177e2005-04-16 15:20:36 -07004245void __init skb_init(void)
4246{
Kees Cook79a8a642018-02-07 17:44:38 -08004247 skbuff_head_cache = kmem_cache_create_usercopy("skbuff_head_cache",
Linus Torvalds1da177e2005-04-16 15:20:36 -07004248 sizeof(struct sk_buff),
4249 0,
Alexey Dobriyane5d679f332006-08-26 19:25:52 -07004250 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
Kees Cook79a8a642018-02-07 17:44:38 -08004251 offsetof(struct sk_buff, cb),
4252 sizeof_field(struct sk_buff, cb),
Paul Mundt20c2df82007-07-20 10:11:58 +09004253 NULL);
David S. Millerd179cd12005-08-17 14:57:30 -07004254 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
Eric Dumazetd0bf4a92014-09-29 13:29:15 -07004255 sizeof(struct sk_buff_fclones),
David S. Millerd179cd12005-08-17 14:57:30 -07004256 0,
Alexey Dobriyane5d679f332006-08-26 19:25:52 -07004257 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
Paul Mundt20c2df82007-07-20 10:11:58 +09004258 NULL);
Florian Westphaldf5042f2018-12-18 17:15:16 +01004259 skb_extensions_init();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004260}
4261
David S. Miller51c739d2007-10-30 21:29:29 -07004262static int
Jason A. Donenfeld48a1df62017-06-04 04:16:22 +02004263__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len,
4264 unsigned int recursion_level)
David Howells716ea3a2007-04-02 20:19:53 -07004265{
David S. Miller1a028e52007-04-27 15:21:23 -07004266 int start = skb_headlen(skb);
4267 int i, copy = start - offset;
David S. Millerfbb398a2009-06-09 00:18:59 -07004268 struct sk_buff *frag_iter;
David Howells716ea3a2007-04-02 20:19:53 -07004269 int elt = 0;
4270
Jason A. Donenfeld48a1df62017-06-04 04:16:22 +02004271 if (unlikely(recursion_level >= 24))
4272 return -EMSGSIZE;
4273
David Howells716ea3a2007-04-02 20:19:53 -07004274 if (copy > 0) {
4275 if (copy > len)
4276 copy = len;
Jens Axboe642f149032007-10-24 11:20:47 +02004277 sg_set_buf(sg, skb->data + offset, copy);
David Howells716ea3a2007-04-02 20:19:53 -07004278 elt++;
4279 if ((len -= copy) == 0)
4280 return elt;
4281 offset += copy;
4282 }
4283
4284 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
David S. Miller1a028e52007-04-27 15:21:23 -07004285 int end;
David Howells716ea3a2007-04-02 20:19:53 -07004286
Ilpo Järvinen547b7922008-07-25 21:43:18 -07004287 WARN_ON(start > offset + len);
David S. Miller1a028e52007-04-27 15:21:23 -07004288
Eric Dumazet9e903e02011-10-18 21:00:24 +00004289 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
David Howells716ea3a2007-04-02 20:19:53 -07004290 if ((copy = end - offset) > 0) {
4291 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
Jason A. Donenfeld48a1df62017-06-04 04:16:22 +02004292 if (unlikely(elt && sg_is_last(&sg[elt - 1])))
4293 return -EMSGSIZE;
David Howells716ea3a2007-04-02 20:19:53 -07004294
4295 if (copy > len)
4296 copy = len;
Ian Campbellea2ab692011-08-22 23:44:58 +00004297 sg_set_page(&sg[elt], skb_frag_page(frag), copy,
Jonathan Lemonb54c9d52019-07-30 07:40:33 -07004298 skb_frag_off(frag) + offset - start);
David Howells716ea3a2007-04-02 20:19:53 -07004299 elt++;
4300 if (!(len -= copy))
4301 return elt;
4302 offset += copy;
4303 }
David S. Miller1a028e52007-04-27 15:21:23 -07004304 start = end;
David Howells716ea3a2007-04-02 20:19:53 -07004305 }
4306
David S. Millerfbb398a2009-06-09 00:18:59 -07004307 skb_walk_frags(skb, frag_iter) {
Jason A. Donenfeld48a1df62017-06-04 04:16:22 +02004308 int end, ret;
David Howells716ea3a2007-04-02 20:19:53 -07004309
David S. Millerfbb398a2009-06-09 00:18:59 -07004310 WARN_ON(start > offset + len);
David Howells716ea3a2007-04-02 20:19:53 -07004311
David S. Millerfbb398a2009-06-09 00:18:59 -07004312 end = start + frag_iter->len;
4313 if ((copy = end - offset) > 0) {
Jason A. Donenfeld48a1df62017-06-04 04:16:22 +02004314 if (unlikely(elt && sg_is_last(&sg[elt - 1])))
4315 return -EMSGSIZE;
4316
David S. Millerfbb398a2009-06-09 00:18:59 -07004317 if (copy > len)
4318 copy = len;
Jason A. Donenfeld48a1df62017-06-04 04:16:22 +02004319 ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start,
4320 copy, recursion_level + 1);
4321 if (unlikely(ret < 0))
4322 return ret;
4323 elt += ret;
David S. Millerfbb398a2009-06-09 00:18:59 -07004324 if ((len -= copy) == 0)
4325 return elt;
4326 offset += copy;
David Howells716ea3a2007-04-02 20:19:53 -07004327 }
David S. Millerfbb398a2009-06-09 00:18:59 -07004328 start = end;
David Howells716ea3a2007-04-02 20:19:53 -07004329 }
4330 BUG_ON(len);
4331 return elt;
4332}
4333
Jason A. Donenfeld48a1df62017-06-04 04:16:22 +02004334/**
4335 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
4336 * @skb: Socket buffer containing the buffers to be mapped
4337 * @sg: The scatter-gather list to map into
4338 * @offset: The offset into the buffer's contents to start mapping
4339 * @len: Length of buffer space to be mapped
4340 *
4341 * Fill the specified scatter-gather list with mappings/pointers into a
4342 * region of the buffer space attached to a socket buffer. Returns either
4343 * the number of scatterlist items used, or -EMSGSIZE if the contents
4344 * could not fit.
4345 */
4346int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
4347{
4348 int nsg = __skb_to_sgvec(skb, sg, offset, len, 0);
4349
4350 if (nsg <= 0)
4351 return nsg;
4352
4353 sg_mark_end(&sg[nsg - 1]);
4354
4355 return nsg;
4356}
4357EXPORT_SYMBOL_GPL(skb_to_sgvec);
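/* Illustrative sketch (not part of the original source): mapping an skb
 * into a scatterlist, e.g. before handing it to a crypto or DMA engine.
 * MAX_EXAMPLE_ENTS and the on-stack sg array are assumptions of this sketch.
 *
 *	struct scatterlist sg[MAX_EXAMPLE_ENTS];
 *	int nsg;
 *
 *	sg_init_table(sg, ARRAY_SIZE(sg));
 *	nsg = skb_to_sgvec(skb, sg, 0, skb->len);
 *	if (nsg < 0)
 *		return nsg;	// -EMSGSIZE if the skb did not fit
 *	// sg[0..nsg-1] now describe the skb data; the last entry is marked
 */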
4358
Fan Du25a91d82014-01-18 09:54:23 +08004359/* As compared with skb_to_sgvec, skb_to_sgvec_nomark only maps skb to the given
4360 * sglist without marking the sg which contains the last skb data as the end.
4361 * So the caller can manipulate the sg list at will when padding new data after
4362 * the first call, without calling sg_unmark_end to extend the sg list.
4363 *
4364 * Scenario to use skb_to_sgvec_nomark:
4365 * 1. sg_init_table
4366 * 2. skb_to_sgvec_nomark(payload1)
4367 * 3. skb_to_sgvec_nomark(payload2)
4368 *
4369 * This is equivalent to:
4370 * 1. sg_init_table
4371 * 2. skb_to_sgvec(payload1)
4372 * 3. sg_unmark_end
4373 * 4. skb_to_sgvec(payload2)
4374 *
4375 * When mapping multiple payloads conditionally, skb_to_sgvec_nomark
4376 * is preferable (see the illustrative sketch after this function).
4377 */
4378int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
4379 int offset, int len)
4380{
Jason A. Donenfeld48a1df62017-06-04 04:16:22 +02004381 return __skb_to_sgvec(skb, sg, offset, len, 0);
Fan Du25a91d82014-01-18 09:54:23 +08004382}
4383EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
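/* Illustrative sketch (not part of the original source) of the scenario
 * described above: two skbs whose payloads must land back-to-back in a
 * single scatterlist.  skb1/skb2 and the size of the sg array are
 * assumptions of this sketch; error handling is trimmed for brevity.
 *
 *	struct scatterlist sg[MAX_EXAMPLE_ENTS];
 *	int n1, n2;
 *
 *	sg_init_table(sg, ARRAY_SIZE(sg));
 *
 *	n1 = skb_to_sgvec_nomark(skb1, sg, 0, skb1->len);
 *	if (n1 < 0)
 *		return n1;
 *	n2 = skb_to_sgvec_nomark(skb2, sg + n1, 0, skb2->len);
 *	if (n2 < 0)
 *		return n2;
 *
 *	sg_mark_end(&sg[n1 + n2 - 1]);	// terminate the list ourselves
 */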
4384
David S. Miller51c739d2007-10-30 21:29:29 -07004385
David S. Miller51c739d2007-10-30 21:29:29 -07004386
David Howells716ea3a2007-04-02 20:19:53 -07004387/**
4388 * skb_cow_data - Check that a socket buffer's data buffers are writable
4389 * @skb: The socket buffer to check.
4390 * @tailbits: Amount of trailing space to be added
4391 * @trailer: Returned pointer to the skb where the @tailbits space begins
4392 *
4393 * Make sure that the data buffers attached to a socket buffer are
4394 * writable. If they are not, private copies are made of the data buffers
4395 * and the socket buffer is set to use these instead.
4396 *
4397 * If @tailbits is given, make sure that there is space to write @tailbits
4398 * bytes of data beyond current end of socket buffer. @trailer will be
4399 * set to point to the skb in which this space begins.
4400 *
4401 * The number of scatterlist elements required to completely map the
4402 * COW'd and extended socket buffer will be returned.
4403 */
4404int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
4405{
4406 int copyflag;
4407 int elt;
4408 struct sk_buff *skb1, **skb_p;
4409
4410 /* If skb is cloned or its head is paged, reallocate
4411 * head pulling out all the pages (pages are considered not writable
4412 * at the moment even if they are anonymous).
4413 */
4414 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
Miaohe Linc15fc192020-08-01 17:30:23 +08004415 !__pskb_pull_tail(skb, __skb_pagelen(skb)))
David Howells716ea3a2007-04-02 20:19:53 -07004416 return -ENOMEM;
4417
4418 /* Easy case. Most of packets will go this way. */
David S. Miller21dc3302010-08-23 00:13:46 -07004419 if (!skb_has_frag_list(skb)) {
David Howells716ea3a2007-04-02 20:19:53 -07004420		/* A bit of trouble: not enough space for the trailer.
4421		 * This should not happen when the stack is tuned to generate
4422		 * good frames. OK, on a miss we reallocate and reserve even more
4423		 * space; 128 bytes is fair. */
4424
4425 if (skb_tailroom(skb) < tailbits &&
4426 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
4427 return -ENOMEM;
4428
4429 /* Voila! */
4430 *trailer = skb;
4431 return 1;
4432 }
4433
4434	/* Misery. We are in trouble, going to mince the fragments... */
4435
4436 elt = 1;
4437 skb_p = &skb_shinfo(skb)->frag_list;
4438 copyflag = 0;
4439
4440 while ((skb1 = *skb_p) != NULL) {
4441 int ntail = 0;
4442
4443		/* The fragment is partially pulled by someone;
4444 * this can happen on input. Copy it and everything
4445 * after it. */
4446
4447 if (skb_shared(skb1))
4448 copyflag = 1;
4449
4450 /* If the skb is the last, worry about trailer. */
4451
4452 if (skb1->next == NULL && tailbits) {
4453 if (skb_shinfo(skb1)->nr_frags ||
David S. Miller21dc3302010-08-23 00:13:46 -07004454 skb_has_frag_list(skb1) ||
David Howells716ea3a2007-04-02 20:19:53 -07004455 skb_tailroom(skb1) < tailbits)
4456 ntail = tailbits + 128;
4457 }
4458
4459 if (copyflag ||
4460 skb_cloned(skb1) ||
4461 ntail ||
4462 skb_shinfo(skb1)->nr_frags ||
David S. Miller21dc3302010-08-23 00:13:46 -07004463 skb_has_frag_list(skb1)) {
David Howells716ea3a2007-04-02 20:19:53 -07004464 struct sk_buff *skb2;
4465
4466 /* Fuck, we are miserable poor guys... */
4467 if (ntail == 0)
4468 skb2 = skb_copy(skb1, GFP_ATOMIC);
4469 else
4470 skb2 = skb_copy_expand(skb1,
4471 skb_headroom(skb1),
4472 ntail,
4473 GFP_ATOMIC);
4474 if (unlikely(skb2 == NULL))
4475 return -ENOMEM;
4476
4477 if (skb1->sk)
4478 skb_set_owner_w(skb2, skb1->sk);
4479
4480 /* Looking around. Are we still alive?
4481 * OK, link new skb, drop old one */
4482
4483 skb2->next = skb1->next;
4484 *skb_p = skb2;
4485 kfree_skb(skb1);
4486 skb1 = skb2;
4487 }
4488 elt++;
4489 *trailer = skb1;
4490 skb_p = &skb1->next;
4491 }
4492
4493 return elt;
4494}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08004495EXPORT_SYMBOL_GPL(skb_cow_data);
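/* Illustrative sketch (not part of the original source): the IPsec-style
 * pattern of making an skb writable and reserving room for a trailer
 * before an in-place transformation.  trailer_len is an assumption of
 * this sketch.
 *
 *	struct sk_buff *trailer;
 *	int nfrags;
 *
 *	nfrags = skb_cow_data(skb, trailer_len, &trailer);
 *	if (nfrags < 0)
 *		return nfrags;
 *	// nfrags scatterlist entries suffice to map the (COWed) skb,
 *	// and the trailer bytes can now be appended:
 *	pskb_put(skb, trailer, trailer_len);
 */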
David Howells716ea3a2007-04-02 20:19:53 -07004496
Eric Dumazetb1faf562010-05-31 23:44:05 -07004497static void sock_rmem_free(struct sk_buff *skb)
4498{
4499 struct sock *sk = skb->sk;
4500
4501 atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
4502}
4503
Soheil Hassas Yeganeh8605330a2017-03-18 17:02:59 -04004504static void skb_set_err_queue(struct sk_buff *skb)
4505{
4506 /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING.
4507 * So, it is safe to (mis)use it to mark skbs on the error queue.
4508 */
4509 skb->pkt_type = PACKET_OUTGOING;
4510 BUILD_BUG_ON(PACKET_OUTGOING == 0);
4511}
4512
Eric Dumazetb1faf562010-05-31 23:44:05 -07004513/*
4514 * Note: We don't mem charge error packets (no sk_forward_alloc changes)
4515 */
4516int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
4517{
4518 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
Eric Dumazetebb3b782019-10-10 20:17:44 -07004519 (unsigned int)READ_ONCE(sk->sk_rcvbuf))
Eric Dumazetb1faf562010-05-31 23:44:05 -07004520 return -ENOMEM;
4521
4522 skb_orphan(skb);
4523 skb->sk = sk;
4524 skb->destructor = sock_rmem_free;
4525 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
Soheil Hassas Yeganeh8605330a2017-03-18 17:02:59 -04004526 skb_set_err_queue(skb);
Eric Dumazetb1faf562010-05-31 23:44:05 -07004527
Eric Dumazetabb57ea2011-05-18 02:21:31 -04004528 /* before exiting rcu section, make sure dst is refcounted */
4529 skb_dst_force(skb);
4530
Eric Dumazetb1faf562010-05-31 23:44:05 -07004531 skb_queue_tail(&sk->sk_error_queue, skb);
4532 if (!sock_flag(sk, SOCK_DEAD))
Vinicius Costa Gomes6e5d58f2018-03-14 13:32:09 -07004533 sk->sk_error_report(sk);
Eric Dumazetb1faf562010-05-31 23:44:05 -07004534 return 0;
4535}
4536EXPORT_SYMBOL(sock_queue_err_skb);
4537
Soheil Hassas Yeganeh83a1a1a2016-11-30 14:01:08 -05004538static bool is_icmp_err_skb(const struct sk_buff *skb)
4539{
4540 return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
4541 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6);
4542}
4543
Willem de Bruijn364a9e92014-08-31 21:30:27 -04004544struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
4545{
4546 struct sk_buff_head *q = &sk->sk_error_queue;
Soheil Hassas Yeganeh83a1a1a2016-11-30 14:01:08 -05004547 struct sk_buff *skb, *skb_next = NULL;
4548 bool icmp_next = false;
Eric Dumazet997d5c32015-02-18 05:47:55 -08004549 unsigned long flags;
Willem de Bruijn364a9e92014-08-31 21:30:27 -04004550
Eric Dumazet997d5c32015-02-18 05:47:55 -08004551 spin_lock_irqsave(&q->lock, flags);
Willem de Bruijn364a9e92014-08-31 21:30:27 -04004552 skb = __skb_dequeue(q);
Soheil Hassas Yeganeh38b25792017-06-02 12:38:22 -04004553 if (skb && (skb_next = skb_peek(q))) {
Soheil Hassas Yeganeh83a1a1a2016-11-30 14:01:08 -05004554 icmp_next = is_icmp_err_skb(skb_next);
Soheil Hassas Yeganeh38b25792017-06-02 12:38:22 -04004555 if (icmp_next)
4556 sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_origin;
4557 }
Eric Dumazet997d5c32015-02-18 05:47:55 -08004558 spin_unlock_irqrestore(&q->lock, flags);
Willem de Bruijn364a9e92014-08-31 21:30:27 -04004559
Soheil Hassas Yeganeh83a1a1a2016-11-30 14:01:08 -05004560 if (is_icmp_err_skb(skb) && !icmp_next)
4561 sk->sk_err = 0;
4562
4563 if (skb_next)
Willem de Bruijn364a9e92014-08-31 21:30:27 -04004564 sk->sk_error_report(sk);
4565
4566 return skb;
4567}
4568EXPORT_SYMBOL(sock_dequeue_err_skb);
4569
Alexander Duyckcab41c42014-09-10 18:05:26 -04004570/**
4571 * skb_clone_sk - create clone of skb, and take reference to socket
4572 * @skb: the skb to clone
4573 *
4574 * This function creates a clone of a buffer that holds a reference on
4575 * sk_refcnt. Buffers created via this function are meant to be
4576 * returned using sock_queue_err_skb, or freed via kfree_skb.
4577 *
4578 * When passing buffers allocated with this function to sock_queue_err_skb
4579 * it is necessary to wrap the call with sock_hold/sock_put in order to
4580 * prevent the socket from being released prior to being enqueued on
4581 * the sk_error_queue.
4582 */
Alexander Duyck62bccb82014-09-04 13:31:35 -04004583struct sk_buff *skb_clone_sk(struct sk_buff *skb)
4584{
4585 struct sock *sk = skb->sk;
4586 struct sk_buff *clone;
4587
Reshetova, Elena41c6d652017-06-30 13:08:01 +03004588 if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
Alexander Duyck62bccb82014-09-04 13:31:35 -04004589 return NULL;
4590
4591 clone = skb_clone(skb, GFP_ATOMIC);
4592 if (!clone) {
4593 sock_put(sk);
4594 return NULL;
4595 }
4596
4597 clone->sk = sk;
4598 clone->destructor = sock_efree;
4599
4600 return clone;
4601}
4602EXPORT_SYMBOL(skb_clone_sk);
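/* Illustrative sketch (not part of the original source): the usual driver
 * TX-timestamping pattern built on skb_clone_sk().  The clone is taken at
 * transmit time and completed later from the timestamp interrupt; the
 * hwtstamps variable is an assumption of this sketch.
 *
 *	// transmit path:
 *	clone = skb_clone_sk(skb);
 *
 *	// later, when the hardware timestamp becomes available:
 *	if (clone)
 *		skb_complete_tx_timestamp(clone, &hwtstamps);
 */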
4603
Alexander Duyck37846ef2014-09-04 13:31:10 -04004604static void __skb_complete_tx_timestamp(struct sk_buff *skb,
4605 struct sock *sk,
Soheil Hassas Yeganeh4ef1b282017-03-18 17:03:00 -04004606 int tstype,
4607 bool opt_stats)
Patrick Ohlyac45f602009-02-12 05:03:37 +00004608{
Patrick Ohlyac45f602009-02-12 05:03:37 +00004609 struct sock_exterr_skb *serr;
Patrick Ohlyac45f602009-02-12 05:03:37 +00004610 int err;
4611
Soheil Hassas Yeganeh4ef1b282017-03-18 17:03:00 -04004612 BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
4613
Patrick Ohlyac45f602009-02-12 05:03:37 +00004614 serr = SKB_EXT_ERR(skb);
4615 memset(serr, 0, sizeof(*serr));
4616 serr->ee.ee_errno = ENOMSG;
4617 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
Willem de Bruijne7fd2882014-08-04 22:11:48 -04004618 serr->ee.ee_info = tstype;
Soheil Hassas Yeganeh4ef1b282017-03-18 17:03:00 -04004619 serr->opt_stats = opt_stats;
Willem de Bruijn1862d622017-04-12 19:24:35 -04004620 serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
Willem de Bruijn4ed2d762014-08-04 22:11:49 -04004621 if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
Willem de Bruijn09c2d252014-08-04 22:11:47 -04004622 serr->ee.ee_data = skb_shinfo(skb)->tskey;
WANG Congac5cc972015-12-16 23:39:04 -08004623 if (sk->sk_protocol == IPPROTO_TCP &&
4624 sk->sk_type == SOCK_STREAM)
Willem de Bruijn4ed2d762014-08-04 22:11:49 -04004625 serr->ee.ee_data -= sk->sk_tskey;
4626 }
Eric Dumazet29030372010-05-29 00:20:48 -07004627
Patrick Ohlyac45f602009-02-12 05:03:37 +00004628 err = sock_queue_err_skb(sk, skb);
Eric Dumazet29030372010-05-29 00:20:48 -07004629
Patrick Ohlyac45f602009-02-12 05:03:37 +00004630 if (err)
4631 kfree_skb(skb);
4632}
Alexander Duyck37846ef2014-09-04 13:31:10 -04004633
Willem de Bruijnb245be12015-01-30 13:29:32 -05004634static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
4635{
4636 bool ret;
4637
4638 if (likely(sysctl_tstamp_allow_data || tsonly))
4639 return true;
4640
4641 read_lock_bh(&sk->sk_callback_lock);
4642 ret = sk->sk_socket && sk->sk_socket->file &&
4643 file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW);
4644 read_unlock_bh(&sk->sk_callback_lock);
4645 return ret;
4646}
4647
Alexander Duyck37846ef2014-09-04 13:31:10 -04004648void skb_complete_tx_timestamp(struct sk_buff *skb,
4649 struct skb_shared_hwtstamps *hwtstamps)
4650{
4651 struct sock *sk = skb->sk;
4652
Willem de Bruijnb245be12015-01-30 13:29:32 -05004653 if (!skb_may_tx_timestamp(sk, false))
Willem de Bruijn35b99df2017-12-13 14:41:06 -05004654 goto err;
Willem de Bruijnb245be12015-01-30 13:29:32 -05004655
Eric Dumazet9ac25fc2017-03-03 21:01:03 -08004656 /* Take a reference to prevent skb_orphan() from freeing the socket,
4657 * but only if the socket refcount is not zero.
4658 */
Reshetova, Elena41c6d652017-06-30 13:08:01 +03004659 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
Eric Dumazet9ac25fc2017-03-03 21:01:03 -08004660 *skb_hwtstamps(skb) = *hwtstamps;
Soheil Hassas Yeganeh4ef1b282017-03-18 17:03:00 -04004661 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
Eric Dumazet9ac25fc2017-03-03 21:01:03 -08004662 sock_put(sk);
Willem de Bruijn35b99df2017-12-13 14:41:06 -05004663 return;
Eric Dumazet9ac25fc2017-03-03 21:01:03 -08004664 }
Willem de Bruijn35b99df2017-12-13 14:41:06 -05004665
4666err:
4667 kfree_skb(skb);
Alexander Duyck37846ef2014-09-04 13:31:10 -04004668}
4669EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
4670
4671void __skb_tstamp_tx(struct sk_buff *orig_skb,
4672 struct skb_shared_hwtstamps *hwtstamps,
4673 struct sock *sk, int tstype)
4674{
4675 struct sk_buff *skb;
Soheil Hassas Yeganeh4ef1b282017-03-18 17:03:00 -04004676 bool tsonly, opt_stats = false;
Alexander Duyck37846ef2014-09-04 13:31:10 -04004677
Willem de Bruijn3a8dd972015-03-11 15:43:55 -04004678 if (!sk)
4679 return;
4680
Miroslav Lichvarb50a5c72017-05-19 17:52:40 +02004681 if (!hwtstamps && !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) &&
4682 skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS)
4683 return;
4684
Willem de Bruijn3a8dd972015-03-11 15:43:55 -04004685 tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
4686 if (!skb_may_tx_timestamp(sk, tsonly))
Alexander Duyck37846ef2014-09-04 13:31:10 -04004687 return;
4688
Francis Yan1c885802016-11-27 23:07:18 -08004689 if (tsonly) {
4690#ifdef CONFIG_INET
4691 if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
4692 sk->sk_protocol == IPPROTO_TCP &&
Soheil Hassas Yeganeh4ef1b282017-03-18 17:03:00 -04004693 sk->sk_type == SOCK_STREAM) {
Yousuk Seung48040792020-07-30 15:44:40 -07004694 skb = tcp_get_timestamping_opt_stats(sk, orig_skb);
Soheil Hassas Yeganeh4ef1b282017-03-18 17:03:00 -04004695 opt_stats = true;
4696 } else
Francis Yan1c885802016-11-27 23:07:18 -08004697#endif
4698 skb = alloc_skb(0, GFP_ATOMIC);
4699 } else {
Willem de Bruijn49ca0d82015-01-30 13:29:31 -05004700 skb = skb_clone(orig_skb, GFP_ATOMIC);
Francis Yan1c885802016-11-27 23:07:18 -08004701 }
Alexander Duyck37846ef2014-09-04 13:31:10 -04004702 if (!skb)
4703 return;
4704
Willem de Bruijn49ca0d82015-01-30 13:29:31 -05004705 if (tsonly) {
Willem de Bruijnfff88032017-06-08 11:35:03 -04004706 skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags &
4707 SKBTX_ANY_TSTAMP;
Willem de Bruijn49ca0d82015-01-30 13:29:31 -05004708 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey;
4709 }
4710
4711 if (hwtstamps)
4712 *skb_hwtstamps(skb) = *hwtstamps;
4713 else
4714 skb->tstamp = ktime_get_real();
4715
Soheil Hassas Yeganeh4ef1b282017-03-18 17:03:00 -04004716 __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats);
Alexander Duyck37846ef2014-09-04 13:31:10 -04004717}
Willem de Bruijne7fd2882014-08-04 22:11:48 -04004718EXPORT_SYMBOL_GPL(__skb_tstamp_tx);
4719
4720void skb_tstamp_tx(struct sk_buff *orig_skb,
4721 struct skb_shared_hwtstamps *hwtstamps)
4722{
4723 return __skb_tstamp_tx(orig_skb, hwtstamps, orig_skb->sk,
4724 SCM_TSTAMP_SND);
4725}
Patrick Ohlyac45f602009-02-12 05:03:37 +00004726EXPORT_SYMBOL_GPL(skb_tstamp_tx);
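/* Illustrative sketch (not part of the original source): the lighter
 * alternative to cloning in the driver - mark the skb in-progress at
 * transmit time and report the hardware timestamp once it is known.
 * The raw nanosecond value 'ns' is an assumption of this sketch.
 *
 *	struct skb_shared_hwtstamps hwts = { .hwtstamp = ns_to_ktime(ns) };
 *
 *	// transmit path, only when a hw timestamp was requested:
 *	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
 *		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 *
 *	// completion path, before freeing the skb:
 *	skb_tstamp_tx(skb, &hwts);
 */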
4727
Johannes Berg6e3e9392011-11-09 10:15:42 +01004728void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
4729{
4730 struct sock *sk = skb->sk;
4731 struct sock_exterr_skb *serr;
Eric Dumazetdd4f1072017-03-03 21:01:02 -08004732 int err = 1;
Johannes Berg6e3e9392011-11-09 10:15:42 +01004733
4734 skb->wifi_acked_valid = 1;
4735 skb->wifi_acked = acked;
4736
4737 serr = SKB_EXT_ERR(skb);
4738 memset(serr, 0, sizeof(*serr));
4739 serr->ee.ee_errno = ENOMSG;
4740 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
4741
Eric Dumazetdd4f1072017-03-03 21:01:02 -08004742 /* Take a reference to prevent skb_orphan() from freeing the socket,
4743 * but only if the socket refcount is not zero.
4744 */
Reshetova, Elena41c6d652017-06-30 13:08:01 +03004745 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
Eric Dumazetdd4f1072017-03-03 21:01:02 -08004746 err = sock_queue_err_skb(sk, skb);
4747 sock_put(sk);
4748 }
Johannes Berg6e3e9392011-11-09 10:15:42 +01004749 if (err)
4750 kfree_skb(skb);
4751}
4752EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
4753
Rusty Russellf35d9d82008-02-04 23:49:54 -05004754/**
4755 * skb_partial_csum_set - set up and verify partial csum values for packet
4756 * @skb: the skb to set
4757 * @start: the number of bytes after skb->data to start checksumming.
4758 * @off: the offset from start to place the checksum.
4759 *
4760 * For untrusted partially-checksummed packets, we need to make sure the values
4761 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
4762 *
4763 * This function checks and sets those values and skb->ip_summed: if this
4764 * returns false you should drop the packet.
4765 */
4766bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
4767{
Eric Dumazet52b5d6f2018-10-10 06:59:35 -07004768 u32 csum_end = (u32)start + (u32)off + sizeof(__sum16);
4769 u32 csum_start = skb_headroom(skb) + (u32)start;
4770
4771 if (unlikely(csum_start > U16_MAX || csum_end > skb_headlen(skb))) {
4772 net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n",
4773 start, off, skb_headroom(skb), skb_headlen(skb));
Rusty Russellf35d9d82008-02-04 23:49:54 -05004774 return false;
4775 }
4776 skb->ip_summed = CHECKSUM_PARTIAL;
Eric Dumazet52b5d6f2018-10-10 06:59:35 -07004777 skb->csum_start = csum_start;
Rusty Russellf35d9d82008-02-04 23:49:54 -05004778 skb->csum_offset = off;
Jason Wange5d5dec2013-03-26 23:11:20 +00004779 skb_set_transport_header(skb, start);
Rusty Russellf35d9d82008-02-04 23:49:54 -05004780 return true;
4781}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08004782EXPORT_SYMBOL_GPL(skb_partial_csum_set);
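/* Illustrative sketch (not part of the original source): validating an
 * untrusted partial-checksum request, e.g. one described by a virtio-net
 * style header coming from a guest.  csum_start and csum_offset below are
 * the untrusted values taken from that header.
 *
 *	if (!skb_partial_csum_set(skb, csum_start, csum_offset))
 *		return -EINVAL;	// bogus offsets - drop the packet
 *	// skb->csum_start/csum_offset are now known to be safe
 */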
Rusty Russellf35d9d82008-02-04 23:49:54 -05004783
Paul Durranted1f50c2014-01-09 10:02:46 +00004784static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len,
4785 unsigned int max)
4786{
4787 if (skb_headlen(skb) >= len)
4788 return 0;
4789
4790 /* If we need to pullup then pullup to the max, so we
4791 * won't need to do it again.
4792 */
4793 if (max > skb->len)
4794 max = skb->len;
4795
4796 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
4797 return -ENOMEM;
4798
4799 if (skb_headlen(skb) < len)
4800 return -EPROTO;
4801
4802 return 0;
4803}
4804
Jan Beulichf9708b42014-03-11 13:56:05 +00004805#define MAX_TCP_HDR_LEN (15 * 4)
4806
4807static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb,
4808 typeof(IPPROTO_IP) proto,
4809 unsigned int off)
4810{
Kees Cook161d1792020-02-19 22:23:04 -08004811 int err;
Jan Beulichf9708b42014-03-11 13:56:05 +00004812
Kees Cook161d1792020-02-19 22:23:04 -08004813 switch (proto) {
Jan Beulichf9708b42014-03-11 13:56:05 +00004814 case IPPROTO_TCP:
4815 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr),
4816 off + MAX_TCP_HDR_LEN);
4817 if (!err && !skb_partial_csum_set(skb, off,
4818 offsetof(struct tcphdr,
4819 check)))
4820 err = -EPROTO;
4821 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check;
4822
4823 case IPPROTO_UDP:
4824 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr),
4825 off + sizeof(struct udphdr));
4826 if (!err && !skb_partial_csum_set(skb, off,
4827 offsetof(struct udphdr,
4828 check)))
4829 err = -EPROTO;
4830 return err ? ERR_PTR(err) : &udp_hdr(skb)->check;
4831 }
4832
4833 return ERR_PTR(-EPROTO);
4834}
4835
Paul Durranted1f50c2014-01-09 10:02:46 +00004836/* This value should be large enough to cover a tagged ethernet header plus
4837 * maximally sized IP and TCP or UDP headers.
4838 */
4839#define MAX_IP_HDR_LEN 128
4840
Jan Beulichf9708b42014-03-11 13:56:05 +00004841static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate)
Paul Durranted1f50c2014-01-09 10:02:46 +00004842{
4843 unsigned int off;
4844 bool fragment;
Jan Beulichf9708b42014-03-11 13:56:05 +00004845 __sum16 *csum;
Paul Durranted1f50c2014-01-09 10:02:46 +00004846 int err;
4847
4848 fragment = false;
4849
4850 err = skb_maybe_pull_tail(skb,
4851 sizeof(struct iphdr),
4852 MAX_IP_HDR_LEN);
4853 if (err < 0)
4854 goto out;
4855
4856 if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
4857 fragment = true;
4858
4859 off = ip_hdrlen(skb);
4860
4861 err = -EPROTO;
4862
4863 if (fragment)
4864 goto out;
4865
Jan Beulichf9708b42014-03-11 13:56:05 +00004866 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off);
4867 if (IS_ERR(csum))
4868 return PTR_ERR(csum);
Paul Durranted1f50c2014-01-09 10:02:46 +00004869
Jan Beulichf9708b42014-03-11 13:56:05 +00004870 if (recalculate)
4871 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
4872 ip_hdr(skb)->daddr,
4873 skb->len - off,
4874 ip_hdr(skb)->protocol, 0);
Paul Durranted1f50c2014-01-09 10:02:46 +00004875 err = 0;
4876
4877out:
4878 return err;
4879}
4880
4881/* This value should be large enough to cover a tagged ethernet header plus
4882 * an IPv6 header, all options, and a maximal TCP or UDP header.
4883 */
4884#define MAX_IPV6_HDR_LEN 256
4885
4886#define OPT_HDR(type, skb, off) \
4887 (type *)(skb_network_header(skb) + (off))
4888
4889static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
4890{
4891 int err;
4892 u8 nexthdr;
4893 unsigned int off;
4894 unsigned int len;
4895 bool fragment;
4896 bool done;
Jan Beulichf9708b42014-03-11 13:56:05 +00004897 __sum16 *csum;
Paul Durranted1f50c2014-01-09 10:02:46 +00004898
4899 fragment = false;
4900 done = false;
4901
4902 off = sizeof(struct ipv6hdr);
4903
4904 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
4905 if (err < 0)
4906 goto out;
4907
4908 nexthdr = ipv6_hdr(skb)->nexthdr;
4909
4910 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
4911 while (off <= len && !done) {
4912 switch (nexthdr) {
4913 case IPPROTO_DSTOPTS:
4914 case IPPROTO_HOPOPTS:
4915 case IPPROTO_ROUTING: {
4916 struct ipv6_opt_hdr *hp;
4917
4918 err = skb_maybe_pull_tail(skb,
4919 off +
4920 sizeof(struct ipv6_opt_hdr),
4921 MAX_IPV6_HDR_LEN);
4922 if (err < 0)
4923 goto out;
4924
4925 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
4926 nexthdr = hp->nexthdr;
4927 off += ipv6_optlen(hp);
4928 break;
4929 }
4930 case IPPROTO_AH: {
4931 struct ip_auth_hdr *hp;
4932
4933 err = skb_maybe_pull_tail(skb,
4934 off +
4935 sizeof(struct ip_auth_hdr),
4936 MAX_IPV6_HDR_LEN);
4937 if (err < 0)
4938 goto out;
4939
4940 hp = OPT_HDR(struct ip_auth_hdr, skb, off);
4941 nexthdr = hp->nexthdr;
4942 off += ipv6_authlen(hp);
4943 break;
4944 }
4945 case IPPROTO_FRAGMENT: {
4946 struct frag_hdr *hp;
4947
4948 err = skb_maybe_pull_tail(skb,
4949 off +
4950 sizeof(struct frag_hdr),
4951 MAX_IPV6_HDR_LEN);
4952 if (err < 0)
4953 goto out;
4954
4955 hp = OPT_HDR(struct frag_hdr, skb, off);
4956
4957 if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
4958 fragment = true;
4959
4960 nexthdr = hp->nexthdr;
4961 off += sizeof(struct frag_hdr);
4962 break;
4963 }
4964 default:
4965 done = true;
4966 break;
4967 }
4968 }
4969
4970 err = -EPROTO;
4971
4972 if (!done || fragment)
4973 goto out;
4974
Jan Beulichf9708b42014-03-11 13:56:05 +00004975 csum = skb_checksum_setup_ip(skb, nexthdr, off);
4976 if (IS_ERR(csum))
4977 return PTR_ERR(csum);
Paul Durranted1f50c2014-01-09 10:02:46 +00004978
Jan Beulichf9708b42014-03-11 13:56:05 +00004979 if (recalculate)
4980 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
4981 &ipv6_hdr(skb)->daddr,
4982 skb->len - off, nexthdr, 0);
Paul Durranted1f50c2014-01-09 10:02:46 +00004983 err = 0;
4984
4985out:
4986 return err;
4987}
4988
4989/**
4990 * skb_checksum_setup - set up partial checksum offset
4991 * @skb: the skb to set up
4992 * @recalculate: if true the pseudo-header checksum will be recalculated
4993 */
4994int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
4995{
4996 int err;
4997
4998 switch (skb->protocol) {
4999 case htons(ETH_P_IP):
Jan Beulichf9708b42014-03-11 13:56:05 +00005000 err = skb_checksum_setup_ipv4(skb, recalculate);
Paul Durranted1f50c2014-01-09 10:02:46 +00005001 break;
5002
5003 case htons(ETH_P_IPV6):
5004 err = skb_checksum_setup_ipv6(skb, recalculate);
5005 break;
5006
5007 default:
5008 err = -EPROTO;
5009 break;
5010 }
5011
5012 return err;
5013}
5014EXPORT_SYMBOL(skb_checksum_setup);
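/* Illustrative sketch (not part of the original source): a backend driver
 * (xen-netback style) fixing up partial checksums on packets received from
 * an untrusted frontend before passing them up the stack.  The surrounding
 * receive loop is assumed.
 *
 *	if (skb_checksum_setup(skb, true)) {	// recalculate pseudo-header csum
 *		kfree_skb(skb);
 *		continue;
 *	}
 *	netif_receive_skb(skb);
 */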
5015
Linus Lüssing9afd85c2015-05-02 14:01:07 +02005016/**
5017 * skb_checksum_maybe_trim - maybe trims the given skb
5018 * @skb: the skb to check
5019 * @transport_len: the data length beyond the network header
5020 *
5021 * Checks whether the given skb has data beyond the given transport length.
5022 * If so, returns a cloned skb trimmed to this transport length.
5023 * Otherwise returns the provided skb. Returns NULL in error cases
5024 * (e.g. transport_len exceeds skb length or out-of-memory).
5025 *
Linus Lüssinga5169932015-08-13 05:54:07 +02005026 * Caller needs to set the skb transport header and free any returned skb if it
5027 * differs from the provided skb.
Linus Lüssing9afd85c2015-05-02 14:01:07 +02005028 */
5029static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
5030 unsigned int transport_len)
5031{
5032 struct sk_buff *skb_chk;
5033 unsigned int len = skb_transport_offset(skb) + transport_len;
5034 int ret;
5035
Linus Lüssinga5169932015-08-13 05:54:07 +02005036 if (skb->len < len)
Linus Lüssing9afd85c2015-05-02 14:01:07 +02005037 return NULL;
Linus Lüssinga5169932015-08-13 05:54:07 +02005038 else if (skb->len == len)
Linus Lüssing9afd85c2015-05-02 14:01:07 +02005039 return skb;
Linus Lüssing9afd85c2015-05-02 14:01:07 +02005040
5041 skb_chk = skb_clone(skb, GFP_ATOMIC);
Linus Lüssing9afd85c2015-05-02 14:01:07 +02005042 if (!skb_chk)
5043 return NULL;
5044
5045 ret = pskb_trim_rcsum(skb_chk, len);
5046 if (ret) {
5047 kfree_skb(skb_chk);
5048 return NULL;
5049 }
5050
5051 return skb_chk;
5052}
5053
5054/**
5055 * skb_checksum_trimmed - validate checksum of an skb
5056 * @skb: the skb to check
5057 * @transport_len: the data length beyond the network header
5058 * @skb_chkf: checksum function to use
5059 *
5060 * Applies the given checksum function skb_chkf to the provided skb.
5061 * Returns a checked and maybe trimmed skb. Returns NULL on error.
5062 *
5063 * If the skb has data beyond the given transport length, then a
5064 * trimmed & cloned skb is checked and returned.
5065 *
Linus Lüssinga5169932015-08-13 05:54:07 +02005066 * Caller needs to set the skb transport header and free any returned skb if it
5067 * differs from the provided skb.
Linus Lüssing9afd85c2015-05-02 14:01:07 +02005068 */
5069struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
5070 unsigned int transport_len,
5071 __sum16(*skb_chkf)(struct sk_buff *skb))
5072{
5073 struct sk_buff *skb_chk;
5074 unsigned int offset = skb_transport_offset(skb);
Linus Lüssingfcba67c2015-05-05 00:19:35 +02005075 __sum16 ret;
Linus Lüssing9afd85c2015-05-02 14:01:07 +02005076
5077 skb_chk = skb_checksum_maybe_trim(skb, transport_len);
5078 if (!skb_chk)
Linus Lüssinga5169932015-08-13 05:54:07 +02005079 goto err;
Linus Lüssing9afd85c2015-05-02 14:01:07 +02005080
Linus Lüssinga5169932015-08-13 05:54:07 +02005081 if (!pskb_may_pull(skb_chk, offset))
5082 goto err;
Linus Lüssing9afd85c2015-05-02 14:01:07 +02005083
Linus Lüssing9b368812016-02-24 04:21:42 +01005084 skb_pull_rcsum(skb_chk, offset);
Linus Lüssing9afd85c2015-05-02 14:01:07 +02005085 ret = skb_chkf(skb_chk);
Linus Lüssing9b368812016-02-24 04:21:42 +01005086 skb_push_rcsum(skb_chk, offset);
Linus Lüssing9afd85c2015-05-02 14:01:07 +02005087
Linus Lüssinga5169932015-08-13 05:54:07 +02005088 if (ret)
5089 goto err;
Linus Lüssing9afd85c2015-05-02 14:01:07 +02005090
5091 return skb_chk;
Linus Lüssinga5169932015-08-13 05:54:07 +02005092
5093err:
5094 if (skb_chk && skb_chk != skb)
5095 kfree_skb(skb_chk);
5096
5097 return NULL;
5098
Linus Lüssing9afd85c2015-05-02 14:01:07 +02005099}
5100EXPORT_SYMBOL(skb_checksum_trimmed);
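/* Illustrative sketch (not part of the original source): validating an
 * IGMP/MLD-style message with skb_checksum_trimmed().  The checksum
 * callback below is a simple folded sum over the whole (trimmed) skb and
 * is an assumption of this sketch, not a helper from this file.
 *
 *	static __sum16 example_simple_csum(struct sk_buff *skb)
 *	{
 *		return csum_fold(skb_checksum(skb, 0, skb->len, 0));
 *	}
 *
 *	skb_set_transport_header(skb, offset);	// caller sets transport header
 *	skb_chk = skb_checksum_trimmed(skb, transport_len, example_simple_csum);
 *	if (!skb_chk)
 *		return -EINVAL;
 *	// ... parse skb_chk ...
 *	if (skb_chk != skb)
 *		kfree_skb(skb_chk);	// free the trimmed clone when done
 */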
5101
Ben Hutchings4497b072008-06-19 16:22:28 -07005102void __skb_warn_lro_forwarding(const struct sk_buff *skb)
5103{
Joe Perchese87cc472012-05-13 21:56:26 +00005104 net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
5105 skb->dev->name);
Ben Hutchings4497b072008-06-19 16:22:28 -07005106}
Ben Hutchings4497b072008-06-19 16:22:28 -07005107EXPORT_SYMBOL(__skb_warn_lro_forwarding);
Eric Dumazetbad43ca2012-05-19 03:02:02 +00005108
5109void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
5110{
Eric Dumazet3d861f62012-10-22 09:03:40 +00005111 if (head_stolen) {
5112 skb_release_head_state(skb);
Eric Dumazetbad43ca2012-05-19 03:02:02 +00005113 kmem_cache_free(skbuff_head_cache, skb);
Eric Dumazet3d861f62012-10-22 09:03:40 +00005114 } else {
Eric Dumazetbad43ca2012-05-19 03:02:02 +00005115 __kfree_skb(skb);
Eric Dumazet3d861f62012-10-22 09:03:40 +00005116 }
Eric Dumazetbad43ca2012-05-19 03:02:02 +00005117}
5118EXPORT_SYMBOL(kfree_skb_partial);
5119
5120/**
5121 * skb_try_coalesce - try to merge skb to prior one
5122 * @to: prior buffer
5123 * @from: buffer to add
5124 * @fragstolen: pointer to boolean
Randy Dunlapc6c4b972012-06-08 14:01:44 +00005125 * @delta_truesize: how much more was allocated than was requested
Eric Dumazetbad43ca2012-05-19 03:02:02 +00005126 */
5127bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
5128 bool *fragstolen, int *delta_truesize)
5129{
Eric Dumazetc818fa92017-10-04 10:48:35 -07005130 struct skb_shared_info *to_shinfo, *from_shinfo;
Eric Dumazetbad43ca2012-05-19 03:02:02 +00005131 int i, delta, len = from->len;
5132
5133 *fragstolen = false;
5134
5135 if (skb_cloned(to))
5136 return false;
5137
5138 if (len <= skb_tailroom(to)) {
Eric Dumazete93a0432014-09-15 04:19:52 -07005139 if (len)
5140 BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
Eric Dumazetbad43ca2012-05-19 03:02:02 +00005141 *delta_truesize = 0;
5142 return true;
5143 }
5144
Eric Dumazetc818fa92017-10-04 10:48:35 -07005145 to_shinfo = skb_shinfo(to);
5146 from_shinfo = skb_shinfo(from);
5147 if (to_shinfo->frag_list || from_shinfo->frag_list)
Eric Dumazetbad43ca2012-05-19 03:02:02 +00005148 return false;
Willem de Bruijn1f8b9772017-08-03 16:29:41 -04005149 if (skb_zcopy(to) || skb_zcopy(from))
5150 return false;
Eric Dumazetbad43ca2012-05-19 03:02:02 +00005151
5152 if (skb_headlen(from) != 0) {
5153 struct page *page;
5154 unsigned int offset;
5155
Eric Dumazetc818fa92017-10-04 10:48:35 -07005156 if (to_shinfo->nr_frags +
5157 from_shinfo->nr_frags >= MAX_SKB_FRAGS)
Eric Dumazetbad43ca2012-05-19 03:02:02 +00005158 return false;
5159
5160 if (skb_head_is_locked(from))
5161 return false;
5162
5163 delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
5164
5165 page = virt_to_head_page(from->head);
5166 offset = from->data - (unsigned char *)page_address(page);
5167
Eric Dumazetc818fa92017-10-04 10:48:35 -07005168 skb_fill_page_desc(to, to_shinfo->nr_frags,
Eric Dumazetbad43ca2012-05-19 03:02:02 +00005169 page, offset, skb_headlen(from));
5170 *fragstolen = true;
5171 } else {
Eric Dumazetc818fa92017-10-04 10:48:35 -07005172 if (to_shinfo->nr_frags +
5173 from_shinfo->nr_frags > MAX_SKB_FRAGS)
Eric Dumazetbad43ca2012-05-19 03:02:02 +00005174 return false;
5175
Weiping Panf4b549a2012-09-28 20:15:30 +00005176 delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
Eric Dumazetbad43ca2012-05-19 03:02:02 +00005177 }
5178
5179 WARN_ON_ONCE(delta < len);
5180
Eric Dumazetc818fa92017-10-04 10:48:35 -07005181 memcpy(to_shinfo->frags + to_shinfo->nr_frags,
5182 from_shinfo->frags,
5183 from_shinfo->nr_frags * sizeof(skb_frag_t));
5184 to_shinfo->nr_frags += from_shinfo->nr_frags;
Eric Dumazetbad43ca2012-05-19 03:02:02 +00005185
5186 if (!skb_cloned(from))
Eric Dumazetc818fa92017-10-04 10:48:35 -07005187 from_shinfo->nr_frags = 0;
Eric Dumazetbad43ca2012-05-19 03:02:02 +00005188
Li RongQing8ea853f2012-09-18 16:53:21 +00005189 /* if the skb is not cloned this does nothing
5190 * since we set nr_frags to 0.
5191 */
Eric Dumazetc818fa92017-10-04 10:48:35 -07005192 for (i = 0; i < from_shinfo->nr_frags; i++)
5193 __skb_frag_ref(&from_shinfo->frags[i]);
Eric Dumazetbad43ca2012-05-19 03:02:02 +00005194
5195 to->truesize += delta;
5196 to->len += len;
5197 to->data_len += len;
5198
5199 *delta_truesize = delta;
5200 return true;
5201}
5202EXPORT_SYMBOL(skb_try_coalesce);
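/* Illustrative sketch (not part of the original source): the receive-queue
 * coalescing pattern used by TCP-like consumers.  "tail" is the last skb
 * already sitting in "queue"; both, and the truesize accounting, are left
 * to the caller and are assumptions of this sketch.
 *
 *	bool fragstolen;
 *	int delta;
 *
 *	if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
 *		kfree_skb_partial(skb, fragstolen);
 *		// charge 'delta' bytes of truesize to the socket here
 *	} else {
 *		__skb_queue_tail(queue, skb);
 *	}
 */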
Nicolas Dichtel621e84d2013-06-26 16:11:27 +02005203
5204/**
Nicolas Dichtel8b27f272013-09-02 15:34:56 +02005205 * skb_scrub_packet - scrub an skb
Nicolas Dichtel621e84d2013-06-26 16:11:27 +02005206 *
5207 * @skb: buffer to clean
Nicolas Dichtel8b27f272013-09-02 15:34:56 +02005208 * @xnet: packet is crossing netns
Nicolas Dichtel621e84d2013-06-26 16:11:27 +02005209 *
Nicolas Dichtel8b27f272013-09-02 15:34:56 +02005210 * skb_scrub_packet can be used after encapsulating or decapsulating a packet
5211 * into/from a tunnel. Some information has to be cleared during these
5212 * operations.
5213 * skb_scrub_packet can also be used to clean a skb before injecting it into
5214 * another namespace (@xnet == true). We have to clear all information in the
5215 * skb that could impact namespace isolation.
Nicolas Dichtel621e84d2013-06-26 16:11:27 +02005216 */
Nicolas Dichtel8b27f272013-09-02 15:34:56 +02005217void skb_scrub_packet(struct sk_buff *skb, bool xnet)
Nicolas Dichtel621e84d2013-06-26 16:11:27 +02005218{
Nicolas Dichtel621e84d2013-06-26 16:11:27 +02005219 skb->pkt_type = PACKET_HOST;
5220 skb->skb_iif = 0;
WANG Cong60ff7462014-05-04 16:39:18 -07005221 skb->ignore_df = 0;
Nicolas Dichtel621e84d2013-06-26 16:11:27 +02005222 skb_dst_drop(skb);
Florian Westphal174e2382019-09-26 20:37:05 +02005223 skb_ext_reset(skb);
Florian Westphal895b5c92019-09-29 20:54:03 +02005224 nf_reset_ct(skb);
Nicolas Dichtel621e84d2013-06-26 16:11:27 +02005225 nf_reset_trace(skb);
Herbert Xu213dd742015-04-16 09:03:27 +08005226
Petr Machata6f9a5062018-11-19 16:11:07 +00005227#ifdef CONFIG_NET_SWITCHDEV
5228 skb->offload_fwd_mark = 0;
Ido Schimmel875e8932018-12-04 08:15:10 +00005229 skb->offload_l3_fwd_mark = 0;
Petr Machata6f9a5062018-11-19 16:11:07 +00005230#endif
5231
Herbert Xu213dd742015-04-16 09:03:27 +08005232 if (!xnet)
5233 return;
5234
Ye Yin2b5ec1a2017-10-26 16:57:05 +08005235 ipvs_reset(skb);
Herbert Xu213dd742015-04-16 09:03:27 +08005236 skb->mark = 0;
Jesus Sanchez-Palenciac47d8c22018-07-03 15:42:47 -07005237 skb->tstamp = 0;
Nicolas Dichtel621e84d2013-06-26 16:11:27 +02005238}
5239EXPORT_SYMBOL_GPL(skb_scrub_packet);
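
/* A minimal usage sketch for skb_scrub_packet(): a tunnel-style receive
 * path handing a decapsulated skb to a device that may live in a different
 * netns. example_cross_netns_rx() is hypothetical; the xnet flag is derived
 * from a netns comparison as described in the kernel-doc above.
 */
static void __maybe_unused example_cross_netns_rx(struct sk_buff *skb,
						  struct net_device *dev)
{
	bool xnet = !net_eq(dev_net(skb->dev), dev_net(dev));

	skb_scrub_packet(skb, xnet);
	skb->dev = dev;
}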
Florian Westphalde960aa2014-01-26 10:58:16 +01005240
5241/**
5242 * skb_gso_transport_seglen - Return length of individual segments of a gso packet
5243 *
5244 * @skb: GSO skb
5245 *
5246 * skb_gso_transport_seglen is used to determine the real size of the
5247 * individual segments, including Layer4 headers (TCP/UDP).
5248 *
5249 * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
5250 */
Daniel Axtensa4a77712018-03-01 17:13:40 +11005251static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
Florian Westphalde960aa2014-01-26 10:58:16 +01005252{
5253 const struct skb_shared_info *shinfo = skb_shinfo(skb);
Florian Westphalf993bc22014-10-20 13:49:18 +02005254 unsigned int thlen = 0;
Florian Westphalde960aa2014-01-26 10:58:16 +01005255
Florian Westphalf993bc22014-10-20 13:49:18 +02005256 if (skb->encapsulation) {
5257 thlen = skb_inner_transport_header(skb) -
5258 skb_transport_header(skb);
Florian Westphal6d39d582014-04-09 10:28:50 +02005259
Florian Westphalf993bc22014-10-20 13:49:18 +02005260 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
5261 thlen += inner_tcp_hdrlen(skb);
5262 } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
5263 thlen = tcp_hdrlen(skb);
Daniel Axtens1dd27cd2018-03-09 14:06:09 +11005264 } else if (unlikely(skb_is_gso_sctp(skb))) {
Marcelo Ricardo Leitner90017ac2016-06-02 15:05:43 -03005265 thlen = sizeof(struct sctphdr);
Willem de Bruijnee80d1e2018-04-26 13:42:16 -04005266 } else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
5267 thlen = sizeof(struct udphdr);
Florian Westphalf993bc22014-10-20 13:49:18 +02005268 }
Florian Westphal6d39d582014-04-09 10:28:50 +02005269 /* UFO sets gso_size to the size of the fragmentation
5270 * payload, i.e. the size of the L4 (UDP) header is already
5271 * accounted for.
5272 */
Florian Westphalf993bc22014-10-20 13:49:18 +02005273 return thlen + shinfo->gso_size;
Florian Westphalde960aa2014-01-26 10:58:16 +01005274}
Daniel Axtensa4a77712018-03-01 17:13:40 +11005275
5276/**
5277 * skb_gso_network_seglen - Return length of individual segments of a gso packet
5278 *
5279 * @skb: GSO skb
5280 *
5281 * skb_gso_network_seglen is used to determine the real size of the
5282 * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
5283 *
5284 * The MAC/L2 header is not accounted for.
5285 */
5286static unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
5287{
5288 unsigned int hdr_len = skb_transport_header(skb) -
5289 skb_network_header(skb);
5290
5291 return hdr_len + skb_gso_transport_seglen(skb);
5292}
5293
5294/**
5295 * skb_gso_mac_seglen - Return length of individual segments of a gso packet
5296 *
5297 * @skb: GSO skb
5298 *
5299 * skb_gso_mac_seglen is used to determine the real size of the
5300 * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4
5301 * headers (TCP/UDP).
5302 */
5303static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
5304{
5305 unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
5306
5307 return hdr_len + skb_gso_transport_seglen(skb);
5308}
Vlad Yasevich0d5501c2014-08-08 14:42:13 -04005309
Marcelo Ricardo Leitnerae7ef812016-06-02 15:05:41 -03005310/**
Daniel Axtens2b16f042018-01-31 14:15:33 +11005311 * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS
5312 *
5313 * There are a couple of instances where we have a GSO skb, and we
5314 * want to determine what size it would be after it is segmented.
5315 *
5316 * We might want to check:
5317 * - L3+L4+payload size (e.g. IP forwarding)
5318 * - L2+L3+L4+payload size (e.g. sanity check before passing to driver)
5319 *
5320 * This is a helper to do that correctly considering GSO_BY_FRAGS.
5321 *
Mathieu Malaterre49682bf2018-10-31 13:16:58 +01005322 * @skb: GSO skb
5323 *
Daniel Axtens2b16f042018-01-31 14:15:33 +11005324 * @seg_len: The segmented length (from skb_gso_*_seglen). In the
5325 * GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS].
5326 *
5327 * @max_len: The maximum permissible length.
5328 *
5329 * Returns true if the segmented length <= max length.
5330 */
5331static inline bool skb_gso_size_check(const struct sk_buff *skb,
5332 unsigned int seg_len,
5333 unsigned int max_len) {
5334 const struct skb_shared_info *shinfo = skb_shinfo(skb);
5335 const struct sk_buff *iter;
5336
5337 if (shinfo->gso_size != GSO_BY_FRAGS)
5338 return seg_len <= max_len;
5339
5340 /* Undo this so we can re-use header sizes */
5341 seg_len -= GSO_BY_FRAGS;
5342
5343 skb_walk_frags(skb, iter) {
5344 if (seg_len + skb_headlen(iter) > max_len)
5345 return false;
5346 }
5347
5348 return true;
5349}
5350
5351/**
Daniel Axtens779b7932018-03-01 17:13:37 +11005352 * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU?
Marcelo Ricardo Leitnerae7ef812016-06-02 15:05:41 -03005353 *
5354 * @skb: GSO skb
David S. Miller76f21b92016-06-03 22:56:28 -07005355 * @mtu: MTU to validate against
Marcelo Ricardo Leitnerae7ef812016-06-02 15:05:41 -03005356 *
Daniel Axtens779b7932018-03-01 17:13:37 +11005357 * skb_gso_validate_network_len validates if a given skb will fit a
5358 * wanted MTU once split. It considers L3 headers, L4 headers, and the
5359 * payload.
Marcelo Ricardo Leitnerae7ef812016-06-02 15:05:41 -03005360 */
Daniel Axtens779b7932018-03-01 17:13:37 +11005361bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu)
Marcelo Ricardo Leitnerae7ef812016-06-02 15:05:41 -03005362{
Daniel Axtens2b16f042018-01-31 14:15:33 +11005363 return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu);
Marcelo Ricardo Leitnerae7ef812016-06-02 15:05:41 -03005364}
Daniel Axtens779b7932018-03-01 17:13:37 +11005365EXPORT_SYMBOL_GPL(skb_gso_validate_network_len);
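
/* A minimal sketch of the forwarding-style check skb_gso_validate_network_len()
 * exists for: a GSO skb may leave through a link only if each resulting
 * segment (L3 + L4 + payload) fits the egress MTU. example_gso_fits_mtu()
 * is hypothetical.
 */
static bool __maybe_unused example_gso_fits_mtu(const struct sk_buff *skb,
						unsigned int mtu)
{
	return !skb_is_gso(skb) || skb_gso_validate_network_len(skb, mtu);
}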
Marcelo Ricardo Leitnerae7ef812016-06-02 15:05:41 -03005366
Daniel Axtens2b16f042018-01-31 14:15:33 +11005367/**
5368 * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length?
5369 *
5370 * @skb: GSO skb
5371 * @len: length to validate against
5372 *
5373 * skb_gso_validate_mac_len validates if a given skb will fit a wanted
5374 * length once split, including L2, L3 and L4 headers and the payload.
5375 */
5376bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len)
5377{
5378 return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len);
5379}
5380EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
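
/* A minimal sketch of a driver-style sanity check built on
 * skb_gso_validate_mac_len(): every segment, including its L2 header, must
 * stay within a hardware frame-size limit. EXAMPLE_HW_MAX_FRAME and
 * example_hw_can_xmit() are hypothetical.
 */
#define EXAMPLE_HW_MAX_FRAME	9216

static bool __maybe_unused example_hw_can_xmit(const struct sk_buff *skb)
{
	if (!skb_is_gso(skb))
		return skb->len <= EXAMPLE_HW_MAX_FRAME;

	return skb_gso_validate_mac_len(skb, EXAMPLE_HW_MAX_FRAME);
}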
5381
Vlad Yasevich0d5501c2014-08-08 14:42:13 -04005382static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
5383{
Yuya Kusakabed85e8be2019-04-16 10:22:28 +09005384 int mac_len, meta_len;
5385 void *meta;
Toshiaki Makita4bbb3e02018-03-13 14:51:27 +09005386
Vlad Yasevich0d5501c2014-08-08 14:42:13 -04005387 if (skb_cow(skb, skb_headroom(skb)) < 0) {
5388 kfree_skb(skb);
5389 return NULL;
5390 }
5391
Toshiaki Makita4bbb3e02018-03-13 14:51:27 +09005392 mac_len = skb->data - skb_mac_header(skb);
Toshiaki Makitaae474572018-03-29 19:05:29 +09005393 if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) {
5394 memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
5395 mac_len - VLAN_HLEN - ETH_TLEN);
5396 }
Yuya Kusakabed85e8be2019-04-16 10:22:28 +09005397
5398 meta_len = skb_metadata_len(skb);
5399 if (meta_len) {
5400 meta = skb_metadata_end(skb) - meta_len;
5401 memmove(meta + VLAN_HLEN, meta, meta_len);
5402 }
5403
Vlad Yasevich0d5501c2014-08-08 14:42:13 -04005404 skb->mac_header += VLAN_HLEN;
5405 return skb;
5406}
5407
5408struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
5409{
5410 struct vlan_hdr *vhdr;
5411 u16 vlan_tci;
5412
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01005413 if (unlikely(skb_vlan_tag_present(skb))) {
Vlad Yasevich0d5501c2014-08-08 14:42:13 -04005414		/* vlan_tci is already set up, so leave this for another time */
5415 return skb;
5416 }
5417
5418 skb = skb_share_check(skb, GFP_ATOMIC);
5419 if (unlikely(!skb))
5420 goto err_free;
5421
5422 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
5423 goto err_free;
5424
5425 vhdr = (struct vlan_hdr *)skb->data;
5426 vlan_tci = ntohs(vhdr->h_vlan_TCI);
5427 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);
5428
5429 skb_pull_rcsum(skb, VLAN_HLEN);
5430 vlan_set_encap_proto(skb, vhdr);
5431
5432 skb = skb_reorder_vlan_header(skb);
5433 if (unlikely(!skb))
5434 goto err_free;
5435
5436 skb_reset_network_header(skb);
5437 skb_reset_transport_header(skb);
5438 skb_reset_mac_len(skb);
5439
5440 return skb;
5441
5442err_free:
5443 kfree_skb(skb);
5444 return NULL;
5445}
5446EXPORT_SYMBOL(skb_vlan_untag);
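
/* A minimal sketch of an rx-path caller of skb_vlan_untag(): if the VLAN tag
 * is still in the payload, move it into the hwaccel tag before the packet is
 * parsed further. example_rx_normalize_vlan() is hypothetical; note that
 * skb_vlan_untag() may free the skb and return NULL on error.
 */
static __maybe_unused struct sk_buff *
example_rx_normalize_vlan(struct sk_buff *skb)
{
	if (eth_type_vlan(skb->protocol))
		skb = skb_vlan_untag(skb);

	return skb;
}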
Eric Dumazet2e4e4412014-09-17 04:49:49 -07005447
Jiri Pirkoe2195122014-11-19 14:05:01 +01005448int skb_ensure_writable(struct sk_buff *skb, int write_len)
5449{
5450 if (!pskb_may_pull(skb, write_len))
5451 return -ENOMEM;
5452
5453 if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
5454 return 0;
5455
5456 return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
5457}
5458EXPORT_SYMBOL(skb_ensure_writable);
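
/* A minimal sketch of the usual skb_ensure_writable() pattern: before
 * rewriting a header in place, make sure the bytes about to be modified are
 * in the linear area and not shared with a clone. example_set_dst_mac() is
 * hypothetical and assumes skb->data is at the mac header.
 */
static int __maybe_unused example_set_dst_mac(struct sk_buff *skb,
					      const u8 *addr)
{
	int err;

	err = skb_ensure_writable(skb, ETH_HLEN);
	if (err)
		return err;

	memcpy(eth_hdr(skb)->h_dest, addr, ETH_ALEN);
	return 0;
}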
5459
Shmulik Ladkanibfca4c52016-09-19 19:11:09 +03005460/* Remove the VLAN header from the packet and update the csum accordingly.
5461 * Expects an skb without skb_vlan_tag_present() set, with the vlan tag still in the payload.
5462 */
5463int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
Jiri Pirko93515d52014-11-19 14:05:02 +01005464{
5465 struct vlan_hdr *vhdr;
Shmulik Ladkanib6a79202016-09-29 12:10:41 +03005466 int offset = skb->data - skb_mac_header(skb);
Jiri Pirko93515d52014-11-19 14:05:02 +01005467 int err;
5468
Shmulik Ladkanib6a79202016-09-29 12:10:41 +03005469 if (WARN_ONCE(offset,
5470 "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n",
5471 offset)) {
5472 return -EINVAL;
5473 }
5474
Jiri Pirko93515d52014-11-19 14:05:02 +01005475 err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
5476 if (unlikely(err))
Shmulik Ladkanib6a79202016-09-29 12:10:41 +03005477 return err;
Jiri Pirko93515d52014-11-19 14:05:02 +01005478
5479 skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
5480
5481 vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
5482 *vlan_tci = ntohs(vhdr->h_vlan_TCI);
5483
5484 memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
5485 __skb_pull(skb, VLAN_HLEN);
5486
5487 vlan_set_encap_proto(skb, vhdr);
5488 skb->mac_header += VLAN_HLEN;
5489
5490 if (skb_network_offset(skb) < ETH_HLEN)
5491 skb_set_network_header(skb, ETH_HLEN);
5492
5493 skb_reset_mac_len(skb);
Jiri Pirko93515d52014-11-19 14:05:02 +01005494
5495 return err;
5496}
Shmulik Ladkanibfca4c52016-09-19 19:11:09 +03005497EXPORT_SYMBOL(__skb_vlan_pop);
Jiri Pirko93515d52014-11-19 14:05:02 +01005498
Shmulik Ladkanib6a79202016-09-29 12:10:41 +03005499/* Pop a vlan tag either from hwaccel or from payload.
5500 * Expects skb->data at mac header.
5501 */
Jiri Pirko93515d52014-11-19 14:05:02 +01005502int skb_vlan_pop(struct sk_buff *skb)
5503{
5504 u16 vlan_tci;
5505 __be16 vlan_proto;
5506 int err;
5507
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01005508 if (likely(skb_vlan_tag_present(skb))) {
Michał Mirosławb18175242018-11-09 00:18:02 +01005509 __vlan_hwaccel_clear_tag(skb);
Jiri Pirko93515d52014-11-19 14:05:02 +01005510 } else {
Shmulik Ladkaniecf4ee42016-09-20 12:48:37 +03005511 if (unlikely(!eth_type_vlan(skb->protocol)))
Jiri Pirko93515d52014-11-19 14:05:02 +01005512 return 0;
5513
5514 err = __skb_vlan_pop(skb, &vlan_tci);
5515 if (err)
5516 return err;
5517 }
5518 /* move next vlan tag to hw accel tag */
Shmulik Ladkaniecf4ee42016-09-20 12:48:37 +03005519 if (likely(!eth_type_vlan(skb->protocol)))
Jiri Pirko93515d52014-11-19 14:05:02 +01005520 return 0;
5521
5522 vlan_proto = skb->protocol;
5523 err = __skb_vlan_pop(skb, &vlan_tci);
5524 if (unlikely(err))
5525 return err;
5526
5527 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
5528 return 0;
5529}
5530EXPORT_SYMBOL(skb_vlan_pop);
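
/* A minimal sketch of a "flush all vlans" helper built on skb_vlan_pop():
 * each call removes one tag, whether it sits in the hwaccel field or in the
 * payload, so looping strips a stack of tags. example_pop_all_vlans() is
 * hypothetical and assumes skb->data is at the mac header.
 */
static int __maybe_unused example_pop_all_vlans(struct sk_buff *skb)
{
	int err = 0;

	while (skb_vlan_tag_present(skb) || eth_type_vlan(skb->protocol)) {
		err = skb_vlan_pop(skb);
		if (err)
			break;
	}
	return err;
}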
5531
Shmulik Ladkanib6a79202016-09-29 12:10:41 +03005532/* Push a vlan tag either into hwaccel or into payload (if hwaccel tag present).
5533 * Expects skb->data at mac header.
5534 */
Jiri Pirko93515d52014-11-19 14:05:02 +01005535int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
5536{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01005537 if (skb_vlan_tag_present(skb)) {
Shmulik Ladkanib6a79202016-09-29 12:10:41 +03005538 int offset = skb->data - skb_mac_header(skb);
Jiri Pirko93515d52014-11-19 14:05:02 +01005539 int err;
5540
Shmulik Ladkanib6a79202016-09-29 12:10:41 +03005541 if (WARN_ONCE(offset,
5542 "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n",
5543 offset)) {
5544 return -EINVAL;
5545 }
5546
Jiri Pirko93515d52014-11-19 14:05:02 +01005547 err = __vlan_insert_tag(skb, skb->vlan_proto,
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01005548 skb_vlan_tag_get(skb));
Shmulik Ladkanib6a79202016-09-29 12:10:41 +03005549 if (err)
Jiri Pirko93515d52014-11-19 14:05:02 +01005550 return err;
Daniel Borkmann9241e2d2016-04-16 02:27:58 +02005551
Jiri Pirko93515d52014-11-19 14:05:02 +01005552 skb->protocol = skb->vlan_proto;
5553 skb->mac_len += VLAN_HLEN;
Jiri Pirko93515d52014-11-19 14:05:02 +01005554
Daniel Borkmann6b83d282016-02-20 00:29:30 +01005555 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
Jiri Pirko93515d52014-11-19 14:05:02 +01005556 }
5557 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
5558 return 0;
5559}
5560EXPORT_SYMBOL(skb_vlan_push);
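
/* A minimal sketch of a vlan "rewrite" composed from the two helpers above:
 * drop the current outermost tag, then push a fresh 802.1Q tag carrying the
 * wanted VLAN id. example_rewrite_vlan() is hypothetical and assumes
 * skb->data is at the mac header.
 */
static int __maybe_unused example_rewrite_vlan(struct sk_buff *skb, u16 vid)
{
	int err;

	err = skb_vlan_pop(skb);
	if (err)
		return err;

	return skb_vlan_push(skb, htons(ETH_P_8021Q), vid);
}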
5561
John Hurley8822e272019-07-07 15:01:54 +01005562/* Update the ethertype of hdr and the skb csum value if required. */
5563static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr,
5564 __be16 ethertype)
5565{
5566 if (skb->ip_summed == CHECKSUM_COMPLETE) {
5567 __be16 diff[] = { ~hdr->h_proto, ethertype };
5568
5569 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
5570 }
5571
5572 hdr->h_proto = ethertype;
5573}
5574
5575/**
Martin Varghesee7dbfed2019-12-21 08:50:01 +05305576 * skb_mpls_push() - push a new MPLS header after mac_len bytes from start of
5577 * the packet
John Hurley8822e272019-07-07 15:01:54 +01005578 *
5579 * @skb: buffer
5580 * @mpls_lse: MPLS label stack entry to push
5581 * @mpls_proto: ethertype of the new MPLS header (expects 0x8847 or 0x8848)
Davide Carattifa4e0f82019-10-12 13:55:07 +02005582 * @mac_len: length of the MAC header
Martin Varghesee7dbfed2019-12-21 08:50:01 +05305583 * @ethernet: flag to indicate if the resulting packet after skb_mpls_push is
5584 * ethernet
John Hurley8822e272019-07-07 15:01:54 +01005585 *
5586 * Expects skb->data at mac header.
5587 *
5588 * Returns 0 on success, -errno otherwise.
5589 */
Davide Carattifa4e0f82019-10-12 13:55:07 +02005590int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
Martin Varghesed04ac222019-12-05 05:57:22 +05305591 int mac_len, bool ethernet)
John Hurley8822e272019-07-07 15:01:54 +01005592{
5593 struct mpls_shim_hdr *lse;
5594 int err;
5595
5596 if (unlikely(!eth_p_mpls(mpls_proto)))
5597 return -EINVAL;
5598
5599 /* Networking stack does not allow simultaneous Tunnel and MPLS GSO. */
5600 if (skb->encapsulation)
5601 return -EINVAL;
5602
5603 err = skb_cow_head(skb, MPLS_HLEN);
5604 if (unlikely(err))
5605 return err;
5606
5607 if (!skb->inner_protocol) {
Martin Varghesee7dbfed2019-12-21 08:50:01 +05305608 skb_set_inner_network_header(skb, skb_network_offset(skb));
John Hurley8822e272019-07-07 15:01:54 +01005609 skb_set_inner_protocol(skb, skb->protocol);
5610 }
5611
5612 skb_push(skb, MPLS_HLEN);
5613 memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
Davide Carattifa4e0f82019-10-12 13:55:07 +02005614 mac_len);
John Hurley8822e272019-07-07 15:01:54 +01005615 skb_reset_mac_header(skb);
Davide Carattifa4e0f82019-10-12 13:55:07 +02005616 skb_set_network_header(skb, mac_len);
Martin Varghesee7dbfed2019-12-21 08:50:01 +05305617 skb_reset_mac_len(skb);
John Hurley8822e272019-07-07 15:01:54 +01005618
5619 lse = mpls_hdr(skb);
5620 lse->label_stack_entry = mpls_lse;
5621 skb_postpush_rcsum(skb, lse, MPLS_HLEN);
5622
Martin Varghesed04ac222019-12-05 05:57:22 +05305623 if (ethernet)
John Hurley8822e272019-07-07 15:01:54 +01005624 skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto);
5625 skb->protocol = mpls_proto;
5626
5627 return 0;
5628}
5629EXPORT_SYMBOL_GPL(skb_mpls_push);
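
/* A minimal sketch of an openvswitch/tc style "push_mpls" action using the
 * helper above: build a bottom-of-stack label stack entry and insert it
 * after the ethernet header. example_push_mpls_label() is hypothetical;
 * label and ttl are caller-chosen values.
 */
static int __maybe_unused example_push_mpls_label(struct sk_buff *skb,
						  u32 label, u8 ttl)
{
	u32 lse = ((label << MPLS_LS_LABEL_SHIFT) & MPLS_LS_LABEL_MASK) |
		  (1 << MPLS_LS_S_SHIFT) |	/* bottom of stack */
		  ((u32)ttl << MPLS_LS_TTL_SHIFT);

	return skb_mpls_push(skb, cpu_to_be32(lse), htons(ETH_P_MPLS_UC),
			     skb->mac_len, true);
}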
5630
Eric Dumazet2e4e4412014-09-17 04:49:49 -07005631/**
John Hurleyed246ce2019-07-07 15:01:55 +01005632 * skb_mpls_pop() - pop the outermost MPLS header
5633 *
5634 * @skb: buffer
5635 * @next_proto: ethertype of header after popped MPLS header
Davide Carattifa4e0f82019-10-12 13:55:07 +02005636 * @mac_len: length of the MAC header
Martin Varghese76f99f92019-12-21 08:50:23 +05305637 * @ethernet: flag to indicate if the packet is ethernet
John Hurleyed246ce2019-07-07 15:01:55 +01005638 *
5639 * Expects skb->data at mac header.
5640 *
5641 * Returns 0 on success, -errno otherwise.
5642 */
Martin Varghese040b5cf2019-12-02 10:49:51 +05305643int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
5644 bool ethernet)
John Hurleyed246ce2019-07-07 15:01:55 +01005645{
5646 int err;
5647
5648 if (unlikely(!eth_p_mpls(skb->protocol)))
Davide Carattidedc5a02019-10-12 13:55:06 +02005649 return 0;
John Hurleyed246ce2019-07-07 15:01:55 +01005650
Davide Carattifa4e0f82019-10-12 13:55:07 +02005651 err = skb_ensure_writable(skb, mac_len + MPLS_HLEN);
John Hurleyed246ce2019-07-07 15:01:55 +01005652 if (unlikely(err))
5653 return err;
5654
5655 skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN);
5656 memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
Davide Carattifa4e0f82019-10-12 13:55:07 +02005657 mac_len);
John Hurleyed246ce2019-07-07 15:01:55 +01005658
5659 __skb_pull(skb, MPLS_HLEN);
5660 skb_reset_mac_header(skb);
Davide Carattifa4e0f82019-10-12 13:55:07 +02005661 skb_set_network_header(skb, mac_len);
John Hurleyed246ce2019-07-07 15:01:55 +01005662
Martin Varghese040b5cf2019-12-02 10:49:51 +05305663 if (ethernet) {
John Hurleyed246ce2019-07-07 15:01:55 +01005664 struct ethhdr *hdr;
5665
5666 /* use mpls_hdr() to get ethertype to account for VLANs. */
5667 hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN);
5668 skb_mod_eth_type(skb, hdr, next_proto);
5669 }
5670 skb->protocol = next_proto;
5671
5672 return 0;
5673}
5674EXPORT_SYMBOL_GPL(skb_mpls_pop);
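
/* A minimal sketch of the matching "pop_mpls" action: remove the outermost
 * label and restore the ethertype of the payload that follows; IPv4 is
 * assumed here purely for illustration. example_pop_mpls_to_ipv4() is
 * hypothetical.
 */
static int __maybe_unused example_pop_mpls_to_ipv4(struct sk_buff *skb)
{
	return skb_mpls_pop(skb, htons(ETH_P_IP), skb->mac_len, true);
}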
5675
5676/**
John Hurleyd27cf5c2019-07-07 15:01:56 +01005677 * skb_mpls_update_lse() - modify outermost MPLS header and update csum
5678 *
5679 * @skb: buffer
5680 * @mpls_lse: new MPLS label stack entry to update to
5681 *
5682 * Expects skb->data at mac header.
5683 *
5684 * Returns 0 on success, -errno otherwise.
5685 */
5686int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse)
5687{
5688 int err;
5689
5690 if (unlikely(!eth_p_mpls(skb->protocol)))
5691 return -EINVAL;
5692
5693 err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
5694 if (unlikely(err))
5695 return err;
5696
5697 if (skb->ip_summed == CHECKSUM_COMPLETE) {
5698 __be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse };
5699
5700 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
5701 }
5702
5703 mpls_hdr(skb)->label_stack_entry = mpls_lse;
5704
5705 return 0;
5706}
5707EXPORT_SYMBOL_GPL(skb_mpls_update_lse);
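
/* A minimal sketch around skb_mpls_update_lse(): rewrite only the 20-bit
 * label of the outermost stack entry while preserving the TC, bottom-of-stack
 * and TTL bits. example_set_mpls_label() is hypothetical.
 */
static int __maybe_unused example_set_mpls_label(struct sk_buff *skb,
						 u32 new_label)
{
	u32 lse;
	int err;

	if (unlikely(!eth_p_mpls(skb->protocol)))
		return -EINVAL;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry);
	lse &= ~MPLS_LS_LABEL_MASK;
	lse |= (new_label << MPLS_LS_LABEL_SHIFT) & MPLS_LS_LABEL_MASK;

	return skb_mpls_update_lse(skb, cpu_to_be32(lse));
}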
5708
5709/**
John Hurley2a2ea502019-07-07 15:01:57 +01005710 * skb_mpls_dec_ttl() - decrement the TTL of the outermost MPLS header
5711 *
5712 * @skb: buffer
5713 *
5714 * Expects skb->data at mac header.
5715 *
5716 * Returns 0 on success, -errno otherwise.
5717 */
5718int skb_mpls_dec_ttl(struct sk_buff *skb)
5719{
5720 u32 lse;
5721 u8 ttl;
5722
5723 if (unlikely(!eth_p_mpls(skb->protocol)))
5724 return -EINVAL;
5725
5726 lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry);
5727 ttl = (lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT;
5728 if (!--ttl)
5729 return -EINVAL;
5730
5731 lse &= ~MPLS_LS_TTL_MASK;
5732 lse |= ttl << MPLS_LS_TTL_SHIFT;
5733
5734 return skb_mpls_update_lse(skb, cpu_to_be32(lse));
5735}
5736EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl);
5737
5738/**
Eric Dumazet2e4e4412014-09-17 04:49:49 -07005739 * alloc_skb_with_frags - allocate skb with page frags
5740 *
Masanari Iidade3f0d02014-10-09 12:58:08 +09005741 * @header_len: size of linear part
5742 * @data_len: needed length in frags
5743 * @max_page_order: max page order desired.
5744 * @errcode: pointer to error code if any
5745 * @gfp_mask: allocation mask
Eric Dumazet2e4e4412014-09-17 04:49:49 -07005746 *
5747 * This can be used to allocate a paged skb, given a maximal order for frags.
5748 */
5749struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
5750 unsigned long data_len,
5751 int max_page_order,
5752 int *errcode,
5753 gfp_t gfp_mask)
5754{
5755 int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
5756 unsigned long chunk;
5757 struct sk_buff *skb;
5758 struct page *page;
Eric Dumazet2e4e4412014-09-17 04:49:49 -07005759 int i;
5760
5761 *errcode = -EMSGSIZE;
5762	/* Note this test could be relaxed, if we succeed in allocating
5763 * high order pages...
5764 */
5765 if (npages > MAX_SKB_FRAGS)
5766 return NULL;
5767
Eric Dumazet2e4e4412014-09-17 04:49:49 -07005768 *errcode = -ENOBUFS;
David Rientjesf8c468e2019-01-02 13:01:43 -08005769 skb = alloc_skb(header_len, gfp_mask);
Eric Dumazet2e4e4412014-09-17 04:49:49 -07005770 if (!skb)
5771 return NULL;
5772
5773 skb->truesize += npages << PAGE_SHIFT;
5774
5775 for (i = 0; npages > 0; i++) {
5776 int order = max_page_order;
5777
5778 while (order) {
5779 if (npages >= 1 << order) {
Mel Gormand0164ad2015-11-06 16:28:21 -08005780 page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
Eric Dumazet2e4e4412014-09-17 04:49:49 -07005781 __GFP_COMP |
Michal Hockod14b56f2018-06-28 17:53:06 +02005782 __GFP_NOWARN,
Eric Dumazet2e4e4412014-09-17 04:49:49 -07005783 order);
5784 if (page)
5785 goto fill_page;
5786 /* Do not retry other high order allocations */
5787 order = 1;
5788 max_page_order = 0;
5789 }
5790 order--;
5791 }
5792 page = alloc_page(gfp_mask);
5793 if (!page)
5794 goto failure;
5795fill_page:
5796 chunk = min_t(unsigned long, data_len,
5797 PAGE_SIZE << order);
5798 skb_fill_page_desc(skb, i, page, 0, chunk);
5799 data_len -= chunk;
5800 npages -= 1 << order;
5801 }
5802 return skb;
5803
5804failure:
5805 kfree_skb(skb);
5806 return NULL;
5807}
5808EXPORT_SYMBOL(alloc_skb_with_frags);
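
/* A minimal sketch of an alloc_skb_with_frags() caller: keep a small linear
 * part for headers and place the possibly large payload in page frags, the
 * way sock_alloc_send_pskb() style code does. The 128-byte headroom and
 * example_alloc_large_skb() itself are hypothetical.
 */
static __maybe_unused struct sk_buff *
example_alloc_large_skb(unsigned long payload_len, int *errcode)
{
	struct sk_buff *skb;

	skb = alloc_skb_with_frags(128, payload_len, PAGE_ALLOC_COSTLY_ORDER,
				   errcode, GFP_KERNEL);
	if (skb)
		skb_reserve(skb, 128);

	return skb;
}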
Sowmini Varadhan6fa01cc2016-04-22 18:36:35 -07005809
5810/* carve out the first off bytes from skb when off < headlen */
5811static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
5812 const int headlen, gfp_t gfp_mask)
5813{
5814 int i;
5815 int size = skb_end_offset(skb);
5816 int new_hlen = headlen - off;
5817 u8 *data;
Sowmini Varadhan6fa01cc2016-04-22 18:36:35 -07005818
5819 size = SKB_DATA_ALIGN(size);
5820
5821 if (skb_pfmemalloc(skb))
5822 gfp_mask |= __GFP_MEMALLOC;
5823 data = kmalloc_reserve(size +
5824 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
5825 gfp_mask, NUMA_NO_NODE, NULL);
5826 if (!data)
5827 return -ENOMEM;
5828
5829 size = SKB_WITH_OVERHEAD(ksize(data));
5830
5831 /* Copy real data, and all frags */
5832 skb_copy_from_linear_data_offset(skb, off, data, new_hlen);
5833 skb->len -= off;
5834
5835 memcpy((struct skb_shared_info *)(data + size),
5836 skb_shinfo(skb),
5837 offsetof(struct skb_shared_info,
5838 frags[skb_shinfo(skb)->nr_frags]));
5839 if (skb_cloned(skb)) {
5840 /* drop the old head gracefully */
5841 if (skb_orphan_frags(skb, gfp_mask)) {
5842 kfree(data);
5843 return -ENOMEM;
5844 }
5845 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
5846 skb_frag_ref(skb, i);
5847 if (skb_has_frag_list(skb))
5848 skb_clone_fraglist(skb);
5849 skb_release_data(skb);
5850 } else {
5851		/* we can reuse the existing refcount - all we did was
5852 * relocate values
5853 */
5854 skb_free_head(skb);
5855 }
5856
Sowmini Varadhan6fa01cc2016-04-22 18:36:35 -07005857 skb->head = data;
5858 skb->data = data;
5859 skb->head_frag = 0;
5860#ifdef NET_SKBUFF_DATA_USES_OFFSET
5861 skb->end = size;
Sowmini Varadhan6fa01cc2016-04-22 18:36:35 -07005862#else
5863 skb->end = skb->head + size;
5864#endif
5865 skb_set_tail_pointer(skb, skb_headlen(skb));
5866 skb_headers_offset_update(skb, 0);
5867 skb->cloned = 0;
5868 skb->hdr_len = 0;
5869 skb->nohdr = 0;
5870 atomic_set(&skb_shinfo(skb)->dataref, 1);
5871
5872 return 0;
5873}
5874
5875static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);
5876
5877/* carve out the first eat bytes from skb's frag_list. May recurse into
5878 * pskb_carve()
5879 */
5880static int pskb_carve_frag_list(struct sk_buff *skb,
5881 struct skb_shared_info *shinfo, int eat,
5882 gfp_t gfp_mask)
5883{
5884 struct sk_buff *list = shinfo->frag_list;
5885 struct sk_buff *clone = NULL;
5886 struct sk_buff *insp = NULL;
5887
5888 do {
5889 if (!list) {
5890 pr_err("Not enough bytes to eat. Want %d\n", eat);
5891 return -EFAULT;
5892 }
5893 if (list->len <= eat) {
5894 /* Eaten as whole. */
5895 eat -= list->len;
5896 list = list->next;
5897 insp = list;
5898 } else {
5899 /* Eaten partially. */
5900 if (skb_shared(list)) {
5901 clone = skb_clone(list, gfp_mask);
5902 if (!clone)
5903 return -ENOMEM;
5904 insp = list->next;
5905 list = clone;
5906 } else {
5907 /* This may be pulled without problems. */
5908 insp = list;
5909 }
5910 if (pskb_carve(list, eat, gfp_mask) < 0) {
5911 kfree_skb(clone);
5912 return -ENOMEM;
5913 }
5914 break;
5915 }
5916 } while (eat);
5917
5918 /* Free pulled out fragments. */
5919 while ((list = shinfo->frag_list) != insp) {
5920 shinfo->frag_list = list->next;
5921 kfree_skb(list);
5922 }
5923 /* And insert new clone at head. */
5924 if (clone) {
5925 clone->next = list;
5926 shinfo->frag_list = clone;
5927 }
5928 return 0;
5929}
5930
5931/* carve off first len bytes from skb. Split line (off) is in the
5932 * non-linear part of skb
5933 */
5934static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
5935 int pos, gfp_t gfp_mask)
5936{
5937 int i, k = 0;
5938 int size = skb_end_offset(skb);
5939 u8 *data;
5940 const int nfrags = skb_shinfo(skb)->nr_frags;
5941 struct skb_shared_info *shinfo;
Sowmini Varadhan6fa01cc2016-04-22 18:36:35 -07005942
5943 size = SKB_DATA_ALIGN(size);
5944
5945 if (skb_pfmemalloc(skb))
5946 gfp_mask |= __GFP_MEMALLOC;
5947 data = kmalloc_reserve(size +
5948 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
5949 gfp_mask, NUMA_NO_NODE, NULL);
5950 if (!data)
5951 return -ENOMEM;
5952
5953 size = SKB_WITH_OVERHEAD(ksize(data));
5954
5955 memcpy((struct skb_shared_info *)(data + size),
5956 skb_shinfo(skb), offsetof(struct skb_shared_info,
5957 frags[skb_shinfo(skb)->nr_frags]));
5958 if (skb_orphan_frags(skb, gfp_mask)) {
5959 kfree(data);
5960 return -ENOMEM;
5961 }
5962 shinfo = (struct skb_shared_info *)(data + size);
5963 for (i = 0; i < nfrags; i++) {
5964 int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]);
5965
5966 if (pos + fsize > off) {
5967 shinfo->frags[k] = skb_shinfo(skb)->frags[i];
5968
5969 if (pos < off) {
5970 /* Split frag.
5971 * We have two variants in this case:
5972				 * 1. Move the whole frag to the second
5973				 *    part, if that is possible, e.g. this
5974				 *    approach is mandatory for TUX,
5975				 *    where splitting is expensive.
5976				 * 2. Split the frag accurately; that is what we do here.
5977 */
Jonathan Lemonb54c9d52019-07-30 07:40:33 -07005978 skb_frag_off_add(&shinfo->frags[0], off - pos);
Sowmini Varadhan6fa01cc2016-04-22 18:36:35 -07005979 skb_frag_size_sub(&shinfo->frags[0], off - pos);
5980 }
5981 skb_frag_ref(skb, i);
5982 k++;
5983 }
5984 pos += fsize;
5985 }
5986 shinfo->nr_frags = k;
5987 if (skb_has_frag_list(skb))
5988 skb_clone_fraglist(skb);
5989
5990 if (k == 0) {
5991 /* split line is in frag list */
5992 pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask);
5993 }
5994 skb_release_data(skb);
5995
Sowmini Varadhan6fa01cc2016-04-22 18:36:35 -07005996 skb->head = data;
5997 skb->head_frag = 0;
5998 skb->data = data;
5999#ifdef NET_SKBUFF_DATA_USES_OFFSET
6000 skb->end = size;
Sowmini Varadhan6fa01cc2016-04-22 18:36:35 -07006001#else
6002 skb->end = skb->head + size;
6003#endif
6004 skb_reset_tail_pointer(skb);
6005 skb_headers_offset_update(skb, 0);
6006 skb->cloned = 0;
6007 skb->hdr_len = 0;
6008 skb->nohdr = 0;
6009 skb->len -= off;
6010 skb->data_len = skb->len;
6011 atomic_set(&skb_shinfo(skb)->dataref, 1);
6012 return 0;
6013}
6014
6015/* remove len bytes from the beginning of the skb */
6016static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp)
6017{
6018 int headlen = skb_headlen(skb);
6019
6020 if (len < headlen)
6021 return pskb_carve_inside_header(skb, len, headlen, gfp);
6022 else
6023 return pskb_carve_inside_nonlinear(skb, len, headlen, gfp);
6024}
6025
6026/* Extract to_copy bytes starting at off from skb, and return this in
6027 * a new skb
6028 */
6029struct sk_buff *pskb_extract(struct sk_buff *skb, int off,
6030 int to_copy, gfp_t gfp)
6031{
6032 struct sk_buff *clone = skb_clone(skb, gfp);
6033
6034 if (!clone)
6035 return NULL;
6036
6037 if (pskb_carve(clone, off, gfp) < 0 ||
6038 pskb_trim(clone, to_copy)) {
6039 kfree_skb(clone);
6040 return NULL;
6041 }
6042 return clone;
6043}
6044EXPORT_SYMBOL(pskb_extract);
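
/* A minimal sketch of pskb_extract(): carve len bytes, starting at offset
 * hdr_len, out of a received skb into a new skb, leaving the original
 * untouched. example_extract_payload() is hypothetical.
 */
static __maybe_unused struct sk_buff *
example_extract_payload(struct sk_buff *skb, int hdr_len, int len)
{
	if (hdr_len + len > skb->len)
		return NULL;

	return pskb_extract(skb, hdr_len, len, GFP_ATOMIC);
}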
Eric Dumazetc8c8b122016-12-07 09:19:33 -08006045
6046/**
6047 * skb_condense - try to get rid of fragments/frag_list if possible
6048 * @skb: buffer
6049 *
6050 * Can be used to save memory before skb is added to a busy queue.
6051 * If packet has bytes in frags and enough tail room in skb->head,
6052 * pull all of them, so that we can free the frags right now and adjust
6053 * truesize.
6054 * Notes:
6055 * We do not reallocate skb->head, thus this cannot fail.
6056 * Caller must re-evaluate skb->truesize if needed.
6057 */
6058void skb_condense(struct sk_buff *skb)
6059{
Eric Dumazet3174fed2016-12-09 08:02:05 -08006060 if (skb->data_len) {
6061 if (skb->data_len > skb->end - skb->tail ||
6062 skb_cloned(skb))
6063 return;
Eric Dumazetc8c8b122016-12-07 09:19:33 -08006064
Eric Dumazet3174fed2016-12-09 08:02:05 -08006065 /* Nice, we can free page frag(s) right now */
6066 __pskb_pull_tail(skb, skb->data_len);
6067 }
6068	/* At this point, skb->truesize might be overestimated,
6069 * because skb had a fragment, and fragments do not tell
6070 * their truesize.
6071 * When we pulled its content into skb->head, fragment
6072 * was freed, but __pskb_pull_tail() could not possibly
6073 * adjust skb->truesize, not knowing the frag truesize.
Eric Dumazetc8c8b122016-12-07 09:19:33 -08006074 */
6075 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
6076}
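
/* A minimal sketch of the intended skb_condense() call site: compact an skb
 * just before parking it on a potentially long queue, so page frags are
 * released early. example_queue_compact() is hypothetical and assumes the
 * caller already holds whatever lock protects the queue.
 */
static void __maybe_unused example_queue_compact(struct sk_buff_head *queue,
						 struct sk_buff *skb)
{
	skb_condense(skb);
	__skb_queue_tail(queue, skb);
}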
Florian Westphaldf5042f2018-12-18 17:15:16 +01006077
6078#ifdef CONFIG_SKB_EXTENSIONS
6079static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id)
6080{
6081 return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE);
6082}
6083
Paolo Abeni8b69a802020-01-09 07:59:24 -08006084/**
6085 * __skb_ext_alloc - allocate a new skb extensions storage
6086 *
Florian Westphal4930f482020-05-16 10:46:23 +02006087 * @flags: See kmalloc().
6088 *
Paolo Abeni8b69a802020-01-09 07:59:24 -08006089 * Returns the newly allocated pointer. The pointer can later be attached to
6090 * an skb via __skb_ext_set().
6091 * Note: caller must handle the skb_ext as opaque data.
6092 */
Florian Westphal4930f482020-05-16 10:46:23 +02006093struct skb_ext *__skb_ext_alloc(gfp_t flags)
Florian Westphaldf5042f2018-12-18 17:15:16 +01006094{
Florian Westphal4930f482020-05-16 10:46:23 +02006095 struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, flags);
Florian Westphaldf5042f2018-12-18 17:15:16 +01006096
6097 if (new) {
6098 memset(new->offset, 0, sizeof(new->offset));
6099 refcount_set(&new->refcnt, 1);
6100 }
6101
6102 return new;
6103}
6104
Florian Westphal41650792018-12-18 17:15:27 +01006105static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old,
6106 unsigned int old_active)
Florian Westphaldf5042f2018-12-18 17:15:16 +01006107{
6108 struct skb_ext *new;
6109
6110 if (refcount_read(&old->refcnt) == 1)
6111 return old;
6112
6113 new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC);
6114 if (!new)
6115 return NULL;
6116
6117 memcpy(new, old, old->chunks * SKB_EXT_ALIGN_VALUE);
6118 refcount_set(&new->refcnt, 1);
6119
Florian Westphal41650792018-12-18 17:15:27 +01006120#ifdef CONFIG_XFRM
6121 if (old_active & (1 << SKB_EXT_SEC_PATH)) {
6122 struct sec_path *sp = skb_ext_get_ptr(old, SKB_EXT_SEC_PATH);
6123 unsigned int i;
6124
6125 for (i = 0; i < sp->len; i++)
6126 xfrm_state_hold(sp->xvec[i]);
6127 }
6128#endif
Florian Westphaldf5042f2018-12-18 17:15:16 +01006129 __skb_ext_put(old);
6130 return new;
6131}
6132
6133/**
Paolo Abeni8b69a802020-01-09 07:59:24 -08006134 * __skb_ext_set - attach the specified extension storage to this skb
6135 * @skb: buffer
6136 * @id: extension id
6137 * @ext: extension storage previously allocated via __skb_ext_alloc()
6138 *
6139 * Existing extensions, if any, are cleared.
6140 *
6141 * Returns the pointer to the extension.
6142 */
6143void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id,
6144 struct skb_ext *ext)
6145{
6146 unsigned int newlen, newoff = SKB_EXT_CHUNKSIZEOF(*ext);
6147
6148 skb_ext_put(skb);
6149 newlen = newoff + skb_ext_type_len[id];
6150 ext->chunks = newlen;
6151 ext->offset[id] = newoff;
6152 skb->extensions = ext;
6153 skb->active_extensions = 1 << id;
6154 return skb_ext_get_ptr(ext, id);
6155}
6156
6157/**
Florian Westphaldf5042f2018-12-18 17:15:16 +01006158 * skb_ext_add - allocate space for given extension, COW if needed
6159 * @skb: buffer
6160 * @id: extension to allocate space for
6161 *
6162 * Allocates enough space for the given extension.
6163 * If the extension is already present, a pointer to that extension
6164 * is returned.
6165 *
6166 * If the skb was cloned, COW applies and the returned memory can be
6167 * modified without changing the extension space of cloned buffers.
6168 *
6169 * Returns pointer to the extension or NULL on allocation failure.
6170 */
6171void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id)
6172{
6173 struct skb_ext *new, *old = NULL;
6174 unsigned int newlen, newoff;
6175
6176 if (skb->active_extensions) {
6177 old = skb->extensions;
6178
Florian Westphal41650792018-12-18 17:15:27 +01006179 new = skb_ext_maybe_cow(old, skb->active_extensions);
Florian Westphaldf5042f2018-12-18 17:15:16 +01006180 if (!new)
6181 return NULL;
6182
Paolo Abeni682ec852018-12-21 19:03:15 +01006183 if (__skb_ext_exist(new, id))
Florian Westphaldf5042f2018-12-18 17:15:16 +01006184 goto set_active;
Florian Westphaldf5042f2018-12-18 17:15:16 +01006185
Paolo Abenie94e50b2018-12-21 19:03:13 +01006186 newoff = new->chunks;
Florian Westphaldf5042f2018-12-18 17:15:16 +01006187 } else {
6188 newoff = SKB_EXT_CHUNKSIZEOF(*new);
6189
Florian Westphal4930f482020-05-16 10:46:23 +02006190 new = __skb_ext_alloc(GFP_ATOMIC);
Florian Westphaldf5042f2018-12-18 17:15:16 +01006191 if (!new)
6192 return NULL;
6193 }
6194
6195 newlen = newoff + skb_ext_type_len[id];
6196 new->chunks = newlen;
6197 new->offset[id] = newoff;
Florian Westphaldf5042f2018-12-18 17:15:16 +01006198set_active:
Paolo Abeni682ec852018-12-21 19:03:15 +01006199 skb->extensions = new;
Florian Westphaldf5042f2018-12-18 17:15:16 +01006200 skb->active_extensions |= 1 << id;
6201 return skb_ext_get_ptr(new, id);
6202}
6203EXPORT_SYMBOL(skb_ext_add);
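
/* A minimal sketch of an skb_ext_add() caller, mirroring what the xfrm input
 * path does with the secure-path extension: request (or COW) the storage and
 * initialise it. example_attach_sec_path() is hypothetical and assumes the
 * skb carried no sec path yet.
 */
#ifdef CONFIG_XFRM
static __maybe_unused struct sec_path *
example_attach_sec_path(struct sk_buff *skb)
{
	struct sec_path *sp = skb_ext_add(skb, SKB_EXT_SEC_PATH);

	if (sp)
		sp->len = 0;	/* no xfrm states recorded yet */

	return sp;
}
#endif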
6204
Florian Westphal41650792018-12-18 17:15:27 +01006205#ifdef CONFIG_XFRM
6206static void skb_ext_put_sp(struct sec_path *sp)
6207{
6208 unsigned int i;
6209
6210 for (i = 0; i < sp->len; i++)
6211 xfrm_state_put(sp->xvec[i]);
6212}
6213#endif
6214
Florian Westphaldf5042f2018-12-18 17:15:16 +01006215void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
6216{
6217 struct skb_ext *ext = skb->extensions;
6218
6219 skb->active_extensions &= ~(1 << id);
6220 if (skb->active_extensions == 0) {
6221 skb->extensions = NULL;
6222 __skb_ext_put(ext);
Florian Westphal41650792018-12-18 17:15:27 +01006223#ifdef CONFIG_XFRM
6224 } else if (id == SKB_EXT_SEC_PATH &&
6225 refcount_read(&ext->refcnt) == 1) {
6226 struct sec_path *sp = skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH);
6227
6228 skb_ext_put_sp(sp);
6229 sp->len = 0;
6230#endif
Florian Westphaldf5042f2018-12-18 17:15:16 +01006231 }
6232}
6233EXPORT_SYMBOL(__skb_ext_del);
6234
6235void __skb_ext_put(struct skb_ext *ext)
6236{
6237	/* If this is the last clone, nothing can increment
6238	 * it after the check passes. Avoids one atomic op.
6239 */
6240 if (refcount_read(&ext->refcnt) == 1)
6241 goto free_now;
6242
6243 if (!refcount_dec_and_test(&ext->refcnt))
6244 return;
6245free_now:
Florian Westphal41650792018-12-18 17:15:27 +01006246#ifdef CONFIG_XFRM
6247 if (__skb_ext_exist(ext, SKB_EXT_SEC_PATH))
6248 skb_ext_put_sp(skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH));
6249#endif
6250
Florian Westphaldf5042f2018-12-18 17:15:16 +01006251 kmem_cache_free(skbuff_ext_cache, ext);
6252}
6253EXPORT_SYMBOL(__skb_ext_put);
6254#endif /* CONFIG_SKB_EXTENSIONS */