Thomas Gleixner2874c5f2019-05-27 08:55:01 +02001/* SPDX-License-Identifier: GPL-2.0-or-later */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002/*
3 * Definitions for the 'struct sk_buff' memory handlers.
4 *
5 * Authors:
6 * Alan Cox, <gw4pts@gw4pts.ampr.org>
7 * Florian La Roche, <rzsfl@rz.uni-sb.de>
Linus Torvalds1da177e2005-04-16 15:20:36 -07008 */
9
10#ifndef _LINUX_SKBUFF_H
11#define _LINUX_SKBUFF_H
12
Linus Torvalds1da177e2005-04-16 15:20:36 -070013#include <linux/kernel.h>
14#include <linux/compiler.h>
15#include <linux/time.h>
Paul Gortmaker187f1882011-11-23 20:12:59 -050016#include <linux/bug.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070017#include <linux/cache.h>
Eric Dumazet56b17422014-11-03 08:19:53 -080018#include <linux/rbtree.h>
David S. Miller51f3d022014-11-05 16:46:40 -050019#include <linux/socket.h>
Eric Dumazetc1d1b432017-08-31 16:48:22 -070020#include <linux/refcount.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070021
Arun Sharma600634972011-07-26 16:09:06 -070022#include <linux/atomic.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070023#include <asm/types.h>
24#include <linux/spinlock.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070025#include <linux/net.h>
Thomas Graf3fc7e8a2005-06-23 21:00:17 -070026#include <linux/textsearch.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070027#include <net/checksum.h>
Al Viroa80958f2006-12-04 20:41:19 +000028#include <linux/rcupdate.h>
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -070029#include <linux/hrtimer.h>
Ian Campbell131ea662011-08-19 06:25:00 +000030#include <linux/dma-mapping.h>
Michał Mirosławc8f44af2011-11-15 15:29:55 +000031#include <linux/netdev_features.h>
Eric Dumazet363ec39232014-02-26 14:02:11 -080032#include <linux/sched.h>
Ingo Molnare6017572017-02-01 16:36:40 +010033#include <linux/sched/clock.h>
Jiri Pirko1bd758e2015-05-12 14:56:07 +020034#include <net/flow_dissector.h>
Hannes Frederic Sowaa60e3cc2015-05-21 17:00:00 +020035#include <linux/splice.h>
Bernhard Thaler72b31f72015-05-30 15:27:40 +020036#include <linux/in6.h>
Jamal Hadi Salim8b10cab2016-07-02 06:43:14 -040037#include <linux/if_packet.h>
Tom Herbertf70ea012015-07-31 16:52:10 -070038#include <net/flow.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
Tom Herbert7a6ae712015-12-14 11:19:47 -080040/* The interface for checksum offload between the stack and networking drivers
41 * is as follows...
42 *
43 * A. IP checksum related features
44 *
45 * Drivers advertise checksum offload capabilities in the features of a device.
46 * From the stack's point of view these are capabilities offered by the driver;
47 * a driver typically only advertises features that it is capable of offloading
48 * to its device.
49 *
50 * The checksum related features are:
51 *
52 * NETIF_F_HW_CSUM - The driver (or its device) is able to compute one
53 * IP (one's complement) checksum for any combination
54 * of protocols or protocol layering. The checksum is
55 * computed and set in a packet per the CHECKSUM_PARTIAL
56 * interface (see below).
57 *
58 * NETIF_F_IP_CSUM - Driver (device) is only able to checksum plain
59 * TCP or UDP packets over IPv4. These are specifically
60 * unencapsulated packets of the form IPv4|TCP or
61 * IPv4|UDP where the Protocol field in the IPv4 header
62 * is TCP or UDP. The IPv4 header may contain IP options.
63 * This feature cannot be set in features for a device
64 * with NETIF_F_HW_CSUM also set. This feature is being
65 * DEPRECATED (see below).
66 *
67 * NETIF_F_IPV6_CSUM - Driver (device) is only able to checksum plain
68 * TCP or UDP packets over IPv6. These are specifically
69 * unencapsulated packets of the form IPv6|TCP or
70 * IPv6|UDP where the Next Header field in the IPv6
71 * header is either TCP or UDP. IPv6 extension headers
72 * are not supported with this feature. This feature
73 * cannot be set in features for a device with
74 * NETIF_F_HW_CSUM also set. This feature is being
75 * DEPRECATED (see below).
76 *
77 * NETIF_F_RXCSUM - Driver (device) performs receive checksum offload.
78 * This flag is used only to disable the RX checksum
79 * feature for a device. The stack will accept receive
80 * checksum indication in packets received on a device
81 * regardless of whether NETIF_F_RXCSUM is set.
82 *
83 * B. Checksumming of received packets by device. Indication of checksum
84 * verification is set in skb->ip_summed. Possible values are:
Daniel Borkmann78ea85f2013-12-16 23:27:09 +010085 *
86 * CHECKSUM_NONE:
87 *
Tom Herbert7a6ae712015-12-14 11:19:47 -080088 * Device did not checksum this packet e.g. due to lack of capabilities.
Daniel Borkmann78ea85f2013-12-16 23:27:09 +010089 * The packet contains a full (though not verified) checksum in the packet data but
90 * not in skb->csum. Thus, skb->csum is undefined in this case.
91 *
92 * CHECKSUM_UNNECESSARY:
93 *
94 * The hardware you're dealing with doesn't calculate the full checksum
95 * (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums
Tom Herbert77cffe22014-08-27 21:26:46 -070096 * for specific protocols. For such packets it will set CHECKSUM_UNNECESSARY
97 * if their checksums are okay. skb->csum is still undefined in this case
Tom Herbert7a6ae712015-12-14 11:19:47 -080098 * though. A driver or device must never modify the checksum field in the
99 * packet even if checksum is verified.
Tom Herbert77cffe22014-08-27 21:26:46 -0700100 *
101 * CHECKSUM_UNNECESSARY is applicable to following protocols:
102 * TCP: IPv6 and IPv4.
103 * UDP: IPv4 and IPv6. A device may apply CHECKSUM_UNNECESSARY to a
104 * zero UDP checksum for either IPv4 or IPv6; the networking stack
105 * may perform further validation in this case.
106 * GRE: only if the checksum is present in the header.
107 * SCTP: indicates the CRC in SCTP header has been validated.
Davide Carattib4759dc2017-05-18 15:44:43 +0200108 * FCOE: indicates the CRC in FC frame has been validated.
Tom Herbert77cffe22014-08-27 21:26:46 -0700109 *
110 * skb->csum_level indicates the number of consecutive checksums found in
111 * the packet minus one that have been verified as CHECKSUM_UNNECESSARY.
112 * For instance if a device receives an IPv6->UDP->GRE->IPv4->TCP packet
113 * and the device is able to verify the checksums for UDP (possibly zero),
114 * GRE (checksum flag is set), and TCP, then skb->csum_level would be set to
115 * two. If the device were only able to verify the UDP checksum and not
116 * GRE, either because it doesn't support GRE checksum or because the GRE
117 * checksum is bad, skb->csum_level would be set to zero (TCP checksum is
118 * not considered in this case).
Daniel Borkmann78ea85f2013-12-16 23:27:09 +0100119 *
120 * CHECKSUM_COMPLETE:
121 *
122 * This is the most generic way. The device supplied the checksum of the _whole_
123 * packet as seen by netif_rx() and filled it in skb->csum. This means the
124 * hardware doesn't need to parse L3/L4 headers to implement this.
125 *
Davide Carattib4759dc2017-05-18 15:44:43 +0200126 * Notes:
127 * - Even if device supports only some protocols, but is able to produce
128 * skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY.
129 * - CHECKSUM_COMPLETE is not applicable to SCTP and FCoE protocols.
Daniel Borkmann78ea85f2013-12-16 23:27:09 +0100130 *
131 * CHECKSUM_PARTIAL:
132 *
Tom Herbert6edec0e2015-02-10 16:30:28 -0800133 * A checksum is set up to be offloaded to a device as described in the
134 * output description for CHECKSUM_PARTIAL. This may occur on a packet
Daniel Borkmann78ea85f2013-12-16 23:27:09 +0100135 * received directly from another Linux OS, e.g., a virtualized Linux kernel
Tom Herbert6edec0e2015-02-10 16:30:28 -0800136 * on the same host, or it may be set in the input path in GRO or remote
137 * checksum offload. For the purposes of checksum verification, the checksum
138 * referred to by skb->csum_start + skb->csum_offset and any preceding
139 * checksums in the packet are considered verified. Any checksums in the
140 * packet that are after the checksum being offloaded are not considered to
141 * be verified.
Daniel Borkmann78ea85f2013-12-16 23:27:09 +0100142 *
Tom Herbert7a6ae712015-12-14 11:19:47 -0800143 * C. Checksumming on transmit for non-GSO. The stack requests checksum offload
144 * in the skb->ip_summed for a packet. Values are:
145 *
146 * CHECKSUM_PARTIAL:
147 *
148 * The driver is required to checksum the packet as seen by hard_start_xmit()
149 * from skb->csum_start up to the end, and to record/write the checksum at
150 * offset skb->csum_start + skb->csum_offset. A driver may verify that the
151 * csum_start and csum_offset values are valid given the length and
152 * offset of the packet; however, it should not attempt to validate that the
153 * checksum refers to a legitimate transport layer checksum -- it is the
154 * purview of the stack to validate that csum_start and csum_offset are set
155 * correctly.
156 *
157 * When the stack requests checksum offload for a packet, the driver MUST
158 * ensure that the checksum is set correctly. A driver can either offload the
159 * checksum calculation to the device, or call skb_checksum_help (in the case
160 * that the device does not support offload for a particular checksum).
161 *
162 * NETIF_F_IP_CSUM and NETIF_F_IPV6_CSUM are being deprecated in favor of
163 * NETIF_F_HW_CSUM. New devices should use NETIF_F_HW_CSUM to indicate
Davide Caratti43c26a12017-05-18 15:44:41 +0200164 * checksum offload capability.
165 * skb_csum_hwoffload_help() can be called to resolve CHECKSUM_PARTIAL based
166 * on network device checksumming capabilities: if a packet does not match
167 * them, skb_checksum_help or skb_crc32c_csum_help (depending on the value of
168 * csum_not_inet, see item D.) is called to resolve the checksum.
Daniel Borkmann78ea85f2013-12-16 23:27:09 +0100169 *
170 * CHECKSUM_NONE:
171 *
172 * The skb was already checksummed by the protocol, or a checksum is not
173 * required.
174 *
Daniel Borkmann78ea85f2013-12-16 23:27:09 +0100175 * CHECKSUM_UNNECESSARY:
176 *
Tom Herbert7a6ae712015-12-14 11:19:47 -0800177 * This has the same meaning as CHECKSUM_NONE for checksum offload on
178 * output.
Daniel Borkmann78ea85f2013-12-16 23:27:09 +0100179 *
Tom Herbert7a6ae712015-12-14 11:19:47 -0800180 * CHECKSUM_COMPLETE:
181 * Not used in checksum output. If a driver observes a packet with this value
182 * set in the skbuff, it should be treated as if CHECKSUM_NONE were set.
Daniel Borkmann78ea85f2013-12-16 23:27:09 +0100183 *
Tom Herbert7a6ae712015-12-14 11:19:47 -0800184 * D. Non-IP checksum (CRC) offloads
185 *
186 * NETIF_F_SCTP_CRC - This feature indicates that a device is capable of
187 * offloading the SCTP CRC in a packet. To perform this offload the stack
Davide Carattidba00302017-05-18 15:44:40 +0200188 * will set csum_start and csum_offset accordingly, set ip_summed to
189 * CHECKSUM_PARTIAL and set csum_not_inet to 1, to provide an indication in
190 * the skbuff that the CHECKSUM_PARTIAL refers to CRC32c.
191 * A driver that supports both IP checksum offload and SCTP CRC32c offload
192 * must verify which offload is configured for a packet by testing the
193 * value of skb->csum_not_inet; skb_crc32c_csum_help is provided to resolve
194 * CHECKSUM_PARTIAL on skbs where csum_not_inet is set to 1.
Tom Herbert7a6ae712015-12-14 11:19:47 -0800195 *
196 * NETIF_F_FCOE_CRC - This feature indicates that a device is capable of
197 * offloading the FCOE CRC in a packet. To perform this offload the stack
198 * will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset
199 * accordingly. Note that there is no indication in the skbuff that the
200 * CHECKSUM_PARTIAL refers to an FCOE checksum; a driver that supports
201 * both IP checksum offload and FCOE CRC offload must verify which offload
202 * is configured for a packet, presumably by inspecting packet headers.
203 *
204 * E. Checksumming on output with GSO.
205 *
206 * In the case of a GSO packet (skb_is_gso(skb) is true), checksum offload
207 * is implied by the SKB_GSO_* flags in gso_type. Most obviously, if the
208 * gso_type is SKB_GSO_TCPV4 or SKB_GSO_TCPV6, TCP checksum offload as
209 * part of the GSO operation is implied. If a checksum is being offloaded
210 * with GSO then ip_summed is CHECKSUM_PARTIAL, and csum_start and csum_offset
211 * are set to refer to the outermost checksum being offloaded (two offloaded
212 * checksums are possible with UDP encapsulation).
Daniel Borkmann78ea85f2013-12-16 23:27:09 +0100213 */
214
Herbert Xu60476372007-04-09 11:59:39 -0700215/* Don't change this without changing skb_csum_unnecessary! */
Daniel Borkmann78ea85f2013-12-16 23:27:09 +0100216#define CHECKSUM_NONE 0
217#define CHECKSUM_UNNECESSARY 1
218#define CHECKSUM_COMPLETE 2
219#define CHECKSUM_PARTIAL 3
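/*
 * Example (illustrative sketch): a driver's receive path might report the
 * checksum status described in section B roughly as below. foo_rx_desc,
 * foo_csum_verified() and foo_csum_value() are hypothetical accessors for a
 * device's completion descriptor, not part of any real driver.
 *
 *	static void foo_rx_checksum(struct net_device *dev,
 *				    struct foo_rx_desc *desc, struct sk_buff *skb)
 *	{
 *		skb->ip_summed = CHECKSUM_NONE;		// default: stack verifies
 *
 *		if (!(dev->features & NETIF_F_RXCSUM))
 *			return;
 *
 *		if (foo_csum_verified(desc)) {
 *			// device parsed headers and validated the L4 checksum
 *			skb->ip_summed = CHECKSUM_UNNECESSARY;
 *			skb->csum_level = 0;		// outermost checksum only
 *		} else if (foo_csum_value(desc)) {
 *			// device summed the whole packet; the stack finishes the job
 *			skb->ip_summed = CHECKSUM_COMPLETE;
 *			skb->csum = csum_unfold((__force __sum16)foo_csum_value(desc));
 *		}
 *	}
 */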
Linus Torvalds1da177e2005-04-16 15:20:36 -0700220
Tom Herbert77cffe22014-08-27 21:26:46 -0700221/* Maximum value in skb->csum_level */
222#define SKB_MAX_CSUM_LEVEL 3
223
Tobias Klauser0bec8c82014-07-22 12:06:23 +0200224#define SKB_DATA_ALIGN(X) ALIGN(X, SMP_CACHE_BYTES)
David S. Millerfc910a22007-03-25 20:27:59 -0700225#define SKB_WITH_OVERHEAD(X) \
Herbert Xudeea84b2007-10-21 16:27:46 -0700226 ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
David S. Millerfc910a22007-03-25 20:27:59 -0700227#define SKB_MAX_ORDER(X, ORDER) \
228 SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700229#define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X), 0))
230#define SKB_MAX_ALLOC (SKB_MAX_ORDER(0, 2))
231
Eric Dumazet87fb4b72011-10-13 07:28:54 +0000232/* return minimum truesize of one skb containing X bytes of data */
233#define SKB_TRUESIZE(X) ((X) + \
234 SKB_DATA_ALIGN(sizeof(struct sk_buff)) + \
235 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
236
Linus Torvalds1da177e2005-04-16 15:20:36 -0700237struct net_device;
David Howells716ea3a2007-04-02 20:19:53 -0700238struct scatterlist;
Jens Axboe9c55e012007-11-06 23:30:13 -0800239struct pipe_inode_info;
Herbert Xua8f820aa2014-11-07 21:22:22 +0800240struct iov_iter;
Alexander Duyckfd11a832014-12-09 19:40:49 -0800241struct napi_struct;
Petar Penkovd58e4682018-09-14 07:46:18 -0700242struct bpf_prog;
243union bpf_attr;
Florian Westphaldf5042f2018-12-18 17:15:16 +0100244struct skb_ext;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700245
Yasuyuki Kozakai5f79e0f2007-03-23 11:17:07 -0700246#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700247struct nf_conntrack {
248 atomic_t use;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700249};
Yasuyuki Kozakai5f79e0f2007-03-23 11:17:07 -0700250#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700251
Pablo Neira Ayuso34666d42014-09-18 11:29:03 +0200252#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700253struct nf_bridge_info {
Florian Westphal3eaf4022015-04-02 14:31:44 +0200254 enum {
255 BRNF_PROTO_UNCHANGED,
256 BRNF_PROTO_8021Q,
257 BRNF_PROTO_PPPOE
Florian Westphal7fb48c52015-05-03 22:05:28 +0200258 } orig_proto:8;
Florian Westphal72b1e5e2015-07-23 16:21:30 +0200259 u8 pkt_otherhost:1;
260 u8 in_prerouting:1;
261 u8 bridged_dnat:1;
Bernhard Thaler411ffb42015-05-30 15:28:28 +0200262 __u16 frag_max_size;
Eric Dumazetbf1ac5c2012-04-18 23:19:25 +0000263 struct net_device *physindev;
Florian Westphal63cdbc02015-09-14 17:06:27 +0200264
265 /* always valid & non-NULL from FORWARD on, for physdev match */
266 struct net_device *physoutdev;
Florian Westphal7fb48c52015-05-03 22:05:28 +0200267 union {
Florian Westphal72b1e5e2015-07-23 16:21:30 +0200268 /* prerouting: detect dnat in orig/reply direction */
Bernhard Thaler72b31f72015-05-30 15:27:40 +0200269 __be32 ipv4_daddr;
270 struct in6_addr ipv6_daddr;
Florian Westphal72b1e5e2015-07-23 16:21:30 +0200271
272 /* after prerouting + nat detected: store original source
273 * mac since neigh resolution overwrites it, only used while
274 * skb is out in neigh layer.
275 */
276 char neigh_header[8];
Bernhard Thaler72b31f72015-05-30 15:27:40 +0200277 };
Linus Torvalds1da177e2005-04-16 15:20:36 -0700278};
279#endif
280
Linus Torvalds1da177e2005-04-16 15:20:36 -0700281struct sk_buff_head {
282 /* These two members must be first. */
283 struct sk_buff *next;
284 struct sk_buff *prev;
285
286 __u32 qlen;
287 spinlock_t lock;
288};
289
290struct sk_buff;
291
Ian Campbell9d4dde52011-12-22 23:39:14 +0000292/* To allow a 64K frame to be packed as a single skb without frag_list we
293 * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
294 * buffers which do not start on a page boundary.
295 *
296 * Since GRO uses frags we allocate at least 16 regardless of page
297 * size.
Anton Blancharda715dea2011-03-27 14:57:26 +0000298 */
Ian Campbell9d4dde52011-12-22 23:39:14 +0000299#if (65536/PAGE_SIZE + 1) < 16
David S. Millereec00952011-03-29 23:34:08 -0700300#define MAX_SKB_FRAGS 16UL
Anton Blancharda715dea2011-03-27 14:57:26 +0000301#else
Ian Campbell9d4dde52011-12-22 23:39:14 +0000302#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
Anton Blancharda715dea2011-03-27 14:57:26 +0000303#endif
Hans Westgaard Ry5f74f82e2016-02-03 09:26:57 +0100304extern int sysctl_max_skb_frags;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700305
Marcelo Ricardo Leitner3953c462016-06-02 15:05:40 -0300306/* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to
307 * segment using its current segmentation instead.
308 */
309#define GSO_BY_FRAGS 0xFFFF
310
Linus Torvalds1da177e2005-04-16 15:20:36 -0700311typedef struct skb_frag_struct skb_frag_t;
312
313struct skb_frag_struct {
Ian Campbella8605c62011-10-19 23:01:49 +0000314 struct {
315 struct page *p;
316 } page;
Eric Dumazetcb4dfe52010-09-23 05:06:54 +0000317#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
David S. Millera309bb02007-07-30 18:47:03 -0700318 __u32 page_offset;
319 __u32 size;
Eric Dumazetcb4dfe52010-09-23 05:06:54 +0000320#else
321 __u16 page_offset;
322 __u16 size;
323#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700324};
325
Pedro Tammela161e6132019-03-05 11:35:54 -0300326/**
327 * skb_frag_size - Returns the size of a skb fragment
328 * @frag: skb fragment
329 */
Eric Dumazet9e903e02011-10-18 21:00:24 +0000330static inline unsigned int skb_frag_size(const skb_frag_t *frag)
331{
332 return frag->size;
333}
334
Pedro Tammela161e6132019-03-05 11:35:54 -0300335/**
336 * skb_frag_size_set - Sets the size of a skb fragment
337 * @frag: skb fragment
338 * @size: size of fragment
339 */
Eric Dumazet9e903e02011-10-18 21:00:24 +0000340static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
341{
342 frag->size = size;
343}
344
Pedro Tammela161e6132019-03-05 11:35:54 -0300345/**
346 * skb_frag_size_add - Increments the size of a skb fragment by %delta
347 * @frag: skb fragment
348 * @delta: value to add
349 */
Eric Dumazet9e903e02011-10-18 21:00:24 +0000350static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
351{
352 frag->size += delta;
353}
354
Pedro Tammela161e6132019-03-05 11:35:54 -0300355/**
356 * skb_frag_size_sub - Decrements the size of a skb fragment by %delta
357 * @frag: skb fragment
358 * @delta: value to subtract
359 */
Eric Dumazet9e903e02011-10-18 21:00:24 +0000360static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
361{
362 frag->size -= delta;
363}
364
Pedro Tammela161e6132019-03-05 11:35:54 -0300365/**
366 * skb_frag_must_loop - Test if %p is a high memory page
367 * @p: fragment's page
368 */
Willem de Bruijnc613c202017-07-31 08:15:47 -0400369static inline bool skb_frag_must_loop(struct page *p)
370{
371#if defined(CONFIG_HIGHMEM)
372 if (PageHighMem(p))
373 return true;
374#endif
375 return false;
376}
377
378/**
379 * skb_frag_foreach_page - loop over pages in a fragment
380 *
381 * @f: skb frag to operate on
382 * @f_off: offset from start of f->page.p
383 * @f_len: length from f_off to loop over
384 * @p: (temp var) current page
385 * @p_off: (temp var) offset from start of current page,
386 * non-zero only on first page.
387 * @p_len: (temp var) length in current page,
388 * < PAGE_SIZE only on first and last page.
389 * @copied: (temp var) length so far, excluding current p_len.
390 *
391 * A fragment can hold a compound page, in which case per-page
392 * operations, notably kmap_atomic, must be called for each
393 * regular page.
394 */
395#define skb_frag_foreach_page(f, f_off, f_len, p, p_off, p_len, copied) \
396 for (p = skb_frag_page(f) + ((f_off) >> PAGE_SHIFT), \
397 p_off = (f_off) & (PAGE_SIZE - 1), \
398 p_len = skb_frag_must_loop(p) ? \
399 min_t(u32, f_len, PAGE_SIZE - p_off) : f_len, \
400 copied = 0; \
401 copied < f_len; \
402 copied += p_len, p++, p_off = 0, \
403		 p_len = min_t(u32, f_len - copied, PAGE_SIZE))
404
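/*
 * Example (illustrative sketch): copying out a fragment that may hold a
 * compound highmem page, one regular page at a time, assuming
 * <linux/highmem.h> for kmap_atomic(). The destination buffer @dst is a
 * placeholder supplied by the caller.
 *
 *	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 *	u32 p_off, p_len, copied;
 *	struct page *p;
 *	u8 *vaddr;
 *
 *	skb_frag_foreach_page(frag, frag->page_offset, skb_frag_size(frag),
 *			      p, p_off, p_len, copied) {
 *		vaddr = kmap_atomic(p);
 *		memcpy(dst + copied, vaddr + p_off, p_len);
 *		kunmap_atomic(vaddr);
 *	}
 */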
Patrick Ohlyac45f602009-02-12 05:03:37 +0000405#define HAVE_HW_TIME_STAMP
406
407/**
Randy Dunlapd3a21be2009-03-02 03:15:58 -0800408 * struct skb_shared_hwtstamps - hardware time stamps
Patrick Ohlyac45f602009-02-12 05:03:37 +0000409 * @hwtstamp: hardware time stamp transformed into duration
410 * since arbitrary point in time
Patrick Ohlyac45f602009-02-12 05:03:37 +0000411 *
412 * Software time stamps generated by ktime_get_real() are stored in
Willem de Bruijn4d276eb2014-07-25 18:01:32 -0400413 * skb->tstamp.
Patrick Ohlyac45f602009-02-12 05:03:37 +0000414 *
415 * hwtstamps can only be compared against other hwtstamps from
416 * the same device.
417 *
418 * This structure is attached to packets as part of the
419 * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
420 */
421struct skb_shared_hwtstamps {
422 ktime_t hwtstamp;
Patrick Ohlyac45f602009-02-12 05:03:37 +0000423};
424
Oliver Hartkopp2244d072010-08-17 08:59:14 +0000425/* Definitions for tx_flags in struct skb_shared_info */
426enum {
427 /* generate hardware time stamp */
428 SKBTX_HW_TSTAMP = 1 << 0,
429
Willem de Bruijne7fd2882014-08-04 22:11:48 -0400430 /* generate software time stamp when queueing packet to NIC */
Oliver Hartkopp2244d072010-08-17 08:59:14 +0000431 SKBTX_SW_TSTAMP = 1 << 1,
432
433 /* device driver is going to provide hardware time stamp */
434 SKBTX_IN_PROGRESS = 1 << 2,
435
Shirley Maa6686f22011-07-06 12:22:12 +0000436 /* device driver supports TX zero-copy buffers */
Eric Dumazet62b1a8a2012-06-14 06:42:44 +0000437 SKBTX_DEV_ZEROCOPY = 1 << 3,
Johannes Berg6e3e9392011-11-09 10:15:42 +0100438
439 /* generate wifi status information (where possible) */
Eric Dumazet62b1a8a2012-06-14 06:42:44 +0000440 SKBTX_WIFI_STATUS = 1 << 4,
Pravin B Shelarc9af6db2013-02-11 09:27:41 +0000441
442 /* This indicates at least one fragment might be overwritten
443 * (as in vmsplice(), sendfile() ...)
444 * If we need to compute a TX checksum, we'll need to copy
445 * all frags to avoid possible bad checksum
446 */
447 SKBTX_SHARED_FRAG = 1 << 5,
Willem de Bruijne7fd2882014-08-04 22:11:48 -0400448
449 /* generate software time stamp when entering packet scheduling */
450 SKBTX_SCHED_TSTAMP = 1 << 6,
Shirley Maa6686f22011-07-06 12:22:12 +0000451};
452
Willem de Bruijn52267792017-08-03 16:29:39 -0400453#define SKBTX_ZEROCOPY_FRAG (SKBTX_DEV_ZEROCOPY | SKBTX_SHARED_FRAG)
Willem de Bruijne1c8a602014-08-04 22:11:50 -0400454#define SKBTX_ANY_SW_TSTAMP (SKBTX_SW_TSTAMP | \
Soheil Hassas Yeganeh0a2cf202016-04-27 23:39:01 -0400455 SKBTX_SCHED_TSTAMP)
Willem de Bruijnf24b9be2014-08-04 22:11:45 -0400456#define SKBTX_ANY_TSTAMP (SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)
457
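/*
 * Example (illustrative sketch): the usual driver pattern for hardware TX
 * timestamping with the flags above. At transmit time the driver marks the
 * skb as "timestamp in progress"; on completion it fills a
 * struct skb_shared_hwtstamps and hands it back via skb_tstamp_tx()
 * (declared further down in this header). foo_read_tx_stamp() is a
 * hypothetical device accessor.
 *
 *	// in ndo_start_xmit(), if the hardware will stamp this packet
 *	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
 *		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 *
 *	// later, in the TX completion handler
 *	struct skb_shared_hwtstamps hwtstamps = {
 *		.hwtstamp = ns_to_ktime(foo_read_tx_stamp(ring)),
 *	};
 *	skb_tstamp_tx(skb, &hwtstamps);
 */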
Shirley Maa6686f22011-07-06 12:22:12 +0000458/*
459 * The callback notifies userspace to release buffers when skb DMA is done in
460 * the lower device; the skb's last reference should be 0 when calling this.
Michael S. Tsirkine19d6762012-11-01 09:16:22 +0000461 * The zerocopy_success argument is true if zero copy transmit occurred,
462 * false on data copy or out of memory error caused by data copy attempt.
Michael S. Tsirkinca8f4fb2012-04-09 00:24:02 +0000463 * The ctx field is used to track device context.
464 * The desc field is used to track userspace buffer index.
Shirley Maa6686f22011-07-06 12:22:12 +0000465 */
466struct ubuf_info {
Michael S. Tsirkine19d6762012-11-01 09:16:22 +0000467 void (*callback)(struct ubuf_info *, bool zerocopy_success);
Willem de Bruijn4ab6c992017-08-03 16:29:42 -0400468 union {
469 struct {
470 unsigned long desc;
471 void *ctx;
472 };
473 struct {
474 u32 id;
475 u16 len;
476 u16 zerocopy:1;
477 u32 bytelen;
478 };
479 };
Eric Dumazetc1d1b432017-08-31 16:48:22 -0700480 refcount_t refcnt;
Willem de Bruijna91dbff2017-08-03 16:29:43 -0400481
482 struct mmpin {
483 struct user_struct *user;
484 unsigned int num_pg;
485 } mmp;
Patrick Ohlyac45f602009-02-12 05:03:37 +0000486};
487
Willem de Bruijn52267792017-08-03 16:29:39 -0400488#define skb_uarg(SKB) ((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))
489
Sowmini Varadhan6f89dbc2018-02-15 10:49:32 -0800490int mm_account_pinned_pages(struct mmpin *mmp, size_t size);
491void mm_unaccount_pinned_pages(struct mmpin *mmp);
492
Willem de Bruijn52267792017-08-03 16:29:39 -0400493struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size);
Willem de Bruijn4ab6c992017-08-03 16:29:42 -0400494struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
495 struct ubuf_info *uarg);
Willem de Bruijn52267792017-08-03 16:29:39 -0400496
497static inline void sock_zerocopy_get(struct ubuf_info *uarg)
498{
Eric Dumazetc1d1b432017-08-31 16:48:22 -0700499 refcount_inc(&uarg->refcnt);
Willem de Bruijn52267792017-08-03 16:29:39 -0400500}
501
502void sock_zerocopy_put(struct ubuf_info *uarg);
Willem de Bruijn52900d22018-11-30 15:32:40 -0500503void sock_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);
Willem de Bruijn52267792017-08-03 16:29:39 -0400504
505void sock_zerocopy_callback(struct ubuf_info *uarg, bool success);
506
Willem de Bruijnb5947e52018-11-30 15:32:39 -0500507int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len);
Willem de Bruijn52267792017-08-03 16:29:39 -0400508int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
509 struct msghdr *msg, int len,
510 struct ubuf_info *uarg);
511
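/*
 * Example (illustrative sketch): roughly how a stream protocol wires up
 * MSG_ZEROCOPY with the helpers above, in the spirit of the TCP sendmsg
 * path; error handling and the copy fallback are omitted, and skb_zcopy()
 * is the accessor defined later in this header.
 *
 *	struct ubuf_info *uarg;
 *
 *	uarg = sock_zerocopy_realloc(sk, size, skb_zcopy(skb));
 *	if (uarg) {
 *		err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg);
 *		...
 *		sock_zerocopy_put(uarg);	// drop the caller's reference;
 *						// the skb holds its own
 *	}
 */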
Linus Torvalds1da177e2005-04-16 15:20:36 -0700512/* This data is invariant across clones and lives at
513 * the end of the header data, ie. at skb->end.
514 */
515struct skb_shared_info {
Daniel Borkmannde8f3a82017-09-25 02:25:51 +0200516 __u8 __unused;
517 __u8 meta_len;
518 __u8 nr_frags;
Ian Campbell9f42f122012-01-05 07:13:39 +0000519 __u8 tx_flags;
Herbert Xu79671682006-06-22 02:40:14 -0700520 unsigned short gso_size;
521 /* Warning: this field is not always filled in (UFO)! */
522 unsigned short gso_segs;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700523 struct sk_buff *frag_list;
Patrick Ohlyac45f602009-02-12 05:03:37 +0000524 struct skb_shared_hwtstamps hwtstamps;
Steffen Klassert7f564522017-04-08 20:36:24 +0200525 unsigned int gso_type;
Willem de Bruijn09c2d252014-08-04 22:11:47 -0400526 u32 tskey;
Eric Dumazetec7d2f22010-05-05 01:07:37 -0700527
528 /*
529 * Warning : all fields before dataref are cleared in __alloc_skb()
530 */
531 atomic_t dataref;
532
Johann Baudy69e3c752009-05-18 22:11:22 -0700533 /* Intermediate layers must ensure that destructor_arg
534 * remains valid until the skb destructor runs */
535 void * destructor_arg;
Shirley Maa6686f22011-07-06 12:22:12 +0000536
Eric Dumazetfed66382010-07-22 19:09:08 +0000537 /* must be last field, see pskb_expand_head() */
538 skb_frag_t frags[MAX_SKB_FRAGS];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700539};
540
541/* We divide dataref into two halves. The higher 16 bits hold references
542 * to the payload part of skb->data. The lower 16 bits hold references to
Patrick McHardy334a8132007-06-25 04:35:20 -0700543 * the entire skb->data. A clone of a headerless skb holds the length of
544 * the header in skb->hdr_len.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700545 *
546 * All users must obey the rule that the skb->data reference count must be
547 * greater than or equal to the payload reference count.
548 *
549 * Holding a reference to the payload part means that the user does not
550 * care about modifications to the header part of skb->data.
551 */
552#define SKB_DATAREF_SHIFT 16
553#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
554
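/*
 * Example (illustrative sketch): how the two dataref halves are decoded.
 * This mirrors what skb_header_cloned() (defined later in this header) does
 * to decide whether the header part of skb->data is shared with a clone.
 *
 *	int dataref = atomic_read(&skb_shinfo(skb)->dataref);
 *	int payload_refs = dataref >> SKB_DATAREF_SHIFT;
 *	int total_refs   = dataref & SKB_DATAREF_MASK;
 *
 *	// header is shared if someone else also holds a full reference
 *	header_cloned = skb->cloned && (total_refs - payload_refs) != 1;
 */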
David S. Millerd179cd12005-08-17 14:57:30 -0700555
556enum {
Vijay Subramanianc8753d52014-10-02 10:00:43 -0700557 SKB_FCLONE_UNAVAILABLE, /* skb has no fclone (from head_cache) */
558 SKB_FCLONE_ORIG, /* orig skb (from fclone_cache) */
559 SKB_FCLONE_CLONE, /* companion fclone skb (from fclone_cache) */
David S. Millerd179cd12005-08-17 14:57:30 -0700560};
561
Herbert Xu79671682006-06-22 02:40:14 -0700562enum {
563 SKB_GSO_TCPV4 = 1 << 0,
Herbert Xu576a30e2006-06-27 13:22:38 -0700564
565 /* This indicates the skb is from an untrusted source. */
David S. Millerd9d30ad2017-07-03 07:31:57 -0700566 SKB_GSO_DODGY = 1 << 1,
Michael Chanb0da85372006-06-29 12:30:00 -0700567
568 /* This indicates the tcp segment has CWR set. */
David S. Millerd9d30ad2017-07-03 07:31:57 -0700569 SKB_GSO_TCP_ECN = 1 << 2,
Herbert Xuf83ef8c2006-06-30 13:37:03 -0700570
David S. Millerd9d30ad2017-07-03 07:31:57 -0700571 SKB_GSO_TCP_FIXEDID = 1 << 3,
Chris Leech01d5b2f2009-02-27 14:06:49 -0800572
David S. Millerd9d30ad2017-07-03 07:31:57 -0700573 SKB_GSO_TCPV6 = 1 << 4,
Pravin B Shelar68c33162013-02-14 14:02:41 +0000574
David S. Millerd9d30ad2017-07-03 07:31:57 -0700575 SKB_GSO_FCOE = 1 << 5,
Pravin B Shelar73136262013-03-07 13:21:51 +0000576
David S. Millerd9d30ad2017-07-03 07:31:57 -0700577 SKB_GSO_GRE = 1 << 6,
Simon Horman0d89d202013-05-23 21:02:52 +0000578
David S. Millerd9d30ad2017-07-03 07:31:57 -0700579 SKB_GSO_GRE_CSUM = 1 << 7,
Eric Dumazetcb32f512013-10-19 11:42:57 -0700580
David S. Millerd9d30ad2017-07-03 07:31:57 -0700581 SKB_GSO_IPXIP4 = 1 << 8,
Eric Dumazet61c1db72013-10-20 20:47:30 -0700582
David S. Millerd9d30ad2017-07-03 07:31:57 -0700583 SKB_GSO_IPXIP6 = 1 << 9,
Tom Herbert0f4f4ff2014-06-04 17:20:16 -0700584
David S. Millerd9d30ad2017-07-03 07:31:57 -0700585 SKB_GSO_UDP_TUNNEL = 1 << 10,
Tom Herbert4749c092014-06-04 17:20:23 -0700586
David S. Millerd9d30ad2017-07-03 07:31:57 -0700587 SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,
Alexander Duyckcbc53e02016-04-10 21:44:51 -0400588
David S. Millerd9d30ad2017-07-03 07:31:57 -0700589 SKB_GSO_PARTIAL = 1 << 12,
Alexander Duyck802ab552016-04-10 21:45:03 -0400590
David S. Millerd9d30ad2017-07-03 07:31:57 -0700591 SKB_GSO_TUNNEL_REMCSUM = 1 << 13,
Marcelo Ricardo Leitner90017ac2016-06-02 15:05:43 -0300592
David S. Millerd9d30ad2017-07-03 07:31:57 -0700593 SKB_GSO_SCTP = 1 << 14,
Steffen Klassertc7ef8f02017-04-14 10:05:36 +0200594
David S. Millerd9d30ad2017-07-03 07:31:57 -0700595 SKB_GSO_ESP = 1 << 15,
Willem de Bruijn0c19f8462017-11-21 10:22:25 -0500596
597 SKB_GSO_UDP = 1 << 16,
Willem de Bruijnee80d1e2018-04-26 13:42:16 -0400598
599 SKB_GSO_UDP_L4 = 1 << 17,
Herbert Xu79671682006-06-22 02:40:14 -0700600};
601
Arnaldo Carvalho de Melo2e07fa92007-04-10 21:22:35 -0700602#if BITS_PER_LONG > 32
603#define NET_SKBUFF_DATA_USES_OFFSET 1
604#endif
605
606#ifdef NET_SKBUFF_DATA_USES_OFFSET
607typedef unsigned int sk_buff_data_t;
608#else
609typedef unsigned char *sk_buff_data_t;
610#endif
611
Pedro Tammela161e6132019-03-05 11:35:54 -0300612/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700613 * struct sk_buff - socket buffer
614 * @next: Next buffer in list
615 * @prev: Previous buffer in list
Eric Dumazet363ec39232014-02-26 14:02:11 -0800616 * @tstamp: Time we arrived/left
Eric Dumazet56b17422014-11-03 08:19:53 -0800617 * @rbnode: RB tree node, alternative to next/prev for netem/tcp
Daniel Balutad84e0bd2011-07-10 07:04:04 -0700618 * @sk: Socket we are owned by
Linus Torvalds1da177e2005-04-16 15:20:36 -0700619 * @dev: Device we arrived on/are leaving by
Daniel Balutad84e0bd2011-07-10 07:04:04 -0700620 * @cb: Control buffer. Free for use by every layer. Put private vars here
Eric Dumazet7fee2262010-05-11 23:19:48 +0000621 * @_skb_refdst: destination entry (with norefcount bit)
Martin Waitz67be2dd2005-05-01 08:59:26 -0700622 * @sp: the security path, used for xfrm
Linus Torvalds1da177e2005-04-16 15:20:36 -0700623 * @len: Length of actual data
624 * @data_len: Data length
625 * @mac_len: Length of link layer header
Patrick McHardy334a8132007-06-25 04:35:20 -0700626 * @hdr_len: writable header length of cloned skb
Herbert Xu663ead32007-04-09 11:59:07 -0700627 * @csum: Checksum (must include start/offset pair)
628 * @csum_start: Offset from skb->head where checksumming should start
629 * @csum_offset: Offset from csum_start where checksum should be stored
Daniel Balutad84e0bd2011-07-10 07:04:04 -0700630 * @priority: Packet queueing priority
WANG Cong60ff7462014-05-04 16:39:18 -0700631 * @ignore_df: allow local fragmentation
Linus Torvalds1da177e2005-04-16 15:20:36 -0700632 * @cloned: Head may be cloned (check refcnt to be sure)
Daniel Balutad84e0bd2011-07-10 07:04:04 -0700633 * @ip_summed: Driver fed us an IP checksum
Linus Torvalds1da177e2005-04-16 15:20:36 -0700634 * @nohdr: Payload reference only, must not modify header
635 * @pkt_type: Packet class
Randy Dunlapc83c2482005-10-18 22:07:41 -0700636 * @fclone: skbuff clone status
Randy Dunlapc83c2482005-10-18 22:07:41 -0700637 * @ipvs_property: skbuff is owned by ipvs
Ido Schimmel875e8932018-12-04 08:15:10 +0000638 * @offload_fwd_mark: Packet was L2-forwarded in hardware
639 * @offload_l3_fwd_mark: Packet was L3-forwarded in hardware
Willem de Bruijne7246e12017-01-07 17:06:35 -0500640 * @tc_skip_classify: do not classify packet. set by IFB device
Willem de Bruijn8dc07fd2017-01-07 17:06:37 -0500641 * @tc_at_ingress: used within tc_classify to distinguish in/egress
Willem de Bruijnbc31c902017-01-07 17:06:38 -0500642 * @tc_redirected: packet was redirected by a tc action
643 * @tc_from_ingress: if tc_redirected, tc_at_ingress at time of redirect
Randy Dunlap31729362008-02-18 20:52:13 -0800644 * @peeked: this packet has been seen already, so stats have been
645 * done for it, don't do them again
Jozsef Kadlecsikba9dda32007-07-07 22:21:23 -0700646 * @nf_trace: netfilter packet trace flag
Daniel Balutad84e0bd2011-07-10 07:04:04 -0700647 * @protocol: Packet protocol from driver
648 * @destructor: Destruct function
Eric Dumazete2080072017-10-04 12:59:58 -0700649 * @tcp_tsorted_anchor: list structure for TCP (tp->tsorted_sent_queue)
Florian Westphala9e419d2017-01-23 18:21:59 +0100650 * @_nfct: Associated connection, if any (with nfctinfo bits)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700651 * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
Eric Dumazet8964be42009-11-20 15:35:04 -0800652 * @skb_iif: ifindex of device we arrived on
Linus Torvalds1da177e2005-04-16 15:20:36 -0700653 * @tc_index: Traffic control index
Tom Herbert61b905d2014-03-24 15:34:47 -0700654 * @hash: the packet hash
Daniel Balutad84e0bd2011-07-10 07:04:04 -0700655 * @queue_mapping: Queue mapping for multiqueue devices
Stefano Brivio8b700862018-07-11 14:39:42 +0200656 * @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
Florian Westphaldf5042f2018-12-18 17:15:16 +0100657 * @active_extensions: active extensions (skb_ext_id types)
Randy Dunlap553a5672008-04-20 10:51:01 -0700658 * @ndisc_nodetype: router type (from link layer)
Daniel Balutad84e0bd2011-07-10 07:04:04 -0700659 * @ooo_okay: allow the mapping of a socket to a queue to be changed
Tom Herbert61b905d2014-03-24 15:34:47 -0700660 * @l4_hash: indicate hash is a canonical 4-tuple hash over transport
Changli Gao4ca24622011-08-19 07:26:44 -0700661 * ports.
Tom Herberta3b18dd2014-07-01 21:33:17 -0700662 * @sw_hash: indicates hash was computed in software stack
Johannes Berg6e3e9392011-11-09 10:15:42 +0100663 * @wifi_acked_valid: wifi_acked was set
664 * @wifi_acked: whether frame was acked on wifi or not
Ben Greear3bdc0eb2012-02-11 15:39:30 +0000665 * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
Davide Carattidba00302017-05-18 15:44:40 +0200666 * @csum_not_inet: use CRC32c to resolve CHECKSUM_PARTIAL
Julian Anastasov4ff06202017-02-06 23:14:12 +0200667 * @dst_pending_confirm: need to confirm neighbour
Stefano Brivioa48d1892018-07-17 11:52:57 +0200668 * @decrypted: Decrypted SKB
Pedro Tammela161e6132019-03-05 11:35:54 -0300669 * @napi_id: id of the NAPI struct this skb came from
James Morris984bc162006-06-09 00:29:17 -0700670 * @secmark: security marking
Daniel Balutad84e0bd2011-07-10 07:04:04 -0700671 * @mark: Generic packet mark
Patrick McHardy86a9bad2013-04-19 02:04:30 +0000672 * @vlan_proto: vlan encapsulation protocol
Patrick McHardy6aa895b2008-07-14 22:49:06 -0700673 * @vlan_tci: vlan tag control information
Simon Horman0d89d202013-05-23 21:02:52 +0000674 * @inner_protocol: Protocol (encapsulation)
Joseph Gasparakis6a674e92012-12-07 14:14:14 +0000675 * @inner_transport_header: Inner transport layer header (encapsulation)
676 * @inner_network_header: Network layer header (encapsulation)
Pravin B Shelaraefbd2b2013-03-07 13:21:46 +0000677 * @inner_mac_header: Link layer header (encapsulation)
Daniel Balutad84e0bd2011-07-10 07:04:04 -0700678 * @transport_header: Transport layer header
679 * @network_header: Network layer header
680 * @mac_header: Link layer header
681 * @tail: Tail pointer
682 * @end: End pointer
683 * @head: Head of buffer
684 * @data: Data head pointer
685 * @truesize: Buffer size
686 * @users: User count - see {datagram,tcp}.c
Florian Westphaldf5042f2018-12-18 17:15:16 +0100687 * @extensions: allocated extensions, valid if active_extensions is nonzero
Linus Torvalds1da177e2005-04-16 15:20:36 -0700688 */
689
690struct sk_buff {
Eric Dumazet363ec39232014-02-26 14:02:11 -0800691 union {
Eric Dumazet56b17422014-11-03 08:19:53 -0800692 struct {
693 /* These two members must be first. */
694 struct sk_buff *next;
695 struct sk_buff *prev;
Felix Fietkauda3f5cf2010-02-23 11:45:51 +0000696
Eric Dumazet56b17422014-11-03 08:19:53 -0800697 union {
Eric Dumazetbffa72c2017-09-19 05:14:24 -0700698 struct net_device *dev;
699 /* Some protocols might use this space to store information,
700 * while the device pointer would be NULL.
701 * The UDP receive path is one user.
702 */
703 unsigned long dev_scratch;
Eric Dumazet56b17422014-11-03 08:19:53 -0800704 };
705 };
Peter Oskolkovfa0f5272018-08-02 23:34:39 +0000706 struct rb_node rbnode; /* used in netem, ip4 defrag, and tcp stack */
David Millerd4546c22018-06-24 14:13:49 +0900707 struct list_head list;
Eric Dumazet56b17422014-11-03 08:19:53 -0800708 };
Peter Oskolkovfa0f5272018-08-02 23:34:39 +0000709
710 union {
711 struct sock *sk;
712 int ip_defrag_offset;
713 };
Linus Torvalds1da177e2005-04-16 15:20:36 -0700714
Eric Dumazetc84d9492016-12-08 11:41:55 -0800715 union {
Eric Dumazetbffa72c2017-09-19 05:14:24 -0700716 ktime_t tstamp;
Eric Dumazetd3edd062018-09-21 08:51:50 -0700717 u64 skb_mstamp_ns; /* earliest departure time */
Eric Dumazetc84d9492016-12-08 11:41:55 -0800718 };
Linus Torvalds1da177e2005-04-16 15:20:36 -0700719 /*
720 * This is the control buffer. It is free to use for every
721 * layer. Please put your private variables there. If you
722 * want to keep them across layers you have to do a skb_clone()
723 * first. This is owned by whoever has the skb queued ATM.
724 */
Felix Fietkauda3f5cf2010-02-23 11:45:51 +0000725 char cb[48] __aligned(8);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700726
Eric Dumazete2080072017-10-04 12:59:58 -0700727 union {
728 struct {
729 unsigned long _skb_refdst;
730 void (*destructor)(struct sk_buff *skb);
731 };
732 struct list_head tcp_tsorted_anchor;
733 };
734
Eric Dumazetb1937222014-09-28 22:18:47 -0700735#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
Florian Westphala9e419d2017-01-23 18:21:59 +0100736 unsigned long _nfct;
Eric Dumazetb1937222014-09-28 22:18:47 -0700737#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700738 unsigned int len,
Patrick McHardy334a8132007-06-25 04:35:20 -0700739 data_len;
740 __u16 mac_len,
741 hdr_len;
Eric Dumazetb1937222014-09-28 22:18:47 -0700742
743 /* Following fields are _not_ copied in __copy_skb_header()
744 * Note that queue_mapping is here mostly to fill a hole.
745 */
Eric Dumazetb1937222014-09-28 22:18:47 -0700746 __u16 queue_mapping;
Daniel Borkmann36bbef52016-09-20 00:26:13 +0200747
748/* if you move cloned around you also must adapt those constants */
749#ifdef __BIG_ENDIAN_BITFIELD
750#define CLONED_MASK (1 << 7)
751#else
752#define CLONED_MASK 1
753#endif
754#define CLONED_OFFSET() offsetof(struct sk_buff, __cloned_offset)
755
756 __u8 __cloned_offset[0];
Eric Dumazetb1937222014-09-28 22:18:47 -0700757 __u8 cloned:1,
Harald Welte6869c4d2005-08-09 19:24:19 -0700758 nohdr:1,
Patrick McHardyb84f4cc2005-11-20 21:19:21 -0800759 fclone:2,
Herbert Xua59322b2007-12-05 01:53:40 -0800760 peeked:1,
Eric Dumazetb1937222014-09-28 22:18:47 -0700761 head_frag:1,
Stefano Brivio8b700862018-07-11 14:39:42 +0200762 pfmemalloc:1;
Florian Westphaldf5042f2018-12-18 17:15:16 +0100763#ifdef CONFIG_SKB_EXTENSIONS
764 __u8 active_extensions;
765#endif
Eric Dumazetb1937222014-09-28 22:18:47 -0700766 /* fields enclosed in headers_start/headers_end are copied
767 * using a single memcpy() in __copy_skb_header()
768 */
Randy Dunlapebcf34f2014-10-26 19:14:06 -0700769 /* private: */
Eric Dumazetb1937222014-09-28 22:18:47 -0700770 __u32 headers_start[0];
Randy Dunlapebcf34f2014-10-26 19:14:06 -0700771 /* public: */
Hannes Frederic Sowa233577a2014-09-12 14:04:43 +0200772
773/* if you move pkt_type around you also must adapt those constants */
774#ifdef __BIG_ENDIAN_BITFIELD
775#define PKT_TYPE_MAX (7 << 5)
776#else
777#define PKT_TYPE_MAX 7
KOVACS Krisztian2fc72c72011-01-12 20:25:08 +0100778#endif
Hannes Frederic Sowa233577a2014-09-12 14:04:43 +0200779#define PKT_TYPE_OFFSET() offsetof(struct sk_buff, __pkt_type_offset)
780
781 __u8 __pkt_type_offset[0];
Eric Dumazetb1937222014-09-28 22:18:47 -0700782 __u8 pkt_type:3;
Eric Dumazetb1937222014-09-28 22:18:47 -0700783 __u8 ignore_df:1;
Eric Dumazetb1937222014-09-28 22:18:47 -0700784 __u8 nf_trace:1;
785 __u8 ip_summed:2;
786 __u8 ooo_okay:1;
Stefano Brivio8b700862018-07-11 14:39:42 +0200787
Eric Dumazetb1937222014-09-28 22:18:47 -0700788 __u8 l4_hash:1;
789 __u8 sw_hash:1;
790 __u8 wifi_acked_valid:1;
791 __u8 wifi_acked:1;
Eric Dumazetb1937222014-09-28 22:18:47 -0700792 __u8 no_fcs:1;
793 /* Indicates the inner headers are valid in the skbuff. */
794 __u8 encapsulation:1;
795 __u8 encap_hdr_csum:1;
796 __u8 csum_valid:1;
Stefano Brivio8b700862018-07-11 14:39:42 +0200797
Michał Mirosław0c4b2d32018-11-10 19:58:36 +0100798#ifdef __BIG_ENDIAN_BITFIELD
799#define PKT_VLAN_PRESENT_BIT 7
800#else
801#define PKT_VLAN_PRESENT_BIT 0
802#endif
803#define PKT_VLAN_PRESENT_OFFSET() offsetof(struct sk_buff, __pkt_vlan_present_offset)
804 __u8 __pkt_vlan_present_offset[0];
805 __u8 vlan_present:1;
Eric Dumazetb1937222014-09-28 22:18:47 -0700806 __u8 csum_complete_sw:1;
807 __u8 csum_level:2;
Davide Carattidba00302017-05-18 15:44:40 +0200808 __u8 csum_not_inet:1;
Julian Anastasov4ff06202017-02-06 23:14:12 +0200809 __u8 dst_pending_confirm:1;
Eric Dumazetb1937222014-09-28 22:18:47 -0700810#ifdef CONFIG_IPV6_NDISC_NODETYPE
811 __u8 ndisc_nodetype:2;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700812#endif
Stefano Brivio8b700862018-07-11 14:39:42 +0200813
Michał Mirosław0c4b2d32018-11-10 19:58:36 +0100814 __u8 ipvs_property:1;
Tom Herbert8bce6d72014-09-29 20:22:29 -0700815 __u8 inner_protocol_type:1;
Tom Herberte585f232014-11-04 09:06:54 -0800816 __u8 remcsum_offload:1;
Ido Schimmel6bc506b2016-08-25 18:42:37 +0200817#ifdef CONFIG_NET_SWITCHDEV
818 __u8 offload_fwd_mark:1;
Ido Schimmel875e8932018-12-04 08:15:10 +0000819 __u8 offload_l3_fwd_mark:1;
Ido Schimmel6bc506b2016-08-25 18:42:37 +0200820#endif
Willem de Bruijne7246e12017-01-07 17:06:35 -0500821#ifdef CONFIG_NET_CLS_ACT
822 __u8 tc_skip_classify:1;
Willem de Bruijn8dc07fd2017-01-07 17:06:37 -0500823 __u8 tc_at_ingress:1;
Willem de Bruijnbc31c902017-01-07 17:06:38 -0500824 __u8 tc_redirected:1;
825 __u8 tc_from_ingress:1;
Willem de Bruijne7246e12017-01-07 17:06:35 -0500826#endif
Stefano Brivioa48d1892018-07-17 11:52:57 +0200827#ifdef CONFIG_TLS_DEVICE
828 __u8 decrypted:1;
829#endif
Alexander Duyck4031ae62012-01-27 06:22:53 +0000830
Linus Torvalds1da177e2005-04-16 15:20:36 -0700831#ifdef CONFIG_NET_SCHED
Patrick McHardyb6b99eb2005-08-09 19:33:51 -0700832 __u16 tc_index; /* traffic control index */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700833#endif
Vegard Nossumfe55f6d2008-08-30 12:16:35 +0200834
Eric Dumazetb1937222014-09-28 22:18:47 -0700835 union {
836 __wsum csum;
837 struct {
838 __u16 csum_start;
839 __u16 csum_offset;
840 };
841 };
842 __u32 priority;
843 int skb_iif;
844 __u32 hash;
845 __be16 vlan_proto;
846 __u16 vlan_tci;
Eric Dumazet2bd82482015-02-03 23:48:24 -0800847#if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
848 union {
849 unsigned int napi_id;
850 unsigned int sender_cpu;
851 };
Chris Leech97fc2f02006-05-23 17:55:33 -0700852#endif
James Morris984bc162006-06-09 00:29:17 -0700853#ifdef CONFIG_NETWORK_SECMARK
Ido Schimmel6bc506b2016-08-25 18:42:37 +0200854 __u32 secmark;
James Morris984bc162006-06-09 00:29:17 -0700855#endif
Scott Feldman0c4f6912015-07-18 18:24:48 -0700856
Neil Horman3b885782009-10-12 13:26:31 -0700857 union {
858 __u32 mark;
Eric Dumazet16fad692013-03-14 05:40:32 +0000859 __u32 reserved_tailroom;
Neil Horman3b885782009-10-12 13:26:31 -0700860 };
Linus Torvalds1da177e2005-04-16 15:20:36 -0700861
Tom Herbert8bce6d72014-09-29 20:22:29 -0700862 union {
863 __be16 inner_protocol;
864 __u8 inner_ipproto;
865 };
866
Simon Horman1a37e412013-05-23 21:02:51 +0000867 __u16 inner_transport_header;
868 __u16 inner_network_header;
869 __u16 inner_mac_header;
Eric Dumazetb1937222014-09-28 22:18:47 -0700870
871 __be16 protocol;
Simon Horman1a37e412013-05-23 21:02:51 +0000872 __u16 transport_header;
873 __u16 network_header;
874 __u16 mac_header;
Eric Dumazetb1937222014-09-28 22:18:47 -0700875
Randy Dunlapebcf34f2014-10-26 19:14:06 -0700876 /* private: */
Eric Dumazetb1937222014-09-28 22:18:47 -0700877 __u32 headers_end[0];
Randy Dunlapebcf34f2014-10-26 19:14:06 -0700878 /* public: */
Eric Dumazetb1937222014-09-28 22:18:47 -0700879
Linus Torvalds1da177e2005-04-16 15:20:36 -0700880 /* These elements must be at the end, see alloc_skb() for details. */
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -0700881 sk_buff_data_t tail;
Arnaldo Carvalho de Melo4305b542007-04-19 20:43:29 -0700882 sk_buff_data_t end;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700883 unsigned char *head,
Arnaldo Carvalho de Melo4305b542007-04-19 20:43:29 -0700884 *data;
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -0700885 unsigned int truesize;
Reshetova, Elena63354792017-06-30 13:07:58 +0300886 refcount_t users;
Florian Westphaldf5042f2018-12-18 17:15:16 +0100887
888#ifdef CONFIG_SKB_EXTENSIONS
889 /* only useable after checking ->active_extensions != 0 */
890 struct skb_ext *extensions;
891#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700892};
893
894#ifdef __KERNEL__
895/*
896 * Handling routines are only of interest to the kernel
897 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700898
Mel Gormanc93bdd02012-07-31 16:44:19 -0700899#define SKB_ALLOC_FCLONE 0x01
900#define SKB_ALLOC_RX 0x02
Alexander Duyckfd11a832014-12-09 19:40:49 -0800901#define SKB_ALLOC_NAPI 0x04
Mel Gormanc93bdd02012-07-31 16:44:19 -0700902
Pedro Tammela161e6132019-03-05 11:35:54 -0300903/**
904 * skb_pfmemalloc - Test if the skb was allocated from PFMEMALLOC reserves
905 * @skb: buffer
906 */
Mel Gormanc93bdd02012-07-31 16:44:19 -0700907static inline bool skb_pfmemalloc(const struct sk_buff *skb)
908{
909 return unlikely(skb->pfmemalloc);
910}
911
Eric Dumazet7fee2262010-05-11 23:19:48 +0000912/*
913 * skb might have a dst pointer attached, refcounted or not.
914 * _skb_refdst low order bit is set if refcount was _not_ taken
915 */
916#define SKB_DST_NOREF 1UL
917#define SKB_DST_PTRMASK ~(SKB_DST_NOREF)
918
Florian Westphala9e419d2017-01-23 18:21:59 +0100919#define SKB_NFCT_PTRMASK ~(7UL)
Eric Dumazet7fee2262010-05-11 23:19:48 +0000920/**
921 * skb_dst - returns skb dst_entry
922 * @skb: buffer
923 *
924 * Returns skb dst_entry, regardless of reference taken or not.
925 */
Eric Dumazetadf30902009-06-02 05:19:30 +0000926static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
927{
Pedro Tammela161e6132019-03-05 11:35:54 -0300928 /* If refdst was not refcounted, check that we are still in an
Eric Dumazet7fee2262010-05-11 23:19:48 +0000929 * rcu_read_lock section
930 */
931 WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
932 !rcu_read_lock_held() &&
933 !rcu_read_lock_bh_held());
934 return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
Eric Dumazetadf30902009-06-02 05:19:30 +0000935}
936
Eric Dumazet7fee2262010-05-11 23:19:48 +0000937/**
938 * skb_dst_set - sets skb dst
939 * @skb: buffer
940 * @dst: dst entry
941 *
942 * Sets skb dst, assuming a reference was taken on dst and should
943 * be released by skb_dst_drop()
944 */
Eric Dumazetadf30902009-06-02 05:19:30 +0000945static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
946{
Eric Dumazet7fee2262010-05-11 23:19:48 +0000947 skb->_skb_refdst = (unsigned long)dst;
948}
949
Julian Anastasov932bc4d2013-03-21 11:57:58 +0200950/**
951 * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
952 * @skb: buffer
953 * @dst: dst entry
954 *
955 * Sets skb dst, assuming a reference was not taken on dst.
956 * If dst entry is cached, we do not take reference and dst_release
957 * will be avoided by refdst_drop. If dst entry is not cached, we take
958 * reference, so that last dst_release can destroy the dst immediately.
959 */
960static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
961{
Hannes Frederic Sowadbfc4fb2014-12-06 19:19:42 +0100962 WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
963 skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
Julian Anastasov932bc4d2013-03-21 11:57:58 +0200964}
Eric Dumazet7fee2262010-05-11 23:19:48 +0000965
966/**
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300967 * skb_dst_is_noref - Test if skb dst isn't refcounted
Eric Dumazet7fee2262010-05-11 23:19:48 +0000968 * @skb: buffer
969 */
970static inline bool skb_dst_is_noref(const struct sk_buff *skb)
971{
972 return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
Eric Dumazetadf30902009-06-02 05:19:30 +0000973}
974
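/*
 * Example (illustrative sketch): attaching a cached dst found under RCU
 * without taking a reference. foo_lookup_rt() is a hypothetical lookup that
 * returns an RCU-protected cached route; readers of the skb do not care
 * which way the dst was attached.
 *
 *	rcu_read_lock();
 *	rt = foo_lookup_rt(net, daddr);		// cached, RCU-protected entry
 *	if (rt)
 *		skb_dst_set_noref(skb, &rt->dst);	// no refcount taken
 *	rcu_read_unlock();
 *
 *	if (skb_dst(skb))
 *		mtu = dst_mtu(skb_dst(skb));
 */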
Pedro Tammela161e6132019-03-05 11:35:54 -0300975/**
976 * skb_rtable - Returns the skb &rtable
977 * @skb: buffer
978 */
Eric Dumazet511c3f92009-06-02 05:14:27 +0000979static inline struct rtable *skb_rtable(const struct sk_buff *skb)
980{
Eric Dumazetadf30902009-06-02 05:19:30 +0000981 return (struct rtable *)skb_dst(skb);
Eric Dumazet511c3f92009-06-02 05:14:27 +0000982}
983
Jamal Hadi Salim8b10cab2016-07-02 06:43:14 -0400984/* For mangling skb->pkt_type from user space by applications
985 * such as nft, tc, etc., we only allow a conservative subset of
986 * possible pkt_types to be set.
987*/
988static inline bool skb_pkt_type_ok(u32 ptype)
989{
990 return ptype <= PACKET_OTHERHOST;
991}
992
Pedro Tammela161e6132019-03-05 11:35:54 -0300993/**
994 * skb_napi_id - Returns the skb's NAPI id
995 * @skb: buffer
996 */
Miroslav Lichvar90b602f2017-05-19 17:52:37 +0200997static inline unsigned int skb_napi_id(const struct sk_buff *skb)
998{
999#ifdef CONFIG_NET_RX_BUSY_POLL
1000 return skb->napi_id;
1001#else
1002 return 0;
1003#endif
1004}
1005
Pedro Tammela161e6132019-03-05 11:35:54 -03001006/**
1007 * skb_unref - decrement the skb's reference count
1008 * @skb: buffer
1009 *
1010 * Returns true if we can free the skb.
1011 */
Paolo Abeni3889a8032017-06-12 11:23:41 +02001012static inline bool skb_unref(struct sk_buff *skb)
1013{
1014 if (unlikely(!skb))
1015 return false;
Reshetova, Elena63354792017-06-30 13:07:58 +03001016 if (likely(refcount_read(&skb->users) == 1))
Paolo Abeni3889a8032017-06-12 11:23:41 +02001017 smp_rmb();
Reshetova, Elena63354792017-06-30 13:07:58 +03001018 else if (likely(!refcount_dec_and_test(&skb->users)))
Paolo Abeni3889a8032017-06-12 11:23:41 +02001019 return false;
1020
1021 return true;
1022}
1023
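/*
 * Example (illustrative sketch): the canonical way skb_unref() is used by
 * the freeing helpers; kfree_skb() and consume_skb() in net/core/skbuff.c
 * follow roughly this shape.
 *
 *	void kfree_skb(struct sk_buff *skb)
 *	{
 *		if (!skb_unref(skb))
 *			return;
 *		trace_kfree_skb(skb, __builtin_return_address(0));
 *		__kfree_skb(skb);
 *	}
 */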
Paolo Abeni0a463c72017-06-12 11:23:42 +02001024void skb_release_head_state(struct sk_buff *skb);
Joe Perches7965bd42013-09-26 14:48:15 -07001025void kfree_skb(struct sk_buff *skb);
1026void kfree_skb_list(struct sk_buff *segs);
Willem de Bruijn64131392019-07-07 05:51:55 -04001027void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt);
Joe Perches7965bd42013-09-26 14:48:15 -07001028void skb_tx_error(struct sk_buff *skb);
1029void consume_skb(struct sk_buff *skb);
Paolo Abenica2c1412017-09-06 14:44:36 +02001030void __consume_stateless_skb(struct sk_buff *skb);
Joe Perches7965bd42013-09-26 14:48:15 -07001031void __kfree_skb(struct sk_buff *skb);
Eric Dumazetd7e88832012-04-30 08:10:34 +00001032extern struct kmem_cache *skbuff_head_cache;
Eric Dumazetbad43ca2012-05-19 03:02:02 +00001033
Joe Perches7965bd42013-09-26 14:48:15 -07001034void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
1035bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
1036 bool *fragstolen, int *delta_truesize);
Eric Dumazetbad43ca2012-05-19 03:02:02 +00001037
Joe Perches7965bd42013-09-26 14:48:15 -07001038struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
1039 int node);
Eric Dumazet2ea2f622015-04-24 16:05:01 -07001040struct sk_buff *__build_skb(void *data, unsigned int frag_size);
Joe Perches7965bd42013-09-26 14:48:15 -07001041struct sk_buff *build_skb(void *data, unsigned int frag_size);
Jesper Dangaard Brouerba0509b2019-04-12 17:07:37 +02001042struct sk_buff *build_skb_around(struct sk_buff *skb,
1043 void *data, unsigned int frag_size);
Pedro Tammela161e6132019-03-05 11:35:54 -03001044
1045/**
1046 * alloc_skb - allocate a network buffer
1047 * @size: size to allocate
1048 * @priority: allocation mask
1049 *
1050 * This function is a convenient wrapper around __alloc_skb().
1051 */
David S. Millerd179cd12005-08-17 14:57:30 -07001052static inline struct sk_buff *alloc_skb(unsigned int size,
Al Virodd0fc662005-10-07 07:46:04 +01001053 gfp_t priority)
David S. Millerd179cd12005-08-17 14:57:30 -07001054{
Eric Dumazet564824b2010-10-11 19:05:25 +00001055 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
David S. Millerd179cd12005-08-17 14:57:30 -07001056}
1057
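/*
 * Example (illustrative sketch): the common allocate/reserve/fill pattern
 * built on alloc_skb(). skb_reserve() and skb_put_data() are declared later
 * in this header; LL_RESERVED_SPACE() comes from <linux/netdevice.h>.
 * @dev, @payload and @payload_len are caller-supplied placeholders.
 *
 *	skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload_len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));  // headroom for link headers
 *	skb_put_data(skb, payload, payload_len);   // append the payload
 *	skb->dev = dev;
 */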
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001058struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
1059 unsigned long data_len,
1060 int max_page_order,
1061 int *errcode,
1062 gfp_t gfp_mask);
Jakub Kicinskida29e4b2019-06-03 15:16:58 -07001063struct sk_buff *alloc_skb_for_msg(struct sk_buff *first);
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001064
Eric Dumazetd0bf4a92014-09-29 13:29:15 -07001065/* Layout of fast clones : [skb1][skb2][fclone_ref] */
1066struct sk_buff_fclones {
1067 struct sk_buff skb1;
1068
1069 struct sk_buff skb2;
1070
Reshetova, Elena26385952017-06-30 13:07:59 +03001071 refcount_t fclone_ref;
Eric Dumazetd0bf4a92014-09-29 13:29:15 -07001072};
1073
1074/**
1075 * skb_fclone_busy - check if fclone is busy
Stephen Hemminger293de7d2016-10-23 09:28:29 -07001076 * @sk: socket
Eric Dumazetd0bf4a92014-09-29 13:29:15 -07001077 * @skb: buffer
1078 *
Masanari Iidabda13fe2015-12-13 16:53:02 +09001079 * Returns true if skb is a fast clone, and its clone is not freed.
Eric Dumazet39bb5e62014-10-30 10:32:34 -07001080 * Some drivers call skb_orphan() in their ndo_start_xmit(),
1081 * so we also check that this didn't happen.
Eric Dumazetd0bf4a92014-09-29 13:29:15 -07001082 */
Eric Dumazet39bb5e62014-10-30 10:32:34 -07001083static inline bool skb_fclone_busy(const struct sock *sk,
1084 const struct sk_buff *skb)
Eric Dumazetd0bf4a92014-09-29 13:29:15 -07001085{
1086 const struct sk_buff_fclones *fclones;
1087
1088 fclones = container_of(skb, struct sk_buff_fclones, skb1);
1089
1090 return skb->fclone == SKB_FCLONE_ORIG &&
Reshetova, Elena26385952017-06-30 13:07:59 +03001091 refcount_read(&fclones->fclone_ref) > 1 &&
Eric Dumazet39bb5e62014-10-30 10:32:34 -07001092 fclones->skb2.sk == sk;
Eric Dumazetd0bf4a92014-09-29 13:29:15 -07001093}
1094
Pedro Tammela161e6132019-03-05 11:35:54 -03001095/**
1096 * alloc_skb_fclone - allocate a network buffer from fclone cache
1097 * @size: size to allocate
1098 * @priority: allocation mask
1099 *
1100 * This function is a convenient wrapper around __alloc_skb().
1101 */
David S. Millerd179cd12005-08-17 14:57:30 -07001102static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
Al Virodd0fc662005-10-07 07:46:04 +01001103 gfp_t priority)
David S. Millerd179cd12005-08-17 14:57:30 -07001104{
Mel Gormanc93bdd02012-07-31 16:44:19 -07001105 return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
David S. Millerd179cd12005-08-17 14:57:30 -07001106}
1107
Joe Perches7965bd42013-09-26 14:48:15 -07001108struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
Toshiaki Makitab0768a82018-08-03 16:58:09 +09001109void skb_headers_offset_update(struct sk_buff *skb, int off);
Joe Perches7965bd42013-09-26 14:48:15 -07001110int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
1111struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
Ilya Lesokhin08303c12018-04-30 10:16:11 +03001112void skb_copy_header(struct sk_buff *new, const struct sk_buff *old);
Joe Perches7965bd42013-09-26 14:48:15 -07001113struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
Octavian Purdilabad93e92014-06-12 01:36:26 +03001114struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
1115 gfp_t gfp_mask, bool fclone);
1116static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
1117 gfp_t gfp_mask)
1118{
1119 return __pskb_copy_fclone(skb, headroom, gfp_mask, false);
1120}
Eric Dumazet117632e2011-12-03 21:39:53 +00001121
Joe Perches7965bd42013-09-26 14:48:15 -07001122int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
1123struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
1124 unsigned int headroom);
1125struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
1126 int newtailroom, gfp_t priority);
Jason A. Donenfeld48a1df62017-06-04 04:16:22 +02001127int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
1128 int offset, int len);
1129int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
1130 int offset, int len);
Joe Perches7965bd42013-09-26 14:48:15 -07001131int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
Florian Fainellicd0a1372017-08-22 15:12:14 -07001132int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error);
1133
1134/**
1135 * skb_pad - zero pad the tail of an skb
1136 * @skb: buffer to pad
1137 * @pad: space to pad
1138 *
1139 * Ensure that a buffer is followed by a padding area that is zero
1140 * filled. Used by network drivers which may DMA or transfer data
1141 * beyond the buffer end onto the wire.
1142 *
1143 * May return error in out of memory cases. The skb is freed on error.
1144 */
1145static inline int skb_pad(struct sk_buff *skb, int pad)
1146{
1147 return __skb_pad(skb, pad, true);
1148}
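/*
 * Illustrative sketch (editor's addition, not part of this header): a
 * driver padding short frames up to a hardware minimum before handing
 * them to DMA. @min_len and the function name are hypothetical.
 */
static inline int example_pad_short_frame(struct sk_buff *skb,
					  unsigned int min_len)
{
	if (skb->len >= min_len)
		return 0;
	/* skb_pad() frees the skb on allocation failure, so the caller
	 * must not touch it again once an error is returned.
	 */
	return skb_pad(skb, min_len - skb->len);
}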
Neil Hormanead2ceb2009-03-11 09:49:55 +00001149#define dev_kfree_skb(a) consume_skb(a)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001150
Hannes Frederic Sowabe12a1f2015-05-21 16:59:58 +02001151int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
1152 int offset, size_t size);
1153
Eric Dumazetd94d9fe2009-11-04 09:50:58 -08001154struct skb_seq_state {
Thomas Graf677e90e2005-06-23 20:59:51 -07001155 __u32 lower_offset;
1156 __u32 upper_offset;
1157 __u32 frag_idx;
1158 __u32 stepped_offset;
1159 struct sk_buff *root_skb;
1160 struct sk_buff *cur_skb;
1161 __u8 *frag_data;
1162};
1163
Joe Perches7965bd42013-09-26 14:48:15 -07001164void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
1165 unsigned int to, struct skb_seq_state *st);
1166unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
1167 struct skb_seq_state *st);
1168void skb_abort_seq_read(struct skb_seq_state *st);
Thomas Graf677e90e2005-06-23 20:59:51 -07001169
Joe Perches7965bd42013-09-26 14:48:15 -07001170unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
Bojan Prtvar059a2442015-02-22 11:46:35 +01001171 unsigned int to, struct ts_config *config);
Thomas Graf3fc7e8a2005-06-23 21:00:17 -07001172
Tom Herbert09323cc2013-12-15 22:16:19 -08001173/*
1174 * Packet hash types specify the type of hash in skb_set_hash.
1175 *
1176 * Hash types refer to the protocol layer addresses which are used to
1177 * construct a packet's hash. The hashes are used to differentiate or identify
1178 * flows of the protocol layer for the hash type. Hash types are either
1179 * layer-2 (L2), layer-3 (L3), or layer-4 (L4).
1180 *
1181 * Properties of hashes:
1182 *
1183 * 1) Two packets in different flows have different hash values
1184 * 2) Two packets in the same flow should have the same hash value
1185 *
1186 * A hash at a higher layer is considered to be more specific. A driver should
1187 * set the most specific hash possible.
1188 *
1189 * A driver cannot indicate a more specific hash than the layer at which a hash
1190 * was computed. For instance an L3 hash cannot be set as an L4 hash.
1191 *
1192 * A driver may indicate a hash level which is less specific than the
1193 * actual layer the hash was computed on. For instance, a hash computed
1194 * at L4 may be considered an L3 hash. This should only be done if the
1195 * driver can't unambiguously determine that the HW computed the hash at
1196 * the higher layer. Note that the "should" in the second property above
1197 * permits this.
1198 */
1199enum pkt_hash_types {
1200 PKT_HASH_TYPE_NONE, /* Undefined type */
1201 PKT_HASH_TYPE_L2, /* Input: src_MAC, dest_MAC */
1202 PKT_HASH_TYPE_L3, /* Input: src_IP, dst_IP */
1203 PKT_HASH_TYPE_L4, /* Input: src_IP, dst_IP, src_port, dst_port */
1204};
1205
Tom Herbertbcc83832015-09-01 09:24:24 -07001206static inline void skb_clear_hash(struct sk_buff *skb)
1207{
1208 skb->hash = 0;
1209 skb->sw_hash = 0;
1210 skb->l4_hash = 0;
1211}
1212
1213static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
1214{
1215 if (!skb->l4_hash)
1216 skb_clear_hash(skb);
1217}
1218
1219static inline void
1220__skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
1221{
1222 skb->l4_hash = is_l4;
1223 skb->sw_hash = is_sw;
1224 skb->hash = hash;
1225}
1226
Tom Herbert09323cc2013-12-15 22:16:19 -08001227static inline void
1228skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
1229{
Tom Herbertbcc83832015-09-01 09:24:24 -07001230 /* Used by drivers to set hash from HW */
1231 __skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
1232}
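/*
 * Illustrative sketch (editor's addition, not part of this header): a
 * receive path copying a hardware RSS hash into the skb. The descriptor
 * layout (example_rx_desc) and the function name are hypothetical.
 */
struct example_rx_desc {
	u32 rss_hash;	/* hash value reported by the NIC */
	u8 l4_valid;	/* non-zero if the NIC hashed the L4 ports too */
};

static inline void example_rx_set_hash(struct sk_buff *skb,
				       const struct example_rx_desc *desc)
{
	/* Report the most specific level the hardware actually computed. */
	skb_set_hash(skb, desc->rss_hash,
		     desc->l4_valid ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}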
1233
1234static inline void
1235__skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
1236{
1237 __skb_set_hash(skb, hash, true, is_l4);
Tom Herbert09323cc2013-12-15 22:16:19 -08001238}
1239
Tom Herberte5276932015-09-01 09:24:23 -07001240void __skb_get_hash(struct sk_buff *skb);
Florian Westphalb9177832016-10-26 18:49:46 +02001241u32 __skb_get_hash_symmetric(const struct sk_buff *skb);
Tom Herberte5276932015-09-01 09:24:23 -07001242u32 skb_get_poff(const struct sk_buff *skb);
1243u32 __skb_get_poff(const struct sk_buff *skb, void *data,
Paolo Abeni72a338b2018-05-04 11:32:59 +02001244 const struct flow_keys_basic *keys, int hlen);
Tom Herberte5276932015-09-01 09:24:23 -07001245__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
1246 void *data, int hlen_proto);
1247
1248static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
1249 int thoff, u8 ip_proto)
1250{
1251 return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
1252}
1253
1254void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
1255 const struct flow_dissector_key *key,
1256 unsigned int key_count);
1257
Willem de Bruijn2dfd1842018-09-18 16:20:18 -04001258#ifdef CONFIG_NET
Stanislav Fomichev118c8e92019-04-25 14:37:23 -07001259int skb_flow_dissector_prog_query(const union bpf_attr *attr,
1260 union bpf_attr __user *uattr);
Petar Penkovd58e4682018-09-14 07:46:18 -07001261int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr,
1262 struct bpf_prog *prog);
1263
1264int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr);
Willem de Bruijn2dfd1842018-09-18 16:20:18 -04001265#else
Stanislav Fomichev118c8e92019-04-25 14:37:23 -07001266static inline int skb_flow_dissector_prog_query(const union bpf_attr *attr,
1267 union bpf_attr __user *uattr)
1268{
1269 return -EOPNOTSUPP;
1270}
1271
Willem de Bruijn2dfd1842018-09-18 16:20:18 -04001272static inline int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr,
1273 struct bpf_prog *prog)
1274{
1275 return -EOPNOTSUPP;
1276}
1277
1278static inline int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
1279{
1280 return -EOPNOTSUPP;
1281}
1282#endif
Petar Penkovd58e4682018-09-14 07:46:18 -07001283
Stanislav Fomichev089b19a2019-04-22 08:55:44 -07001284struct bpf_flow_dissector;
1285bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
1286 __be16 proto, int nhoff, int hlen);
1287
Stanislav Fomichev3cbf4ff2019-04-22 08:55:46 -07001288bool __skb_flow_dissect(const struct net *net,
1289 const struct sk_buff *skb,
Tom Herberte5276932015-09-01 09:24:23 -07001290 struct flow_dissector *flow_dissector,
1291 void *target_container,
Tom Herbertcd79a232015-09-01 09:24:27 -07001292 void *data, __be16 proto, int nhoff, int hlen,
1293 unsigned int flags);
Tom Herberte5276932015-09-01 09:24:23 -07001294
1295static inline bool skb_flow_dissect(const struct sk_buff *skb,
1296 struct flow_dissector *flow_dissector,
Tom Herbertcd79a232015-09-01 09:24:27 -07001297 void *target_container, unsigned int flags)
Tom Herberte5276932015-09-01 09:24:23 -07001298{
Stanislav Fomichev3cbf4ff2019-04-22 08:55:46 -07001299 return __skb_flow_dissect(NULL, skb, flow_dissector,
1300 target_container, NULL, 0, 0, 0, flags);
Tom Herberte5276932015-09-01 09:24:23 -07001301}
1302
1303static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
Tom Herbertcd79a232015-09-01 09:24:27 -07001304 struct flow_keys *flow,
1305 unsigned int flags)
Tom Herberte5276932015-09-01 09:24:23 -07001306{
1307 memset(flow, 0, sizeof(*flow));
Stanislav Fomichev3cbf4ff2019-04-22 08:55:46 -07001308 return __skb_flow_dissect(NULL, skb, &flow_keys_dissector,
1309 flow, NULL, 0, 0, 0, flags);
Tom Herberte5276932015-09-01 09:24:23 -07001310}
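/*
 * Illustrative sketch (editor's addition, not part of this header):
 * pulling the destination port of a packet out of the generic flow
 * dissector. The function name is hypothetical.
 */
static inline __be16 example_flow_dst_port(const struct sk_buff *skb)
{
	struct flow_keys keys;

	/* Fails for packets the dissector cannot parse. */
	if (!skb_flow_dissect_flow_keys(skb, &keys, 0))
		return 0;
	return keys.ports.dst;
}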
1311
Paolo Abeni72a338b2018-05-04 11:32:59 +02001312static inline bool
Stanislav Fomichev3cbf4ff2019-04-22 08:55:46 -07001313skb_flow_dissect_flow_keys_basic(const struct net *net,
1314 const struct sk_buff *skb,
Paolo Abeni72a338b2018-05-04 11:32:59 +02001315 struct flow_keys_basic *flow, void *data,
1316 __be16 proto, int nhoff, int hlen,
1317 unsigned int flags)
Tom Herberte5276932015-09-01 09:24:23 -07001318{
1319 memset(flow, 0, sizeof(*flow));
Stanislav Fomichev3cbf4ff2019-04-22 08:55:46 -07001320 return __skb_flow_dissect(net, skb, &flow_keys_basic_dissector, flow,
Tom Herbertcd79a232015-09-01 09:24:27 -07001321 data, proto, nhoff, hlen, flags);
Tom Herberte5276932015-09-01 09:24:23 -07001322}
1323
Jiri Pirko82828b82019-06-19 09:41:02 +03001324void skb_flow_dissect_meta(const struct sk_buff *skb,
1325 struct flow_dissector *flow_dissector,
1326 void *target_container);
1327
Paul Blakey75a56752019-07-09 10:30:49 +03001328/* Gets the skb's connection tracking info; ctinfo_map should be
1329 * a map of mapsize entries translating enum ip_conntrack_info
1330 * states to user states.
1331 */
1332void
1333skb_flow_dissect_ct(const struct sk_buff *skb,
1334 struct flow_dissector *flow_dissector,
1335 void *target_container,
1336 u16 *ctinfo_map,
1337 size_t mapsize);
Simon Horman62b32372017-12-04 11:31:48 +01001338void
1339skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
1340 struct flow_dissector *flow_dissector,
1341 void *target_container);
1342
Tom Herbert3958afa1b2013-12-15 22:12:06 -08001343static inline __u32 skb_get_hash(struct sk_buff *skb)
Krishna Kumarbfb564e2010-08-04 06:15:52 +00001344{
Tom Herberta3b18dd2014-07-01 21:33:17 -07001345 if (!skb->l4_hash && !skb->sw_hash)
Tom Herbert3958afa1b2013-12-15 22:12:06 -08001346 __skb_get_hash(skb);
Krishna Kumarbfb564e2010-08-04 06:15:52 +00001347
Tom Herbert61b905d2014-03-24 15:34:47 -07001348 return skb->hash;
Krishna Kumarbfb564e2010-08-04 06:15:52 +00001349}
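/*
 * Illustrative sketch (editor's addition, not part of this header):
 * spreading flows over a number of queues using the (possibly software
 * computed) skb hash; reciprocal_scale() comes from linux/kernel.h.
 * The function name is hypothetical.
 */
static inline u16 example_pick_queue(struct sk_buff *skb,
				     unsigned int nqueues)
{
	/* skb_get_hash() computes and caches a flow hash on demand. */
	return (u16)reciprocal_scale(skb_get_hash(skb), nqueues);
}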
1350
David S. Miller20a17bf2015-09-01 21:19:17 -07001351static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
Tom Herbertf70ea012015-07-31 16:52:10 -07001352{
Tom Herbertc6cc1ca2015-09-01 09:24:25 -07001353 if (!skb->l4_hash && !skb->sw_hash) {
1354 struct flow_keys keys;
Tom Herbertde4c1f82015-09-01 18:11:04 -07001355 __u32 hash = __get_hash_from_flowi6(fl6, &keys);
Tom Herbertc6cc1ca2015-09-01 09:24:25 -07001356
Tom Herbertde4c1f82015-09-01 18:11:04 -07001357 __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
Tom Herbertc6cc1ca2015-09-01 09:24:25 -07001358 }
Tom Herbertf70ea012015-07-31 16:52:10 -07001359
1360 return skb->hash;
1361}
1362
Tom Herbert50fb7992015-05-01 11:30:12 -07001363__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
1364
Tom Herbert57bdf7f42014-01-15 08:57:54 -08001365static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
1366{
Tom Herbert61b905d2014-03-24 15:34:47 -07001367 return skb->hash;
Tom Herbert57bdf7f42014-01-15 08:57:54 -08001368}
1369
Tom Herbert3df7a742013-12-15 22:16:29 -08001370static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
1371{
Tom Herbert61b905d2014-03-24 15:34:47 -07001372 to->hash = from->hash;
Tom Herberta3b18dd2014-07-01 21:33:17 -07001373 to->sw_hash = from->sw_hash;
Tom Herbert61b905d2014-03-24 15:34:47 -07001374 to->l4_hash = from->l4_hash;
Tom Herbert3df7a742013-12-15 22:16:29 -08001375}
1376
Arnaldo Carvalho de Melo4305b542007-04-19 20:43:29 -07001377#ifdef NET_SKBUFF_DATA_USES_OFFSET
1378static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
1379{
1380 return skb->head + skb->end;
1381}
Alexander Duyckec47ea82012-05-04 14:26:56 +00001382
1383static inline unsigned int skb_end_offset(const struct sk_buff *skb)
1384{
1385 return skb->end;
1386}
Arnaldo Carvalho de Melo4305b542007-04-19 20:43:29 -07001387#else
1388static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
1389{
1390 return skb->end;
1391}
Alexander Duyckec47ea82012-05-04 14:26:56 +00001392
1393static inline unsigned int skb_end_offset(const struct sk_buff *skb)
1394{
1395 return skb->end - skb->head;
1396}
Arnaldo Carvalho de Melo4305b542007-04-19 20:43:29 -07001397#endif
1398
Linus Torvalds1da177e2005-04-16 15:20:36 -07001399/* Internal */
Arnaldo Carvalho de Melo4305b542007-04-19 20:43:29 -07001400#define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001401
Patrick Ohlyac45f602009-02-12 05:03:37 +00001402static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
1403{
1404 return &skb_shinfo(skb)->hwtstamps;
1405}
1406
Willem de Bruijn52267792017-08-03 16:29:39 -04001407static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb)
1408{
1409 bool is_zcopy = skb && skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY;
1410
1411 return is_zcopy ? skb_uarg(skb) : NULL;
1412}
1413
Willem de Bruijn52900d22018-11-30 15:32:40 -05001414static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg,
1415 bool *have_ref)
Willem de Bruijn52267792017-08-03 16:29:39 -04001416{
1417 if (skb && uarg && !skb_zcopy(skb)) {
Willem de Bruijn52900d22018-11-30 15:32:40 -05001418 if (unlikely(have_ref && *have_ref))
1419 *have_ref = false;
1420 else
1421 sock_zerocopy_get(uarg);
Willem de Bruijn52267792017-08-03 16:29:39 -04001422 skb_shinfo(skb)->destructor_arg = uarg;
1423 skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
1424 }
1425}
1426
Willem de Bruijn5cd8d462018-11-20 13:00:18 -05001427static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val)
1428{
1429 skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL);
1430 skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
1431}
1432
1433static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb)
1434{
1435 return (uintptr_t) skb_shinfo(skb)->destructor_arg & 0x1UL;
1436}
1437
1438static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb)
1439{
1440 return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL);
1441}
1442
Willem de Bruijn52267792017-08-03 16:29:39 -04001443/* Release a reference on a zerocopy structure */
1444static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
1445{
1446 struct ubuf_info *uarg = skb_zcopy(skb);
1447
1448 if (uarg) {
Willem de Bruijn185ce5c2019-05-15 13:29:16 -04001449 if (skb_zcopy_is_nouarg(skb)) {
1450 /* no notification callback */
1451 } else if (uarg->callback == sock_zerocopy_callback) {
Willem de Bruijn0a4a0602017-08-09 19:09:44 -04001452 uarg->zerocopy = uarg->zerocopy && zerocopy;
1453 sock_zerocopy_put(uarg);
Willem de Bruijn185ce5c2019-05-15 13:29:16 -04001454 } else {
Willem de Bruijn0a4a0602017-08-09 19:09:44 -04001455 uarg->callback(uarg, zerocopy);
1456 }
1457
Willem de Bruijn52267792017-08-03 16:29:39 -04001458 skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
1459 }
1460}
1461
1462/* Abort a zerocopy operation and revert zckey on error in send syscall */
1463static inline void skb_zcopy_abort(struct sk_buff *skb)
1464{
1465 struct ubuf_info *uarg = skb_zcopy(skb);
1466
1467 if (uarg) {
Willem de Bruijn52900d22018-11-30 15:32:40 -05001468 sock_zerocopy_put_abort(uarg, false);
Willem de Bruijn52267792017-08-03 16:29:39 -04001469 skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
1470 }
1471}
1472
David S. Millera8305bf2018-07-29 20:42:53 -07001473static inline void skb_mark_not_on_list(struct sk_buff *skb)
1474{
1475 skb->next = NULL;
1476}
1477
David S. Miller992cba72018-07-31 15:27:56 -07001478static inline void skb_list_del_init(struct sk_buff *skb)
1479{
1480 __list_del_entry(&skb->list);
1481 skb_mark_not_on_list(skb);
1482}
1483
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484/**
1485 * skb_queue_empty - check if a queue is empty
1486 * @list: queue head
1487 *
1488 * Returns true if the queue is empty, false otherwise.
1489 */
1490static inline int skb_queue_empty(const struct sk_buff_head *list)
1491{
Daniel Borkmannfd44b932014-01-07 23:23:44 +01001492 return list->next == (const struct sk_buff *) list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493}
1494
1495/**
David S. Millerfc7ebb22008-09-23 00:34:07 -07001496 * skb_queue_is_last - check if skb is the last entry in the queue
1497 * @list: queue head
1498 * @skb: buffer
1499 *
1500 * Returns true if @skb is the last buffer on the list.
1501 */
1502static inline bool skb_queue_is_last(const struct sk_buff_head *list,
1503 const struct sk_buff *skb)
1504{
Daniel Borkmannfd44b932014-01-07 23:23:44 +01001505 return skb->next == (const struct sk_buff *) list;
David S. Millerfc7ebb22008-09-23 00:34:07 -07001506}
1507
1508/**
Ilpo Järvinen832d11c2008-11-24 21:20:15 -08001509 * skb_queue_is_first - check if skb is the first entry in the queue
1510 * @list: queue head
1511 * @skb: buffer
1512 *
1513 * Returns true if @skb is the first buffer on the list.
1514 */
1515static inline bool skb_queue_is_first(const struct sk_buff_head *list,
1516 const struct sk_buff *skb)
1517{
Daniel Borkmannfd44b932014-01-07 23:23:44 +01001518 return skb->prev == (const struct sk_buff *) list;
Ilpo Järvinen832d11c2008-11-24 21:20:15 -08001519}
1520
1521/**
David S. Miller249c8b42008-09-23 00:44:42 -07001522 * skb_queue_next - return the next packet in the queue
1523 * @list: queue head
1524 * @skb: current buffer
1525 *
1526 * Return the next packet in @list after @skb. It is only valid to
1527 * call this if skb_queue_is_last() evaluates to false.
1528 */
1529static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
1530 const struct sk_buff *skb)
1531{
1532 /* This BUG_ON may seem severe, but if we just return then we
1533 * are going to dereference garbage.
1534 */
1535 BUG_ON(skb_queue_is_last(list, skb));
1536 return skb->next;
1537}
1538
1539/**
Ilpo Järvinen832d11c2008-11-24 21:20:15 -08001540 * skb_queue_prev - return the prev packet in the queue
1541 * @list: queue head
1542 * @skb: current buffer
1543 *
1544 * Return the prev packet in @list before @skb. It is only valid to
1545 * call this if skb_queue_is_first() evaluates to false.
1546 */
1547static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
1548 const struct sk_buff *skb)
1549{
1550 /* This BUG_ON may seem severe, but if we just return then we
1551 * are going to dereference garbage.
1552 */
1553 BUG_ON(skb_queue_is_first(list, skb));
1554 return skb->prev;
1555}
1556
1557/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001558 * skb_get - reference buffer
1559 * @skb: buffer to reference
1560 *
1561 * Makes another reference to a socket buffer and returns a pointer
1562 * to the buffer.
1563 */
1564static inline struct sk_buff *skb_get(struct sk_buff *skb)
1565{
Reshetova, Elena63354792017-06-30 13:07:58 +03001566 refcount_inc(&skb->users);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001567 return skb;
1568}
1569
1570/*
Geert Uytterhoevenf8821f92017-11-30 14:33:56 +01001571 * If users == 1, we are the only owner and can avoid redundant atomic changes.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001572 */
1573
1574/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575 * skb_cloned - is the buffer a clone
1576 * @skb: buffer to check
1577 *
1578 * Returns true if the buffer was generated with skb_clone() and is
1579 * one of multiple shared copies of the buffer. Cloned buffers are
1580 * shared data so must not be written to under normal circumstances.
1581 */
1582static inline int skb_cloned(const struct sk_buff *skb)
1583{
1584 return skb->cloned &&
1585 (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
1586}
1587
Pravin B Shelar14bbd6a2013-02-14 09:44:49 +00001588static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
1589{
Mel Gormand0164ad2015-11-06 16:28:21 -08001590 might_sleep_if(gfpflags_allow_blocking(pri));
Pravin B Shelar14bbd6a2013-02-14 09:44:49 +00001591
1592 if (skb_cloned(skb))
1593 return pskb_expand_head(skb, 0, 0, pri);
1594
1595 return 0;
1596}
1597
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598/**
1599 * skb_header_cloned - is the header a clone
1600 * @skb: buffer to check
1601 *
1602 * Returns true if modifying the header part of the buffer requires
1603 * the data to be copied.
1604 */
1605static inline int skb_header_cloned(const struct sk_buff *skb)
1606{
1607 int dataref;
1608
1609 if (!skb->cloned)
1610 return 0;
1611
1612 dataref = atomic_read(&skb_shinfo(skb)->dataref);
1613 dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
1614 return dataref != 1;
1615}
1616
Eric Dumazet9580bf22016-04-30 10:19:29 -07001617static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri)
1618{
1619 might_sleep_if(gfpflags_allow_blocking(pri));
1620
1621 if (skb_header_cloned(skb))
1622 return pskb_expand_head(skb, 0, 0, pri);
1623
1624 return 0;
1625}
1626
Linus Torvalds1da177e2005-04-16 15:20:36 -07001627/**
Eric Dumazetf4a775d2014-09-22 16:29:32 -07001628 * __skb_header_release - release reference to header
1629 * @skb: buffer to operate on
Eric Dumazetf4a775d2014-09-22 16:29:32 -07001630 */
1631static inline void __skb_header_release(struct sk_buff *skb)
1632{
1633 skb->nohdr = 1;
1634 atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));
1635}
1636
1637
1638/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001639 * skb_shared - is the buffer shared
1640 * @skb: buffer to check
1641 *
1642 * Returns true if more than one person has a reference to this
1643 * buffer.
1644 */
1645static inline int skb_shared(const struct sk_buff *skb)
1646{
Reshetova, Elena63354792017-06-30 13:07:58 +03001647 return refcount_read(&skb->users) != 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648}
1649
1650/**
1651 * skb_share_check - check if buffer is shared and if so clone it
1652 * @skb: buffer to check
1653 * @pri: priority for memory allocation
1654 *
1655 * If the buffer is shared the buffer is cloned and the old copy
1656 * drops a reference. A new clone with a single reference is returned.
1657 * If the buffer is not shared the original buffer is returned. When
1658 * being called from interrupt context or with spinlocks held, @pri must
1659 * be %GFP_ATOMIC.
1660 *
1661 * NULL is returned on a memory allocation failure.
1662 */
Eric Dumazet47061bc2012-08-03 20:54:15 +00001663static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001664{
Mel Gormand0164ad2015-11-06 16:28:21 -08001665 might_sleep_if(gfpflags_allow_blocking(pri));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001666 if (skb_shared(skb)) {
1667 struct sk_buff *nskb = skb_clone(skb, pri);
Eric Dumazet47061bc2012-08-03 20:54:15 +00001668
1669 if (likely(nskb))
1670 consume_skb(skb);
1671 else
1672 kfree_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001673 skb = nskb;
1674 }
1675 return skb;
1676}
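/*
 * Illustrative sketch (editor's addition, not part of this header): a
 * receive handler that intends to modify the buffer first makes sure it
 * is the sole owner. The function name is hypothetical.
 */
static inline struct sk_buff *example_own_before_edit(struct sk_buff *skb)
{
	/* May return NULL; the original reference is already dropped. */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return NULL;
	/* skb now has users == 1; note that cloned data may still need
	 * skb_unclone() before the payload itself is written.
	 */
	return skb;
}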
1677
1678/*
1679 * Copy shared buffers into a new sk_buff. We effectively do COW on
1680 * packets to handle cases where we have a local reader and a forwarding
1681 * path, and a couple of other messy cases. The normal one is tcpdumping
1682 * a packet that's being forwarded.
1683 */
1684
1685/**
1686 * skb_unshare - make a copy of a shared buffer
1687 * @skb: buffer to check
1688 * @pri: priority for memory allocation
1689 *
1690 * If the socket buffer is a clone then this function creates a new
1691 * copy of the data, drops a reference count on the old copy and returns
1692 * the new copy with the reference count at 1. If the buffer is not a clone
1693 * the original buffer is returned. When called with a spinlock held or
1694 * from interrupt context, @pri must be %GFP_ATOMIC.
1695 *
1696 * %NULL is returned on a memory allocation failure.
1697 */
Victor Fuscoe2bf5212005-07-18 13:36:38 -07001698static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
Al Virodd0fc662005-10-07 07:46:04 +01001699 gfp_t pri)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001700{
Mel Gormand0164ad2015-11-06 16:28:21 -08001701 might_sleep_if(gfpflags_allow_blocking(pri));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001702 if (skb_cloned(skb)) {
1703 struct sk_buff *nskb = skb_copy(skb, pri);
Alexander Aring31eff812014-10-10 23:10:47 +02001704
1705 /* Free our shared copy */
1706 if (likely(nskb))
1707 consume_skb(skb);
1708 else
1709 kfree_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001710 skb = nskb;
1711 }
1712 return skb;
1713}
1714
1715/**
Ben Hutchings1a5778a2010-02-14 22:35:47 -08001716 * skb_peek - peek at the head of an &sk_buff_head
Linus Torvalds1da177e2005-04-16 15:20:36 -07001717 * @list_: list to peek at
1718 *
1719 * Peek an &sk_buff. Unlike most other operations you _MUST_
1720 * be careful with this one. A peek leaves the buffer on the
1721 * list and someone else may run off with it. You must hold
1722 * the appropriate locks or have a private queue to do this.
1723 *
1724 * Returns %NULL for an empty list or a pointer to the head element.
1725 * The reference count is not incremented and the reference is therefore
1726 * volatile. Use with caution.
1727 */
Eric Dumazet05bdd2f2011-10-20 17:45:43 -04001728static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001729{
Eric Dumazet18d07002012-04-30 16:31:46 +00001730 struct sk_buff *skb = list_->next;
1731
1732 if (skb == (struct sk_buff *)list_)
1733 skb = NULL;
1734 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001735}
1736
1737/**
David S. Miller8b69bd72018-08-11 18:43:38 -07001738 * __skb_peek - peek at the head of a non-empty &sk_buff_head
1739 * @list_: list to peek at
1740 *
1741 * Like skb_peek(), but the caller knows that the list is not empty.
1742 */
1743static inline struct sk_buff *__skb_peek(const struct sk_buff_head *list_)
1744{
1745 return list_->next;
1746}
1747
1748/**
Pavel Emelyanovda5ef6e2012-02-21 07:31:18 +00001749 * skb_peek_next - peek skb following the given one from a queue
1750 * @skb: skb to start from
1751 * @list_: list to peek at
1752 *
1753 * Returns %NULL when the end of the list is met or a pointer to the
1754 * next element. The reference count is not incremented and the
1755 * reference is therefore volatile. Use with caution.
1756 */
1757static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
1758 const struct sk_buff_head *list_)
1759{
1760 struct sk_buff *next = skb->next;
Eric Dumazet18d07002012-04-30 16:31:46 +00001761
Pavel Emelyanovda5ef6e2012-02-21 07:31:18 +00001762 if (next == (struct sk_buff *)list_)
1763 next = NULL;
1764 return next;
1765}
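/*
 * Illustrative sketch (editor's addition, not part of this header):
 * summing queued bytes while holding the queue lock, using skb_peek()
 * and skb_peek_next() so every buffer stays on the list. The function
 * name is hypothetical.
 */
static inline unsigned int example_queued_bytes(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	unsigned int bytes = 0;
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	for (skb = skb_peek(list); skb; skb = skb_peek_next(skb, list))
		bytes += skb->len;
	spin_unlock_irqrestore(&list->lock, flags);

	return bytes;
}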
1766
1767/**
Ben Hutchings1a5778a2010-02-14 22:35:47 -08001768 * skb_peek_tail - peek at the tail of an &sk_buff_head
Linus Torvalds1da177e2005-04-16 15:20:36 -07001769 * @list_: list to peek at
1770 *
1771 * Peek an &sk_buff. Unlike most other operations you _MUST_
1772 * be careful with this one. A peek leaves the buffer on the
1773 * list and someone else may run off with it. You must hold
1774 * the appropriate locks or have a private queue to do this.
1775 *
1776 * Returns %NULL for an empty list or a pointer to the tail element.
1777 * The reference count is not incremented and the reference is therefore
1778 * volatile. Use with caution.
1779 */
Eric Dumazet05bdd2f2011-10-20 17:45:43 -04001780static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001781{
Eric Dumazet18d07002012-04-30 16:31:46 +00001782 struct sk_buff *skb = list_->prev;
1783
1784 if (skb == (struct sk_buff *)list_)
1785 skb = NULL;
1786 return skb;
1787
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788}
1789
1790/**
1791 * skb_queue_len - get queue length
1792 * @list_: list to measure
1793 *
1794 * Return the length of an &sk_buff queue.
1795 */
1796static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
1797{
1798 return list_->qlen;
1799}
1800
David S. Miller67fed452008-09-21 22:36:24 -07001801/**
1802 * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
1803 * @list: queue to initialize
1804 *
1805 * This initializes only the list and queue length aspects of
1806 * an sk_buff_head object. This allows initializing the list
1807 * aspects of an sk_buff_head without reinitializing things like
1808 * the spinlock. It can also be used for on-stack sk_buff_head
1809 * objects where the spinlock is known to not be used.
1810 */
1811static inline void __skb_queue_head_init(struct sk_buff_head *list)
1812{
1813 list->prev = list->next = (struct sk_buff *)list;
1814 list->qlen = 0;
1815}
1816
Arjan van de Ven76f10ad2006-08-02 14:06:55 -07001817/*
1818 * This function creates a split out lock class for each invocation;
1819 * this is needed for now since a whole lot of users of the skb-queue
1820 * infrastructure in drivers have different locking usage (in hardirq)
1821 * than the networking core (in softirq only). In the long run either the
1822 * network layer or the drivers will need annotation to consolidate the
1823 * main types of usage into 3 classes.
1824 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001825static inline void skb_queue_head_init(struct sk_buff_head *list)
1826{
1827 spin_lock_init(&list->lock);
David S. Miller67fed452008-09-21 22:36:24 -07001828 __skb_queue_head_init(list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829}
1830
Pavel Emelianovc2ecba72007-04-17 12:45:31 -07001831static inline void skb_queue_head_init_class(struct sk_buff_head *list,
1832 struct lock_class_key *class)
1833{
1834 skb_queue_head_init(list);
1835 lockdep_set_class(&list->lock, class);
1836}
1837
Linus Torvalds1da177e2005-04-16 15:20:36 -07001838/*
Gerrit Renkerbf299272008-04-14 00:04:51 -07001839 * Insert an sk_buff on a list.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001840 *
1841 * The "__skb_xxxx()" functions are the non-atomic ones that
1842 * can only be called with interrupts disabled.
1843 */
Gerrit Renkerbf299272008-04-14 00:04:51 -07001844static inline void __skb_insert(struct sk_buff *newsk,
1845 struct sk_buff *prev, struct sk_buff *next,
1846 struct sk_buff_head *list)
1847{
1848 newsk->next = next;
1849 newsk->prev = prev;
1850 next->prev = prev->next = newsk;
1851 list->qlen++;
1852}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001853
David S. Miller67fed452008-09-21 22:36:24 -07001854static inline void __skb_queue_splice(const struct sk_buff_head *list,
1855 struct sk_buff *prev,
1856 struct sk_buff *next)
1857{
1858 struct sk_buff *first = list->next;
1859 struct sk_buff *last = list->prev;
1860
1861 first->prev = prev;
1862 prev->next = first;
1863
1864 last->next = next;
1865 next->prev = last;
1866}
1867
1868/**
1869 * skb_queue_splice - join two skb lists, this is designed for stacks
1870 * @list: the new list to add
1871 * @head: the place to add it in the first list
1872 */
1873static inline void skb_queue_splice(const struct sk_buff_head *list,
1874 struct sk_buff_head *head)
1875{
1876 if (!skb_queue_empty(list)) {
1877 __skb_queue_splice(list, (struct sk_buff *) head, head->next);
David S. Miller1d4a31d2008-09-22 21:57:21 -07001878 head->qlen += list->qlen;
David S. Miller67fed452008-09-21 22:36:24 -07001879 }
1880}
1881
1882/**
Eric Dumazetd961949662012-04-30 21:29:16 +00001883 * skb_queue_splice_init - join two skb lists and reinitialise the emptied list
David S. Miller67fed452008-09-21 22:36:24 -07001884 * @list: the new list to add
1885 * @head: the place to add it in the first list
1886 *
1887 * The list at @list is reinitialised
1888 */
1889static inline void skb_queue_splice_init(struct sk_buff_head *list,
1890 struct sk_buff_head *head)
1891{
1892 if (!skb_queue_empty(list)) {
1893 __skb_queue_splice(list, (struct sk_buff *) head, head->next);
David S. Miller1d4a31d2008-09-22 21:57:21 -07001894 head->qlen += list->qlen;
David S. Miller67fed452008-09-21 22:36:24 -07001895 __skb_queue_head_init(list);
1896 }
1897}
1898
1899/**
1900 * skb_queue_splice_tail - join two skb lists, each list being a queue
1901 * @list: the new list to add
1902 * @head: the place to add it in the first list
1903 */
1904static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
1905 struct sk_buff_head *head)
1906{
1907 if (!skb_queue_empty(list)) {
1908 __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
David S. Miller1d4a31d2008-09-22 21:57:21 -07001909 head->qlen += list->qlen;
David S. Miller67fed452008-09-21 22:36:24 -07001910 }
1911}
1912
1913/**
Eric Dumazetd961949662012-04-30 21:29:16 +00001914 * skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
David S. Miller67fed452008-09-21 22:36:24 -07001915 * @list: the new list to add
1916 * @head: the place to add it in the first list
1917 *
1918 * Each of the lists is a queue.
1919 * The list at @list is reinitialised
1920 */
1921static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
1922 struct sk_buff_head *head)
1923{
1924 if (!skb_queue_empty(list)) {
1925 __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
David S. Miller1d4a31d2008-09-22 21:57:21 -07001926 head->qlen += list->qlen;
David S. Miller67fed452008-09-21 22:36:24 -07001927 __skb_queue_head_init(list);
1928 }
1929}
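/*
 * Illustrative sketch (editor's addition, not part of this header):
 * draining a producer queue into an on-stack list so the buffers can be
 * processed without holding the shared lock. The function name is
 * hypothetical.
 */
static inline void example_drain_to_stack(struct sk_buff_head *shared,
					  struct sk_buff_head *local)
{
	unsigned long flags;

	__skb_queue_head_init(local);	/* on-stack list, lock unused */

	spin_lock_irqsave(&shared->lock, flags);
	skb_queue_splice_init(shared, local);	/* @shared is now empty */
	spin_unlock_irqrestore(&shared->lock, flags);

	/* @local can be walked or dequeued without @shared's lock. */
}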
1930
Linus Torvalds1da177e2005-04-16 15:20:36 -07001931/**
Stephen Hemminger300ce172005-10-30 13:47:34 -08001932 * __skb_queue_after - queue a buffer after the given buffer
1933 * @list: list to use
1934 * @prev: place after this buffer
1935 * @newsk: buffer to queue
1936 *
1937 * Queue a buffer in the middle of a list. This function takes no locks
1938 * and you must therefore hold required locks before calling it.
1939 *
1940 * A buffer cannot be placed on two lists at the same time.
1941 */
1942static inline void __skb_queue_after(struct sk_buff_head *list,
1943 struct sk_buff *prev,
1944 struct sk_buff *newsk)
1945{
Gerrit Renkerbf299272008-04-14 00:04:51 -07001946 __skb_insert(newsk, prev, prev->next, list);
Stephen Hemminger300ce172005-10-30 13:47:34 -08001947}
1948
Joe Perches7965bd42013-09-26 14:48:15 -07001949void skb_append(struct sk_buff *old, struct sk_buff *newsk,
1950 struct sk_buff_head *list);
Gerrit Renker7de6c032008-04-14 00:05:09 -07001951
Gerrit Renkerf5572852008-04-14 00:05:28 -07001952static inline void __skb_queue_before(struct sk_buff_head *list,
1953 struct sk_buff *next,
1954 struct sk_buff *newsk)
1955{
1956 __skb_insert(newsk, next->prev, next, list);
1957}
1958
Stephen Hemminger300ce172005-10-30 13:47:34 -08001959/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960 * __skb_queue_head - queue a buffer at the list head
1961 * @list: list to use
1962 * @newsk: buffer to queue
1963 *
1964 * Queue a buffer at the start of a list. This function takes no locks
1965 * and you must therefore hold required locks before calling it.
1966 *
1967 * A buffer cannot be placed on two lists at the same time.
1968 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969static inline void __skb_queue_head(struct sk_buff_head *list,
1970 struct sk_buff *newsk)
1971{
Stephen Hemminger300ce172005-10-30 13:47:34 -08001972 __skb_queue_after(list, (struct sk_buff *)list, newsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973}
Brian Norris4ea7b0c2019-02-11 13:02:25 -08001974void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975
1976/**
1977 * __skb_queue_tail - queue a buffer at the list tail
1978 * @list: list to use
1979 * @newsk: buffer to queue
1980 *
1981 * Queue a buffer at the end of a list. This function takes no locks
1982 * and you must therefore hold required locks before calling it.
1983 *
1984 * A buffer cannot be placed on two lists at the same time.
1985 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986static inline void __skb_queue_tail(struct sk_buff_head *list,
1987 struct sk_buff *newsk)
1988{
Gerrit Renkerf5572852008-04-14 00:05:28 -07001989 __skb_queue_before(list, (struct sk_buff *)list, newsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990}
Brian Norris4ea7b0c2019-02-11 13:02:25 -08001991void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994 * remove sk_buff from list. _Must_ be called atomically, and with
1995 * the list known..
1996 */
Joe Perches7965bd42013-09-26 14:48:15 -07001997void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
1999{
2000 struct sk_buff *next, *prev;
2001
2002 list->qlen--;
2003 next = skb->next;
2004 prev = skb->prev;
2005 skb->next = skb->prev = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002006 next->prev = prev;
2007 prev->next = next;
2008}
2009
Gerrit Renkerf525c062008-04-14 00:04:12 -07002010/**
2011 * __skb_dequeue - remove from the head of the queue
2012 * @list: list to dequeue from
2013 *
2014 * Remove the head of the list. This function does not take any locks
2015 * so must be used with appropriate locks held only. The head item is
2016 * returned or %NULL if the list is empty.
2017 */
Gerrit Renkerf525c062008-04-14 00:04:12 -07002018static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
2019{
2020 struct sk_buff *skb = skb_peek(list);
2021 if (skb)
2022 __skb_unlink(skb, list);
2023 return skb;
2024}
Brian Norris4ea7b0c2019-02-11 13:02:25 -08002025struct sk_buff *skb_dequeue(struct sk_buff_head *list);
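/*
 * Illustrative sketch (editor's addition, not part of this header):
 * essentially what the exported skb_dequeue() above provides - take the
 * queue lock, use the lock-free __skb_dequeue(), drop the lock. The
 * function name is hypothetical.
 */
static inline struct sk_buff *example_dequeue_locked(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	skb = __skb_dequeue(list);	/* NULL if the list is empty */
	spin_unlock_irqrestore(&list->lock, flags);

	return skb;
}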
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026
2027/**
2028 * __skb_dequeue_tail - remove from the tail of the queue
2029 * @list: list to dequeue from
2030 *
2031 * Remove the tail of the list. This function does not take any locks
2032 * so must be used with appropriate locks held only. The tail item is
2033 * returned or %NULL if the list is empty.
2034 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
2036{
2037 struct sk_buff *skb = skb_peek_tail(list);
2038 if (skb)
2039 __skb_unlink(skb, list);
2040 return skb;
2041}
Brian Norris4ea7b0c2019-02-11 13:02:25 -08002042struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002043
2044
David S. Millerbdcc0922012-03-07 20:53:36 -05002045static inline bool skb_is_nonlinear(const struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002046{
2047 return skb->data_len;
2048}
2049
2050static inline unsigned int skb_headlen(const struct sk_buff *skb)
2051{
2052 return skb->len - skb->data_len;
2053}
2054
Willem de Bruijn3ece7822017-08-03 16:29:38 -04002055static inline unsigned int __skb_pagelen(const struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002056{
Alexey Dobriyanc72d8cd2016-11-19 04:08:08 +03002057 unsigned int i, len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002058
Alexey Dobriyanc72d8cd2016-11-19 04:08:08 +03002059 for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--)
Eric Dumazet9e903e02011-10-18 21:00:24 +00002060 len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
Willem de Bruijn3ece7822017-08-03 16:29:38 -04002061 return len;
2062}
2063
2064static inline unsigned int skb_pagelen(const struct sk_buff *skb)
2065{
2066 return skb_headlen(skb) + __skb_pagelen(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002067}
2068
Ian Campbell131ea662011-08-19 06:25:00 +00002069/**
2070 * __skb_fill_page_desc - initialise a paged fragment in an skb
2071 * @skb: buffer containing fragment to be initialised
2072 * @i: paged fragment index to initialise
2073 * @page: the page to use for this fragment
2074 * @off: the offset to the data within @page
2075 * @size: the length of the data
2076 *
2077 * Initialises the @i'th fragment of @skb to point to @size bytes at
2078 * offset @off within @page.
2079 *
2080 * Does not take any additional reference on the fragment.
2081 */
2082static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
2083 struct page *page, int off, int size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002084{
2085 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2086
Mel Gormanc48a11c2012-07-31 16:44:23 -07002087 /*
Michal Hocko2f064f32015-08-21 14:11:51 -07002088 * Propagate page pfmemalloc to the skb if we can. The problem is
2089 * that not all callers have unique ownership of the page but rely
2090 * on page_is_pfmemalloc doing the right thing(tm).
Mel Gormanc48a11c2012-07-31 16:44:23 -07002091 */
Ian Campbella8605c62011-10-19 23:01:49 +00002092 frag->page.p = page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002093 frag->page_offset = off;
Eric Dumazet9e903e02011-10-18 21:00:24 +00002094 skb_frag_size_set(frag, size);
Pavel Emelyanovcca7af32013-03-14 03:29:40 +00002095
2096 page = compound_head(page);
Michal Hocko2f064f32015-08-21 14:11:51 -07002097 if (page_is_pfmemalloc(page))
Pavel Emelyanovcca7af32013-03-14 03:29:40 +00002098 skb->pfmemalloc = true;
Ian Campbell131ea662011-08-19 06:25:00 +00002099}
2100
2101/**
2102 * skb_fill_page_desc - initialise a paged fragment in an skb
2103 * @skb: buffer containing fragment to be initialised
2104 * @i: paged fragment index to initialise
2105 * @page: the page to use for this fragment
2106 * @off: the offset to the data within @page
2107 * @size: the length of the data
2108 *
2109 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
Mathias Krausebc323832013-11-07 14:18:26 +01002110 * @skb to point to @size bytes at offset @off within @page. In
Ian Campbell131ea662011-08-19 06:25:00 +00002111 * addition updates @skb such that @i is the last fragment.
2112 *
2113 * Does not take any additional reference on the fragment.
2114 */
2115static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
2116 struct page *page, int off, int size)
2117{
2118 __skb_fill_page_desc(skb, i, page, off, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119 skb_shinfo(skb)->nr_frags = i + 1;
2120}
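/*
 * Illustrative sketch (editor's addition, not part of this header):
 * attaching a page as the first fragment of an skb. skb_fill_page_desc()
 * only records the fragment, so the caller also accounts for the length
 * and memory. Names and the @truesize parameter are hypothetical.
 */
static inline void example_fill_first_frag(struct sk_buff *skb,
					   struct page *page, int off,
					   int size, unsigned int truesize)
{
	skb_fill_page_desc(skb, 0, page, off, size);

	/* Length and memory accounting stay with the caller. */
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}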
2121
Joe Perches7965bd42013-09-26 14:48:15 -07002122void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
2123 int size, unsigned int truesize);
Peter Zijlstra654bed12008-10-07 14:22:33 -07002124
Jason Wangf8e617e2013-11-01 14:07:47 +08002125void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
2126 unsigned int truesize);
2127
Linus Torvalds1da177e2005-04-16 15:20:36 -07002128#define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb))
2129
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07002130#ifdef NET_SKBUFF_DATA_USES_OFFSET
2131static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
2132{
2133 return skb->head + skb->tail;
2134}
2135
2136static inline void skb_reset_tail_pointer(struct sk_buff *skb)
2137{
2138 skb->tail = skb->data - skb->head;
2139}
2140
2141static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
2142{
2143 skb_reset_tail_pointer(skb);
2144 skb->tail += offset;
2145}
Simon Horman7cc46192013-05-28 20:34:29 +00002146
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07002147#else /* NET_SKBUFF_DATA_USES_OFFSET */
2148static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
2149{
2150 return skb->tail;
2151}
2152
2153static inline void skb_reset_tail_pointer(struct sk_buff *skb)
2154{
2155 skb->tail = skb->data;
2156}
2157
2158static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
2159{
2160 skb->tail = skb->data + offset;
2161}
Arnaldo Carvalho de Melo4305b542007-04-19 20:43:29 -07002162
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07002163#endif /* NET_SKBUFF_DATA_USES_OFFSET */
2164
Linus Torvalds1da177e2005-04-16 15:20:36 -07002165/*
2166 * Add data to an sk_buff
2167 */
Johannes Berg4df864c2017-06-16 14:29:21 +02002168void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
2169void *skb_put(struct sk_buff *skb, unsigned int len);
2170static inline void *__skb_put(struct sk_buff *skb, unsigned int len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002171{
Johannes Berg4df864c2017-06-16 14:29:21 +02002172 void *tmp = skb_tail_pointer(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002173 SKB_LINEAR_ASSERT(skb);
2174 skb->tail += len;
2175 skb->len += len;
2176 return tmp;
2177}
2178
yuan linyude77b962017-06-18 22:48:17 +08002179static inline void *__skb_put_zero(struct sk_buff *skb, unsigned int len)
2180{
2181 void *tmp = __skb_put(skb, len);
2182
2183 memset(tmp, 0, len);
2184 return tmp;
2185}
2186
2187static inline void *__skb_put_data(struct sk_buff *skb, const void *data,
2188 unsigned int len)
2189{
2190 void *tmp = __skb_put(skb, len);
2191
2192 memcpy(tmp, data, len);
2193 return tmp;
2194}
2195
2196static inline void __skb_put_u8(struct sk_buff *skb, u8 val)
2197{
2198 *(u8 *)__skb_put(skb, 1) = val;
2199}
2200
Johannes Berg83ad3572017-06-14 22:17:20 +02002201static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len)
Johannes Berge45a79d2017-05-24 09:07:47 +02002202{
Johannes Berg83ad3572017-06-14 22:17:20 +02002203 void *tmp = skb_put(skb, len);
Johannes Berge45a79d2017-05-24 09:07:47 +02002204
2205 memset(tmp, 0, len);
2206
2207 return tmp;
2208}
2209
Johannes Berg59ae1d12017-06-16 14:29:20 +02002210static inline void *skb_put_data(struct sk_buff *skb, const void *data,
2211 unsigned int len)
2212{
2213 void *tmp = skb_put(skb, len);
2214
2215 memcpy(tmp, data, len);
2216
2217 return tmp;
2218}
2219
Johannes Berg634fef62017-06-16 14:29:24 +02002220static inline void skb_put_u8(struct sk_buff *skb, u8 val)
2221{
2222 *(u8 *)skb_put(skb, 1) = val;
2223}
2224
Johannes Bergd58ff352017-06-16 14:29:23 +02002225void *skb_push(struct sk_buff *skb, unsigned int len);
2226static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002227{
2228 skb->data -= len;
2229 skb->len += len;
2230 return skb->data;
2231}
2232
Johannes Bergaf728682017-06-16 14:29:22 +02002233void *skb_pull(struct sk_buff *skb, unsigned int len);
2234static inline void *__skb_pull(struct sk_buff *skb, unsigned int len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002235{
2236 skb->len -= len;
2237 BUG_ON(skb->len < skb->data_len);
2238 return skb->data += len;
2239}
2240
Johannes Bergaf728682017-06-16 14:29:22 +02002241static inline void *skb_pull_inline(struct sk_buff *skb, unsigned int len)
David S. Miller47d29642010-05-02 02:21:44 -07002242{
2243 return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
2244}
2245
Johannes Bergaf728682017-06-16 14:29:22 +02002246void *__pskb_pull_tail(struct sk_buff *skb, int delta);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247
Johannes Bergaf728682017-06-16 14:29:22 +02002248static inline void *__pskb_pull(struct sk_buff *skb, unsigned int len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002249{
2250 if (len > skb_headlen(skb) &&
Gerrit Renker987c4022008-08-11 18:17:17 -07002251 !__pskb_pull_tail(skb, len - skb_headlen(skb)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002252 return NULL;
2253 skb->len -= len;
2254 return skb->data += len;
2255}
2256
Johannes Bergaf728682017-06-16 14:29:22 +02002257static inline void *pskb_pull(struct sk_buff *skb, unsigned int len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002258{
2259 return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
2260}
2261
2262static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
2263{
2264 if (likely(len <= skb_headlen(skb)))
2265 return 1;
2266 if (unlikely(len > skb->len))
2267 return 0;
Gerrit Renker987c4022008-08-11 18:17:17 -07002268 return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002269}
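/*
 * Illustrative sketch (editor's addition, not part of this header):
 * before dereferencing @hdr_len bytes of a protocol header, make sure
 * they are present in the linear area; pskb_may_pull() linearizes that
 * much if necessary. The function name is hypothetical.
 */
static inline void *example_pull_header(struct sk_buff *skb,
					unsigned int hdr_len)
{
	if (!pskb_may_pull(skb, hdr_len))
		return NULL;	/* truncated packet, caller drops it */

	/* The first @hdr_len bytes are now linear and safe to read. */
	return skb->data;
}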
2270
Eric Dumazetc8c8b122016-12-07 09:19:33 -08002271void skb_condense(struct sk_buff *skb);
2272
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273/**
2274 * skb_headroom - bytes at buffer head
2275 * @skb: buffer to check
2276 *
2277 * Return the number of bytes of free space at the head of an &sk_buff.
2278 */
Chuck Leverc2636b42007-10-23 21:07:32 -07002279static inline unsigned int skb_headroom(const struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002280{
2281 return skb->data - skb->head;
2282}
2283
2284/**
2285 * skb_tailroom - bytes at buffer end
2286 * @skb: buffer to check
2287 *
2288 * Return the number of bytes of free space at the tail of an sk_buff
2289 */
2290static inline int skb_tailroom(const struct sk_buff *skb)
2291{
Arnaldo Carvalho de Melo4305b542007-04-19 20:43:29 -07002292 return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002293}
2294
2295/**
Eric Dumazeta21d4572012-04-10 20:30:48 +00002296 * skb_availroom - bytes at buffer end
2297 * @skb: buffer to check
2298 *
2299 * Return the number of bytes of free space at the tail of an sk_buff
2300 * allocated by sk_stream_alloc(), i.e. the tailroom minus any reserved tailroom.
2301 */
2302static inline int skb_availroom(const struct sk_buff *skb)
2303{
Eric Dumazet16fad692013-03-14 05:40:32 +00002304 if (skb_is_nonlinear(skb))
2305 return 0;
2306
2307 return skb->end - skb->tail - skb->reserved_tailroom;
Eric Dumazeta21d4572012-04-10 20:30:48 +00002308}
2309
2310/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002311 * skb_reserve - adjust headroom
2312 * @skb: buffer to alter
2313 * @len: bytes to move
2314 *
2315 * Increase the headroom of an empty &sk_buff by reducing the tail
2316 * room. This is only allowed for an empty buffer.
2317 */
David S. Miller8243126c2006-01-17 02:54:21 -08002318static inline void skb_reserve(struct sk_buff *skb, int len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002319{
2320 skb->data += len;
2321 skb->tail += len;
2322}
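/*
 * Illustrative sketch (editor's addition, not part of this header): the
 * canonical construction order - reserve headroom first, skb_put() the
 * payload, then skb_push() the header in front of it. Sizes and the
 * function name are hypothetical.
 */
static inline struct sk_buff *example_build_frame(const void *payload,
						  unsigned int len,
						  unsigned int hdr_len)
{
	struct sk_buff *skb = alloc_skb(hdr_len + len, GFP_ATOMIC);
	void *hdr;

	if (!skb)
		return NULL;

	skb_reserve(skb, hdr_len);		/* headroom for the header */
	skb_put_data(skb, payload, len);	/* append the payload */
	hdr = skb_push(skb, hdr_len);		/* prepend header space */
	memset(hdr, 0, hdr_len);		/* caller fills real fields */

	return skb;
}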
2323
Benjamin Poirier1837b2e2016-02-29 15:03:33 -08002324/**
2325 * skb_tailroom_reserve - adjust reserved_tailroom
2326 * @skb: buffer to alter
2327 * @mtu: maximum amount of headlen permitted
2328 * @needed_tailroom: minimum amount of reserved_tailroom
2329 *
2330 * Set reserved_tailroom so that headlen can be as large as possible but
2331 * not larger than mtu and tailroom cannot be smaller than
2332 * needed_tailroom.
2333 * The required headroom should already have been reserved before using
2334 * this function.
2335 */
2336static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
2337 unsigned int needed_tailroom)
2338{
2339 SKB_LINEAR_ASSERT(skb);
2340 if (mtu < skb_tailroom(skb) - needed_tailroom)
2341 /* use at most mtu */
2342 skb->reserved_tailroom = skb_tailroom(skb) - mtu;
2343 else
2344 /* use up to all available space */
2345 skb->reserved_tailroom = needed_tailroom;
2346}
2347
Tom Herbert8bce6d72014-09-29 20:22:29 -07002348#define ENCAP_TYPE_ETHER 0
2349#define ENCAP_TYPE_IPPROTO 1
2350
2351static inline void skb_set_inner_protocol(struct sk_buff *skb,
2352 __be16 protocol)
2353{
2354 skb->inner_protocol = protocol;
2355 skb->inner_protocol_type = ENCAP_TYPE_ETHER;
2356}
2357
2358static inline void skb_set_inner_ipproto(struct sk_buff *skb,
2359 __u8 ipproto)
2360{
2361 skb->inner_ipproto = ipproto;
2362 skb->inner_protocol_type = ENCAP_TYPE_IPPROTO;
2363}
2364
Joseph Gasparakis6a674e92012-12-07 14:14:14 +00002365static inline void skb_reset_inner_headers(struct sk_buff *skb)
2366{
Pravin B Shelaraefbd2b2013-03-07 13:21:46 +00002367 skb->inner_mac_header = skb->mac_header;
Joseph Gasparakis6a674e92012-12-07 14:14:14 +00002368 skb->inner_network_header = skb->network_header;
2369 skb->inner_transport_header = skb->transport_header;
2370}
2371
Jiri Pirko0b5c9db2011-06-10 06:56:58 +00002372static inline void skb_reset_mac_len(struct sk_buff *skb)
2373{
2374 skb->mac_len = skb->network_header - skb->mac_header;
2375}
2376
Joseph Gasparakis6a674e92012-12-07 14:14:14 +00002377static inline unsigned char *skb_inner_transport_header(const struct sk_buff
2378 *skb)
2379{
2380 return skb->head + skb->inner_transport_header;
2381}
2382
Tom Herbert55dc5a92015-12-14 11:19:40 -08002383static inline int skb_inner_transport_offset(const struct sk_buff *skb)
2384{
2385 return skb_inner_transport_header(skb) - skb->data;
2386}
2387
Joseph Gasparakis6a674e92012-12-07 14:14:14 +00002388static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
2389{
2390 skb->inner_transport_header = skb->data - skb->head;
2391}
2392
2393static inline void skb_set_inner_transport_header(struct sk_buff *skb,
2394 const int offset)
2395{
2396 skb_reset_inner_transport_header(skb);
2397 skb->inner_transport_header += offset;
2398}
2399
2400static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
2401{
2402 return skb->head + skb->inner_network_header;
2403}
2404
2405static inline void skb_reset_inner_network_header(struct sk_buff *skb)
2406{
2407 skb->inner_network_header = skb->data - skb->head;
2408}
2409
2410static inline void skb_set_inner_network_header(struct sk_buff *skb,
2411 const int offset)
2412{
2413 skb_reset_inner_network_header(skb);
2414 skb->inner_network_header += offset;
2415}
2416
Pravin B Shelaraefbd2b2013-03-07 13:21:46 +00002417static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
2418{
2419 return skb->head + skb->inner_mac_header;
2420}
2421
2422static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
2423{
2424 skb->inner_mac_header = skb->data - skb->head;
2425}
2426
2427static inline void skb_set_inner_mac_header(struct sk_buff *skb,
2428 const int offset)
2429{
2430 skb_reset_inner_mac_header(skb);
2431 skb->inner_mac_header += offset;
2432}
Eric Dumazetfda55ec2013-01-07 09:28:21 +00002433static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
2434{
Cong Wang35d04612013-05-29 15:16:05 +08002435 return skb->transport_header != (typeof(skb->transport_header))~0U;
Eric Dumazetfda55ec2013-01-07 09:28:21 +00002436}
2437
Arnaldo Carvalho de Melo2e07fa92007-04-10 21:22:35 -07002438static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
2439{
2440 return skb->head + skb->transport_header;
2441}
2442
2443static inline void skb_reset_transport_header(struct sk_buff *skb)
2444{
2445 skb->transport_header = skb->data - skb->head;
2446}
2447
2448static inline void skb_set_transport_header(struct sk_buff *skb,
2449 const int offset)
2450{
2451 skb_reset_transport_header(skb);
2452 skb->transport_header += offset;
2453}
2454
2455static inline unsigned char *skb_network_header(const struct sk_buff *skb)
2456{
2457 return skb->head + skb->network_header;
2458}
2459
2460static inline void skb_reset_network_header(struct sk_buff *skb)
2461{
2462 skb->network_header = skb->data - skb->head;
2463}
2464
2465static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
2466{
2467 skb_reset_network_header(skb);
2468 skb->network_header += offset;
2469}
2470
2471static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
2472{
2473 return skb->head + skb->mac_header;
2474}
2475
Amir Vadaiea6da4f2017-02-07 09:56:06 +02002476static inline int skb_mac_offset(const struct sk_buff *skb)
2477{
2478 return skb_mac_header(skb) - skb->data;
2479}
2480
Daniel Borkmann0daf4342017-07-02 02:13:25 +02002481static inline u32 skb_mac_header_len(const struct sk_buff *skb)
2482{
2483 return skb->network_header - skb->mac_header;
2484}
2485
Arnaldo Carvalho de Melo2e07fa92007-04-10 21:22:35 -07002486static inline int skb_mac_header_was_set(const struct sk_buff *skb)
2487{
Cong Wang35d04612013-05-29 15:16:05 +08002488 return skb->mac_header != (typeof(skb->mac_header))~0U;
Arnaldo Carvalho de Melo2e07fa92007-04-10 21:22:35 -07002489}
2490
2491static inline void skb_reset_mac_header(struct sk_buff *skb)
2492{
2493 skb->mac_header = skb->data - skb->head;
2494}
2495
2496static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
2497{
2498 skb_reset_mac_header(skb);
2499 skb->mac_header += offset;
2500}
2501
Timo Teräs0e3da5b2013-12-16 11:02:09 +02002502static inline void skb_pop_mac_header(struct sk_buff *skb)
2503{
2504 skb->mac_header = skb->network_header;
2505}
2506
Maxim Mikityanskiyd2aa1252019-02-21 12:39:57 +00002507static inline void skb_probe_transport_header(struct sk_buff *skb)
Ying Xuefbbdb8f2013-03-27 16:46:06 +00002508{
Paolo Abeni72a338b2018-05-04 11:32:59 +02002509 struct flow_keys_basic keys;
Ying Xuefbbdb8f2013-03-27 16:46:06 +00002510
2511 if (skb_transport_header_was_set(skb))
2512 return;
Paolo Abeni72a338b2018-05-04 11:32:59 +02002513
Stanislav Fomichev3cbf4ff2019-04-22 08:55:46 -07002514 if (skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
2515 NULL, 0, 0, 0, 0))
Tom Herbert42aecaa2015-06-04 09:16:39 -07002516 skb_set_transport_header(skb, keys.control.thoff);
Ying Xuefbbdb8f2013-03-27 16:46:06 +00002517}
2518
Eric Dumazet03606892012-02-23 10:55:02 +00002519static inline void skb_mac_header_rebuild(struct sk_buff *skb)
2520{
2521 if (skb_mac_header_was_set(skb)) {
2522 const unsigned char *old_mac = skb_mac_header(skb);
2523
2524 skb_set_mac_header(skb, -skb->mac_len);
2525 memmove(skb_mac_header(skb), old_mac, skb->mac_len);
2526 }
2527}
2528
Michał Mirosław04fb4512010-12-14 15:24:08 +00002529static inline int skb_checksum_start_offset(const struct sk_buff *skb)
2530{
2531 return skb->csum_start - skb_headroom(skb);
2532}
2533
Alexander Duyck08b64fc2016-02-05 15:27:49 -08002534static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
2535{
2536 return skb->head + skb->csum_start;
2537}
2538
Arnaldo Carvalho de Melo2e07fa92007-04-10 21:22:35 -07002539static inline int skb_transport_offset(const struct sk_buff *skb)
2540{
2541 return skb_transport_header(skb) - skb->data;
2542}
2543
2544static inline u32 skb_network_header_len(const struct sk_buff *skb)
2545{
2546 return skb->transport_header - skb->network_header;
2547}
2548
Joseph Gasparakis6a674e92012-12-07 14:14:14 +00002549static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
2550{
2551 return skb->inner_transport_header - skb->inner_network_header;
2552}
2553
Arnaldo Carvalho de Melo2e07fa92007-04-10 21:22:35 -07002554static inline int skb_network_offset(const struct sk_buff *skb)
2555{
2556 return skb_network_header(skb) - skb->data;
2557}
Arnaldo Carvalho de Melo48d49d0c2007-03-10 12:30:58 -03002558
Joseph Gasparakis6a674e92012-12-07 14:14:14 +00002559static inline int skb_inner_network_offset(const struct sk_buff *skb)
2560{
2561 return skb_inner_network_header(skb) - skb->data;
2562}
2563
Changli Gaof9599ce2010-08-04 04:43:44 +00002564static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
2565{
2566 return pskb_may_pull(skb, skb_network_offset(skb) + len);
2567}
2568
Linus Torvalds1da177e2005-04-16 15:20:36 -07002569/*
2570 * CPUs often take a performance hit when accessing unaligned memory
2571 * locations. The actual performance hit varies, it can be small if the
2572 * hardware handles it or large if we have to take an exception and fix it
2573 * in software.
2574 *
2575 * Since an ethernet header is 14 bytes, network drivers often end up with
2576 * the IP header at an unaligned offset. The IP header can be aligned by
2577 * shifting the start of the packet by 2 bytes. Drivers should do this
2578 * with:
2579 *
Tobias Klauser8660c122009-07-13 22:48:16 +00002580 * skb_reserve(skb, NET_IP_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002581 *
2582 * The downside to this alignment of the IP header is that the DMA is now
2583 * unaligned. On some architectures the cost of an unaligned DMA is high
2584 * and this cost outweighs the gains made by aligning the IP header.
Tobias Klauser8660c122009-07-13 22:48:16 +00002585 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07002586 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
2587 * to be overridden.
2588 */
2589#ifndef NET_IP_ALIGN
2590#define NET_IP_ALIGN 2
2591#endif
2592
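/* Illustrative sketch only: a copy-based receive path for hardware that
 * cannot offset its DMA would typically allocate the skb, reserve
 * NET_IP_ALIGN and then copy the frame in, so the IP header ends up
 * naturally aligned. rx_buf and pkt_len are placeholder names:
 *
 *	skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
 *	if (!skb)
 *		return NULL;
 *	skb_reserve(skb, NET_IP_ALIGN);
 *	skb_put_data(skb, rx_buf, pkt_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *
 * The netdev_alloc_skb_ip_align() helper declared later in this file wraps
 * the first two steps.
 */
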
Anton Blanchard025be812006-03-31 02:27:06 -08002593/*
2594 * The networking layer reserves some headroom in skb data (via
2595 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
2596 * the header has to grow. In the default case, if the header has to grow
David S. Millerd6301d32009-02-08 19:24:13 -08002597 * 32 bytes or less we avoid the reallocation.
Anton Blanchard025be812006-03-31 02:27:06 -08002598 *
2599 * Unfortunately this headroom changes the DMA alignment of the resulting
2600 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
2601 * on some architectures. An architecture can override this value,
2602 * perhaps setting it to a cacheline in size (since that will maintain
2603 * cacheline alignment of the DMA). It must be a power of 2.
2604 *
David S. Millerd6301d32009-02-08 19:24:13 -08002605 * Various parts of the networking layer expect at least 32 bytes of
Anton Blanchard025be812006-03-31 02:27:06 -08002606 * headroom; you should not reduce this.
Eric Dumazet5933dd22010-06-15 18:16:43 -07002607 *
2608 * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
2609 * to reduce average number of cache lines per packet.
2610 * get_rps_cpus() for example only accesses one 64-byte aligned block:
Eric Dumazet18e8c132010-05-06 21:58:51 -07002611 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
Anton Blanchard025be812006-03-31 02:27:06 -08002612 */
2613#ifndef NET_SKB_PAD
Eric Dumazet5933dd22010-06-15 18:16:43 -07002614#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
Anton Blanchard025be812006-03-31 02:27:06 -08002615#endif
2616
Joe Perches7965bd42013-09-26 14:48:15 -07002617int ___pskb_trim(struct sk_buff *skb, unsigned int len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002618
Daniel Borkmann5293efe2016-08-18 01:00:39 +02002619static inline void __skb_set_length(struct sk_buff *skb, unsigned int len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002620{
Yangtao Li5e1abdc2018-11-06 10:45:36 -05002621 if (WARN_ON(skb_is_nonlinear(skb)))
Herbert Xu3cc0e872006-06-09 16:13:38 -07002622 return;
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07002623 skb->len = len;
2624 skb_set_tail_pointer(skb, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002625}
2626
Daniel Borkmann5293efe2016-08-18 01:00:39 +02002627static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
2628{
2629 __skb_set_length(skb, len);
2630}
2631
Joe Perches7965bd42013-09-26 14:48:15 -07002632void skb_trim(struct sk_buff *skb, unsigned int len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002633
2634static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
2635{
Herbert Xu3cc0e872006-06-09 16:13:38 -07002636 if (skb->data_len)
2637 return ___pskb_trim(skb, len);
2638 __skb_trim(skb, len);
2639 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002640}
2641
2642static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
2643{
2644 return (len < skb->len) ? __pskb_trim(skb, len) : 0;
2645}
2646
2647/**
Herbert Xue9fa4f72006-08-13 20:12:58 -07002648 * pskb_trim_unique - remove end from a paged unique (not cloned) buffer
2649 * @skb: buffer to alter
2650 * @len: new length
2651 *
2652 * This is identical to pskb_trim except that the caller knows that
2653 * the skb is not cloned so we should never get an error due to out-
2654 * of-memory.
2655 */
2656static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
2657{
2658 int err = pskb_trim(skb, len);
2659 BUG_ON(err);
2660}
2661
Daniel Borkmann5293efe2016-08-18 01:00:39 +02002662static inline int __skb_grow(struct sk_buff *skb, unsigned int len)
2663{
2664 unsigned int diff = len - skb->len;
2665
2666 if (skb_tailroom(skb) < diff) {
2667 int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb),
2668 GFP_ATOMIC);
2669 if (ret)
2670 return ret;
2671 }
2672 __skb_set_length(skb, len);
2673 return 0;
2674}
2675
Herbert Xue9fa4f72006-08-13 20:12:58 -07002676/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002677 * skb_orphan - orphan a buffer
2678 * @skb: buffer to orphan
2679 *
2680 * If a buffer currently has an owner then we call the owner's
2681 * destructor function and make the @skb unowned. The buffer continues
2682 * to exist but is no longer charged to its former owner.
2683 */
2684static inline void skb_orphan(struct sk_buff *skb)
2685{
Eric Dumazetc34a7612013-07-30 16:11:15 -07002686 if (skb->destructor) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002687 skb->destructor(skb);
Eric Dumazetc34a7612013-07-30 16:11:15 -07002688 skb->destructor = NULL;
2689 skb->sk = NULL;
Eric Dumazet376c7312013-08-01 11:43:08 -07002690 } else {
2691 BUG_ON(skb->sk);
Eric Dumazetc34a7612013-07-30 16:11:15 -07002692 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002693}
2694
2695/**
Michael S. Tsirkina353e0c2012-07-20 09:23:07 +00002696 * skb_orphan_frags - orphan the frags contained in a buffer
2697 * @skb: buffer to orphan frags from
2698 * @gfp_mask: allocation mask for replacement pages
2699 *
2700 * For each frag in the SKB which needs a destructor (i.e. has an
2701 * owner), create a copy of that frag and release the original
2702 * page by calling the destructor.
2703 */
2704static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
2705{
Willem de Bruijn1f8b9772017-08-03 16:29:41 -04002706 if (likely(!skb_zcopy(skb)))
2707 return 0;
Willem de Bruijn185ce5c2019-05-15 13:29:16 -04002708 if (!skb_zcopy_is_nouarg(skb) &&
2709 skb_uarg(skb)->callback == sock_zerocopy_callback)
Willem de Bruijn1f8b9772017-08-03 16:29:41 -04002710 return 0;
2711 return skb_copy_ubufs(skb, gfp_mask);
2712}
2713
2714/* Frags must be orphaned, even if refcounted, if skb might loop to rx path */
2715static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask)
2716{
2717 if (likely(!skb_zcopy(skb)))
Michael S. Tsirkina353e0c2012-07-20 09:23:07 +00002718 return 0;
2719 return skb_copy_ubufs(skb, gfp_mask);
2720}
2721
2722/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002723 * __skb_queue_purge - empty a list
2724 * @list: list to empty
2725 *
2726 * Delete all buffers on an &sk_buff list. Each buffer is removed from
2727 * the list and one reference dropped. This function does not take the
2728 * list lock and the caller must hold the relevant locks to use it.
2729 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002730static inline void __skb_queue_purge(struct sk_buff_head *list)
2731{
2732 struct sk_buff *skb;
2733 while ((skb = __skb_dequeue(list)) != NULL)
2734 kfree_skb(skb);
2735}
Brian Norris4ea7b0c2019-02-11 13:02:25 -08002736void skb_queue_purge(struct sk_buff_head *list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002737
Peter Oskolkov385114d2018-08-02 23:34:38 +00002738unsigned int skb_rbtree_purge(struct rb_root *root);
Yaogong Wang9f5afea2016-09-07 14:49:28 -07002739
Joe Perches7965bd42013-09-26 14:48:15 -07002740void *netdev_alloc_frag(unsigned int fragsz);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002741
Joe Perches7965bd42013-09-26 14:48:15 -07002742struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
2743 gfp_t gfp_mask);
Christoph Hellwig8af27452006-07-31 22:35:23 -07002744
2745/**
2746 * netdev_alloc_skb - allocate an skbuff for rx on a specific device
2747 * @dev: network device to receive on
2748 * @length: length to allocate
2749 *
2750 * Allocate a new &sk_buff and assign it a usage count of one. The
2751 * buffer has unspecified headroom built in. Users should allocate
2752 * the headroom they think they need without accounting for the
2753 * built-in space. The built-in space is used for optimisations.
2754 *
2755 * %NULL is returned if there is no free memory. Although this function
2756 * allocates memory it can be called from an interrupt.
2757 */
2758static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
Eric Dumazet6f532612012-05-18 05:12:12 +00002759 unsigned int length)
Christoph Hellwig8af27452006-07-31 22:35:23 -07002760{
2761 return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
2762}
2763
Eric Dumazet6f532612012-05-18 05:12:12 +00002764/* legacy helper around __netdev_alloc_skb() */
2765static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
2766 gfp_t gfp_mask)
2767{
2768 return __netdev_alloc_skb(NULL, length, gfp_mask);
2769}
2770
2771/* legacy helper around netdev_alloc_skb() */
2772static inline struct sk_buff *dev_alloc_skb(unsigned int length)
2773{
2774 return netdev_alloc_skb(NULL, length);
2775}
2776
2777
Eric Dumazet4915a0d2011-07-11 20:08:34 -07002778static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
2779 unsigned int length, gfp_t gfp)
Eric Dumazet61321bb2009-10-07 17:11:23 +00002780{
Eric Dumazet4915a0d2011-07-11 20:08:34 -07002781 struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);
Eric Dumazet61321bb2009-10-07 17:11:23 +00002782
2783 if (NET_IP_ALIGN && skb)
2784 skb_reserve(skb, NET_IP_ALIGN);
2785 return skb;
2786}
2787
Eric Dumazet4915a0d2011-07-11 20:08:34 -07002788static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
2789 unsigned int length)
2790{
2791 return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
2792}
2793
Alexander Duyck181edb22015-05-06 21:12:03 -07002794static inline void skb_free_frag(void *addr)
2795{
Alexander Duyck8c2dd3e2017-01-10 16:58:06 -08002796 page_frag_free(addr);
Alexander Duyck181edb22015-05-06 21:12:03 -07002797}
2798
Alexander Duyckffde7322014-12-09 19:40:42 -08002799void *napi_alloc_frag(unsigned int fragsz);
Alexander Duyckfd11a832014-12-09 19:40:49 -08002800struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
2801 unsigned int length, gfp_t gfp_mask);
2802static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
2803 unsigned int length)
2804{
2805 return __napi_alloc_skb(napi, length, GFP_ATOMIC);
2806}
Jesper Dangaard Brouer795bb1c2016-02-08 13:14:59 +01002807void napi_consume_skb(struct sk_buff *skb, int budget);
2808
2809void __kfree_skb_flush(void);
Jesper Dangaard Brouer15fad712016-02-08 13:15:04 +01002810void __kfree_skb_defer(struct sk_buff *skb);
Alexander Duyckffde7322014-12-09 19:40:42 -08002811
Florian Fainellibc6fc9f2013-08-30 15:36:14 +01002812/**
Alexander Duyck71dfda52014-11-11 09:26:34 -08002813 * __dev_alloc_pages - allocate page for network Rx
2814 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
2815 * @order: size of the allocation
2816 *
2817 * Allocate a new page.
2818 *
2819 * %NULL is returned if there is no free memory.
2820*/
2821static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
2822 unsigned int order)
2823{
2824 /* This piece of code contains several assumptions.
2825 * 1. This is for device Rx, therefore a cold page is preferred.
2826 * 2. The expectation is the user wants a compound page.
2827 * 3. If requesting an order-0 page it will not be compound
2828 * due to the check to see if order has a value in prep_new_page
2829 * 4. __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to
2830 * code in gfp_to_alloc_flags that should be enforcing this.
2831 */
Mel Gorman453f85d2017-11-15 17:38:03 -08002832 gfp_mask |= __GFP_COMP | __GFP_MEMALLOC;
Alexander Duyck71dfda52014-11-11 09:26:34 -08002833
2834 return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
2835}
2836
2837static inline struct page *dev_alloc_pages(unsigned int order)
2838{
Neil Horman95829b32016-05-19 11:30:54 -04002839 return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order);
Alexander Duyck71dfda52014-11-11 09:26:34 -08002840}
2841
2842/**
2843 * __dev_alloc_page - allocate a page for network Rx
2844 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
2845 *
2846 * Allocate a new page.
2847 *
2848 * %NULL is returned if there is no free memory.
2849 */
2850static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
2851{
2852 return __dev_alloc_pages(gfp_mask, 0);
2853}
2854
2855static inline struct page *dev_alloc_page(void)
2856{
Neil Horman95829b32016-05-19 11:30:54 -04002857 return dev_alloc_pages(0);
Alexander Duyck71dfda52014-11-11 09:26:34 -08002858}
2859
2860/**
Mel Gorman06140022012-07-31 16:44:24 -07002861 * skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
2862 * @page: The page that was allocated from skb_alloc_page
2863 * @skb: The skb that may need pfmemalloc set
2864 */
2865static inline void skb_propagate_pfmemalloc(struct page *page,
2866 struct sk_buff *skb)
2867{
Michal Hocko2f064f32015-08-21 14:11:51 -07002868 if (page_is_pfmemalloc(page))
Mel Gorman06140022012-07-31 16:44:24 -07002869 skb->pfmemalloc = true;
2870}
2871
Eric Dumazet564824b2010-10-11 19:05:25 +00002872/**
Masanari Iidae2278672014-02-18 22:54:36 +09002873 * skb_frag_page - retrieve the page referred to by a paged fragment
Ian Campbell131ea662011-08-19 06:25:00 +00002874 * @frag: the paged fragment
2875 *
2876 * Returns the &struct page associated with @frag.
2877 */
2878static inline struct page *skb_frag_page(const skb_frag_t *frag)
2879{
Ian Campbella8605c62011-10-19 23:01:49 +00002880 return frag->page.p;
Ian Campbell131ea662011-08-19 06:25:00 +00002881}
2882
2883/**
2884 * __skb_frag_ref - take an addition reference on a paged fragment.
2885 * @frag: the paged fragment
2886 *
2887 * Takes an additional reference on the paged fragment @frag.
2888 */
2889static inline void __skb_frag_ref(skb_frag_t *frag)
2890{
2891 get_page(skb_frag_page(frag));
2892}
2893
2894/**
2895 * skb_frag_ref - take an addition reference on a paged fragment of an skb.
2896 * @skb: the buffer
2897 * @f: the fragment offset.
2898 *
2899 * Takes an additional reference on the @f'th paged fragment of @skb.
2900 */
2901static inline void skb_frag_ref(struct sk_buff *skb, int f)
2902{
2903 __skb_frag_ref(&skb_shinfo(skb)->frags[f]);
2904}
2905
2906/**
2907 * __skb_frag_unref - release a reference on a paged fragment.
2908 * @frag: the paged fragment
2909 *
2910 * Releases a reference on the paged fragment @frag.
2911 */
2912static inline void __skb_frag_unref(skb_frag_t *frag)
2913{
2914 put_page(skb_frag_page(frag));
2915}
2916
2917/**
2918 * skb_frag_unref - release a reference on a paged fragment of an skb.
2919 * @skb: the buffer
2920 * @f: the fragment offset
2921 *
2922 * Releases a reference on the @f'th paged fragment of @skb.
2923 */
2924static inline void skb_frag_unref(struct sk_buff *skb, int f)
2925{
2926 __skb_frag_unref(&skb_shinfo(skb)->frags[f]);
2927}
2928
2929/**
2930 * skb_frag_address - gets the address of the data contained in a paged fragment
2931 * @frag: the paged fragment buffer
2932 *
2933 * Returns the address of the data within @frag. The page must already
2934 * be mapped.
2935 */
2936static inline void *skb_frag_address(const skb_frag_t *frag)
2937{
2938 return page_address(skb_frag_page(frag)) + frag->page_offset;
2939}
2940
2941/**
2942 * skb_frag_address_safe - gets the address of the data contained in a paged fragment
2943 * @frag: the paged fragment buffer
2944 *
2945 * Returns the address of the data within @frag. Checks that the page
2946 * is mapped and returns %NULL otherwise.
2947 */
2948static inline void *skb_frag_address_safe(const skb_frag_t *frag)
2949{
2950 void *ptr = page_address(skb_frag_page(frag));
2951 if (unlikely(!ptr))
2952 return NULL;
2953
2954 return ptr + frag->page_offset;
2955}
2956
2957/**
2958 * __skb_frag_set_page - sets the page contained in a paged fragment
2959 * @frag: the paged fragment
2960 * @page: the page to set
2961 *
2962 * Sets the fragment @frag to contain @page.
2963 */
2964static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
2965{
Ian Campbella8605c62011-10-19 23:01:49 +00002966 frag->page.p = page;
Ian Campbell131ea662011-08-19 06:25:00 +00002967}
2968
2969/**
2970 * skb_frag_set_page - sets the page contained in a paged fragment of an skb
2971 * @skb: the buffer
2972 * @f: the fragment offset
2973 * @page: the page to set
2974 *
2975 * Sets the @f'th fragment of @skb to contain @page.
2976 */
2977static inline void skb_frag_set_page(struct sk_buff *skb, int f,
2978 struct page *page)
2979{
2980 __skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
2981}
2982
Eric Dumazet400dfd32013-10-17 16:27:07 -07002983bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
2984
Ian Campbell131ea662011-08-19 06:25:00 +00002985/**
2986 * skb_frag_dma_map - maps a paged fragment via the DMA API
Marcos Paulo de Souzaf83347d2011-10-31 15:11:45 +00002987 * @dev: the device to map the fragment to
Ian Campbell131ea662011-08-19 06:25:00 +00002988 * @frag: the paged fragment to map
2989 * @offset: the offset within the fragment (starting at the
2990 * fragment's own offset)
2991 * @size: the number of bytes to map
Mauro Carvalho Chehab771b00a2017-05-12 09:19:29 -03002992 * @dir: the direction of the mapping (``PCI_DMA_*``)
Ian Campbell131ea662011-08-19 06:25:00 +00002993 *
2994 * Maps the page associated with @frag to @dev.
2995 */
2996static inline dma_addr_t skb_frag_dma_map(struct device *dev,
2997 const skb_frag_t *frag,
2998 size_t offset, size_t size,
2999 enum dma_data_direction dir)
3000{
3001 return dma_map_page(dev, skb_frag_page(frag),
3002 frag->page_offset + offset, size, dir);
3003}
3004
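/* Illustrative sketch only: a transmit path mapping each paged fragment for
 * device DMA might look roughly like the following; tx_dev and the unmap
 * label are placeholders supplied by the driver:
 *
 *	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 *		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 *		dma_addr_t dma;
 *
 *		dma = skb_frag_dma_map(tx_dev, frag, 0,
 *				       skb_frag_size(frag), DMA_TO_DEVICE);
 *		if (dma_mapping_error(tx_dev, dma))
 *			goto unmap_and_drop;
 *	}
 */
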
Eric Dumazet117632e2011-12-03 21:39:53 +00003005static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
3006 gfp_t gfp_mask)
3007{
3008 return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
3009}
3010
Octavian Purdilabad93e92014-06-12 01:36:26 +03003011
3012static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb,
3013 gfp_t gfp_mask)
3014{
3015 return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true);
3016}
3017
3018
Ian Campbell131ea662011-08-19 06:25:00 +00003019/**
Patrick McHardy334a8132007-06-25 04:35:20 -07003020 * skb_clone_writable - is the header of a clone writable
3021 * @skb: buffer to check
3022 * @len: length up to which to write
3023 *
3024 * Returns true if modifying the header part of the cloned buffer
3025 * does not require the data to be copied.
3026 */
Eric Dumazet05bdd2f2011-10-20 17:45:43 -04003027static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
Patrick McHardy334a8132007-06-25 04:35:20 -07003028{
3029 return !skb_header_cloned(skb) &&
3030 skb_headroom(skb) + len <= skb->hdr_len;
3031}
3032
Daniel Borkmann36976492016-02-19 23:05:25 +01003033static inline int skb_try_make_writable(struct sk_buff *skb,
3034 unsigned int write_len)
3035{
3036 return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
3037 pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3038}
3039
Herbert Xud9cc2042007-09-16 16:21:16 -07003040static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
3041 int cloned)
3042{
3043 int delta = 0;
3044
Herbert Xud9cc2042007-09-16 16:21:16 -07003045 if (headroom > skb_headroom(skb))
3046 delta = headroom - skb_headroom(skb);
3047
3048 if (delta || cloned)
3049 return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
3050 GFP_ATOMIC);
3051 return 0;
3052}
3053
Patrick McHardy334a8132007-06-25 04:35:20 -07003054/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07003055 * skb_cow - copy header of skb when it is required
3056 * @skb: buffer to cow
3057 * @headroom: needed headroom
3058 *
3059 * If the skb passed lacks sufficient headroom or its data part
3060 * is shared, data is reallocated. If reallocation fails, an error
3061 * is returned and original skb is not changed.
3062 *
3063 * The result is skb with writable area skb->head...skb->tail
3064 * and at least @headroom of space at head.
3065 */
3066static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
3067{
Herbert Xud9cc2042007-09-16 16:21:16 -07003068 return __skb_cow(skb, headroom, skb_cloned(skb));
3069}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003070
Herbert Xud9cc2042007-09-16 16:21:16 -07003071/**
3072 * skb_cow_head - skb_cow but only making the head writable
3073 * @skb: buffer to cow
3074 * @headroom: needed headroom
3075 *
3076 * This function is identical to skb_cow except that we replace the
3077 * skb_cloned check by skb_header_cloned. It should be used when
3078 * you only need to push on some header and do not need to modify
3079 * the data.
3080 */
3081static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
3082{
3083 return __skb_cow(skb, headroom, skb_header_cloned(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003084}
3085
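/* Illustrative sketch only: code about to push an encapsulation header
 * usually makes the head writable first; hdr_len and hdr are placeholders:
 *
 *	if (skb_cow_head(skb, hdr_len))
 *		return -ENOMEM;
 *	hdr = skb_push(skb, hdr_len);
 */
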
3086/**
3087 * skb_padto - pad an skbuff up to a minimal size
3088 * @skb: buffer to pad
3089 * @len: minimal length
3090 *
3091 * Pads up a buffer to ensure the trailing bytes exist and are
3092 * blanked. If the buffer already contains sufficient data it
Herbert Xu5b057c62006-06-23 02:06:41 -07003093 * is untouched. Otherwise it is extended. Returns zero on
3094 * success. The skb is freed on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003095 */
Herbert Xu5b057c62006-06-23 02:06:41 -07003096static inline int skb_padto(struct sk_buff *skb, unsigned int len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003097{
3098 unsigned int size = skb->len;
3099 if (likely(size >= len))
Herbert Xu5b057c62006-06-23 02:06:41 -07003100 return 0;
Gerrit Renker987c4022008-08-11 18:17:17 -07003101 return skb_pad(skb, len - size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003102}
3103
Alexander Duyck9c0c1122014-12-03 08:17:33 -08003104/**
Brian Norris4ea7b0c2019-02-11 13:02:25 -08003105 * __skb_put_padto - increase size and pad an skbuff up to a minimal size
Alexander Duyck9c0c1122014-12-03 08:17:33 -08003106 * @skb: buffer to pad
3107 * @len: minimal length
Florian Fainellicd0a1372017-08-22 15:12:14 -07003108 * @free_on_error: free buffer on error
3109 *
3110 * Pads up a buffer to ensure the trailing bytes exist and are
3111 * blanked. If the buffer already contains sufficient data it
3112 * is untouched. Otherwise it is extended. Returns zero on
3113 * success. The skb is freed on error if @free_on_error is true.
3114 */
3115static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len,
3116 bool free_on_error)
3117{
3118 unsigned int size = skb->len;
3119
3120 if (unlikely(size < len)) {
3121 len -= size;
3122 if (__skb_pad(skb, len, free_on_error))
3123 return -ENOMEM;
3124 __skb_put(skb, len);
3125 }
3126 return 0;
3127}
3128
3129/**
3130 * skb_put_padto - increase size and pad an skbuff up to a minimal size
3131 * @skb: buffer to pad
3132 * @len: minimal length
Alexander Duyck9c0c1122014-12-03 08:17:33 -08003133 *
3134 * Pads up a buffer to ensure the trailing bytes exist and are
3135 * blanked. If the buffer already contains sufficient data it
3136 * is untouched. Otherwise it is extended. Returns zero on
3137 * success. The skb is freed on error.
3138 */
3139static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
3140{
Florian Fainellicd0a1372017-08-22 15:12:14 -07003141 return __skb_put_padto(skb, len, true);
Alexander Duyck9c0c1122014-12-03 08:17:33 -08003142}
3143
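/* Illustrative sketch only: an Ethernet driver padding runt frames on
 * transmit. On failure the skb has already been freed, so the driver only
 * reports the frame as handled:
 *
 *	if (skb_put_padto(skb, ETH_ZLEN))
 *		return NETDEV_TX_OK;
 */
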
Linus Torvalds1da177e2005-04-16 15:20:36 -07003144static inline int skb_add_data(struct sk_buff *skb,
Al Viroaf2b0402014-11-27 21:44:24 -05003145 struct iov_iter *from, int copy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003146{
3147 const int off = skb->len;
3148
3149 if (skb->ip_summed == CHECKSUM_NONE) {
Al Viroaf2b0402014-11-27 21:44:24 -05003150 __wsum csum = 0;
Al Viro15e6cb42016-11-01 22:42:45 -04003151 if (csum_and_copy_from_iter_full(skb_put(skb, copy), copy,
3152 &csum, from)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003153 skb->csum = csum_block_add(skb->csum, csum, off);
3154 return 0;
3155 }
Al Viro15e6cb42016-11-01 22:42:45 -04003156 } else if (copy_from_iter_full(skb_put(skb, copy), copy, from))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003157 return 0;
3158
3159 __skb_trim(skb, off);
3160 return -EFAULT;
3161}
3162
Eric Dumazet38ba0a62012-04-23 17:48:27 +00003163static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
3164 const struct page *page, int off)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003165{
Willem de Bruijn1f8b9772017-08-03 16:29:41 -04003166 if (skb_zcopy(skb))
3167 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003168 if (i) {
Eric Dumazet9e903e02011-10-18 21:00:24 +00003169 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
Linus Torvalds1da177e2005-04-16 15:20:36 -07003170
Ian Campbellea2ab692011-08-22 23:44:58 +00003171 return page == skb_frag_page(frag) &&
Eric Dumazet9e903e02011-10-18 21:00:24 +00003172 off == frag->page_offset + skb_frag_size(frag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003173 }
Eric Dumazet38ba0a62012-04-23 17:48:27 +00003174 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003175}
3176
Herbert Xu364c6ba2006-06-09 16:10:40 -07003177static inline int __skb_linearize(struct sk_buff *skb)
3178{
3179 return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
3180}
3181
Linus Torvalds1da177e2005-04-16 15:20:36 -07003182/**
3183 * skb_linearize - convert paged skb to linear one
3184 * @skb: buffer to linearize
Linus Torvalds1da177e2005-04-16 15:20:36 -07003185 *
3186 * If there is no free memory -ENOMEM is returned, otherwise zero
3187 * is returned and the old skb data released.
3188 */
Herbert Xu364c6ba2006-06-09 16:10:40 -07003189static inline int skb_linearize(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003190{
Herbert Xu364c6ba2006-06-09 16:10:40 -07003191 return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
3192}
3193
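/* Illustrative sketch only: a driver whose hardware cannot do scatter-gather
 * may flatten the skb before DMA-mapping skb->data; afterwards all skb->len
 * bytes are contiguous in the linear area:
 *
 *	if (skb_linearize(skb))
 *		goto drop;
 */
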
3194/**
Eric Dumazetcef401d2013-01-25 20:34:37 +00003195 * skb_has_shared_frag - can any frag be overwritten
3196 * @skb: buffer to test
3197 *
3198 * Return true if the skb has at least one frag that might be modified
3199 * by an external entity (as in vmsplice()/sendfile()).
3200 */
3201static inline bool skb_has_shared_frag(const struct sk_buff *skb)
3202{
Pravin B Shelarc9af6db2013-02-11 09:27:41 +00003203 return skb_is_nonlinear(skb) &&
3204 skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
Eric Dumazetcef401d2013-01-25 20:34:37 +00003205}
3206
3207/**
Herbert Xu364c6ba2006-06-09 16:10:40 -07003208 * skb_linearize_cow - make sure skb is linear and writable
3209 * @skb: buffer to process
3210 *
3211 * If there is no free memory -ENOMEM is returned, otherwise zero
3212 * is returned and the old skb data released.
3213 */
3214static inline int skb_linearize_cow(struct sk_buff *skb)
3215{
3216 return skb_is_nonlinear(skb) || skb_cloned(skb) ?
3217 __skb_linearize(skb) : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003218}
3219
Daniel Borkmann479ffccc2016-08-05 00:11:12 +02003220static __always_inline void
3221__skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
3222 unsigned int off)
3223{
3224 if (skb->ip_summed == CHECKSUM_COMPLETE)
3225 skb->csum = csum_block_sub(skb->csum,
3226 csum_partial(start, len, 0), off);
3227 else if (skb->ip_summed == CHECKSUM_PARTIAL &&
3228 skb_checksum_start_offset(skb) < 0)
3229 skb->ip_summed = CHECKSUM_NONE;
3230}
3231
Linus Torvalds1da177e2005-04-16 15:20:36 -07003232/**
3233 * skb_postpull_rcsum - update checksum for received skb after pull
3234 * @skb: buffer to update
3235 * @start: start of data before pull
3236 * @len: length of data pulled
3237 *
3238 * After doing a pull on a received packet, you need to call this to
Patrick McHardy84fa7932006-08-29 16:44:56 -07003239 * update the CHECKSUM_COMPLETE checksum, or set ip_summed to
3240 * CHECKSUM_NONE so that it can be recomputed from scratch.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003241 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003242static inline void skb_postpull_rcsum(struct sk_buff *skb,
Herbert Xucbb042f2006-03-20 22:43:56 -08003243 const void *start, unsigned int len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003244{
Daniel Borkmann479ffccc2016-08-05 00:11:12 +02003245 __skb_postpull_rcsum(skb, start, len, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003246}
3247
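/* Illustrative sketch only: stripping an outer header of hdrlen bytes from a
 * received packet while keeping a CHECKSUM_COMPLETE value usable; hdrlen is
 * a placeholder for the caller's header length:
 *
 *	const void *old_data = skb->data;
 *
 *	__skb_pull(skb, hdrlen);
 *	skb_postpull_rcsum(skb, old_data, hdrlen);
 */
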
Daniel Borkmann479ffccc2016-08-05 00:11:12 +02003248static __always_inline void
3249__skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
3250 unsigned int off)
3251{
3252 if (skb->ip_summed == CHECKSUM_COMPLETE)
3253 skb->csum = csum_block_add(skb->csum,
3254 csum_partial(start, len, 0), off);
3255}
Herbert Xucbb042f2006-03-20 22:43:56 -08003256
Daniel Borkmann479ffccc2016-08-05 00:11:12 +02003257/**
3258 * skb_postpush_rcsum - update checksum for received skb after push
3259 * @skb: buffer to update
3260 * @start: start of data after push
3261 * @len: length of data pushed
3262 *
3263 * After doing a push on a received packet, you need to call this to
3264 * update the CHECKSUM_COMPLETE checksum.
3265 */
Daniel Borkmannf8ffad62016-01-07 15:50:23 +01003266static inline void skb_postpush_rcsum(struct sk_buff *skb,
3267 const void *start, unsigned int len)
3268{
Daniel Borkmann479ffccc2016-08-05 00:11:12 +02003269 __skb_postpush_rcsum(skb, start, len, 0);
Daniel Borkmannf8ffad62016-01-07 15:50:23 +01003270}
3271
Johannes Bergaf728682017-06-16 14:29:22 +02003272void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
Daniel Borkmann479ffccc2016-08-05 00:11:12 +02003273
David S. Miller7ce5a272013-12-02 17:26:05 -05003274/**
WANG Cong82a31b92016-06-30 10:15:22 -07003275 * skb_push_rcsum - push skb and update receive checksum
3276 * @skb: buffer to update
3277 * @len: length of data pulled
3278 *
3279 * This function performs an skb_push on the packet and updates
3280 * the CHECKSUM_COMPLETE checksum. It should be used on
3281 * receive path processing instead of skb_push unless you know
3282 * that the checksum difference is zero (e.g., a valid IP header)
3283 * or you are setting ip_summed to CHECKSUM_NONE.
3284 */
Johannes Bergd58ff352017-06-16 14:29:23 +02003285static inline void *skb_push_rcsum(struct sk_buff *skb, unsigned int len)
WANG Cong82a31b92016-06-30 10:15:22 -07003286{
3287 skb_push(skb, len);
3288 skb_postpush_rcsum(skb, skb->data, len);
3289 return skb->data;
3290}
3291
Eric Dumazet88078d92018-04-18 11:43:15 -07003292int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
WANG Cong82a31b92016-06-30 10:15:22 -07003293/**
David S. Miller7ce5a272013-12-02 17:26:05 -05003294 * pskb_trim_rcsum - trim received skb and update checksum
3295 * @skb: buffer to trim
3296 * @len: new length
3297 *
3298 * This is exactly the same as pskb_trim except that it ensures the
3299 * checksum of received packets is still valid after the operation.
Ross Lagerwall6c57f042019-01-17 15:34:38 +00003300 * It can change skb pointers.
David S. Miller7ce5a272013-12-02 17:26:05 -05003301 */
3302
3303static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
3304{
3305 if (likely(len >= skb->len))
3306 return 0;
Eric Dumazet88078d92018-04-18 11:43:15 -07003307 return pskb_trim_rcsum_slow(skb, len);
David S. Miller7ce5a272013-12-02 17:26:05 -05003308}
3309
Daniel Borkmann5293efe2016-08-18 01:00:39 +02003310static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
3311{
3312 if (skb->ip_summed == CHECKSUM_COMPLETE)
3313 skb->ip_summed = CHECKSUM_NONE;
3314 __skb_trim(skb, len);
3315 return 0;
3316}
3317
3318static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
3319{
3320 if (skb->ip_summed == CHECKSUM_COMPLETE)
3321 skb->ip_summed = CHECKSUM_NONE;
3322 return __skb_grow(skb, len);
3323}
3324
Eric Dumazet18a4c0e2017-10-05 22:21:21 -07003325#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
3326#define skb_rb_first(root) rb_to_skb(rb_first(root))
3327#define skb_rb_last(root) rb_to_skb(rb_last(root))
3328#define skb_rb_next(skb) rb_to_skb(rb_next(&(skb)->rbnode))
3329#define skb_rb_prev(skb) rb_to_skb(rb_prev(&(skb)->rbnode))
3330
Linus Torvalds1da177e2005-04-16 15:20:36 -07003331#define skb_queue_walk(queue, skb) \
3332 for (skb = (queue)->next; \
Linus Torvaldsa1e48912011-05-22 16:51:43 -07003333 skb != (struct sk_buff *)(queue); \
Linus Torvalds1da177e2005-04-16 15:20:36 -07003334 skb = skb->next)
3335
James Chapman46f89142007-04-30 00:07:31 -07003336#define skb_queue_walk_safe(queue, skb, tmp) \
3337 for (skb = (queue)->next, tmp = skb->next; \
3338 skb != (struct sk_buff *)(queue); \
3339 skb = tmp, tmp = skb->next)
3340
David S. Miller1164f522008-09-23 00:49:44 -07003341#define skb_queue_walk_from(queue, skb) \
Linus Torvaldsa1e48912011-05-22 16:51:43 -07003342 for (; skb != (struct sk_buff *)(queue); \
David S. Miller1164f522008-09-23 00:49:44 -07003343 skb = skb->next)
3344
Eric Dumazet18a4c0e2017-10-05 22:21:21 -07003345#define skb_rbtree_walk(skb, root) \
3346 for (skb = skb_rb_first(root); skb != NULL; \
3347 skb = skb_rb_next(skb))
3348
3349#define skb_rbtree_walk_from(skb) \
3350 for (; skb != NULL; \
3351 skb = skb_rb_next(skb))
3352
3353#define skb_rbtree_walk_from_safe(skb, tmp) \
3354 for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL); \
3355 skb = tmp)
3356
David S. Miller1164f522008-09-23 00:49:44 -07003357#define skb_queue_walk_from_safe(queue, skb, tmp) \
3358 for (tmp = skb->next; \
3359 skb != (struct sk_buff *)(queue); \
3360 skb = tmp, tmp = skb->next)
3361
Stephen Hemminger300ce172005-10-30 13:47:34 -08003362#define skb_queue_reverse_walk(queue, skb) \
3363 for (skb = (queue)->prev; \
Linus Torvaldsa1e48912011-05-22 16:51:43 -07003364 skb != (struct sk_buff *)(queue); \
Stephen Hemminger300ce172005-10-30 13:47:34 -08003365 skb = skb->prev)
3366
David S. Miller686a2952011-01-20 22:47:32 -08003367#define skb_queue_reverse_walk_safe(queue, skb, tmp) \
3368 for (skb = (queue)->prev, tmp = skb->prev; \
3369 skb != (struct sk_buff *)(queue); \
3370 skb = tmp, tmp = skb->prev)
3371
3372#define skb_queue_reverse_walk_from_safe(queue, skb, tmp) \
3373 for (tmp = skb->prev; \
3374 skb != (struct sk_buff *)(queue); \
3375 skb = tmp, tmp = skb->prev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003376
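/* Illustrative sketch only: removing selected buffers from a queue requires
 * one of the _safe variants, since the current skb is unlinked (and freed)
 * while walking. should_drop() is a placeholder predicate and the caller
 * must hold the appropriate queue lock for __skb_unlink():
 *
 *	skb_queue_walk_safe(&sk->sk_receive_queue, skb, tmp) {
 *		if (should_drop(skb)) {
 *			__skb_unlink(skb, &sk->sk_receive_queue);
 *			kfree_skb(skb);
 *		}
 *	}
 */
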
David S. Miller21dc3302010-08-23 00:13:46 -07003377static inline bool skb_has_frag_list(const struct sk_buff *skb)
David S. Milleree039872009-06-09 00:17:13 -07003378{
3379 return skb_shinfo(skb)->frag_list != NULL;
3380}
3381
3382static inline void skb_frag_list_init(struct sk_buff *skb)
3383{
3384 skb_shinfo(skb)->frag_list = NULL;
3385}
3386
David S. Milleree039872009-06-09 00:17:13 -07003387#define skb_walk_frags(skb, iter) \
3388 for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
3389
Rainer Weikusatea3793e2015-12-06 21:11:34 +00003390
3391int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
3392 const struct sk_buff *skb);
Paolo Abeni65101ae2017-05-16 11:20:13 +02003393struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
3394 struct sk_buff_head *queue,
3395 unsigned int flags,
3396 void (*destructor)(struct sock *sk,
3397 struct sk_buff *skb),
Paolo Abenifd69c392019-04-08 10:15:59 +02003398 int *off, int *err,
Paolo Abeni65101ae2017-05-16 11:20:13 +02003399 struct sk_buff **last);
Rainer Weikusatea3793e2015-12-06 21:11:34 +00003400struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags,
Paolo Abeni7c13f972016-11-04 11:28:59 +01003401 void (*destructor)(struct sock *sk,
3402 struct sk_buff *skb),
Paolo Abenifd69c392019-04-08 10:15:59 +02003403 int *off, int *err,
Rainer Weikusatea3793e2015-12-06 21:11:34 +00003404 struct sk_buff **last);
Joe Perches7965bd42013-09-26 14:48:15 -07003405struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
Paolo Abeni7c13f972016-11-04 11:28:59 +01003406 void (*destructor)(struct sock *sk,
3407 struct sk_buff *skb),
Paolo Abenifd69c392019-04-08 10:15:59 +02003408 int *off, int *err);
Joe Perches7965bd42013-09-26 14:48:15 -07003409struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
3410 int *err);
Linus Torvaldsa11e1d42018-06-28 09:43:44 -07003411__poll_t datagram_poll(struct file *file, struct socket *sock,
3412 struct poll_table_struct *wait);
Al Viroc0371da2014-11-24 10:42:55 -05003413int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
3414 struct iov_iter *to, int size);
David S. Miller51f3d022014-11-05 16:46:40 -05003415static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
3416 struct msghdr *msg, int size)
3417{
Al Viroe5a4b0b2014-11-24 18:17:55 -05003418 return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
David S. Miller51f3d022014-11-05 16:46:40 -05003419}
Al Viroe5a4b0b2014-11-24 18:17:55 -05003420int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
3421 struct msghdr *msg);
Sagi Grimberg65d69e22018-12-03 17:52:10 -08003422int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset,
3423 struct iov_iter *to, int len,
3424 struct ahash_request *hash);
Al Viro3a654f92014-06-19 14:15:22 -04003425int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
3426 struct iov_iter *from, int len);
Al Viro3a654f92014-06-19 14:15:22 -04003427int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
Joe Perches7965bd42013-09-26 14:48:15 -07003428void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
samanthakumar627d2d62016-04-05 12:41:16 -04003429void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len);
3430static inline void skb_free_datagram_locked(struct sock *sk,
3431 struct sk_buff *skb)
3432{
3433 __skb_free_datagram_locked(sk, skb, 0);
3434}
Joe Perches7965bd42013-09-26 14:48:15 -07003435int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
Joe Perches7965bd42013-09-26 14:48:15 -07003436int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
3437int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
3438__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
3439 int len, __wsum csum);
Hannes Frederic Sowaa60e3cc2015-05-21 17:00:00 +02003440int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
Joe Perches7965bd42013-09-26 14:48:15 -07003441 struct pipe_inode_info *pipe, unsigned int len,
Al Viro25869262016-09-17 21:02:10 -04003442 unsigned int flags);
Tom Herbert20bf50d2017-07-28 16:22:42 -07003443int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
3444 int len);
Joe Perches7965bd42013-09-26 14:48:15 -07003445void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
Thomas Grafaf2806f2013-12-13 15:22:17 +01003446unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
Zoltan Kiss36d5fe62014-03-26 22:37:45 +00003447int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
3448 int len, int hlen);
Joe Perches7965bd42013-09-26 14:48:15 -07003449void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
3450int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
3451void skb_scrub_packet(struct sk_buff *skb, bool xnet);
Daniel Axtens779b7932018-03-01 17:13:37 +11003452bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
Daniel Axtens2b16f042018-01-31 14:15:33 +11003453bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
Joe Perches7965bd42013-09-26 14:48:15 -07003454struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
Vlad Yasevich0d5501c2014-08-08 14:42:13 -04003455struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
Jiri Pirkoe2195122014-11-19 14:05:01 +01003456int skb_ensure_writable(struct sk_buff *skb, int write_len);
Shmulik Ladkanibfca4c52016-09-19 19:11:09 +03003457int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
Jiri Pirko93515d52014-11-19 14:05:02 +01003458int skb_vlan_pop(struct sk_buff *skb);
3459int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
John Hurley8822e272019-07-07 15:01:54 +01003460int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto);
John Hurleyed246ce2019-07-07 15:01:55 +01003461int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto);
John Hurleyd27cf5c2019-07-07 15:01:56 +01003462int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse);
John Hurley2a2ea502019-07-07 15:01:57 +01003463int skb_mpls_dec_ttl(struct sk_buff *skb);
Sowmini Varadhan6fa01cc2016-04-22 18:36:35 -07003464struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
3465 gfp_t gfp);
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03003466
Al Viro6ce8e9c2014-04-06 21:25:44 -04003467static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
3468{
Al Viro3073f072017-02-17 23:13:25 -05003469 return copy_from_iter_full(data, len, &msg->msg_iter) ? 0 : -EFAULT;
Al Viro6ce8e9c2014-04-06 21:25:44 -04003470}
3471
Al Viro7eab8d92014-04-06 21:51:23 -04003472static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
3473{
Al Viroe5a4b0b2014-11-24 18:17:55 -05003474 return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
Al Viro7eab8d92014-04-06 21:51:23 -04003475}
3476
Daniel Borkmann2817a332013-10-30 11:50:51 +01003477struct skb_checksum_ops {
3478 __wsum (*update)(const void *mem, int len, __wsum wsum);
3479 __wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
3480};
3481
Davide Caratti96178132017-05-18 15:44:37 +02003482extern const struct skb_checksum_ops *crc32c_csum_stub __read_mostly;
3483
Daniel Borkmann2817a332013-10-30 11:50:51 +01003484__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
3485 __wsum csum, const struct skb_checksum_ops *ops);
3486__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
3487 __wsum csum);
3488
Eric Dumazet1e98a0f2015-06-12 19:31:32 -07003489static inline void * __must_check
3490__skb_header_pointer(const struct sk_buff *skb, int offset,
3491 int len, void *data, int hlen, void *buffer)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003492{
Patrick McHardy55820ee2005-07-05 14:08:10 -07003493 if (hlen - offset >= len)
David S. Miller690e36e2014-08-23 12:13:41 -07003494 return data + offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003495
David S. Miller690e36e2014-08-23 12:13:41 -07003496 if (!skb ||
3497 skb_copy_bits(skb, offset, buffer, len) < 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003498 return NULL;
3499
3500 return buffer;
3501}
3502
Eric Dumazet1e98a0f2015-06-12 19:31:32 -07003503static inline void * __must_check
3504skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
David S. Miller690e36e2014-08-23 12:13:41 -07003505{
3506 return __skb_header_pointer(skb, offset, len, skb->data,
3507 skb_headlen(skb), buffer);
3508}
3509
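/* Illustrative sketch only: parsing a header that may or may not sit in the
 * linear area. The on-stack copy (_uh) is only used when the header is not
 * contiguous in skb->data:
 *
 *	struct udphdr _uh;
 *	const struct udphdr *uh;
 *
 *	uh = skb_header_pointer(skb, skb_transport_offset(skb),
 *				sizeof(_uh), &_uh);
 *	if (!uh)
 *		return -EINVAL;
 */
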
Daniel Borkmann4262e5c2013-12-06 11:36:16 +01003510/**
3511 * skb_needs_linearize - check if we need to linearize a given skb
3512 * depending on the given device features.
3513 * @skb: socket buffer to check
3514 * @features: net device features
3515 *
3516 * Returns true if either:
3517 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
3518 * 2. skb is fragmented and the device does not support SG.
3519 */
3520static inline bool skb_needs_linearize(struct sk_buff *skb,
3521 netdev_features_t features)
3522{
3523 return skb_is_nonlinear(skb) &&
3524 ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
3525 (skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG)));
3526}
3527
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03003528static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
3529 void *to,
3530 const unsigned int len)
3531{
3532 memcpy(to, skb->data, len);
3533}
3534
3535static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
3536 const int offset, void *to,
3537 const unsigned int len)
3538{
3539 memcpy(to, skb->data + offset, len);
3540}
3541
Arnaldo Carvalho de Melo27d7ff42007-03-31 11:55:19 -03003542static inline void skb_copy_to_linear_data(struct sk_buff *skb,
3543 const void *from,
3544 const unsigned int len)
3545{
3546 memcpy(skb->data, from, len);
3547}
3548
3549static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
3550 const int offset,
3551 const void *from,
3552 const unsigned int len)
3553{
3554 memcpy(skb->data + offset, from, len);
3555}
3556
Joe Perches7965bd42013-09-26 14:48:15 -07003557void skb_init(void);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003558
Patrick Ohlyac45f602009-02-12 05:03:37 +00003559static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
3560{
3561 return skb->tstamp;
3562}
3563
Patrick McHardya61bbcf2005-08-14 17:24:31 -07003564/**
3565 * skb_get_timestamp - get timestamp from a skb
3566 * @skb: skb to get stamp from
Deepa Dinamani13c6ee22019-02-02 07:34:48 -08003567 * @stamp: pointer to struct __kernel_old_timeval to store stamp in
Patrick McHardya61bbcf2005-08-14 17:24:31 -07003568 *
3569 * Timestamps are stored in the skb as offsets to a base timestamp.
3570 * This function converts the offset back to a struct __kernel_old_timeval
3571 * and stores it in @stamp.
3572 */
Patrick Ohlyac45f602009-02-12 05:03:37 +00003573static inline void skb_get_timestamp(const struct sk_buff *skb,
Deepa Dinamani13c6ee22019-02-02 07:34:48 -08003574 struct __kernel_old_timeval *stamp)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07003575{
Deepa Dinamani13c6ee22019-02-02 07:34:48 -08003576 *stamp = ns_to_kernel_old_timeval(skb->tstamp);
Patrick McHardya61bbcf2005-08-14 17:24:31 -07003577}
3578
Deepa Dinamani887feae2019-02-02 07:34:50 -08003579static inline void skb_get_new_timestamp(const struct sk_buff *skb,
3580 struct __kernel_sock_timeval *stamp)
3581{
3582 struct timespec64 ts = ktime_to_timespec64(skb->tstamp);
3583
3584 stamp->tv_sec = ts.tv_sec;
3585 stamp->tv_usec = ts.tv_nsec / 1000;
3586}
3587
Patrick Ohlyac45f602009-02-12 05:03:37 +00003588static inline void skb_get_timestampns(const struct sk_buff *skb,
3589 struct timespec *stamp)
3590{
3591 *stamp = ktime_to_timespec(skb->tstamp);
3592}
3593
Deepa Dinamani887feae2019-02-02 07:34:50 -08003594static inline void skb_get_new_timestampns(const struct sk_buff *skb,
3595 struct __kernel_timespec *stamp)
3596{
3597 struct timespec64 ts = ktime_to_timespec64(skb->tstamp);
3598
3599 stamp->tv_sec = ts.tv_sec;
3600 stamp->tv_nsec = ts.tv_nsec;
3601}
3602
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07003603static inline void __net_timestamp(struct sk_buff *skb)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07003604{
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07003605 skb->tstamp = ktime_get_real();
Patrick McHardya61bbcf2005-08-14 17:24:31 -07003606}
3607
Stephen Hemminger164891a2007-04-23 22:26:16 -07003608static inline ktime_t net_timedelta(ktime_t t)
3609{
3610 return ktime_sub(ktime_get_real(), t);
3611}
3612
Ilpo Järvinenb9ce2042007-06-15 15:08:43 -07003613static inline ktime_t net_invalid_timestamp(void)
3614{
Thomas Gleixner8b0e1952016-12-25 12:30:41 +01003615 return 0;
Ilpo Järvinenb9ce2042007-06-15 15:08:43 -07003616}
Patrick McHardya61bbcf2005-08-14 17:24:31 -07003617
Daniel Borkmannde8f3a82017-09-25 02:25:51 +02003618static inline u8 skb_metadata_len(const struct sk_buff *skb)
3619{
3620 return skb_shinfo(skb)->meta_len;
3621}
3622
3623static inline void *skb_metadata_end(const struct sk_buff *skb)
3624{
3625 return skb_mac_header(skb);
3626}
3627
3628static inline bool __skb_metadata_differs(const struct sk_buff *skb_a,
3629 const struct sk_buff *skb_b,
3630 u8 meta_len)
3631{
3632 const void *a = skb_metadata_end(skb_a);
3633 const void *b = skb_metadata_end(skb_b);
3634 /* Using a more efficient variant than a plain call to memcmp(). */
3635#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
3636 u64 diffs = 0;
3637
3638 switch (meta_len) {
3639#define __it(x, op) (x -= sizeof(u##op))
3640#define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op))
3641 case 32: diffs |= __it_diff(a, b, 64);
Gustavo A. R. Silva82385b02018-10-17 15:01:37 +02003642 /* fall through */
Daniel Borkmannde8f3a82017-09-25 02:25:51 +02003643 case 24: diffs |= __it_diff(a, b, 64);
Gustavo A. R. Silva82385b02018-10-17 15:01:37 +02003644 /* fall through */
Daniel Borkmannde8f3a82017-09-25 02:25:51 +02003645 case 16: diffs |= __it_diff(a, b, 64);
Gustavo A. R. Silva82385b02018-10-17 15:01:37 +02003646 /* fall through */
Daniel Borkmannde8f3a82017-09-25 02:25:51 +02003647 case 8: diffs |= __it_diff(a, b, 64);
3648 break;
3649 case 28: diffs |= __it_diff(a, b, 64);
Gustavo A. R. Silva82385b02018-10-17 15:01:37 +02003650 /* fall through */
Daniel Borkmannde8f3a82017-09-25 02:25:51 +02003651 case 20: diffs |= __it_diff(a, b, 64);
Gustavo A. R. Silva82385b02018-10-17 15:01:37 +02003652 /* fall through */
Daniel Borkmannde8f3a82017-09-25 02:25:51 +02003653 case 12: diffs |= __it_diff(a, b, 64);
Gustavo A. R. Silva82385b02018-10-17 15:01:37 +02003654 /* fall through */
Daniel Borkmannde8f3a82017-09-25 02:25:51 +02003655 case 4: diffs |= __it_diff(a, b, 32);
3656 break;
3657 }
3658 return diffs;
3659#else
3660 return memcmp(a - meta_len, b - meta_len, meta_len);
3661#endif
3662}
3663
3664static inline bool skb_metadata_differs(const struct sk_buff *skb_a,
3665 const struct sk_buff *skb_b)
3666{
3667 u8 len_a = skb_metadata_len(skb_a);
3668 u8 len_b = skb_metadata_len(skb_b);
3669
3670 if (!(len_a | len_b))
3671 return false;
3672
3673 return len_a != len_b ?
3674 true : __skb_metadata_differs(skb_a, skb_b, len_a);
3675}
3676
3677static inline void skb_metadata_set(struct sk_buff *skb, u8 meta_len)
3678{
3679 skb_shinfo(skb)->meta_len = meta_len;
3680}
3681
3682static inline void skb_metadata_clear(struct sk_buff *skb)
3683{
3684 skb_metadata_set(skb, 0);
3685}
3686
Alexander Duyck62bccb82014-09-04 13:31:35 -04003687struct sk_buff *skb_clone_sk(struct sk_buff *skb);
3688
Richard Cochranc1f19b52010-07-17 08:49:36 +00003689#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
3690
Joe Perches7965bd42013-09-26 14:48:15 -07003691void skb_clone_tx_timestamp(struct sk_buff *skb);
3692bool skb_defer_rx_timestamp(struct sk_buff *skb);
Richard Cochranc1f19b52010-07-17 08:49:36 +00003693
3694#else /* CONFIG_NETWORK_PHY_TIMESTAMPING */
3695
3696static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
3697{
3698}
3699
3700static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
3701{
3702 return false;
3703}
3704
3705#endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */
3706
3707/**
3708 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
3709 *
Richard Cochranda92b192011-10-21 00:49:15 +00003710 * PHY drivers may accept clones of transmitted packets for
3711 * timestamping via their phy_driver.txtstamp method. These drivers
Benjamin Poirier7a76a022015-08-07 09:32:21 -07003712 * must call this function to return the skb back to the stack with a
3713 * timestamp.
Richard Cochranda92b192011-10-21 00:49:15 +00003714 *
Richard Cochranc1f19b52010-07-17 08:49:36 +00003715 * @skb: clone of the original outgoing packet
Benjamin Poirier7a76a022015-08-07 09:32:21 -07003716 * @hwtstamps: hardware time stamps
Richard Cochranc1f19b52010-07-17 08:49:36 +00003717 *
3718 */
3719void skb_complete_tx_timestamp(struct sk_buff *skb,
3720 struct skb_shared_hwtstamps *hwtstamps);
3721
Willem de Bruijne7fd2882014-08-04 22:11:48 -04003722void __skb_tstamp_tx(struct sk_buff *orig_skb,
3723 struct skb_shared_hwtstamps *hwtstamps,
3724 struct sock *sk, int tstype);
3725
Patrick Ohlyac45f602009-02-12 05:03:37 +00003726/**
3727 * skb_tstamp_tx - queue clone of skb with send time stamps
3728 * @orig_skb: the original outgoing packet
3729 * @hwtstamps: hardware time stamps, may be NULL if not available
3730 *
3731 * If the skb has a socket associated, then this function clones the
3732 * skb (thus sharing the actual data and optional structures), stores
3733 * the optional hardware time stamping information (if non NULL) or
3734 * generates a software time stamp (otherwise), then queues the clone
3735 * to the error queue of the socket. Errors are silently ignored.
3736 */
Joe Perches7965bd42013-09-26 14:48:15 -07003737void skb_tstamp_tx(struct sk_buff *orig_skb,
3738 struct skb_shared_hwtstamps *hwtstamps);
Patrick Ohlyac45f602009-02-12 05:03:37 +00003739
Richard Cochran4507a712010-07-17 08:48:28 +00003740/**
3741 * skb_tx_timestamp() - Driver hook for transmit timestamping
3742 *
3743 * Ethernet MAC drivers should call this function in their hard_xmit()
Richard Cochran4ff75b72011-06-19 03:31:39 +00003744 * function immediately before giving the sk_buff to the MAC hardware.
Richard Cochran4507a712010-07-17 08:48:28 +00003745 *
David S. Miller73409f32013-12-27 13:04:33 -05003746 * Specifically, one should make absolutely sure that this function is
3747 * called before TX completion of this packet can trigger. Otherwise
3748 * the packet could potentially already be freed.
3749 *
Richard Cochran4507a712010-07-17 08:48:28 +00003750 * @skb: A socket buffer.
3751 */
3752static inline void skb_tx_timestamp(struct sk_buff *skb)
3753{
Richard Cochranc1f19b52010-07-17 08:49:36 +00003754 skb_clone_tx_timestamp(skb);
Miroslav Lichvarb50a5c72017-05-19 17:52:40 +02003755 if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP)
3756 skb_tstamp_tx(skb, NULL);
Richard Cochran4507a712010-07-17 08:48:28 +00003757}
3758
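/* Illustrative sketch of the driver hook above; the foo_* names are
 * hypothetical, and foo_hw_kick_tx() stands for whatever hands the skb to
 * the hardware in a real driver:
 *
 *	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
 *					  struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		... map buffers, fill descriptors ...
 *		skb_tx_timestamp(skb);
 *		foo_hw_kick_tx(priv, skb);
 *		return NETDEV_TX_OK;
 *	}
 */
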
Johannes Berg6e3e9392011-11-09 10:15:42 +01003759/**
3760 * skb_complete_wifi_ack - deliver skb with wifi status
3761 *
3762 * @skb: the original outgoing packet
3763 * @acked: ack status
3764 *
3765 */
3766void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
3767
Joe Perches7965bd42013-09-26 14:48:15 -07003768__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
3769__sum16 __skb_checksum_complete(struct sk_buff *skb);
Herbert Xufb286bb2005-11-10 13:01:24 -08003770
Herbert Xu60476372007-04-09 11:59:39 -07003771static inline int skb_csum_unnecessary(const struct sk_buff *skb)
3772{
Tom Herbert6edec0e2015-02-10 16:30:28 -08003773 return ((skb->ip_summed == CHECKSUM_UNNECESSARY) ||
3774 skb->csum_valid ||
3775 (skb->ip_summed == CHECKSUM_PARTIAL &&
3776 skb_checksum_start_offset(skb) >= 0));
Herbert Xu60476372007-04-09 11:59:39 -07003777}
3778
Herbert Xufb286bb2005-11-10 13:01:24 -08003779/**
3780 * skb_checksum_complete - Calculate checksum of an entire packet
3781 * @skb: packet to process
3782 *
3783 * This function calculates the checksum over the entire packet plus
3784 * the value of skb->csum. The latter can be used to supply the
3785 * checksum of a pseudo header as used by TCP/UDP. It returns the
3786 * checksum.
3787 *
3788 * For protocols that contain complete checksums such as ICMP/TCP/UDP,
 * this function can be used to verify the checksum on received
3790 * packets. In that case the function should return zero if the
3791 * checksum is correct. In particular, this function will return zero
3792 * if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
3793 * hardware has already verified the correctness of the checksum.
3794 */
Al Viro4381ca32007-07-15 21:00:11 +01003795static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
Herbert Xufb286bb2005-11-10 13:01:24 -08003796{
Herbert Xu60476372007-04-09 11:59:39 -07003797 return skb_csum_unnecessary(skb) ?
3798 0 : __skb_checksum_complete(skb);
Herbert Xufb286bb2005-11-10 13:01:24 -08003799}
3800
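/* Illustrative sketch: a receive path whose checksum covers the whole packet
 * (no pseudo header, e.g. an ICMP-style handler) can verify it like this:
 *
 *	if (skb_checksum_complete(skb))
 *		goto csum_error;
 */
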
Tom Herbert77cffe22014-08-27 21:26:46 -07003801static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
3802{
3803 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
3804 if (skb->csum_level == 0)
3805 skb->ip_summed = CHECKSUM_NONE;
3806 else
3807 skb->csum_level--;
3808 }
3809}
3810
3811static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
3812{
3813 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
3814 if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
3815 skb->csum_level++;
3816 } else if (skb->ip_summed == CHECKSUM_NONE) {
3817 skb->ip_summed = CHECKSUM_UNNECESSARY;
3818 skb->csum_level = 0;
3819 }
3820}
3821
Tom Herbert76ba0aa2014-05-02 16:29:18 -07003822/* Check if we need to perform checksum complete validation.
3823 *
3824 * Returns true if checksum complete is needed, false otherwise
3825 * (either checksum is unnecessary or zero checksum is allowed).
3826 */
3827static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
3828 bool zero_okay,
3829 __sum16 check)
3830{
Tom Herbert5d0c2b92014-06-10 18:54:13 -07003831 if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
3832 skb->csum_valid = 1;
Tom Herbert77cffe22014-08-27 21:26:46 -07003833 __skb_decr_checksum_unnecessary(skb);
Tom Herbert76ba0aa2014-05-02 16:29:18 -07003834 return false;
3835 }
3836
3837 return true;
3838}
3839
/* For small packets (<= CHECKSUM_BREAK bytes), perform checksum complete directly
Tom Herbert76ba0aa2014-05-02 16:29:18 -07003841 * in checksum_init.
3842 */
3843#define CHECKSUM_BREAK 76
3844
Tom Herbert4e18b9a2015-04-20 14:10:04 -07003845/* Unset checksum-complete
3846 *
 * Unsetting the checksum-complete value can be done when a packet is being
 * modified (decompressed, for instance) and the previously computed
 * checksum-complete value is no longer valid.
3850 */
3851static inline void skb_checksum_complete_unset(struct sk_buff *skb)
3852{
3853 if (skb->ip_summed == CHECKSUM_COMPLETE)
3854 skb->ip_summed = CHECKSUM_NONE;
3855}
3856
Tom Herbert76ba0aa2014-05-02 16:29:18 -07003857/* Validate (init) checksum based on checksum complete.
3858 *
3859 * Return values:
 * 0: checksum was validated, or validation was deferred to
 *    __skb_checksum_complete. In the latter case ip_summed will not be
 *    CHECKSUM_UNNECESSARY and the pseudo-header checksum is stored in
 *    skb->csum for use by __skb_checksum_complete.
 * non-zero: value of the invalid checksum
3864 *
3865 */
3866static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
3867 bool complete,
3868 __wsum psum)
3869{
3870 if (skb->ip_summed == CHECKSUM_COMPLETE) {
3871 if (!csum_fold(csum_add(psum, skb->csum))) {
Tom Herbert5d0c2b92014-06-10 18:54:13 -07003872 skb->csum_valid = 1;
Tom Herbert76ba0aa2014-05-02 16:29:18 -07003873 return 0;
3874 }
3875 }
3876
3877 skb->csum = psum;
3878
Tom Herbert5d0c2b92014-06-10 18:54:13 -07003879 if (complete || skb->len <= CHECKSUM_BREAK) {
3880 __sum16 csum;
3881
3882 csum = __skb_checksum_complete(skb);
3883 skb->csum_valid = !csum;
3884 return csum;
3885 }
Tom Herbert76ba0aa2014-05-02 16:29:18 -07003886
3887 return 0;
3888}
3889
3890static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
3891{
3892 return 0;
3893}
3894
/* Perform checksum validation (init). Note that this is a macro so that the
 * pseudo-header checksum, supplied via the compute_pseudo function argument,
 * is only calculated when it is actually needed. First we try to validate
 * without any computation (checksum unnecessary), and otherwise fall back to
 * checksum complete, calling compute_pseudo to obtain the pseudo header.
3900 *
3901 * Return values:
 * 0: checksum was validated, or validation was deferred to checksum complete
 * non-zero: value of the invalid checksum
3904 */
3905#define __skb_checksum_validate(skb, proto, complete, \
3906 zero_okay, check, compute_pseudo) \
3907({ \
3908 __sum16 __ret = 0; \
Tom Herbert5d0c2b92014-06-10 18:54:13 -07003909 skb->csum_valid = 0; \
Tom Herbert76ba0aa2014-05-02 16:29:18 -07003910 if (__skb_checksum_validate_needed(skb, zero_okay, check)) \
3911 __ret = __skb_checksum_validate_complete(skb, \
3912 complete, compute_pseudo(skb, proto)); \
3913 __ret; \
3914})
3915
3916#define skb_checksum_init(skb, proto, compute_pseudo) \
3917 __skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo)
3918
3919#define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo) \
3920 __skb_checksum_validate(skb, proto, false, true, check, compute_pseudo)
3921
3922#define skb_checksum_validate(skb, proto, compute_pseudo) \
3923 __skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo)
3924
3925#define skb_checksum_validate_zero_check(skb, proto, check, \
3926 compute_pseudo) \
Sabrina Dubroca096a4cf2015-02-06 18:54:19 +01003927 __skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)
Tom Herbert76ba0aa2014-05-02 16:29:18 -07003928
3929#define skb_checksum_simple_validate(skb) \
3930 __skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
3931
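/* Illustrative sketch of how receive paths use the helpers above; the
 * inet_compute_pseudo() pseudo-header helper is assumed to be available
 * from the IPv4 headers (it is not defined here):
 *
 *	TCP over IPv4, checksum covers the packet plus pseudo header:
 *		if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
 *			goto csum_error;
 *
 *	UDP over IPv4, where a zero checksum is allowed:
 *		if (skb_checksum_init_zero_check(skb, IPPROTO_UDP, uh->check,
 *						 inet_compute_pseudo))
 *			goto csum_error;
 */
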
Tom Herbertd96535a2014-08-31 15:12:42 -07003932static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
3933{
Davide Caratti219f1d792017-05-18 15:44:39 +02003934 return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid);
Tom Herbertd96535a2014-08-31 15:12:42 -07003935}
3936
Li RongQinge4aa33a2019-07-04 17:03:26 +08003937static inline void __skb_checksum_convert(struct sk_buff *skb, __wsum pseudo)
Tom Herbertd96535a2014-08-31 15:12:42 -07003938{
3939 skb->csum = ~pseudo;
3940 skb->ip_summed = CHECKSUM_COMPLETE;
3941}
3942
Li RongQinge4aa33a2019-07-04 17:03:26 +08003943#define skb_checksum_try_convert(skb, proto, compute_pseudo) \
Tom Herbertd96535a2014-08-31 15:12:42 -07003944do { \
3945 if (__skb_checksum_convert_check(skb)) \
Li RongQinge4aa33a2019-07-04 17:03:26 +08003946 __skb_checksum_convert(skb, compute_pseudo(skb, proto)); \
Tom Herbertd96535a2014-08-31 15:12:42 -07003947} while (0)
3948
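/* Illustrative sketch: after successful validation a receive path may convert
 * the checksum to CHECKSUM_COMPLETE so later consumers can reuse it, e.g.
 * (again assuming the IPv4 pseudo-header helper):
 *
 *	skb_checksum_try_convert(skb, IPPROTO_UDP, inet_compute_pseudo);
 */
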
Tom Herbert15e23962015-02-10 16:30:31 -08003949static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr,
3950 u16 start, u16 offset)
3951{
3952 skb->ip_summed = CHECKSUM_PARTIAL;
3953 skb->csum_start = ((unsigned char *)ptr + start) - skb->head;
3954 skb->csum_offset = offset - start;
3955}
3956
/* Update the skb and packet to reflect the remote checksum offload operation.
 * When called, ptr indicates the starting point for skb->csum when
 * ip_summed is CHECKSUM_COMPLETE. If we need to create checksum complete
 * here, skb_postpull_rcsum is done so that skb->csum starts at ptr.
3961 */
3962static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
Tom Herbert15e23962015-02-10 16:30:31 -08003963 int start, int offset, bool nopartial)
Tom Herbertdcdc8992015-02-02 16:07:34 -08003964{
3965 __wsum delta;
3966
Tom Herbert15e23962015-02-10 16:30:31 -08003967 if (!nopartial) {
3968 skb_remcsum_adjust_partial(skb, ptr, start, offset);
3969 return;
3970 }
3971
Tom Herbertdcdc8992015-02-02 16:07:34 -08003972 if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
3973 __skb_checksum_complete(skb);
3974 skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
3975 }
3976
3977 delta = remcsum_adjust(ptr, skb->csum, start, offset);
3978
3979 /* Adjust skb->csum since we changed the packet */
3980 skb->csum = csum_add(skb->csum, delta);
3981}
3982
Florian Westphalcb9c6832017-01-23 18:21:56 +01003983static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb)
3984{
3985#if IS_ENABLED(CONFIG_NF_CONNTRACK)
Florian Westphala9e419d2017-01-23 18:21:59 +01003986 return (void *)(skb->_nfct & SKB_NFCT_PTRMASK);
Florian Westphalcb9c6832017-01-23 18:21:56 +01003987#else
3988 return NULL;
3989#endif
3990}
3991
Yasuyuki Kozakai5f79e0f2007-03-23 11:17:07 -07003992#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
Joe Perches7965bd42013-09-26 14:48:15 -07003993void nf_conntrack_destroy(struct nf_conntrack *nfct);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003994static inline void nf_conntrack_put(struct nf_conntrack *nfct)
3995{
3996 if (nfct && atomic_dec_and_test(&nfct->use))
Yasuyuki Kozakaide6e05c2007-03-23 11:17:27 -07003997 nf_conntrack_destroy(nfct);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003998}
3999static inline void nf_conntrack_get(struct nf_conntrack *nfct)
4000{
4001 if (nfct)
4002 atomic_inc(&nfct->use);
4003}
KOVACS Krisztian2fc72c72011-01-12 20:25:08 +01004004#endif
Florian Westphaldf5042f2018-12-18 17:15:16 +01004005
4006#ifdef CONFIG_SKB_EXTENSIONS
4007enum skb_ext_id {
4008#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
4009 SKB_EXT_BRIDGE_NF,
4010#endif
Florian Westphal41650792018-12-18 17:15:27 +01004011#ifdef CONFIG_XFRM
4012 SKB_EXT_SEC_PATH,
4013#endif
Florian Westphaldf5042f2018-12-18 17:15:16 +01004014 SKB_EXT_NUM, /* must be last */
4015};
4016
4017/**
4018 * struct skb_ext - sk_buff extensions
4019 * @refcnt: 1 on allocation, deallocated on 0
4020 * @offset: offset to add to @data to obtain extension address
4021 * @chunks: size currently allocated, stored in SKB_EXT_ALIGN_SHIFT units
4022 * @data: start of extension data, variable sized
4023 *
 * Note: offsets/lengths are stored in chunks of 8 bytes; this allows
 * the use of 'u8' types while still supporting up to 2KB of extension data.
4026 */
4027struct skb_ext {
4028 refcount_t refcnt;
4029 u8 offset[SKB_EXT_NUM]; /* in chunks of 8 bytes */
4030 u8 chunks; /* same */
4031 char data[0] __aligned(8);
4032};
4033
4034void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id);
4035void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id);
4036void __skb_ext_put(struct skb_ext *ext);
4037
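/* Illustrative sketch: attaching and later looking up an extension; the
 * association of SKB_EXT_SEC_PATH with struct sec_path follows its xfrm
 * usage, and the surrounding error handling is assumed:
 *
 *	struct sec_path *sp;
 *
 *	sp = skb_ext_add(skb, SKB_EXT_SEC_PATH);
 *	if (!sp)
 *		return -ENOMEM;
 *	...
 *	sp = skb_ext_find(skb, SKB_EXT_SEC_PATH);
 *	if (sp)
 *		... use the extension ...
 */
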
4038static inline void skb_ext_put(struct sk_buff *skb)
4039{
4040 if (skb->active_extensions)
4041 __skb_ext_put(skb->extensions);
4042}
4043
Florian Westphaldf5042f2018-12-18 17:15:16 +01004044static inline void __skb_ext_copy(struct sk_buff *dst,
4045 const struct sk_buff *src)
4046{
4047 dst->active_extensions = src->active_extensions;
4048
4049 if (src->active_extensions) {
4050 struct skb_ext *ext = src->extensions;
4051
4052 refcount_inc(&ext->refcnt);
4053 dst->extensions = ext;
4054 }
4055}
4056
4057static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *src)
4058{
4059 skb_ext_put(dst);
4060 __skb_ext_copy(dst, src);
4061}
4062
4063static inline bool __skb_ext_exist(const struct skb_ext *ext, enum skb_ext_id i)
4064{
4065 return !!ext->offset[i];
4066}
4067
4068static inline bool skb_ext_exist(const struct sk_buff *skb, enum skb_ext_id id)
4069{
4070 return skb->active_extensions & (1 << id);
4071}
4072
4073static inline void skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
4074{
4075 if (skb_ext_exist(skb, id))
4076 __skb_ext_del(skb, id);
4077}
4078
4079static inline void *skb_ext_find(const struct sk_buff *skb, enum skb_ext_id id)
4080{
4081 if (skb_ext_exist(skb, id)) {
4082 struct skb_ext *ext = skb->extensions;
4083
4084 return (void *)ext + (ext->offset[id] << 3);
4085 }
4086
4087 return NULL;
4088}
4089#else
4090static inline void skb_ext_put(struct sk_buff *skb) {}
Florian Westphaldf5042f2018-12-18 17:15:16 +01004091static inline void skb_ext_del(struct sk_buff *skb, int unused) {}
4092static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {}
4093static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {}
4094#endif /* CONFIG_SKB_EXTENSIONS */
4095
Patrick McHardya193a4a2006-03-20 19:23:05 -08004096static inline void nf_reset(struct sk_buff *skb)
4097{
Yasuyuki Kozakai5f79e0f2007-03-23 11:17:07 -07004098#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
Florian Westphala9e419d2017-01-23 18:21:59 +01004099 nf_conntrack_put(skb_nfct(skb));
4100 skb->_nfct = 0;
KOVACS Krisztian2fc72c72011-01-12 20:25:08 +01004101#endif
Pablo Neira Ayuso34666d42014-09-18 11:29:03 +02004102#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
Florian Westphalde8bda12018-12-18 17:15:17 +01004103 skb_ext_del(skb, SKB_EXT_BRIDGE_NF);
Patrick McHardya193a4a2006-03-20 19:23:05 -08004104#endif
4105}
4106
Patrick McHardy124dff02013-04-05 20:42:05 +02004107static inline void nf_reset_trace(struct sk_buff *skb)
4108{
Florian Westphal478b3602014-02-15 23:48:45 +01004109#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
Gao feng130549fe2013-03-21 19:48:41 +00004110 skb->nf_trace = 0;
4111#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07004112}
4113
Ye Yin2b5ec1a2017-10-26 16:57:05 +08004114static inline void ipvs_reset(struct sk_buff *skb)
4115{
4116#if IS_ENABLED(CONFIG_IP_VS)
4117 skb->ipvs_property = 0;
4118#endif
4119}
4120
Florian Westphalde8bda12018-12-18 17:15:17 +01004121/* Note: This doesn't put any conntrack info in dst. */
Eric Dumazetb1937222014-09-28 22:18:47 -07004122static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
4123 bool copy)
Yasuyuki Kozakaiedda5532007-03-14 16:43:37 -07004124{
Yasuyuki Kozakai5f79e0f2007-03-23 11:17:07 -07004125#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
Florian Westphala9e419d2017-01-23 18:21:59 +01004126 dst->_nfct = src->_nfct;
4127 nf_conntrack_get(skb_nfct(src));
KOVACS Krisztian2fc72c72011-01-12 20:25:08 +01004128#endif
Florian Westphal478b3602014-02-15 23:48:45 +01004129#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
Eric Dumazetb1937222014-09-28 22:18:47 -07004130 if (copy)
4131 dst->nf_trace = src->nf_trace;
Florian Westphal478b3602014-02-15 23:48:45 +01004132#endif
Yasuyuki Kozakaiedda5532007-03-14 16:43:37 -07004133}
4134
Yasuyuki Kozakaie7ac05f2007-03-14 16:44:01 -07004135static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
4136{
Yasuyuki Kozakaie7ac05f2007-03-14 16:44:01 -07004137#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
Florian Westphala9e419d2017-01-23 18:21:59 +01004138 nf_conntrack_put(skb_nfct(dst));
KOVACS Krisztian2fc72c72011-01-12 20:25:08 +01004139#endif
Eric Dumazetb1937222014-09-28 22:18:47 -07004140 __nf_copy(dst, src, true);
Yasuyuki Kozakaie7ac05f2007-03-14 16:44:01 -07004141}
4142
James Morris984bc162006-06-09 00:29:17 -07004143#ifdef CONFIG_NETWORK_SECMARK
4144static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
4145{
4146 to->secmark = from->secmark;
4147}
4148
4149static inline void skb_init_secmark(struct sk_buff *skb)
4150{
4151 skb->secmark = 0;
4152}
4153#else
4154static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
4155{ }
4156
4157static inline void skb_init_secmark(struct sk_buff *skb)
4158{ }
4159#endif
4160
Florian Westphal7af8f4c2018-12-18 17:15:19 +01004161static inline int secpath_exists(const struct sk_buff *skb)
4162{
4163#ifdef CONFIG_XFRM
Florian Westphal41650792018-12-18 17:15:27 +01004164 return skb_ext_exist(skb, SKB_EXT_SEC_PATH);
Florian Westphal7af8f4c2018-12-18 17:15:19 +01004165#else
4166 return 0;
4167#endif
4168}
4169
Eric W. Biederman574f7192014-04-01 12:20:24 -07004170static inline bool skb_irq_freeable(const struct sk_buff *skb)
4171{
4172 return !skb->destructor &&
Florian Westphal7af8f4c2018-12-18 17:15:19 +01004173 !secpath_exists(skb) &&
Florian Westphalcb9c6832017-01-23 18:21:56 +01004174 !skb_nfct(skb) &&
Eric W. Biederman574f7192014-04-01 12:20:24 -07004175 !skb->_skb_refdst &&
4176 !skb_has_frag_list(skb);
4177}
4178
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004179static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
4180{
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004181 skb->queue_mapping = queue_mapping;
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004182}
4183
Stephen Hemminger92477442009-03-21 13:39:26 -07004184static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
Pavel Emelyanov4e3ab472007-10-21 17:01:29 -07004185{
Pavel Emelyanov4e3ab472007-10-21 17:01:29 -07004186 return skb->queue_mapping;
Pavel Emelyanov4e3ab472007-10-21 17:01:29 -07004187}
4188
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004189static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
4190{
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004191 to->queue_mapping = from->queue_mapping;
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004192}
4193
David S. Millerd5a9e242009-01-27 16:22:11 -08004194static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
4195{
4196 skb->queue_mapping = rx_queue + 1;
4197}
4198
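/* Illustrative sketch: a multiqueue driver records the receive queue in its
 * rx path; the ring structure and its queue_index field are assumed:
 *
 *	skb_record_rx_queue(skb, rx_ring->queue_index);
 */
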
Stephen Hemminger92477442009-03-21 13:39:26 -07004199static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
David S. Millerd5a9e242009-01-27 16:22:11 -08004200{
4201 return skb->queue_mapping - 1;
4202}
4203
Stephen Hemminger92477442009-03-21 13:39:26 -07004204static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
David S. Millerd5a9e242009-01-27 16:22:11 -08004205{
Eric Dumazeta02cec22010-09-22 20:43:57 +00004206 return skb->queue_mapping != 0;
David S. Millerd5a9e242009-01-27 16:22:11 -08004207}
4208
Julian Anastasov4ff06202017-02-06 23:14:12 +02004209static inline void skb_set_dst_pending_confirm(struct sk_buff *skb, u32 val)
4210{
4211 skb->dst_pending_confirm = val;
4212}
4213
4214static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb)
4215{
4216 return skb->dst_pending_confirm != 0;
4217}
4218
Florian Westphal2294be0f2018-12-18 17:15:20 +01004219static inline struct sec_path *skb_sec_path(const struct sk_buff *skb)
Denis Kirjanov0b3d8e02013-10-02 05:58:32 +04004220{
Alexey Dobriyandef8b4f2008-10-28 13:24:06 -07004221#ifdef CONFIG_XFRM
Florian Westphal41650792018-12-18 17:15:27 +01004222 return skb_ext_find(skb, SKB_EXT_SEC_PATH);
Alexey Dobriyandef8b4f2008-10-28 13:24:06 -07004223#else
Alexey Dobriyandef8b4f2008-10-28 13:24:06 -07004224 return NULL;
Alexey Dobriyandef8b4f2008-10-28 13:24:06 -07004225#endif
Denis Kirjanov0b3d8e02013-10-02 05:58:32 +04004226}
Alexey Dobriyandef8b4f2008-10-28 13:24:06 -07004227
/* Keeps track of the mac header offset relative to skb->head.
 * This is useful for TSO of tunneling protocols such as GRE.
 * For a non-tunnel skb it points to skb_mac_header() and for a
 * tunnel skb it points to the outer mac header.
 * It also keeps track of the level of encapsulation of network headers.
4233 */
Pravin B Shelar68c33162013-02-14 14:02:41 +00004234struct skb_gso_cb {
Alexander Duyck802ab552016-04-10 21:45:03 -04004235 union {
4236 int mac_offset;
4237 int data_offset;
4238 };
Eric Dumazet3347c962013-10-19 11:42:56 -07004239 int encap_level;
Alexander Duyck76443452016-02-05 15:27:37 -08004240 __wsum csum;
Tom Herbert7e2b10c2014-06-04 17:20:02 -07004241 __u16 csum_start;
Pravin B Shelar68c33162013-02-14 14:02:41 +00004242};
Konstantin Khlebnikov9207f9d2016-01-08 15:21:46 +03004243#define SKB_SGO_CB_OFFSET 32
4244#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_SGO_CB_OFFSET))
Pravin B Shelar68c33162013-02-14 14:02:41 +00004245
4246static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
4247{
4248 return (skb_mac_header(inner_skb) - inner_skb->head) -
4249 SKB_GSO_CB(inner_skb)->mac_offset;
4250}
4251
Pravin B Shelar1e2bd512013-05-30 06:45:27 +00004252static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
4253{
4254 int new_headroom, headroom;
4255 int ret;
4256
4257 headroom = skb_headroom(skb);
4258 ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
4259 if (ret)
4260 return ret;
4261
4262 new_headroom = skb_headroom(skb);
4263 SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
4264 return 0;
4265}
4266
Alexander Duyck08b64fc2016-02-05 15:27:49 -08004267static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
4268{
4269 /* Do not update partial checksums if remote checksum is enabled. */
4270 if (skb->remcsum_offload)
4271 return;
4272
4273 SKB_GSO_CB(skb)->csum = res;
4274 SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
4275}
4276
Tom Herbert7e2b10c2014-06-04 17:20:02 -07004277/* Compute the checksum for a gso segment. First compute the checksum value
4278 * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and
4279 * then add in skb->csum (checksum from csum_start to end of packet).
4280 * skb->csum and csum_start are then updated to reflect the checksum of the
4281 * resultant packet starting from the transport header-- the resultant checksum
4282 * is in the res argument (i.e. normally zero or ~ of checksum of a pseudo
 * header).
4284 */
4285static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
4286{
Alexander Duyck76443452016-02-05 15:27:37 -08004287 unsigned char *csum_start = skb_transport_header(skb);
4288 int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
4289 __wsum partial = SKB_GSO_CB(skb)->csum;
Tom Herbert7e2b10c2014-06-04 17:20:02 -07004290
Alexander Duyck76443452016-02-05 15:27:37 -08004291 SKB_GSO_CB(skb)->csum = res;
4292 SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;
Tom Herbert7e2b10c2014-06-04 17:20:02 -07004293
Alexander Duyck76443452016-02-05 15:27:37 -08004294 return csum_fold(csum_partial(csum_start, plen, partial));
Tom Herbert7e2b10c2014-06-04 17:20:02 -07004295}
4296
David S. Millerbdcc0922012-03-07 20:53:36 -05004297static inline bool skb_is_gso(const struct sk_buff *skb)
Herbert Xu89114af2006-07-08 13:34:32 -07004298{
4299 return skb_shinfo(skb)->gso_size;
4300}
4301
Eric Dumazet36a8f392013-09-29 01:21:32 -07004302/* Note: Should be called only if skb_is_gso(skb) is true */
David S. Millerbdcc0922012-03-07 20:53:36 -05004303static inline bool skb_is_gso_v6(const struct sk_buff *skb)
Brice Goglineabd7e32007-10-13 12:33:32 +02004304{
4305 return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
4306}
4307
Daniel Axtensd02f51c2018-03-03 03:03:46 +01004308/* Note: Should be called only if skb_is_gso(skb) is true */
4309static inline bool skb_is_gso_sctp(const struct sk_buff *skb)
4310{
4311 return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP;
4312}
4313
Willem de Bruijn4c3024d2019-03-06 14:35:15 -05004314/* Note: Should be called only if skb_is_gso(skb) is true */
Willem de Bruijnb90efd22019-02-07 14:54:16 -05004315static inline bool skb_is_gso_tcp(const struct sk_buff *skb)
4316{
Willem de Bruijn4c3024d2019-03-06 14:35:15 -05004317 return skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6);
Willem de Bruijnb90efd22019-02-07 14:54:16 -05004318}
4319
Daniel Borkmann5293efe2016-08-18 01:00:39 +02004320static inline void skb_gso_reset(struct sk_buff *skb)
4321{
4322 skb_shinfo(skb)->gso_size = 0;
4323 skb_shinfo(skb)->gso_segs = 0;
4324 skb_shinfo(skb)->gso_type = 0;
4325}
4326
Daniel Axtensd02f51c2018-03-03 03:03:46 +01004327static inline void skb_increase_gso_size(struct skb_shared_info *shinfo,
4328 u16 increment)
4329{
4330 if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
4331 return;
4332 shinfo->gso_size += increment;
4333}
4334
4335static inline void skb_decrease_gso_size(struct skb_shared_info *shinfo,
4336 u16 decrement)
4337{
4338 if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
4339 return;
4340 shinfo->gso_size -= decrement;
4341}
4342
Joe Perches7965bd42013-09-26 14:48:15 -07004343void __skb_warn_lro_forwarding(const struct sk_buff *skb);
Ben Hutchings4497b072008-06-19 16:22:28 -07004344
4345static inline bool skb_warn_if_lro(const struct sk_buff *skb)
4346{
4347 /* LRO sets gso_size but not gso_type, whereas if GSO is really
4348 * wanted then gso_type will be set. */
Eric Dumazet05bdd2f2011-10-20 17:45:43 -04004349 const struct skb_shared_info *shinfo = skb_shinfo(skb);
4350
Alexander Duyckb78462e2010-06-02 12:24:37 +00004351 if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
4352 unlikely(shinfo->gso_type == 0)) {
Ben Hutchings4497b072008-06-19 16:22:28 -07004353 __skb_warn_lro_forwarding(skb);
4354 return true;
4355 }
4356 return false;
4357}
4358
Herbert Xu35fc92a2007-03-26 23:22:20 -07004359static inline void skb_forward_csum(struct sk_buff *skb)
4360{
4361 /* Unfortunately we don't support this one. Any brave souls? */
4362 if (skb->ip_summed == CHECKSUM_COMPLETE)
4363 skb->ip_summed = CHECKSUM_NONE;
4364}
4365
Eric Dumazetbc8acf22010-09-02 13:07:41 -07004366/**
4367 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
4368 * @skb: skb to check
4369 *
 * Fresh skbs have their ip_summed set to CHECKSUM_NONE.
 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
 * use this helper to document places where we make this assertion.
4373 */
Eric Dumazet05bdd2f2011-10-20 17:45:43 -04004374static inline void skb_checksum_none_assert(const struct sk_buff *skb)
Eric Dumazetbc8acf22010-09-02 13:07:41 -07004375{
4376#ifdef DEBUG
4377 BUG_ON(skb->ip_summed != CHECKSUM_NONE);
4378#endif
4379}
4380
Rusty Russellf35d9d82008-02-04 23:49:54 -05004381bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
Shirley Maa6686f22011-07-06 12:22:12 +00004382
Paul Durranted1f50c2014-01-09 10:02:46 +00004383int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
Linus Lüssing9afd85c2015-05-02 14:01:07 +02004384struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
4385 unsigned int transport_len,
4386 __sum16(*skb_chkf)(struct sk_buff *skb));
Paul Durranted1f50c2014-01-09 10:02:46 +00004387
Alexander Duyck3a7c1ee42012-05-03 01:09:42 +00004388/**
4389 * skb_head_is_locked - Determine if the skb->head is locked down
4390 * @skb: skb to check
4391 *
 * The head on skbs built around a head frag can be removed if they are
4393 * not cloned. This function returns true if the skb head is locked down
4394 * due to either being allocated via kmalloc, or by being a clone with
4395 * multiple references to the head.
4396 */
4397static inline bool skb_head_is_locked(const struct sk_buff *skb)
4398{
4399 return !skb->head_frag || skb_cloned(skb);
4400}
Florian Westphalfe6cc552014-02-13 23:09:12 +01004401
Edward Cree179bc672016-02-11 20:48:04 +00004402/* Local Checksum Offload.
4403 * Compute outer checksum based on the assumption that the
4404 * inner checksum will be offloaded later.
Otto Sabartd0dcde62019-01-06 00:29:15 +01004405 * See Documentation/networking/checksum-offloads.rst for
 * an explanation of how this works.
Edward Cree179bc672016-02-11 20:48:04 +00004407 * Fill in outer checksum adjustment (e.g. with sum of outer
4408 * pseudo-header) before calling.
4409 * Also ensure that inner checksum is in linear data area.
4410 */
4411static inline __wsum lco_csum(struct sk_buff *skb)
4412{
Alexander Duyck9e74a6d2016-02-17 11:23:55 -08004413 unsigned char *csum_start = skb_checksum_start(skb);
4414 unsigned char *l4_hdr = skb_transport_header(skb);
4415 __wsum partial;
Edward Cree179bc672016-02-11 20:48:04 +00004416
4417 /* Start with complement of inner checksum adjustment */
Alexander Duyck9e74a6d2016-02-17 11:23:55 -08004418 partial = ~csum_unfold(*(__force __sum16 *)(csum_start +
4419 skb->csum_offset));
4420
Edward Cree179bc672016-02-11 20:48:04 +00004421 /* Add in checksum of our headers (incl. outer checksum
Alexander Duyck9e74a6d2016-02-17 11:23:55 -08004422 * adjustment filled in by caller) and return result.
Edward Cree179bc672016-02-11 20:48:04 +00004423 */
Alexander Duyck9e74a6d2016-02-17 11:23:55 -08004424 return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
Edward Cree179bc672016-02-11 20:48:04 +00004425}
4426
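/* Illustrative sketch, loosely following how a UDP tunnel transmit path
 * applies LCO when the inner packet is CHECKSUM_PARTIAL; udp_v4_check() and
 * CSUM_MANGLED_0 come from the UDP/checksum headers, and uh is the outer
 * UDP header being filled in:
 *
 *	uh->check = 0;
 *	uh->check = udp_v4_check(len, saddr, daddr, lco_csum(skb));
 *	if (uh->check == 0)
 *		uh->check = CSUM_MANGLED_0;
 */
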
Linus Torvalds1da177e2005-04-16 15:20:36 -07004427#endif /* __KERNEL__ */
4428#endif /* _LINUX_SKBUFF_H */