// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 */
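
/*
 * Illustrative sketch: one common way to make the lockless __skb_
 * helpers atomic with respect to a list, per the NOTE above, is to
 * hold the queue spinlock with bottom halves disabled for the whole
 * operation:
 *
 *	spin_lock_bh(&list->lock);
 *	__skb_queue_tail(list, skb);
 *	spin_unlock_bh(&list->lock);
 */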

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>
#include <linux/mpls.h>
#include <linux/kcov.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/mpls.h>
#include <net/mptcp.h>
#include <net/page_pool.h>

#include <linux/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>
#include <linux/indirect_call_wrapper.h>

#include "datagram.h"

struct kmem_cache *skbuff_head_cache __ro_after_init;
static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
#ifdef CONFIG_SKB_EXTENSIONS
static struct kmem_cache *skbuff_ext_cache __ro_after_init;
#endif
int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);

/**
 * skb_panic - private function for out-of-line support
 * @skb: buffer
 * @sz: size
 * @addr: address
 * @msg: skb_over_panic or skb_under_panic
 *
 * Out-of-line support for skb_put() and skb_push().
 * Called via the wrapper skb_over_panic() or skb_under_panic().
 * Keep out of line to prevent kernel bloat.
 * __builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
		      const char msg[])
{
	pr_emerg("%s: text:%px len:%d put:%d head:%px data:%px tail:%#lx end:%#lx dev:%s\n",
		 msg, addr, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

#define NAPI_SKB_CACHE_SIZE	64
#define NAPI_SKB_CACHE_BULK	16
#define NAPI_SKB_CACHE_HALF	(NAPI_SKB_CACHE_SIZE / 2)

struct napi_alloc_cache {
	struct page_frag_cache page;
	unsigned int skb_count;
	void *skb_cache[NAPI_SKB_CACHE_SIZE];
};

static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);

static void *__alloc_frag_align(unsigned int fragsz, gfp_t gfp_mask,
				unsigned int align_mask)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	return page_frag_alloc_align(&nc->page, fragsz, gfp_mask, align_mask);
}

void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
	fragsz = SKB_DATA_ALIGN(fragsz);

	return __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask);
}
EXPORT_SYMBOL(__napi_alloc_frag_align);

void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
	struct page_frag_cache *nc;
	void *data;

	fragsz = SKB_DATA_ALIGN(fragsz);
	if (in_hardirq() || irqs_disabled()) {
		nc = this_cpu_ptr(&netdev_alloc_cache);
		data = page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align_mask);
	} else {
		local_bh_disable();
		data = __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask);
		local_bh_enable();
	}
	return data;
}
EXPORT_SYMBOL(__netdev_alloc_frag_align);

static struct sk_buff *napi_skb_cache_get(void)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
	struct sk_buff *skb;

	if (unlikely(!nc->skb_count))
		nc->skb_count = kmem_cache_alloc_bulk(skbuff_head_cache,
						      GFP_ATOMIC,
						      NAPI_SKB_CACHE_BULK,
						      nc->skb_cache);
	if (unlikely(!nc->skb_count))
		return NULL;

	skb = nc->skb_cache[--nc->skb_count];
	kasan_unpoison_object_data(skbuff_head_cache, skb);

	return skb;
}

/* Caller must provide SKB that is memset cleared */
static void __build_skb_around(struct sk_buff *skb, void *data,
			       unsigned int frag_size)
{
	struct skb_shared_info *shinfo;
	unsigned int size = frag_size ? : ksize(data);

	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* Assumes caller memset cleared SKB */
	skb->truesize = SKB_TRUESIZE(size);
	refcount_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	skb_set_kcov_handle(skb, kcov_common_handle());
}

/**
 * __build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc() only if
 * @frag_size is 0, otherwise data should come from the page allocator
 * or vmalloc().
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 * Before IO, the driver allocates only the data buffer, where the NIC
 * puts the incoming frame.
 * The driver should add room at head (NET_SKB_PAD) and
 * MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info)).
 * After IO, the driver calls build_skb() to allocate the sk_buff and
 * populate it before giving the packet to the stack.
 * RX rings contain only data buffers, not full skbs.
 */
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb;

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, frag_size);

	return skb;
}

/* build_skb() is a wrapper over __build_skb() that specifically
 * takes care of skb->head and skb->pfmemalloc.
 * This means that if @frag_size is not zero, then @data must be backed
 * by a page fragment, not kmalloc() or vmalloc().
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __build_skb(data, frag_size);

	if (skb && frag_size) {
		skb->head_frag = 1;
		if (page_is_pfmemalloc(virt_to_head_page(data)))
			skb->pfmemalloc = 1;
	}
	return skb;
}
EXPORT_SYMBOL(build_skb);
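
/*
 * Illustrative sketch of the usage described above: after IO, a driver
 * wraps the page-fragment RX buffer the NIC filled in an skb. "buf",
 * "buf_size" and "frame_len" are hypothetical driver-side names;
 * "buf_size" must cover the frame plus the tailroom reserved for
 * skb_shared_info, per the notes on __build_skb():
 *
 *	struct sk_buff *skb = build_skb(buf, buf_size);
 *
 *	if (skb) {
 *		skb_reserve(skb, NET_SKB_PAD);
 *		skb_put(skb, frame_len);
 *	}
 */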

/**
 * build_skb_around - build a network buffer around provided skb
 * @skb: sk_buff provided by caller, must be memset cleared
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 */
struct sk_buff *build_skb_around(struct sk_buff *skb,
				 void *data, unsigned int frag_size)
{
	if (unlikely(!skb))
		return NULL;

	__build_skb_around(skb, data, frag_size);

	if (frag_size) {
		skb->head_frag = 1;
		if (page_is_pfmemalloc(virt_to_head_page(data)))
			skb->pfmemalloc = 1;
	}
	return skb;
}
EXPORT_SYMBOL(build_skb_around);

/**
 * __napi_build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * Version of __build_skb() that uses NAPI percpu caches to obtain
 * skbuff_head instead of an in-place allocation.
 *
 * Returns a new &sk_buff on success, %NULL on allocation failure.
 */
static struct sk_buff *__napi_build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb;

	skb = napi_skb_cache_get();
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, frag_size);

	return skb;
}

/**
 * napi_build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * Version of __napi_build_skb() that takes care of skb->head_frag
 * and skb->pfmemalloc when the data is a page or page fragment.
 *
 * Returns a new &sk_buff on success, %NULL on allocation failure.
 */
struct sk_buff *napi_build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __napi_build_skb(data, frag_size);

	if (likely(skb) && frag_size) {
		skb->head_frag = 1;
		skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
	}

	return skb;
}
EXPORT_SYMBOL(napi_build_skb);

/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If it is and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free.
 */
static void *kmalloc_reserve(size_t size, gfp_t flags, int node,
			     bool *pfmemalloc)
{
	void *obj;
	bool ret_pfmemalloc = false;

	/*
	 * Try a regular allocation, when that fails and we're not entitled
	 * to the reserves, fail.
	 */
	obj = kmalloc_node_track_caller(size,
					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					node);
	if (obj || !(gfp_pfmemalloc_allowed(flags)))
		goto out;

	/* Try again but now we are using pfmemalloc reserves */
	ret_pfmemalloc = true;
	obj = kmalloc_node_track_caller(size, flags, node);

out:
	if (pfmemalloc)
		*pfmemalloc = ret_pfmemalloc;

	return obj;
}

/* Allocate a new skbuff. We do this ourselves so we can fill in a few
 * 'private' fields and also do memory statistics to find all the
 * [BEEP] leaks.
 *
 */

/**
 * __alloc_skb - allocate a network buffer
 * @size: size to allocate
 * @gfp_mask: allocation mask
 * @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *	instead of head cache and allocate a cloned (child) skb.
 *	If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *	allocations in case the data is required for writeback
 * @node: numa node to allocate memory on
 *
 * Allocate a new &sk_buff. The returned buffer has no headroom and a
 * tail room of at least size bytes. The object has a reference count
 * of one. The return is the buffer. On a failure the return is %NULL.
 *
 * Buffers may only be allocated from interrupts using a @gfp_mask of
 * %GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int flags, int node)
{
	struct kmem_cache *cache;
	struct sk_buff *skb;
	u8 *data;
	bool pfmemalloc;

	cache = (flags & SKB_ALLOC_FCLONE)
		? skbuff_fclone_cache : skbuff_head_cache;

	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	/* Get the HEAD */
	if ((flags & (SKB_ALLOC_FCLONE | SKB_ALLOC_NAPI)) == SKB_ALLOC_NAPI &&
	    likely(node == NUMA_NO_NODE || node == numa_mem_id()))
		skb = napi_skb_cache_get();
	else
		skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node);
	if (unlikely(!skb))
		return NULL;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_DATA_ALIGN(size);
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
	if (unlikely(!data))
		goto nodata;
	/* kmalloc(size) might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	size = SKB_WITH_OVERHEAD(ksize(data));
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, 0);
	skb->pfmemalloc = pfmemalloc;

	if (flags & SKB_ALLOC_FCLONE) {
		struct sk_buff_fclones *fclones;

		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		skb->fclone = SKB_FCLONE_ORIG;
		refcount_set(&fclones->fclone_ref, 1);

		fclones->skb2.fclone = SKB_FCLONE_CLONE;
	}

	return skb;

nodata:
	kmem_cache_free(cache, skb);
	return NULL;
}
EXPORT_SYMBOL(__alloc_skb);
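
/*
 * Illustrative sketch: a typical caller allocates via the alloc_skb()
 * wrapper, reserves headroom, then appends payload. "hlen", "dlen" and
 * "payload" are hypothetical caller-side names:
 *
 *	struct sk_buff *skb = alloc_skb(hlen + dlen, GFP_KERNEL);
 *
 *	if (skb) {
 *		skb_reserve(skb, hlen);
 *		skb_put_data(skb, payload, dlen);
 *	}
 */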

/**
 * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
 * @dev: network device to receive on
 * @len: length to allocate
 * @gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 * Allocate a new &sk_buff and assign it a usage count of one. The
 * buffer has NET_SKB_PAD headroom built in. Users should allocate
 * the headroom they think they need without accounting for the
 * built in space. The built in space is used for optimisations.
 *
 * %NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
				   gfp_t gfp_mask)
{
	struct page_frag_cache *nc;
	struct sk_buff *skb;
	bool pfmemalloc;
	void *data;

	len += NET_SKB_PAD;

	/* If requested length is either too small or too big,
	 * we use kmalloc() for skb->head allocation.
	 */
	if (len <= SKB_WITH_OVERHEAD(1024) ||
	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	len = SKB_DATA_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	if (in_hardirq() || irqs_disabled()) {
		nc = this_cpu_ptr(&netdev_alloc_cache);
		data = page_frag_alloc(nc, len, gfp_mask);
		pfmemalloc = nc->pfmemalloc;
	} else {
		local_bh_disable();
		nc = this_cpu_ptr(&napi_alloc_cache.page);
		data = page_frag_alloc(nc, len, gfp_mask);
		pfmemalloc = nc->pfmemalloc;
		local_bh_enable();
	}

	if (unlikely(!data))
		return NULL;

	skb = __build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	if (pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD);
	skb->dev = dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);
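
/*
 * Illustrative sketch: an RX path outside NAPI context copies a
 * received frame into a freshly allocated skb. "rx_buf" and "pkt_len"
 * are hypothetical driver-side names:
 *
 *	struct sk_buff *skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
 *
 *	if (skb) {
 *		skb_reserve(skb, NET_IP_ALIGN);
 *		skb_put_data(skb, rx_buf, pkt_len);
 *		skb->protocol = eth_type_trans(skb, dev);
 *		netif_rx(skb);
 *	}
 */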

/**
 * __napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
 * @napi: napi instance this buffer was allocated for
 * @len: length to allocate
 * @gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
 *
 * Allocate a new sk_buff for use in NAPI receive. This buffer will
 * attempt to allocate the head from a special reserved region used
 * only for NAPI Rx allocation. By doing this we can save several
 * CPU cycles by avoiding having to disable and re-enable IRQs.
 *
 * %NULL is returned if there is no free memory.
 */
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
				 gfp_t gfp_mask)
{
	struct napi_alloc_cache *nc;
	struct sk_buff *skb;
	void *data;

	len += NET_SKB_PAD + NET_IP_ALIGN;

	/* If requested length is either too small or too big,
	 * we use kmalloc() for skb->head allocation.
	 */
	if (len <= SKB_WITH_OVERHEAD(1024) ||
	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI,
				  NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	nc = this_cpu_ptr(&napi_alloc_cache);
	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	len = SKB_DATA_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	data = page_frag_alloc(&nc->page, len, gfp_mask);
	if (unlikely(!data))
		return NULL;

	skb = __napi_build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	if (nc->page.pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	skb->dev = napi->dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__napi_alloc_skb);
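
/*
 * Illustrative sketch: inside a driver's NAPI poll handler, where the
 * percpu cache above avoids toggling IRQs. "rx_buf" and "pkt_len" are
 * hypothetical driver-side names:
 *
 *	struct sk_buff *skb = napi_alloc_skb(napi, pkt_len);
 *
 *	if (skb) {
 *		skb_put_data(skb, rx_buf, pkt_len);
 *		skb->protocol = eth_type_trans(skb, napi->dev);
 *		napi_gro_receive(napi, skb);
 *	}
 */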

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	skb_frag_size_add(frag, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_coalesce_rx_frag);

static void skb_drop_list(struct sk_buff **listp)
{
	kfree_skb_list(*listp);
	*listp = NULL;
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_free_head(struct sk_buff *skb)
{
	unsigned char *head = skb->head;

	if (skb->head_frag) {
		if (skb_pp_recycle(skb, head))
			return;
		skb_free_frag(head);
	} else {
		kfree(head);
	}
}

static void skb_release_data(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int i;

	if (skb->cloned &&
	    atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			      &shinfo->dataref))
		goto exit;

	skb_zcopy_clear(skb, true);

	for (i = 0; i < shinfo->nr_frags; i++)
		__skb_frag_unref(&shinfo->frags[i], skb->pp_recycle);

	if (shinfo->frag_list)
		kfree_skb_list(shinfo->frag_list);

	skb_free_head(skb);
exit:
	/* When we clone an SKB we copy the recycling bit. The pp_recycle
	 * bit is only set on the head though, so in order to avoid races
	 * while trying to recycle fragments on __skb_frag_unref() we need
	 * to make one SKB responsible for triggering the recycle path.
	 * So disable the recycling bit if an SKB is cloned and we have
	 * additional references to the fragmented part of the SKB.
	 * Eventually the last SKB will have the recycling bit set and its
	 * dataref set to 0, which will trigger the recycling.
	 */
	skb->pp_recycle = 0;
}

/*
 *	Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff_fclones *fclones;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		return;

	case SKB_FCLONE_ORIG:
		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		/* We usually free the clone (TX completion) before the original skb.
		 * This test would have no chance to be true for the clone,
		 * while here, branch prediction will be good.
		 */
		if (refcount_read(&fclones->fclone_ref) == 1)
			goto fastpath;
		break;

	default: /* SKB_FCLONE_CLONE */
		fclones = container_of(skb, struct sk_buff_fclones, skb2);
		break;
	}
	if (!refcount_dec_and_test(&fclones->fclone_ref))
		return;
fastpath:
	kmem_cache_free(skbuff_fclone_cache, fclones);
}

void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
	if (skb->destructor) {
		WARN_ON(in_hardirq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb_nfct(skb));
#endif
	skb_ext_put(skb);
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	if (likely(skb->head))
		skb_release_data(skb);
}

/**
 * __kfree_skb - private function
 * @skb: buffer
 *
 * Free an sk_buff. Release anything attached to the buffer.
 * Clean the state. This is an internal helper function. Users should
 * always call kfree_skb().
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

/**
 * kfree_skb - free an sk_buff
 * @skb: buffer to free
 *
 * Drop a reference to the buffer and free it if the usage count has
 * hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (!skb_unref(skb))
		return;

	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

void kfree_skb_list(struct sk_buff *segs)
{
	while (segs) {
		struct sk_buff *next = segs->next;

		kfree_skb(segs);
		segs = next;
	}
}
EXPORT_SYMBOL(kfree_skb_list);

/* Dump skb information and contents.
 *
 * Must only be called from net_ratelimit()-ed paths.
 *
 * Dumps whole packets if full_pkt, only headers otherwise.
 */
void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	struct net_device *dev = skb->dev;
	struct sock *sk = skb->sk;
	struct sk_buff *list_skb;
	bool has_mac, has_trans;
	int headroom, tailroom;
	int i, len, seg_len;

	if (full_pkt)
		len = skb->len;
	else
		len = min_t(int, skb->len, MAX_HEADER + 128);

	headroom = skb_headroom(skb);
	tailroom = skb_tailroom(skb);

	has_mac = skb_mac_header_was_set(skb);
	has_trans = skb_transport_header_was_set(skb);

	printk("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n"
	       "mac=(%d,%d) net=(%d,%d) trans=%d\n"
	       "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n"
	       "csum(0x%x ip_summed=%u complete_sw=%u valid=%u level=%u)\n"
	       "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n",
	       level, skb->len, headroom, skb_headlen(skb), tailroom,
	       has_mac ? skb->mac_header : -1,
	       has_mac ? skb_mac_header_len(skb) : -1,
	       skb->network_header,
	       has_trans ? skb_network_header_len(skb) : -1,
	       has_trans ? skb->transport_header : -1,
	       sh->tx_flags, sh->nr_frags,
	       sh->gso_size, sh->gso_type, sh->gso_segs,
	       skb->csum, skb->ip_summed, skb->csum_complete_sw,
	       skb->csum_valid, skb->csum_level,
	       skb->hash, skb->sw_hash, skb->l4_hash,
	       ntohs(skb->protocol), skb->pkt_type, skb->skb_iif);

	if (dev)
		printk("%sdev name=%s feat=0x%pNF\n",
		       level, dev->name, &dev->features);
	if (sk)
		printk("%ssk family=%hu type=%u proto=%u\n",
		       level, sk->sk_family, sk->sk_type, sk->sk_protocol);

	if (full_pkt && headroom)
		print_hex_dump(level, "skb headroom: ", DUMP_PREFIX_OFFSET,
			       16, 1, skb->head, headroom, false);

	seg_len = min_t(int, skb_headlen(skb), len);
	if (seg_len)
		print_hex_dump(level, "skb linear: ", DUMP_PREFIX_OFFSET,
			       16, 1, skb->data, seg_len, false);
	len -= seg_len;

	if (full_pkt && tailroom)
		print_hex_dump(level, "skb tailroom: ", DUMP_PREFIX_OFFSET,
			       16, 1, skb_tail_pointer(skb), tailroom, false);

	for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 p_off, p_len, copied;
		struct page *p;
		u8 *vaddr;

		skb_frag_foreach_page(frag, skb_frag_off(frag),
				      skb_frag_size(frag), p, p_off, p_len,
				      copied) {
			seg_len = min_t(int, p_len, len);
			vaddr = kmap_atomic(p);
			print_hex_dump(level, "skb frag: ",
				       DUMP_PREFIX_OFFSET,
				       16, 1, vaddr + p_off, seg_len, false);
			kunmap_atomic(vaddr);
			len -= seg_len;
			if (!len)
				break;
		}
	}

	if (full_pkt && skb_has_frag_list(skb)) {
		printk("skb fraglist:\n");
		skb_walk_frags(skb, list_skb)
			skb_dump(level, list_skb, true);
	}
}
EXPORT_SYMBOL(skb_dump);
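
/*
 * Illustrative sketch: dumping only the headers of a suspicious skb
 * from a rate-limited error path, as the comment above requires:
 *
 *	if (net_ratelimit())
 *		skb_dump(KERN_ERR, skb, false);
 */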

/**
 * skb_tx_error - report an sk_buff xmit error
 * @skb: buffer that triggered an error
 *
 * Report xmit error if a device callback is tracking this skb.
 * skb must be freed afterwards.
 */
void skb_tx_error(struct sk_buff *skb)
{
	skb_zcopy_clear(skb, true);
}
EXPORT_SYMBOL(skb_tx_error);

#ifdef CONFIG_TRACEPOINTS
/**
 * consume_skb - free an skbuff
 * @skb: buffer to free
 *
 * Drop a ref to the buffer and free it if the usage count has hit zero.
 * Functions identically to kfree_skb(), but kfree_skb() assumes that the
 * frame is being dropped after a failure and notes that in its tracepoint.
 */
void consume_skb(struct sk_buff *skb)
{
	if (!skb_unref(skb))
		return;

	trace_consume_skb(skb);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
#endif
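
/*
 * Illustrative sketch: a TX completion handler frees a successfully
 * transmitted buffer with consume_skb() (or dev_consume_skb_any() from
 * arbitrary context) so the drop tracepoint stays meaningful:
 *
 *	consume_skb(skb);
 */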

/**
 * __consume_stateless_skb - free an skbuff, assuming it is stateless
 * @skb: buffer to free
 *
 * Like consume_skb(), but this variant assumes that this is the last
 * skb reference and all the head states have been already dropped.
 */
void __consume_stateless_skb(struct sk_buff *skb)
{
	trace_consume_skb(skb);
	skb_release_data(skb);
	kfree_skbmem(skb);
}

static void napi_skb_cache_put(struct sk_buff *skb)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
	u32 i;

	kasan_poison_object_data(skbuff_head_cache, skb);
	nc->skb_cache[nc->skb_count++] = skb;

	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
		for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++)
			kasan_unpoison_object_data(skbuff_head_cache,
						   nc->skb_cache[i]);

		kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_HALF,
				     nc->skb_cache + NAPI_SKB_CACHE_HALF);
		nc->skb_count = NAPI_SKB_CACHE_HALF;
	}
}

void __kfree_skb_defer(struct sk_buff *skb)
{
	skb_release_all(skb);
	napi_skb_cache_put(skb);
}

void napi_skb_free_stolen_head(struct sk_buff *skb)
{
	if (unlikely(skb->slow_gro)) {
		nf_reset_ct(skb);
		skb_dst_drop(skb);
		skb_ext_put(skb);
		skb_orphan(skb);
		skb->slow_gro = 0;
	}
	napi_skb_cache_put(skb);
}

| 967 | void napi_consume_skb(struct sk_buff *skb, int budget) |
| 968 | { |
Jesper Dangaard Brouer | 885eb0a | 2016-03-11 09:43:58 +0100 | [diff] [blame] | 969 | /* Zero budget indicate non-NAPI context called us, like netpoll */ |
Jesper Dangaard Brouer | 795bb1c | 2016-02-08 13:14:59 +0100 | [diff] [blame] | 970 | if (unlikely(!budget)) { |
Jesper Dangaard Brouer | 885eb0a | 2016-03-11 09:43:58 +0100 | [diff] [blame] | 971 | dev_consume_skb_any(skb); |
Jesper Dangaard Brouer | 795bb1c | 2016-02-08 13:14:59 +0100 | [diff] [blame] | 972 | return; |
| 973 | } |
| 974 | |
Yunsheng Lin | 6454eca | 2020-11-24 18:49:29 +0800 | [diff] [blame] | 975 | lockdep_assert_in_softirq(); |
| 976 | |
Paolo Abeni | 7608894 | 2017-06-14 11:48:48 +0200 | [diff] [blame] | 977 | if (!skb_unref(skb)) |
Jesper Dangaard Brouer | 795bb1c | 2016-02-08 13:14:59 +0100 | [diff] [blame] | 978 | return; |
Paolo Abeni | 7608894 | 2017-06-14 11:48:48 +0200 | [diff] [blame] | 979 | |
Jesper Dangaard Brouer | 795bb1c | 2016-02-08 13:14:59 +0100 | [diff] [blame] | 980 | /* if we reach here, the skb is ready to be freed */
| 981 | trace_consume_skb(skb); |
| 982 | |
| 983 | /* skbs that are part of an fclone pair cannot use the NAPI cache; free normally */
Eric Dumazet | abbdb5a | 2016-03-20 11:27:47 -0700 | [diff] [blame] | 984 | if (skb->fclone != SKB_FCLONE_UNAVAILABLE) { |
Jesper Dangaard Brouer | 795bb1c | 2016-02-08 13:14:59 +0100 | [diff] [blame] | 985 | __kfree_skb(skb); |
| 986 | return; |
| 987 | } |
| 988 | |
Alexander Lobakin | 9243adf | 2021-02-13 14:13:09 +0000 | [diff] [blame] | 989 | skb_release_all(skb); |
Alexander Lobakin | f450d53 | 2021-02-13 14:12:25 +0000 | [diff] [blame] | 990 | napi_skb_cache_put(skb); |
Jesper Dangaard Brouer | 795bb1c | 2016-02-08 13:14:59 +0100 | [diff] [blame] | 991 | } |
| 992 | EXPORT_SYMBOL(napi_consume_skb); |
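
/*
 * Illustrative sketch, not part of the original file: a NAPI driver's
 * TX-completion handler would typically feed finished skbs through
 * napi_consume_skb() so their heads can be recycled via the per-CPU
 * cache above.  example_ring and example_next_done() are hypothetical.
 */
#if 0
static void example_tx_clean(struct example_ring *ring, int budget)
{
	struct sk_buff *skb;

	/* budget > 0: we are in NAPI (softirq) context */
	while ((skb = example_next_done(ring)) != NULL)
		napi_consume_skb(skb, budget);
}
#endif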
| 993 | |
Eric Dumazet | b193722 | 2014-09-28 22:18:47 -0700 | [diff] [blame] | 994 | /* Make sure a field is enclosed inside the headers_start/headers_end section */
| 995 | #define CHECK_SKB_FIELD(field) \ |
| 996 | BUILD_BUG_ON(offsetof(struct sk_buff, field) < \ |
| 997 | offsetof(struct sk_buff, headers_start)); \ |
| 998 | BUILD_BUG_ON(offsetof(struct sk_buff, field) > \ |
| 999 | offsetof(struct sk_buff, headers_end)); \ |
| 1000 | |
Herbert Xu | dec1881 | 2007-10-14 00:37:30 -0700 | [diff] [blame] | 1001 | static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) |
| 1002 | { |
| 1003 | new->tstamp = old->tstamp; |
Eric Dumazet | b193722 | 2014-09-28 22:18:47 -0700 | [diff] [blame] | 1004 | /* We do not copy old->sk */ |
Herbert Xu | dec1881 | 2007-10-14 00:37:30 -0700 | [diff] [blame] | 1005 | new->dev = old->dev; |
Eric Dumazet | b193722 | 2014-09-28 22:18:47 -0700 | [diff] [blame] | 1006 | memcpy(new->cb, old->cb, sizeof(old->cb)); |
Eric Dumazet | 7fee226 | 2010-05-11 23:19:48 +0000 | [diff] [blame] | 1007 | skb_dst_copy(new, old); |
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 1008 | __skb_ext_copy(new, old); |
Eric Dumazet | b193722 | 2014-09-28 22:18:47 -0700 | [diff] [blame] | 1009 | __nf_copy(new, old, false); |
Patrick McHardy | 6aa895b | 2008-07-14 22:49:06 -0700 | [diff] [blame] | 1010 | |
Eric Dumazet | b193722 | 2014-09-28 22:18:47 -0700 | [diff] [blame] | 1011 | /* Note: this field could live in the headers_start/headers_end section.
| 1012 | * It does not yet, because moving it there would create a 16-bit hole.
| 1013 | */
| 1014 | new->queue_mapping = old->queue_mapping; |
Eliezer Tamir | 0602129 | 2013-06-10 11:39:50 +0300 | [diff] [blame] | 1015 | |
Eric Dumazet | b193722 | 2014-09-28 22:18:47 -0700 | [diff] [blame] | 1016 | memcpy(&new->headers_start, &old->headers_start, |
| 1017 | offsetof(struct sk_buff, headers_end) - |
| 1018 | offsetof(struct sk_buff, headers_start)); |
| 1019 | CHECK_SKB_FIELD(protocol); |
| 1020 | CHECK_SKB_FIELD(csum); |
| 1021 | CHECK_SKB_FIELD(hash); |
| 1022 | CHECK_SKB_FIELD(priority); |
| 1023 | CHECK_SKB_FIELD(skb_iif); |
| 1024 | CHECK_SKB_FIELD(vlan_proto); |
| 1025 | CHECK_SKB_FIELD(vlan_tci); |
| 1026 | CHECK_SKB_FIELD(transport_header); |
| 1027 | CHECK_SKB_FIELD(network_header); |
| 1028 | CHECK_SKB_FIELD(mac_header); |
| 1029 | CHECK_SKB_FIELD(inner_protocol); |
| 1030 | CHECK_SKB_FIELD(inner_transport_header); |
| 1031 | CHECK_SKB_FIELD(inner_network_header); |
| 1032 | CHECK_SKB_FIELD(inner_mac_header); |
| 1033 | CHECK_SKB_FIELD(mark); |
| 1034 | #ifdef CONFIG_NETWORK_SECMARK |
| 1035 | CHECK_SKB_FIELD(secmark); |
| 1036 | #endif |
Cong Wang | e0d1095 | 2013-08-01 11:10:25 +0800 | [diff] [blame] | 1037 | #ifdef CONFIG_NET_RX_BUSY_POLL |
Eric Dumazet | b193722 | 2014-09-28 22:18:47 -0700 | [diff] [blame] | 1038 | CHECK_SKB_FIELD(napi_id); |
Eliezer Tamir | 0602129 | 2013-06-10 11:39:50 +0300 | [diff] [blame] | 1039 | #endif |
Eric Dumazet | 2bd8248 | 2015-02-03 23:48:24 -0800 | [diff] [blame] | 1040 | #ifdef CONFIG_XPS |
| 1041 | CHECK_SKB_FIELD(sender_cpu); |
| 1042 | #endif |
Eric Dumazet | b193722 | 2014-09-28 22:18:47 -0700 | [diff] [blame] | 1043 | #ifdef CONFIG_NET_SCHED |
| 1044 | CHECK_SKB_FIELD(tc_index); |
Eric Dumazet | b193722 | 2014-09-28 22:18:47 -0700 | [diff] [blame] | 1045 | #endif |
| 1046 | |
Herbert Xu | dec1881 | 2007-10-14 00:37:30 -0700 | [diff] [blame] | 1047 | } |
| 1048 | |
Herbert Xu | 82c49a3 | 2009-05-22 22:11:37 +0000 | [diff] [blame] | 1049 | /* |
| 1050 | * You should not add any new code to this function. Add it to |
| 1051 | * __copy_skb_header above instead. |
| 1052 | */ |
Herbert Xu | e0053ec | 2007-10-14 00:37:52 -0700 | [diff] [blame] | 1053 | static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1054 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1055 | #define C(x) n->x = skb->x |
| 1056 | |
| 1057 | n->next = n->prev = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1058 | n->sk = NULL; |
Herbert Xu | dec1881 | 2007-10-14 00:37:30 -0700 | [diff] [blame] | 1059 | __copy_skb_header(n, skb); |
| 1060 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1061 | C(len); |
| 1062 | C(data_len); |
Alexey Dobriyan | 3e6b3b2 | 2007-03-16 15:00:46 -0700 | [diff] [blame] | 1063 | C(mac_len); |
Patrick McHardy | 334a813 | 2007-06-25 04:35:20 -0700 | [diff] [blame] | 1064 | n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; |
Paul Moore | 02f1c89 | 2008-01-07 21:56:41 -0800 | [diff] [blame] | 1065 | n->cloned = 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1066 | n->nohdr = 0; |
Eric Dumazet | b13dda9 | 2018-04-07 13:42:39 -0700 | [diff] [blame] | 1067 | n->peeked = 0; |
Stefano Brivio | e78bfb0 | 2018-07-13 13:21:07 +0200 | [diff] [blame] | 1068 | C(pfmemalloc); |
Ilias Apalodimas | 6a5bcd8 | 2021-06-07 21:02:38 +0200 | [diff] [blame] | 1069 | C(pp_recycle); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1070 | n->destructor = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1071 | C(tail); |
| 1072 | C(end); |
Paul Moore | 02f1c89 | 2008-01-07 21:56:41 -0800 | [diff] [blame] | 1073 | C(head); |
Eric Dumazet | d3836f2 | 2012-04-27 00:33:38 +0000 | [diff] [blame] | 1074 | C(head_frag); |
Paul Moore | 02f1c89 | 2008-01-07 21:56:41 -0800 | [diff] [blame] | 1075 | C(data); |
| 1076 | C(truesize); |
Reshetova, Elena | 6335479 | 2017-06-30 13:07:58 +0300 | [diff] [blame] | 1077 | refcount_set(&n->users, 1); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1078 | |
| 1079 | atomic_inc(&(skb_shinfo(skb)->dataref)); |
| 1080 | skb->cloned = 1; |
| 1081 | |
| 1082 | return n; |
Herbert Xu | e0053ec | 2007-10-14 00:37:52 -0700 | [diff] [blame] | 1083 | #undef C |
| 1084 | } |
| 1085 | |
| 1086 | /** |
Jakub Kicinski | da29e4b | 2019-06-03 15:16:58 -0700 | [diff] [blame] | 1087 | * alloc_skb_for_msg() - allocate an sk_buff to wrap a frag list forming a msg
| 1088 | * @first: first sk_buff of the msg |
| 1089 | */ |
| 1090 | struct sk_buff *alloc_skb_for_msg(struct sk_buff *first) |
| 1091 | { |
| 1092 | struct sk_buff *n; |
| 1093 | |
| 1094 | n = alloc_skb(0, GFP_ATOMIC); |
| 1095 | if (!n) |
| 1096 | return NULL; |
| 1097 | |
| 1098 | n->len = first->len; |
| 1099 | n->data_len = first->len; |
| 1100 | n->truesize = first->truesize; |
| 1101 | |
| 1102 | skb_shinfo(n)->frag_list = first; |
| 1103 | |
| 1104 | __copy_skb_header(n, first); |
| 1105 | n->destructor = NULL; |
| 1106 | |
| 1107 | return n; |
| 1108 | } |
| 1109 | EXPORT_SYMBOL_GPL(alloc_skb_for_msg); |
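
/*
 * Illustrative sketch, not from the original source: wrapping an skb
 * chain into a single message head.  The payload stays in "first";
 * only a zero-length head is allocated around it.
 */
#if 0
	struct sk_buff *msg = alloc_skb_for_msg(first);

	if (!msg)
		return -ENOMEM;	/* "first" is still owned by the caller */
#endif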
| 1110 | |
| 1111 | /** |
Herbert Xu | e0053ec | 2007-10-14 00:37:52 -0700 | [diff] [blame] | 1112 | * skb_morph - morph one skb into another |
| 1113 | * @dst: the skb to receive the contents |
| 1114 | * @src: the skb to supply the contents |
| 1115 | * |
| 1116 | * This is identical to skb_clone except that the target skb is |
| 1117 | * supplied by the user. |
| 1118 | * |
| 1119 | * The target skb is returned upon exit. |
| 1120 | */ |
| 1121 | struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src) |
| 1122 | { |
Herbert Xu | 2d4baff | 2007-11-26 23:11:19 +0800 | [diff] [blame] | 1123 | skb_release_all(dst); |
Herbert Xu | e0053ec | 2007-10-14 00:37:52 -0700 | [diff] [blame] | 1124 | return __skb_clone(dst, src); |
| 1125 | } |
| 1126 | EXPORT_SYMBOL_GPL(skb_morph); |
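
/*
 * Illustrative sketch, not from the original source: letting a
 * caller-owned head "dst" take over the packet data of "src".  Any
 * data "dst" previously held is released first, exactly as if it had
 * been freed and then cloned from "src".
 */
#if 0
	skb_morph(dst, src);	/* dst now shares src's data, dataref bumped */
	consume_skb(src);	/* drop the caller's own reference to src */
#endif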
| 1127 | |
Sowmini Varadhan | 6f89dbc | 2018-02-15 10:49:32 -0800 | [diff] [blame] | 1128 | int mm_account_pinned_pages(struct mmpin *mmp, size_t size) |
Willem de Bruijn | a91dbff | 2017-08-03 16:29:43 -0400 | [diff] [blame] | 1129 | { |
| 1130 | unsigned long max_pg, num_pg, new_pg, old_pg; |
| 1131 | struct user_struct *user; |
| 1132 | |
| 1133 | if (capable(CAP_IPC_LOCK) || !size) |
| 1134 | return 0; |
| 1135 | |
| 1136 | num_pg = (size >> PAGE_SHIFT) + 2; /* worst case */ |
| 1137 | max_pg = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; |
| 1138 | user = mmp->user ? : current_user(); |
| 1139 | |
| 1140 | do { |
| 1141 | old_pg = atomic_long_read(&user->locked_vm); |
| 1142 | new_pg = old_pg + num_pg; |
| 1143 | if (new_pg > max_pg) |
| 1144 | return -ENOBUFS; |
| 1145 | } while (atomic_long_cmpxchg(&user->locked_vm, old_pg, new_pg) != |
| 1146 | old_pg); |
| 1147 | |
| 1148 | if (!mmp->user) { |
| 1149 | mmp->user = get_uid(user); |
| 1150 | mmp->num_pg = num_pg; |
| 1151 | } else { |
| 1152 | mmp->num_pg += num_pg; |
| 1153 | } |
| 1154 | |
| 1155 | return 0; |
| 1156 | } |
Sowmini Varadhan | 6f89dbc | 2018-02-15 10:49:32 -0800 | [diff] [blame] | 1157 | EXPORT_SYMBOL_GPL(mm_account_pinned_pages); |
Willem de Bruijn | a91dbff | 2017-08-03 16:29:43 -0400 | [diff] [blame] | 1158 | |
Sowmini Varadhan | 6f89dbc | 2018-02-15 10:49:32 -0800 | [diff] [blame] | 1159 | void mm_unaccount_pinned_pages(struct mmpin *mmp) |
Willem de Bruijn | a91dbff | 2017-08-03 16:29:43 -0400 | [diff] [blame] | 1160 | { |
| 1161 | if (mmp->user) { |
| 1162 | atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm); |
| 1163 | free_uid(mmp->user); |
| 1164 | } |
| 1165 | } |
Sowmini Varadhan | 6f89dbc | 2018-02-15 10:49:32 -0800 | [diff] [blame] | 1166 | EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages); |
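
/*
 * Illustrative sketch, not from the original source: the two helpers
 * above are used as a pair.  A hypothetical caller charges the pinned
 * pages against RLIMIT_MEMLOCK up front and releases the charge on
 * teardown.
 */
#if 0
	struct mmpin mmp = { .user = NULL, .num_pg = 0 };

	if (mm_account_pinned_pages(&mmp, size))
		return -ENOBUFS;
	/* ... pin and use the pages ... */
	mm_unaccount_pinned_pages(&mmp);
#endif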
Willem de Bruijn | a91dbff | 2017-08-03 16:29:43 -0400 | [diff] [blame] | 1167 | |
Jonathan Lemon | 8c79382 | 2021-01-06 14:18:37 -0800 | [diff] [blame] | 1168 | struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size) |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1169 | { |
| 1170 | struct ubuf_info *uarg; |
| 1171 | struct sk_buff *skb; |
| 1172 | |
| 1173 | WARN_ON_ONCE(!in_task()); |
| 1174 | |
| 1175 | skb = sock_omalloc(sk, 0, GFP_KERNEL); |
| 1176 | if (!skb) |
| 1177 | return NULL; |
| 1178 | |
| 1179 | BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb)); |
| 1180 | uarg = (void *)skb->cb; |
Willem de Bruijn | a91dbff | 2017-08-03 16:29:43 -0400 | [diff] [blame] | 1181 | uarg->mmp.user = NULL; |
| 1182 | |
| 1183 | if (mm_account_pinned_pages(&uarg->mmp, size)) { |
| 1184 | kfree_skb(skb); |
| 1185 | return NULL; |
| 1186 | } |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1187 | |
Jonathan Lemon | 8c79382 | 2021-01-06 14:18:37 -0800 | [diff] [blame] | 1188 | uarg->callback = msg_zerocopy_callback; |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1189 | uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1; |
| 1190 | uarg->len = 1; |
| 1191 | uarg->bytelen = size; |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1192 | uarg->zerocopy = 1; |
Jonathan Lemon | 04c2d33 | 2021-01-06 14:18:39 -0800 | [diff] [blame] | 1193 | uarg->flags = SKBFL_ZEROCOPY_FRAG; |
Eric Dumazet | c1d1b43 | 2017-08-31 16:48:22 -0700 | [diff] [blame] | 1194 | refcount_set(&uarg->refcnt, 1); |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1195 | sock_hold(sk); |
| 1196 | |
| 1197 | return uarg; |
| 1198 | } |
Jonathan Lemon | 8c79382 | 2021-01-06 14:18:37 -0800 | [diff] [blame] | 1199 | EXPORT_SYMBOL_GPL(msg_zerocopy_alloc); |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1200 | |
| 1201 | static inline struct sk_buff *skb_from_uarg(struct ubuf_info *uarg) |
| 1202 | { |
| 1203 | return container_of((void *)uarg, struct sk_buff, cb); |
| 1204 | } |
| 1205 | |
Jonathan Lemon | 8c79382 | 2021-01-06 14:18:37 -0800 | [diff] [blame] | 1206 | struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size, |
| 1207 | struct ubuf_info *uarg) |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1208 | { |
| 1209 | if (uarg) { |
| 1210 | const u32 byte_limit = 1 << 19; /* limit to a few TSO packets' worth */
| 1211 | u32 bytelen, next; |
| 1212 | |
| 1213 | /* realloc only when the socket is locked (TCP, UDP cork),
| 1214 | * so accesses to uarg->len and sk_zckey are serialized
| 1215 | */
| 1216 | if (!sock_owned_by_user(sk)) { |
| 1217 | WARN_ON_ONCE(1); |
| 1218 | return NULL; |
| 1219 | } |
| 1220 | |
| 1221 | bytelen = uarg->bytelen + size; |
| 1222 | if (uarg->len == USHRT_MAX - 1 || bytelen > byte_limit) { |
| 1223 | /* TCP can create a new skb to attach the new uarg */
| 1224 | if (sk->sk_type == SOCK_STREAM) |
| 1225 | goto new_alloc; |
| 1226 | return NULL; |
| 1227 | } |
| 1228 | |
| 1229 | next = (u32)atomic_read(&sk->sk_zckey); |
| 1230 | if ((u32)(uarg->id + uarg->len) == next) { |
Willem de Bruijn | a91dbff | 2017-08-03 16:29:43 -0400 | [diff] [blame] | 1231 | if (mm_account_pinned_pages(&uarg->mmp, size)) |
| 1232 | return NULL; |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1233 | uarg->len++; |
| 1234 | uarg->bytelen = bytelen; |
| 1235 | atomic_set(&sk->sk_zckey, ++next); |
Willem de Bruijn | 100f6d8 | 2019-05-30 18:01:21 -0400 | [diff] [blame] | 1236 | |
| 1237 | /* no extra ref when appending to datagram (MSG_MORE) */ |
| 1238 | if (sk->sk_type == SOCK_STREAM) |
Jonathan Lemon | 8e04491 | 2021-01-06 14:18:41 -0800 | [diff] [blame] | 1239 | net_zcopy_get(uarg); |
Willem de Bruijn | 100f6d8 | 2019-05-30 18:01:21 -0400 | [diff] [blame] | 1240 | |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1241 | return uarg; |
| 1242 | } |
| 1243 | } |
| 1244 | |
| 1245 | new_alloc: |
Jonathan Lemon | 8c79382 | 2021-01-06 14:18:37 -0800 | [diff] [blame] | 1246 | return msg_zerocopy_alloc(sk, size); |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1247 | } |
Jonathan Lemon | 8c79382 | 2021-01-06 14:18:37 -0800 | [diff] [blame] | 1248 | EXPORT_SYMBOL_GPL(msg_zerocopy_realloc); |
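
/*
 * Illustrative sketch, not from the original source: a sendmsg-style
 * path on a locked socket would try to extend the notification range
 * of the pending uarg before allocating a new one.  A NULL uarg from
 * skb_zcopy() simply yields a fresh allocation.
 */
#if 0
	uarg = msg_zerocopy_realloc(sk, len, skb_zcopy(skb));
	if (!uarg)
		return -ENOBUFS;	/* fall back to copying */
#endif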
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1249 | |
| 1250 | static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len) |
| 1251 | { |
| 1252 | struct sock_exterr_skb *serr = SKB_EXT_ERR(skb); |
| 1253 | u32 old_lo, old_hi; |
| 1254 | u64 sum_len; |
| 1255 | |
| 1256 | old_lo = serr->ee.ee_info; |
| 1257 | old_hi = serr->ee.ee_data; |
| 1258 | sum_len = old_hi - old_lo + 1ULL + len; |
| 1259 | |
| 1260 | if (sum_len >= (1ULL << 32)) |
| 1261 | return false; |
| 1262 | |
| 1263 | if (lo != old_hi + 1) |
| 1264 | return false; |
| 1265 | |
| 1266 | serr->ee.ee_data += len; |
| 1267 | return true; |
| 1268 | } |
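
/*
 * Worked example for the checks above (illustrative): a queued
 * notification covering ids [ee_info, ee_data] = [1, 3] can absorb a
 * new range with lo = 4, len = 2, because 4 == old_hi + 1 and the
 * combined length 3 + 2 = 5 still fits in 32 bits; the entry then
 * covers [1, 5].  A range starting at lo = 6 would leave a gap, so it
 * gets its own error-queue entry instead.
 */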
| 1269 | |
Jonathan Lemon | 8c79382 | 2021-01-06 14:18:37 -0800 | [diff] [blame] | 1270 | static void __msg_zerocopy_callback(struct ubuf_info *uarg) |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1271 | { |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1272 | struct sk_buff *tail, *skb = skb_from_uarg(uarg); |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1273 | struct sock_exterr_skb *serr; |
| 1274 | struct sock *sk = skb->sk; |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1275 | struct sk_buff_head *q; |
| 1276 | unsigned long flags; |
Willem de Bruijn | 3bdd5ee | 2021-06-09 18:41:57 -0400 | [diff] [blame] | 1277 | bool is_zerocopy; |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1278 | u32 lo, hi; |
| 1279 | u16 len; |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1280 | |
Willem de Bruijn | ccaffff | 2017-08-09 19:09:43 -0400 | [diff] [blame] | 1281 | mm_unaccount_pinned_pages(&uarg->mmp); |
| 1282 | |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1283 | /* if !len, there was only one call and it was aborted,
| 1284 | * so do not queue a completion notification
| 1285 | */
| 1286 | if (!uarg->len || sock_flag(sk, SOCK_DEAD)) |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1287 | goto release; |
| 1288 | |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1289 | len = uarg->len; |
| 1290 | lo = uarg->id; |
| 1291 | hi = uarg->id + len - 1; |
Willem de Bruijn | 3bdd5ee | 2021-06-09 18:41:57 -0400 | [diff] [blame] | 1292 | is_zerocopy = uarg->zerocopy; |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1293 | |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1294 | serr = SKB_EXT_ERR(skb); |
| 1295 | memset(serr, 0, sizeof(*serr)); |
| 1296 | serr->ee.ee_errno = 0; |
| 1297 | serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY; |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1298 | serr->ee.ee_data = hi; |
| 1299 | serr->ee.ee_info = lo; |
Willem de Bruijn | 3bdd5ee | 2021-06-09 18:41:57 -0400 | [diff] [blame] | 1300 | if (!is_zerocopy) |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1301 | serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED; |
| 1302 | |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1303 | q = &sk->sk_error_queue; |
| 1304 | spin_lock_irqsave(&q->lock, flags); |
| 1305 | tail = skb_peek_tail(q); |
| 1306 | if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY || |
| 1307 | !skb_zerocopy_notify_extend(tail, lo, len)) { |
| 1308 | __skb_queue_tail(q, skb); |
| 1309 | skb = NULL; |
| 1310 | } |
| 1311 | spin_unlock_irqrestore(&q->lock, flags); |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1312 | |
Alexander Aring | e3ae236 | 2021-06-27 18:48:21 -0400 | [diff] [blame] | 1313 | sk_error_report(sk); |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1314 | |
| 1315 | release: |
| 1316 | consume_skb(skb); |
| 1317 | sock_put(sk); |
| 1318 | } |
Jonathan Lemon | 7551885 | 2021-01-06 14:18:31 -0800 | [diff] [blame] | 1319 | |
Jonathan Lemon | 8c79382 | 2021-01-06 14:18:37 -0800 | [diff] [blame] | 1320 | void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg, |
| 1321 | bool success) |
Jonathan Lemon | 7551885 | 2021-01-06 14:18:31 -0800 | [diff] [blame] | 1322 | { |
| 1323 | uarg->zerocopy = uarg->zerocopy & success; |
| 1324 | |
| 1325 | if (refcount_dec_and_test(&uarg->refcnt)) |
Jonathan Lemon | 8c79382 | 2021-01-06 14:18:37 -0800 | [diff] [blame] | 1326 | __msg_zerocopy_callback(uarg); |
Jonathan Lemon | 7551885 | 2021-01-06 14:18:31 -0800 | [diff] [blame] | 1327 | } |
Jonathan Lemon | 8c79382 | 2021-01-06 14:18:37 -0800 | [diff] [blame] | 1328 | EXPORT_SYMBOL_GPL(msg_zerocopy_callback); |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1329 | |
Jonathan Lemon | 8c79382 | 2021-01-06 14:18:37 -0800 | [diff] [blame] | 1330 | void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref) |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1331 | { |
Jonathan Lemon | 236a6b1 | 2021-01-06 14:18:35 -0800 | [diff] [blame] | 1332 | struct sock *sk = skb_from_uarg(uarg)->sk; |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1333 | |
Jonathan Lemon | 236a6b1 | 2021-01-06 14:18:35 -0800 | [diff] [blame] | 1334 | atomic_dec(&sk->sk_zckey); |
| 1335 | uarg->len--; |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1336 | |
Jonathan Lemon | 236a6b1 | 2021-01-06 14:18:35 -0800 | [diff] [blame] | 1337 | if (have_uref) |
Jonathan Lemon | 8c79382 | 2021-01-06 14:18:37 -0800 | [diff] [blame] | 1338 | msg_zerocopy_callback(NULL, uarg, true); |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1339 | } |
Jonathan Lemon | 8c79382 | 2021-01-06 14:18:37 -0800 | [diff] [blame] | 1340 | EXPORT_SYMBOL_GPL(msg_zerocopy_put_abort); |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1341 | |
Willem de Bruijn | b5947e5 | 2018-11-30 15:32:39 -0500 | [diff] [blame] | 1342 | int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len) |
| 1343 | { |
| 1344 | return __zerocopy_sg_from_iter(skb->sk, skb, &msg->msg_iter, len); |
| 1345 | } |
| 1346 | EXPORT_SYMBOL_GPL(skb_zerocopy_iter_dgram); |
| 1347 | |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1348 | int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, |
| 1349 | struct msghdr *msg, int len, |
| 1350 | struct ubuf_info *uarg) |
| 1351 | { |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1352 | struct ubuf_info *orig_uarg = skb_zcopy(skb); |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1353 | struct iov_iter orig_iter = msg->msg_iter; |
| 1354 | int err, orig_len = skb->len; |
| 1355 | |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1356 | /* An skb can only point to one uarg. This edge case happens when |
| 1357 | * TCP appends to an skb, but zerocopy_realloc triggered a new alloc. |
| 1358 | */ |
| 1359 | if (orig_uarg && uarg != orig_uarg) |
| 1360 | return -EEXIST; |
| 1361 | |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1362 | err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len); |
| 1363 | if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) { |
Willem de Bruijn | 54d43117 | 2017-10-19 12:40:39 -0400 | [diff] [blame] | 1364 | struct sock *save_sk = skb->sk; |
| 1365 | |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1366 | /* Streams do not free the skb on error. Reset to the previous state. */
| 1367 | msg->msg_iter = orig_iter; |
Willem de Bruijn | 54d43117 | 2017-10-19 12:40:39 -0400 | [diff] [blame] | 1368 | skb->sk = sk; |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1369 | ___pskb_trim(skb, orig_len); |
Willem de Bruijn | 54d43117 | 2017-10-19 12:40:39 -0400 | [diff] [blame] | 1370 | skb->sk = save_sk; |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1371 | return err; |
| 1372 | } |
| 1373 | |
Willem de Bruijn | 52900d2 | 2018-11-30 15:32:40 -0500 | [diff] [blame] | 1374 | skb_zcopy_set(skb, uarg, NULL); |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1375 | return skb->len - orig_len; |
| 1376 | } |
| 1377 | EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream); |
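
/*
 * Illustrative sketch, not from the original source: a stream
 * transmit path appends user data as zerocopy frags.  -EEXIST means
 * the skb already carries a different uarg, so the caller must start
 * a new skb; new_segment is a hypothetical label.
 */
#if 0
	copied = skb_zerocopy_iter_stream(sk, skb, msg, len, uarg);
	if (copied == -EEXIST)
		goto new_segment;
#endif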
| 1378 | |
Willem de Bruijn | 1f8b977 | 2017-08-03 16:29:41 -0400 | [diff] [blame] | 1379 | static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig, |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1380 | gfp_t gfp_mask) |
| 1381 | { |
| 1382 | if (skb_zcopy(orig)) { |
| 1383 | if (skb_zcopy(nskb)) { |
| 1384 | /* callers that pass !gfp_mask are verified to pass !skb_zcopy(nskb) */
| 1385 | if (!gfp_mask) { |
| 1386 | WARN_ON_ONCE(1); |
| 1387 | return -ENOMEM; |
| 1388 | } |
| 1389 | if (skb_uarg(nskb) == skb_uarg(orig)) |
| 1390 | return 0; |
| 1391 | if (skb_copy_ubufs(nskb, GFP_ATOMIC)) |
| 1392 | return -EIO; |
| 1393 | } |
Willem de Bruijn | 52900d2 | 2018-11-30 15:32:40 -0500 | [diff] [blame] | 1394 | skb_zcopy_set(nskb, skb_uarg(orig), NULL); |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1395 | } |
| 1396 | return 0; |
| 1397 | } |
| 1398 | |
Ben Hutchings | 2c53040 | 2012-07-10 10:55:09 +0000 | [diff] [blame] | 1399 | /** |
| 1400 | * skb_copy_ubufs - copy userspace skb frags buffers to kernel |
Michael S. Tsirkin | 48c8301 | 2011-08-31 08:03:29 +0000 | [diff] [blame] | 1401 | * @skb: the skb to modify |
| 1402 | * @gfp_mask: allocation priority |
| 1403 | * |
Jonathan Lemon | 06b4feb | 2021-01-06 14:18:38 -0800 | [diff] [blame] | 1404 | * This must be called on an skb with SKBFL_ZEROCOPY_ENABLE.
Michael S. Tsirkin | 48c8301 | 2011-08-31 08:03:29 +0000 | [diff] [blame] | 1405 | * It will copy all frags into kernel memory and drop the references
| 1406 | * to the userspace pages.
| 1407 | * |
| 1408 | * If this function is called from an interrupt, @gfp_mask must be
| 1409 | * %GFP_ATOMIC. |
| 1410 | * |
| 1411 | * Returns 0 on success or a negative error code on failure |
| 1412 | * to allocate kernel memory to copy to. |
| 1413 | */ |
| 1414 | int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1415 | { |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1416 | int num_frags = skb_shinfo(skb)->nr_frags; |
| 1417 | struct page *page, *head = NULL; |
Willem de Bruijn | 3ece782 | 2017-08-03 16:29:38 -0400 | [diff] [blame] | 1418 | int i, new_frags; |
| 1419 | u32 d_off; |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1420 | |
Willem de Bruijn | 3ece782 | 2017-08-03 16:29:38 -0400 | [diff] [blame] | 1421 | if (skb_shared(skb) || skb_unclone(skb, gfp_mask)) |
| 1422 | return -EINVAL; |
| 1423 | |
Willem de Bruijn | f72c4ac | 2017-12-28 12:38:13 -0500 | [diff] [blame] | 1424 | if (!num_frags) |
| 1425 | goto release; |
| 1426 | |
Willem de Bruijn | 3ece782 | 2017-08-03 16:29:38 -0400 | [diff] [blame] | 1427 | new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT; |
| 1428 | for (i = 0; i < new_frags; i++) { |
Krishna Kumar | 02756ed | 2012-07-17 02:05:29 +0000 | [diff] [blame] | 1429 | page = alloc_page(gfp_mask); |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1430 | if (!page) { |
| 1431 | while (head) { |
Sunghan Suh | 40dadff | 2013-07-12 16:17:23 +0900 | [diff] [blame] | 1432 | struct page *next = (struct page *)page_private(head); |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1433 | put_page(head); |
| 1434 | head = next; |
| 1435 | } |
| 1436 | return -ENOMEM; |
| 1437 | } |
Willem de Bruijn | 3ece782 | 2017-08-03 16:29:38 -0400 | [diff] [blame] | 1438 | set_page_private(page, (unsigned long)head); |
| 1439 | head = page; |
| 1440 | } |
| 1441 | |
| 1442 | page = head; |
| 1443 | d_off = 0; |
| 1444 | for (i = 0; i < num_frags; i++) { |
| 1445 | skb_frag_t *f = &skb_shinfo(skb)->frags[i]; |
| 1446 | u32 p_off, p_len, copied; |
| 1447 | struct page *p; |
| 1448 | u8 *vaddr; |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 1449 | |
Jonathan Lemon | b54c9d5 | 2019-07-30 07:40:33 -0700 | [diff] [blame] | 1450 | skb_frag_foreach_page(f, skb_frag_off(f), skb_frag_size(f), |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 1451 | p, p_off, p_len, copied) { |
Willem de Bruijn | 3ece782 | 2017-08-03 16:29:38 -0400 | [diff] [blame] | 1452 | u32 copy, done = 0; |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 1453 | vaddr = kmap_atomic(p); |
Willem de Bruijn | 3ece782 | 2017-08-03 16:29:38 -0400 | [diff] [blame] | 1454 | |
| 1455 | while (done < p_len) { |
| 1456 | if (d_off == PAGE_SIZE) { |
| 1457 | d_off = 0; |
| 1458 | page = (struct page *)page_private(page); |
| 1459 | } |
| 1460 | copy = min_t(u32, PAGE_SIZE - d_off, p_len - done); |
| 1461 | memcpy(page_address(page) + d_off, |
| 1462 | vaddr + p_off + done, copy); |
| 1463 | done += copy; |
| 1464 | d_off += copy; |
| 1465 | } |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 1466 | kunmap_atomic(vaddr); |
| 1467 | } |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1468 | } |
| 1469 | |
| 1470 | /* release the userspace buffers that the skb frags were holding */
Krishna Kumar | 02756ed | 2012-07-17 02:05:29 +0000 | [diff] [blame] | 1471 | for (i = 0; i < num_frags; i++) |
Ian Campbell | a8605c6 | 2011-10-19 23:01:49 +0000 | [diff] [blame] | 1472 | skb_frag_unref(skb, i); |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1473 | |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1474 | /* skb frags point to kernel buffers */ |
Willem de Bruijn | 3ece782 | 2017-08-03 16:29:38 -0400 | [diff] [blame] | 1475 | for (i = 0; i < new_frags - 1; i++) { |
| 1476 | __skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE); |
Sunghan Suh | 40dadff | 2013-07-12 16:17:23 +0900 | [diff] [blame] | 1477 | head = (struct page *)page_private(head); |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1478 | } |
Willem de Bruijn | 3ece782 | 2017-08-03 16:29:38 -0400 | [diff] [blame] | 1479 | __skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off); |
| 1480 | skb_shinfo(skb)->nr_frags = new_frags; |
Michael S. Tsirkin | 48c8301 | 2011-08-31 08:03:29 +0000 | [diff] [blame] | 1481 | |
Willem de Bruijn | b90ddd5 | 2017-12-20 17:37:50 -0500 | [diff] [blame] | 1482 | release: |
Willem de Bruijn | 1f8b977 | 2017-08-03 16:29:41 -0400 | [diff] [blame] | 1483 | skb_zcopy_clear(skb, false); |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1484 | return 0; |
| 1485 | } |
Michael S. Tsirkin | dcc0fb7 | 2012-07-20 09:23:20 +0000 | [diff] [blame] | 1486 | EXPORT_SYMBOL_GPL(skb_copy_ubufs); |
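
/*
 * Illustrative sketch, not from the original source: before handing a
 * zerocopy skb to a consumer that may hold it for an unbounded time,
 * detach it from the userspace pages it references.
 */
#if 0
	if (skb_zcopy(skb) && skb_copy_ubufs(skb, GFP_ATOMIC))
		goto drop;	/* could not privatize the frags */
#endif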
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1487 | |
Herbert Xu | e0053ec | 2007-10-14 00:37:52 -0700 | [diff] [blame] | 1488 | /** |
| 1489 | * skb_clone - duplicate an sk_buff |
| 1490 | * @skb: buffer to clone |
| 1491 | * @gfp_mask: allocation priority |
| 1492 | * |
| 1493 | * Duplicate an &sk_buff. The new one is not owned by a socket. Both |
| 1494 | * copies share the same packet data but not structure. The new |
| 1495 | * buffer has a reference count of 1. If the allocation fails the |
| 1496 | * function returns %NULL otherwise the new buffer is returned. |
| 1497 | * |
| 1498 | * If this function is called from an interrupt, @gfp_mask must be
| 1499 | * %GFP_ATOMIC. |
| 1500 | */ |
| 1501 | |
| 1502 | struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) |
| 1503 | { |
Eric Dumazet | d0bf4a9 | 2014-09-29 13:29:15 -0700 | [diff] [blame] | 1504 | struct sk_buff_fclones *fclones = container_of(skb, |
| 1505 | struct sk_buff_fclones, |
| 1506 | skb1); |
Eric Dumazet | 6ffe75eb | 2014-12-03 17:04:39 -0800 | [diff] [blame] | 1507 | struct sk_buff *n; |
Herbert Xu | e0053ec | 2007-10-14 00:37:52 -0700 | [diff] [blame] | 1508 | |
Michael S. Tsirkin | 70008aa | 2012-07-20 09:23:10 +0000 | [diff] [blame] | 1509 | if (skb_orphan_frags(skb, gfp_mask)) |
| 1510 | return NULL; |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1511 | |
Herbert Xu | e0053ec | 2007-10-14 00:37:52 -0700 | [diff] [blame] | 1512 | if (skb->fclone == SKB_FCLONE_ORIG && |
Reshetova, Elena | 2638595 | 2017-06-30 13:07:59 +0300 | [diff] [blame] | 1513 | refcount_read(&fclones->fclone_ref) == 1) { |
Eric Dumazet | 6ffe75eb | 2014-12-03 17:04:39 -0800 | [diff] [blame] | 1514 | n = &fclones->skb2; |
Reshetova, Elena | 2638595 | 2017-06-30 13:07:59 +0300 | [diff] [blame] | 1515 | refcount_set(&fclones->fclone_ref, 2); |
Herbert Xu | e0053ec | 2007-10-14 00:37:52 -0700 | [diff] [blame] | 1516 | } else { |
Mel Gorman | c93bdd0 | 2012-07-31 16:44:19 -0700 | [diff] [blame] | 1517 | if (skb_pfmemalloc(skb)) |
| 1518 | gfp_mask |= __GFP_MEMALLOC; |
| 1519 | |
Herbert Xu | e0053ec | 2007-10-14 00:37:52 -0700 | [diff] [blame] | 1520 | n = kmem_cache_alloc(skbuff_head_cache, gfp_mask); |
| 1521 | if (!n) |
| 1522 | return NULL; |
Vegard Nossum | fe55f6d | 2008-08-30 12:16:35 +0200 | [diff] [blame] | 1523 | |
Herbert Xu | e0053ec | 2007-10-14 00:37:52 -0700 | [diff] [blame] | 1524 | n->fclone = SKB_FCLONE_UNAVAILABLE; |
| 1525 | } |
| 1526 | |
| 1527 | return __skb_clone(n, skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1528 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1529 | EXPORT_SYMBOL(skb_clone); |
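
/*
 * Illustrative sketch, not from the original source: delivering one
 * payload to a second consumer.  Both skbs share the data, so a
 * writer must obtain a private copy first (e.g. via pskb_expand_head()).
 * example_deliver() is a hypothetical consumer.
 */
#if 0
	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

	if (clone)
		example_deliver(clone);
#endif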
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1530 | |
Toshiaki Makita | b0768a8 | 2018-08-03 16:58:09 +0900 | [diff] [blame] | 1531 | void skb_headers_offset_update(struct sk_buff *skb, int off) |
Pravin B Shelar | f5b1729 | 2013-03-07 13:21:40 +0000 | [diff] [blame] | 1532 | { |
Eric Dumazet | 030737b | 2013-10-19 11:42:54 -0700 | [diff] [blame] | 1533 | /* Only adjust this if it actually is csum_start rather than csum */ |
| 1534 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
| 1535 | skb->csum_start += off; |
Pravin B Shelar | f5b1729 | 2013-03-07 13:21:40 +0000 | [diff] [blame] | 1536 | /* {transport,network,mac}_header and tail are relative to skb->head */ |
| 1537 | skb->transport_header += off; |
| 1538 | skb->network_header += off; |
| 1539 | if (skb_mac_header_was_set(skb)) |
| 1540 | skb->mac_header += off; |
| 1541 | skb->inner_transport_header += off; |
| 1542 | skb->inner_network_header += off; |
Pravin B Shelar | aefbd2b | 2013-03-07 13:21:46 +0000 | [diff] [blame] | 1543 | skb->inner_mac_header += off; |
Pravin B Shelar | f5b1729 | 2013-03-07 13:21:40 +0000 | [diff] [blame] | 1544 | } |
Toshiaki Makita | b0768a8 | 2018-08-03 16:58:09 +0900 | [diff] [blame] | 1545 | EXPORT_SYMBOL(skb_headers_offset_update); |
Pravin B Shelar | f5b1729 | 2013-03-07 13:21:40 +0000 | [diff] [blame] | 1546 | |
Ilya Lesokhin | 08303c1 | 2018-04-30 10:16:11 +0300 | [diff] [blame] | 1547 | void skb_copy_header(struct sk_buff *new, const struct sk_buff *old) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1548 | { |
Herbert Xu | dec1881 | 2007-10-14 00:37:30 -0700 | [diff] [blame] | 1549 | __copy_skb_header(new, old); |
| 1550 | |
Herbert Xu | 7967168 | 2006-06-22 02:40:14 -0700 | [diff] [blame] | 1551 | skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size; |
| 1552 | skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; |
| 1553 | skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1554 | } |
Ilya Lesokhin | 08303c1 | 2018-04-30 10:16:11 +0300 | [diff] [blame] | 1555 | EXPORT_SYMBOL(skb_copy_header); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1556 | |
Mel Gorman | c93bdd0 | 2012-07-31 16:44:19 -0700 | [diff] [blame] | 1557 | static inline int skb_alloc_rx_flag(const struct sk_buff *skb) |
| 1558 | { |
| 1559 | if (skb_pfmemalloc(skb)) |
| 1560 | return SKB_ALLOC_RX; |
| 1561 | return 0; |
| 1562 | } |
| 1563 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1564 | /** |
| 1565 | * skb_copy - create private copy of an sk_buff |
| 1566 | * @skb: buffer to copy |
| 1567 | * @gfp_mask: allocation priority |
| 1568 | * |
| 1569 | * Make a copy of both an &sk_buff and its data. This is used when the |
| 1570 | * caller wishes to modify the data and needs a private copy of the |
| 1571 | * data to alter. Returns %NULL on failure or the pointer to the buffer |
| 1572 | * on success. The returned buffer has a reference count of 1. |
| 1573 | * |
| 1574 | * As a by-product, this function converts a non-linear &sk_buff into a
| 1575 | * linear one, so the &sk_buff becomes completely private and the caller
| 1576 | * may modify all the data of the returned buffer. This means that this
| 1577 | * function is not recommended in circumstances where only the
| 1578 | * header is going to be modified. Use pskb_copy() instead.
| 1579 | */ |
| 1580 | |
Al Viro | dd0fc66 | 2005-10-07 07:46:04 +0100 | [diff] [blame] | 1581 | struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1582 | { |
Eric Dumazet | 6602ceb | 2010-09-01 05:25:10 +0000 | [diff] [blame] | 1583 | int headerlen = skb_headroom(skb); |
Alexander Duyck | ec47ea8 | 2012-05-04 14:26:56 +0000 | [diff] [blame] | 1584 | unsigned int size = skb_end_offset(skb) + skb->data_len; |
Mel Gorman | c93bdd0 | 2012-07-31 16:44:19 -0700 | [diff] [blame] | 1585 | struct sk_buff *n = __alloc_skb(size, gfp_mask, |
| 1586 | skb_alloc_rx_flag(skb), NUMA_NO_NODE); |
Eric Dumazet | 6602ceb | 2010-09-01 05:25:10 +0000 | [diff] [blame] | 1587 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1588 | if (!n) |
| 1589 | return NULL; |
| 1590 | |
| 1591 | /* Set the data pointer */ |
| 1592 | skb_reserve(n, headerlen); |
| 1593 | /* Set the tail pointer and length */ |
| 1594 | skb_put(n, skb->len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1595 | |
Tim Hansen | 9f77fad | 2017-10-09 11:37:59 -0400 | [diff] [blame] | 1596 | BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1597 | |
Ilya Lesokhin | 08303c1 | 2018-04-30 10:16:11 +0300 | [diff] [blame] | 1598 | skb_copy_header(n, skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1599 | return n; |
| 1600 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1601 | EXPORT_SYMBOL(skb_copy); |
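
/*
 * Illustrative sketch, not from the original source: taking a fully
 * private, linearized copy before rewriting payload bytes.  For
 * header-only edits, pskb_copy() is cheaper, as noted above.
 */
#if 0
	struct sk_buff *priv = skb_copy(skb, GFP_ATOMIC);

	if (!priv)
		return NET_RX_DROP;
	/* priv->data may now be modified freely */
#endif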
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1602 | |
| 1603 | /** |
Octavian Purdila | bad93e9 | 2014-06-12 01:36:26 +0300 | [diff] [blame] | 1604 | * __pskb_copy_fclone - create copy of an sk_buff with private head. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1605 | * @skb: buffer to copy |
Eric Dumazet | 117632e | 2011-12-03 21:39:53 +0000 | [diff] [blame] | 1606 | * @headroom: headroom of new skb |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1607 | * @gfp_mask: allocation priority |
Octavian Purdila | bad93e9 | 2014-06-12 01:36:26 +0300 | [diff] [blame] | 1608 | * @fclone: if true allocate the copy of the skb from the fclone |
| 1609 | * cache instead of the head cache; it is recommended to set this |
| 1610 | * to true for the cases where the copy will likely be cloned |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1611 | * |
| 1612 | * Make a copy of both an &sk_buff and part of its data, located
| 1613 | * in the header. Fragmented data remain shared. This is used when
| 1614 | * the caller wishes to modify only the header of the &sk_buff and
| 1615 | * needs a private copy of the header to alter. Returns %NULL on
| 1616 | * failure or the pointer to the buffer on success.
| 1617 | * The returned buffer has a reference count of 1.
| 1618 | */ |
| 1619 | |
Octavian Purdila | bad93e9 | 2014-06-12 01:36:26 +0300 | [diff] [blame] | 1620 | struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, |
| 1621 | gfp_t gfp_mask, bool fclone) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1622 | { |
Eric Dumazet | 117632e | 2011-12-03 21:39:53 +0000 | [diff] [blame] | 1623 | unsigned int size = skb_headlen(skb) + headroom; |
Octavian Purdila | bad93e9 | 2014-06-12 01:36:26 +0300 | [diff] [blame] | 1624 | int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0); |
| 1625 | struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE); |
Eric Dumazet | 6602ceb | 2010-09-01 05:25:10 +0000 | [diff] [blame] | 1626 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1627 | if (!n) |
| 1628 | goto out; |
| 1629 | |
| 1630 | /* Set the data pointer */ |
Eric Dumazet | 117632e | 2011-12-03 21:39:53 +0000 | [diff] [blame] | 1631 | skb_reserve(n, headroom); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1632 | /* Set the tail pointer and length */ |
| 1633 | skb_put(n, skb_headlen(skb)); |
| 1634 | /* Copy the bytes */ |
Arnaldo Carvalho de Melo | d626f62 | 2007-03-27 18:55:52 -0300 | [diff] [blame] | 1635 | skb_copy_from_linear_data(skb, n->data, n->len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1636 | |
Herbert Xu | 25f484a | 2006-11-07 14:57:15 -0800 | [diff] [blame] | 1637 | n->truesize += skb->data_len; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1638 | n->data_len = skb->data_len; |
| 1639 | n->len = skb->len; |
| 1640 | |
| 1641 | if (skb_shinfo(skb)->nr_frags) { |
| 1642 | int i; |
| 1643 | |
Willem de Bruijn | 1f8b977 | 2017-08-03 16:29:41 -0400 | [diff] [blame] | 1644 | if (skb_orphan_frags(skb, gfp_mask) || |
| 1645 | skb_zerocopy_clone(n, skb, gfp_mask)) { |
Michael S. Tsirkin | 70008aa | 2012-07-20 09:23:10 +0000 | [diff] [blame] | 1646 | kfree_skb(n); |
| 1647 | n = NULL; |
| 1648 | goto out; |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1649 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1650 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
| 1651 | skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 1652 | skb_frag_ref(skb, i); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1653 | } |
| 1654 | skb_shinfo(n)->nr_frags = i; |
| 1655 | } |
| 1656 | |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 1657 | if (skb_has_frag_list(skb)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1658 | skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; |
| 1659 | skb_clone_fraglist(n); |
| 1660 | } |
| 1661 | |
Ilya Lesokhin | 08303c1 | 2018-04-30 10:16:11 +0300 | [diff] [blame] | 1662 | skb_copy_header(n, skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1663 | out: |
| 1664 | return n; |
| 1665 | } |
Octavian Purdila | bad93e9 | 2014-06-12 01:36:26 +0300 | [diff] [blame] | 1666 | EXPORT_SYMBOL(__pskb_copy_fclone); |
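
/*
 * Illustrative sketch, not from the original source: a header-private
 * copy whose paged data stays shared with the original; fclone=true
 * hints that the copy itself is likely to be cloned again.
 */
#if 0
	struct sk_buff *n = __pskb_copy_fclone(skb, skb_headroom(skb),
					       GFP_ATOMIC, true);
#endif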
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1667 | |
| 1668 | /** |
| 1669 | * pskb_expand_head - reallocate header of &sk_buff |
| 1670 | * @skb: buffer to reallocate |
| 1671 | * @nhead: room to add at head |
| 1672 | * @ntail: room to add at tail |
| 1673 | * @gfp_mask: allocation priority |
| 1674 | * |
Mathias Krause | bc32383 | 2013-11-07 14:18:26 +0100 | [diff] [blame] | 1675 | * Expands (or creates an identical copy, if @nhead and @ntail are zero)
| 1676 | * the header of @skb. The &sk_buff itself is not changed and MUST have
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1677 | * a reference count of 1. Returns zero on success or a negative error
| 1678 | * code if expansion failed, in which case the &sk_buff is not changed.
| 1679 | *
| 1680 | * All the pointers pointing into the skb header may change and must be
| 1681 | * reloaded after a call to this function.
| 1682 | */ |
| 1683 | |
Victor Fusco | 86a76ca | 2005-07-08 14:57:47 -0700 | [diff] [blame] | 1684 | int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, |
Al Viro | dd0fc66 | 2005-10-07 07:46:04 +0100 | [diff] [blame] | 1685 | gfp_t gfp_mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1686 | { |
Eric Dumazet | 158f323 | 2017-01-27 07:11:27 -0800 | [diff] [blame] | 1687 | int i, osize = skb_end_offset(skb); |
| 1688 | int size = osize + nhead + ntail; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1689 | long off; |
Eric Dumazet | 158f323 | 2017-01-27 07:11:27 -0800 | [diff] [blame] | 1690 | u8 *data; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1691 | |
Herbert Xu | 4edd87a | 2008-10-01 07:09:38 -0700 | [diff] [blame] | 1692 | BUG_ON(nhead < 0); |
| 1693 | |
Tim Hansen | 9f77fad | 2017-10-09 11:37:59 -0400 | [diff] [blame] | 1694 | BUG_ON(skb_shared(skb)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1695 | |
| 1696 | size = SKB_DATA_ALIGN(size); |
| 1697 | |
Mel Gorman | c93bdd0 | 2012-07-31 16:44:19 -0700 | [diff] [blame] | 1698 | if (skb_pfmemalloc(skb)) |
| 1699 | gfp_mask |= __GFP_MEMALLOC; |
| 1700 | data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), |
| 1701 | gfp_mask, NUMA_NO_NODE, NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1702 | if (!data) |
| 1703 | goto nodata; |
Eric Dumazet | 87151b8 | 2012-04-10 20:08:39 +0000 | [diff] [blame] | 1704 | size = SKB_WITH_OVERHEAD(ksize(data)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1705 | |
| 1706 | /* Copy only real data... and, alas, header. This should be |
Eric Dumazet | 6602ceb | 2010-09-01 05:25:10 +0000 | [diff] [blame] | 1707 | * optimized for the cases when header is void. |
| 1708 | */ |
| 1709 | memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head); |
| 1710 | |
| 1711 | memcpy((struct skb_shared_info *)(data + size), |
| 1712 | skb_shinfo(skb), |
Eric Dumazet | fed6638 | 2010-07-22 19:09:08 +0000 | [diff] [blame] | 1713 | offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1714 | |
Alexander Duyck | 3e24591 | 2012-05-04 14:26:51 +0000 | [diff] [blame] | 1715 | /* |
| 1716 | * if shinfo is shared we must drop the old head gracefully, but if it
| 1717 | * is not we can just drop the old head and leave the existing refcount
| 1718 | * alone, since all we did was relocate the values
| 1719 | */ |
| 1720 | if (skb_cloned(skb)) { |
Michael S. Tsirkin | 70008aa | 2012-07-20 09:23:10 +0000 | [diff] [blame] | 1721 | if (skb_orphan_frags(skb, gfp_mask)) |
| 1722 | goto nofrags; |
Willem de Bruijn | 1f8b977 | 2017-08-03 16:29:41 -0400 | [diff] [blame] | 1723 | if (skb_zcopy(skb)) |
Eric Dumazet | c1d1b43 | 2017-08-31 16:48:22 -0700 | [diff] [blame] | 1724 | refcount_inc(&skb_uarg(skb)->refcnt); |
Eric Dumazet | 1fd6304 | 2010-09-02 23:09:32 +0000 | [diff] [blame] | 1725 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 1726 | skb_frag_ref(skb, i); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1727 | |
Eric Dumazet | 1fd6304 | 2010-09-02 23:09:32 +0000 | [diff] [blame] | 1728 | if (skb_has_frag_list(skb)) |
| 1729 | skb_clone_fraglist(skb); |
| 1730 | |
| 1731 | skb_release_data(skb); |
Alexander Duyck | 3e24591 | 2012-05-04 14:26:51 +0000 | [diff] [blame] | 1732 | } else { |
| 1733 | skb_free_head(skb); |
Eric Dumazet | 1fd6304 | 2010-09-02 23:09:32 +0000 | [diff] [blame] | 1734 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1735 | off = (data + nhead) - skb->head; |
| 1736 | |
| 1737 | skb->head = data; |
Eric Dumazet | d3836f2 | 2012-04-27 00:33:38 +0000 | [diff] [blame] | 1738 | skb->head_frag = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1739 | skb->data += off; |
Arnaldo Carvalho de Melo | 4305b54 | 2007-04-19 20:43:29 -0700 | [diff] [blame] | 1740 | #ifdef NET_SKBUFF_DATA_USES_OFFSET |
| 1741 | skb->end = size; |
Patrick McHardy | 56eb888 | 2007-04-09 11:45:04 -0700 | [diff] [blame] | 1742 | off = nhead; |
Arnaldo Carvalho de Melo | 4305b54 | 2007-04-19 20:43:29 -0700 | [diff] [blame] | 1743 | #else |
| 1744 | skb->end = skb->head + size; |
Patrick McHardy | 56eb888 | 2007-04-09 11:45:04 -0700 | [diff] [blame] | 1745 | #endif |
Arnaldo Carvalho de Melo | 27a884d | 2007-04-19 20:29:13 -0700 | [diff] [blame] | 1746 | skb->tail += off; |
Peter Pan(潘卫平) | b41abb4 | 2013-06-06 21:27:21 +0800 | [diff] [blame] | 1747 | skb_headers_offset_update(skb, nhead); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1748 | skb->cloned = 0; |
Patrick McHardy | 334a813 | 2007-06-25 04:35:20 -0700 | [diff] [blame] | 1749 | skb->hdr_len = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1750 | skb->nohdr = 0; |
| 1751 | atomic_set(&skb_shinfo(skb)->dataref, 1); |
Eric Dumazet | 158f323 | 2017-01-27 07:11:27 -0800 | [diff] [blame] | 1752 | |
Daniel Borkmann | de8f3a8 | 2017-09-25 02:25:51 +0200 | [diff] [blame] | 1753 | skb_metadata_clear(skb); |
| 1754 | |
Eric Dumazet | 158f323 | 2017-01-27 07:11:27 -0800 | [diff] [blame] | 1755 | /* It is not generally safe to change skb->truesize. |
| 1756 | * For the moment, we only really care about the rx path, or
| 1757 | * the case where the skb is orphaned (not attached to a socket).
| 1758 | */ |
| 1759 | if (!skb->sk || skb->destructor == sock_edemux) |
| 1760 | skb->truesize += size - osize; |
| 1761 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1762 | return 0; |
| 1763 | |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1764 | nofrags: |
| 1765 | kfree(data); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1766 | nodata: |
| 1767 | return -ENOMEM; |
| 1768 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1769 | EXPORT_SYMBOL(pskb_expand_head); |
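
/*
 * Illustrative sketch, not from the original source: growing headroom
 * in place.  Every cached pointer into skb->head (e.g. from ip_hdr())
 * must be re-derived afterwards, per the comment above.
 */
#if 0
	if (pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC))
		goto drop;
	iph = ip_hdr(skb);	/* reload: skb->head may have moved */
#endif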
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1770 | |
| 1771 | /* Make private copy of skb with writable head and some headroom */ |
| 1772 | |
| 1773 | struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) |
| 1774 | { |
| 1775 | struct sk_buff *skb2; |
| 1776 | int delta = headroom - skb_headroom(skb); |
| 1777 | |
| 1778 | if (delta <= 0) |
| 1779 | skb2 = pskb_copy(skb, GFP_ATOMIC); |
| 1780 | else { |
| 1781 | skb2 = skb_clone(skb, GFP_ATOMIC); |
| 1782 | if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0, |
| 1783 | GFP_ATOMIC)) { |
| 1784 | kfree_skb(skb2); |
| 1785 | skb2 = NULL; |
| 1786 | } |
| 1787 | } |
| 1788 | return skb2; |
| 1789 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1790 | EXPORT_SYMBOL(skb_realloc_headroom); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1791 | |
| 1792 | /** |
Vasily Averin | f1260ff | 2021-08-02 11:52:15 +0300 | [diff] [blame] | 1793 | * skb_expand_head - reallocate header of &sk_buff |
| 1794 | * @skb: buffer to reallocate |
| 1795 | * @headroom: needed headroom |
| 1796 | * |
| 1797 | * Unlike skb_realloc_headroom, this one does not allocate a new skb
| 1798 | * if possible; it copies skb->sk to the new skb as needed
| 1799 | * and frees the original skb in case of failure.
| 1800 | *
| 1801 | * It expects an increased headroom and generates a warning otherwise.
| 1802 | */ |
| 1803 | |
| 1804 | struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom) |
| 1805 | { |
| 1806 | int delta = headroom - skb_headroom(skb); |
| 1807 | |
| 1808 | if (WARN_ONCE(delta <= 0, |
| 1809 | "%s is expecting an increase in the headroom", __func__)) |
| 1810 | return skb; |
| 1811 | |
| 1812 | /* pskb_expand_head() might crash if the skb is shared */
| 1813 | if (skb_shared(skb)) { |
| 1814 | struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); |
| 1815 | |
| 1816 | if (likely(nskb)) { |
| 1817 | if (skb->sk) |
| 1818 | skb_set_owner_w(nskb, skb->sk); |
| 1819 | consume_skb(skb); |
| 1820 | } else { |
| 1821 | kfree_skb(skb); |
| 1822 | } |
| 1823 | skb = nskb; |
| 1824 | } |
| 1825 | if (skb && |
| 1826 | pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) { |
| 1827 | kfree_skb(skb); |
| 1828 | skb = NULL; |
| 1829 | } |
| 1830 | return skb; |
| 1831 | } |
| 1832 | EXPORT_SYMBOL(skb_expand_head); |
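
/*
 * Illustrative sketch, not from the original source: because
 * skb_expand_head() frees the skb on failure, a tunnel-style caller
 * only needs to test the return value; needed_headroom is hypothetical.
 */
#if 0
	skb = skb_expand_head(skb, needed_headroom);
	if (!skb)
		return -ENOMEM;	/* original skb already freed */
#endif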
| 1833 | |
| 1834 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1835 | * skb_copy_expand - copy and expand sk_buff |
| 1836 | * @skb: buffer to copy |
| 1837 | * @newheadroom: new free bytes at head |
| 1838 | * @newtailroom: new free bytes at tail |
| 1839 | * @gfp_mask: allocation priority |
| 1840 | * |
| 1841 | * Make a copy of both an &sk_buff and its data and while doing so |
| 1842 | * allocate additional space. |
| 1843 | * |
| 1844 | * This is used when the caller wishes to modify the data and needs a |
| 1845 | * private copy of the data to alter as well as more space for new fields. |
| 1846 | * Returns %NULL on failure or the pointer to the buffer |
| 1847 | * on success. The returned buffer has a reference count of 1. |
| 1848 | * |
| 1849 | * You must pass %GFP_ATOMIC as the allocation priority if this function |
| 1850 | * is called from an interrupt. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1851 | */ |
| 1852 | struct sk_buff *skb_copy_expand(const struct sk_buff *skb, |
Victor Fusco | 86a76ca | 2005-07-08 14:57:47 -0700 | [diff] [blame] | 1853 | int newheadroom, int newtailroom, |
Al Viro | dd0fc66 | 2005-10-07 07:46:04 +0100 | [diff] [blame] | 1854 | gfp_t gfp_mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1855 | { |
| 1856 | /* |
| 1857 | * Allocate the copy buffer |
| 1858 | */ |
Mel Gorman | c93bdd0 | 2012-07-31 16:44:19 -0700 | [diff] [blame] | 1859 | struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom, |
| 1860 | gfp_mask, skb_alloc_rx_flag(skb), |
| 1861 | NUMA_NO_NODE); |
Patrick McHardy | efd1e8d | 2007-04-10 18:30:09 -0700 | [diff] [blame] | 1862 | int oldheadroom = skb_headroom(skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1863 | int head_copy_len, head_copy_off; |
| 1864 | |
| 1865 | if (!n) |
| 1866 | return NULL; |
| 1867 | |
| 1868 | skb_reserve(n, newheadroom); |
| 1869 | |
| 1870 | /* Set the tail pointer and length */ |
| 1871 | skb_put(n, skb->len); |
| 1872 | |
Patrick McHardy | efd1e8d | 2007-04-10 18:30:09 -0700 | [diff] [blame] | 1873 | head_copy_len = oldheadroom; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1874 | head_copy_off = 0; |
| 1875 | if (newheadroom <= head_copy_len) |
| 1876 | head_copy_len = newheadroom; |
| 1877 | else |
| 1878 | head_copy_off = newheadroom - head_copy_len; |
| 1879 | |
| 1880 | /* Copy the linear header and data. */ |
Tim Hansen | 9f77fad | 2017-10-09 11:37:59 -0400 | [diff] [blame] | 1881 | BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, |
| 1882 | skb->len + head_copy_len)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1883 | |
Ilya Lesokhin | 08303c1 | 2018-04-30 10:16:11 +0300 | [diff] [blame] | 1884 | skb_copy_header(n, skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1885 | |
Eric Dumazet | 030737b | 2013-10-19 11:42:54 -0700 | [diff] [blame] | 1886 | skb_headers_offset_update(n, newheadroom - oldheadroom); |
Patrick McHardy | efd1e8d | 2007-04-10 18:30:09 -0700 | [diff] [blame] | 1887 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1888 | return n; |
| 1889 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1890 | EXPORT_SYMBOL(skb_copy_expand); |
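
/*
 * Illustrative sketch, not from the original source: a private copy
 * with room reserved for a new encapsulation header in front and a
 * trailer behind; hdr_len and trailer_len are hypothetical sizes.
 */
#if 0
	struct sk_buff *n = skb_copy_expand(skb, hdr_len, trailer_len,
					    GFP_ATOMIC);
#endif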
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1891 | |
| 1892 | /** |
Florian Fainelli | cd0a137 | 2017-08-22 15:12:14 -0700 | [diff] [blame] | 1893 | * __skb_pad - zero pad the tail of an skb |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1894 | * @skb: buffer to pad |
| 1895 | * @pad: space to pad |
Florian Fainelli | cd0a137 | 2017-08-22 15:12:14 -0700 | [diff] [blame] | 1896 | * @free_on_error: free buffer on error |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1897 | * |
| 1898 | * Ensure that a buffer is followed by a padding area that is zero |
| 1899 | * filled. Used by network drivers which may DMA or transfer data |
| 1900 | * beyond the buffer end onto the wire. |
| 1901 | * |
Florian Fainelli | cd0a137 | 2017-08-22 15:12:14 -0700 | [diff] [blame] | 1902 | * May return an error in out-of-memory cases. The skb is freed on error
| 1903 | * if @free_on_error is true. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1904 | */ |
YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 1905 | |
Florian Fainelli | cd0a137 | 2017-08-22 15:12:14 -0700 | [diff] [blame] | 1906 | int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1907 | { |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1908 | int err; |
| 1909 | int ntail; |
YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 1910 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1911 | /* If the skbuff is non linear tailroom is always zero.. */ |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1912 | if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1913 | memset(skb->data+skb->len, 0, pad); |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1914 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1915 | } |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1916 | |
Arnaldo Carvalho de Melo | 4305b54 | 2007-04-19 20:43:29 -0700 | [diff] [blame] | 1917 | ntail = skb->data_len + pad - (skb->end - skb->tail); |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1918 | if (likely(skb_cloned(skb) || ntail > 0)) { |
| 1919 | err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); |
| 1920 | if (unlikely(err)) |
| 1921 | goto free_skb; |
| 1922 | } |
| 1923 | |
| 1924 | /* FIXME: The use of this function with non-linear skbs really needs |
| 1925 | * to be audited. |
| 1926 | */ |
| 1927 | err = skb_linearize(skb); |
| 1928 | if (unlikely(err)) |
| 1929 | goto free_skb; |
| 1930 | |
| 1931 | memset(skb->data + skb->len, 0, pad); |
| 1932 | return 0; |
| 1933 | |
| 1934 | free_skb: |
Florian Fainelli | cd0a137 | 2017-08-22 15:12:14 -0700 | [diff] [blame] | 1935 | if (free_on_error) |
| 1936 | kfree_skb(skb); |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1937 | return err; |
YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 1938 | } |
Florian Fainelli | cd0a137 | 2017-08-22 15:12:14 -0700 | [diff] [blame] | 1939 | EXPORT_SYMBOL(__skb_pad); |
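
/*
 * Editor's illustrative sketch: the common caller is a driver transmit
 * path padding runt Ethernet frames. skb_put_padto() (a wrapper that ends
 * up in __skb_pad() with @free_on_error true) zero-fills the tail up to
 * the requested length and frees the skb on failure, so the caller only
 * has to check the return value.
 */
static int __maybe_unused example_pad_runt(struct sk_buff *skb)
{
	/* ETH_ZLEN (60) is the minimum frame length, FCS excluded. */
	return skb_put_padto(skb, ETH_ZLEN);	/* skb freed on error */
}
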
YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 1940 | |
Ilpo Järvinen | 0dde3e1 | 2008-03-27 17:43:41 -0700 | [diff] [blame] | 1941 | /** |
Mathias Krause | 0c7ddf3 | 2013-11-07 14:18:24 +0100 | [diff] [blame] | 1942 | * pskb_put - add data to the tail of a potentially fragmented buffer |
| 1943 | * @skb: start of the buffer to use |
| 1944 | * @tail: tail fragment of the buffer to use |
| 1945 | * @len: amount of data to add |
| 1946 | * |
| 1947 | * This function extends the used data area of the potentially |
| 1948 | * fragmented buffer. @tail must be the last fragment of @skb -- or |
| 1949 | * @skb itself. If this would exceed the total buffer size, the kernel |
| 1950 | * will panic. A pointer to the first byte of the extra data is |
| 1951 | * returned. |
| 1952 | */ |
| 1953 | |
Johannes Berg | 4df864c | 2017-06-16 14:29:21 +0200 | [diff] [blame] | 1954 | void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) |
Mathias Krause | 0c7ddf3 | 2013-11-07 14:18:24 +0100 | [diff] [blame] | 1955 | { |
| 1956 | if (tail != skb) { |
| 1957 | skb->data_len += len; |
| 1958 | skb->len += len; |
| 1959 | } |
| 1960 | return skb_put(tail, len); |
| 1961 | } |
| 1962 | EXPORT_SYMBOL_GPL(pskb_put); |
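
/*
 * Editor's usage note: pskb_put() matters when trailer space lives on the
 * last skb of a frag list while the head skb carries the totals. IPsec's
 * ESP output path is the classic user, roughly:
 *
 *	err = skb_cow_data(skb, trailer_len, &trailer);
 *	if (err < 0)
 *		goto error;
 *	tail = pskb_put(skb, trailer, trailer_len);
 *
 * so that skb->len and skb->data_len on the head stay consistent.
 */
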
| 1963 | |
| 1964 | /** |
Ilpo Järvinen | 0dde3e1 | 2008-03-27 17:43:41 -0700 | [diff] [blame] | 1965 | * skb_put - add data to a buffer |
| 1966 | * @skb: buffer to use |
| 1967 | * @len: amount of data to add |
| 1968 | * |
| 1969 | * This function extends the used data area of the buffer. If this would |
| 1970 | * exceed the total buffer size, the kernel will panic. A pointer to the |
| 1971 | * first byte of the extra data is returned. |
| 1972 | */ |
Johannes Berg | 4df864c | 2017-06-16 14:29:21 +0200 | [diff] [blame] | 1973 | void *skb_put(struct sk_buff *skb, unsigned int len) |
Ilpo Järvinen | 0dde3e1 | 2008-03-27 17:43:41 -0700 | [diff] [blame] | 1974 | { |
Johannes Berg | 4df864c | 2017-06-16 14:29:21 +0200 | [diff] [blame] | 1975 | void *tmp = skb_tail_pointer(skb); |
Ilpo Järvinen | 0dde3e1 | 2008-03-27 17:43:41 -0700 | [diff] [blame] | 1976 | SKB_LINEAR_ASSERT(skb); |
| 1977 | skb->tail += len; |
| 1978 | skb->len += len; |
| 1979 | if (unlikely(skb->tail > skb->end)) |
| 1980 | skb_over_panic(skb, len, __builtin_return_address(0)); |
| 1981 | return tmp; |
| 1982 | } |
| 1983 | EXPORT_SYMBOL(skb_put); |
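
/*
 * Editor's illustrative sketch: filling a freshly allocated linear skb.
 * The payload pointer and length are hypothetical; skb_put_data() is the
 * usual shorthand for the memcpy-into-skb_put() pattern shown here.
 */
static struct sk_buff * __maybe_unused
example_build_packet(const void *payload, unsigned int len)
{
	struct sk_buff *skb = alloc_skb(len, GFP_KERNEL);

	if (!skb)
		return NULL;
	memcpy(skb_put(skb, len), payload, len);
	return skb;
}
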
| 1984 | |
Ilpo Järvinen | 6be8ac2 | 2008-03-27 17:47:24 -0700 | [diff] [blame] | 1985 | /** |
Ilpo Järvinen | c2aa270 | 2008-03-27 17:52:40 -0700 | [diff] [blame] | 1986 | * skb_push - add data to the start of a buffer |
| 1987 | * @skb: buffer to use |
| 1988 | * @len: amount of data to add |
| 1989 | * |
| 1990 | * This function extends the used data area of the buffer at the buffer |
| 1991 | * start. If this would exceed the total buffer headroom, the kernel will |
| 1992 | * panic. A pointer to the first byte of the extra data is returned. |
| 1993 | */ |
Johannes Berg | d58ff35 | 2017-06-16 14:29:23 +0200 | [diff] [blame] | 1994 | void *skb_push(struct sk_buff *skb, unsigned int len) |
Ilpo Järvinen | c2aa270 | 2008-03-27 17:52:40 -0700 | [diff] [blame] | 1995 | { |
| 1996 | skb->data -= len; |
| 1997 | skb->len += len; |
Ganesh Goudar | 9aba2f8 | 2018-08-02 15:34:52 +0530 | [diff] [blame] | 1998 | if (unlikely(skb->data < skb->head)) |
Ilpo Järvinen | c2aa270 | 2008-03-27 17:52:40 -0700 | [diff] [blame] | 1999 | skb_under_panic(skb, len, __builtin_return_address(0)); |
| 2000 | return skb->data; |
| 2001 | } |
| 2002 | EXPORT_SYMBOL(skb_push); |
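
/*
 * Editor's illustrative sketch: prepending a link-layer header. The
 * caller is assumed to have reserved at least ETH_HLEN of headroom (e.g.
 * via skb_reserve() at allocation time); skb_push() panics otherwise,
 * hence the defensive check in this sketch.
 */
static int __maybe_unused example_push_eth(struct sk_buff *skb,
					   const u8 *dst, const u8 *src)
{
	struct ethhdr *eth;

	if (skb_headroom(skb) < ETH_HLEN)
		return -ENOBUFS;

	eth = skb_push(skb, ETH_HLEN);
	memcpy(eth->h_dest, dst, ETH_ALEN);
	memcpy(eth->h_source, src, ETH_ALEN);
	eth->h_proto = htons(ETH_P_IP);
	return 0;
}
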
| 2003 | |
| 2004 | /** |
Ilpo Järvinen | 6be8ac2 | 2008-03-27 17:47:24 -0700 | [diff] [blame] | 2005 | * skb_pull - remove data from the start of a buffer |
| 2006 | * @skb: buffer to use |
| 2007 | * @len: amount of data to remove |
| 2008 | * |
| 2009 | * This function removes data from the start of a buffer, returning |
| 2010 | * the memory to the headroom. A pointer to the next data in the buffer |
| 2011 | * is returned. Once the data has been pulled, future pushes will overwrite |
| 2012 | * the old data. |
| 2013 | */ |
Johannes Berg | af72868 | 2017-06-16 14:29:22 +0200 | [diff] [blame] | 2014 | void *skb_pull(struct sk_buff *skb, unsigned int len) |
Ilpo Järvinen | 6be8ac2 | 2008-03-27 17:47:24 -0700 | [diff] [blame] | 2015 | { |
David S. Miller | 47d2964 | 2010-05-02 02:21:44 -0700 | [diff] [blame] | 2016 | return skb_pull_inline(skb, len); |
Ilpo Järvinen | 6be8ac2 | 2008-03-27 17:47:24 -0700 | [diff] [blame] | 2017 | } |
| 2018 | EXPORT_SYMBOL(skb_pull); |
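
/*
 * Editor's illustrative sketch: receive paths normally check that the
 * header bytes are present in the linear area before pulling them off.
 * pskb_may_pull() handles fragmented skbs (see __pskb_pull_tail() below);
 * plain skb_pull() assumes the bytes are already there.
 */
static int __maybe_unused example_strip_header(struct sk_buff *skb,
					       unsigned int hdrlen)
{
	if (!pskb_may_pull(skb, hdrlen))
		return -EINVAL;
	skb_pull(skb, hdrlen);
	return 0;
}
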
| 2019 | |
Ilpo Järvinen | 419ae74 | 2008-03-27 17:54:01 -0700 | [diff] [blame] | 2020 | /** |
| 2021 | * skb_trim - remove end from a buffer |
| 2022 | * @skb: buffer to alter |
| 2023 | * @len: new length |
| 2024 | * |
| 2025 | * Cut the length of a buffer down by removing data from the tail. If |
| 2026 | * the buffer is already under the specified length, it is not modified. |
| 2027 | * The skb must be linear. |
| 2028 | */ |
| 2029 | void skb_trim(struct sk_buff *skb, unsigned int len) |
| 2030 | { |
| 2031 | if (skb->len > len) |
| 2032 | __skb_trim(skb, len); |
| 2033 | } |
| 2034 | EXPORT_SYMBOL(skb_trim); |
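
/*
 * Editor's illustrative sketch: stripping a hardware-appended trailer
 * (e.g. the Ethernet FCS) from a linear receive skb. skb_trim() is a
 * no-op if the skb is already at or under the target length.
 */
static void __maybe_unused example_strip_fcs(struct sk_buff *skb)
{
	if (skb->len > ETH_FCS_LEN)
		skb_trim(skb, skb->len - ETH_FCS_LEN);
}
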
| 2035 | |
Herbert Xu | 3cc0e87 | 2006-06-09 16:13:38 -0700 | [diff] [blame] | 2036 | /* Trims skb to length len. It can change skb pointers. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2037 | */ |
| 2038 | |
Herbert Xu | 3cc0e87 | 2006-06-09 16:13:38 -0700 | [diff] [blame] | 2039 | int ___pskb_trim(struct sk_buff *skb, unsigned int len) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2040 | { |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 2041 | struct sk_buff **fragp; |
| 2042 | struct sk_buff *frag; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2043 | int offset = skb_headlen(skb); |
| 2044 | int nfrags = skb_shinfo(skb)->nr_frags; |
| 2045 | int i; |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 2046 | int err; |
| 2047 | |
| 2048 | if (skb_cloned(skb) && |
| 2049 | unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) |
| 2050 | return err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2051 | |
Herbert Xu | f4d26fb | 2006-07-30 20:20:28 -0700 | [diff] [blame] | 2052 | i = 0; |
| 2053 | if (offset >= len) |
| 2054 | goto drop_pages; |
| 2055 | |
| 2056 | for (; i < nfrags; i++) { |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2057 | int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 2058 | |
| 2059 | if (end < len) { |
| 2060 | offset = end; |
| 2061 | continue; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2062 | } |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 2063 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2064 | skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 2065 | |
Herbert Xu | f4d26fb | 2006-07-30 20:20:28 -0700 | [diff] [blame] | 2066 | drop_pages: |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 2067 | skb_shinfo(skb)->nr_frags = i; |
| 2068 | |
| 2069 | for (; i < nfrags; i++) |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 2070 | skb_frag_unref(skb, i); |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 2071 | |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 2072 | if (skb_has_frag_list(skb)) |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 2073 | skb_drop_fraglist(skb); |
Herbert Xu | f4d26fb | 2006-07-30 20:20:28 -0700 | [diff] [blame] | 2074 | goto done; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2075 | } |
| 2076 | |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 2077 | for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); |
| 2078 | fragp = &frag->next) { |
| 2079 | int end = offset + frag->len; |
| 2080 | |
| 2081 | if (skb_shared(frag)) { |
| 2082 | struct sk_buff *nfrag; |
| 2083 | |
| 2084 | nfrag = skb_clone(frag, GFP_ATOMIC); |
| 2085 | if (unlikely(!nfrag)) |
| 2086 | return -ENOMEM; |
| 2087 | |
| 2088 | nfrag->next = frag->next; |
Eric Dumazet | 85bb2a6 | 2012-04-19 02:24:53 +0000 | [diff] [blame] | 2089 | consume_skb(frag); |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 2090 | frag = nfrag; |
| 2091 | *fragp = frag; |
| 2092 | } |
| 2093 | |
| 2094 | if (end < len) { |
| 2095 | offset = end; |
| 2096 | continue; |
| 2097 | } |
| 2098 | |
| 2099 | if (end > len && |
| 2100 | unlikely((err = pskb_trim(frag, len - offset)))) |
| 2101 | return err; |
| 2102 | |
| 2103 | if (frag->next) |
| 2104 | skb_drop_list(&frag->next); |
| 2105 | break; |
| 2106 | } |
| 2107 | |
Herbert Xu | f4d26fb | 2006-07-30 20:20:28 -0700 | [diff] [blame] | 2108 | done: |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 2109 | if (len > skb_headlen(skb)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2110 | skb->data_len -= skb->len - len; |
| 2111 | skb->len = len; |
| 2112 | } else { |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 2113 | skb->len = len; |
| 2114 | skb->data_len = 0; |
Arnaldo Carvalho de Melo | 27a884d | 2007-04-19 20:29:13 -0700 | [diff] [blame] | 2115 | skb_set_tail_pointer(skb, len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2116 | } |
| 2117 | |
Eric Dumazet | c21b48c | 2017-04-26 09:07:46 -0700 | [diff] [blame] | 2118 | if (!skb->sk || skb->destructor == sock_edemux) |
| 2119 | skb_condense(skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2120 | return 0; |
| 2121 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2122 | EXPORT_SYMBOL(___pskb_trim); |
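
/*
 * Editor's illustrative sketch: callers use the pskb_trim() wrapper,
 * which only drops into ___pskb_trim() when the skb is non-linear; it
 * can reallocate (hence the error return), unlike skb_trim().
 */
static int __maybe_unused example_trim_padding(struct sk_buff *skb,
					       unsigned int tot_len)
{
	/* e.g. discard link-layer padding beyond the IP total length */
	if (skb->len > tot_len)
		return pskb_trim(skb, tot_len);
	return 0;
}
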
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2123 | |
Eric Dumazet | 88078d9 | 2018-04-18 11:43:15 -0700 | [diff] [blame] | 2124 | /* Note: use pskb_trim_rcsum() instead of calling this directly |
| 2125 | */ |
| 2126 | int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len) |
| 2127 | { |
| 2128 | if (skb->ip_summed == CHECKSUM_COMPLETE) { |
| 2129 | int delta = skb->len - len; |
| 2130 | |
Dimitris Michailidis | d55bef50 | 2018-10-19 17:07:13 -0700 | [diff] [blame] | 2131 | skb->csum = csum_block_sub(skb->csum, |
| 2132 | skb_checksum(skb, len, delta, 0), |
| 2133 | len); |
Vasily Averin | 54970a2 | 2020-12-14 22:07:39 +0300 | [diff] [blame] | 2134 | } else if (skb->ip_summed == CHECKSUM_PARTIAL) { |
| 2135 | int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len; |
| 2136 | int offset = skb_checksum_start_offset(skb) + skb->csum_offset; |
| 2137 | |
| 2138 | if (offset + sizeof(__sum16) > hdlen) |
| 2139 | return -EINVAL; |
Eric Dumazet | 88078d9 | 2018-04-18 11:43:15 -0700 | [diff] [blame] | 2140 | } |
| 2141 | return __pskb_trim(skb, len); |
| 2142 | } |
| 2143 | EXPORT_SYMBOL(pskb_trim_rcsum_slow); |
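
/*
 * Editor's usage note: callers go through the pskb_trim_rcsum() inline,
 * which falls back to this slow path only when skb->ip_summed needs
 * attention. For CHECKSUM_COMPLETE the code above subtracts the checksum
 * of the trimmed bytes; for CHECKSUM_PARTIAL it merely verifies that the
 * checksum start/offset still lies within the new head. A hypothetical
 * call site:
 *
 *	if (pskb_trim_rcsum(skb, new_len))
 *		goto drop;
 */
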
| 2144 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2145 | /** |
| 2146 | * __pskb_pull_tail - advance tail of skb header |
| 2147 | * @skb: buffer to reallocate |
| 2148 | * @delta: number of bytes to advance tail |
| 2149 | * |
| 2150 | * This function only makes sense on a fragmented &sk_buff; |
| 2151 | * it expands the header, moving its tail forward and copying the |
| 2152 | * necessary data from the fragmented part. |
| 2153 | * |
| 2154 | * &sk_buff MUST have reference count of 1. |
| 2155 | * |
| 2156 | * Returns %NULL (and the &sk_buff does not change) if the pull failed, |
| 2157 | * or the value of the new tail of the skb on success. |
| 2158 | * |
| 2159 | * All the pointers pointing into skb header may change and must be |
| 2160 | * reloaded after call to this function. |
| 2161 | */ |
| 2162 | |
| 2163 | /* Moves the tail of the skb head forward, copying data from the |
| 2164 | * fragmented part when necessary. |
| 2165 | * 1. It may fail due to memory allocation failure. |
| 2166 | * 2. It may change skb pointers. |
| 2167 | * |
| 2168 | * It is pretty complicated. Luckily, it is called only in exceptional cases. |
| 2169 | */ |
Johannes Berg | af72868 | 2017-06-16 14:29:22 +0200 | [diff] [blame] | 2170 | void *__pskb_pull_tail(struct sk_buff *skb, int delta) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2171 | { |
| 2172 | /* If the skb does not have enough free space at the tail, get a new |
| 2173 | * one with an extra 128 bytes for future expansions. If we do have |
| 2174 | * enough tailroom, reallocate without expansion only if the skb is cloned. |
| 2175 | */ |
Arnaldo Carvalho de Melo | 4305b54 | 2007-04-19 20:43:29 -0700 | [diff] [blame] | 2176 | int i, k, eat = (skb->tail + delta) - skb->end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2177 | |
| 2178 | if (eat > 0 || skb_cloned(skb)) { |
| 2179 | if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, |
| 2180 | GFP_ATOMIC)) |
| 2181 | return NULL; |
| 2182 | } |
| 2183 | |
Tim Hansen | 9f77fad | 2017-10-09 11:37:59 -0400 | [diff] [blame] | 2184 | BUG_ON(skb_copy_bits(skb, skb_headlen(skb), |
| 2185 | skb_tail_pointer(skb), delta)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2186 | |
| 2187 | /* Optimization: no fragments, no reason to pre-estimate the |
| 2188 | * size of the pulled pages. Superb. |
| 2189 | */ |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 2190 | if (!skb_has_frag_list(skb)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2191 | goto pull_pages; |
| 2192 | |
| 2193 | /* Estimate size of pulled pages. */ |
| 2194 | eat = delta; |
| 2195 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2196 | int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); |
| 2197 | |
| 2198 | if (size >= eat) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2199 | goto pull_pages; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2200 | eat -= size; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2201 | } |
| 2202 | |
| 2203 | /* If we need to update the frag list, we are in trouble. |
Wenhua Shi | 09001b0 | 2017-10-14 18:51:36 +0200 | [diff] [blame] | 2204 | * Certainly, it is possible to add an offset to the skb data, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2205 | * but taking into account that pulling is expected to |
| 2206 | * be a very rare operation, it is worth fighting against |
| 2207 | * further bloating of the skb head and crucifying ourselves here instead. |
| 2208 | * Pure masochism, indeed. 8)8) |
| 2209 | */ |
| 2210 | if (eat) { |
| 2211 | struct sk_buff *list = skb_shinfo(skb)->frag_list; |
| 2212 | struct sk_buff *clone = NULL; |
| 2213 | struct sk_buff *insp = NULL; |
| 2214 | |
| 2215 | do { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2216 | if (list->len <= eat) { |
| 2217 | /* Eaten as a whole. */ |
| 2218 | eat -= list->len; |
| 2219 | list = list->next; |
| 2220 | insp = list; |
| 2221 | } else { |
| 2222 | /* Eaten partially. */ |
| 2223 | |
| 2224 | if (skb_shared(list)) { |
| 2225 | /* Sucks! We need to fork the list. :-( */ |
| 2226 | clone = skb_clone(list, GFP_ATOMIC); |
| 2227 | if (!clone) |
| 2228 | return NULL; |
| 2229 | insp = list->next; |
| 2230 | list = clone; |
| 2231 | } else { |
| 2232 | /* This may be pulled without |
| 2233 | * problems. */ |
| 2234 | insp = list; |
| 2235 | } |
| 2236 | if (!pskb_pull(list, eat)) { |
Wei Yongjun | f3fbbe0 | 2009-02-25 00:37:32 +0000 | [diff] [blame] | 2237 | kfree_skb(clone); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2238 | return NULL; |
| 2239 | } |
| 2240 | break; |
| 2241 | } |
| 2242 | } while (eat); |
| 2243 | |
| 2244 | /* Free pulled out fragments. */ |
| 2245 | while ((list = skb_shinfo(skb)->frag_list) != insp) { |
| 2246 | skb_shinfo(skb)->frag_list = list->next; |
| 2247 | kfree_skb(list); |
| 2248 | } |
| 2249 | /* And insert new clone at head. */ |
| 2250 | if (clone) { |
| 2251 | clone->next = list; |
| 2252 | skb_shinfo(skb)->frag_list = clone; |
| 2253 | } |
| 2254 | } |
| 2255 | /* Success! Now we may commit changes to skb data. */ |
| 2256 | |
| 2257 | pull_pages: |
| 2258 | eat = delta; |
| 2259 | k = 0; |
| 2260 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2261 | int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); |
| 2262 | |
| 2263 | if (size <= eat) { |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 2264 | skb_frag_unref(skb, i); |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2265 | eat -= size; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2266 | } else { |
Jonathan Lemon | b54c9d5 | 2019-07-30 07:40:33 -0700 | [diff] [blame] | 2267 | skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; |
| 2268 | |
| 2269 | *frag = skb_shinfo(skb)->frags[i]; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2270 | if (eat) { |
Jonathan Lemon | b54c9d5 | 2019-07-30 07:40:33 -0700 | [diff] [blame] | 2271 | skb_frag_off_add(frag, eat); |
| 2272 | skb_frag_size_sub(frag, eat); |
linzhang | 3ccc6c6 | 2017-07-17 17:25:02 +0800 | [diff] [blame] | 2273 | if (!i) |
| 2274 | goto end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2275 | eat = 0; |
| 2276 | } |
| 2277 | k++; |
| 2278 | } |
| 2279 | } |
| 2280 | skb_shinfo(skb)->nr_frags = k; |
| 2281 | |
linzhang | 3ccc6c6 | 2017-07-17 17:25:02 +0800 | [diff] [blame] | 2282 | end: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2283 | skb->tail += delta; |
| 2284 | skb->data_len -= delta; |
| 2285 | |
Willem de Bruijn | 1f8b977 | 2017-08-03 16:29:41 -0400 | [diff] [blame] | 2286 | if (!skb->data_len) |
| 2287 | skb_zcopy_clear(skb, false); |
| 2288 | |
Arnaldo Carvalho de Melo | 27a884d | 2007-04-19 20:29:13 -0700 | [diff] [blame] | 2289 | return skb_tail_pointer(skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2290 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2291 | EXPORT_SYMBOL(__pskb_pull_tail); |
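
/*
 * Editor's illustrative sketch: __pskb_pull_tail() is rarely called
 * directly; the usual consumer is pskb_may_pull(), which invokes it only
 * when the linear area is too short. The TCP header peek below is a
 * hypothetical example; note that the call may reallocate skb->head, so
 * any cached pointers into the skb must be reloaded afterwards.
 */
static const struct tcphdr * __maybe_unused
example_peek_tcp(struct sk_buff *skb)
{
	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		return NULL;
	return (const struct tcphdr *)skb->data;
}
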
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2292 | |
Eric Dumazet | 22019b1 | 2011-07-29 18:37:31 +0000 | [diff] [blame] | 2293 | /** |
| 2294 | * skb_copy_bits - copy bits from skb to kernel buffer |
| 2295 | * @skb: source skb |
| 2296 | * @offset: offset in source |
| 2297 | * @to: destination buffer |
| 2298 | * @len: number of bytes to copy |
| 2299 | * |
| 2300 | * Copy the specified number of bytes from the source skb to the |
| 2301 | * destination buffer. |
| 2302 | * |
| 2303 | * CAUTION: |
| 2304 | * If its prototype is ever changed, |
| 2305 | * check arch/{*}/net/{*}.S files, |
| 2306 | * since it is called from BPF assembly code. |
| 2307 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2308 | int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) |
| 2309 | { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2310 | int start = skb_headlen(skb); |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2311 | struct sk_buff *frag_iter; |
| 2312 | int i, copy; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2313 | |
| 2314 | if (offset > (int)skb->len - len) |
| 2315 | goto fault; |
| 2316 | |
| 2317 | /* Copy header. */ |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2318 | if ((copy = start - offset) > 0) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2319 | if (copy > len) |
| 2320 | copy = len; |
Arnaldo Carvalho de Melo | d626f62 | 2007-03-27 18:55:52 -0300 | [diff] [blame] | 2321 | skb_copy_from_linear_data_offset(skb, offset, to, copy); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2322 | if ((len -= copy) == 0) |
| 2323 | return 0; |
| 2324 | offset += copy; |
| 2325 | to += copy; |
| 2326 | } |
| 2327 | |
| 2328 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2329 | int end; |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 2330 | skb_frag_t *f = &skb_shinfo(skb)->frags[i]; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2331 | |
Ilpo Järvinen | 547b792 | 2008-07-25 21:43:18 -0700 | [diff] [blame] | 2332 | WARN_ON(start > offset + len); |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2333 | |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 2334 | end = start + skb_frag_size(f); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2335 | if ((copy = end - offset) > 0) { |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 2336 | u32 p_off, p_len, copied; |
| 2337 | struct page *p; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2338 | u8 *vaddr; |
| 2339 | |
| 2340 | if (copy > len) |
| 2341 | copy = len; |
| 2342 | |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 2343 | skb_frag_foreach_page(f, |
Jonathan Lemon | b54c9d5 | 2019-07-30 07:40:33 -0700 | [diff] [blame] | 2344 | skb_frag_off(f) + offset - start, |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 2345 | copy, p, p_off, p_len, copied) { |
| 2346 | vaddr = kmap_atomic(p); |
| 2347 | memcpy(to + copied, vaddr + p_off, p_len); |
| 2348 | kunmap_atomic(vaddr); |
| 2349 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2350 | |
| 2351 | if ((len -= copy) == 0) |
| 2352 | return 0; |
| 2353 | offset += copy; |
| 2354 | to += copy; |
| 2355 | } |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2356 | start = end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2357 | } |
| 2358 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2359 | skb_walk_frags(skb, frag_iter) { |
| 2360 | int end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2361 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2362 | WARN_ON(start > offset + len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2363 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2364 | end = start + frag_iter->len; |
| 2365 | if ((copy = end - offset) > 0) { |
| 2366 | if (copy > len) |
| 2367 | copy = len; |
| 2368 | if (skb_copy_bits(frag_iter, offset - start, to, copy)) |
| 2369 | goto fault; |
| 2370 | if ((len -= copy) == 0) |
| 2371 | return 0; |
| 2372 | offset += copy; |
| 2373 | to += copy; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2374 | } |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2375 | start = end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2376 | } |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 2377 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2378 | if (!len) |
| 2379 | return 0; |
| 2380 | |
| 2381 | fault: |
| 2382 | return -EFAULT; |
| 2383 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2384 | EXPORT_SYMBOL(skb_copy_bits); |
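
/*
 * Editor's illustrative sketch: flattening the first bytes of a possibly
 * fragmented skb into a caller-supplied buffer, e.g. to parse a header
 * without linearizing the whole skb.
 */
static int __maybe_unused example_peek_bytes(const struct sk_buff *skb,
					     void *buf, unsigned int n)
{
	return skb_copy_bits(skb, 0, buf, n);	/* -EFAULT if skb too short */
}
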
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2385 | |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2386 | /* |
| 2387 | * Callback from splice_to_pipe(), if we need to release some pages |
| 2388 | * at the end of the spd in case we errored out while filling the pipe. |
| 2389 | */ |
| 2390 | static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) |
| 2391 | { |
Jarek Poplawski | 8b9d372 | 2009-01-19 17:03:56 -0800 | [diff] [blame] | 2392 | put_page(spd->pages[i]); |
| 2393 | } |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2394 | |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2395 | static struct page *linear_to_page(struct page *page, unsigned int *len, |
| 2396 | unsigned int *offset, |
Eric Dumazet | 18aafc6 | 2013-01-11 14:46:37 +0000 | [diff] [blame] | 2397 | struct sock *sk) |
Jarek Poplawski | 8b9d372 | 2009-01-19 17:03:56 -0800 | [diff] [blame] | 2398 | { |
Eric Dumazet | 5640f76 | 2012-09-23 23:04:42 +0000 | [diff] [blame] | 2399 | struct page_frag *pfrag = sk_page_frag(sk); |
Jarek Poplawski | 8b9d372 | 2009-01-19 17:03:56 -0800 | [diff] [blame] | 2400 | |
Eric Dumazet | 5640f76 | 2012-09-23 23:04:42 +0000 | [diff] [blame] | 2401 | if (!sk_page_frag_refill(sk, pfrag)) |
| 2402 | return NULL; |
Jarek Poplawski | 4fb6699 | 2009-02-01 00:41:42 -0800 | [diff] [blame] | 2403 | |
Eric Dumazet | 5640f76 | 2012-09-23 23:04:42 +0000 | [diff] [blame] | 2404 | *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset); |
Jarek Poplawski | 4fb6699 | 2009-02-01 00:41:42 -0800 | [diff] [blame] | 2405 | |
Eric Dumazet | 5640f76 | 2012-09-23 23:04:42 +0000 | [diff] [blame] | 2406 | memcpy(page_address(pfrag->page) + pfrag->offset, |
| 2407 | page_address(page) + *offset, *len); |
| 2408 | *offset = pfrag->offset; |
| 2409 | pfrag->offset += *len; |
Jarek Poplawski | 4fb6699 | 2009-02-01 00:41:42 -0800 | [diff] [blame] | 2410 | |
Eric Dumazet | 5640f76 | 2012-09-23 23:04:42 +0000 | [diff] [blame] | 2411 | return pfrag->page; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2412 | } |
| 2413 | |
Eric Dumazet | 41c73a0 | 2012-04-22 12:26:16 +0000 | [diff] [blame] | 2414 | static bool spd_can_coalesce(const struct splice_pipe_desc *spd, |
| 2415 | struct page *page, |
| 2416 | unsigned int offset) |
| 2417 | { |
| 2418 | return spd->nr_pages && |
| 2419 | spd->pages[spd->nr_pages - 1] == page && |
| 2420 | (spd->partial[spd->nr_pages - 1].offset + |
| 2421 | spd->partial[spd->nr_pages - 1].len == offset); |
| 2422 | } |
| 2423 | |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2424 | /* |
| 2425 | * Fill page/offset/length into spd, if it can hold more pages. |
| 2426 | */ |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2427 | static bool spd_fill_page(struct splice_pipe_desc *spd, |
| 2428 | struct pipe_inode_info *pipe, struct page *page, |
| 2429 | unsigned int *len, unsigned int offset, |
Eric Dumazet | 18aafc6 | 2013-01-11 14:46:37 +0000 | [diff] [blame] | 2430 | bool linear, |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2431 | struct sock *sk) |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2432 | { |
Eric Dumazet | 41c73a0 | 2012-04-22 12:26:16 +0000 | [diff] [blame] | 2433 | if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2434 | return true; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2435 | |
Jarek Poplawski | 8b9d372 | 2009-01-19 17:03:56 -0800 | [diff] [blame] | 2436 | if (linear) { |
Eric Dumazet | 18aafc6 | 2013-01-11 14:46:37 +0000 | [diff] [blame] | 2437 | page = linear_to_page(page, len, &offset, sk); |
Jarek Poplawski | 8b9d372 | 2009-01-19 17:03:56 -0800 | [diff] [blame] | 2438 | if (!page) |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2439 | return true; |
Eric Dumazet | 41c73a0 | 2012-04-22 12:26:16 +0000 | [diff] [blame] | 2440 | } |
| 2441 | if (spd_can_coalesce(spd, page, offset)) { |
| 2442 | spd->partial[spd->nr_pages - 1].len += *len; |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2443 | return false; |
Eric Dumazet | 41c73a0 | 2012-04-22 12:26:16 +0000 | [diff] [blame] | 2444 | } |
| 2445 | get_page(page); |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2446 | spd->pages[spd->nr_pages] = page; |
Jarek Poplawski | 4fb6699 | 2009-02-01 00:41:42 -0800 | [diff] [blame] | 2447 | spd->partial[spd->nr_pages].len = *len; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2448 | spd->partial[spd->nr_pages].offset = offset; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2449 | spd->nr_pages++; |
Jarek Poplawski | 8b9d372 | 2009-01-19 17:03:56 -0800 | [diff] [blame] | 2450 | |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2451 | return false; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2452 | } |
| 2453 | |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2454 | static bool __splice_segment(struct page *page, unsigned int poff, |
| 2455 | unsigned int plen, unsigned int *off, |
Eric Dumazet | 18aafc6 | 2013-01-11 14:46:37 +0000 | [diff] [blame] | 2456 | unsigned int *len, |
Eric Dumazet | d7ccf7c | 2012-04-23 23:35:04 -0400 | [diff] [blame] | 2457 | struct splice_pipe_desc *spd, bool linear, |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2458 | struct sock *sk, |
| 2459 | struct pipe_inode_info *pipe) |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 2460 | { |
| 2461 | if (!*len) |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2462 | return true; |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 2463 | |
| 2464 | /* skip this segment if already processed */ |
| 2465 | if (*off >= plen) { |
| 2466 | *off -= plen; |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2467 | return false; |
Octavian Purdila | db43a28 | 2008-06-27 17:27:21 -0700 | [diff] [blame] | 2468 | } |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2469 | |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 2470 | /* ignore any bits we already processed */ |
Eric Dumazet | 9ca1b22 | 2013-01-05 21:31:18 +0000 | [diff] [blame] | 2471 | poff += *off; |
| 2472 | plen -= *off; |
| 2473 | *off = 0; |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 2474 | |
Eric Dumazet | 18aafc6 | 2013-01-11 14:46:37 +0000 | [diff] [blame] | 2475 | do { |
| 2476 | unsigned int flen = min(*len, plen); |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 2477 | |
Eric Dumazet | 18aafc6 | 2013-01-11 14:46:37 +0000 | [diff] [blame] | 2478 | if (spd_fill_page(spd, pipe, page, &flen, poff, |
| 2479 | linear, sk)) |
| 2480 | return true; |
| 2481 | poff += flen; |
| 2482 | plen -= flen; |
| 2483 | *len -= flen; |
| 2484 | } while (*len && plen); |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 2485 | |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2486 | return false; |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 2487 | } |
| 2488 | |
| 2489 | /* |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2490 | * Map linear and fragment data from the skb to spd. It reports true if the |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 2491 | * pipe is full or if we already spliced the requested length. |
| 2492 | */ |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2493 | static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, |
| 2494 | unsigned int *offset, unsigned int *len, |
| 2495 | struct splice_pipe_desc *spd, struct sock *sk) |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 2496 | { |
| 2497 | int seg; |
Tom Herbert | fa9835e | 2016-03-07 14:11:04 -0800 | [diff] [blame] | 2498 | struct sk_buff *iter; |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 2499 | |
Eric Dumazet | 1d0c0b3 | 2012-04-27 02:10:03 +0000 | [diff] [blame] | 2500 | /* map the linear part: |
Alexander Duyck | 2996d31 | 2012-05-02 18:18:42 +0000 | [diff] [blame] | 2501 | * If skb->head_frag is set, this 'linear' part is backed by a |
| 2502 | * fragment, and if the head is not shared with any clones then |
| 2503 | * we can avoid a copy since we own the head portion of this page. |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2504 | */ |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 2505 | if (__splice_segment(virt_to_page(skb->data), |
| 2506 | (unsigned long) skb->data & (PAGE_SIZE - 1), |
| 2507 | skb_headlen(skb), |
Eric Dumazet | 18aafc6 | 2013-01-11 14:46:37 +0000 | [diff] [blame] | 2508 | offset, len, spd, |
Alexander Duyck | 3a7c1ee4 | 2012-05-03 01:09:42 +0000 | [diff] [blame] | 2509 | skb_head_is_locked(skb), |
Eric Dumazet | 1d0c0b3 | 2012-04-27 02:10:03 +0000 | [diff] [blame] | 2510 | sk, pipe)) |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2511 | return true; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2512 | |
| 2513 | /* |
| 2514 | * then map the fragments |
| 2515 | */ |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2516 | for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { |
| 2517 | const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; |
| 2518 | |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 2519 | if (__splice_segment(skb_frag_page(f), |
Jonathan Lemon | b54c9d5 | 2019-07-30 07:40:33 -0700 | [diff] [blame] | 2520 | skb_frag_off(f), skb_frag_size(f), |
Eric Dumazet | 18aafc6 | 2013-01-11 14:46:37 +0000 | [diff] [blame] | 2521 | offset, len, spd, false, sk, pipe)) |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2522 | return true; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2523 | } |
| 2524 | |
Tom Herbert | fa9835e | 2016-03-07 14:11:04 -0800 | [diff] [blame] | 2525 | skb_walk_frags(skb, iter) { |
| 2526 | if (*offset >= iter->len) { |
| 2527 | *offset -= iter->len; |
| 2528 | continue; |
| 2529 | } |
| 2530 | /* __skb_splice_bits() only fails if the output has no room |
| 2531 | * left, so no point in going over the frag_list for the error |
| 2532 | * case. |
| 2533 | */ |
| 2534 | if (__skb_splice_bits(iter, pipe, offset, len, spd, sk)) |
| 2535 | return true; |
| 2536 | } |
| 2537 | |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2538 | return false; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2539 | } |
| 2540 | |
| 2541 | /* |
| 2542 | * Map data from the skb to a pipe. Should handle the linear part, |
Tom Herbert | fa9835e | 2016-03-07 14:11:04 -0800 | [diff] [blame] | 2543 | * the fragments, and the frag list. |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2544 | */ |
Hannes Frederic Sowa | a60e3cc | 2015-05-21 17:00:00 +0200 | [diff] [blame] | 2545 | int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2546 | struct pipe_inode_info *pipe, unsigned int tlen, |
Al Viro | 2586926 | 2016-09-17 21:02:10 -0400 | [diff] [blame] | 2547 | unsigned int flags) |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2548 | { |
Eric Dumazet | 41c73a0 | 2012-04-22 12:26:16 +0000 | [diff] [blame] | 2549 | struct partial_page partial[MAX_SKB_FRAGS]; |
| 2550 | struct page *pages[MAX_SKB_FRAGS]; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2551 | struct splice_pipe_desc spd = { |
| 2552 | .pages = pages, |
| 2553 | .partial = partial, |
Eric Dumazet | 047fe36 | 2012-06-12 15:24:40 +0200 | [diff] [blame] | 2554 | .nr_pages_max = MAX_SKB_FRAGS, |
Miklos Szeredi | 28a625c | 2014-01-22 19:36:57 +0100 | [diff] [blame] | 2555 | .ops = &nosteal_pipe_buf_ops, |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2556 | .spd_release = sock_spd_release, |
| 2557 | }; |
Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 2558 | int ret = 0; |
| 2559 | |
Tom Herbert | fa9835e | 2016-03-07 14:11:04 -0800 | [diff] [blame] | 2560 | __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk); |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2561 | |
Hannes Frederic Sowa | a60e3cc | 2015-05-21 17:00:00 +0200 | [diff] [blame] | 2562 | if (spd.nr_pages) |
Al Viro | 2586926 | 2016-09-17 21:02:10 -0400 | [diff] [blame] | 2563 | ret = splice_to_pipe(pipe, &spd); |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2564 | |
Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 2565 | return ret; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2566 | } |
Hannes Frederic Sowa | 2b51457 | 2015-05-21 17:00:01 +0200 | [diff] [blame] | 2567 | EXPORT_SYMBOL_GPL(skb_splice_bits); |
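
/*
 * Editor's usage note: this is the per-skb engine behind splice(2) on
 * sockets; TCP's splice read path, for instance, invokes it roughly as
 *
 *	used = skb_splice_bits(skb, sk, offset, pipe, len, flags);
 *
 * and advances its sequence space by the amount actually spliced.
 */
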
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2568 | |
Cong Wang | 0739cd2 | 2021-03-30 19:32:24 -0700 | [diff] [blame] | 2569 | static int sendmsg_unlocked(struct sock *sk, struct msghdr *msg, |
| 2570 | struct kvec *vec, size_t num, size_t size) |
| 2571 | { |
| 2572 | struct socket *sock = sk->sk_socket; |
| 2573 | |
| 2574 | if (!sock) |
| 2575 | return -EINVAL; |
| 2576 | return kernel_sendmsg(sock, msg, vec, num, size); |
| 2577 | } |
| 2578 | |
| 2579 | static int sendpage_unlocked(struct sock *sk, struct page *page, int offset, |
| 2580 | size_t size, int flags) |
| 2581 | { |
| 2582 | struct socket *sock = sk->sk_socket; |
| 2583 | |
| 2584 | if (!sock) |
| 2585 | return -EINVAL; |
| 2586 | return kernel_sendpage(sock, page, offset, size, flags); |
| 2587 | } |
| 2588 | |
| 2589 | typedef int (*sendmsg_func)(struct sock *sk, struct msghdr *msg, |
| 2590 | struct kvec *vec, size_t num, size_t size); |
| 2591 | typedef int (*sendpage_func)(struct sock *sk, struct page *page, int offset, |
| 2592 | size_t size, int flags); |
| 2593 | static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, |
| 2594 | int len, sendmsg_func sendmsg, sendpage_func sendpage) |
Tom Herbert | 20bf50d | 2017-07-28 16:22:42 -0700 | [diff] [blame] | 2595 | { |
| 2596 | unsigned int orig_len = len; |
| 2597 | struct sk_buff *head = skb; |
| 2598 | unsigned short fragidx; |
| 2599 | int slen, ret; |
| 2600 | |
| 2601 | do_frag_list: |
| 2602 | |
| 2603 | /* Deal with head data */ |
| 2604 | while (offset < skb_headlen(skb) && len) { |
| 2605 | struct kvec kv; |
| 2606 | struct msghdr msg; |
| 2607 | |
| 2608 | slen = min_t(int, len, skb_headlen(skb) - offset); |
| 2609 | kv.iov_base = skb->data + offset; |
John Fastabend | db5980d | 2017-08-15 22:31:34 -0700 | [diff] [blame] | 2610 | kv.iov_len = slen; |
Tom Herbert | 20bf50d | 2017-07-28 16:22:42 -0700 | [diff] [blame] | 2611 | memset(&msg, 0, sizeof(msg)); |
John Fastabend | bd95e678 | 2019-05-24 08:01:00 -0700 | [diff] [blame] | 2612 | msg.msg_flags = MSG_DONTWAIT; |
Tom Herbert | 20bf50d | 2017-07-28 16:22:42 -0700 | [diff] [blame] | 2613 | |
Cong Wang | 0739cd2 | 2021-03-30 19:32:24 -0700 | [diff] [blame] | 2614 | ret = INDIRECT_CALL_2(sendmsg, kernel_sendmsg_locked, |
| 2615 | sendmsg_unlocked, sk, &msg, &kv, 1, slen); |
Tom Herbert | 20bf50d | 2017-07-28 16:22:42 -0700 | [diff] [blame] | 2616 | if (ret <= 0) |
| 2617 | goto error; |
| 2618 | |
| 2619 | offset += ret; |
| 2620 | len -= ret; |
| 2621 | } |
| 2622 | |
| 2623 | /* Was all the data in the skb head? */ |
| 2624 | if (!len) |
| 2625 | goto out; |
| 2626 | |
| 2627 | /* Make offset relative to start of frags */ |
| 2628 | offset -= skb_headlen(skb); |
| 2629 | |
| 2630 | /* Find where we are in frag list */ |
| 2631 | for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { |
| 2632 | skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; |
| 2633 | |
Matthew Wilcox (Oracle) | d8e18a5 | 2019-07-22 20:08:26 -0700 | [diff] [blame] | 2634 | if (offset < skb_frag_size(frag)) |
Tom Herbert | 20bf50d | 2017-07-28 16:22:42 -0700 | [diff] [blame] | 2635 | break; |
| 2636 | |
Matthew Wilcox (Oracle) | d8e18a5 | 2019-07-22 20:08:26 -0700 | [diff] [blame] | 2637 | offset -= skb_frag_size(frag); |
Tom Herbert | 20bf50d | 2017-07-28 16:22:42 -0700 | [diff] [blame] | 2638 | } |
| 2639 | |
| 2640 | for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { |
| 2641 | skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; |
| 2642 | |
Matthew Wilcox (Oracle) | d8e18a5 | 2019-07-22 20:08:26 -0700 | [diff] [blame] | 2643 | slen = min_t(size_t, len, skb_frag_size(frag) - offset); |
Tom Herbert | 20bf50d | 2017-07-28 16:22:42 -0700 | [diff] [blame] | 2644 | |
| 2645 | while (slen) { |
Cong Wang | 0739cd2 | 2021-03-30 19:32:24 -0700 | [diff] [blame] | 2646 | ret = INDIRECT_CALL_2(sendpage, kernel_sendpage_locked, |
| 2647 | sendpage_unlocked, sk, |
| 2648 | skb_frag_page(frag), |
| 2649 | skb_frag_off(frag) + offset, |
| 2650 | slen, MSG_DONTWAIT); |
Tom Herbert | 20bf50d | 2017-07-28 16:22:42 -0700 | [diff] [blame] | 2651 | if (ret <= 0) |
| 2652 | goto error; |
| 2653 | |
| 2654 | len -= ret; |
| 2655 | offset += ret; |
| 2656 | slen -= ret; |
| 2657 | } |
| 2658 | |
| 2659 | offset = 0; |
| 2660 | } |
| 2661 | |
| 2662 | if (len) { |
| 2663 | /* Process any frag lists */ |
| 2664 | |
| 2665 | if (skb == head) { |
| 2666 | if (skb_has_frag_list(skb)) { |
| 2667 | skb = skb_shinfo(skb)->frag_list; |
| 2668 | goto do_frag_list; |
| 2669 | } |
| 2670 | } else if (skb->next) { |
| 2671 | skb = skb->next; |
| 2672 | goto do_frag_list; |
| 2673 | } |
| 2674 | } |
| 2675 | |
| 2676 | out: |
| 2677 | return orig_len - len; |
| 2678 | |
| 2679 | error: |
| 2680 | return orig_len == len ? ret : orig_len - len; |
| 2681 | } |
Cong Wang | 0739cd2 | 2021-03-30 19:32:24 -0700 | [diff] [blame] | 2682 | |
| 2683 | /* Send skb data on a socket. Socket must be locked. */ |
| 2684 | int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, |
| 2685 | int len) |
| 2686 | { |
| 2687 | return __skb_send_sock(sk, skb, offset, len, kernel_sendmsg_locked, |
| 2688 | kernel_sendpage_locked); |
| 2689 | } |
Tom Herbert | 20bf50d | 2017-07-28 16:22:42 -0700 | [diff] [blame] | 2690 | EXPORT_SYMBOL_GPL(skb_send_sock_locked); |
| 2691 | |
Cong Wang | 0739cd2 | 2021-03-30 19:32:24 -0700 | [diff] [blame] | 2692 | /* Send skb data on a socket. Socket must be unlocked. */ |
| 2693 | int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len) |
| 2694 | { |
| 2695 | return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked, |
| 2696 | sendpage_unlocked); |
| 2697 | } |
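
/*
 * Editor's usage note: the two wrappers differ only in whether the caller
 * already owns the socket lock. A hypothetical locked caller:
 *
 *	lock_sock(sk);
 *	sent = skb_send_sock_locked(sk, skb, offset, len);
 *	release_sock(sk);
 *
 * A negative return is the first error, hit before any byte went out;
 * otherwise the return is the number of bytes actually sent, which may
 * fall short of the requested length.
 */
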
| 2698 | |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2699 | /** |
| 2700 | * skb_store_bits - store bits from kernel buffer to skb |
| 2701 | * @skb: destination buffer |
| 2702 | * @offset: offset in destination |
| 2703 | * @from: source buffer |
| 2704 | * @len: number of bytes to copy |
| 2705 | * |
| 2706 | * Copy the specified number of bytes from the source buffer to the |
| 2707 | * destination skb. This function handles all the messy bits of |
| 2708 | * traversing fragment lists and such. |
| 2709 | */ |
| 2710 | |
Stephen Hemminger | 0c6fcc8 | 2007-04-20 16:40:01 -0700 | [diff] [blame] | 2711 | int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2712 | { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2713 | int start = skb_headlen(skb); |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2714 | struct sk_buff *frag_iter; |
| 2715 | int i, copy; |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2716 | |
| 2717 | if (offset > (int)skb->len - len) |
| 2718 | goto fault; |
| 2719 | |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2720 | if ((copy = start - offset) > 0) { |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2721 | if (copy > len) |
| 2722 | copy = len; |
Arnaldo Carvalho de Melo | 27d7ff4 | 2007-03-31 11:55:19 -0300 | [diff] [blame] | 2723 | skb_copy_to_linear_data_offset(skb, offset, from, copy); |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2724 | if ((len -= copy) == 0) |
| 2725 | return 0; |
| 2726 | offset += copy; |
| 2727 | from += copy; |
| 2728 | } |
| 2729 | |
| 2730 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
| 2731 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2732 | int end; |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2733 | |
Ilpo Järvinen | 547b792 | 2008-07-25 21:43:18 -0700 | [diff] [blame] | 2734 | WARN_ON(start > offset + len); |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2735 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2736 | end = start + skb_frag_size(frag); |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2737 | if ((copy = end - offset) > 0) { |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 2738 | u32 p_off, p_len, copied; |
| 2739 | struct page *p; |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2740 | u8 *vaddr; |
| 2741 | |
| 2742 | if (copy > len) |
| 2743 | copy = len; |
| 2744 | |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 2745 | skb_frag_foreach_page(frag, |
Jonathan Lemon | b54c9d5 | 2019-07-30 07:40:33 -0700 | [diff] [blame] | 2746 | skb_frag_off(frag) + offset - start, |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 2747 | copy, p, p_off, p_len, copied) { |
| 2748 | vaddr = kmap_atomic(p); |
| 2749 | memcpy(vaddr + p_off, from + copied, p_len); |
| 2750 | kunmap_atomic(vaddr); |
| 2751 | } |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2752 | |
| 2753 | if ((len -= copy) == 0) |
| 2754 | return 0; |
| 2755 | offset += copy; |
| 2756 | from += copy; |
| 2757 | } |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2758 | start = end; |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2759 | } |
| 2760 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2761 | skb_walk_frags(skb, frag_iter) { |
| 2762 | int end; |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2763 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2764 | WARN_ON(start > offset + len); |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2765 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2766 | end = start + frag_iter->len; |
| 2767 | if ((copy = end - offset) > 0) { |
| 2768 | if (copy > len) |
| 2769 | copy = len; |
| 2770 | if (skb_store_bits(frag_iter, offset - start, |
| 2771 | from, copy)) |
| 2772 | goto fault; |
| 2773 | if ((len -= copy) == 0) |
| 2774 | return 0; |
| 2775 | offset += copy; |
| 2776 | from += copy; |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2777 | } |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2778 | start = end; |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2779 | } |
| 2780 | if (!len) |
| 2781 | return 0; |
| 2782 | |
| 2783 | fault: |
| 2784 | return -EFAULT; |
| 2785 | } |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2786 | EXPORT_SYMBOL(skb_store_bits); |
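
/*
 * Editor's illustrative sketch: skb_store_bits() writes through to
 * whatever pages it finds and does not unshare them, so callers that may
 * hold a shared or cloned skb typically make it writable first.
 * skb_ensure_writable() is assumed here for that unsharing step.
 */
static int __maybe_unused example_poke_bytes(struct sk_buff *skb, int offset,
					     const void *buf, int n)
{
	int err = skb_ensure_writable(skb, offset + n);

	if (err)
		return err;
	return skb_store_bits(skb, offset, buf, n);
}
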
| 2787 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2788 | /* Checksum skb data. */ |
Daniel Borkmann | 2817a33 | 2013-10-30 11:50:51 +0100 | [diff] [blame] | 2789 | __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, |
| 2790 | __wsum csum, const struct skb_checksum_ops *ops) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2791 | { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2792 | int start = skb_headlen(skb); |
| 2793 | int i, copy = start - offset; |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2794 | struct sk_buff *frag_iter; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2795 | int pos = 0; |
| 2796 | |
| 2797 | /* Checksum header. */ |
| 2798 | if (copy > 0) { |
| 2799 | if (copy > len) |
| 2800 | copy = len; |
Matteo Croce | 2544af0 | 2019-05-29 17:13:48 +0200 | [diff] [blame] | 2801 | csum = INDIRECT_CALL_1(ops->update, csum_partial_ext, |
| 2802 | skb->data + offset, copy, csum); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2803 | if ((len -= copy) == 0) |
| 2804 | return csum; |
| 2805 | offset += copy; |
| 2806 | pos = copy; |
| 2807 | } |
| 2808 | |
| 2809 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2810 | int end; |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 2811 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2812 | |
Ilpo Järvinen | 547b792 | 2008-07-25 21:43:18 -0700 | [diff] [blame] | 2813 | WARN_ON(start > offset + len); |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2814 | |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 2815 | end = start + skb_frag_size(frag); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2816 | if ((copy = end - offset) > 0) { |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 2817 | u32 p_off, p_len, copied; |
| 2818 | struct page *p; |
Al Viro | 44bb936 | 2006-11-14 21:36:14 -0800 | [diff] [blame] | 2819 | __wsum csum2; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2820 | u8 *vaddr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2821 | |
| 2822 | if (copy > len) |
| 2823 | copy = len; |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 2824 | |
| 2825 | skb_frag_foreach_page(frag, |
Jonathan Lemon | b54c9d5 | 2019-07-30 07:40:33 -0700 | [diff] [blame] | 2826 | skb_frag_off(frag) + offset - start, |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 2827 | copy, p, p_off, p_len, copied) { |
| 2828 | vaddr = kmap_atomic(p); |
Matteo Croce | 2544af0 | 2019-05-29 17:13:48 +0200 | [diff] [blame] | 2829 | csum2 = INDIRECT_CALL_1(ops->update, |
| 2830 | csum_partial_ext, |
| 2831 | vaddr + p_off, p_len, 0); |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 2832 | kunmap_atomic(vaddr); |
Matteo Croce | 2544af0 | 2019-05-29 17:13:48 +0200 | [diff] [blame] | 2833 | csum = INDIRECT_CALL_1(ops->combine, |
| 2834 | csum_block_add_ext, csum, |
| 2835 | csum2, pos, p_len); |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 2836 | pos += p_len; |
| 2837 | } |
| 2838 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2839 | if (!(len -= copy)) |
| 2840 | return csum; |
| 2841 | offset += copy; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2842 | } |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2843 | start = end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2844 | } |
| 2845 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2846 | skb_walk_frags(skb, frag_iter) { |
| 2847 | int end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2848 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2849 | WARN_ON(start > offset + len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2850 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2851 | end = start + frag_iter->len; |
| 2852 | if ((copy = end - offset) > 0) { |
| 2853 | __wsum csum2; |
| 2854 | if (copy > len) |
| 2855 | copy = len; |
Daniel Borkmann | 2817a33 | 2013-10-30 11:50:51 +0100 | [diff] [blame] | 2856 | csum2 = __skb_checksum(frag_iter, offset - start, |
| 2857 | copy, 0, ops); |
Matteo Croce | 2544af0 | 2019-05-29 17:13:48 +0200 | [diff] [blame] | 2858 | csum = INDIRECT_CALL_1(ops->combine, csum_block_add_ext, |
| 2859 | csum, csum2, pos, copy); |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2860 | if ((len -= copy) == 0) |
| 2861 | return csum; |
| 2862 | offset += copy; |
| 2863 | pos += copy; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2864 | } |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2865 | start = end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2866 | } |
Kris Katterjohn | 09a6266 | 2006-01-08 22:24:28 -0800 | [diff] [blame] | 2867 | BUG_ON(len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2868 | |
| 2869 | return csum; |
| 2870 | } |
Daniel Borkmann | 2817a33 | 2013-10-30 11:50:51 +0100 | [diff] [blame] | 2871 | EXPORT_SYMBOL(__skb_checksum); |
| 2872 | |
| 2873 | __wsum skb_checksum(const struct sk_buff *skb, int offset, |
| 2874 | int len, __wsum csum) |
| 2875 | { |
| 2876 | const struct skb_checksum_ops ops = { |
Daniel Borkmann | cea80ea | 2013-11-04 17:10:25 +0100 | [diff] [blame] | 2877 | .update = csum_partial_ext, |
Daniel Borkmann | 2817a33 | 2013-10-30 11:50:51 +0100 | [diff] [blame] | 2878 | .combine = csum_block_add_ext, |
| 2879 | }; |
| 2880 | |
| 2881 | return __skb_checksum(skb, offset, len, csum, &ops); |
| 2882 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2883 | EXPORT_SYMBOL(skb_checksum); |
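
/*
 * Editor's usage note: __skb_checksum() takes its update/combine
 * callbacks as parameters so the same frag-walking logic can serve
 * checksums other than the Internet one; SCTP, for example, plugs in
 * CRC32c helpers through its own struct skb_checksum_ops instead of
 * csum_partial_ext and csum_block_add_ext.
 */
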
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2884 | |
| 2885 | /* Both of the above in one bottle. */ |
| 2886 | |
Al Viro | 81d7766 | 2006-11-14 21:37:33 -0800 | [diff] [blame] | 2887 | __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, |
Al Viro | 8d5930d | 2020-07-10 20:07:10 -0400 | [diff] [blame] | 2888 | u8 *to, int len) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2889 | { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2890 | int start = skb_headlen(skb); |
| 2891 | int i, copy = start - offset; |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2892 | struct sk_buff *frag_iter; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2893 | int pos = 0; |
Al Viro | 8d5930d | 2020-07-10 20:07:10 -0400 | [diff] [blame] | 2894 | __wsum csum = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2895 | |
| 2896 | /* Copy header. */ |
| 2897 | if (copy > 0) { |
| 2898 | if (copy > len) |
| 2899 | copy = len; |
| 2900 | csum = csum_partial_copy_nocheck(skb->data + offset, to, |
Al Viro | cc44c17 | 2020-07-11 00:12:07 -0400 | [diff] [blame] | 2901 | copy); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2902 | if ((len -= copy) == 0) |
| 2903 | return csum; |
| 2904 | offset += copy; |
| 2905 | to += copy; |
| 2906 | pos = copy; |
| 2907 | } |
| 2908 | |
| 2909 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2910 | int end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2911 | |
Ilpo Järvinen | 547b792 | 2008-07-25 21:43:18 -0700 | [diff] [blame] | 2912 | WARN_ON(start > offset + len); |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2913 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2914 | end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2915 | if ((copy = end - offset) > 0) { |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 2916 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
| 2917 | u32 p_off, p_len, copied; |
| 2918 | struct page *p; |
Al Viro | 5084205 | 2006-11-14 21:36:34 -0800 | [diff] [blame] | 2919 | __wsum csum2; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2920 | u8 *vaddr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2921 | |
| 2922 | if (copy > len) |
| 2923 | copy = len; |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 2924 | |
| 2925 | skb_frag_foreach_page(frag, |
Jonathan Lemon | b54c9d5 | 2019-07-30 07:40:33 -0700 | [diff] [blame] | 2926 | skb_frag_off(frag) + offset - start, |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 2927 | copy, p, p_off, p_len, copied) { |
| 2928 | vaddr = kmap_atomic(p); |
| 2929 | csum2 = csum_partial_copy_nocheck(vaddr + p_off, |
| 2930 | to + copied, |
Al Viro | cc44c17 | 2020-07-11 00:12:07 -0400 | [diff] [blame] | 2931 | p_len); |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 2932 | kunmap_atomic(vaddr); |
| 2933 | csum = csum_block_add(csum, csum2, pos); |
| 2934 | pos += p_len; |
| 2935 | } |
| 2936 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2937 | if (!(len -= copy)) |
| 2938 | return csum; |
| 2939 | offset += copy; |
| 2940 | to += copy; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2941 | } |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2942 | start = end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2943 | } |
| 2944 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2945 | skb_walk_frags(skb, frag_iter) { |
| 2946 | __wsum csum2; |
| 2947 | int end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2948 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2949 | WARN_ON(start > offset + len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2950 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2951 | end = start + frag_iter->len; |
| 2952 | if ((copy = end - offset) > 0) { |
| 2953 | if (copy > len) |
| 2954 | copy = len; |
| 2955 | csum2 = skb_copy_and_csum_bits(frag_iter, |
| 2956 | offset - start, |
Al Viro | 8d5930d | 2020-07-10 20:07:10 -0400 | [diff] [blame] | 2957 | to, copy); |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2958 | csum = csum_block_add(csum, csum2, pos); |
| 2959 | if ((len -= copy) == 0) |
| 2960 | return csum; |
| 2961 | offset += copy; |
| 2962 | to += copy; |
| 2963 | pos += copy; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2964 | } |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2965 | start = end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2966 | } |
Kris Katterjohn | 09a6266 | 2006-01-08 22:24:28 -0800 | [diff] [blame] | 2967 | BUG_ON(len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2968 | return csum; |
| 2969 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2970 | EXPORT_SYMBOL(skb_copy_and_csum_bits); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2971 | |
Cong Wang | 49f8e83 | 2018-11-08 14:05:42 -0800 | [diff] [blame] | 2972 | __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len) |
| 2973 | { |
| 2974 | __sum16 sum; |
| 2975 | |
| 2976 | sum = csum_fold(skb_checksum(skb, 0, len, skb->csum)); |
Cong Wang | 1464193 | 2018-11-26 09:31:26 -0800 | [diff] [blame] | 2977 | /* See comments in __skb_checksum_complete(). */ |
Cong Wang | 49f8e83 | 2018-11-08 14:05:42 -0800 | [diff] [blame] | 2978 | if (likely(!sum)) { |
| 2979 | if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && |
| 2980 | !skb->csum_complete_sw) |
Cong Wang | 7fe50ac | 2018-11-12 14:47:18 -0800 | [diff] [blame] | 2981 | netdev_rx_csum_fault(skb->dev, skb); |
Cong Wang | 49f8e83 | 2018-11-08 14:05:42 -0800 | [diff] [blame] | 2982 | } |
| 2983 | if (!skb_shared(skb)) |
| 2984 | skb->csum_valid = !sum; |
| 2985 | return sum; |
| 2986 | } |
| 2987 | EXPORT_SYMBOL(__skb_checksum_complete_head); |
| 2988 | |
Cong Wang | 1464193 | 2018-11-26 09:31:26 -0800 | [diff] [blame] | 2989 | /* This function assumes skb->csum already holds pseudo header's checksum, |
| 2990 | * which has been changed from the hardware checksum, for example, by |
| 2991 | * __skb_checksum_validate_complete(). And, the original skb->csum must |
| 2992 | * have been validated unsuccessfully for the CHECKSUM_COMPLETE case. |
| 2993 | * |
| 2994 | * It returns non-zero if the recomputed checksum is still invalid, otherwise |
| 2995 | * zero. The new checksum is stored back into skb->csum unless the skb is |
| 2996 | * shared. |
| 2997 | */ |
Cong Wang | 49f8e83 | 2018-11-08 14:05:42 -0800 | [diff] [blame] | 2998 | __sum16 __skb_checksum_complete(struct sk_buff *skb) |
| 2999 | { |
| 3000 | __wsum csum; |
| 3001 | __sum16 sum; |
| 3002 | |
| 3003 | csum = skb_checksum(skb, 0, skb->len, 0); |
| 3004 | |
Cong Wang | 49f8e83 | 2018-11-08 14:05:42 -0800 | [diff] [blame] | 3005 | sum = csum_fold(csum_add(skb->csum, csum)); |
Cong Wang | 1464193 | 2018-11-26 09:31:26 -0800 | [diff] [blame] | 3006 | /* This check is inverted, because we already know the hardware |
| 3007 | * checksum is invalid before calling this function. So, if the |
| 3008 | * re-computed checksum is valid instead, then we have a mismatch |
| 3009 | * between the original skb->csum and skb_checksum(). This means either |
| 3010 | * the original hardware checksum is incorrect or we screwed up skb->csum |
| 3011 | * when moving skb->data around. |
| 3012 | */ |
Cong Wang | 49f8e83 | 2018-11-08 14:05:42 -0800 | [diff] [blame] | 3013 | if (likely(!sum)) { |
| 3014 | if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && |
| 3015 | !skb->csum_complete_sw) |
Cong Wang | 7fe50ac | 2018-11-12 14:47:18 -0800 | [diff] [blame] | 3016 | netdev_rx_csum_fault(skb->dev, skb); |
Cong Wang | 49f8e83 | 2018-11-08 14:05:42 -0800 | [diff] [blame] | 3017 | } |
| 3018 | |
| 3019 | if (!skb_shared(skb)) { |
| 3020 | /* Save full packet checksum */ |
| 3021 | skb->csum = csum; |
| 3022 | skb->ip_summed = CHECKSUM_COMPLETE; |
| 3023 | skb->csum_complete_sw = 1; |
| 3024 | skb->csum_valid = !sum; |
| 3025 | } |
| 3026 | |
| 3027 | return sum; |
| 3028 | } |
| 3029 | EXPORT_SYMBOL(__skb_checksum_complete); |
| 3030 | |
Davide Caratti | 9617813 | 2017-05-18 15:44:37 +0200 | [diff] [blame] | 3031 | static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum) |
| 3032 | { |
| 3033 | net_warn_ratelimited( |
| 3034 | "%s: attempt to compute crc32c without libcrc32c.ko\n", |
| 3035 | __func__); |
| 3036 | return 0; |
| 3037 | } |
| 3038 | |
| 3039 | static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2, |
| 3040 | int offset, int len) |
| 3041 | { |
| 3042 | net_warn_ratelimited( |
| 3043 | "%s: attempt to compute crc32c without libcrc32c.ko\n", |
| 3044 | __func__); |
| 3045 | return 0; |
| 3046 | } |
| 3047 | |
| 3048 | static const struct skb_checksum_ops default_crc32c_ops = { |
| 3049 | .update = warn_crc32c_csum_update, |
| 3050 | .combine = warn_crc32c_csum_combine, |
| 3051 | }; |
| 3052 | |
| 3053 | const struct skb_checksum_ops *crc32c_csum_stub __read_mostly = |
| 3054 | &default_crc32c_ops; |
| 3055 | EXPORT_SYMBOL(crc32c_csum_stub); |
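
/* Illustrative sketch (hypothetical helper): computing a CRC32C over part
 * of an skb through the stub, roughly the way SCTP offload fallbacks do.
 * With libcrc32c loaded the stub ops are replaced by real ones; otherwise
 * the ratelimited warnings above fire and 0 is returned. The seed handling
 * used by real callers (~0 initial value and a final complement) is
 * omitted here for brevity.
 */
static inline __wsum example_skb_crc32c(const struct sk_buff *skb,
					int offset, int len)
{
	return __skb_checksum(skb, offset, len, 0, crc32c_csum_stub);
}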
| 3056 | |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 3057 | /** |
| 3058 | * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy() |
| 3059 | * @from: source buffer |
| 3060 | * |
| 3061 | * Calculates the amount of linear headroom needed in the 'to' skb passed |
| 3062 | * into skb_zerocopy(). |
| 3063 | */ |
| 3064 | unsigned int |
| 3065 | skb_zerocopy_headlen(const struct sk_buff *from) |
| 3066 | { |
| 3067 | unsigned int hlen = 0; |
| 3068 | |
| 3069 | if (!from->head_frag || |
| 3070 | skb_headlen(from) < L1_CACHE_BYTES || |
Pravin B Shelar | a17ad09 | 2021-07-15 16:59:00 -0700 | [diff] [blame] | 3071 | skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) { |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 3072 | hlen = skb_headlen(from); |
Pravin B Shelar | a17ad09 | 2021-07-15 16:59:00 -0700 | [diff] [blame] | 3073 | if (!hlen) |
| 3074 | hlen = from->len; |
| 3075 | } |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 3076 | |
| 3077 | if (skb_has_frag_list(from)) |
| 3078 | hlen = from->len; |
| 3079 | |
| 3080 | return hlen; |
| 3081 | } |
| 3082 | EXPORT_SYMBOL_GPL(skb_zerocopy_headlen); |
| 3083 | |
| 3084 | /** |
| 3085 | * skb_zerocopy - Zero copy skb to skb |
| 3086 | * @to: destination buffer |
Masanari Iida | 7fceb4d | 2014-01-29 01:05:28 +0900 | [diff] [blame] | 3087 | * @from: source buffer |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 3088 | * @len: number of bytes to copy from source buffer |
| 3089 | * @hlen: size of linear headroom in destination buffer |
| 3090 | * |
| 3091 | * Copies up to `len` bytes from `from` to `to` by creating references |
| 3092 | * to the frags in the source buffer. |
| 3093 | * |
| 3094 | * The `hlen` as calculated by skb_zerocopy_headlen() specifies the |
| 3095 | * headroom in the `to` buffer. |
Zoltan Kiss | 36d5fe6 | 2014-03-26 22:37:45 +0000 | [diff] [blame] | 3096 | * |
| 3097 | * Return value: |
| 3098 | * 0: everything is OK |
| 3099 | * -ENOMEM: couldn't orphan frags of @from due to lack of memory |
| 3100 | * -EFAULT: skb_copy_bits() found some problem with skb geometry |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 3101 | */ |
Zoltan Kiss | 36d5fe6 | 2014-03-26 22:37:45 +0000 | [diff] [blame] | 3102 | int |
| 3103 | skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen) |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 3104 | { |
| 3105 | int i, j = 0; |
| 3106 | int plen = 0; /* length of skb->head fragment */ |
Zoltan Kiss | 36d5fe6 | 2014-03-26 22:37:45 +0000 | [diff] [blame] | 3107 | int ret; |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 3108 | struct page *page; |
| 3109 | unsigned int offset; |
| 3110 | |
| 3111 | BUG_ON(!from->head_frag && !hlen); |
| 3112 | |
| 3113 | /* don't bother with small payloads */ |
Zoltan Kiss | 36d5fe6 | 2014-03-26 22:37:45 +0000 | [diff] [blame] | 3114 | if (len <= skb_tailroom(to)) |
| 3115 | return skb_copy_bits(from, 0, skb_put(to, len), len); |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 3116 | |
| 3117 | if (hlen) { |
Zoltan Kiss | 36d5fe6 | 2014-03-26 22:37:45 +0000 | [diff] [blame] | 3118 | ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen); |
| 3119 | if (unlikely(ret)) |
| 3120 | return ret; |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 3121 | len -= hlen; |
| 3122 | } else { |
| 3123 | plen = min_t(int, skb_headlen(from), len); |
| 3124 | if (plen) { |
| 3125 | page = virt_to_head_page(from->head); |
| 3126 | offset = from->data - (unsigned char *)page_address(page); |
| 3127 | __skb_fill_page_desc(to, 0, page, offset, plen); |
| 3128 | get_page(page); |
| 3129 | j = 1; |
| 3130 | len -= plen; |
| 3131 | } |
| 3132 | } |
| 3133 | |
| 3134 | to->truesize += len + plen; |
| 3135 | to->len += len + plen; |
| 3136 | to->data_len += len + plen; |
| 3137 | |
Zoltan Kiss | 36d5fe6 | 2014-03-26 22:37:45 +0000 | [diff] [blame] | 3138 | if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) { |
| 3139 | skb_tx_error(from); |
| 3140 | return -ENOMEM; |
| 3141 | } |
Willem de Bruijn | 1f8b977 | 2017-08-03 16:29:41 -0400 | [diff] [blame] | 3142 | skb_zerocopy_clone(to, from, GFP_ATOMIC); |
Zoltan Kiss | 36d5fe6 | 2014-03-26 22:37:45 +0000 | [diff] [blame] | 3143 | |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 3144 | for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { |
Matthew Wilcox (Oracle) | d8e18a5 | 2019-07-22 20:08:26 -0700 | [diff] [blame] | 3145 | int size; |
| 3146 | |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 3147 | if (!len) |
| 3148 | break; |
| 3149 | skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i]; |
Matthew Wilcox (Oracle) | d8e18a5 | 2019-07-22 20:08:26 -0700 | [diff] [blame] | 3150 | size = min_t(int, skb_frag_size(&skb_shinfo(to)->frags[j]), |
| 3151 | len); |
| 3152 | skb_frag_size_set(&skb_shinfo(to)->frags[j], size); |
| 3153 | len -= size; |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 3154 | skb_frag_ref(to, j); |
| 3155 | j++; |
| 3156 | } |
| 3157 | skb_shinfo(to)->nr_frags = j; |
Zoltan Kiss | 36d5fe6 | 2014-03-26 22:37:45 +0000 | [diff] [blame] | 3158 | |
| 3159 | return 0; |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 3160 | } |
| 3161 | EXPORT_SYMBOL_GPL(skb_zerocopy); |
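
/* Illustrative sketch (hypothetical caller): pairing skb_zerocopy_headlen()
 * with skb_zerocopy() to mirror a packet, similar in spirit to what
 * nfnetlink_queue does. Error handling is reduced to the minimum.
 */
static struct sk_buff *example_skb_mirror(struct sk_buff *from, gfp_t gfp)
{
	unsigned int hlen = skb_zerocopy_headlen(from);
	struct sk_buff *to = alloc_skb(hlen, gfp);

	if (!to)
		return NULL;
	if (skb_zerocopy(to, from, from->len, hlen)) {
		kfree_skb(to);
		return NULL;
	}
	return to;
}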
| 3162 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3163 | void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) |
| 3164 | { |
Al Viro | d3bc23e | 2006-11-14 21:24:49 -0800 | [diff] [blame] | 3165 | __wsum csum; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3166 | long csstart; |
| 3167 | |
Patrick McHardy | 84fa793 | 2006-08-29 16:44:56 -0700 | [diff] [blame] | 3168 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
Michał Mirosław | 55508d6 | 2010-12-14 15:24:08 +0000 | [diff] [blame] | 3169 | csstart = skb_checksum_start_offset(skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3170 | else |
| 3171 | csstart = skb_headlen(skb); |
| 3172 | |
Kris Katterjohn | 09a6266 | 2006-01-08 22:24:28 -0800 | [diff] [blame] | 3173 | BUG_ON(csstart > skb_headlen(skb)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3174 | |
Arnaldo Carvalho de Melo | d626f62 | 2007-03-27 18:55:52 -0300 | [diff] [blame] | 3175 | skb_copy_from_linear_data(skb, to, csstart); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3176 | |
| 3177 | csum = 0; |
| 3178 | if (csstart != skb->len) |
| 3179 | csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, |
Al Viro | 8d5930d | 2020-07-10 20:07:10 -0400 | [diff] [blame] | 3180 | skb->len - csstart); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3181 | |
Patrick McHardy | 84fa793 | 2006-08-29 16:44:56 -0700 | [diff] [blame] | 3182 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
Al Viro | ff1dcad | 2006-11-20 18:07:29 -0800 | [diff] [blame] | 3183 | long csstuff = csstart + skb->csum_offset; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3184 | |
Al Viro | d3bc23e | 2006-11-14 21:24:49 -0800 | [diff] [blame] | 3185 | *((__sum16 *)(to + csstuff)) = csum_fold(csum); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3186 | } |
| 3187 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3188 | EXPORT_SYMBOL(skb_copy_and_csum_dev); |
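
/* Illustrative sketch (hypothetical PIO-style driver): copying a frame
 * into a device bounce buffer while resolving a pending CHECKSUM_PARTIAL
 * in place. The destination must be able to hold skb->len bytes.
 */
static void example_xmit_copy(const struct sk_buff *skb, u8 *tx_buf)
{
	skb_copy_and_csum_dev(skb, tx_buf);
	/* tx_buf now contains the full frame with the checksum finalized */
}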
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3189 | |
| 3190 | /** |
| 3191 | * skb_dequeue - remove from the head of the queue |
| 3192 | * @list: list to dequeue from |
| 3193 | * |
| 3194 | * Remove the head of the list. The list lock is taken so the function |
| 3195 | * may be used safely with other locking list functions. The head item is |
| 3196 | * returned or %NULL if the list is empty. |
| 3197 | */ |
| 3198 | |
| 3199 | struct sk_buff *skb_dequeue(struct sk_buff_head *list) |
| 3200 | { |
| 3201 | unsigned long flags; |
| 3202 | struct sk_buff *result; |
| 3203 | |
| 3204 | spin_lock_irqsave(&list->lock, flags); |
| 3205 | result = __skb_dequeue(list); |
| 3206 | spin_unlock_irqrestore(&list->lock, flags); |
| 3207 | return result; |
| 3208 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3209 | EXPORT_SYMBOL(skb_dequeue); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3210 | |
| 3211 | /** |
| 3212 | * skb_dequeue_tail - remove from the tail of the queue |
| 3213 | * @list: list to dequeue from |
| 3214 | * |
| 3215 | * Remove the tail of the list. The list lock is taken so the function |
| 3216 | * may be used safely with other locking list functions. The tail item is |
| 3217 | * returned or %NULL if the list is empty. |
| 3218 | */ |
| 3219 | struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list) |
| 3220 | { |
| 3221 | unsigned long flags; |
| 3222 | struct sk_buff *result; |
| 3223 | |
| 3224 | spin_lock_irqsave(&list->lock, flags); |
| 3225 | result = __skb_dequeue_tail(list); |
| 3226 | spin_unlock_irqrestore(&list->lock, flags); |
| 3227 | return result; |
| 3228 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3229 | EXPORT_SYMBOL(skb_dequeue_tail); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3230 | |
| 3231 | /** |
| 3232 | * skb_queue_purge - empty a list |
| 3233 | * @list: list to empty |
| 3234 | * |
| 3235 | * Delete all buffers on an &sk_buff list. Each buffer is removed from |
| 3236 | * the list and one reference dropped. This function takes the list |
| 3237 | * lock and is atomic with respect to other list locking functions. |
| 3238 | */ |
| 3239 | void skb_queue_purge(struct sk_buff_head *list) |
| 3240 | { |
| 3241 | struct sk_buff *skb; |
| 3242 | while ((skb = skb_dequeue(list)) != NULL) |
| 3243 | kfree_skb(skb); |
| 3244 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3245 | EXPORT_SYMBOL(skb_queue_purge); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3246 | |
| 3247 | /** |
Yaogong Wang | 9f5afea | 2016-09-07 14:49:28 -0700 | [diff] [blame] | 3248 | * skb_rbtree_purge - empty a skb rbtree |
| 3249 | * @root: root of the rbtree to empty |
Peter Oskolkov | 385114d | 2018-08-02 23:34:38 +0000 | [diff] [blame] | 3250 | * Return value: the sum of truesizes of all purged skbs. |
Yaogong Wang | 9f5afea | 2016-09-07 14:49:28 -0700 | [diff] [blame] | 3251 | * |
| 3252 | * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from |
| 3253 | * the rbtree and one reference dropped. This function does not take |
| 3254 | * any lock. Synchronization should be handled by the caller (e.g., TCP |
| 3255 | * out-of-order queue is protected by the socket lock). |
| 3256 | */ |
Peter Oskolkov | 385114d | 2018-08-02 23:34:38 +0000 | [diff] [blame] | 3257 | unsigned int skb_rbtree_purge(struct rb_root *root) |
Yaogong Wang | 9f5afea | 2016-09-07 14:49:28 -0700 | [diff] [blame] | 3258 | { |
Eric Dumazet | 7c90584 | 2017-09-23 12:39:12 -0700 | [diff] [blame] | 3259 | struct rb_node *p = rb_first(root); |
Peter Oskolkov | 385114d | 2018-08-02 23:34:38 +0000 | [diff] [blame] | 3260 | unsigned int sum = 0; |
Yaogong Wang | 9f5afea | 2016-09-07 14:49:28 -0700 | [diff] [blame] | 3261 | |
Eric Dumazet | 7c90584 | 2017-09-23 12:39:12 -0700 | [diff] [blame] | 3262 | while (p) { |
| 3263 | struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode); |
| 3264 | |
| 3265 | p = rb_next(p); |
| 3266 | rb_erase(&skb->rbnode, root); |
Peter Oskolkov | 385114d | 2018-08-02 23:34:38 +0000 | [diff] [blame] | 3267 | sum += skb->truesize; |
Yaogong Wang | 9f5afea | 2016-09-07 14:49:28 -0700 | [diff] [blame] | 3268 | kfree_skb(skb); |
Eric Dumazet | 7c90584 | 2017-09-23 12:39:12 -0700 | [diff] [blame] | 3269 | } |
Peter Oskolkov | 385114d | 2018-08-02 23:34:38 +0000 | [diff] [blame] | 3270 | return sum; |
Yaogong Wang | 9f5afea | 2016-09-07 14:49:28 -0700 | [diff] [blame] | 3271 | } |
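
/* Illustrative sketch (assumption: a TCP-style out-of-order queue
 * protected by the socket lock). The returned truesize total lets the
 * caller adjust receive-memory accounting in one step.
 */
static inline void example_purge_ofo(struct sock *sk, struct rb_root *root)
{
	unsigned int freed = skb_rbtree_purge(root);

	atomic_sub(freed, &sk->sk_rmem_alloc);
}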
| 3272 | |
| 3273 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3274 | * skb_queue_head - queue a buffer at the list head |
| 3275 | * @list: list to use |
| 3276 | * @newsk: buffer to queue |
| 3277 | * |
| 3278 | * Queue a buffer at the start of the list. This function takes the |
| 3279 | * list lock and can be used safely with other locking &sk_buff |
| 3280 | * functions. |
| 3281 | * |
| 3282 | * A buffer cannot be placed on two lists at the same time. |
| 3283 | */ |
| 3284 | void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) |
| 3285 | { |
| 3286 | unsigned long flags; |
| 3287 | |
| 3288 | spin_lock_irqsave(&list->lock, flags); |
| 3289 | __skb_queue_head(list, newsk); |
| 3290 | spin_unlock_irqrestore(&list->lock, flags); |
| 3291 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3292 | EXPORT_SYMBOL(skb_queue_head); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3293 | |
| 3294 | /** |
| 3295 | * skb_queue_tail - queue a buffer at the list tail |
| 3296 | * @list: list to use |
| 3297 | * @newsk: buffer to queue |
| 3298 | * |
| 3299 | * Queue a buffer at the tail of the list. This function takes the |
| 3300 | * list lock and can be used safely with other locking &sk_buff |
| 3301 | * functions. |
| 3302 | * |
| 3303 | * A buffer cannot be placed on two lists at the same time. |
| 3304 | */ |
| 3305 | void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) |
| 3306 | { |
| 3307 | unsigned long flags; |
| 3308 | |
| 3309 | spin_lock_irqsave(&list->lock, flags); |
| 3310 | __skb_queue_tail(list, newsk); |
| 3311 | spin_unlock_irqrestore(&list->lock, flags); |
| 3312 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3313 | EXPORT_SYMBOL(skb_queue_tail); |
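
/* Illustrative sketch (hypothetical driver-private queue): the _irqsave
 * locking inside these helpers makes the produce/consume pattern below
 * safe from both process and interrupt context. The queue is assumed to
 * have been set up with skb_queue_head_init().
 */
static void example_queue_usage(struct sk_buff_head *q, struct sk_buff *skb)
{
	struct sk_buff *next;

	skb_queue_tail(q, skb);		/* producer side */
	next = skb_dequeue(q);		/* consumer side, NULL if empty */
	if (next)
		kfree_skb(next);
	skb_queue_purge(q);		/* drop anything still queued */
}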
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 3314 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3315 | /** |
| 3316 | * skb_unlink - remove a buffer from a list |
| 3317 | * @skb: buffer to remove |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 3318 | * @list: list to use |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3319 | * |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 3320 | * Remove a packet from a list. The list locks are taken and this |
| 3321 | * function is atomic with respect to other list locked calls |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3322 | * |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 3323 | * You must know what list the SKB is on. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3324 | */ |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 3325 | void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3326 | { |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 3327 | unsigned long flags; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3328 | |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 3329 | spin_lock_irqsave(&list->lock, flags); |
| 3330 | __skb_unlink(skb, list); |
| 3331 | spin_unlock_irqrestore(&list->lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3332 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3333 | EXPORT_SYMBOL(skb_unlink); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3334 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3335 | /** |
| 3336 | * skb_append - append a buffer |
| 3337 | * @old: buffer to insert after |
| 3338 | * @newsk: buffer to insert |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 3339 | * @list: list to use |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3340 | * |
| 3341 | * Place a packet after a given packet in a list. The list locks are taken |
| 3342 | * and this function is atomic with respect to other list locked calls. |
| 3343 | * A buffer cannot be placed on two lists at the same time. |
| 3344 | */ |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 3345 | void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3346 | { |
| 3347 | unsigned long flags; |
| 3348 | |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 3349 | spin_lock_irqsave(&list->lock, flags); |
Gerrit Renker | 7de6c03 | 2008-04-14 00:05:09 -0700 | [diff] [blame] | 3350 | __skb_queue_after(list, old, newsk); |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 3351 | spin_unlock_irqrestore(&list->lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3352 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3353 | EXPORT_SYMBOL(skb_append); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3354 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3355 | static inline void skb_split_inside_header(struct sk_buff *skb, |
| 3356 | struct sk_buff* skb1, |
| 3357 | const u32 len, const int pos) |
| 3358 | { |
| 3359 | int i; |
| 3360 | |
Arnaldo Carvalho de Melo | d626f62 | 2007-03-27 18:55:52 -0300 | [diff] [blame] | 3361 | skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), |
| 3362 | pos - len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3363 | /* And move data appendix as is. */ |
| 3364 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) |
| 3365 | skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; |
| 3366 | |
| 3367 | skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; |
| 3368 | skb_shinfo(skb)->nr_frags = 0; |
| 3369 | skb1->data_len = skb->data_len; |
| 3370 | skb1->len += skb1->data_len; |
| 3371 | skb->data_len = 0; |
| 3372 | skb->len = len; |
Arnaldo Carvalho de Melo | 27a884d | 2007-04-19 20:29:13 -0700 | [diff] [blame] | 3373 | skb_set_tail_pointer(skb, len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3374 | } |
| 3375 | |
| 3376 | static inline void skb_split_no_header(struct sk_buff *skb, |
| 3377 | struct sk_buff* skb1, |
| 3378 | const u32 len, int pos) |
| 3379 | { |
| 3380 | int i, k = 0; |
| 3381 | const int nfrags = skb_shinfo(skb)->nr_frags; |
| 3382 | |
| 3383 | skb_shinfo(skb)->nr_frags = 0; |
| 3384 | skb1->len = skb1->data_len = skb->len - len; |
| 3385 | skb->len = len; |
| 3386 | skb->data_len = len - pos; |
| 3387 | |
| 3388 | for (i = 0; i < nfrags; i++) { |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 3389 | int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3390 | |
| 3391 | if (pos + size > len) { |
| 3392 | skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; |
| 3393 | |
| 3394 | if (pos < len) { |
| 3395 | /* Split frag. |
| 3396 | * We have two variants in this case: |
| 3397 | * 1. Move the whole frag to the second |
| 3398 | * part, if it is possible. F.e. |
| 3399 | * this approach is mandatory for TUX, |
| 3400 | * where splitting is expensive. |
| 3401 | * 2. Split the frag accurately. This is what we do. |
| 3402 | */ |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 3403 | skb_frag_ref(skb, i); |
Jonathan Lemon | b54c9d5 | 2019-07-30 07:40:33 -0700 | [diff] [blame] | 3404 | skb_frag_off_add(&skb_shinfo(skb1)->frags[0], len - pos); |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 3405 | skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos); |
| 3406 | skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3407 | skb_shinfo(skb)->nr_frags++; |
| 3408 | } |
| 3409 | k++; |
| 3410 | } else |
| 3411 | skb_shinfo(skb)->nr_frags++; |
| 3412 | pos += size; |
| 3413 | } |
| 3414 | skb_shinfo(skb1)->nr_frags = k; |
| 3415 | } |
| 3416 | |
| 3417 | /** |
| 3418 | * skb_split - Split fragmented skb to two parts at length len. |
| 3419 | * @skb: the buffer to split |
| 3420 | * @skb1: the buffer to receive the second part |
| 3421 | * @len: new length for skb |
| 3422 | */ |
| 3423 | void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) |
| 3424 | { |
| 3425 | int pos = skb_headlen(skb); |
| 3426 | |
Jonathan Lemon | 06b4feb | 2021-01-06 14:18:38 -0800 | [diff] [blame] | 3427 | skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & SKBFL_SHARED_FRAG; |
Willem de Bruijn | 1f8b977 | 2017-08-03 16:29:41 -0400 | [diff] [blame] | 3428 | skb_zerocopy_clone(skb1, skb, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3429 | if (len < pos) /* Split line is inside header. */ |
| 3430 | skb_split_inside_header(skb, skb1, len, pos); |
| 3431 | else /* Second chunk has no header, nothing to copy. */ |
| 3432 | skb_split_no_header(skb, skb1, len, pos); |
| 3433 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3434 | EXPORT_SYMBOL(skb_split); |
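
/* Illustrative sketch (hypothetical TSO-style fragmentation): trim @skb
 * to @len bytes and return the remainder in a fresh buffer. Allocating
 * skb_headlen() bytes guarantees enough tailroom for the inside-header
 * split case; real callers size this more tightly.
 */
static struct sk_buff *example_split_off(struct sk_buff *skb, u32 len,
					 gfp_t gfp)
{
	struct sk_buff *skb1 = alloc_skb(skb_headlen(skb), gfp);

	if (!skb1)
		return NULL;
	skb_split(skb, skb1, len);	/* skb keeps the first len bytes */
	return skb1;
}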
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3435 | |
Ilpo Järvinen | 9f782db | 2008-11-25 13:57:01 -0800 | [diff] [blame] | 3436 | /* Shifting from/to a cloned skb is a no-go. |
| 3437 | * |
| 3438 | * Caller cannot keep skb_shinfo related pointers past calling here! |
| 3439 | */ |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3440 | static int skb_prepare_for_shift(struct sk_buff *skb) |
| 3441 | { |
Marco Elver | 097b914 | 2021-02-01 17:04:20 +0100 | [diff] [blame] | 3442 | int ret = 0; |
| 3443 | |
| 3444 | if (skb_cloned(skb)) { |
| 3445 | /* Save and restore truesize: pskb_expand_head() may reallocate |
| 3446 | * memory whose ksize() differs from the original allocation's, but we |
| 3447 | * cannot change truesize at this point. |
| 3448 | */ |
| 3449 | unsigned int save_truesize = skb->truesize; |
| 3450 | |
| 3451 | ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); |
| 3452 | skb->truesize = save_truesize; |
| 3453 | } |
| 3454 | return ret; |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3455 | } |
| 3456 | |
| 3457 | /** |
| 3458 | * skb_shift - Shifts paged data partially from skb to another |
| 3459 | * @tgt: buffer into which tail data gets added |
| 3460 | * @skb: buffer from which the paged data comes from |
| 3461 | * @shiftlen: shift up to this many bytes |
| 3462 | * |
| 3463 | * Attempts to shift up to shiftlen worth of bytes, which may be less than |
Feng King | 20e994a | 2011-11-21 01:47:11 +0000 | [diff] [blame] | 3464 | * the length of the skb, from skb to tgt. Returns the number of bytes shifted. |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3465 | * It's up to the caller to free skb if everything was shifted. |
| 3466 | * |
| 3467 | * If @tgt runs out of frags, the whole operation is aborted. |
| 3468 | * |
| 3469 | * @skb may contain nothing but paged data, while @tgt is allowed |
| 3470 | * to have non-paged data as well. |
| 3471 | * |
| 3472 | * TODO: full sized shift could be optimized but that would need |
| 3473 | * specialized skb free'er to handle frags without up-to-date nr_frags. |
| 3474 | */ |
| 3475 | int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) |
| 3476 | { |
| 3477 | int from, to, merge, todo; |
Matthew Wilcox (Oracle) | d8e18a5 | 2019-07-22 20:08:26 -0700 | [diff] [blame] | 3478 | skb_frag_t *fragfrom, *fragto; |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3479 | |
| 3480 | BUG_ON(shiftlen > skb->len); |
Eric Dumazet | f8071cd | 2016-11-15 12:51:50 -0800 | [diff] [blame] | 3481 | |
| 3482 | if (skb_headlen(skb)) |
| 3483 | return 0; |
Willem de Bruijn | 1f8b977 | 2017-08-03 16:29:41 -0400 | [diff] [blame] | 3484 | if (skb_zcopy(tgt) || skb_zcopy(skb)) |
| 3485 | return 0; |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3486 | |
| 3487 | todo = shiftlen; |
| 3488 | from = 0; |
| 3489 | to = skb_shinfo(tgt)->nr_frags; |
| 3490 | fragfrom = &skb_shinfo(skb)->frags[from]; |
| 3491 | |
| 3492 | /* Actual merge is delayed until the point when we know we can |
| 3493 | * commit all, so that we don't have to undo partial changes |
| 3494 | */ |
| 3495 | if (!to || |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 3496 | !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom), |
Jonathan Lemon | b54c9d5 | 2019-07-30 07:40:33 -0700 | [diff] [blame] | 3497 | skb_frag_off(fragfrom))) { |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3498 | merge = -1; |
| 3499 | } else { |
| 3500 | merge = to - 1; |
| 3501 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 3502 | todo -= skb_frag_size(fragfrom); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3503 | if (todo < 0) { |
| 3504 | if (skb_prepare_for_shift(skb) || |
| 3505 | skb_prepare_for_shift(tgt)) |
| 3506 | return 0; |
| 3507 | |
Ilpo Järvinen | 9f782db | 2008-11-25 13:57:01 -0800 | [diff] [blame] | 3508 | /* All previous frag pointers might be stale! */ |
| 3509 | fragfrom = &skb_shinfo(skb)->frags[from]; |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3510 | fragto = &skb_shinfo(tgt)->frags[merge]; |
| 3511 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 3512 | skb_frag_size_add(fragto, shiftlen); |
| 3513 | skb_frag_size_sub(fragfrom, shiftlen); |
Jonathan Lemon | b54c9d5 | 2019-07-30 07:40:33 -0700 | [diff] [blame] | 3514 | skb_frag_off_add(fragfrom, shiftlen); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3515 | |
| 3516 | goto onlymerged; |
| 3517 | } |
| 3518 | |
| 3519 | from++; |
| 3520 | } |
| 3521 | |
| 3522 | /* Skip full, not-fitting skb to avoid expensive operations */ |
| 3523 | if ((shiftlen == skb->len) && |
| 3524 | (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) |
| 3525 | return 0; |
| 3526 | |
| 3527 | if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) |
| 3528 | return 0; |
| 3529 | |
| 3530 | while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { |
| 3531 | if (to == MAX_SKB_FRAGS) |
| 3532 | return 0; |
| 3533 | |
| 3534 | fragfrom = &skb_shinfo(skb)->frags[from]; |
| 3535 | fragto = &skb_shinfo(tgt)->frags[to]; |
| 3536 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 3537 | if (todo >= skb_frag_size(fragfrom)) { |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3538 | *fragto = *fragfrom; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 3539 | todo -= skb_frag_size(fragfrom); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3540 | from++; |
| 3541 | to++; |
| 3542 | |
| 3543 | } else { |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 3544 | __skb_frag_ref(fragfrom); |
Jonathan Lemon | b54c9d5 | 2019-07-30 07:40:33 -0700 | [diff] [blame] | 3545 | skb_frag_page_copy(fragto, fragfrom); |
| 3546 | skb_frag_off_copy(fragto, fragfrom); |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 3547 | skb_frag_size_set(fragto, todo); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3548 | |
Jonathan Lemon | b54c9d5 | 2019-07-30 07:40:33 -0700 | [diff] [blame] | 3549 | skb_frag_off_add(fragfrom, todo); |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 3550 | skb_frag_size_sub(fragfrom, todo); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3551 | todo = 0; |
| 3552 | |
| 3553 | to++; |
| 3554 | break; |
| 3555 | } |
| 3556 | } |
| 3557 | |
| 3558 | /* Ready to "commit" this state change to tgt */ |
| 3559 | skb_shinfo(tgt)->nr_frags = to; |
| 3560 | |
| 3561 | if (merge >= 0) { |
| 3562 | fragfrom = &skb_shinfo(skb)->frags[0]; |
| 3563 | fragto = &skb_shinfo(tgt)->frags[merge]; |
| 3564 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 3565 | skb_frag_size_add(fragto, skb_frag_size(fragfrom)); |
Ilias Apalodimas | 6a5bcd8 | 2021-06-07 21:02:38 +0200 | [diff] [blame] | 3566 | __skb_frag_unref(fragfrom, skb->pp_recycle); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3567 | } |
| 3568 | |
| 3569 | /* Reposition in the original skb */ |
| 3570 | to = 0; |
| 3571 | while (from < skb_shinfo(skb)->nr_frags) |
| 3572 | skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; |
| 3573 | skb_shinfo(skb)->nr_frags = to; |
| 3574 | |
| 3575 | BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); |
| 3576 | |
| 3577 | onlymerged: |
| 3578 | /* Most likely the tgt won't ever need its checksum anymore; skb, on |
| 3579 | * the other hand, might need it if it has to be resent. |
| 3580 | */ |
| 3581 | tgt->ip_summed = CHECKSUM_PARTIAL; |
| 3582 | skb->ip_summed = CHECKSUM_PARTIAL; |
| 3583 | |
| 3584 | /* Yak, is it really working this way? Some helper please? */ |
| 3585 | skb->len -= shiftlen; |
| 3586 | skb->data_len -= shiftlen; |
| 3587 | skb->truesize -= shiftlen; |
| 3588 | tgt->len += shiftlen; |
| 3589 | tgt->data_len += shiftlen; |
| 3590 | tgt->truesize += shiftlen; |
| 3591 | |
| 3592 | return shiftlen; |
| 3593 | } |
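
/* Illustrative sketch (assumption: @skb is already unlinked and holds
 * only paged data, as TCP's retransmit-collapse code arranges). On a
 * full shift the source ends up empty and can be freed.
 */
static void example_coalesce(struct sk_buff *tgt, struct sk_buff *skb)
{
	int want = skb->len;

	if (skb_shift(tgt, skb, want) == want)
		kfree_skb(skb);		/* everything moved into tgt */
}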
| 3594 | |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3595 | /** |
| 3596 | * skb_prepare_seq_read - Prepare a sequential read of skb data |
| 3597 | * @skb: the buffer to read |
| 3598 | * @from: lower offset of data to be read |
| 3599 | * @to: upper offset of data to be read |
| 3600 | * @st: state variable |
| 3601 | * |
| 3602 | * Initializes the specified state variable. Must be called before |
| 3603 | * invoking skb_seq_read() for the first time. |
| 3604 | */ |
| 3605 | void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, |
| 3606 | unsigned int to, struct skb_seq_state *st) |
| 3607 | { |
| 3608 | st->lower_offset = from; |
| 3609 | st->upper_offset = to; |
| 3610 | st->root_skb = st->cur_skb = skb; |
| 3611 | st->frag_idx = st->stepped_offset = 0; |
| 3612 | st->frag_data = NULL; |
Willem de Bruijn | 97550f6 | 2021-01-09 17:18:33 -0500 | [diff] [blame] | 3613 | st->frag_off = 0; |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3614 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3615 | EXPORT_SYMBOL(skb_prepare_seq_read); |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3616 | |
| 3617 | /** |
| 3618 | * skb_seq_read - Sequentially read skb data |
| 3619 | * @consumed: number of bytes consumed by the caller so far |
| 3620 | * @data: destination pointer for data to be returned |
| 3621 | * @st: state variable |
| 3622 | * |
Mathias Krause | bc32383 | 2013-11-07 14:18:26 +0100 | [diff] [blame] | 3623 | * Reads a block of skb data at @consumed relative to the |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3624 | * lower offset specified to skb_prepare_seq_read(). Assigns |
Mathias Krause | bc32383 | 2013-11-07 14:18:26 +0100 | [diff] [blame] | 3625 | * the head of the data block to @data and returns the length |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3626 | * of the block or 0 if the end of the skb data or the upper |
| 3627 | * offset has been reached. |
| 3628 | * |
| 3629 | * The caller is not required to consume all of the data |
Mathias Krause | bc32383 | 2013-11-07 14:18:26 +0100 | [diff] [blame] | 3630 | * returned, i.e. @consumed is typically set to the number |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3631 | * of bytes already consumed and the next call to |
| 3632 | * skb_seq_read() will return the remaining part of the block. |
| 3633 | * |
Lucas De Marchi | 25985ed | 2011-03-30 22:57:33 -0300 | [diff] [blame] | 3634 | * Note 1: The size of each block of data returned can be arbitrary; |
Masanari Iida | e793c0f | 2014-09-04 23:44:36 +0900 | [diff] [blame] | 3635 | * this limitation is the cost of zerocopy sequential |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3636 | * reads of potentially non-linear data. |
| 3637 | * |
Randy Dunlap | bc2cda1 | 2008-02-13 15:03:25 -0800 | [diff] [blame] | 3638 | * Note 2: Fragment lists within fragments are not implemented |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3639 | * at the moment, state->root_skb could be replaced with |
| 3640 | * a stack for this purpose. |
| 3641 | */ |
| 3642 | unsigned int skb_seq_read(unsigned int consumed, const u8 **data, |
| 3643 | struct skb_seq_state *st) |
| 3644 | { |
| 3645 | unsigned int block_limit, abs_offset = consumed + st->lower_offset; |
| 3646 | skb_frag_t *frag; |
| 3647 | |
Wedson Almeida Filho | aeb193e | 2013-06-23 23:33:48 -0700 | [diff] [blame] | 3648 | if (unlikely(abs_offset >= st->upper_offset)) { |
| 3649 | if (st->frag_data) { |
| 3650 | kunmap_atomic(st->frag_data); |
| 3651 | st->frag_data = NULL; |
| 3652 | } |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3653 | return 0; |
Wedson Almeida Filho | aeb193e | 2013-06-23 23:33:48 -0700 | [diff] [blame] | 3654 | } |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3655 | |
| 3656 | next_skb: |
Herbert Xu | 95e3b24 | 2009-01-29 16:07:52 -0800 | [diff] [blame] | 3657 | block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3658 | |
Thomas Chenault | 995b337 | 2009-05-18 21:43:27 -0700 | [diff] [blame] | 3659 | if (abs_offset < block_limit && !st->frag_data) { |
Herbert Xu | 95e3b24 | 2009-01-29 16:07:52 -0800 | [diff] [blame] | 3660 | *data = st->cur_skb->data + (abs_offset - st->stepped_offset); |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3661 | return block_limit - abs_offset; |
| 3662 | } |
| 3663 | |
| 3664 | if (st->frag_idx == 0 && !st->frag_data) |
| 3665 | st->stepped_offset += skb_headlen(st->cur_skb); |
| 3666 | |
| 3667 | while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { |
Willem de Bruijn | 97550f6 | 2021-01-09 17:18:33 -0500 | [diff] [blame] | 3668 | unsigned int pg_idx, pg_off, pg_sz; |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3669 | |
Willem de Bruijn | 97550f6 | 2021-01-09 17:18:33 -0500 | [diff] [blame] | 3670 | frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; |
| 3671 | |
| 3672 | pg_idx = 0; |
| 3673 | pg_off = skb_frag_off(frag); |
| 3674 | pg_sz = skb_frag_size(frag); |
| 3675 | |
| 3676 | if (skb_frag_must_loop(skb_frag_page(frag))) { |
| 3677 | pg_idx = (pg_off + st->frag_off) >> PAGE_SHIFT; |
| 3678 | pg_off = offset_in_page(pg_off + st->frag_off); |
| 3679 | pg_sz = min_t(unsigned int, pg_sz - st->frag_off, |
| 3680 | PAGE_SIZE - pg_off); |
| 3681 | } |
| 3682 | |
| 3683 | block_limit = pg_sz + st->stepped_offset; |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3684 | if (abs_offset < block_limit) { |
| 3685 | if (!st->frag_data) |
Willem de Bruijn | 97550f6 | 2021-01-09 17:18:33 -0500 | [diff] [blame] | 3686 | st->frag_data = kmap_atomic(skb_frag_page(frag) + pg_idx); |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3687 | |
Willem de Bruijn | 97550f6 | 2021-01-09 17:18:33 -0500 | [diff] [blame] | 3688 | *data = (u8 *)st->frag_data + pg_off + |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3689 | (abs_offset - st->stepped_offset); |
| 3690 | |
| 3691 | return block_limit - abs_offset; |
| 3692 | } |
| 3693 | |
| 3694 | if (st->frag_data) { |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 3695 | kunmap_atomic(st->frag_data); |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3696 | st->frag_data = NULL; |
| 3697 | } |
| 3698 | |
Willem de Bruijn | 97550f6 | 2021-01-09 17:18:33 -0500 | [diff] [blame] | 3699 | st->stepped_offset += pg_sz; |
| 3700 | st->frag_off += pg_sz; |
| 3701 | if (st->frag_off == skb_frag_size(frag)) { |
| 3702 | st->frag_off = 0; |
| 3703 | st->frag_idx++; |
| 3704 | } |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3705 | } |
| 3706 | |
Olaf Kirch | 5b5a60d | 2007-06-23 23:11:52 -0700 | [diff] [blame] | 3707 | if (st->frag_data) { |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 3708 | kunmap_atomic(st->frag_data); |
Olaf Kirch | 5b5a60d | 2007-06-23 23:11:52 -0700 | [diff] [blame] | 3709 | st->frag_data = NULL; |
| 3710 | } |
| 3711 | |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 3712 | if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) { |
Shyam Iyer | 71b3346 | 2009-01-29 16:12:42 -0800 | [diff] [blame] | 3713 | st->cur_skb = skb_shinfo(st->root_skb)->frag_list; |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3714 | st->frag_idx = 0; |
| 3715 | goto next_skb; |
Shyam Iyer | 71b3346 | 2009-01-29 16:12:42 -0800 | [diff] [blame] | 3716 | } else if (st->cur_skb->next) { |
| 3717 | st->cur_skb = st->cur_skb->next; |
Herbert Xu | 95e3b24 | 2009-01-29 16:07:52 -0800 | [diff] [blame] | 3718 | st->frag_idx = 0; |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3719 | goto next_skb; |
| 3720 | } |
| 3721 | |
| 3722 | return 0; |
| 3723 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3724 | EXPORT_SYMBOL(skb_seq_read); |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3725 | |
| 3726 | /** |
| 3727 | * skb_abort_seq_read - Abort a sequential read of skb data |
| 3728 | * @st: state variable |
| 3729 | * |
| 3730 | * Must be called if the sequential read was abandoned before |
| 3731 | * skb_seq_read() returned 0. |
| 3732 | */ |
| 3733 | void skb_abort_seq_read(struct skb_seq_state *st) |
| 3734 | { |
| 3735 | if (st->frag_data) |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 3736 | kunmap_atomic(st->frag_data); |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3737 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3738 | EXPORT_SYMBOL(skb_abort_seq_read); |
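
/* Illustrative sketch (a typical consumer loop): visit every byte of a
 * possibly non-linear skb without linearizing it. If the loop were
 * abandoned before skb_seq_read() returned 0, skb_abort_seq_read()
 * would be required to release the mapping.
 */
static void example_seq_walk(struct sk_buff *skb)
{
	struct skb_seq_state st;
	unsigned int consumed = 0, len;
	const u8 *data;

	skb_prepare_seq_read(skb, 0, skb->len, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0)
		consumed += len;	/* process 'len' bytes at 'data' */
}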
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3739 | |
Thomas Graf | 3fc7e8a | 2005-06-23 21:00:17 -0700 | [diff] [blame] | 3740 | #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) |
| 3741 | |
| 3742 | static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, |
| 3743 | struct ts_config *conf, |
| 3744 | struct ts_state *state) |
| 3745 | { |
| 3746 | return skb_seq_read(offset, text, TS_SKB_CB(state)); |
| 3747 | } |
| 3748 | |
| 3749 | static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) |
| 3750 | { |
| 3751 | skb_abort_seq_read(TS_SKB_CB(state)); |
| 3752 | } |
| 3753 | |
| 3754 | /** |
| 3755 | * skb_find_text - Find a text pattern in skb data |
| 3756 | * @skb: the buffer to look in |
| 3757 | * @from: search offset |
| 3758 | * @to: search limit |
| 3759 | * @config: textsearch configuration |
Thomas Graf | 3fc7e8a | 2005-06-23 21:00:17 -0700 | [diff] [blame] | 3760 | * |
| 3761 | * Finds a pattern in the skb data according to the specified |
| 3762 | * textsearch configuration. Use textsearch_next() to retrieve |
| 3763 | * subsequent occurrences of the pattern. Returns the offset |
| 3764 | * to the first occurrence or UINT_MAX if no match was found. |
| 3765 | */ |
| 3766 | unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, |
Bojan Prtvar | 059a244 | 2015-02-22 11:46:35 +0100 | [diff] [blame] | 3767 | unsigned int to, struct ts_config *config) |
Thomas Graf | 3fc7e8a | 2005-06-23 21:00:17 -0700 | [diff] [blame] | 3768 | { |
Bojan Prtvar | 059a244 | 2015-02-22 11:46:35 +0100 | [diff] [blame] | 3769 | struct ts_state state; |
Phil Oester | f72b948 | 2006-06-26 00:00:57 -0700 | [diff] [blame] | 3770 | unsigned int ret; |
| 3771 | |
Willem de Bruijn | b228c9b | 2021-03-01 15:09:44 +0000 | [diff] [blame] | 3772 | BUILD_BUG_ON(sizeof(struct skb_seq_state) > sizeof(state.cb)); |
| 3773 | |
Thomas Graf | 3fc7e8a | 2005-06-23 21:00:17 -0700 | [diff] [blame] | 3774 | config->get_next_block = skb_ts_get_next_block; |
| 3775 | config->finish = skb_ts_finish; |
| 3776 | |
Bojan Prtvar | 059a244 | 2015-02-22 11:46:35 +0100 | [diff] [blame] | 3777 | skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state)); |
Thomas Graf | 3fc7e8a | 2005-06-23 21:00:17 -0700 | [diff] [blame] | 3778 | |
Bojan Prtvar | 059a244 | 2015-02-22 11:46:35 +0100 | [diff] [blame] | 3779 | ret = textsearch_find(config, &state); |
Phil Oester | f72b948 | 2006-06-26 00:00:57 -0700 | [diff] [blame] | 3780 | return (ret <= to - from ? ret : UINT_MAX); |
Thomas Graf | 3fc7e8a | 2005-06-23 21:00:17 -0700 | [diff] [blame] | 3781 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3782 | EXPORT_SYMBOL(skb_find_text); |
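
/* Illustrative sketch (hypothetical pattern match): search an skb for a
 * literal string with the Boyer-Moore textsearch backend, the same way
 * the netfilter string match drives this API.
 */
static unsigned int example_find_http(struct sk_buff *skb)
{
	struct ts_config *conf;
	unsigned int pos;

	conf = textsearch_prepare("bm", "HTTP", 4, GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return UINT_MAX;
	pos = skb_find_text(skb, 0, skb->len, conf);
	textsearch_destroy(conf);
	return pos;
}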
Thomas Graf | 3fc7e8a | 2005-06-23 21:00:17 -0700 | [diff] [blame] | 3783 | |
Hannes Frederic Sowa | be12a1f | 2015-05-21 16:59:58 +0200 | [diff] [blame] | 3784 | int skb_append_pagefrags(struct sk_buff *skb, struct page *page, |
| 3785 | int offset, size_t size) |
| 3786 | { |
| 3787 | int i = skb_shinfo(skb)->nr_frags; |
| 3788 | |
| 3789 | if (skb_can_coalesce(skb, i, page, offset)) { |
| 3790 | skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size); |
| 3791 | } else if (i < MAX_SKB_FRAGS) { |
| 3792 | get_page(page); |
| 3793 | skb_fill_page_desc(skb, i, page, offset, size); |
| 3794 | } else { |
| 3795 | return -EMSGSIZE; |
| 3796 | } |
| 3797 | |
| 3798 | return 0; |
| 3799 | } |
| 3800 | EXPORT_SYMBOL_GPL(skb_append_pagefrags); |
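
/* Illustrative sketch (hypothetical sendpage-style path): the helper
 * only installs or extends the frag, so the caller owns the length and
 * truesize accounting, as real users of this API do.
 */
static int example_append_page(struct sk_buff *skb, struct page *page,
			       int offset, size_t size)
{
	int err = skb_append_pagefrags(skb, page, offset, size);

	if (!err) {
		skb->len += size;
		skb->data_len += size;
		skb->truesize += size;
	}
	return err;
}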
| 3801 | |
Herbert Xu | cbb042f | 2006-03-20 22:43:56 -0800 | [diff] [blame] | 3802 | /** |
| 3803 | * skb_pull_rcsum - pull skb and update receive checksum |
| 3804 | * @skb: buffer to update |
Herbert Xu | cbb042f | 2006-03-20 22:43:56 -0800 | [diff] [blame] | 3805 | * @len: length of data pulled |
| 3806 | * |
| 3807 | * This function performs an skb_pull on the packet and updates |
Urs Thuermann | fee54fa | 2008-02-12 22:03:25 -0800 | [diff] [blame] | 3808 | * the CHECKSUM_COMPLETE checksum. It should be used in |
Patrick McHardy | 84fa793 | 2006-08-29 16:44:56 -0700 | [diff] [blame] | 3809 | * receive path processing instead of skb_pull unless you know |
| 3810 | * that the checksum difference is zero (e.g., a valid IP header) |
| 3811 | * or you are setting ip_summed to CHECKSUM_NONE. |
Herbert Xu | cbb042f | 2006-03-20 22:43:56 -0800 | [diff] [blame] | 3812 | */ |
Johannes Berg | af72868 | 2017-06-16 14:29:22 +0200 | [diff] [blame] | 3813 | void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) |
Herbert Xu | cbb042f | 2006-03-20 22:43:56 -0800 | [diff] [blame] | 3814 | { |
Pravin B Shelar | 31b33df | 2015-09-28 17:24:25 -0700 | [diff] [blame] | 3815 | unsigned char *data = skb->data; |
| 3816 | |
Herbert Xu | cbb042f | 2006-03-20 22:43:56 -0800 | [diff] [blame] | 3817 | BUG_ON(len > skb->len); |
Pravin B Shelar | 31b33df | 2015-09-28 17:24:25 -0700 | [diff] [blame] | 3818 | __skb_pull(skb, len); |
| 3819 | skb_postpull_rcsum(skb, data, len); |
| 3820 | return skb->data; |
Herbert Xu | cbb042f | 2006-03-20 22:43:56 -0800 | [diff] [blame] | 3821 | } |
Arnaldo Carvalho de Melo | f94691a | 2006-03-20 22:47:55 -0800 | [diff] [blame] | 3822 | EXPORT_SYMBOL_GPL(skb_pull_rcsum); |
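
/* Illustrative sketch (hypothetical 4-byte tag strip on receive,
 * analogous to VLAN untagging): skb_pull_rcsum() keeps a
 * CHECKSUM_COMPLETE value coherent across the pull, where a plain
 * skb_pull() would silently corrupt it.
 */
static int example_strip_tag(struct sk_buff *skb)
{
	if (!pskb_may_pull(skb, 4))
		return -EINVAL;
	skb_pull_rcsum(skb, 4);
	return 0;
}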
| 3823 | |
Yonghong Song | 13acc94 | 2018-03-21 16:31:03 -0700 | [diff] [blame] | 3824 | static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb) |
| 3825 | { |
| 3826 | skb_frag_t head_frag; |
| 3827 | struct page *page; |
| 3828 | |
| 3829 | page = virt_to_head_page(frag_skb->head); |
Matthew Wilcox (Oracle) | d8e18a5 | 2019-07-22 20:08:26 -0700 | [diff] [blame] | 3830 | __skb_frag_set_page(&head_frag, page); |
Jonathan Lemon | b54c9d5 | 2019-07-30 07:40:33 -0700 | [diff] [blame] | 3831 | skb_frag_off_set(&head_frag, frag_skb->data - |
| 3832 | (unsigned char *)page_address(page)); |
Matthew Wilcox (Oracle) | d8e18a5 | 2019-07-22 20:08:26 -0700 | [diff] [blame] | 3833 | skb_frag_size_set(&head_frag, skb_headlen(frag_skb)); |
Yonghong Song | 13acc94 | 2018-03-21 16:31:03 -0700 | [diff] [blame] | 3834 | return head_frag; |
| 3835 | } |
| 3836 | |
Steffen Klassert | 3a1296a | 2020-01-25 11:26:44 +0100 | [diff] [blame] | 3837 | struct sk_buff *skb_segment_list(struct sk_buff *skb, |
| 3838 | netdev_features_t features, |
| 3839 | unsigned int offset) |
| 3840 | { |
| 3841 | struct sk_buff *list_skb = skb_shinfo(skb)->frag_list; |
| 3842 | unsigned int tnl_hlen = skb_tnl_header_len(skb); |
| 3843 | unsigned int delta_truesize = 0; |
| 3844 | unsigned int delta_len = 0; |
| 3845 | struct sk_buff *tail = NULL; |
Dongseok Yi | 53475c5 | 2021-01-08 11:28:38 +0900 | [diff] [blame] | 3846 | struct sk_buff *nskb, *tmp; |
| 3847 | int err; |
Steffen Klassert | 3a1296a | 2020-01-25 11:26:44 +0100 | [diff] [blame] | 3848 | |
| 3849 | skb_push(skb, -skb_network_offset(skb) + offset); |
| 3850 | |
| 3851 | skb_shinfo(skb)->frag_list = NULL; |
| 3852 | |
| 3853 | do { |
| 3854 | nskb = list_skb; |
| 3855 | list_skb = list_skb->next; |
| 3856 | |
Dongseok Yi | 53475c5 | 2021-01-08 11:28:38 +0900 | [diff] [blame] | 3857 | err = 0; |
| 3858 | if (skb_shared(nskb)) { |
| 3859 | tmp = skb_clone(nskb, GFP_ATOMIC); |
| 3860 | if (tmp) { |
| 3861 | consume_skb(nskb); |
| 3862 | nskb = tmp; |
| 3863 | err = skb_unclone(nskb, GFP_ATOMIC); |
| 3864 | } else { |
| 3865 | err = -ENOMEM; |
| 3866 | } |
| 3867 | } |
| 3868 | |
Steffen Klassert | 3a1296a | 2020-01-25 11:26:44 +0100 | [diff] [blame] | 3869 | if (!tail) |
| 3870 | skb->next = nskb; |
| 3871 | else |
| 3872 | tail->next = nskb; |
| 3873 | |
Dongseok Yi | 53475c5 | 2021-01-08 11:28:38 +0900 | [diff] [blame] | 3874 | if (unlikely(err)) { |
| 3875 | nskb->next = list_skb; |
| 3876 | goto err_linearize; |
| 3877 | } |
| 3878 | |
Steffen Klassert | 3a1296a | 2020-01-25 11:26:44 +0100 | [diff] [blame] | 3879 | tail = nskb; |
| 3880 | |
| 3881 | delta_len += nskb->len; |
| 3882 | delta_truesize += nskb->truesize; |
| 3883 | |
| 3884 | skb_push(nskb, -skb_network_offset(nskb) + offset); |
| 3885 | |
Florian Westphal | cf673ed | 2020-03-30 18:51:29 +0200 | [diff] [blame] | 3886 | skb_release_head_state(nskb); |
Steffen Klassert | 3a1296a | 2020-01-25 11:26:44 +0100 | [diff] [blame] | 3887 | __copy_skb_header(nskb, skb); |
| 3888 | |
| 3889 | skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb)); |
| 3890 | skb_copy_from_linear_data_offset(skb, -tnl_hlen, |
| 3891 | nskb->data - tnl_hlen, |
| 3892 | offset + tnl_hlen); |
| 3893 | |
| 3894 | if (skb_needs_linearize(nskb, features) && |
| 3895 | __skb_linearize(nskb)) |
| 3896 | goto err_linearize; |
| 3897 | |
| 3898 | } while (list_skb); |
| 3899 | |
| 3900 | skb->truesize = skb->truesize - delta_truesize; |
| 3901 | skb->data_len = skb->data_len - delta_len; |
| 3902 | skb->len = skb->len - delta_len; |
| 3903 | |
| 3904 | skb_gso_reset(skb); |
| 3905 | |
| 3906 | skb->prev = tail; |
| 3907 | |
| 3908 | if (skb_needs_linearize(skb, features) && |
| 3909 | __skb_linearize(skb)) |
| 3910 | goto err_linearize; |
| 3911 | |
| 3912 | skb_get(skb); |
| 3913 | |
| 3914 | return skb; |
| 3915 | |
| 3916 | err_linearize: |
| 3917 | kfree_skb_list(skb->next); |
| 3918 | skb->next = NULL; |
| 3919 | return ERR_PTR(-ENOMEM); |
| 3920 | } |
| 3921 | EXPORT_SYMBOL_GPL(skb_segment_list); |
| 3922 | |
| 3923 | int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb) |
| 3924 | { |
| 3925 | if (unlikely(p->len + skb->len >= 65536)) |
| 3926 | return -E2BIG; |
| 3927 | |
| 3928 | if (NAPI_GRO_CB(p)->last == p) |
| 3929 | skb_shinfo(p)->frag_list = skb; |
| 3930 | else |
| 3931 | NAPI_GRO_CB(p)->last->next = skb; |
| 3932 | |
| 3933 | skb_pull(skb, skb_gro_offset(skb)); |
| 3934 | |
| 3935 | NAPI_GRO_CB(p)->last = skb; |
| 3936 | NAPI_GRO_CB(p)->count++; |
| 3937 | p->data_len += skb->len; |
Paolo Abeni | 5e10da5 | 2021-07-28 18:24:03 +0200 | [diff] [blame] | 3938 | |
| 3939 | /* sk ownership - if any - is completely transferred to the aggregated packet */ |
| 3940 | skb->destructor = NULL; |
Steffen Klassert | 3a1296a | 2020-01-25 11:26:44 +0100 | [diff] [blame] | 3941 | p->truesize += skb->truesize; |
| 3942 | p->len += skb->len; |
| 3943 | |
| 3944 | NAPI_GRO_CB(skb)->same_flow = 1; |
| 3945 | |
| 3946 | return 0; |
| 3947 | } |
Steffen Klassert | 3a1296a | 2020-01-25 11:26:44 +0100 | [diff] [blame] | 3948 | |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3949 | /** |
| 3950 | * skb_segment - Perform protocol segmentation on skb. |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 3951 | * @head_skb: buffer to segment |
Herbert Xu | 576a30e | 2006-06-27 13:22:38 -0700 | [diff] [blame] | 3952 | * @features: features for the output path (see dev->features) |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3953 | * |
| 3954 | * This function performs segmentation on the given skb. It returns |
Ben Hutchings | 4c821d7 | 2008-04-13 21:52:48 -0700 | [diff] [blame] | 3955 | * a pointer to the first in a list of new skbs for the segments. |
| 3956 | * In case of error it returns ERR_PTR(err). |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3957 | */ |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 3958 | struct sk_buff *skb_segment(struct sk_buff *head_skb, |
| 3959 | netdev_features_t features) |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3960 | { |
| 3961 | struct sk_buff *segs = NULL; |
| 3962 | struct sk_buff *tail = NULL; |
Michael S. Tsirkin | 1a4ceda | 2014-03-10 19:27:59 +0200 | [diff] [blame] | 3963 | struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list; |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 3964 | skb_frag_t *frag = skb_shinfo(head_skb)->frags; |
| 3965 | unsigned int mss = skb_shinfo(head_skb)->gso_size; |
| 3966 | unsigned int doffset = head_skb->data - skb_mac_header(head_skb); |
Michael S. Tsirkin | 1fd819e | 2014-03-10 19:28:08 +0200 | [diff] [blame] | 3967 | struct sk_buff *frag_skb = head_skb; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3968 | unsigned int offset = doffset; |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 3969 | unsigned int tnl_hlen = skb_tnl_header_len(head_skb); |
Alexander Duyck | 802ab55 | 2016-04-10 21:45:03 -0400 | [diff] [blame] | 3970 | unsigned int partial_segs = 0; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3971 | unsigned int headroom; |
Alexander Duyck | 802ab55 | 2016-04-10 21:45:03 -0400 | [diff] [blame] | 3972 | unsigned int len = head_skb->len; |
Pravin B Shelar | ec5f061 | 2013-03-07 09:28:01 +0000 | [diff] [blame] | 3973 | __be16 proto; |
Alexander Duyck | 36c9838 | 2016-05-02 09:38:18 -0700 | [diff] [blame] | 3974 | bool csum, sg; |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 3975 | int nfrags = skb_shinfo(head_skb)->nr_frags; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3976 | int err = -ENOMEM; |
| 3977 | int i = 0; |
| 3978 | int pos; |
| 3979 | |
Shmulik Ladkani | 3dcbdb1 | 2019-09-06 12:23:50 +0300 | [diff] [blame] | 3980 | if (list_skb && !list_skb->head_frag && skb_headlen(list_skb) && |
| 3981 | (skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY)) { |
| 3982 | /* gso_size is untrusted, and we have a frag_list with a linear |
| 3983 | * non head_frag head. |
| 3984 | * |
| 3985 | * (we assume checking the first list_skb member suffices; |
| 3986 | * i.e. if any of the list_skb members has a non-head_frag |
| 3987 | * head, then the first one does too). |
| 3988 | * |
| 3989 | * If head_skb's headlen does not fit the requested gso_size, it |
| 3990 | * means that the frag_list members do NOT terminate on exact |
| 3991 | * gso_size boundaries. Hence we cannot perform skb_frag_t page |
| 3992 | * sharing. Therefore we must fall back to copying the frag_list |
| 3993 | * skbs; we do so by disabling SG. |
| 3994 | */ |
| 3995 | if (mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) |
| 3996 | features &= ~NETIF_F_SG; |
| 3997 | } |
| 3998 | |
Wei-Chun Chao | 5882a07 | 2014-06-08 23:48:54 -0700 | [diff] [blame] | 3999 | __skb_push(head_skb, doffset); |
Miaohe Lin | 2f63113 | 2020-08-01 17:36:05 +0800 | [diff] [blame] | 4000 | proto = skb_network_protocol(head_skb, NULL); |
Pravin B Shelar | ec5f061 | 2013-03-07 09:28:01 +0000 | [diff] [blame] | 4001 | if (unlikely(!proto)) |
| 4002 | return ERR_PTR(-EINVAL); |
| 4003 | |
Alexander Duyck | 36c9838 | 2016-05-02 09:38:18 -0700 | [diff] [blame] | 4004 | sg = !!(features & NETIF_F_SG); |
Alexander Duyck | f245d07 | 2016-02-05 15:28:26 -0800 | [diff] [blame] | 4005 | csum = !!can_checksum_protocol(features, proto); |
Tom Herbert | 7e2b10c | 2014-06-04 17:20:02 -0700 | [diff] [blame] | 4006 | |
Steffen Klassert | 07b26c9 | 2016-09-19 12:58:47 +0200 | [diff] [blame] | 4007 | if (sg && csum && (mss != GSO_BY_FRAGS)) { |
| 4008 | if (!(features & NETIF_F_GSO_PARTIAL)) { |
| 4009 | struct sk_buff *iter; |
Ilan Tayari | 43170c4 | 2017-04-19 21:26:07 +0300 | [diff] [blame] | 4010 | unsigned int frag_len; |
Steffen Klassert | 07b26c9 | 2016-09-19 12:58:47 +0200 | [diff] [blame] | 4011 | |
| 4012 | if (!list_skb || |
| 4013 | !net_gso_ok(features, skb_shinfo(head_skb)->gso_type)) |
| 4014 | goto normal; |
| 4015 | |
Ilan Tayari | 43170c4 | 2017-04-19 21:26:07 +0300 | [diff] [blame] | 4016 | /* If we get here then all the required |
| 4017 | * GSO features except frag_list are supported. |
| 4018 | * Try to split the SKB into multiple GSO SKBs |
| 4019 | * with no frag_list. |
| 4020 | * Currently we can do that only when the buffers don't |
| 4021 | * have a linear part and all the buffers except |
| 4022 | * the last are of the same length. |
Steffen Klassert | 07b26c9 | 2016-09-19 12:58:47 +0200 | [diff] [blame] | 4023 | */ |
Ilan Tayari | 43170c4 | 2017-04-19 21:26:07 +0300 | [diff] [blame] | 4024 | frag_len = list_skb->len; |
Steffen Klassert | 07b26c9 | 2016-09-19 12:58:47 +0200 | [diff] [blame] | 4025 | skb_walk_frags(head_skb, iter) { |
Ilan Tayari | 43170c4 | 2017-04-19 21:26:07 +0300 | [diff] [blame] | 4026 | if (frag_len != iter->len && iter->next) |
| 4027 | goto normal; |
Ilan Tayari | eaffadb | 2017-04-08 02:07:08 +0300 | [diff] [blame] | 4028 | if (skb_headlen(iter) && !iter->head_frag) |
Steffen Klassert | 07b26c9 | 2016-09-19 12:58:47 +0200 | [diff] [blame] | 4029 | goto normal; |
| 4030 | |
| 4031 | len -= iter->len; |
| 4032 | } |
Ilan Tayari | 43170c4 | 2017-04-19 21:26:07 +0300 | [diff] [blame] | 4033 | |
| 4034 | if (len != frag_len) |
| 4035 | goto normal; |
Steffen Klassert | 07b26c9 | 2016-09-19 12:58:47 +0200 | [diff] [blame] | 4036 | } |
| 4037 | |
| 4038 | /* GSO partial only requires that we trim off any excess that |
| 4039 | * doesn't fit into an MSS-sized block, so take care of that |
| 4040 | * now. |
| 4041 | */ |
Alexander Duyck | 802ab55 | 2016-04-10 21:45:03 -0400 | [diff] [blame] | 4042 | partial_segs = len / mss; |
Alexander Duyck | d7fb5a8 | 2016-05-02 09:38:12 -0700 | [diff] [blame] | 4043 | if (partial_segs > 1) |
| 4044 | mss *= partial_segs; |
| 4045 | else |
| 4046 | partial_segs = 0; |
Alexander Duyck | 802ab55 | 2016-04-10 21:45:03 -0400 | [diff] [blame] | 4047 | } |
| 4048 | |
Steffen Klassert | 07b26c9 | 2016-09-19 12:58:47 +0200 | [diff] [blame] | 4049 | normal: |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 4050 | headroom = skb_headroom(head_skb); |
| 4051 | pos = skb_headlen(head_skb); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 4052 | |
| 4053 | do { |
| 4054 | struct sk_buff *nskb; |
Michael S. Tsirkin | 8cb1990 | 2014-03-10 18:29:04 +0200 | [diff] [blame] | 4055 | skb_frag_t *nskb_frag; |
Herbert Xu | c8884ed | 2006-10-29 15:59:41 -0800 | [diff] [blame] | 4056 | int hsize; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 4057 | int size; |
| 4058 | |
Marcelo Ricardo Leitner | 3953c46 | 2016-06-02 15:05:40 -0300 | [diff] [blame] | 4059 | if (unlikely(mss == GSO_BY_FRAGS)) { |
| 4060 | len = list_skb->len; |
| 4061 | } else { |
| 4062 | len = head_skb->len - offset; |
| 4063 | if (len > mss) |
| 4064 | len = mss; |
| 4065 | } |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 4066 | |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 4067 | hsize = skb_headlen(head_skb) - offset; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 4068 | |
Xin Long | dbd50f2 | 2021-01-15 17:36:38 +0800 | [diff] [blame] | 4069 | if (hsize <= 0 && i >= nfrags && skb_headlen(list_skb) && |
Michael S. Tsirkin | 1a4ceda | 2014-03-10 19:27:59 +0200 | [diff] [blame] | 4070 | (skb_headlen(list_skb) == len || sg)) { |
| 4071 | BUG_ON(skb_headlen(list_skb) > len); |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 4072 | |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 4073 | i = 0; |
Michael S. Tsirkin | 1a4ceda | 2014-03-10 19:27:59 +0200 | [diff] [blame] | 4074 | nfrags = skb_shinfo(list_skb)->nr_frags; |
| 4075 | frag = skb_shinfo(list_skb)->frags; |
Michael S. Tsirkin | 1fd819e | 2014-03-10 19:28:08 +0200 | [diff] [blame] | 4076 | frag_skb = list_skb; |
Michael S. Tsirkin | 1a4ceda | 2014-03-10 19:27:59 +0200 | [diff] [blame] | 4077 | pos += skb_headlen(list_skb); |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 4078 | |
| 4079 | while (pos < offset + len) { |
| 4080 | BUG_ON(i >= nfrags); |
| 4081 | |
Michael S. Tsirkin | 4e1beba | 2014-03-10 18:29:14 +0200 | [diff] [blame] | 4082 | size = skb_frag_size(frag); |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 4083 | if (pos + size > offset + len) |
| 4084 | break; |
| 4085 | |
| 4086 | i++; |
| 4087 | pos += size; |
Michael S. Tsirkin | 4e1beba | 2014-03-10 18:29:14 +0200 | [diff] [blame] | 4088 | frag++; |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 4089 | } |
| 4090 | |
Michael S. Tsirkin | 1a4ceda | 2014-03-10 19:27:59 +0200 | [diff] [blame] | 4091 | nskb = skb_clone(list_skb, GFP_ATOMIC); |
| 4092 | list_skb = list_skb->next; |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 4093 | |
| 4094 | if (unlikely(!nskb)) |
| 4095 | goto err; |
| 4096 | |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 4097 | if (unlikely(pskb_trim(nskb, len))) { |
| 4098 | kfree_skb(nskb); |
| 4099 | goto err; |
| 4100 | } |
| 4101 | |
Alexander Duyck | ec47ea8 | 2012-05-04 14:26:56 +0000 | [diff] [blame] | 4102 | hsize = skb_end_offset(nskb); |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 4103 | if (skb_cow_head(nskb, doffset + headroom)) { |
| 4104 | kfree_skb(nskb); |
| 4105 | goto err; |
| 4106 | } |
| 4107 | |
Alexander Duyck | ec47ea8 | 2012-05-04 14:26:56 +0000 | [diff] [blame] | 4108 | nskb->truesize += skb_end_offset(nskb) - hsize; |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 4109 | skb_release_head_state(nskb); |
| 4110 | __skb_push(nskb, doffset); |
| 4111 | } else { |
Paolo Abeni | 00b229f | 2021-01-19 17:56:56 +0100 | [diff] [blame] | 4112 | if (hsize < 0) |
| 4113 | hsize = 0; |
Xin Long | dbd50f2 | 2021-01-15 17:36:38 +0800 | [diff] [blame] | 4114 | if (hsize > len || !sg) |
| 4115 | hsize = len; |
Xin Long | dbd50f2 | 2021-01-15 17:36:38 +0800 | [diff] [blame] | 4116 | |
Mel Gorman | c93bdd0 | 2012-07-31 16:44:19 -0700 | [diff] [blame] | 4117 | nskb = __alloc_skb(hsize + doffset + headroom, |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 4118 | GFP_ATOMIC, skb_alloc_rx_flag(head_skb), |
Mel Gorman | c93bdd0 | 2012-07-31 16:44:19 -0700 | [diff] [blame] | 4119 | NUMA_NO_NODE); |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 4120 | |
| 4121 | if (unlikely(!nskb)) |
| 4122 | goto err; |
| 4123 | |
| 4124 | skb_reserve(nskb, headroom); |
| 4125 | __skb_put(nskb, doffset); |
| 4126 | } |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 4127 | |
| 4128 | if (segs) |
| 4129 | tail->next = nskb; |
| 4130 | else |
| 4131 | segs = nskb; |
| 4132 | tail = nskb; |
| 4133 | |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 4134 | __copy_skb_header(nskb, head_skb); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 4135 | |
Eric Dumazet | 030737b | 2013-10-19 11:42:54 -0700 | [diff] [blame] | 4136 | skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom); |
Vlad Yasevich | fcdfe3a | 2014-07-31 10:33:06 -0400 | [diff] [blame] | 4137 | skb_reset_mac_len(nskb); |
Pravin B Shelar | 68c3316 | 2013-02-14 14:02:41 +0000 | [diff] [blame] | 4138 | |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 4139 | skb_copy_from_linear_data_offset(head_skb, -tnl_hlen, |
Pravin B Shelar | 68c3316 | 2013-02-14 14:02:41 +0000 | [diff] [blame] | 4140 | nskb->data - tnl_hlen, |
| 4141 | doffset + tnl_hlen); |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 4142 | |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 4143 | if (nskb->len == len + doffset) |
Simon Horman | 1cdbcb7 | 2013-05-19 15:46:49 +0000 | [diff] [blame] | 4144 | goto perform_csum_check; |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 4145 | |
Alexander Duyck | 7fbeffe | 2016-02-05 15:27:43 -0800 | [diff] [blame] | 4146 | if (!sg) { |
Yadu Kishore | 1454c9f | 2020-03-17 14:08:38 +0530 | [diff] [blame] | 4147 | if (!csum) { |
| 4148 | if (!nskb->remcsum_offload) |
| 4149 | nskb->ip_summed = CHECKSUM_NONE; |
| 4150 | SKB_GSO_CB(nskb)->csum = |
| 4151 | skb_copy_and_csum_bits(head_skb, offset, |
| 4152 | skb_put(nskb, |
| 4153 | len), |
Al Viro | 8d5930d | 2020-07-10 20:07:10 -0400 | [diff] [blame] | 4154 | len); |
Yadu Kishore | 1454c9f | 2020-03-17 14:08:38 +0530 | [diff] [blame] | 4155 | SKB_GSO_CB(nskb)->csum_start = |
| 4156 | skb_headroom(nskb) + doffset; |
| 4157 | } else { |
| 4158 | skb_copy_bits(head_skb, offset, |
| 4159 | skb_put(nskb, len), |
| 4160 | len); |
| 4161 | } |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 4162 | continue; |
| 4163 | } |
| 4164 | |
Michael S. Tsirkin | 8cb1990 | 2014-03-10 18:29:04 +0200 | [diff] [blame] | 4165 | nskb_frag = skb_shinfo(nskb)->frags; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 4166 | |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 4167 | skb_copy_from_linear_data_offset(head_skb, offset, |
Arnaldo Carvalho de Melo | d626f62 | 2007-03-27 18:55:52 -0300 | [diff] [blame] | 4168 | skb_put(nskb, hsize), hsize); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 4169 | |
Jonathan Lemon | 06b4feb | 2021-01-06 14:18:38 -0800 | [diff] [blame] | 4170 | skb_shinfo(nskb)->flags |= skb_shinfo(head_skb)->flags & |
| 4171 | SKBFL_SHARED_FRAG; |
Eric Dumazet | cef401d | 2013-01-25 20:34:37 +0000 | [diff] [blame] | 4172 | |
Willem de Bruijn | bf5c25d | 2017-12-22 19:00:17 -0500 | [diff] [blame] | 4173 | if (skb_orphan_frags(frag_skb, GFP_ATOMIC) || |
| 4174 | skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC)) |
| 4175 | goto err; |
| 4176 | |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 4177 | while (pos < offset + len) { |
| 4178 | if (i >= nfrags) { |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 4179 | i = 0; |
Michael S. Tsirkin | 1a4ceda | 2014-03-10 19:27:59 +0200 | [diff] [blame] | 4180 | nfrags = skb_shinfo(list_skb)->nr_frags; |
| 4181 | frag = skb_shinfo(list_skb)->frags; |
Michael S. Tsirkin | 1fd819e | 2014-03-10 19:28:08 +0200 | [diff] [blame] | 4182 | frag_skb = list_skb; |
Yonghong Song | 13acc94 | 2018-03-21 16:31:03 -0700 | [diff] [blame] | 4183 | if (!skb_headlen(list_skb)) { |
| 4184 | BUG_ON(!nfrags); |
| 4185 | } else { |
| 4186 | BUG_ON(!list_skb->head_frag); |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 4187 | |
Yonghong Song | 13acc94 | 2018-03-21 16:31:03 -0700 | [diff] [blame] | 4188 | /* to make room for head_frag. */ |
| 4189 | i--; |
| 4190 | frag--; |
| 4191 | } |
Willem de Bruijn | bf5c25d | 2017-12-22 19:00:17 -0500 | [diff] [blame] | 4192 | if (skb_orphan_frags(frag_skb, GFP_ATOMIC) || |
| 4193 | skb_zerocopy_clone(nskb, frag_skb, |
| 4194 | GFP_ATOMIC)) |
| 4195 | goto err; |
| 4196 | |
Michael S. Tsirkin | 1a4ceda | 2014-03-10 19:27:59 +0200 | [diff] [blame] | 4197 | list_skb = list_skb->next; |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 4198 | } |
| 4199 | |
| 4200 | if (unlikely(skb_shinfo(nskb)->nr_frags >= |
| 4201 | MAX_SKB_FRAGS)) { |
| 4202 | net_warn_ratelimited( |
| 4203 | "skb_segment: too many frags: %u %u\n", |
| 4204 | pos, mss); |
Eric Dumazet | ff907a1 | 2018-07-19 16:04:38 -0700 | [diff] [blame] | 4205 | err = -EINVAL; |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 4206 | goto err; |
| 4207 | } |
| 4208 | |
Yonghong Song | 13acc94 | 2018-03-21 16:31:03 -0700 | [diff] [blame] | 4209 | *nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag; |
Michael S. Tsirkin | 8cb1990 | 2014-03-10 18:29:04 +0200 | [diff] [blame] | 4210 | __skb_frag_ref(nskb_frag); |
| 4211 | size = skb_frag_size(nskb_frag); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 4212 | |
| 4213 | if (pos < offset) { |
Jonathan Lemon | b54c9d5 | 2019-07-30 07:40:33 -0700 | [diff] [blame] | 4214 | skb_frag_off_add(nskb_frag, offset - pos); |
Michael S. Tsirkin | 8cb1990 | 2014-03-10 18:29:04 +0200 | [diff] [blame] | 4215 | skb_frag_size_sub(nskb_frag, offset - pos); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 4216 | } |
| 4217 | |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 4218 | skb_shinfo(nskb)->nr_frags++; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 4219 | |
| 4220 | if (pos + size <= offset + len) { |
| 4221 | i++; |
Michael S. Tsirkin | 4e1beba | 2014-03-10 18:29:14 +0200 | [diff] [blame] | 4222 | frag++; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 4223 | pos += size; |
| 4224 | } else { |
Michael S. Tsirkin | 8cb1990 | 2014-03-10 18:29:04 +0200 | [diff] [blame] | 4225 | skb_frag_size_sub(nskb_frag, pos + size - (offset + len)); |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 4226 | goto skip_fraglist; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 4227 | } |
| 4228 | |
Michael S. Tsirkin | 8cb1990 | 2014-03-10 18:29:04 +0200 | [diff] [blame] | 4229 | nskb_frag++; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 4230 | } |
| 4231 | |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 4232 | skip_fraglist: |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 4233 | nskb->data_len = len - hsize; |
| 4234 | nskb->len += nskb->data_len; |
| 4235 | nskb->truesize += nskb->data_len; |
Pravin B Shelar | ec5f061 | 2013-03-07 09:28:01 +0000 | [diff] [blame] | 4236 | |
Simon Horman | 1cdbcb7 | 2013-05-19 15:46:49 +0000 | [diff] [blame] | 4237 | perform_csum_check: |
Alexander Duyck | 7fbeffe | 2016-02-05 15:27:43 -0800 | [diff] [blame] | 4238 | if (!csum) { |
Eric Dumazet | ff907a1 | 2018-07-19 16:04:38 -0700 | [diff] [blame] | 4239 | if (skb_has_shared_frag(nskb) && |
| 4240 | __skb_linearize(nskb)) |
| 4241 | goto err; |
| 4242 | |
Alexander Duyck | 7fbeffe | 2016-02-05 15:27:43 -0800 | [diff] [blame] | 4243 | if (!nskb->remcsum_offload) |
| 4244 | nskb->ip_summed = CHECKSUM_NONE; |
Alexander Duyck | 7644345 | 2016-02-05 15:27:37 -0800 | [diff] [blame] | 4245 | SKB_GSO_CB(nskb)->csum = |
| 4246 | skb_checksum(nskb, doffset, |
| 4247 | nskb->len - doffset, 0); |
Tom Herbert | 7e2b10c | 2014-06-04 17:20:02 -0700 | [diff] [blame] | 4248 | SKB_GSO_CB(nskb)->csum_start = |
Alexander Duyck | 7644345 | 2016-02-05 15:27:37 -0800 | [diff] [blame] | 4249 | skb_headroom(nskb) + doffset; |
Pravin B Shelar | ec5f061 | 2013-03-07 09:28:01 +0000 | [diff] [blame] | 4250 | } |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 4251 | } while ((offset += len) < head_skb->len); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 4252 | |
Eric Dumazet | bec3cfd | 2014-10-03 20:59:19 -0700 | [diff] [blame] | 4253 | /* Some callers want to get the end of the list. |
| 4254 | * Put it in segs->prev to avoid walking the list. |
| 4255 | * (see validate_xmit_skb_list() for example) |
| 4256 | */ |
| 4257 | segs->prev = tail; |
Toshiaki Makita | 432c856 | 2014-10-27 10:30:51 -0700 | [diff] [blame] | 4258 | |
Alexander Duyck | 802ab55 | 2016-04-10 21:45:03 -0400 | [diff] [blame] | 4259 | if (partial_segs) { |
Steffen Klassert | 07b26c9 | 2016-09-19 12:58:47 +0200 | [diff] [blame] | 4260 | struct sk_buff *iter; |
Alexander Duyck | 802ab55 | 2016-04-10 21:45:03 -0400 | [diff] [blame] | 4261 | int type = skb_shinfo(head_skb)->gso_type; |
Steffen Klassert | 07b26c9 | 2016-09-19 12:58:47 +0200 | [diff] [blame] | 4262 | unsigned short gso_size = skb_shinfo(head_skb)->gso_size; |
Alexander Duyck | 802ab55 | 2016-04-10 21:45:03 -0400 | [diff] [blame] | 4263 | |
| 4264 | /* Update type to add partial and then remove dodgy if set */ |
Steffen Klassert | 07b26c9 | 2016-09-19 12:58:47 +0200 | [diff] [blame] | 4265 | type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL; |
Alexander Duyck | 802ab55 | 2016-04-10 21:45:03 -0400 | [diff] [blame] | 4266 | type &= ~SKB_GSO_DODGY; |
| 4267 | |
| 4268 | /* Update GSO info and prepare to start updating headers on |
| 4269 | * our way back down the stack of protocols. |
| 4270 | */ |
Steffen Klassert | 07b26c9 | 2016-09-19 12:58:47 +0200 | [diff] [blame] | 4271 | for (iter = segs; iter; iter = iter->next) { |
| 4272 | skb_shinfo(iter)->gso_size = gso_size; |
| 4273 | skb_shinfo(iter)->gso_segs = partial_segs; |
| 4274 | skb_shinfo(iter)->gso_type = type; |
| 4275 | SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset; |
| 4276 | } |
| 4277 | |
| 4278 | if (tail->len - doffset <= gso_size) |
| 4279 | skb_shinfo(tail)->gso_size = 0; |
| 4280 | else if (tail != segs) |
| 4281 | skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size); |
Alexander Duyck | 802ab55 | 2016-04-10 21:45:03 -0400 | [diff] [blame] | 4282 | } |
| 4283 | |
Toshiaki Makita | 432c856 | 2014-10-27 10:30:51 -0700 | [diff] [blame] | 4284 | /* The following permits correct backpressure for protocols |
| 4285 | * using skb_set_owner_w(). |
| 4286 | * The idea is to transfer ownership from head_skb to the last segment. |
| 4287 | */ |
| 4288 | if (head_skb->destructor == sock_wfree) { |
| 4289 | swap(tail->truesize, head_skb->truesize); |
| 4290 | swap(tail->destructor, head_skb->destructor); |
| 4291 | swap(tail->sk, head_skb->sk); |
| 4292 | } |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 4293 | return segs; |
| 4294 | |
| 4295 | err: |
Eric Dumazet | 289dccb | 2013-12-20 14:29:08 -0800 | [diff] [blame] | 4296 | kfree_skb_list(segs); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 4297 | return ERR_PTR(err); |
| 4298 | } |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 4299 | EXPORT_SYMBOL_GPL(skb_segment); |
| 4300 | |
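| | /* Illustrative usage (not part of the build): protocol ->gso_segment() |
| | * callbacks call skb_segment() and then patch the protocol headers of |
| | * each resulting segment - a minimal sketch, with fix_headers() a |
| | * hypothetical per-segment fixup and error handling elided: |
| | * |
| | *	struct sk_buff *segs, *seg; |
| | * |
| | *	segs = skb_segment(skb, features); |
| | *	if (IS_ERR(segs)) |
| | *		return segs; |
| | *	for (seg = segs; seg; seg = seg->next) |
| | *		fix_headers(seg); |
| | *	return segs; |
| | */ |
| | |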
David Miller | d4546c2 | 2018-06-24 14:13:49 +0900 | [diff] [blame] | 4301 | int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb) |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 4302 | { |
Eric Dumazet | 8a29111 | 2013-10-08 09:02:23 -0700 | [diff] [blame] | 4303 | struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb); |
Herbert Xu | 67147ba | 2009-05-26 18:50:22 +0000 | [diff] [blame] | 4304 | unsigned int offset = skb_gro_offset(skb); |
| 4305 | unsigned int headlen = skb_headlen(skb); |
Eric Dumazet | 8a29111 | 2013-10-08 09:02:23 -0700 | [diff] [blame] | 4306 | unsigned int len = skb_gro_len(skb); |
Eric Dumazet | 715dc1f | 2012-05-02 23:33:21 +0000 | [diff] [blame] | 4307 | unsigned int delta_truesize; |
Paolo Abeni | 5e10da5 | 2021-07-28 18:24:03 +0200 | [diff] [blame] | 4308 | unsigned int new_truesize; |
David Miller | d4546c2 | 2018-06-24 14:13:49 +0900 | [diff] [blame] | 4309 | struct sk_buff *lp; |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 4310 | |
Steffen Klassert | 0ab03f3 | 2019-04-02 08:16:03 +0200 | [diff] [blame] | 4311 | if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush)) |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 4312 | return -E2BIG; |
| 4313 | |
Eric Dumazet | 29e9824 | 2014-05-16 11:34:37 -0700 | [diff] [blame] | 4314 | lp = NAPI_GRO_CB(p)->last; |
Eric Dumazet | 8a29111 | 2013-10-08 09:02:23 -0700 | [diff] [blame] | 4315 | pinfo = skb_shinfo(lp); |
| 4316 | |
| 4317 | if (headlen <= offset) { |
Herbert Xu | 42da699 | 2009-05-26 18:50:19 +0000 | [diff] [blame] | 4318 | skb_frag_t *frag; |
Herbert Xu | 66e92fc | 2009-05-26 18:50:32 +0000 | [diff] [blame] | 4319 | skb_frag_t *frag2; |
Herbert Xu | 9aaa156 | 2009-05-26 18:50:33 +0000 | [diff] [blame] | 4320 | int i = skbinfo->nr_frags; |
| 4321 | int nr_frags = pinfo->nr_frags + i; |
Herbert Xu | 42da699 | 2009-05-26 18:50:19 +0000 | [diff] [blame] | 4322 | |
Herbert Xu | 66e92fc | 2009-05-26 18:50:32 +0000 | [diff] [blame] | 4323 | if (nr_frags > MAX_SKB_FRAGS) |
Eric Dumazet | 8a29111 | 2013-10-08 09:02:23 -0700 | [diff] [blame] | 4324 | goto merge; |
Herbert Xu | 81705ad | 2009-01-29 14:19:51 +0000 | [diff] [blame] | 4325 | |
Eric Dumazet | 8a29111 | 2013-10-08 09:02:23 -0700 | [diff] [blame] | 4326 | offset -= headlen; |
Herbert Xu | 9aaa156 | 2009-05-26 18:50:33 +0000 | [diff] [blame] | 4327 | pinfo->nr_frags = nr_frags; |
| 4328 | skbinfo->nr_frags = 0; |
Herbert Xu | f557206 | 2009-01-14 20:40:03 -0800 | [diff] [blame] | 4329 | |
Herbert Xu | 9aaa156 | 2009-05-26 18:50:33 +0000 | [diff] [blame] | 4330 | frag = pinfo->frags + nr_frags; |
| 4331 | frag2 = skbinfo->frags + i; |
Herbert Xu | 66e92fc | 2009-05-26 18:50:32 +0000 | [diff] [blame] | 4332 | do { |
| 4333 | *--frag = *--frag2; |
| 4334 | } while (--i); |
| 4335 | |
Jonathan Lemon | b54c9d5 | 2019-07-30 07:40:33 -0700 | [diff] [blame] | 4336 | skb_frag_off_add(frag, offset); |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 4337 | skb_frag_size_sub(frag, offset); |
Herbert Xu | 66e92fc | 2009-05-26 18:50:32 +0000 | [diff] [blame] | 4338 | |
Eric Dumazet | 715dc1f | 2012-05-02 23:33:21 +0000 | [diff] [blame] | 4339 | /* all fragments' truesize: remove (head size + sk_buff) */ |
Paolo Abeni | 5e10da5 | 2021-07-28 18:24:03 +0200 | [diff] [blame] | 4340 | new_truesize = SKB_TRUESIZE(skb_end_offset(skb)); |
| 4341 | delta_truesize = skb->truesize - new_truesize; |
Eric Dumazet | 715dc1f | 2012-05-02 23:33:21 +0000 | [diff] [blame] | 4342 | |
Paolo Abeni | 5e10da5 | 2021-07-28 18:24:03 +0200 | [diff] [blame] | 4343 | skb->truesize = new_truesize; |
Herbert Xu | f557206 | 2009-01-14 20:40:03 -0800 | [diff] [blame] | 4344 | skb->len -= skb->data_len; |
| 4345 | skb->data_len = 0; |
| 4346 | |
Eric Dumazet | 715dc1f | 2012-05-02 23:33:21 +0000 | [diff] [blame] | 4347 | NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE; |
Herbert Xu | 5d38a07 | 2009-01-04 16:13:40 -0800 | [diff] [blame] | 4348 | goto done; |
Eric Dumazet | d7e8883 | 2012-04-30 08:10:34 +0000 | [diff] [blame] | 4349 | } else if (skb->head_frag) { |
| 4350 | int nr_frags = pinfo->nr_frags; |
| 4351 | skb_frag_t *frag = pinfo->frags + nr_frags; |
| 4352 | struct page *page = virt_to_head_page(skb->head); |
| 4353 | unsigned int first_size = headlen - offset; |
| 4354 | unsigned int first_offset; |
| 4355 | |
| 4356 | if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS) |
Eric Dumazet | 8a29111 | 2013-10-08 09:02:23 -0700 | [diff] [blame] | 4357 | goto merge; |
Eric Dumazet | d7e8883 | 2012-04-30 08:10:34 +0000 | [diff] [blame] | 4358 | |
| 4359 | first_offset = skb->data - |
| 4360 | (unsigned char *)page_address(page) + |
| 4361 | offset; |
| 4362 | |
| 4363 | pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags; |
| 4364 | |
Matthew Wilcox (Oracle) | d8e18a5 | 2019-07-22 20:08:26 -0700 | [diff] [blame] | 4365 | __skb_frag_set_page(frag, page); |
Jonathan Lemon | b54c9d5 | 2019-07-30 07:40:33 -0700 | [diff] [blame] | 4366 | skb_frag_off_set(frag, first_offset); |
Eric Dumazet | d7e8883 | 2012-04-30 08:10:34 +0000 | [diff] [blame] | 4367 | skb_frag_size_set(frag, first_size); |
| 4368 | |
| 4369 | memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags); |
| 4370 | /* We don't need to clear skbinfo->nr_frags here */ |
| 4371 | |
Paolo Abeni | af35246 | 2021-08-04 21:07:00 +0200 | [diff] [blame] | 4372 | new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff)); |
Paolo Abeni | 5e10da5 | 2021-07-28 18:24:03 +0200 | [diff] [blame] | 4373 | delta_truesize = skb->truesize - new_truesize; |
| 4374 | skb->truesize = new_truesize; |
Eric Dumazet | d7e8883 | 2012-04-30 08:10:34 +0000 | [diff] [blame] | 4375 | NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; |
| 4376 | goto done; |
Eric Dumazet | 8a29111 | 2013-10-08 09:02:23 -0700 | [diff] [blame] | 4377 | } |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 4378 | |
| 4379 | merge: |
Paolo Abeni | 5e10da5 | 2021-07-28 18:24:03 +0200 | [diff] [blame] | 4380 | /* sk ownership - if any - is completely transferred to the aggregated packet */ |
| 4381 | skb->destructor = NULL; |
Eric Dumazet | 715dc1f | 2012-05-02 23:33:21 +0000 | [diff] [blame] | 4382 | delta_truesize = skb->truesize; |
Herbert Xu | 67147ba | 2009-05-26 18:50:22 +0000 | [diff] [blame] | 4383 | if (offset > headlen) { |
Michal Schmidt | d1dc7ab | 2011-01-24 12:08:48 +0000 | [diff] [blame] | 4384 | unsigned int eat = offset - headlen; |
| 4385 | |
Jonathan Lemon | b54c9d5 | 2019-07-30 07:40:33 -0700 | [diff] [blame] | 4386 | skb_frag_off_add(&skbinfo->frags[0], eat); |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 4387 | skb_frag_size_sub(&skbinfo->frags[0], eat); |
Michal Schmidt | d1dc7ab | 2011-01-24 12:08:48 +0000 | [diff] [blame] | 4388 | skb->data_len -= eat; |
| 4389 | skb->len -= eat; |
Herbert Xu | 67147ba | 2009-05-26 18:50:22 +0000 | [diff] [blame] | 4390 | offset = headlen; |
Herbert Xu | 5603502 | 2009-02-05 21:26:52 -0800 | [diff] [blame] | 4391 | } |
| 4392 | |
Herbert Xu | 67147ba | 2009-05-26 18:50:22 +0000 | [diff] [blame] | 4393 | __skb_pull(skb, offset); |
Herbert Xu | 5603502 | 2009-02-05 21:26:52 -0800 | [diff] [blame] | 4394 | |
Eric Dumazet | 29e9824 | 2014-05-16 11:34:37 -0700 | [diff] [blame] | 4395 | if (NAPI_GRO_CB(p)->last == p) |
Eric Dumazet | 8a29111 | 2013-10-08 09:02:23 -0700 | [diff] [blame] | 4396 | skb_shinfo(p)->frag_list = skb; |
| 4397 | else |
| 4398 | NAPI_GRO_CB(p)->last->next = skb; |
Eric Dumazet | c3c7c25 | 2012-12-06 13:54:59 +0000 | [diff] [blame] | 4399 | NAPI_GRO_CB(p)->last = skb; |
Eric Dumazet | f4a775d | 2014-09-22 16:29:32 -0700 | [diff] [blame] | 4400 | __skb_header_release(skb); |
Eric Dumazet | 8a29111 | 2013-10-08 09:02:23 -0700 | [diff] [blame] | 4401 | lp = p; |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 4402 | |
Herbert Xu | 5d38a07 | 2009-01-04 16:13:40 -0800 | [diff] [blame] | 4403 | done: |
| 4404 | NAPI_GRO_CB(p)->count++; |
Herbert Xu | 37fe473 | 2009-01-17 19:48:13 +0000 | [diff] [blame] | 4405 | p->data_len += len; |
Eric Dumazet | 715dc1f | 2012-05-02 23:33:21 +0000 | [diff] [blame] | 4406 | p->truesize += delta_truesize; |
Herbert Xu | 37fe473 | 2009-01-17 19:48:13 +0000 | [diff] [blame] | 4407 | p->len += len; |
Eric Dumazet | 8a29111 | 2013-10-08 09:02:23 -0700 | [diff] [blame] | 4408 | if (lp != p) { |
| 4409 | lp->data_len += len; |
| 4410 | lp->truesize += delta_truesize; |
| 4411 | lp->len += len; |
| 4412 | } |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 4413 | NAPI_GRO_CB(skb)->same_flow = 1; |
| 4414 | return 0; |
| 4415 | } |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 4416 | |
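| | /* Illustrative usage (not part of the build): a protocol's |
| | * ->gro_receive() handler, once it has found the matching held packet |
| | * p for the incoming skb, merges with a call like the sketch below; |
| | * the real handlers also manage their own flush conditions: |
| | * |
| | *	if (skb_gro_receive(p, skb)) |
| | *		NAPI_GRO_CB(p)->flush = 1;	// could not merge: flush p |
| | */ |
| | |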
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 4417 | #ifdef CONFIG_SKB_EXTENSIONS |
| 4418 | #define SKB_EXT_ALIGN_VALUE 8 |
| 4419 | #define SKB_EXT_CHUNKSIZEOF(x) (ALIGN((sizeof(x)), SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE) |
| 4420 | |
| 4421 | static const u8 skb_ext_type_len[] = { |
| 4422 | #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) |
| 4423 | [SKB_EXT_BRIDGE_NF] = SKB_EXT_CHUNKSIZEOF(struct nf_bridge_info), |
| 4424 | #endif |
Florian Westphal | 4165079 | 2018-12-18 17:15:27 +0100 | [diff] [blame] | 4425 | #ifdef CONFIG_XFRM |
| 4426 | [SKB_EXT_SEC_PATH] = SKB_EXT_CHUNKSIZEOF(struct sec_path), |
| 4427 | #endif |
Paul Blakey | 95a7233 | 2019-09-04 16:56:37 +0300 | [diff] [blame] | 4428 | #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) |
| 4429 | [TC_SKB_EXT] = SKB_EXT_CHUNKSIZEOF(struct tc_skb_ext), |
| 4430 | #endif |
Mat Martineau | 3ee17bc | 2020-01-09 07:59:19 -0800 | [diff] [blame] | 4431 | #if IS_ENABLED(CONFIG_MPTCP) |
| 4432 | [SKB_EXT_MPTCP] = SKB_EXT_CHUNKSIZEOF(struct mptcp_ext), |
| 4433 | #endif |
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 4434 | }; |
| 4435 | |
| 4436 | static __always_inline unsigned int skb_ext_total_length(void) |
| 4437 | { |
| 4438 | return SKB_EXT_CHUNKSIZEOF(struct skb_ext) + |
| 4439 | #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) |
| 4440 | skb_ext_type_len[SKB_EXT_BRIDGE_NF] + |
| 4441 | #endif |
Florian Westphal | 4165079 | 2018-12-18 17:15:27 +0100 | [diff] [blame] | 4442 | #ifdef CONFIG_XFRM |
| 4443 | skb_ext_type_len[SKB_EXT_SEC_PATH] + |
| 4444 | #endif |
Paul Blakey | 95a7233 | 2019-09-04 16:56:37 +0300 | [diff] [blame] | 4445 | #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) |
| 4446 | skb_ext_type_len[TC_SKB_EXT] + |
| 4447 | #endif |
Mat Martineau | 3ee17bc | 2020-01-09 07:59:19 -0800 | [diff] [blame] | 4448 | #if IS_ENABLED(CONFIG_MPTCP) |
| 4449 | skb_ext_type_len[SKB_EXT_MPTCP] + |
| 4450 | #endif |
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 4451 | 0; |
| 4452 | } |
| 4453 | |
| 4454 | static void skb_extensions_init(void) |
| 4455 | { |
| 4456 | BUILD_BUG_ON(SKB_EXT_NUM >= 8); |
| 4457 | BUILD_BUG_ON(skb_ext_total_length() > 255); |
| 4458 | |
| 4459 | skbuff_ext_cache = kmem_cache_create("skbuff_ext_cache", |
| 4460 | SKB_EXT_ALIGN_VALUE * skb_ext_total_length(), |
| 4461 | 0, |
| 4462 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, |
| 4463 | NULL); |
| 4464 | } |
| 4465 | #else |
| 4466 | static void skb_extensions_init(void) {} |
| 4467 | #endif |
| 4468 | |
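| | /* Illustrative usage (not part of the build): subsystems attach and look |
| | * up extensions via skb_ext_add()/skb_ext_find() - a minimal sketch, |
| | * assuming CONFIG_NET_TC_SKB_EXT and a hypothetical chain value: |
| | * |
| | *	struct tc_skb_ext *ext = skb_ext_add(skb, TC_SKB_EXT); |
| | * |
| | *	if (!ext) |
| | *		return -ENOMEM; |
| | *	ext->chain = chain;		// hypothetical chain index |
| | *	...				// later, elsewhere in the stack: |
| | *	ext = skb_ext_find(skb, TC_SKB_EXT); |
| | */ |
| | |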
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4469 | void __init skb_init(void) |
| 4470 | { |
Kees Cook | 79a8a64 | 2018-02-07 17:44:38 -0800 | [diff] [blame] | 4471 | skbuff_head_cache = kmem_cache_create_usercopy("skbuff_head_cache", |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4472 | sizeof(struct sk_buff), |
| 4473 | 0, |
Alexey Dobriyan | e5d679f33 | 2006-08-26 19:25:52 -0700 | [diff] [blame] | 4474 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, |
Kees Cook | 79a8a64 | 2018-02-07 17:44:38 -0800 | [diff] [blame] | 4475 | offsetof(struct sk_buff, cb), |
| 4476 | sizeof_field(struct sk_buff, cb), |
Paul Mundt | 20c2df8 | 2007-07-20 10:11:58 +0900 | [diff] [blame] | 4477 | NULL); |
David S. Miller | d179cd1 | 2005-08-17 14:57:30 -0700 | [diff] [blame] | 4478 | skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", |
Eric Dumazet | d0bf4a9 | 2014-09-29 13:29:15 -0700 | [diff] [blame] | 4479 | sizeof(struct sk_buff_fclones), |
David S. Miller | d179cd1 | 2005-08-17 14:57:30 -0700 | [diff] [blame] | 4480 | 0, |
Alexey Dobriyan | e5d679f33 | 2006-08-26 19:25:52 -0700 | [diff] [blame] | 4481 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, |
Paul Mundt | 20c2df8 | 2007-07-20 10:11:58 +0900 | [diff] [blame] | 4482 | NULL); |
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 4483 | skb_extensions_init(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4484 | } |
| 4485 | |
David S. Miller | 51c739d | 2007-10-30 21:29:29 -0700 | [diff] [blame] | 4486 | static int |
Jason A. Donenfeld | 48a1df6 | 2017-06-04 04:16:22 +0200 | [diff] [blame] | 4487 | __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len, |
| 4488 | unsigned int recursion_level) |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4489 | { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 4490 | int start = skb_headlen(skb); |
| 4491 | int i, copy = start - offset; |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 4492 | struct sk_buff *frag_iter; |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4493 | int elt = 0; |
| 4494 | |
Jason A. Donenfeld | 48a1df6 | 2017-06-04 04:16:22 +0200 | [diff] [blame] | 4495 | if (unlikely(recursion_level >= 24)) |
| 4496 | return -EMSGSIZE; |
| 4497 | |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4498 | if (copy > 0) { |
| 4499 | if (copy > len) |
| 4500 | copy = len; |
Jens Axboe | 642f14903 | 2007-10-24 11:20:47 +0200 | [diff] [blame] | 4501 | sg_set_buf(sg, skb->data + offset, copy); |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4502 | elt++; |
| 4503 | if ((len -= copy) == 0) |
| 4504 | return elt; |
| 4505 | offset += copy; |
| 4506 | } |
| 4507 | |
| 4508 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 4509 | int end; |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4510 | |
Ilpo Järvinen | 547b792 | 2008-07-25 21:43:18 -0700 | [diff] [blame] | 4511 | WARN_ON(start > offset + len); |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 4512 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 4513 | end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4514 | if ((copy = end - offset) > 0) { |
| 4515 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
Jason A. Donenfeld | 48a1df6 | 2017-06-04 04:16:22 +0200 | [diff] [blame] | 4516 | if (unlikely(elt && sg_is_last(&sg[elt - 1]))) |
| 4517 | return -EMSGSIZE; |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4518 | |
| 4519 | if (copy > len) |
| 4520 | copy = len; |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 4521 | sg_set_page(&sg[elt], skb_frag_page(frag), copy, |
Jonathan Lemon | b54c9d5 | 2019-07-30 07:40:33 -0700 | [diff] [blame] | 4522 | skb_frag_off(frag) + offset - start); |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4523 | elt++; |
| 4524 | if (!(len -= copy)) |
| 4525 | return elt; |
| 4526 | offset += copy; |
| 4527 | } |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 4528 | start = end; |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4529 | } |
| 4530 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 4531 | skb_walk_frags(skb, frag_iter) { |
Jason A. Donenfeld | 48a1df6 | 2017-06-04 04:16:22 +0200 | [diff] [blame] | 4532 | int end, ret; |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4533 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 4534 | WARN_ON(start > offset + len); |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4535 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 4536 | end = start + frag_iter->len; |
| 4537 | if ((copy = end - offset) > 0) { |
Jason A. Donenfeld | 48a1df6 | 2017-06-04 04:16:22 +0200 | [diff] [blame] | 4538 | if (unlikely(elt && sg_is_last(&sg[elt - 1]))) |
| 4539 | return -EMSGSIZE; |
| 4540 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 4541 | if (copy > len) |
| 4542 | copy = len; |
Jason A. Donenfeld | 48a1df6 | 2017-06-04 04:16:22 +0200 | [diff] [blame] | 4543 | ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start, |
| 4544 | copy, recursion_level + 1); |
| 4545 | if (unlikely(ret < 0)) |
| 4546 | return ret; |
| 4547 | elt += ret; |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 4548 | if ((len -= copy) == 0) |
| 4549 | return elt; |
| 4550 | offset += copy; |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4551 | } |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 4552 | start = end; |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4553 | } |
| 4554 | BUG_ON(len); |
| 4555 | return elt; |
| 4556 | } |
| 4557 | |
Jason A. Donenfeld | 48a1df6 | 2017-06-04 04:16:22 +0200 | [diff] [blame] | 4558 | /** |
| 4559 | * skb_to_sgvec - Fill a scatter-gather list from a socket buffer |
| 4560 | * @skb: Socket buffer containing the buffers to be mapped |
| 4561 | * @sg: The scatter-gather list to map into |
| 4562 | * @offset: The offset into the buffer's contents to start mapping |
| 4563 | * @len: Length of buffer space to be mapped |
| 4564 | * |
| 4565 | * Fill the specified scatter-gather list with mappings/pointers into a |
| 4566 | * region of the buffer space attached to a socket buffer. Returns either |
| 4567 | * the number of scatterlist items used, or -EMSGSIZE if the contents |
| 4568 | * could not fit. |
| 4569 | */ |
| 4570 | int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) |
| 4571 | { |
| 4572 | int nsg = __skb_to_sgvec(skb, sg, offset, len, 0); |
| 4573 | |
| 4574 | if (nsg <= 0) |
| 4575 | return nsg; |
| 4576 | |
| 4577 | sg_mark_end(&sg[nsg - 1]); |
| 4578 | |
| 4579 | return nsg; |
| 4580 | } |
| 4581 | EXPORT_SYMBOL_GPL(skb_to_sgvec); |
| 4582 | |
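| | /* Illustrative usage (not part of the build): map a whole skb for a |
| | * crypto operation - a minimal sketch, assuming the fragment count |
| | * fits the on-stack table: |
| | * |
| | *	struct scatterlist sg[MAX_SKB_FRAGS + 1]; |
| | *	int nsg; |
| | * |
| | *	sg_init_table(sg, ARRAY_SIZE(sg)); |
| | *	nsg = skb_to_sgvec(skb, sg, 0, skb->len); |
| | *	if (nsg < 0) |
| | *		return nsg;		// -EMSGSIZE: did not fit |
| | */ |
| | |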
Fan Du | 25a91d8 | 2014-01-18 09:54:23 +0800 | [diff] [blame] | 4583 | /* As compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the |
| 4584 | * given sglist, without marking the sg entry that contains the last skb data |
| 4585 | * as the end. So the caller can manipulate the sg list at will when padding |
| 4586 | * new data after the first call, without calling sg_unmark_end to expand it. |
| 4587 | * |
| 4588 | * Scenario to use skb_to_sgvec_nomark: |
| 4589 | * 1. sg_init_table |
| 4590 | * 2. skb_to_sgvec_nomark(payload1) |
| 4591 | * 3. skb_to_sgvec_nomark(payload2) |
| 4592 | * |
| 4593 | * This is equivalent to: |
| 4594 | * 1. sg_init_table |
| 4595 | * 2. skb_to_sgvec(payload1) |
| 4596 | * 3. sg_unmark_end |
| 4597 | * 4. skb_to_sgvec(payload2) |
| 4598 | * |
| 4599 | * When mapping multiple payloads conditionally, skb_to_sgvec_nomark |
| 4600 | * is preferable. |
| 4601 | */ |
| 4602 | int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, |
| 4603 | int offset, int len) |
| 4604 | { |
Jason A. Donenfeld | 48a1df6 | 2017-06-04 04:16:22 +0200 | [diff] [blame] | 4605 | return __skb_to_sgvec(skb, sg, offset, len, 0); |
Fan Du | 25a91d8 | 2014-01-18 09:54:23 +0800 | [diff] [blame] | 4606 | } |
| 4607 | EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark); |
| 4608 | |
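| | /* Illustrative code for the scenario above (not part of the build) - a |
| | * sketch where sg, nents, hdr_off/hdr_len and data_off/data_len are all |
| | * hypothetical caller-supplied values: |
| | * |
| | *	sg_init_table(sg, nents); |
| | *	n = skb_to_sgvec_nomark(skb, sg, hdr_off, hdr_len);	// payload1 |
| | *	if (n < 0) |
| | *		return n; |
| | *	n = skb_to_sgvec_nomark(skb, sg + n, data_off, data_len); // payload2 |
| | */ |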
David S. Miller | 51c739d | 2007-10-30 21:29:29 -0700 | [diff] [blame] | 4609 | |
David S. Miller | 51c739d | 2007-10-30 21:29:29 -0700 | [diff] [blame] | 4610 | |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4611 | /** |
| 4612 | * skb_cow_data - Check that a socket buffer's data buffers are writable |
| 4613 | * @skb: The socket buffer to check. |
| 4614 | * @tailbits: Amount of trailing space to be added |
| 4615 | * @trailer: Returned pointer to the skb where the @tailbits space begins |
| 4616 | * |
| 4617 | * Make sure that the data buffers attached to a socket buffer are |
| 4618 | * writable. If they are not, private copies are made of the data buffers |
| 4619 | * and the socket buffer is set to use these instead. |
| 4620 | * |
| 4621 | * If @tailbits is given, make sure that there is space to write @tailbits |
| 4622 | * bytes of data beyond current end of socket buffer. @trailer will be |
| 4623 | * set to point to the skb in which this space begins. |
| 4624 | * |
| 4625 | * The number of scatterlist elements required to completely map the |
| 4626 | * COW'd and extended socket buffer will be returned. |
| 4627 | */ |
| 4628 | int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) |
| 4629 | { |
| 4630 | int copyflag; |
| 4631 | int elt; |
| 4632 | struct sk_buff *skb1, **skb_p; |
| 4633 | |
| 4634 | /* If skb is cloned or its head is paged, reallocate |
| 4635 | * head pulling out all the pages (pages are considered not writable |
| 4636 | * at the moment even if they are anonymous). |
| 4637 | */ |
| 4638 | if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && |
Miaohe Lin | c15fc19 | 2020-08-01 17:30:23 +0800 | [diff] [blame] | 4639 | !__pskb_pull_tail(skb, __skb_pagelen(skb))) |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4640 | return -ENOMEM; |
| 4641 | |
| 4642 | /* Easy case. Most of packets will go this way. */ |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 4643 | if (!skb_has_frag_list(skb)) { |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4644 | /* A little of trouble, not enough of space for trailer. |
| 4645 | * This should not happen, when stack is tuned to generate |
| 4646 | * good frames. OK, on miss we reallocate and reserve even more |
| 4647 | * space, 128 bytes is fair. */ |
| 4648 | |
| 4649 | if (skb_tailroom(skb) < tailbits && |
| 4650 | pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) |
| 4651 | return -ENOMEM; |
| 4652 | |
| 4653 | /* Voila! */ |
| 4654 | *trailer = skb; |
| 4655 | return 1; |
| 4656 | } |
| 4657 | |
| 4658 | /* Misery. We are in trouble; time to mince the fragments... */ |
| 4659 | |
| 4660 | elt = 1; |
| 4661 | skb_p = &skb_shinfo(skb)->frag_list; |
| 4662 | copyflag = 0; |
| 4663 | |
| 4664 | while ((skb1 = *skb_p) != NULL) { |
| 4665 | int ntail = 0; |
| 4666 | |
| 4667 | /* The fragment is partially pulled by someone; |
| 4668 | * this can happen on input. Copy it and everything |
| 4669 | * after it. */ |
| 4670 | |
| 4671 | if (skb_shared(skb1)) |
| 4672 | copyflag = 1; |
| 4673 | |
| 4674 | /* If the skb is the last, worry about trailer. */ |
| 4675 | |
| 4676 | if (skb1->next == NULL && tailbits) { |
| 4677 | if (skb_shinfo(skb1)->nr_frags || |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 4678 | skb_has_frag_list(skb1) || |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4679 | skb_tailroom(skb1) < tailbits) |
| 4680 | ntail = tailbits + 128; |
| 4681 | } |
| 4682 | |
| 4683 | if (copyflag || |
| 4684 | skb_cloned(skb1) || |
| 4685 | ntail || |
| 4686 | skb_shinfo(skb1)->nr_frags || |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 4687 | skb_has_frag_list(skb1)) { |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4688 | struct sk_buff *skb2; |
| 4689 | |
| 4690 | /* No way around it: make a private copy... */ |
| 4691 | if (ntail == 0) |
| 4692 | skb2 = skb_copy(skb1, GFP_ATOMIC); |
| 4693 | else |
| 4694 | skb2 = skb_copy_expand(skb1, |
| 4695 | skb_headroom(skb1), |
| 4696 | ntail, |
| 4697 | GFP_ATOMIC); |
| 4698 | if (unlikely(skb2 == NULL)) |
| 4699 | return -ENOMEM; |
| 4700 | |
| 4701 | if (skb1->sk) |
| 4702 | skb_set_owner_w(skb2, skb1->sk); |
| 4703 | |
| 4704 | /* Looking around. Are we still alive? |
| 4705 | * OK, link the new skb, drop the old one */ |
| 4706 | |
| 4707 | skb2->next = skb1->next; |
| 4708 | *skb_p = skb2; |
| 4709 | kfree_skb(skb1); |
| 4710 | skb1 = skb2; |
| 4711 | } |
| 4712 | elt++; |
| 4713 | *trailer = skb1; |
| 4714 | skb_p = &skb1->next; |
| 4715 | } |
| 4716 | |
| 4717 | return elt; |
| 4718 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 4719 | EXPORT_SYMBOL_GPL(skb_cow_data); |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4720 | |
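| | /* Illustrative usage (not part of the build): an IPsec-style transform |
| | * that needs to append a trailer - a minimal sketch, assuming tailen |
| | * bytes of trailer space are wanted: |
| | * |
| | *	struct sk_buff *trailer; |
| | *	int nfrags = skb_cow_data(skb, tailen, &trailer); |
| | * |
| | *	if (nfrags < 0) |
| | *		return nfrags; |
| | *	tail = pskb_put(skb, trailer, tailen);	// now writable |
| | */ |
| | |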
Eric Dumazet | b1faf56 | 2010-05-31 23:44:05 -0700 | [diff] [blame] | 4721 | static void sock_rmem_free(struct sk_buff *skb) |
| 4722 | { |
| 4723 | struct sock *sk = skb->sk; |
| 4724 | |
| 4725 | atomic_sub(skb->truesize, &sk->sk_rmem_alloc); |
| 4726 | } |
| 4727 | |
Soheil Hassas Yeganeh | 8605330a | 2017-03-18 17:02:59 -0400 | [diff] [blame] | 4728 | static void skb_set_err_queue(struct sk_buff *skb) |
| 4729 | { |
| 4730 | /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING. |
| 4731 | * So, it is safe to (mis)use it to mark skbs on the error queue. |
| 4732 | */ |
| 4733 | skb->pkt_type = PACKET_OUTGOING; |
| 4734 | BUILD_BUG_ON(PACKET_OUTGOING == 0); |
| 4735 | } |
| 4736 | |
Eric Dumazet | b1faf56 | 2010-05-31 23:44:05 -0700 | [diff] [blame] | 4737 | /* |
| 4738 | * Note: We don't mem-charge error packets (no sk_forward_alloc changes) |
| 4739 | */ |
| 4740 | int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) |
| 4741 | { |
| 4742 | if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= |
Eric Dumazet | ebb3b78 | 2019-10-10 20:17:44 -0700 | [diff] [blame] | 4743 | (unsigned int)READ_ONCE(sk->sk_rcvbuf)) |
Eric Dumazet | b1faf56 | 2010-05-31 23:44:05 -0700 | [diff] [blame] | 4744 | return -ENOMEM; |
| 4745 | |
| 4746 | skb_orphan(skb); |
| 4747 | skb->sk = sk; |
| 4748 | skb->destructor = sock_rmem_free; |
| 4749 | atomic_add(skb->truesize, &sk->sk_rmem_alloc); |
Soheil Hassas Yeganeh | 8605330a | 2017-03-18 17:02:59 -0400 | [diff] [blame] | 4750 | skb_set_err_queue(skb); |
Eric Dumazet | b1faf56 | 2010-05-31 23:44:05 -0700 | [diff] [blame] | 4751 | |
Eric Dumazet | abb57ea | 2011-05-18 02:21:31 -0400 | [diff] [blame] | 4752 | /* before exiting rcu section, make sure dst is refcounted */ |
| 4753 | skb_dst_force(skb); |
| 4754 | |
Eric Dumazet | b1faf56 | 2010-05-31 23:44:05 -0700 | [diff] [blame] | 4755 | skb_queue_tail(&sk->sk_error_queue, skb); |
| 4756 | if (!sock_flag(sk, SOCK_DEAD)) |
Alexander Aring | e3ae236 | 2021-06-27 18:48:21 -0400 | [diff] [blame] | 4757 | sk_error_report(sk); |
Eric Dumazet | b1faf56 | 2010-05-31 23:44:05 -0700 | [diff] [blame] | 4758 | return 0; |
| 4759 | } |
| 4760 | EXPORT_SYMBOL(sock_queue_err_skb); |
| 4761 | |
Soheil Hassas Yeganeh | 83a1a1a | 2016-11-30 14:01:08 -0500 | [diff] [blame] | 4762 | static bool is_icmp_err_skb(const struct sk_buff *skb) |
| 4763 | { |
| 4764 | return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP || |
| 4765 | SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6); |
| 4766 | } |
| 4767 | |
Willem de Bruijn | 364a9e9 | 2014-08-31 21:30:27 -0400 | [diff] [blame] | 4768 | struct sk_buff *sock_dequeue_err_skb(struct sock *sk) |
| 4769 | { |
| 4770 | struct sk_buff_head *q = &sk->sk_error_queue; |
Soheil Hassas Yeganeh | 83a1a1a | 2016-11-30 14:01:08 -0500 | [diff] [blame] | 4771 | struct sk_buff *skb, *skb_next = NULL; |
| 4772 | bool icmp_next = false; |
Eric Dumazet | 997d5c3 | 2015-02-18 05:47:55 -0800 | [diff] [blame] | 4773 | unsigned long flags; |
Willem de Bruijn | 364a9e9 | 2014-08-31 21:30:27 -0400 | [diff] [blame] | 4774 | |
Eric Dumazet | 997d5c3 | 2015-02-18 05:47:55 -0800 | [diff] [blame] | 4775 | spin_lock_irqsave(&q->lock, flags); |
Willem de Bruijn | 364a9e9 | 2014-08-31 21:30:27 -0400 | [diff] [blame] | 4776 | skb = __skb_dequeue(q); |
Soheil Hassas Yeganeh | 38b2579 | 2017-06-02 12:38:22 -0400 | [diff] [blame] | 4777 | if (skb && (skb_next = skb_peek(q))) { |
Soheil Hassas Yeganeh | 83a1a1a | 2016-11-30 14:01:08 -0500 | [diff] [blame] | 4778 | icmp_next = is_icmp_err_skb(skb_next); |
Soheil Hassas Yeganeh | 38b2579 | 2017-06-02 12:38:22 -0400 | [diff] [blame] | 4779 | if (icmp_next) |
Willem de Bruijn | 985f733 | 2020-11-26 10:12:20 -0500 | [diff] [blame] | 4780 | sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_errno; |
Soheil Hassas Yeganeh | 38b2579 | 2017-06-02 12:38:22 -0400 | [diff] [blame] | 4781 | } |
Eric Dumazet | 997d5c3 | 2015-02-18 05:47:55 -0800 | [diff] [blame] | 4782 | spin_unlock_irqrestore(&q->lock, flags); |
Willem de Bruijn | 364a9e9 | 2014-08-31 21:30:27 -0400 | [diff] [blame] | 4783 | |
Soheil Hassas Yeganeh | 83a1a1a | 2016-11-30 14:01:08 -0500 | [diff] [blame] | 4784 | if (is_icmp_err_skb(skb) && !icmp_next) |
| 4785 | sk->sk_err = 0; |
| 4786 | |
| 4787 | if (skb_next) |
Alexander Aring | e3ae236 | 2021-06-27 18:48:21 -0400 | [diff] [blame] | 4788 | sk_error_report(sk); |
Willem de Bruijn | 364a9e9 | 2014-08-31 21:30:27 -0400 | [diff] [blame] | 4789 | |
| 4790 | return skb; |
| 4791 | } |
| 4792 | EXPORT_SYMBOL(sock_dequeue_err_skb); |
| 4793 | |
Alexander Duyck | cab41c4 | 2014-09-10 18:05:26 -0400 | [diff] [blame] | 4794 | /** |
| 4795 | * skb_clone_sk - create clone of skb, and take reference to socket |
| 4796 | * @skb: the skb to clone |
| 4797 | * |
| 4798 | * This function creates a clone of a buffer that holds a reference on |
| 4799 | * sk_refcnt. Buffers created via this function are meant to be |
| 4800 | * returned using sock_queue_err_skb, or freed via kfree_skb. |
| 4801 | * |
| 4802 | * When passing buffers allocated with this function to sock_queue_err_skb |
| 4803 | * it is necessary to wrap the call with sock_hold/sock_put in order to |
| 4804 | * prevent the socket from being released prior to being enqueued on |
| 4805 | * the sk_error_queue. |
| 4806 | */ |
Alexander Duyck | 62bccb8 | 2014-09-04 13:31:35 -0400 | [diff] [blame] | 4807 | struct sk_buff *skb_clone_sk(struct sk_buff *skb) |
| 4808 | { |
| 4809 | struct sock *sk = skb->sk; |
| 4810 | struct sk_buff *clone; |
| 4811 | |
Reshetova, Elena | 41c6d65 | 2017-06-30 13:08:01 +0300 | [diff] [blame] | 4812 | if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt)) |
Alexander Duyck | 62bccb8 | 2014-09-04 13:31:35 -0400 | [diff] [blame] | 4813 | return NULL; |
| 4814 | |
| 4815 | clone = skb_clone(skb, GFP_ATOMIC); |
| 4816 | if (!clone) { |
| 4817 | sock_put(sk); |
| 4818 | return NULL; |
| 4819 | } |
| 4820 | |
| 4821 | clone->sk = sk; |
| 4822 | clone->destructor = sock_efree; |
| 4823 | |
| 4824 | return clone; |
| 4825 | } |
| 4826 | EXPORT_SYMBOL(skb_clone_sk); |
| 4827 | |
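| | /* Illustrative usage (not part of the build): queueing a clone on the |
| | * error queue while pinning the socket, as the comment above requires - |
| | * a minimal sketch, assuming sk = skb->sk is known to be valid: |
| | * |
| | *	struct sk_buff *clone = skb_clone_sk(skb); |
| | * |
| | *	if (!clone) |
| | *		return; |
| | *	sock_hold(sk); |
| | *	if (sock_queue_err_skb(sk, clone)) |
| | *		kfree_skb(clone); |
| | *	sock_put(sk); |
| | */ |
| | |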
Alexander Duyck | 37846ef | 2014-09-04 13:31:10 -0400 | [diff] [blame] | 4828 | static void __skb_complete_tx_timestamp(struct sk_buff *skb, |
| 4829 | struct sock *sk, |
Soheil Hassas Yeganeh | 4ef1b28 | 2017-03-18 17:03:00 -0400 | [diff] [blame] | 4830 | int tstype, |
| 4831 | bool opt_stats) |
Patrick Ohly | ac45f60 | 2009-02-12 05:03:37 +0000 | [diff] [blame] | 4832 | { |
Patrick Ohly | ac45f60 | 2009-02-12 05:03:37 +0000 | [diff] [blame] | 4833 | struct sock_exterr_skb *serr; |
Patrick Ohly | ac45f60 | 2009-02-12 05:03:37 +0000 | [diff] [blame] | 4834 | int err; |
| 4835 | |
Soheil Hassas Yeganeh | 4ef1b28 | 2017-03-18 17:03:00 -0400 | [diff] [blame] | 4836 | BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb)); |
| 4837 | |
Patrick Ohly | ac45f60 | 2009-02-12 05:03:37 +0000 | [diff] [blame] | 4838 | serr = SKB_EXT_ERR(skb); |
| 4839 | memset(serr, 0, sizeof(*serr)); |
| 4840 | serr->ee.ee_errno = ENOMSG; |
| 4841 | serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; |
Willem de Bruijn | e7fd288 | 2014-08-04 22:11:48 -0400 | [diff] [blame] | 4842 | serr->ee.ee_info = tstype; |
Soheil Hassas Yeganeh | 4ef1b28 | 2017-03-18 17:03:00 -0400 | [diff] [blame] | 4843 | serr->opt_stats = opt_stats; |
Willem de Bruijn | 1862d62 | 2017-04-12 19:24:35 -0400 | [diff] [blame] | 4844 | serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0; |
Willem de Bruijn | 4ed2d76 | 2014-08-04 22:11:49 -0400 | [diff] [blame] | 4845 | if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) { |
Willem de Bruijn | 09c2d25 | 2014-08-04 22:11:47 -0400 | [diff] [blame] | 4846 | serr->ee.ee_data = skb_shinfo(skb)->tskey; |
WANG Cong | ac5cc97 | 2015-12-16 23:39:04 -0800 | [diff] [blame] | 4847 | if (sk->sk_protocol == IPPROTO_TCP && |
| 4848 | sk->sk_type == SOCK_STREAM) |
Willem de Bruijn | 4ed2d76 | 2014-08-04 22:11:49 -0400 | [diff] [blame] | 4849 | serr->ee.ee_data -= sk->sk_tskey; |
| 4850 | } |
Eric Dumazet | 2903037 | 2010-05-29 00:20:48 -0700 | [diff] [blame] | 4851 | |
Patrick Ohly | ac45f60 | 2009-02-12 05:03:37 +0000 | [diff] [blame] | 4852 | err = sock_queue_err_skb(sk, skb); |
Eric Dumazet | 2903037 | 2010-05-29 00:20:48 -0700 | [diff] [blame] | 4853 | |
Patrick Ohly | ac45f60 | 2009-02-12 05:03:37 +0000 | [diff] [blame] | 4854 | if (err) |
| 4855 | kfree_skb(skb); |
| 4856 | } |
Alexander Duyck | 37846ef | 2014-09-04 13:31:10 -0400 | [diff] [blame] | 4857 | |
Willem de Bruijn | b245be1 | 2015-01-30 13:29:32 -0500 | [diff] [blame] | 4858 | static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly) |
| 4859 | { |
| 4860 | bool ret; |
| 4861 | |
| 4862 | if (likely(sysctl_tstamp_allow_data || tsonly)) |
| 4863 | return true; |
| 4864 | |
| 4865 | read_lock_bh(&sk->sk_callback_lock); |
| 4866 | ret = sk->sk_socket && sk->sk_socket->file && |
| 4867 | file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW); |
| 4868 | read_unlock_bh(&sk->sk_callback_lock); |
| 4869 | return ret; |
| 4870 | } |
| 4871 | |
Alexander Duyck | 37846ef | 2014-09-04 13:31:10 -0400 | [diff] [blame] | 4872 | void skb_complete_tx_timestamp(struct sk_buff *skb, |
| 4873 | struct skb_shared_hwtstamps *hwtstamps) |
| 4874 | { |
| 4875 | struct sock *sk = skb->sk; |
| 4876 | |
Willem de Bruijn | b245be1 | 2015-01-30 13:29:32 -0500 | [diff] [blame] | 4877 | if (!skb_may_tx_timestamp(sk, false)) |
Willem de Bruijn | 35b99df | 2017-12-13 14:41:06 -0500 | [diff] [blame] | 4878 | goto err; |
Willem de Bruijn | b245be1 | 2015-01-30 13:29:32 -0500 | [diff] [blame] | 4879 | |
Eric Dumazet | 9ac25fc | 2017-03-03 21:01:03 -0800 | [diff] [blame] | 4880 | /* Take a reference to prevent skb_orphan() from freeing the socket, |
| 4881 | * but only if the socket refcount is not zero. |
| 4882 | */ |
Reshetova, Elena | 41c6d65 | 2017-06-30 13:08:01 +0300 | [diff] [blame] | 4883 | if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { |
Eric Dumazet | 9ac25fc | 2017-03-03 21:01:03 -0800 | [diff] [blame] | 4884 | *skb_hwtstamps(skb) = *hwtstamps; |
Soheil Hassas Yeganeh | 4ef1b28 | 2017-03-18 17:03:00 -0400 | [diff] [blame] | 4885 | __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false); |
Eric Dumazet | 9ac25fc | 2017-03-03 21:01:03 -0800 | [diff] [blame] | 4886 | sock_put(sk); |
Willem de Bruijn | 35b99df | 2017-12-13 14:41:06 -0500 | [diff] [blame] | 4887 | return; |
Eric Dumazet | 9ac25fc | 2017-03-03 21:01:03 -0800 | [diff] [blame] | 4888 | } |
Willem de Bruijn | 35b99df | 2017-12-13 14:41:06 -0500 | [diff] [blame] | 4889 | |
| 4890 | err: |
| 4891 | kfree_skb(skb); |
Alexander Duyck | 37846ef | 2014-09-04 13:31:10 -0400 | [diff] [blame] | 4892 | } |
| 4893 | EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); |
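/* Example sketch (hypothetical): the matching completion step once the NIC
 * reports the transmit time for a clone obtained via skb_clone_sk(). The
 * clone is consumed either way - queued on the socket's error queue on
 * success, freed otherwise.
 */
static void example_tx_tstamp_done(struct sk_buff *clone, u64 ns)
{
	struct skb_shared_hwtstamps hwts = {};

	hwts.hwtstamp = ns_to_ktime(ns);
	skb_complete_tx_timestamp(clone, &hwts);
}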
| 4894 | |
| 4895 | void __skb_tstamp_tx(struct sk_buff *orig_skb, |
Yousuk Seung | e7ed11e | 2021-01-20 12:41:55 -0800 | [diff] [blame] | 4896 | const struct sk_buff *ack_skb, |
Alexander Duyck | 37846ef | 2014-09-04 13:31:10 -0400 | [diff] [blame] | 4897 | struct skb_shared_hwtstamps *hwtstamps, |
| 4898 | struct sock *sk, int tstype) |
| 4899 | { |
| 4900 | struct sk_buff *skb; |
Soheil Hassas Yeganeh | 4ef1b28 | 2017-03-18 17:03:00 -0400 | [diff] [blame] | 4901 | bool tsonly, opt_stats = false; |
Alexander Duyck | 37846ef | 2014-09-04 13:31:10 -0400 | [diff] [blame] | 4902 | |
Willem de Bruijn | 3a8dd97 | 2015-03-11 15:43:55 -0400 | [diff] [blame] | 4903 | if (!sk) |
| 4904 | return; |
| 4905 | |
Miroslav Lichvar | b50a5c7 | 2017-05-19 17:52:40 +0200 | [diff] [blame] | 4906 | if (!hwtstamps && !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) && |
| 4907 | skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS) |
| 4908 | return; |
| 4909 | |
Willem de Bruijn | 3a8dd97 | 2015-03-11 15:43:55 -0400 | [diff] [blame] | 4910 | tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY; |
| 4911 | if (!skb_may_tx_timestamp(sk, tsonly)) |
Alexander Duyck | 37846ef | 2014-09-04 13:31:10 -0400 | [diff] [blame] | 4912 | return; |
| 4913 | |
Francis Yan | 1c88580 | 2016-11-27 23:07:18 -0800 | [diff] [blame] | 4914 | if (tsonly) { |
| 4915 | #ifdef CONFIG_INET |
| 4916 | if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) && |
| 4917 | sk->sk_protocol == IPPROTO_TCP && |
Soheil Hassas Yeganeh | 4ef1b28 | 2017-03-18 17:03:00 -0400 | [diff] [blame] | 4918 | sk->sk_type == SOCK_STREAM) { |
Yousuk Seung | e7ed11e | 2021-01-20 12:41:55 -0800 | [diff] [blame] | 4919 | skb = tcp_get_timestamping_opt_stats(sk, orig_skb, |
| 4920 | ack_skb); |
Soheil Hassas Yeganeh | 4ef1b28 | 2017-03-18 17:03:00 -0400 | [diff] [blame] | 4921 | opt_stats = true; |
| 4922 | } else |
Francis Yan | 1c88580 | 2016-11-27 23:07:18 -0800 | [diff] [blame] | 4923 | #endif |
| 4924 | skb = alloc_skb(0, GFP_ATOMIC); |
| 4925 | } else { |
Willem de Bruijn | 49ca0d8 | 2015-01-30 13:29:31 -0500 | [diff] [blame] | 4926 | skb = skb_clone(orig_skb, GFP_ATOMIC); |
Francis Yan | 1c88580 | 2016-11-27 23:07:18 -0800 | [diff] [blame] | 4927 | } |
Alexander Duyck | 37846ef | 2014-09-04 13:31:10 -0400 | [diff] [blame] | 4928 | if (!skb) |
| 4929 | return; |
| 4930 | |
Willem de Bruijn | 49ca0d8 | 2015-01-30 13:29:31 -0500 | [diff] [blame] | 4931 | if (tsonly) { |
Willem de Bruijn | fff8803 | 2017-06-08 11:35:03 -0400 | [diff] [blame] | 4932 | skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags & |
| 4933 | SKBTX_ANY_TSTAMP; |
Willem de Bruijn | 49ca0d8 | 2015-01-30 13:29:31 -0500 | [diff] [blame] | 4934 | skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; |
| 4935 | } |
| 4936 | |
| 4937 | if (hwtstamps) |
| 4938 | *skb_hwtstamps(skb) = *hwtstamps; |
| 4939 | else |
| 4940 | skb->tstamp = ktime_get_real(); |
| 4941 | |
Soheil Hassas Yeganeh | 4ef1b28 | 2017-03-18 17:03:00 -0400 | [diff] [blame] | 4942 | __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats); |
Alexander Duyck | 37846ef | 2014-09-04 13:31:10 -0400 | [diff] [blame] | 4943 | } |
Willem de Bruijn | e7fd288 | 2014-08-04 22:11:48 -0400 | [diff] [blame] | 4944 | EXPORT_SYMBOL_GPL(__skb_tstamp_tx); |
| 4945 | |
| 4946 | void skb_tstamp_tx(struct sk_buff *orig_skb, |
| 4947 | struct skb_shared_hwtstamps *hwtstamps) |
| 4948 | { |
Yousuk Seung | e7ed11e | 2021-01-20 12:41:55 -0800 | [diff] [blame] | 4949 | return __skb_tstamp_tx(orig_skb, NULL, hwtstamps, orig_skb->sk, |
Willem de Bruijn | e7fd288 | 2014-08-04 22:11:48 -0400 | [diff] [blame] | 4950 | SCM_TSTAMP_SND); |
| 4951 | } |
Patrick Ohly | ac45f60 | 2009-02-12 05:03:37 +0000 | [diff] [blame] | 4952 | EXPORT_SYMBOL_GPL(skb_tstamp_tx); |
| 4953 | |
Johannes Berg | 6e3e939 | 2011-11-09 10:15:42 +0100 | [diff] [blame] | 4954 | void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) |
| 4955 | { |
| 4956 | struct sock *sk = skb->sk; |
| 4957 | struct sock_exterr_skb *serr; |
Eric Dumazet | dd4f107 | 2017-03-03 21:01:02 -0800 | [diff] [blame] | 4958 | int err = 1; |
Johannes Berg | 6e3e939 | 2011-11-09 10:15:42 +0100 | [diff] [blame] | 4959 | |
| 4960 | skb->wifi_acked_valid = 1; |
| 4961 | skb->wifi_acked = acked; |
| 4962 | |
| 4963 | serr = SKB_EXT_ERR(skb); |
| 4964 | memset(serr, 0, sizeof(*serr)); |
| 4965 | serr->ee.ee_errno = ENOMSG; |
| 4966 | serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; |
| 4967 | |
Eric Dumazet | dd4f107 | 2017-03-03 21:01:02 -0800 | [diff] [blame] | 4968 | /* Take a reference to prevent skb_orphan() from freeing the socket, |
| 4969 | * but only if the socket refcount is not zero. |
| 4970 | */ |
Reshetova, Elena | 41c6d65 | 2017-06-30 13:08:01 +0300 | [diff] [blame] | 4971 | if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { |
Eric Dumazet | dd4f107 | 2017-03-03 21:01:02 -0800 | [diff] [blame] | 4972 | err = sock_queue_err_skb(sk, skb); |
| 4973 | sock_put(sk); |
| 4974 | } |
Johannes Berg | 6e3e939 | 2011-11-09 10:15:42 +0100 | [diff] [blame] | 4975 | if (err) |
| 4976 | kfree_skb(skb); |
| 4977 | } |
| 4978 | EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); |
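/* Example sketch in the spirit of the mac80211 TX status path (not the
 * actual mac80211 code): only frames whose socket requested wifi status
 * reporting (SKBTX_WIFI_STATUS) are handed to skb_complete_wifi_ack();
 * everything else is simply freed.
 */
static void example_wifi_tx_status(struct sk_buff *skb, bool acked)
{
	if (skb->sk && (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS))
		skb_complete_wifi_ack(skb, acked);
	else
		dev_kfree_skb(skb);
}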
| 4979 | |
Rusty Russell | f35d9d8 | 2008-02-04 23:49:54 -0500 | [diff] [blame] | 4980 | /** |
| 4981 | * skb_partial_csum_set - set up and verify partial csum values for packet |
| 4982 | * @skb: the skb to set |
| 4983 | * @start: the number of bytes after skb->data to start checksumming. |
| 4984 | * @off: the offset from start to place the checksum. |
| 4985 | * |
| 4986 | * For untrusted partially-checksummed packets, we need to make sure the values |
| 4987 | * for skb->csum_start and skb->csum_offset are valid so we don't oops. |
| 4988 | * |
| 4989 | * This function checks and sets those values and skb->ip_summed: if this |
| 4990 | * returns false you should drop the packet. |
| 4991 | */ |
| 4992 | bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) |
| 4993 | { |
Eric Dumazet | 52b5d6f | 2018-10-10 06:59:35 -0700 | [diff] [blame] | 4994 | u32 csum_end = (u32)start + (u32)off + sizeof(__sum16); |
| 4995 | u32 csum_start = skb_headroom(skb) + (u32)start; |
| 4996 | |
| 4997 | if (unlikely(csum_start > U16_MAX || csum_end > skb_headlen(skb))) { |
| 4998 | net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n", |
| 4999 | start, off, skb_headroom(skb), skb_headlen(skb)); |
Rusty Russell | f35d9d8 | 2008-02-04 23:49:54 -0500 | [diff] [blame] | 5000 | return false; |
| 5001 | } |
| 5002 | skb->ip_summed = CHECKSUM_PARTIAL; |
Eric Dumazet | 52b5d6f | 2018-10-10 06:59:35 -0700 | [diff] [blame] | 5003 | skb->csum_start = csum_start; |
Rusty Russell | f35d9d8 | 2008-02-04 23:49:54 -0500 | [diff] [blame] | 5004 | skb->csum_offset = off; |
Jason Wang | e5d5dec | 2013-03-26 23:11:20 +0000 | [diff] [blame] | 5005 | skb_set_transport_header(skb, start); |
Rusty Russell | f35d9d8 | 2008-02-04 23:49:54 -0500 | [diff] [blame] | 5006 | return true; |
| 5007 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 5008 | EXPORT_SYMBOL_GPL(skb_partial_csum_set); |
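/* Example sketch (hypothetical helper): a virtio-style backend validating
 * checksum metadata received from an untrusted peer before accepting the
 * packet, per the kernel-doc above.
 */
static int example_apply_untrusted_csum(struct sk_buff *skb,
					u16 csum_start, u16 csum_off)
{
	if (!skb_partial_csum_set(skb, csum_start, csum_off))
		return -EINVAL;	/* caller must drop the packet */

	return 0;
}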
Rusty Russell | f35d9d8 | 2008-02-04 23:49:54 -0500 | [diff] [blame] | 5009 | |
Paul Durrant | ed1f50c | 2014-01-09 10:02:46 +0000 | [diff] [blame] | 5010 | static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, |
| 5011 | unsigned int max) |
| 5012 | { |
| 5013 | if (skb_headlen(skb) >= len) |
| 5014 | return 0; |
| 5015 | |
| 5016 | /* If we need to pull up, then pull up to the max so we |
| 5017 | * won't need to do it again. |
| 5018 | */ |
| 5019 | if (max > skb->len) |
| 5020 | max = skb->len; |
| 5021 | |
| 5022 | if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) |
| 5023 | return -ENOMEM; |
| 5024 | |
| 5025 | if (skb_headlen(skb) < len) |
| 5026 | return -EPROTO; |
| 5027 | |
| 5028 | return 0; |
| 5029 | } |
| 5030 | |
Jan Beulich | f9708b4 | 2014-03-11 13:56:05 +0000 | [diff] [blame] | 5031 | #define MAX_TCP_HDR_LEN (15 * 4) |
| 5032 | |
| 5033 | static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb, |
| 5034 | typeof(IPPROTO_IP) proto, |
| 5035 | unsigned int off) |
| 5036 | { |
Kees Cook | 161d179 | 2020-02-19 22:23:04 -0800 | [diff] [blame] | 5037 | int err; |
Jan Beulich | f9708b4 | 2014-03-11 13:56:05 +0000 | [diff] [blame] | 5038 | |
Kees Cook | 161d179 | 2020-02-19 22:23:04 -0800 | [diff] [blame] | 5039 | switch (proto) { |
Jan Beulich | f9708b4 | 2014-03-11 13:56:05 +0000 | [diff] [blame] | 5040 | case IPPROTO_TCP: |
| 5041 | err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr), |
| 5042 | off + MAX_TCP_HDR_LEN); |
| 5043 | if (!err && !skb_partial_csum_set(skb, off, |
| 5044 | offsetof(struct tcphdr, |
| 5045 | check))) |
| 5046 | err = -EPROTO; |
| 5047 | return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; |
| 5048 | |
| 5049 | case IPPROTO_UDP: |
| 5050 | err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr), |
| 5051 | off + sizeof(struct udphdr)); |
| 5052 | if (!err && !skb_partial_csum_set(skb, off, |
| 5053 | offsetof(struct udphdr, |
| 5054 | check))) |
| 5055 | err = -EPROTO; |
| 5056 | return err ? ERR_PTR(err) : &udp_hdr(skb)->check; |
| 5057 | } |
| 5058 | |
| 5059 | return ERR_PTR(-EPROTO); |
| 5060 | } |
| 5061 | |
Paul Durrant | ed1f50c | 2014-01-09 10:02:46 +0000 | [diff] [blame] | 5062 | /* This value should be large enough to cover a tagged ethernet header plus |
| 5063 | * maximally sized IP and TCP or UDP headers. |
| 5064 | */ |
| 5065 | #define MAX_IP_HDR_LEN 128 |
| 5066 | |
Jan Beulich | f9708b4 | 2014-03-11 13:56:05 +0000 | [diff] [blame] | 5067 | static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate) |
Paul Durrant | ed1f50c | 2014-01-09 10:02:46 +0000 | [diff] [blame] | 5068 | { |
| 5069 | unsigned int off; |
| 5070 | bool fragment; |
Jan Beulich | f9708b4 | 2014-03-11 13:56:05 +0000 | [diff] [blame] | 5071 | __sum16 *csum; |
Paul Durrant | ed1f50c | 2014-01-09 10:02:46 +0000 | [diff] [blame] | 5072 | int err; |
| 5073 | |
| 5074 | fragment = false; |
| 5075 | |
| 5076 | err = skb_maybe_pull_tail(skb, |
| 5077 | sizeof(struct iphdr), |
| 5078 | MAX_IP_HDR_LEN); |
| 5079 | if (err < 0) |
| 5080 | goto out; |
| 5081 | |
Miaohe Lin | 11f920d | 2020-08-06 19:57:18 +0800 | [diff] [blame] | 5082 | if (ip_is_fragment(ip_hdr(skb))) |
Paul Durrant | ed1f50c | 2014-01-09 10:02:46 +0000 | [diff] [blame] | 5083 | fragment = true; |
| 5084 | |
| 5085 | off = ip_hdrlen(skb); |
| 5086 | |
| 5087 | err = -EPROTO; |
| 5088 | |
| 5089 | if (fragment) |
| 5090 | goto out; |
| 5091 | |
Jan Beulich | f9708b4 | 2014-03-11 13:56:05 +0000 | [diff] [blame] | 5092 | csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off); |
| 5093 | if (IS_ERR(csum)) |
| 5094 | return PTR_ERR(csum); |
Paul Durrant | ed1f50c | 2014-01-09 10:02:46 +0000 | [diff] [blame] | 5095 | |
Jan Beulich | f9708b4 | 2014-03-11 13:56:05 +0000 | [diff] [blame] | 5096 | if (recalculate) |
| 5097 | *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, |
| 5098 | ip_hdr(skb)->daddr, |
| 5099 | skb->len - off, |
| 5100 | ip_hdr(skb)->protocol, 0); |
Paul Durrant | ed1f50c | 2014-01-09 10:02:46 +0000 | [diff] [blame] | 5101 | err = 0; |
| 5102 | |
| 5103 | out: |
| 5104 | return err; |
| 5105 | } |
| 5106 | |
| 5107 | /* This value should be large enough to cover a tagged ethernet header plus |
| 5108 | * an IPv6 header, all options, and a maximal TCP or UDP header. |
| 5109 | */ |
| 5110 | #define MAX_IPV6_HDR_LEN 256 |
| 5111 | |
| 5112 | #define OPT_HDR(type, skb, off) \ |
| 5113 | (type *)(skb_network_header(skb) + (off)) |
| 5114 | |
| 5115 | static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) |
| 5116 | { |
| 5117 | int err; |
| 5118 | u8 nexthdr; |
| 5119 | unsigned int off; |
| 5120 | unsigned int len; |
| 5121 | bool fragment; |
| 5122 | bool done; |
Jan Beulich | f9708b4 | 2014-03-11 13:56:05 +0000 | [diff] [blame] | 5123 | __sum16 *csum; |
Paul Durrant | ed1f50c | 2014-01-09 10:02:46 +0000 | [diff] [blame] | 5124 | |
| 5125 | fragment = false; |
| 5126 | done = false; |
| 5127 | |
| 5128 | off = sizeof(struct ipv6hdr); |
| 5129 | |
| 5130 | err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); |
| 5131 | if (err < 0) |
| 5132 | goto out; |
| 5133 | |
| 5134 | nexthdr = ipv6_hdr(skb)->nexthdr; |
| 5135 | |
| 5136 | len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); |
| 5137 | while (off <= len && !done) { |
| 5138 | switch (nexthdr) { |
| 5139 | case IPPROTO_DSTOPTS: |
| 5140 | case IPPROTO_HOPOPTS: |
| 5141 | case IPPROTO_ROUTING: { |
| 5142 | struct ipv6_opt_hdr *hp; |
| 5143 | |
| 5144 | err = skb_maybe_pull_tail(skb, |
| 5145 | off + |
| 5146 | sizeof(struct ipv6_opt_hdr), |
| 5147 | MAX_IPV6_HDR_LEN); |
| 5148 | if (err < 0) |
| 5149 | goto out; |
| 5150 | |
| 5151 | hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); |
| 5152 | nexthdr = hp->nexthdr; |
| 5153 | off += ipv6_optlen(hp); |
| 5154 | break; |
| 5155 | } |
| 5156 | case IPPROTO_AH: { |
| 5157 | struct ip_auth_hdr *hp; |
| 5158 | |
| 5159 | err = skb_maybe_pull_tail(skb, |
| 5160 | off + |
| 5161 | sizeof(struct ip_auth_hdr), |
| 5162 | MAX_IPV6_HDR_LEN); |
| 5163 | if (err < 0) |
| 5164 | goto out; |
| 5165 | |
| 5166 | hp = OPT_HDR(struct ip_auth_hdr, skb, off); |
| 5167 | nexthdr = hp->nexthdr; |
| 5168 | off += ipv6_authlen(hp); |
| 5169 | break; |
| 5170 | } |
| 5171 | case IPPROTO_FRAGMENT: { |
| 5172 | struct frag_hdr *hp; |
| 5173 | |
| 5174 | err = skb_maybe_pull_tail(skb, |
| 5175 | off + |
| 5176 | sizeof(struct frag_hdr), |
| 5177 | MAX_IPV6_HDR_LEN); |
| 5178 | if (err < 0) |
| 5179 | goto out; |
| 5180 | |
| 5181 | hp = OPT_HDR(struct frag_hdr, skb, off); |
| 5182 | |
| 5183 | if (hp->frag_off & htons(IP6_OFFSET | IP6_MF)) |
| 5184 | fragment = true; |
| 5185 | |
| 5186 | nexthdr = hp->nexthdr; |
| 5187 | off += sizeof(struct frag_hdr); |
| 5188 | break; |
| 5189 | } |
| 5190 | default: |
| 5191 | done = true; |
| 5192 | break; |
| 5193 | } |
| 5194 | } |
| 5195 | |
| 5196 | err = -EPROTO; |
| 5197 | |
| 5198 | if (!done || fragment) |
| 5199 | goto out; |
| 5200 | |
Jan Beulich | f9708b4 | 2014-03-11 13:56:05 +0000 | [diff] [blame] | 5201 | csum = skb_checksum_setup_ip(skb, nexthdr, off); |
| 5202 | if (IS_ERR(csum)) |
| 5203 | return PTR_ERR(csum); |
Paul Durrant | ed1f50c | 2014-01-09 10:02:46 +0000 | [diff] [blame] | 5204 | |
Jan Beulich | f9708b4 | 2014-03-11 13:56:05 +0000 | [diff] [blame] | 5205 | if (recalculate) |
| 5206 | *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, |
| 5207 | &ipv6_hdr(skb)->daddr, |
| 5208 | skb->len - off, nexthdr, 0); |
Paul Durrant | ed1f50c | 2014-01-09 10:02:46 +0000 | [diff] [blame] | 5209 | err = 0; |
| 5210 | |
| 5211 | out: |
| 5212 | return err; |
| 5213 | } |
| 5214 | |
| 5215 | /** |
| 5216 | * skb_checksum_setup - set up partial checksum offset |
| 5217 | * @skb: the skb to set up |
| 5218 | * @recalculate: if true the pseudo-header checksum will be recalculated |
| 5219 | */ |
| 5220 | int skb_checksum_setup(struct sk_buff *skb, bool recalculate) |
| 5221 | { |
| 5222 | int err; |
| 5223 | |
| 5224 | switch (skb->protocol) { |
| 5225 | case htons(ETH_P_IP): |
Jan Beulich | f9708b4 | 2014-03-11 13:56:05 +0000 | [diff] [blame] | 5226 | err = skb_checksum_setup_ipv4(skb, recalculate); |
Paul Durrant | ed1f50c | 2014-01-09 10:02:46 +0000 | [diff] [blame] | 5227 | break; |
| 5228 | |
| 5229 | case htons(ETH_P_IPV6): |
| 5230 | err = skb_checksum_setup_ipv6(skb, recalculate); |
| 5231 | break; |
| 5232 | |
| 5233 | default: |
| 5234 | err = -EPROTO; |
| 5235 | break; |
| 5236 | } |
| 5237 | |
| 5238 | return err; |
| 5239 | } |
| 5240 | EXPORT_SYMBOL(skb_checksum_setup); |
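/* Example sketch of a xen-netback-like use (hypothetical helper): re-derive
 * the partial checksum offsets - and here also the pseudo-header checksum -
 * from the packet headers instead of trusting frontend-supplied values.
 */
static int example_fixup_guest_csum(struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	return skb_checksum_setup(skb, true);
}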
| 5241 | |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 5242 | /** |
| 5243 | * skb_checksum_maybe_trim - maybe trims the given skb |
| 5244 | * @skb: the skb to check |
| 5245 | * @transport_len: the data length beyond the network header |
| 5246 | * |
| 5247 | * Checks whether the given skb has data beyond the given transport length. |
| 5248 | * If so, returns a cloned skb trimmed to this transport length. |
| 5249 | * Otherwise returns the provided skb. Returns NULL in error cases |
| 5250 | * (e.g. transport_len exceeds skb length or out-of-memory). |
| 5251 | * |
Linus Lüssing | a516993 | 2015-08-13 05:54:07 +0200 | [diff] [blame] | 5252 | * Caller needs to set the skb transport header and free any returned skb if it |
| 5253 | * differs from the provided skb. |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 5254 | */ |
| 5255 | static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb, |
| 5256 | unsigned int transport_len) |
| 5257 | { |
| 5258 | struct sk_buff *skb_chk; |
| 5259 | unsigned int len = skb_transport_offset(skb) + transport_len; |
| 5260 | int ret; |
| 5261 | |
Linus Lüssing | a516993 | 2015-08-13 05:54:07 +0200 | [diff] [blame] | 5262 | if (skb->len < len) |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 5263 | return NULL; |
Linus Lüssing | a516993 | 2015-08-13 05:54:07 +0200 | [diff] [blame] | 5264 | else if (skb->len == len) |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 5265 | return skb; |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 5266 | |
| 5267 | skb_chk = skb_clone(skb, GFP_ATOMIC); |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 5268 | if (!skb_chk) |
| 5269 | return NULL; |
| 5270 | |
| 5271 | ret = pskb_trim_rcsum(skb_chk, len); |
| 5272 | if (ret) { |
| 5273 | kfree_skb(skb_chk); |
| 5274 | return NULL; |
| 5275 | } |
| 5276 | |
| 5277 | return skb_chk; |
| 5278 | } |
| 5279 | |
| 5280 | /** |
| 5281 | * skb_checksum_trimmed - validate checksum of an skb |
| 5282 | * @skb: the skb to check |
| 5283 | * @transport_len: the data length beyond the network header |
| 5284 | * @skb_chkf: checksum function to use |
| 5285 | * |
| 5286 | * Applies the given checksum function skb_chkf to the provided skb. |
| 5287 | * Returns a checked and maybe trimmed skb. Returns NULL on error. |
| 5288 | * |
| 5289 | * If the skb has data beyond the given transport length, then a |
| 5290 | * trimmed & cloned skb is checked and returned. |
| 5291 | * |
Linus Lüssing | a516993 | 2015-08-13 05:54:07 +0200 | [diff] [blame] | 5292 | * Caller needs to set the skb transport header and free any returned skb if it |
| 5293 | * differs from the provided skb. |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 5294 | */ |
| 5295 | struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, |
| 5296 | unsigned int transport_len, |
| 5297 | __sum16(*skb_chkf)(struct sk_buff *skb)) |
| 5298 | { |
| 5299 | struct sk_buff *skb_chk; |
| 5300 | unsigned int offset = skb_transport_offset(skb); |
Linus Lüssing | fcba67c | 2015-05-05 00:19:35 +0200 | [diff] [blame] | 5301 | __sum16 ret; |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 5302 | |
| 5303 | skb_chk = skb_checksum_maybe_trim(skb, transport_len); |
| 5304 | if (!skb_chk) |
Linus Lüssing | a516993 | 2015-08-13 05:54:07 +0200 | [diff] [blame] | 5305 | goto err; |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 5306 | |
Linus Lüssing | a516993 | 2015-08-13 05:54:07 +0200 | [diff] [blame] | 5307 | if (!pskb_may_pull(skb_chk, offset)) |
| 5308 | goto err; |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 5309 | |
Linus Lüssing | 9b36881 | 2016-02-24 04:21:42 +0100 | [diff] [blame] | 5310 | skb_pull_rcsum(skb_chk, offset); |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 5311 | ret = skb_chkf(skb_chk); |
Linus Lüssing | 9b36881 | 2016-02-24 04:21:42 +0100 | [diff] [blame] | 5312 | skb_push_rcsum(skb_chk, offset); |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 5313 | |
Linus Lüssing | a516993 | 2015-08-13 05:54:07 +0200 | [diff] [blame] | 5314 | if (ret) |
| 5315 | goto err; |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 5316 | |
| 5317 | return skb_chk; |
Linus Lüssing | a516993 | 2015-08-13 05:54:07 +0200 | [diff] [blame] | 5318 | |
| 5319 | err: |
| 5320 | if (skb_chk && skb_chk != skb) |
| 5321 | kfree_skb(skb_chk); |
| 5322 | |
| 5323 | return NULL; |
| 5324 | |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 5325 | } |
| 5326 | EXPORT_SYMBOL(skb_checksum_trimmed); |
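/* Example sketch of the IGMP/MLD-style pattern (hypothetical helper):
 * validate the checksum over exactly transport_len bytes, working on a
 * trimmed clone when the skb carries trailing data. The validator is a
 * placeholder for a __sum16 (*)(struct sk_buff *) returning 0 on success.
 */
static bool example_csum_ok(struct sk_buff *skb, unsigned int transport_len,
			    __sum16 (*validate)(struct sk_buff *skb))
{
	struct sk_buff *skb_chk;

	skb_chk = skb_checksum_trimmed(skb, transport_len, validate);
	if (!skb_chk)
		return false;

	if (skb_chk != skb)
		kfree_skb(skb_chk);

	return true;
}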
| 5327 | |
Ben Hutchings | 4497b07 | 2008-06-19 16:22:28 -0700 | [diff] [blame] | 5328 | void __skb_warn_lro_forwarding(const struct sk_buff *skb) |
| 5329 | { |
Joe Perches | e87cc47 | 2012-05-13 21:56:26 +0000 | [diff] [blame] | 5330 | net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n", |
| 5331 | skb->dev->name); |
Ben Hutchings | 4497b07 | 2008-06-19 16:22:28 -0700 | [diff] [blame] | 5332 | } |
Ben Hutchings | 4497b07 | 2008-06-19 16:22:28 -0700 | [diff] [blame] | 5333 | EXPORT_SYMBOL(__skb_warn_lro_forwarding); |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 5334 | |
| 5335 | void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) |
| 5336 | { |
Eric Dumazet | 3d861f6 | 2012-10-22 09:03:40 +0000 | [diff] [blame] | 5337 | if (head_stolen) { |
| 5338 | skb_release_head_state(skb); |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 5339 | kmem_cache_free(skbuff_head_cache, skb); |
Eric Dumazet | 3d861f6 | 2012-10-22 09:03:40 +0000 | [diff] [blame] | 5340 | } else { |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 5341 | __kfree_skb(skb); |
Eric Dumazet | 3d861f6 | 2012-10-22 09:03:40 +0000 | [diff] [blame] | 5342 | } |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 5343 | } |
| 5344 | EXPORT_SYMBOL(kfree_skb_partial); |
| 5345 | |
| 5346 | /** |
| 5347 | * skb_try_coalesce - try to merge skb to prior one |
| 5348 | * @to: prior buffer |
| 5349 | * @from: buffer to add |
| 5350 | * @fragstolen: pointer to boolean |
Randy Dunlap | c6c4b97 | 2012-06-08 14:01:44 +0000 | [diff] [blame] | 5351 | * @delta_truesize: how much more was allocated than was requested |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 5352 | */ |
| 5353 | bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, |
| 5354 | bool *fragstolen, int *delta_truesize) |
| 5355 | { |
Eric Dumazet | c818fa9 | 2017-10-04 10:48:35 -0700 | [diff] [blame] | 5356 | struct skb_shared_info *to_shinfo, *from_shinfo; |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 5357 | int i, delta, len = from->len; |
| 5358 | |
| 5359 | *fragstolen = false; |
| 5360 | |
| 5361 | if (skb_cloned(to)) |
| 5362 | return false; |
| 5363 | |
Ilias Apalodimas | 6a5bcd8 | 2021-06-07 21:02:38 +0200 | [diff] [blame] | 5364 | /* The page pool signature of struct page will eventually make it |
| 5365 | * possible to tell which pages can be recycled; for now, prohibit |
| 5366 | * coalescing slab-allocated and page_pool-allocated SKBs. |
| 5367 | */ |
| 5368 | if (to->pp_recycle != from->pp_recycle) |
| 5369 | return false; |
| 5370 | |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 5371 | if (len <= skb_tailroom(to)) { |
Eric Dumazet | e93a043 | 2014-09-15 04:19:52 -0700 | [diff] [blame] | 5372 | if (len) |
| 5373 | BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 5374 | *delta_truesize = 0; |
| 5375 | return true; |
| 5376 | } |
| 5377 | |
Eric Dumazet | c818fa9 | 2017-10-04 10:48:35 -0700 | [diff] [blame] | 5378 | to_shinfo = skb_shinfo(to); |
| 5379 | from_shinfo = skb_shinfo(from); |
| 5380 | if (to_shinfo->frag_list || from_shinfo->frag_list) |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 5381 | return false; |
Willem de Bruijn | 1f8b977 | 2017-08-03 16:29:41 -0400 | [diff] [blame] | 5382 | if (skb_zcopy(to) || skb_zcopy(from)) |
| 5383 | return false; |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 5384 | |
| 5385 | if (skb_headlen(from) != 0) { |
| 5386 | struct page *page; |
| 5387 | unsigned int offset; |
| 5388 | |
Eric Dumazet | c818fa9 | 2017-10-04 10:48:35 -0700 | [diff] [blame] | 5389 | if (to_shinfo->nr_frags + |
| 5390 | from_shinfo->nr_frags >= MAX_SKB_FRAGS) |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 5391 | return false; |
| 5392 | |
| 5393 | if (skb_head_is_locked(from)) |
| 5394 | return false; |
| 5395 | |
| 5396 | delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); |
| 5397 | |
| 5398 | page = virt_to_head_page(from->head); |
| 5399 | offset = from->data - (unsigned char *)page_address(page); |
| 5400 | |
Eric Dumazet | c818fa9 | 2017-10-04 10:48:35 -0700 | [diff] [blame] | 5401 | skb_fill_page_desc(to, to_shinfo->nr_frags, |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 5402 | page, offset, skb_headlen(from)); |
| 5403 | *fragstolen = true; |
| 5404 | } else { |
Eric Dumazet | c818fa9 | 2017-10-04 10:48:35 -0700 | [diff] [blame] | 5405 | if (to_shinfo->nr_frags + |
| 5406 | from_shinfo->nr_frags > MAX_SKB_FRAGS) |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 5407 | return false; |
| 5408 | |
Weiping Pan | f4b549a | 2012-09-28 20:15:30 +0000 | [diff] [blame] | 5409 | delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from)); |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 5410 | } |
| 5411 | |
| 5412 | WARN_ON_ONCE(delta < len); |
| 5413 | |
Eric Dumazet | c818fa9 | 2017-10-04 10:48:35 -0700 | [diff] [blame] | 5414 | memcpy(to_shinfo->frags + to_shinfo->nr_frags, |
| 5415 | from_shinfo->frags, |
| 5416 | from_shinfo->nr_frags * sizeof(skb_frag_t)); |
| 5417 | to_shinfo->nr_frags += from_shinfo->nr_frags; |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 5418 | |
| 5419 | if (!skb_cloned(from)) |
Eric Dumazet | c818fa9 | 2017-10-04 10:48:35 -0700 | [diff] [blame] | 5420 | from_shinfo->nr_frags = 0; |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 5421 | |
Li RongQing | 8ea853f | 2012-09-18 16:53:21 +0000 | [diff] [blame] | 5422 | /* if the skb is not cloned this does nothing |
| 5423 | * since we set nr_frags to 0. |
| 5424 | */ |
Eric Dumazet | c818fa9 | 2017-10-04 10:48:35 -0700 | [diff] [blame] | 5425 | for (i = 0; i < from_shinfo->nr_frags; i++) |
| 5426 | __skb_frag_ref(&from_shinfo->frags[i]); |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 5427 | |
| 5428 | to->truesize += delta; |
| 5429 | to->len += len; |
| 5430 | to->data_len += len; |
| 5431 | |
| 5432 | *delta_truesize = delta; |
| 5433 | return true; |
| 5434 | } |
| 5435 | EXPORT_SYMBOL(skb_try_coalesce); |
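/* Example sketch of the receive-queue pattern, in the spirit of TCP
 * coalescing (a sketch under those assumptions, not the TCP code): merge a
 * new buffer into the queue tail, charge the truesize delta to the socket,
 * and free the source cheaply when its head was stolen.
 */
static bool example_queue_coalesce(struct sock *sk, struct sk_buff *tail,
				   struct sk_buff *skb)
{
	bool fragstolen;
	int delta;

	if (!skb_try_coalesce(tail, skb, &fragstolen, &delta))
		return false;

	sk_mem_charge(sk, delta);
	kfree_skb_partial(skb, fragstolen);
	return true;
}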
Nicolas Dichtel | 621e84d | 2013-06-26 16:11:27 +0200 | [diff] [blame] | 5436 | |
| 5437 | /** |
Nicolas Dichtel | 8b27f27 | 2013-09-02 15:34:56 +0200 | [diff] [blame] | 5438 | * skb_scrub_packet - scrub an skb |
Nicolas Dichtel | 621e84d | 2013-06-26 16:11:27 +0200 | [diff] [blame] | 5439 | * |
| 5440 | * @skb: buffer to clean |
Nicolas Dichtel | 8b27f27 | 2013-09-02 15:34:56 +0200 | [diff] [blame] | 5441 | * @xnet: packet is crossing netns |
Nicolas Dichtel | 621e84d | 2013-06-26 16:11:27 +0200 | [diff] [blame] | 5442 | * |
Nicolas Dichtel | 8b27f27 | 2013-09-02 15:34:56 +0200 | [diff] [blame] | 5443 | * skb_scrub_packet can be used after encapsulating or decapsulating a packet |
| 5444 | * into/from a tunnel. Some information has to be cleared during these |
| 5445 | * operations. |
| 5446 | * skb_scrub_packet can also be used to clean a skb before injecting it into |
| 5447 | * another namespace (@xnet == true). We have to clear all information in the |
| 5448 | * skb that could impact namespace isolation. |
Nicolas Dichtel | 621e84d | 2013-06-26 16:11:27 +0200 | [diff] [blame] | 5449 | */ |
Nicolas Dichtel | 8b27f27 | 2013-09-02 15:34:56 +0200 | [diff] [blame] | 5450 | void skb_scrub_packet(struct sk_buff *skb, bool xnet) |
Nicolas Dichtel | 621e84d | 2013-06-26 16:11:27 +0200 | [diff] [blame] | 5451 | { |
Nicolas Dichtel | 621e84d | 2013-06-26 16:11:27 +0200 | [diff] [blame] | 5452 | skb->pkt_type = PACKET_HOST; |
| 5453 | skb->skb_iif = 0; |
WANG Cong | 60ff746 | 2014-05-04 16:39:18 -0700 | [diff] [blame] | 5454 | skb->ignore_df = 0; |
Nicolas Dichtel | 621e84d | 2013-06-26 16:11:27 +0200 | [diff] [blame] | 5455 | skb_dst_drop(skb); |
Florian Westphal | 174e238 | 2019-09-26 20:37:05 +0200 | [diff] [blame] | 5456 | skb_ext_reset(skb); |
Florian Westphal | 895b5c9 | 2019-09-29 20:54:03 +0200 | [diff] [blame] | 5457 | nf_reset_ct(skb); |
Nicolas Dichtel | 621e84d | 2013-06-26 16:11:27 +0200 | [diff] [blame] | 5458 | nf_reset_trace(skb); |
Herbert Xu | 213dd74 | 2015-04-16 09:03:27 +0800 | [diff] [blame] | 5459 | |
Petr Machata | 6f9a506 | 2018-11-19 16:11:07 +0000 | [diff] [blame] | 5460 | #ifdef CONFIG_NET_SWITCHDEV |
| 5461 | skb->offload_fwd_mark = 0; |
Ido Schimmel | 875e893 | 2018-12-04 08:15:10 +0000 | [diff] [blame] | 5462 | skb->offload_l3_fwd_mark = 0; |
Petr Machata | 6f9a506 | 2018-11-19 16:11:07 +0000 | [diff] [blame] | 5463 | #endif |
| 5464 | |
Herbert Xu | 213dd74 | 2015-04-16 09:03:27 +0800 | [diff] [blame] | 5465 | if (!xnet) |
| 5466 | return; |
| 5467 | |
Ye Yin | 2b5ec1a | 2017-10-26 16:57:05 +0800 | [diff] [blame] | 5468 | ipvs_reset(skb); |
Herbert Xu | 213dd74 | 2015-04-16 09:03:27 +0800 | [diff] [blame] | 5469 | skb->mark = 0; |
Jesus Sanchez-Palencia | c47d8c2 | 2018-07-03 15:42:47 -0700 | [diff] [blame] | 5470 | skb->tstamp = 0; |
Nicolas Dichtel | 621e84d | 2013-06-26 16:11:27 +0200 | [diff] [blame] | 5471 | } |
| 5472 | EXPORT_SYMBOL_GPL(skb_scrub_packet); |
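/* Example sketch (hypothetical helper): a tunnel decap path scrubbing skb
 * state before re-injecting the inner packet, flagging @xnet when the new
 * device lives in a different netns.
 */
static void example_decap_reinject(struct sk_buff *skb,
				   struct net_device *new_dev)
{
	skb_scrub_packet(skb, !net_eq(dev_net(skb->dev), dev_net(new_dev)));
	skb->dev = new_dev;
}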
Florian Westphal | de960aa | 2014-01-26 10:58:16 +0100 | [diff] [blame] | 5473 | |
| 5474 | /** |
| 5475 | * skb_gso_transport_seglen - Return length of individual segments of a gso packet |
| 5476 | * |
| 5477 | * @skb: GSO skb |
| 5478 | * |
| 5479 | * skb_gso_transport_seglen is used to determine the real size of the |
| 5480 | * individual segments, including Layer4 headers (TCP/UDP). |
| 5481 | * |
| 5482 | * The MAC/L2 or network (IP, IPv6) headers are not accounted for. |
| 5483 | */ |
Daniel Axtens | a4a7771 | 2018-03-01 17:13:40 +1100 | [diff] [blame] | 5484 | static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) |
Florian Westphal | de960aa | 2014-01-26 10:58:16 +0100 | [diff] [blame] | 5485 | { |
| 5486 | const struct skb_shared_info *shinfo = skb_shinfo(skb); |
Florian Westphal | f993bc2 | 2014-10-20 13:49:18 +0200 | [diff] [blame] | 5487 | unsigned int thlen = 0; |
Florian Westphal | de960aa | 2014-01-26 10:58:16 +0100 | [diff] [blame] | 5488 | |
Florian Westphal | f993bc2 | 2014-10-20 13:49:18 +0200 | [diff] [blame] | 5489 | if (skb->encapsulation) { |
| 5490 | thlen = skb_inner_transport_header(skb) - |
| 5491 | skb_transport_header(skb); |
Florian Westphal | 6d39d58 | 2014-04-09 10:28:50 +0200 | [diff] [blame] | 5492 | |
Florian Westphal | f993bc2 | 2014-10-20 13:49:18 +0200 | [diff] [blame] | 5493 | if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) |
| 5494 | thlen += inner_tcp_hdrlen(skb); |
| 5495 | } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) { |
| 5496 | thlen = tcp_hdrlen(skb); |
Daniel Axtens | 1dd27cd | 2018-03-09 14:06:09 +1100 | [diff] [blame] | 5497 | } else if (unlikely(skb_is_gso_sctp(skb))) { |
Marcelo Ricardo Leitner | 90017ac | 2016-06-02 15:05:43 -0300 | [diff] [blame] | 5498 | thlen = sizeof(struct sctphdr); |
Willem de Bruijn | ee80d1e | 2018-04-26 13:42:16 -0400 | [diff] [blame] | 5499 | } else if (shinfo->gso_type & SKB_GSO_UDP_L4) { |
| 5500 | thlen = sizeof(struct udphdr); |
Florian Westphal | f993bc2 | 2014-10-20 13:49:18 +0200 | [diff] [blame] | 5501 | } |
Florian Westphal | 6d39d58 | 2014-04-09 10:28:50 +0200 | [diff] [blame] | 5502 | /* UFO sets gso_size to the size of the fragmentation |
| 5503 | * payload, i.e. the size of the L4 (UDP) header is already |
| 5504 | * accounted for. |
| 5505 | */ |
Florian Westphal | f993bc2 | 2014-10-20 13:49:18 +0200 | [diff] [blame] | 5506 | return thlen + shinfo->gso_size; |
Florian Westphal | de960aa | 2014-01-26 10:58:16 +0100 | [diff] [blame] | 5507 | } |
Daniel Axtens | a4a7771 | 2018-03-01 17:13:40 +1100 | [diff] [blame] | 5508 | |
| 5509 | /** |
| 5510 | * skb_gso_network_seglen - Return length of individual segments of a gso packet |
| 5511 | * |
| 5512 | * @skb: GSO skb |
| 5513 | * |
| 5514 | * skb_gso_network_seglen is used to determine the real size of the |
| 5515 | * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP). |
| 5516 | * |
| 5517 | * The MAC/L2 header is not accounted for. |
| 5518 | */ |
| 5519 | static unsigned int skb_gso_network_seglen(const struct sk_buff *skb) |
| 5520 | { |
| 5521 | unsigned int hdr_len = skb_transport_header(skb) - |
| 5522 | skb_network_header(skb); |
| 5523 | |
| 5524 | return hdr_len + skb_gso_transport_seglen(skb); |
| 5525 | } |
| 5526 | |
| 5527 | /** |
| 5528 | * skb_gso_mac_seglen - Return length of individual segments of a gso packet |
| 5529 | * |
| 5530 | * @skb: GSO skb |
| 5531 | * |
| 5532 | * skb_gso_mac_seglen is used to determine the real size of the |
| 5533 | * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4 |
| 5534 | * headers (TCP/UDP). |
| 5535 | */ |
| 5536 | static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb) |
| 5537 | { |
| 5538 | unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb); |
| 5539 | |
| 5540 | return hdr_len + skb_gso_transport_seglen(skb); |
| 5541 | } |
Vlad Yasevich | 0d5501c | 2014-08-08 14:42:13 -0400 | [diff] [blame] | 5542 | |
Marcelo Ricardo Leitner | ae7ef81 | 2016-06-02 15:05:41 -0300 | [diff] [blame] | 5543 | /** |
Daniel Axtens | 2b16f04 | 2018-01-31 14:15:33 +1100 | [diff] [blame] | 5544 | * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS |
| 5545 | * |
| 5546 | * There are a couple of instances where we have a GSO skb, and we |
| 5547 | * want to determine what size it would be after it is segmented. |
| 5548 | * |
| 5549 | * We might want to check: |
| 5550 | * - L3+L4+payload size (e.g. IP forwarding) |
| 5551 | * - L2+L3+L4+payload size (e.g. sanity check before passing to driver) |
| 5552 | * |
| 5553 | * This is a helper to do that correctly considering GSO_BY_FRAGS. |
| 5554 | * |
Mathieu Malaterre | 49682bf | 2018-10-31 13:16:58 +0100 | [diff] [blame] | 5555 | * @skb: GSO skb |
| 5556 | * |
Daniel Axtens | 2b16f04 | 2018-01-31 14:15:33 +1100 | [diff] [blame] | 5557 | * @seg_len: The segmented length (from skb_gso_*_seglen). In the |
| 5558 | * GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS]. |
| 5559 | * |
| 5560 | * @max_len: The maximum permissible length. |
| 5561 | * |
| 5562 | * Returns true if the segmented length <= max length. |
| 5563 | */ |
| 5564 | static inline bool skb_gso_size_check(const struct sk_buff *skb, |
| 5565 | unsigned int seg_len, |
| 5566 | unsigned int max_len) { |
| 5567 | const struct skb_shared_info *shinfo = skb_shinfo(skb); |
| 5568 | const struct sk_buff *iter; |
| 5569 | |
| 5570 | if (shinfo->gso_size != GSO_BY_FRAGS) |
| 5571 | return seg_len <= max_len; |
| 5572 | |
| 5573 | /* Undo this so we can re-use header sizes */ |
| 5574 | seg_len -= GSO_BY_FRAGS; |
| 5575 | |
| 5576 | skb_walk_frags(skb, iter) { |
| 5577 | if (seg_len + skb_headlen(iter) > max_len) |
| 5578 | return false; |
| 5579 | } |
| 5580 | |
| 5581 | return true; |
| 5582 | } |
| 5583 | |
| 5584 | /** |
Daniel Axtens | 779b793 | 2018-03-01 17:13:37 +1100 | [diff] [blame] | 5585 | * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU? |
Marcelo Ricardo Leitner | ae7ef81 | 2016-06-02 15:05:41 -0300 | [diff] [blame] | 5586 | * |
| 5587 | * @skb: GSO skb |
David S. Miller | 76f21b9 | 2016-06-03 22:56:28 -0700 | [diff] [blame] | 5588 | * @mtu: MTU to validate against |
Marcelo Ricardo Leitner | ae7ef81 | 2016-06-02 15:05:41 -0300 | [diff] [blame] | 5589 | * |
Daniel Axtens | 779b793 | 2018-03-01 17:13:37 +1100 | [diff] [blame] | 5590 | * skb_gso_validate_network_len validates if a given skb will fit a |
| 5591 | * wanted MTU once split. It considers L3 headers, L4 headers, and the |
| 5592 | * payload. |
Marcelo Ricardo Leitner | ae7ef81 | 2016-06-02 15:05:41 -0300 | [diff] [blame] | 5593 | */ |
Daniel Axtens | 779b793 | 2018-03-01 17:13:37 +1100 | [diff] [blame] | 5594 | bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu) |
Marcelo Ricardo Leitner | ae7ef81 | 2016-06-02 15:05:41 -0300 | [diff] [blame] | 5595 | { |
Daniel Axtens | 2b16f04 | 2018-01-31 14:15:33 +1100 | [diff] [blame] | 5596 | return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu); |
Marcelo Ricardo Leitner | ae7ef81 | 2016-06-02 15:05:41 -0300 | [diff] [blame] | 5597 | } |
Daniel Axtens | 779b793 | 2018-03-01 17:13:37 +1100 | [diff] [blame] | 5598 | EXPORT_SYMBOL_GPL(skb_gso_validate_network_len); |
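/* Example sketch of the IP-forwarding check (hypothetical helper): a GSO
 * skb can exceed the MTU as a whole yet still fit once segmented, so test
 * the per-segment network-layer length rather than skb->len.
 */
static bool example_fits_mtu(const struct sk_buff *skb, unsigned int mtu)
{
	if (!skb_is_gso(skb))
		return skb->len <= mtu;

	return skb_gso_validate_network_len(skb, mtu);
}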
Marcelo Ricardo Leitner | ae7ef81 | 2016-06-02 15:05:41 -0300 | [diff] [blame] | 5599 | |
Daniel Axtens | 2b16f04 | 2018-01-31 14:15:33 +1100 | [diff] [blame] | 5600 | /** |
| 5601 | * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length? |
| 5602 | * |
| 5603 | * @skb: GSO skb |
| 5604 | * @len: length to validate against |
| 5605 | * |
| 5606 | * skb_gso_validate_mac_len validates if a given skb will fit a wanted |
| 5607 | * length once split, including L2, L3 and L4 headers and the payload. |
| 5608 | */ |
| 5609 | bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len) |
| 5610 | { |
| 5611 | return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len); |
| 5612 | } |
| 5613 | EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len); |
| 5614 | |
Vlad Yasevich | 0d5501c | 2014-08-08 14:42:13 -0400 | [diff] [blame] | 5615 | static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) |
| 5616 | { |
Yuya Kusakabe | d85e8be | 2019-04-16 10:22:28 +0900 | [diff] [blame] | 5617 | int mac_len, meta_len; |
| 5618 | void *meta; |
Toshiaki Makita | 4bbb3e0 | 2018-03-13 14:51:27 +0900 | [diff] [blame] | 5619 | |
Vlad Yasevich | 0d5501c | 2014-08-08 14:42:13 -0400 | [diff] [blame] | 5620 | if (skb_cow(skb, skb_headroom(skb)) < 0) { |
| 5621 | kfree_skb(skb); |
| 5622 | return NULL; |
| 5623 | } |
| 5624 | |
Toshiaki Makita | 4bbb3e0 | 2018-03-13 14:51:27 +0900 | [diff] [blame] | 5625 | mac_len = skb->data - skb_mac_header(skb); |
Toshiaki Makita | ae47457 | 2018-03-29 19:05:29 +0900 | [diff] [blame] | 5626 | if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) { |
| 5627 | memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), |
| 5628 | mac_len - VLAN_HLEN - ETH_TLEN); |
| 5629 | } |
Yuya Kusakabe | d85e8be | 2019-04-16 10:22:28 +0900 | [diff] [blame] | 5630 | |
| 5631 | meta_len = skb_metadata_len(skb); |
| 5632 | if (meta_len) { |
| 5633 | meta = skb_metadata_end(skb) - meta_len; |
| 5634 | memmove(meta + VLAN_HLEN, meta, meta_len); |
| 5635 | } |
| 5636 | |
Vlad Yasevich | 0d5501c | 2014-08-08 14:42:13 -0400 | [diff] [blame] | 5637 | skb->mac_header += VLAN_HLEN; |
| 5638 | return skb; |
| 5639 | } |
| 5640 | |
| 5641 | struct sk_buff *skb_vlan_untag(struct sk_buff *skb) |
| 5642 | { |
| 5643 | struct vlan_hdr *vhdr; |
| 5644 | u16 vlan_tci; |
| 5645 | |
Jiri Pirko | df8a39d | 2015-01-13 17:13:44 +0100 | [diff] [blame] | 5646 | if (unlikely(skb_vlan_tag_present(skb))) { |
Vlad Yasevich | 0d5501c | 2014-08-08 14:42:13 -0400 | [diff] [blame] | 5647 | /* vlan_tci is already set-up so leave this for another time */ |
| 5648 | return skb; |
| 5649 | } |
| 5650 | |
| 5651 | skb = skb_share_check(skb, GFP_ATOMIC); |
| 5652 | if (unlikely(!skb)) |
| 5653 | goto err_free; |
Miaohe Lin | 55eff0e | 2020-08-15 04:44:31 -0400 | [diff] [blame] | 5654 | /* We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). */ |
| 5655 | if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short)))) |
Vlad Yasevich | 0d5501c | 2014-08-08 14:42:13 -0400 | [diff] [blame] | 5656 | goto err_free; |
| 5657 | |
| 5658 | vhdr = (struct vlan_hdr *)skb->data; |
| 5659 | vlan_tci = ntohs(vhdr->h_vlan_TCI); |
| 5660 | __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); |
| 5661 | |
| 5662 | skb_pull_rcsum(skb, VLAN_HLEN); |
| 5663 | vlan_set_encap_proto(skb, vhdr); |
| 5664 | |
| 5665 | skb = skb_reorder_vlan_header(skb); |
| 5666 | if (unlikely(!skb)) |
| 5667 | goto err_free; |
| 5668 | |
| 5669 | skb_reset_network_header(skb); |
Alexander Lobakin | 8be33ec | 2020-11-09 23:47:23 +0000 | [diff] [blame] | 5670 | if (!skb_transport_header_was_set(skb)) |
| 5671 | skb_reset_transport_header(skb); |
Vlad Yasevich | 0d5501c | 2014-08-08 14:42:13 -0400 | [diff] [blame] | 5672 | skb_reset_mac_len(skb); |
| 5673 | |
| 5674 | return skb; |
| 5675 | |
| 5676 | err_free: |
| 5677 | kfree_skb(skb); |
| 5678 | return NULL; |
| 5679 | } |
| 5680 | EXPORT_SYMBOL(skb_vlan_untag); |
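/* Example sketch of the core-RX normalization step (hypothetical helper):
 * when hardware did not strip the outer tag, move it from the payload into
 * skb metadata before protocol demux. May free and replace the skb.
 */
static struct sk_buff *example_rx_normalize_vlan(struct sk_buff *skb)
{
	if (eth_type_vlan(skb->protocol) && !skb_vlan_tag_present(skb))
		skb = skb_vlan_untag(skb);	/* NULL on failure */

	return skb;
}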
Eric Dumazet | 2e4e441 | 2014-09-17 04:49:49 -0700 | [diff] [blame] | 5681 | |
Jiri Pirko | e219512 | 2014-11-19 14:05:01 +0100 | [diff] [blame] | 5682 | int skb_ensure_writable(struct sk_buff *skb, int write_len) |
| 5683 | { |
| 5684 | if (!pskb_may_pull(skb, write_len)) |
| 5685 | return -ENOMEM; |
| 5686 | |
| 5687 | if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) |
| 5688 | return 0; |
| 5689 | |
| 5690 | return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); |
| 5691 | } |
| 5692 | EXPORT_SYMBOL(skb_ensure_writable); |
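/* Example sketch of the usual pre-mangle step, as in tc pedit or netfilter
 * (hypothetical helper): make the bytes about to be rewritten private and
 * linear before editing them in place.
 */
static int example_set_dst_mac(struct sk_buff *skb, const u8 *mac)
{
	int err = skb_ensure_writable(skb, ETH_HLEN);

	if (err)
		return err;

	ether_addr_copy(eth_hdr(skb)->h_dest, mac);
	return 0;
}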
| 5693 | |
Shmulik Ladkani | bfca4c5 | 2016-09-19 19:11:09 +0300 | [diff] [blame] | 5694 | /* Remove the VLAN header from the packet and update the csum accordingly. |
| 5695 | * Expects an skb without skb_vlan_tag_present set but with a VLAN tag payload. |
| 5696 | */ |
| 5697 | int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci) |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5698 | { |
| 5699 | struct vlan_hdr *vhdr; |
Shmulik Ladkani | b6a7920 | 2016-09-29 12:10:41 +0300 | [diff] [blame] | 5700 | int offset = skb->data - skb_mac_header(skb); |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5701 | int err; |
| 5702 | |
Shmulik Ladkani | b6a7920 | 2016-09-29 12:10:41 +0300 | [diff] [blame] | 5703 | if (WARN_ONCE(offset, |
| 5704 | "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n", |
| 5705 | offset)) { |
| 5706 | return -EINVAL; |
| 5707 | } |
| 5708 | |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5709 | err = skb_ensure_writable(skb, VLAN_ETH_HLEN); |
| 5710 | if (unlikely(err)) |
Shmulik Ladkani | b6a7920 | 2016-09-29 12:10:41 +0300 | [diff] [blame] | 5711 | return err; |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5712 | |
| 5713 | skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); |
| 5714 | |
| 5715 | vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); |
| 5716 | *vlan_tci = ntohs(vhdr->h_vlan_TCI); |
| 5717 | |
| 5718 | memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN); |
| 5719 | __skb_pull(skb, VLAN_HLEN); |
| 5720 | |
| 5721 | vlan_set_encap_proto(skb, vhdr); |
| 5722 | skb->mac_header += VLAN_HLEN; |
| 5723 | |
| 5724 | if (skb_network_offset(skb) < ETH_HLEN) |
| 5725 | skb_set_network_header(skb, ETH_HLEN); |
| 5726 | |
| 5727 | skb_reset_mac_len(skb); |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5728 | |
| 5729 | return err; |
| 5730 | } |
Shmulik Ladkani | bfca4c5 | 2016-09-19 19:11:09 +0300 | [diff] [blame] | 5731 | EXPORT_SYMBOL(__skb_vlan_pop); |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5732 | |
Shmulik Ladkani | b6a7920 | 2016-09-29 12:10:41 +0300 | [diff] [blame] | 5733 | /* Pop a vlan tag either from hwaccel or from payload. |
| 5734 | * Expects skb->data at mac header. |
| 5735 | */ |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5736 | int skb_vlan_pop(struct sk_buff *skb) |
| 5737 | { |
| 5738 | u16 vlan_tci; |
| 5739 | __be16 vlan_proto; |
| 5740 | int err; |
| 5741 | |
Jiri Pirko | df8a39d | 2015-01-13 17:13:44 +0100 | [diff] [blame] | 5742 | if (likely(skb_vlan_tag_present(skb))) { |
Michał Mirosław | b1817524 | 2018-11-09 00:18:02 +0100 | [diff] [blame] | 5743 | __vlan_hwaccel_clear_tag(skb); |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5744 | } else { |
Shmulik Ladkani | ecf4ee4 | 2016-09-20 12:48:37 +0300 | [diff] [blame] | 5745 | if (unlikely(!eth_type_vlan(skb->protocol))) |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5746 | return 0; |
| 5747 | |
| 5748 | err = __skb_vlan_pop(skb, &vlan_tci); |
| 5749 | if (err) |
| 5750 | return err; |
| 5751 | } |
| 5752 | /* move next vlan tag to hw accel tag */ |
Shmulik Ladkani | ecf4ee4 | 2016-09-20 12:48:37 +0300 | [diff] [blame] | 5753 | if (likely(!eth_type_vlan(skb->protocol))) |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5754 | return 0; |
| 5755 | |
| 5756 | vlan_proto = skb->protocol; |
| 5757 | err = __skb_vlan_pop(skb, &vlan_tci); |
| 5758 | if (unlikely(err)) |
| 5759 | return err; |
| 5760 | |
| 5761 | __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); |
| 5762 | return 0; |
| 5763 | } |
| 5764 | EXPORT_SYMBOL(skb_vlan_pop); |
| 5765 | |
Shmulik Ladkani | b6a7920 | 2016-09-29 12:10:41 +0300 | [diff] [blame] | 5766 | /* Push a vlan tag into hwaccel, first moving any existing hwaccel tag into the payload. |
| 5767 | * Expects skb->data at mac header. |
| 5768 | */ |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5769 | int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) |
| 5770 | { |
Jiri Pirko | df8a39d | 2015-01-13 17:13:44 +0100 | [diff] [blame] | 5771 | if (skb_vlan_tag_present(skb)) { |
Shmulik Ladkani | b6a7920 | 2016-09-29 12:10:41 +0300 | [diff] [blame] | 5772 | int offset = skb->data - skb_mac_header(skb); |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5773 | int err; |
| 5774 | |
Shmulik Ladkani | b6a7920 | 2016-09-29 12:10:41 +0300 | [diff] [blame] | 5775 | if (WARN_ONCE(offset, |
| 5776 | "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n", |
| 5777 | offset)) { |
| 5778 | return -EINVAL; |
| 5779 | } |
| 5780 | |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5781 | err = __vlan_insert_tag(skb, skb->vlan_proto, |
Jiri Pirko | df8a39d | 2015-01-13 17:13:44 +0100 | [diff] [blame] | 5782 | skb_vlan_tag_get(skb)); |
Shmulik Ladkani | b6a7920 | 2016-09-29 12:10:41 +0300 | [diff] [blame] | 5783 | if (err) |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5784 | return err; |
Daniel Borkmann | 9241e2d | 2016-04-16 02:27:58 +0200 | [diff] [blame] | 5785 | |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5786 | skb->protocol = skb->vlan_proto; |
| 5787 | skb->mac_len += VLAN_HLEN; |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5788 | |
Daniel Borkmann | 6b83d28 | 2016-02-20 00:29:30 +0100 | [diff] [blame] | 5789 | skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5790 | } |
| 5791 | __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); |
| 5792 | return 0; |
| 5793 | } |
| 5794 | EXPORT_SYMBOL(skb_vlan_push); |
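/* Example sketch of an act_vlan-style tag rewrite (hypothetical helper):
 * pop the outer tag - from hwaccel or payload - then push a replacement.
 * Both helpers expect skb->data at the mac header.
 */
static int example_vlan_rewrite(struct sk_buff *skb, u16 new_vid)
{
	int err = skb_vlan_pop(skb);

	if (err)
		return err;

	return skb_vlan_push(skb, htons(ETH_P_8021Q), new_vid);
}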
| 5795 | |
Guillaume Nault | 19fbcb3 | 2020-10-03 00:44:28 +0200 | [diff] [blame] | 5796 | /** |
| 5797 | * skb_eth_pop() - Drop the Ethernet header at the head of a packet |
| 5798 | * |
| 5799 | * @skb: Socket buffer to modify |
| 5800 | * |
| 5801 | * Drop the Ethernet header of @skb. |
| 5802 | * |
| 5803 | * Expects that skb->data points to the mac header and that no VLAN tags are |
| 5804 | * present. |
| 5805 | * |
| 5806 | * Returns 0 on success, -errno otherwise. |
| 5807 | */ |
| 5808 | int skb_eth_pop(struct sk_buff *skb) |
| 5809 | { |
| 5810 | if (!pskb_may_pull(skb, ETH_HLEN) || skb_vlan_tagged(skb) || |
| 5811 | skb_network_offset(skb) < ETH_HLEN) |
| 5812 | return -EPROTO; |
| 5813 | |
| 5814 | skb_pull_rcsum(skb, ETH_HLEN); |
| 5815 | skb_reset_mac_header(skb); |
| 5816 | skb_reset_mac_len(skb); |
| 5817 | |
| 5818 | return 0; |
| 5819 | } |
| 5820 | EXPORT_SYMBOL(skb_eth_pop); |
| 5821 | |
| 5822 | /** |
| 5823 | * skb_eth_push() - Add a new Ethernet header at the head of a packet |
| 5824 | * |
| 5825 | * @skb: Socket buffer to modify |
| 5826 | * @dst: Destination MAC address of the new header |
| 5827 | * @src: Source MAC address of the new header |
| 5828 | * |
| 5829 | * Prepend @skb with a new Ethernet header. |
| 5830 | * |
| 5831 | * Expects that skb->data points to the mac header, which must be empty. |
| 5832 | * |
| 5833 | * Returns 0 on success, -errno otherwise. |
| 5834 | */ |
| 5835 | int skb_eth_push(struct sk_buff *skb, const unsigned char *dst, |
| 5836 | const unsigned char *src) |
| 5837 | { |
| 5838 | struct ethhdr *eth; |
| 5839 | int err; |
| 5840 | |
| 5841 | if (skb_network_offset(skb) || skb_vlan_tag_present(skb)) |
| 5842 | return -EPROTO; |
| 5843 | |
| 5844 | err = skb_cow_head(skb, sizeof(*eth)); |
| 5845 | if (err < 0) |
| 5846 | return err; |
| 5847 | |
| 5848 | skb_push(skb, sizeof(*eth)); |
| 5849 | skb_reset_mac_header(skb); |
| 5850 | skb_reset_mac_len(skb); |
| 5851 | |
| 5852 | eth = eth_hdr(skb); |
| 5853 | ether_addr_copy(eth->h_dest, dst); |
| 5854 | ether_addr_copy(eth->h_source, src); |
| 5855 | eth->h_proto = skb->protocol; |
| 5856 | |
| 5857 | skb_postpush_rcsum(skb, eth, sizeof(*eth)); |
| 5858 | |
| 5859 | return 0; |
| 5860 | } |
| 5861 | EXPORT_SYMBOL(skb_eth_push); |
| 5862 | |
John Hurley | 8822e27 | 2019-07-07 15:01:54 +0100 | [diff] [blame] | 5863 | /* Update the ethertype of hdr and the skb csum value if required. */ |
| 5864 | static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr, |
| 5865 | __be16 ethertype) |
| 5866 | { |
| 5867 | if (skb->ip_summed == CHECKSUM_COMPLETE) { |
| 5868 | __be16 diff[] = { ~hdr->h_proto, ethertype }; |
| 5869 | |
| 5870 | skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); |
| 5871 | } |
| 5872 | |
| 5873 | hdr->h_proto = ethertype; |
| 5874 | } |
| 5875 | |
| 5876 | /** |
Martin Varghese | e7dbfed | 2019-12-21 08:50:01 +0530 | [diff] [blame] | 5877 | * skb_mpls_push() - push a new MPLS header after mac_len bytes from start of |
| 5878 | * the packet |
John Hurley | 8822e27 | 2019-07-07 15:01:54 +0100 | [diff] [blame] | 5879 | * |
| 5880 | * @skb: buffer |
| 5881 | * @mpls_lse: MPLS label stack entry to push |
| 5882 | * @mpls_proto: ethertype of the new MPLS header (expects 0x8847 or 0x8848) |
Davide Caratti | fa4e0f8 | 2019-10-12 13:55:07 +0200 | [diff] [blame] | 5883 | * @mac_len: length of the MAC header |
Martin Varghese | e7dbfed | 2019-12-21 08:50:01 +0530 | [diff] [blame] | 5884 | * @ethernet: flag to indicate if the resulting packet after skb_mpls_push is |
| 5885 | * ethernet |
John Hurley | 8822e27 | 2019-07-07 15:01:54 +0100 | [diff] [blame] | 5886 | * |
| 5887 | * Expects skb->data at mac header. |
| 5888 | * |
| 5889 | * Returns 0 on success, -errno otherwise. |
| 5890 | */ |
Davide Caratti | fa4e0f8 | 2019-10-12 13:55:07 +0200 | [diff] [blame] | 5891 | int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto, |
Martin Varghese | d04ac22 | 2019-12-05 05:57:22 +0530 | [diff] [blame] | 5892 | int mac_len, bool ethernet) |
John Hurley | 8822e27 | 2019-07-07 15:01:54 +0100 | [diff] [blame] | 5893 | { |
| 5894 | struct mpls_shim_hdr *lse; |
| 5895 | int err; |
| 5896 | |
| 5897 | if (unlikely(!eth_p_mpls(mpls_proto))) |
| 5898 | return -EINVAL; |
| 5899 | |
| 5900 | /* Networking stack does not allow simultaneous Tunnel and MPLS GSO. */ |
| 5901 | if (skb->encapsulation) |
| 5902 | return -EINVAL; |
| 5903 | |
| 5904 | err = skb_cow_head(skb, MPLS_HLEN); |
| 5905 | if (unlikely(err)) |
| 5906 | return err; |
| 5907 | |
| 5908 | if (!skb->inner_protocol) { |
Martin Varghese | e7dbfed | 2019-12-21 08:50:01 +0530 | [diff] [blame] | 5909 | skb_set_inner_network_header(skb, skb_network_offset(skb)); |
John Hurley | 8822e27 | 2019-07-07 15:01:54 +0100 | [diff] [blame] | 5910 | skb_set_inner_protocol(skb, skb->protocol); |
| 5911 | } |
| 5912 | |
| 5913 | skb_push(skb, MPLS_HLEN); |
| 5914 | memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb), |
Davide Caratti | fa4e0f8 | 2019-10-12 13:55:07 +0200 | [diff] [blame] | 5915 | mac_len); |
John Hurley | 8822e27 | 2019-07-07 15:01:54 +0100 | [diff] [blame] | 5916 | skb_reset_mac_header(skb); |
Davide Caratti | fa4e0f8 | 2019-10-12 13:55:07 +0200 | [diff] [blame] | 5917 | skb_set_network_header(skb, mac_len); |
Martin Varghese | e7dbfed | 2019-12-21 08:50:01 +0530 | [diff] [blame] | 5918 | skb_reset_mac_len(skb); |
John Hurley | 8822e27 | 2019-07-07 15:01:54 +0100 | [diff] [blame] | 5919 | |
| 5920 | lse = mpls_hdr(skb); |
| 5921 | lse->label_stack_entry = mpls_lse; |
| 5922 | skb_postpush_rcsum(skb, lse, MPLS_HLEN); |
| 5923 | |
Guillaume Nault | 4296adc | 2020-10-02 21:53:08 +0200 | [diff] [blame] | 5924 | if (ethernet && mac_len >= ETH_HLEN) |
John Hurley | 8822e27 | 2019-07-07 15:01:54 +0100 | [diff] [blame] | 5925 | skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto); |
| 5926 | skb->protocol = mpls_proto; |
| 5927 | |
| 5928 | return 0; |
| 5929 | } |
| 5930 | EXPORT_SYMBOL_GPL(skb_mpls_push); |
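/* Example sketch of a tc/OVS-style label push onto an Ethernet frame
 * (hypothetical helper): build a bottom-of-stack label stack entry, then
 * push it after the MAC header. The LSE field values are illustrative only.
 */
static int example_push_mpls_label(struct sk_buff *skb, u32 label, u8 ttl)
{
	__be32 lse = cpu_to_be32((label << MPLS_LS_LABEL_SHIFT) |
				 (1 << MPLS_LS_S_SHIFT) |
				 ((u32)ttl << MPLS_LS_TTL_SHIFT));

	return skb_mpls_push(skb, lse, htons(ETH_P_MPLS_UC),
			     skb->mac_len, true);
}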
| 5931 | |
Eric Dumazet | 2e4e441 | 2014-09-17 04:49:49 -0700 | [diff] [blame] | 5932 | /** |
John Hurley | ed246ce | 2019-07-07 15:01:55 +0100 | [diff] [blame] | 5933 | * skb_mpls_pop() - pop the outermost MPLS header |
| 5934 | * |
| 5935 | * @skb: buffer |
| 5936 | * @next_proto: ethertype of header after popped MPLS header |
Davide Caratti | fa4e0f8 | 2019-10-12 13:55:07 +0200 | [diff] [blame] | 5937 | * @mac_len: length of the MAC header |
Martin Varghese | 76f99f9 | 2019-12-21 08:50:23 +0530 | [diff] [blame] | 5938 | * @ethernet: flag to indicate if the packet is ethernet |
John Hurley | ed246ce | 2019-07-07 15:01:55 +0100 | [diff] [blame] | 5939 | * |
| 5940 | * Expects skb->data at mac header. |
| 5941 | * |
| 5942 | * Returns 0 on success, -errno otherwise. |
| 5943 | */ |
int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
		 bool ethernet)
{
	int err;

	if (unlikely(!eth_p_mpls(skb->protocol)))
		return 0;

	err = skb_ensure_writable(skb, mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN);
	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
		mac_len);

	__skb_pull(skb, MPLS_HLEN);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, mac_len);

	if (ethernet && mac_len >= ETH_HLEN) {
		struct ethhdr *hdr;

		/* use mpls_hdr() to get ethertype to account for VLANs. */
		hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN);
		skb_mod_eth_type(skb, hdr, next_proto);
	}
	skb->protocol = next_proto;

	return 0;
}
EXPORT_SYMBOL_GPL(skb_mpls_pop);

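/* Illustrative sketch (assumption, not in the original source): undo the
 * push shown above - pop the outermost label and restore an IPv4
 * ethertype, mirroring how a TC action might call it:
 *
 *	int err = skb_mpls_pop(skb, htons(ETH_P_IP), skb->mac_len,
 *			       skb->dev && skb->dev->type == ARPHRD_ETHER);
 */
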
/**
 * skb_mpls_update_lse() - modify outermost MPLS header and update csum
 *
 * @skb: buffer
 * @mpls_lse: new MPLS label stack entry to update to
 *
 * Expects skb->data at mac header.
 *
 * Returns 0 on success, -errno otherwise.
 */
int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse)
{
	int err;

	if (unlikely(!eth_p_mpls(skb->protocol)))
		return -EINVAL;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

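	/* For CHECKSUM_COMPLETE, feed the one's complement of the old LSE
	 * and the new LSE through csum_partial() so skb->csum tracks the
	 * rewrite without a full recompute.
	 */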
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse };

		skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
	}

	mpls_hdr(skb)->label_stack_entry = mpls_lse;

	return 0;
}
EXPORT_SYMBOL_GPL(skb_mpls_update_lse);

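/* Sketch (assumption): rewrite only the TC bits of the outermost entry,
 * preserving label, bottom-of-stack flag and TTL. Assumes the header has
 * already been pulled into the linear area; "tc" is the hypothetical new
 * traffic-class value:
 *
 *	u32 lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry);
 *	int err;
 *
 *	lse &= ~MPLS_LS_TC_MASK;
 *	lse |= (tc << MPLS_LS_TC_SHIFT) & MPLS_LS_TC_MASK;
 *	err = skb_mpls_update_lse(skb, cpu_to_be32(lse));
 */
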
/**
 * skb_mpls_dec_ttl() - decrement the TTL of the outermost MPLS header
 *
 * @skb: buffer
 *
 * Expects skb->data at mac header.
 *
 * Returns 0 on success, -errno otherwise.
 */
int skb_mpls_dec_ttl(struct sk_buff *skb)
{
	u32 lse;
	u8 ttl;

	if (unlikely(!eth_p_mpls(skb->protocol)))
		return -EINVAL;

	if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
		return -ENOMEM;

	lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry);
	ttl = (lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT;
	if (!--ttl)
		return -EINVAL;

	lse &= ~MPLS_LS_TTL_MASK;
	lse |= ttl << MPLS_LS_TTL_SHIFT;

	return skb_mpls_update_lse(skb, cpu_to_be32(lse));
}
EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl);

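/* Sketch (assumption): typical forwarding-path use - treat an expired or
 * unreadable TTL as a drop:
 *
 *	if (skb_mpls_dec_ttl(skb)) {
 *		kfree_skb(skb);
 *		return NET_RX_DROP;
 *	}
 */
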
/**
 * alloc_skb_with_frags - allocate skb with page frags
 *
 * @header_len: size of linear part
 * @data_len: needed length in frags
 * @max_page_order: max page order desired.
 * @errcode: pointer to error code if any
 * @gfp_mask: allocation mask
 *
 * This can be used to allocate a paged skb, given a maximal order for frags.
 */
struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
				     unsigned long data_len,
				     int max_page_order,
				     int *errcode,
				     gfp_t gfp_mask)
{
	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	unsigned long chunk;
	struct sk_buff *skb;
	struct page *page;
	int i;

	*errcode = -EMSGSIZE;
	/* Note: this test could be relaxed if we succeeded in allocating
	 * high order pages...
	 */
	if (npages > MAX_SKB_FRAGS)
		return NULL;

	*errcode = -ENOBUFS;
	skb = alloc_skb(header_len, gfp_mask);
	if (!skb)
		return NULL;

	skb->truesize += npages << PAGE_SHIFT;

	for (i = 0; npages > 0; i++) {
		int order = max_page_order;

		while (order) {
			if (npages >= 1 << order) {
				page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
						   __GFP_COMP |
						   __GFP_NOWARN,
						   order);
				if (page)
					goto fill_page;
				/* Do not retry other high order allocations */
				order = 1;
				max_page_order = 0;
			}
			order--;
		}
		page = alloc_page(gfp_mask);
		if (!page)
			goto failure;
fill_page:
		chunk = min_t(unsigned long, data_len,
			      PAGE_SIZE << order);
		skb_fill_page_desc(skb, i, page, 0, chunk);
		data_len -= chunk;
		npages -= 1 << order;
	}
	return skb;

failure:
	kfree_skb(skb);
	return NULL;
}
EXPORT_SYMBOL(alloc_skb_with_frags);

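/* Sketch (assumption): allocate a message with 128 bytes of linear space
 * and 8 KB spread over page frags, preferring order-2 pages; the caller
 * still has to copy payload into the frags afterwards:
 *
 *	int errcode;
 *	struct sk_buff *skb = alloc_skb_with_frags(128, 8192, 2, &errcode,
 *						   GFP_KERNEL);
 *	if (!skb)
 *		return ERR_PTR(errcode);
 */
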
/* carve out the first off bytes from skb when off < headlen */
static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
				    const int headlen, gfp_t gfp_mask)
{
	int i;
	int size = skb_end_offset(skb);
	int new_hlen = headlen - off;
	u8 *data;

	size = SKB_DATA_ALIGN(size);

	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;
	data = kmalloc_reserve(size +
			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
			       gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		return -ENOMEM;

	size = SKB_WITH_OVERHEAD(ksize(data));

	/* Copy real data, and all frags */
	skb_copy_from_linear_data_offset(skb, off, data, new_hlen);
	skb->len -= off;

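	/* Mirror the shared info, including the frag descriptors still in
	 * use, into the freshly allocated head.
	 */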
	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info,
			frags[skb_shinfo(skb)->nr_frags]));
	if (skb_cloned(skb)) {
		/* drop the old head gracefully */
		if (skb_orphan_frags(skb, gfp_mask)) {
			kfree(data);
			return -ENOMEM;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			skb_frag_ref(skb, i);
		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);
		skb_release_data(skb);
	} else {
		/* We can reuse the existing refcount - all we did was
		 * relocate values.
		 */
		skb_free_head(skb);
	}

	skb->head = data;
	skb->data = data;
	skb->head_frag = 0;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end = size;
#else
	skb->end = skb->head + size;
#endif
	skb_set_tail_pointer(skb, skb_headlen(skb));
	skb_headers_offset_update(skb, 0);
	skb->cloned = 0;
	skb->hdr_len = 0;
	skb->nohdr = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);

	return 0;
}

static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);

/* carve out the first eat bytes from skb's frag_list. May recurse into
 * pskb_carve()
 */
static int pskb_carve_frag_list(struct sk_buff *skb,
				struct skb_shared_info *shinfo, int eat,
				gfp_t gfp_mask)
{
	struct sk_buff *list = shinfo->frag_list;
	struct sk_buff *clone = NULL;
	struct sk_buff *insp = NULL;

	do {
		if (!list) {
			pr_err("Not enough bytes to eat. Want %d\n", eat);
			return -EFAULT;
		}
		if (list->len <= eat) {
			/* Eaten as whole. */
			eat -= list->len;
			list = list->next;
			insp = list;
		} else {
			/* Eaten partially. */
			if (skb_shared(list)) {
				clone = skb_clone(list, gfp_mask);
				if (!clone)
					return -ENOMEM;
				insp = list->next;
				list = clone;
			} else {
				/* This may be pulled without problems. */
				insp = list;
			}
			if (pskb_carve(list, eat, gfp_mask) < 0) {
				kfree_skb(clone);
				return -ENOMEM;
			}
			break;
		}
	} while (eat);

	/* Free pulled out fragments. */
	while ((list = shinfo->frag_list) != insp) {
		shinfo->frag_list = list->next;
		kfree_skb(list);
	}
	/* And insert new clone at head. */
	if (clone) {
		clone->next = list;
		shinfo->frag_list = clone;
	}
	return 0;
}

/* carve off first len bytes from skb. Split line (off) is in the
 * non-linear part of skb
 */
static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
				       int pos, gfp_t gfp_mask)
{
	int i, k = 0;
	int size = skb_end_offset(skb);
	u8 *data;
	const int nfrags = skb_shinfo(skb)->nr_frags;
	struct skb_shared_info *shinfo;

	size = SKB_DATA_ALIGN(size);

	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;
	data = kmalloc_reserve(size +
			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
			       gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		return -ENOMEM;

	size = SKB_WITH_OVERHEAD(ksize(data));

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0]));
	if (skb_orphan_frags(skb, gfp_mask)) {
		kfree(data);
		return -ENOMEM;
	}
	shinfo = (struct skb_shared_info *)(data + size);
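	/* Keep only the frags that extend past the split point; the first
	 * kept frag may get its offset and size trimmed below.
	 */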
	for (i = 0; i < nfrags; i++) {
		int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (pos + fsize > off) {
			shinfo->frags[k] = skb_shinfo(skb)->frags[i];

			if (pos < off) {
				/* Split frag.
				 * We have two variants in this case:
				 * 1. Move all the frag to the second
				 *    part, if it is possible. F.e.
				 *    this approach is mandatory for TUX,
				 *    where splitting is expensive.
				 * 2. Split accurately. That is what we
				 *    do here.
				 */
				skb_frag_off_add(&shinfo->frags[0], off - pos);
				skb_frag_size_sub(&shinfo->frags[0], off - pos);
			}
			skb_frag_ref(skb, i);
			k++;
		}
		pos += fsize;
	}
	shinfo->nr_frags = k;
	if (skb_has_frag_list(skb))
		skb_clone_fraglist(skb);

	/* split line is in frag list */
	if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) {
		/* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */
		if (skb_has_frag_list(skb))
			kfree_skb_list(skb_shinfo(skb)->frag_list);
		kfree(data);
		return -ENOMEM;
	}
	skb_release_data(skb);

	skb->head = data;
	skb->head_frag = 0;
	skb->data = data;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end = size;
#else
	skb->end = skb->head + size;
#endif
	skb_reset_tail_pointer(skb);
	skb_headers_offset_update(skb, 0);
	skb->cloned = 0;
	skb->hdr_len = 0;
	skb->nohdr = 0;
	skb->len -= off;
	skb->data_len = skb->len;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;
}

/* remove len bytes from the beginning of the skb */
static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp)
{
	int headlen = skb_headlen(skb);

	if (len < headlen)
		return pskb_carve_inside_header(skb, len, headlen, gfp);
	else
		return pskb_carve_inside_nonlinear(skb, len, headlen, gfp);
}

/* Extract to_copy bytes starting at off from skb, and return them in
 * a new skb
 */
struct sk_buff *pskb_extract(struct sk_buff *skb, int off,
			     int to_copy, gfp_t gfp)
{
	struct sk_buff *clone = skb_clone(skb, gfp);

	if (!clone)
		return NULL;

	if (pskb_carve(clone, off, gfp) < 0 ||
	    pskb_trim(clone, to_copy)) {
		kfree_skb(clone);
		return NULL;
	}
	return clone;
}
EXPORT_SYMBOL(pskb_extract);

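/* Sketch (assumption): take a 1 KB window starting at byte 64 of a
 * received skb without modifying the original - the clone shares the
 * payload pages, so no bulk copy happens:
 *
 *	struct sk_buff *part = pskb_extract(skb, 64, 1024, GFP_KERNEL);
 *
 *	if (!part)
 *		return -ENOMEM;
 */
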
/**
 * skb_condense - try to get rid of fragments/frag_list if possible
 * @skb: buffer
 *
 * Can be used to save memory before skb is added to a busy queue.
 * If packet has bytes in frags and enough tail room in skb->head,
 * pull all of them, so that we can free the frags right now and adjust
 * truesize.
 * Notes:
 *	We do not reallocate skb->head thus can not fail.
 *	Caller must re-evaluate skb->truesize if needed.
 */
void skb_condense(struct sk_buff *skb)
{
	if (skb->data_len) {
		if (skb->data_len > skb->end - skb->tail ||
		    skb_cloned(skb))
			return;

		/* Nice, we can free page frag(s) right now */
		__pskb_pull_tail(skb, skb->data_len);
	}
	/* At this point, skb->truesize might be overestimated, because
	 * skb had a fragment, and fragments do not tell their truesize.
	 * When we pulled its content into skb->head, the fragment was
	 * freed, but __pskb_pull_tail() could not possibly adjust
	 * skb->truesize, not knowing the frag truesize.
	 */
	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
}

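/* Sketch (assumption): a caller that accounts per-socket memory should
 * re-read truesize after condensing, as the kernel-doc above requires.
 * "reclaimed" is a hypothetical counter:
 *
 *	u32 before = skb->truesize;
 *
 *	skb_condense(skb);
 *	reclaimed += before - skb->truesize;
 */
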
#ifdef CONFIG_SKB_EXTENSIONS
static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id)
{
	return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE);
}

/**
 * __skb_ext_alloc - allocate new skb extension storage
 *
 * @flags: See kmalloc().
 *
 * Returns the newly allocated pointer. The pointer can later be attached
 * to an skb via __skb_ext_set().
 * Note: callers must treat the skb_ext as opaque data.
 */
struct skb_ext *__skb_ext_alloc(gfp_t flags)
{
	struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, flags);

	if (new) {
		memset(new->offset, 0, sizeof(new->offset));
		refcount_set(&new->refcnt, 1);
	}

	return new;
}

static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old,
					 unsigned int old_active)
{
	struct skb_ext *new;

	if (refcount_read(&old->refcnt) == 1)
		return old;

	new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC);
	if (!new)
		return NULL;

	memcpy(new, old, old->chunks * SKB_EXT_ALIGN_VALUE);
	refcount_set(&new->refcnt, 1);

#ifdef CONFIG_XFRM
	if (old_active & (1 << SKB_EXT_SEC_PATH)) {
		struct sec_path *sp = skb_ext_get_ptr(old, SKB_EXT_SEC_PATH);
		unsigned int i;

		for (i = 0; i < sp->len; i++)
			xfrm_state_hold(sp->xvec[i]);
	}
#endif
	__skb_ext_put(old);
	return new;
}

/**
 * __skb_ext_set - attach the specified extension storage to this skb
 * @skb: buffer
 * @id: extension id
 * @ext: extension storage previously allocated via __skb_ext_alloc()
 *
 * Existing extensions, if any, are cleared.
 *
 * Returns the pointer to the extension.
 */
void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id,
		    struct skb_ext *ext)
{
	unsigned int newlen, newoff = SKB_EXT_CHUNKSIZEOF(*ext);

	skb_ext_put(skb);
	newlen = newoff + skb_ext_type_len[id];
	ext->chunks = newlen;
	ext->offset[id] = newoff;
	skb->extensions = ext;
	skb->active_extensions = 1 << id;
	return skb_ext_get_ptr(ext, id);
}

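/* Sketch (assumption): preallocate extension storage where sleeping is
 * allowed, then attach it later from a context that must not allocate,
 * e.g. an MPTCP-style use; the zeroing step is illustrative only:
 *
 *	struct skb_ext *ext = __skb_ext_alloc(GFP_KERNEL);
 *
 *	...
 *	if (ext) {
 *		struct mptcp_ext *mp = __skb_ext_set(skb, SKB_EXT_MPTCP, ext);
 *
 *		memset(mp, 0, sizeof(*mp));
 *	}
 */
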
/**
 * skb_ext_add - allocate space for given extension, COW if needed
 * @skb: buffer
 * @id: extension to allocate space for
 *
 * Allocates enough space for the given extension.
 * If the extension is already present, a pointer to that extension
 * is returned.
 *
 * If the skb was cloned, COW applies and the returned memory can be
 * modified without changing the extension space of cloned buffers.
 *
 * Returns pointer to the extension or NULL on allocation failure.
 */
void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id)
{
	struct skb_ext *new, *old = NULL;
	unsigned int newlen, newoff;

	if (skb->active_extensions) {
		old = skb->extensions;

		new = skb_ext_maybe_cow(old, skb->active_extensions);
		if (!new)
			return NULL;

		if (__skb_ext_exist(new, id))
			goto set_active;

		newoff = new->chunks;
	} else {
		newoff = SKB_EXT_CHUNKSIZEOF(*new);

		new = __skb_ext_alloc(GFP_ATOMIC);
		if (!new)
			return NULL;
	}

	newlen = newoff + skb_ext_type_len[id];
	new->chunks = newlen;
	new->offset[id] = newoff;
set_active:
	skb->slow_gro = 1;
	skb->extensions = new;
	skb->active_extensions |= 1 << id;
	return skb_ext_get_ptr(new, id);
}
EXPORT_SYMBOL(skb_ext_add);

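/* Sketch (assumption): attaching a security path on the receive path, the
 * way an xfrm-style caller might; skb_ext_add() allocates with GFP_ATOMIC
 * internally, so it is usable from softirq context:
 *
 *	struct sec_path *sp = skb_ext_add(skb, SKB_EXT_SEC_PATH);
 *
 *	if (!sp)
 *		return -ENOMEM;
 *	sp->len = 0;
 */
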
#ifdef CONFIG_XFRM
static void skb_ext_put_sp(struct sec_path *sp)
{
	unsigned int i;

	for (i = 0; i < sp->len; i++)
		xfrm_state_put(sp->xvec[i]);
}
#endif

void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
{
	struct skb_ext *ext = skb->extensions;

	skb->active_extensions &= ~(1 << id);
	if (skb->active_extensions == 0) {
		skb->extensions = NULL;
		__skb_ext_put(ext);
#ifdef CONFIG_XFRM
	} else if (id == SKB_EXT_SEC_PATH &&
		   refcount_read(&ext->refcnt) == 1) {
		struct sec_path *sp = skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH);

		skb_ext_put_sp(sp);
		sp->len = 0;
#endif
	}
}
EXPORT_SYMBOL(__skb_ext_del);

void __skb_ext_put(struct skb_ext *ext)
{
	/* If this is the last clone, nothing can increment the refcount
	 * after the check passes. Avoids one atomic op.
	 */
	if (refcount_read(&ext->refcnt) == 1)
		goto free_now;

	if (!refcount_dec_and_test(&ext->refcnt))
		return;
free_now:
#ifdef CONFIG_XFRM
	if (__skb_ext_exist(ext, SKB_EXT_SEC_PATH))
		skb_ext_put_sp(skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH));
#endif

	kmem_cache_free(skbuff_ext_cache, ext);
}
EXPORT_SYMBOL(__skb_ext_put);
#endif /* CONFIG_SKB_EXTENSIONS */