// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>
#include <linux/mpls.h>
#include <linux/kcov.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/mpls.h>
#include <net/mptcp.h>
#include <net/mctp.h>
#include <net/page_pool.h>

#include <linux/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>
#include <linux/indirect_call_wrapper.h>

#include "datagram.h"
#include "sock_destructor.h"

struct kmem_cache *skbuff_head_cache __ro_after_init;
static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
#ifdef CONFIG_SKB_EXTENSIONS
static struct kmem_cache *skbuff_ext_cache __ro_after_init;
#endif
int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);

/**
 *	skb_panic - private function for out-of-line support
 *	@skb:	buffer
 *	@sz:	size
 *	@addr:	address
 *	@msg:	skb_over_panic or skb_under_panic
 *
 *	Out-of-line support for skb_put() and skb_push().
 *	Called via the wrapper skb_over_panic() or skb_under_panic().
 *	Keep out of line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
		      const char msg[])
{
	pr_emerg("%s: text:%px len:%d put:%d head:%px data:%px tail:%#lx end:%#lx dev:%s\n",
		 msg, addr, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

#define NAPI_SKB_CACHE_SIZE	64
#define NAPI_SKB_CACHE_BULK	16
#define NAPI_SKB_CACHE_HALF	(NAPI_SKB_CACHE_SIZE / 2)

struct napi_alloc_cache {
	struct page_frag_cache page;
	unsigned int skb_count;
	void *skb_cache[NAPI_SKB_CACHE_SIZE];
};

static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);

void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	fragsz = SKB_DATA_ALIGN(fragsz);

	return page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask);
}
EXPORT_SYMBOL(__napi_alloc_frag_align);

void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
	void *data;

	fragsz = SKB_DATA_ALIGN(fragsz);
	if (in_hardirq() || irqs_disabled()) {
		struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache);

		data = page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align_mask);
	} else {
		struct napi_alloc_cache *nc;

		local_bh_disable();
		nc = this_cpu_ptr(&napi_alloc_cache);
		data = page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask);
		local_bh_enable();
	}
	return data;
}
EXPORT_SYMBOL(__netdev_alloc_frag_align);
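/* A minimal usage sketch: callers normally go through the
 * netdev_alloc_frag()/napi_alloc_frag() inline wrappers from
 * <linux/skbuff.h>, which pass an all-ones align_mask to the helpers above.
 * Hypothetical example, not code from this file:
 *
 *	void *frag = netdev_alloc_frag(1536);	// GFP_ATOMIC page fragment
 *	if (frag)
 *		skb_free_frag(frag);		// release the fragment again
 */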
static struct sk_buff *napi_skb_cache_get(void)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
	struct sk_buff *skb;

	if (unlikely(!nc->skb_count))
		nc->skb_count = kmem_cache_alloc_bulk(skbuff_head_cache,
						      GFP_ATOMIC,
						      NAPI_SKB_CACHE_BULK,
						      nc->skb_cache);
	if (unlikely(!nc->skb_count))
		return NULL;

	skb = nc->skb_cache[--nc->skb_count];
	kasan_unpoison_object_data(skbuff_head_cache, skb);

	return skb;
}

/* Caller must provide SKB that is memset cleared */
static void __build_skb_around(struct sk_buff *skb, void *data,
			       unsigned int frag_size)
{
	struct skb_shared_info *shinfo;
	unsigned int size = frag_size ? : ksize(data);

	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* Assumes caller memset cleared SKB */
	skb->truesize = SKB_TRUESIZE(size);
	refcount_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	skb_set_kcov_handle(skb, kcov_common_handle());
}

/**
 * __build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc() only if
 * @frag_size is 0, otherwise data should come from the page allocator
 * or vmalloc().
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 *  Before IO, driver allocates only data buffer where NIC put incoming frame
 *  Driver should add room at head (NET_SKB_PAD) and
 *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
 *  After IO, driver calls build_skb(), to allocate sk_buff and populate it
 *  before giving packet to stack.
 *  RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb;

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, frag_size);

	return skb;
}

/* build_skb() is a wrapper over __build_skb() that specifically
 * takes care of skb->head and skb->pfmemalloc.
 * This means that if @frag_size is not zero, then @data must be backed
 * by a page fragment, not kmalloc() or vmalloc().
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __build_skb(data, frag_size);

	if (skb && frag_size) {
		skb->head_frag = 1;
		if (page_is_pfmemalloc(virt_to_head_page(data)))
			skb->pfmemalloc = 1;
	}
	return skb;
}
EXPORT_SYMBOL(build_skb);
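/* A minimal RX usage sketch for build_skb(), following the notes in the
 * __build_skb() kernel-doc above. Hypothetical driver code, not part of
 * this file: the driver reserves headroom and shinfo tailroom in the
 * fragment handed to the NIC, then wraps it once DMA has completed.
 *
 *	unsigned int truesize = SKB_DATA_ALIGN(NET_SKB_PAD + buflen) +
 *				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	void *buf = napi_alloc_frag(truesize);	// NIC DMAs to buf + NET_SKB_PAD
 *	...
 *	struct sk_buff *skb = build_skb(buf, truesize);
 *	if (skb) {
 *		skb_reserve(skb, NET_SKB_PAD);	// skip the reserved headroom
 *		skb_put(skb, pkt_len);		// bytes the NIC actually wrote
 *	}
 */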
/**
 * build_skb_around - build a network buffer around provided skb
 * @skb: sk_buff provided by caller, must be memset cleared
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 */
struct sk_buff *build_skb_around(struct sk_buff *skb,
				 void *data, unsigned int frag_size)
{
	if (unlikely(!skb))
		return NULL;

	__build_skb_around(skb, data, frag_size);

	if (frag_size) {
		skb->head_frag = 1;
		if (page_is_pfmemalloc(virt_to_head_page(data)))
			skb->pfmemalloc = 1;
	}
	return skb;
}
EXPORT_SYMBOL(build_skb_around);

/**
 * __napi_build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * Version of __build_skb() that uses NAPI percpu caches to obtain
 * skbuff_head instead of an in-place allocation.
 *
 * Returns a new &sk_buff on success, %NULL on allocation failure.
 */
static struct sk_buff *__napi_build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb;

	skb = napi_skb_cache_get();
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, frag_size);

	return skb;
}

/**
 * napi_build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * Version of __napi_build_skb() that takes care of skb->head_frag
 * and skb->pfmemalloc when the data is a page or page fragment.
 *
 * Returns a new &sk_buff on success, %NULL on allocation failure.
 */
struct sk_buff *napi_build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __napi_build_skb(data, frag_size);

	if (likely(skb) && frag_size) {
		skb->head_frag = 1;
		skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
	}

	return skb;
}
EXPORT_SYMBOL(napi_build_skb);

/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If it is and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free.
 */
static void *kmalloc_reserve(size_t size, gfp_t flags, int node,
			     bool *pfmemalloc)
{
	void *obj;
	bool ret_pfmemalloc = false;

	/*
	 * Try a regular allocation, when that fails and we're not entitled
	 * to the reserves, fail.
	 */
	obj = kmalloc_node_track_caller(size,
					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					node);
	if (obj || !(gfp_pfmemalloc_allowed(flags)))
		goto out;

	/* Try again but now we are using pfmemalloc reserves */
	ret_pfmemalloc = true;
	obj = kmalloc_node_track_caller(size, flags, node);

out:
	if (pfmemalloc)
		*pfmemalloc = ret_pfmemalloc;

	return obj;
}

/* Allocate a new skbuff. We do this ourselves so we can fill in a few
 * 'private' fields and also do memory statistics to find all the
 * [BEEP] leaks.
 *
 */

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *		instead of head cache and allocate a cloned (child) skb.
 *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *		allocations in case the data is required for writeback
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of at least size bytes. The object has a reference count
 *	of one. The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int flags, int node)
{
	struct kmem_cache *cache;
	struct sk_buff *skb;
	unsigned int osize;
	bool pfmemalloc;
	u8 *data;

	cache = (flags & SKB_ALLOC_FCLONE)
		? skbuff_fclone_cache : skbuff_head_cache;

	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	/* Get the HEAD */
	if ((flags & (SKB_ALLOC_FCLONE | SKB_ALLOC_NAPI)) == SKB_ALLOC_NAPI &&
	    likely(node == NUMA_NO_NODE || node == numa_mem_id()))
		skb = napi_skb_cache_get();
	else
		skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node);
	if (unlikely(!skb))
		return NULL;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_DATA_ALIGN(size);
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
	if (unlikely(!data))
		goto nodata;
	/* kmalloc(size) might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	osize = ksize(data);
	size = SKB_WITH_OVERHEAD(osize);
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, osize);
	skb->pfmemalloc = pfmemalloc;

	if (flags & SKB_ALLOC_FCLONE) {
		struct sk_buff_fclones *fclones;

		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		skb->fclone = SKB_FCLONE_ORIG;
		refcount_set(&fclones->fclone_ref, 1);

		fclones->skb2.fclone = SKB_FCLONE_CLONE;
	}

	return skb;

nodata:
	kmem_cache_free(cache, skb);
	return NULL;
}
EXPORT_SYMBOL(__alloc_skb);
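/* A minimal usage sketch: most callers use the alloc_skb() wrapper from
 * <linux/skbuff.h> rather than __alloc_skb() directly. Hypothetical
 * example (headroom/payload/len are made-up caller variables):
 *
 *	struct sk_buff *skb = alloc_skb(headroom + len, GFP_KERNEL);
 *	if (skb) {
 *		skb_reserve(skb, headroom);		// carve out headroom
 *		skb_put_data(skb, payload, len);	// append payload bytes
 *	}
 */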
/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@len: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has NET_SKB_PAD headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
				   gfp_t gfp_mask)
{
	struct page_frag_cache *nc;
	struct sk_buff *skb;
	bool pfmemalloc;
	void *data;

	len += NET_SKB_PAD;

	/* If requested length is either too small or too big,
	 * we use kmalloc() for skb->head allocation.
	 */
	if (len <= SKB_WITH_OVERHEAD(1024) ||
	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	len = SKB_DATA_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	if (in_hardirq() || irqs_disabled()) {
		nc = this_cpu_ptr(&netdev_alloc_cache);
		data = page_frag_alloc(nc, len, gfp_mask);
		pfmemalloc = nc->pfmemalloc;
	} else {
		local_bh_disable();
		nc = this_cpu_ptr(&napi_alloc_cache.page);
		data = page_frag_alloc(nc, len, gfp_mask);
		pfmemalloc = nc->pfmemalloc;
		local_bh_enable();
	}

	if (unlikely(!data))
		return NULL;

	skb = __build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	if (pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD);
	skb->dev = dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);
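/* A minimal usage sketch (hypothetical non-NAPI RX path): drivers usually
 * call the netdev_alloc_skb()/netdev_alloc_skb_ip_align() wrappers, which
 * pass GFP_ATOMIC here. Example, not code from this file:
 *
 *	struct sk_buff *skb = netdev_alloc_skb_ip_align(dev, pkt_len);
 *	if (skb) {
 *		skb_put_data(skb, rx_buf, pkt_len);	// copy the frame
 *		skb->protocol = eth_type_trans(skb, dev);
 *		netif_rx(skb);
 *	}
 */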
/**
 *	__napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
 *	@napi: napi instance this buffer was allocated for
 *	@len: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
 *
 *	Allocate a new sk_buff for use in NAPI receive.  This buffer will
 *	attempt to allocate the head from a special reserved region used
 *	only for NAPI Rx allocation.  By doing this we can save several
 *	CPU cycles by avoiding having to disable and re-enable IRQs.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
				 gfp_t gfp_mask)
{
	struct napi_alloc_cache *nc;
	struct sk_buff *skb;
	void *data;

	len += NET_SKB_PAD + NET_IP_ALIGN;

	/* If requested length is either too small or too big,
	 * we use kmalloc() for skb->head allocation.
	 */
	if (len <= SKB_WITH_OVERHEAD(1024) ||
	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI,
				  NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	nc = this_cpu_ptr(&napi_alloc_cache);
	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	len = SKB_DATA_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	data = page_frag_alloc(&nc->page, len, gfp_mask);
	if (unlikely(!data))
		return NULL;

	skb = __napi_build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	if (nc->page.pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	skb->dev = napi->dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__napi_alloc_skb);
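/* A minimal usage sketch (hypothetical NAPI poll path): drivers typically
 * call the napi_alloc_skb() wrapper, which passes GFP_ATOMIC. "priv" is a
 * made-up driver-private structure:
 *
 *	struct sk_buff *skb = napi_alloc_skb(&priv->napi, pkt_len);
 *	if (skb) {
 *		skb_put_data(skb, rx_buf, pkt_len);
 *		skb->protocol = eth_type_trans(skb, priv->napi.dev);
 *		napi_gro_receive(&priv->napi, skb);
 *	}
 */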
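/* Append @page (offset @off, length @size) as fragment @i of @skb, and
 * account for the added bytes in skb->len, skb->data_len and skb->truesize.
 * @truesize is supplied by the caller, since the true memory cost of a
 * fragment depends on how the underlying page was allocated.
 */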
void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	skb_frag_size_add(frag, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_coalesce_rx_frag);

static void skb_drop_list(struct sk_buff **listp)
{
	kfree_skb_list(*listp);
	*listp = NULL;
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_free_head(struct sk_buff *skb)
{
	unsigned char *head = skb->head;

	if (skb->head_frag) {
		if (skb_pp_recycle(skb, head))
			return;
		skb_free_frag(head);
	} else {
		kfree(head);
	}
}

static void skb_release_data(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int i;

	if (skb->cloned &&
	    atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			      &shinfo->dataref))
		goto exit;

	skb_zcopy_clear(skb, true);

	for (i = 0; i < shinfo->nr_frags; i++)
		__skb_frag_unref(&shinfo->frags[i], skb->pp_recycle);

	if (shinfo->frag_list)
		kfree_skb_list(shinfo->frag_list);

	skb_free_head(skb);
exit:
	/* When we clone an SKB we copy the recycling bit. The pp_recycle
	 * bit is only set on the head though, so in order to avoid races
	 * while trying to recycle fragments on __skb_frag_unref() we need
	 * to make one SKB responsible for triggering the recycle path.
	 * So disable the recycling bit if an SKB is cloned and we have
	 * additional references to the fragmented part of the SKB.
	 * Eventually the last SKB will have the recycling bit set and its
	 * dataref set to 0, which will trigger the recycling.
	 */
	skb->pp_recycle = 0;
}

/*
 *	Free an skbuff's memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff_fclones *fclones;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		return;

	case SKB_FCLONE_ORIG:
		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		/* We usually free the clone (TX completion) before the
		 * original skb. This test would have no chance to be true
		 * for the clone, while here, branch prediction will be good.
		 */
		if (refcount_read(&fclones->fclone_ref) == 1)
			goto fastpath;
		break;

	default: /* SKB_FCLONE_CLONE */
		fclones = container_of(skb, struct sk_buff_fclones, skb2);
		break;
	}
	if (!refcount_dec_and_test(&fclones->fclone_ref))
		return;
fastpath:
	kmem_cache_free(skbuff_fclone_cache, fclones);
}

void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
	if (skb->destructor) {
		WARN_ON(in_hardirq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb_nfct(skb));
#endif
	skb_ext_put(skb);
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	if (likely(skb->head))
		skb_release_data(skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb.
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

/**
 *	kfree_skb_reason - free an sk_buff with special reason
 *	@skb: buffer to free
 *	@reason: reason why this skb is dropped
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero. Meanwhile, pass the drop reason to the 'kfree_skb'
 *	tracepoint.
 */
void kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason)
{
	if (!skb_unref(skb))
		return;

	trace_kfree_skb(skb, __builtin_return_address(0), reason);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb_reason);
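/* A minimal usage sketch (hypothetical protocol handler): drop a packet
 * with an explicit reason so drop-monitoring tools attached to the
 * kfree_skb tracepoint can classify it. Assumes a drop reason such as
 * SKB_DROP_REASON_PKT_TOO_SMALL from the skb_drop_reason enum; "myhdr"
 * is made up:
 *
 *	if (!pskb_may_pull(skb, sizeof(struct myhdr))) {
 *		kfree_skb_reason(skb, SKB_DROP_REASON_PKT_TOO_SMALL);
 *		return NET_RX_DROP;
 *	}
 */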
void kfree_skb_list(struct sk_buff *segs)
{
	while (segs) {
		struct sk_buff *next = segs->next;

		kfree_skb(segs);
		segs = next;
	}
}
EXPORT_SYMBOL(kfree_skb_list);

/* Dump skb information and contents.
 *
 * Must only be called from net_ratelimit()-ed paths.
 *
 * Dumps whole packets if full_pkt, only headers otherwise.
 */
void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	struct net_device *dev = skb->dev;
	struct sock *sk = skb->sk;
	struct sk_buff *list_skb;
	bool has_mac, has_trans;
	int headroom, tailroom;
	int i, len, seg_len;

	if (full_pkt)
		len = skb->len;
	else
		len = min_t(int, skb->len, MAX_HEADER + 128);

	headroom = skb_headroom(skb);
	tailroom = skb_tailroom(skb);

	has_mac = skb_mac_header_was_set(skb);
	has_trans = skb_transport_header_was_set(skb);

	printk("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n"
	       "mac=(%d,%d) net=(%d,%d) trans=%d\n"
	       "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n"
	       "csum(0x%x ip_summed=%u complete_sw=%u valid=%u level=%u)\n"
	       "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n",
	       level, skb->len, headroom, skb_headlen(skb), tailroom,
	       has_mac ? skb->mac_header : -1,
	       has_mac ? skb_mac_header_len(skb) : -1,
	       skb->network_header,
	       has_trans ? skb_network_header_len(skb) : -1,
	       has_trans ? skb->transport_header : -1,
	       sh->tx_flags, sh->nr_frags,
	       sh->gso_size, sh->gso_type, sh->gso_segs,
	       skb->csum, skb->ip_summed, skb->csum_complete_sw,
	       skb->csum_valid, skb->csum_level,
	       skb->hash, skb->sw_hash, skb->l4_hash,
	       ntohs(skb->protocol), skb->pkt_type, skb->skb_iif);

	if (dev)
		printk("%sdev name=%s feat=%pNF\n",
		       level, dev->name, &dev->features);
	if (sk)
		printk("%ssk family=%hu type=%u proto=%u\n",
		       level, sk->sk_family, sk->sk_type, sk->sk_protocol);

	if (full_pkt && headroom)
		print_hex_dump(level, "skb headroom: ", DUMP_PREFIX_OFFSET,
			       16, 1, skb->head, headroom, false);

	seg_len = min_t(int, skb_headlen(skb), len);
	if (seg_len)
		print_hex_dump(level, "skb linear:   ", DUMP_PREFIX_OFFSET,
			       16, 1, skb->data, seg_len, false);
	len -= seg_len;

	if (full_pkt && tailroom)
		print_hex_dump(level, "skb tailroom: ", DUMP_PREFIX_OFFSET,
			       16, 1, skb_tail_pointer(skb), tailroom, false);

	for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 p_off, p_len, copied;
		struct page *p;
		u8 *vaddr;

		skb_frag_foreach_page(frag, skb_frag_off(frag),
				      skb_frag_size(frag), p, p_off, p_len,
				      copied) {
			seg_len = min_t(int, p_len, len);
			vaddr = kmap_atomic(p);
			print_hex_dump(level, "skb frag:     ",
				       DUMP_PREFIX_OFFSET,
				       16, 1, vaddr + p_off, seg_len, false);
			kunmap_atomic(vaddr);
			len -= seg_len;
			if (!len)
				break;
		}
	}

	if (full_pkt && skb_has_frag_list(skb)) {
		printk("skb fraglist:\n");
		skb_walk_frags(skb, list_skb)
			skb_dump(level, list_skb, true);
	}
}
EXPORT_SYMBOL(skb_dump);
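/* A minimal usage sketch (hypothetical debug path): rate-limit the call as
 * the comment above requires, and pass a printk level prefix:
 *
 *	if (net_ratelimit())
 *		skb_dump(KERN_ERR, skb, false);	// headers only
 */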
| 885 | |
Stephen Hemminger | d1a203e | 2008-11-01 21:01:09 -0700 | [diff] [blame] | 886 | /** |
Michael S. Tsirkin | 2512117 | 2012-11-01 09:16:28 +0000 | [diff] [blame] | 887 | * skb_tx_error - report an sk_buff xmit error |
| 888 | * @skb: buffer that triggered an error |
| 889 | * |
| 890 | * Report xmit error if a device callback is tracking this skb. |
| 891 | * skb must be freed afterwards. |
| 892 | */ |
| 893 | void skb_tx_error(struct sk_buff *skb) |
| 894 | { |
Willem de Bruijn | 1f8b977 | 2017-08-03 16:29:41 -0400 | [diff] [blame] | 895 | skb_zcopy_clear(skb, true); |
Michael S. Tsirkin | 2512117 | 2012-11-01 09:16:28 +0000 | [diff] [blame] | 896 | } |
| 897 | EXPORT_SYMBOL(skb_tx_error); |
| 898 | |
Herbert Xu | be769db | 2020-08-22 08:23:29 +1000 | [diff] [blame] | 899 | #ifdef CONFIG_TRACEPOINTS |
Michael S. Tsirkin | 2512117 | 2012-11-01 09:16:28 +0000 | [diff] [blame] | 900 | /** |
Neil Horman | ead2ceb | 2009-03-11 09:49:55 +0000 | [diff] [blame] | 901 | * consume_skb - free an skbuff |
| 902 | * @skb: buffer to free |
| 903 | * |
| 904 | * Drop a ref to the buffer and free it if the usage count has hit zero |
| 905 | * Functions identically to kfree_skb, but kfree_skb assumes that the frame |
| 906 | * is being dropped after a failure and notes that |
| 907 | */ |
| 908 | void consume_skb(struct sk_buff *skb) |
| 909 | { |
Paolo Abeni | 3889a803 | 2017-06-12 11:23:41 +0200 | [diff] [blame] | 910 | if (!skb_unref(skb)) |
Neil Horman | ead2ceb | 2009-03-11 09:49:55 +0000 | [diff] [blame] | 911 | return; |
Paolo Abeni | 3889a803 | 2017-06-12 11:23:41 +0200 | [diff] [blame] | 912 | |
Koki Sanagi | 07dc22e | 2010-08-23 18:46:12 +0900 | [diff] [blame] | 913 | trace_consume_skb(skb); |
Neil Horman | ead2ceb | 2009-03-11 09:49:55 +0000 | [diff] [blame] | 914 | __kfree_skb(skb); |
| 915 | } |
| 916 | EXPORT_SYMBOL(consume_skb); |
Herbert Xu | be769db | 2020-08-22 08:23:29 +1000 | [diff] [blame] | 917 | #endif |
Neil Horman | ead2ceb | 2009-03-11 09:49:55 +0000 | [diff] [blame] | 918 | |
Paolo Abeni | 0a463c7 | 2017-06-12 11:23:42 +0200 | [diff] [blame] | 919 | /** |
Mauro Carvalho Chehab | c1639be | 2020-11-16 11:17:58 +0100 | [diff] [blame] | 920 | * __consume_stateless_skb - free an skbuff, assuming it is stateless |
Paolo Abeni | 0a463c7 | 2017-06-12 11:23:42 +0200 | [diff] [blame] | 921 | * @skb: buffer to free |
| 922 | * |
Paolo Abeni | ca2c141 | 2017-09-06 14:44:36 +0200 | [diff] [blame] | 923 | * Alike consume_skb(), but this variant assumes that this is the last |
| 924 | * skb reference and all the head states have been already dropped |
Paolo Abeni | 0a463c7 | 2017-06-12 11:23:42 +0200 | [diff] [blame] | 925 | */ |
Paolo Abeni | ca2c141 | 2017-09-06 14:44:36 +0200 | [diff] [blame] | 926 | void __consume_stateless_skb(struct sk_buff *skb) |
Paolo Abeni | 0a463c7 | 2017-06-12 11:23:42 +0200 | [diff] [blame] | 927 | { |
Paolo Abeni | 0a463c7 | 2017-06-12 11:23:42 +0200 | [diff] [blame] | 928 | trace_consume_skb(skb); |
Florian Westphal | 06dc75a | 2017-07-17 18:56:54 +0200 | [diff] [blame] | 929 | skb_release_data(skb); |
Paolo Abeni | 0a463c7 | 2017-06-12 11:23:42 +0200 | [diff] [blame] | 930 | kfree_skbmem(skb); |
| 931 | } |
| 932 | |
Alexander Lobakin | f450d53 | 2021-02-13 14:12:25 +0000 | [diff] [blame] | 933 | static void napi_skb_cache_put(struct sk_buff *skb) |
Jesper Dangaard Brouer | 795bb1c | 2016-02-08 13:14:59 +0100 | [diff] [blame] | 934 | { |
| 935 | struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); |
Alexander Lobakin | f450d53 | 2021-02-13 14:12:25 +0000 | [diff] [blame] | 936 | u32 i; |
Jesper Dangaard Brouer | 795bb1c | 2016-02-08 13:14:59 +0100 | [diff] [blame] | 937 | |
Alexander Lobakin | f450d53 | 2021-02-13 14:12:25 +0000 | [diff] [blame] | 938 | kasan_poison_object_data(skbuff_head_cache, skb); |
Jesper Dangaard Brouer | 795bb1c | 2016-02-08 13:14:59 +0100 | [diff] [blame] | 939 | nc->skb_cache[nc->skb_count++] = skb; |
| 940 | |
Jesper Dangaard Brouer | 795bb1c | 2016-02-08 13:14:59 +0100 | [diff] [blame] | 941 | if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) { |
Alexander Lobakin | f450d53 | 2021-02-13 14:12:25 +0000 | [diff] [blame] | 942 | for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++) |
| 943 | kasan_unpoison_object_data(skbuff_head_cache, |
| 944 | nc->skb_cache[i]); |
| 945 | |
| 946 | kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_HALF, |
| 947 | nc->skb_cache + NAPI_SKB_CACHE_HALF); |
| 948 | nc->skb_count = NAPI_SKB_CACHE_HALF; |
Jesper Dangaard Brouer | 795bb1c | 2016-02-08 13:14:59 +0100 | [diff] [blame] | 949 | } |
| 950 | } |
Alexander Lobakin | f450d53 | 2021-02-13 14:12:25 +0000 | [diff] [blame] | 951 | |
Jesper Dangaard Brouer | 15fad71 | 2016-02-08 13:15:04 +0100 | [diff] [blame] | 952 | void __kfree_skb_defer(struct sk_buff *skb) |
| 953 | { |
Alexander Lobakin | 9243adf | 2021-02-13 14:13:09 +0000 | [diff] [blame] | 954 | skb_release_all(skb); |
| 955 | napi_skb_cache_put(skb); |
| 956 | } |
| 957 | |
| 958 | void napi_skb_free_stolen_head(struct sk_buff *skb) |
| 959 | { |
Paolo Abeni | 9efb4b5 | 2021-07-28 18:24:02 +0200 | [diff] [blame] | 960 | if (unlikely(skb->slow_gro)) { |
| 961 | nf_reset_ct(skb); |
| 962 | skb_dst_drop(skb); |
| 963 | skb_ext_put(skb); |
Paolo Abeni | 5e10da5 | 2021-07-28 18:24:03 +0200 | [diff] [blame] | 964 | skb_orphan(skb); |
Paolo Abeni | 9efb4b5 | 2021-07-28 18:24:02 +0200 | [diff] [blame] | 965 | skb->slow_gro = 0; |
| 966 | } |
Alexander Lobakin | f450d53 | 2021-02-13 14:12:25 +0000 | [diff] [blame] | 967 | napi_skb_cache_put(skb); |
Jesper Dangaard Brouer | 15fad71 | 2016-02-08 13:15:04 +0100 | [diff] [blame] | 968 | } |
Jesper Dangaard Brouer | 795bb1c | 2016-02-08 13:14:59 +0100 | [diff] [blame] | 969 | |
| 970 | void napi_consume_skb(struct sk_buff *skb, int budget) |
| 971 | { |
Jesper Dangaard Brouer | 885eb0a | 2016-03-11 09:43:58 +0100 | [diff] [blame] | 972 | 	/* A zero budget indicates that a non-NAPI context called us, like netpoll */ |
Jesper Dangaard Brouer | 795bb1c | 2016-02-08 13:14:59 +0100 | [diff] [blame] | 973 | if (unlikely(!budget)) { |
Jesper Dangaard Brouer | 885eb0a | 2016-03-11 09:43:58 +0100 | [diff] [blame] | 974 | dev_consume_skb_any(skb); |
Jesper Dangaard Brouer | 795bb1c | 2016-02-08 13:14:59 +0100 | [diff] [blame] | 975 | return; |
| 976 | } |
| 977 | |
Yunsheng Lin | 6454eca | 2020-11-24 18:49:29 +0800 | [diff] [blame] | 978 | lockdep_assert_in_softirq(); |
| 979 | |
Paolo Abeni | 7608894 | 2017-06-14 11:48:48 +0200 | [diff] [blame] | 980 | if (!skb_unref(skb)) |
Jesper Dangaard Brouer | 795bb1c | 2016-02-08 13:14:59 +0100 | [diff] [blame] | 981 | return; |
Paolo Abeni | 7608894 | 2017-06-14 11:48:48 +0200 | [diff] [blame] | 982 | |
Jesper Dangaard Brouer | 795bb1c | 2016-02-08 13:14:59 +0100 | [diff] [blame] | 983 | 	/* if we reach here, the SKB is ready to be freed */ |
| 984 | trace_consume_skb(skb); |
| 985 | |
| 986 | 	/* if the SKB is a clone, don't bother with the cache; free it directly */ |
Eric Dumazet | abbdb5a | 2016-03-20 11:27:47 -0700 | [diff] [blame] | 987 | if (skb->fclone != SKB_FCLONE_UNAVAILABLE) { |
Jesper Dangaard Brouer | 795bb1c | 2016-02-08 13:14:59 +0100 | [diff] [blame] | 988 | __kfree_skb(skb); |
| 989 | return; |
| 990 | } |
| 991 | |
Alexander Lobakin | 9243adf | 2021-02-13 14:13:09 +0000 | [diff] [blame] | 992 | skb_release_all(skb); |
Alexander Lobakin | f450d53 | 2021-02-13 14:12:25 +0000 | [diff] [blame] | 993 | napi_skb_cache_put(skb); |
Jesper Dangaard Brouer | 795bb1c | 2016-02-08 13:14:59 +0100 | [diff] [blame] | 994 | } |
| 995 | EXPORT_SYMBOL(napi_consume_skb); |
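
/* Sketch of the usual caller (hypothetical driver, real API): a NAPI
 * TX-completion handler passes its poll budget straight through, so that
 * the budget == 0 netpoll case above is handled transparently.
 */
static void example_tx_clean(struct sk_buff **done, int n, int budget)
{
	int i;

	for (i = 0; i < n; i++)
		napi_consume_skb(done[i], budget);	/* may hit the NAPI cache */
}
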
| 996 | |
Kees Cook | 03f6104 | 2021-11-20 16:31:49 -0800 | [diff] [blame] | 997 | /* Make sure a field is contained by headers group */ |
Eric Dumazet | b193722 | 2014-09-28 22:18:47 -0700 | [diff] [blame] | 998 | #define CHECK_SKB_FIELD(field) \ |
Kees Cook | 03f6104 | 2021-11-20 16:31:49 -0800 | [diff] [blame] | 999 | BUILD_BUG_ON(offsetof(struct sk_buff, field) != \ |
| 1000 | offsetof(struct sk_buff, headers.field)); \ |
Eric Dumazet | b193722 | 2014-09-28 22:18:47 -0700 | [diff] [blame] | 1001 | |
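/* Worked expansion: CHECK_SKB_FIELD(mark) below becomes roughly
 *
 *	BUILD_BUG_ON(offsetof(struct sk_buff, mark) !=
 *		     offsetof(struct sk_buff, headers.mark));
 *
 * so a field that drifts out of the 'headers' group breaks the build
 * instead of being silently skipped by the memcpy() of new->headers in
 * __copy_skb_header() below.
 */
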
Herbert Xu | dec1881 | 2007-10-14 00:37:30 -0700 | [diff] [blame] | 1002 | static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) |
| 1003 | { |
| 1004 | new->tstamp = old->tstamp; |
Eric Dumazet | b193722 | 2014-09-28 22:18:47 -0700 | [diff] [blame] | 1005 | /* We do not copy old->sk */ |
Herbert Xu | dec1881 | 2007-10-14 00:37:30 -0700 | [diff] [blame] | 1006 | new->dev = old->dev; |
Eric Dumazet | b193722 | 2014-09-28 22:18:47 -0700 | [diff] [blame] | 1007 | memcpy(new->cb, old->cb, sizeof(old->cb)); |
Eric Dumazet | 7fee226 | 2010-05-11 23:19:48 +0000 | [diff] [blame] | 1008 | skb_dst_copy(new, old); |
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 1009 | __skb_ext_copy(new, old); |
Eric Dumazet | b193722 | 2014-09-28 22:18:47 -0700 | [diff] [blame] | 1010 | __nf_copy(new, old, false); |
Patrick McHardy | 6aa895b | 2008-07-14 22:49:06 -0700 | [diff] [blame] | 1011 | |
Kees Cook | 03f6104 | 2021-11-20 16:31:49 -0800 | [diff] [blame] | 1012 | 	/* Note: this field could be in the headers group. |
Eric Dumazet | b193722 | 2014-09-28 22:18:47 -0700 | [diff] [blame] | 1013 | 	 * It is not yet, because we do not want to have a 16-bit hole. |
| 1014 | 	 */ |
| 1015 | new->queue_mapping = old->queue_mapping; |
Eliezer Tamir | 0602129 | 2013-06-10 11:39:50 +0300 | [diff] [blame] | 1016 | |
Kees Cook | 03f6104 | 2021-11-20 16:31:49 -0800 | [diff] [blame] | 1017 | memcpy(&new->headers, &old->headers, sizeof(new->headers)); |
Eric Dumazet | b193722 | 2014-09-28 22:18:47 -0700 | [diff] [blame] | 1018 | CHECK_SKB_FIELD(protocol); |
| 1019 | CHECK_SKB_FIELD(csum); |
| 1020 | CHECK_SKB_FIELD(hash); |
| 1021 | CHECK_SKB_FIELD(priority); |
| 1022 | CHECK_SKB_FIELD(skb_iif); |
| 1023 | CHECK_SKB_FIELD(vlan_proto); |
| 1024 | CHECK_SKB_FIELD(vlan_tci); |
| 1025 | CHECK_SKB_FIELD(transport_header); |
| 1026 | CHECK_SKB_FIELD(network_header); |
| 1027 | CHECK_SKB_FIELD(mac_header); |
| 1028 | CHECK_SKB_FIELD(inner_protocol); |
| 1029 | CHECK_SKB_FIELD(inner_transport_header); |
| 1030 | CHECK_SKB_FIELD(inner_network_header); |
| 1031 | CHECK_SKB_FIELD(inner_mac_header); |
| 1032 | CHECK_SKB_FIELD(mark); |
| 1033 | #ifdef CONFIG_NETWORK_SECMARK |
| 1034 | CHECK_SKB_FIELD(secmark); |
| 1035 | #endif |
Cong Wang | e0d1095 | 2013-08-01 11:10:25 +0800 | [diff] [blame] | 1036 | #ifdef CONFIG_NET_RX_BUSY_POLL |
Eric Dumazet | b193722 | 2014-09-28 22:18:47 -0700 | [diff] [blame] | 1037 | CHECK_SKB_FIELD(napi_id); |
Eliezer Tamir | 0602129 | 2013-06-10 11:39:50 +0300 | [diff] [blame] | 1038 | #endif |
Eric Dumazet | 2bd8248 | 2015-02-03 23:48:24 -0800 | [diff] [blame] | 1039 | #ifdef CONFIG_XPS |
| 1040 | CHECK_SKB_FIELD(sender_cpu); |
| 1041 | #endif |
Eric Dumazet | b193722 | 2014-09-28 22:18:47 -0700 | [diff] [blame] | 1042 | #ifdef CONFIG_NET_SCHED |
| 1043 | CHECK_SKB_FIELD(tc_index); |
Eric Dumazet | b193722 | 2014-09-28 22:18:47 -0700 | [diff] [blame] | 1044 | #endif |
| 1045 | |
Herbert Xu | dec1881 | 2007-10-14 00:37:30 -0700 | [diff] [blame] | 1046 | } |
| 1047 | |
Herbert Xu | 82c49a3 | 2009-05-22 22:11:37 +0000 | [diff] [blame] | 1048 | /* |
| 1049 | * You should not add any new code to this function. Add it to |
| 1050 | * __copy_skb_header above instead. |
| 1051 | */ |
Herbert Xu | e0053ec | 2007-10-14 00:37:52 -0700 | [diff] [blame] | 1052 | static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1053 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1054 | #define C(x) n->x = skb->x |
| 1055 | |
| 1056 | n->next = n->prev = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1057 | n->sk = NULL; |
Herbert Xu | dec1881 | 2007-10-14 00:37:30 -0700 | [diff] [blame] | 1058 | __copy_skb_header(n, skb); |
| 1059 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1060 | C(len); |
| 1061 | C(data_len); |
Alexey Dobriyan | 3e6b3b2 | 2007-03-16 15:00:46 -0700 | [diff] [blame] | 1062 | C(mac_len); |
Patrick McHardy | 334a813 | 2007-06-25 04:35:20 -0700 | [diff] [blame] | 1063 | n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; |
Paul Moore | 02f1c89 | 2008-01-07 21:56:41 -0800 | [diff] [blame] | 1064 | n->cloned = 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1065 | n->nohdr = 0; |
Eric Dumazet | b13dda9 | 2018-04-07 13:42:39 -0700 | [diff] [blame] | 1066 | n->peeked = 0; |
Stefano Brivio | e78bfb0 | 2018-07-13 13:21:07 +0200 | [diff] [blame] | 1067 | C(pfmemalloc); |
Ilias Apalodimas | 6a5bcd8 | 2021-06-07 21:02:38 +0200 | [diff] [blame] | 1068 | C(pp_recycle); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1069 | n->destructor = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1070 | C(tail); |
| 1071 | C(end); |
Paul Moore | 02f1c89 | 2008-01-07 21:56:41 -0800 | [diff] [blame] | 1072 | C(head); |
Eric Dumazet | d3836f2 | 2012-04-27 00:33:38 +0000 | [diff] [blame] | 1073 | C(head_frag); |
Paul Moore | 02f1c89 | 2008-01-07 21:56:41 -0800 | [diff] [blame] | 1074 | C(data); |
| 1075 | C(truesize); |
Reshetova, Elena | 6335479 | 2017-06-30 13:07:58 +0300 | [diff] [blame] | 1076 | refcount_set(&n->users, 1); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1077 | |
| 1078 | atomic_inc(&(skb_shinfo(skb)->dataref)); |
| 1079 | skb->cloned = 1; |
| 1080 | |
| 1081 | return n; |
Herbert Xu | e0053ec | 2007-10-14 00:37:52 -0700 | [diff] [blame] | 1082 | #undef C |
| 1083 | } |
| 1084 | |
| 1085 | /** |
Jakub Kicinski | da29e4b | 2019-06-03 15:16:58 -0700 | [diff] [blame] | 1086 | * alloc_skb_for_msg() - allocate sk_buff to wrap frag list forming a msg |
| 1087 | * @first: first sk_buff of the msg |
| 1088 | */ |
| 1089 | struct sk_buff *alloc_skb_for_msg(struct sk_buff *first) |
| 1090 | { |
| 1091 | struct sk_buff *n; |
| 1092 | |
| 1093 | n = alloc_skb(0, GFP_ATOMIC); |
| 1094 | if (!n) |
| 1095 | return NULL; |
| 1096 | |
| 1097 | n->len = first->len; |
| 1098 | n->data_len = first->len; |
| 1099 | n->truesize = first->truesize; |
| 1100 | |
| 1101 | skb_shinfo(n)->frag_list = first; |
| 1102 | |
| 1103 | __copy_skb_header(n, first); |
| 1104 | n->destructor = NULL; |
| 1105 | |
| 1106 | return n; |
| 1107 | } |
| 1108 | EXPORT_SYMBOL_GPL(alloc_skb_for_msg); |
| 1109 | |
| 1110 | /** |
Herbert Xu | e0053ec | 2007-10-14 00:37:52 -0700 | [diff] [blame] | 1111 | * skb_morph - morph one skb into another |
| 1112 | * @dst: the skb to receive the contents |
| 1113 | * @src: the skb to supply the contents |
| 1114 | * |
| 1115 | * This is identical to skb_clone except that the target skb is |
| 1116 | * supplied by the user. |
| 1117 | * |
| 1118 | * The target skb is returned upon exit. |
| 1119 | */ |
| 1120 | struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src) |
| 1121 | { |
Herbert Xu | 2d4baff | 2007-11-26 23:11:19 +0800 | [diff] [blame] | 1122 | skb_release_all(dst); |
Herbert Xu | e0053ec | 2007-10-14 00:37:52 -0700 | [diff] [blame] | 1123 | return __skb_clone(dst, src); |
| 1124 | } |
| 1125 | EXPORT_SYMBOL_GPL(skb_morph); |
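
/* Sketch of the reference semantics (hypothetical caller): after the
 * morph, dst clones src's data, so dropping src does not free the shared
 * payload.
 */
static struct sk_buff *example_adopt(struct sk_buff *dst, struct sk_buff *src)
{
	skb_morph(dst, src);	/* dst's old state released, dst clones src */
	consume_skb(src);	/* data stays alive via the shared dataref */
	return dst;
}
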
| 1126 | |
Sowmini Varadhan | 6f89dbc | 2018-02-15 10:49:32 -0800 | [diff] [blame] | 1127 | int mm_account_pinned_pages(struct mmpin *mmp, size_t size) |
Willem de Bruijn | a91dbff | 2017-08-03 16:29:43 -0400 | [diff] [blame] | 1128 | { |
| 1129 | unsigned long max_pg, num_pg, new_pg, old_pg; |
| 1130 | struct user_struct *user; |
| 1131 | |
| 1132 | if (capable(CAP_IPC_LOCK) || !size) |
| 1133 | return 0; |
| 1134 | |
| 1135 | num_pg = (size >> PAGE_SHIFT) + 2; /* worst case */ |
| 1136 | max_pg = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; |
| 1137 | user = mmp->user ? : current_user(); |
| 1138 | |
| 1139 | do { |
| 1140 | old_pg = atomic_long_read(&user->locked_vm); |
| 1141 | new_pg = old_pg + num_pg; |
| 1142 | if (new_pg > max_pg) |
| 1143 | return -ENOBUFS; |
| 1144 | } while (atomic_long_cmpxchg(&user->locked_vm, old_pg, new_pg) != |
| 1145 | old_pg); |
| 1146 | |
| 1147 | if (!mmp->user) { |
| 1148 | mmp->user = get_uid(user); |
| 1149 | mmp->num_pg = num_pg; |
| 1150 | } else { |
| 1151 | mmp->num_pg += num_pg; |
| 1152 | } |
| 1153 | |
| 1154 | return 0; |
| 1155 | } |
Sowmini Varadhan | 6f89dbc | 2018-02-15 10:49:32 -0800 | [diff] [blame] | 1156 | EXPORT_SYMBOL_GPL(mm_account_pinned_pages); |
Willem de Bruijn | a91dbff | 2017-08-03 16:29:43 -0400 | [diff] [blame] | 1157 | |
Sowmini Varadhan | 6f89dbc | 2018-02-15 10:49:32 -0800 | [diff] [blame] | 1158 | void mm_unaccount_pinned_pages(struct mmpin *mmp) |
Willem de Bruijn | a91dbff | 2017-08-03 16:29:43 -0400 | [diff] [blame] | 1159 | { |
| 1160 | if (mmp->user) { |
| 1161 | atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm); |
| 1162 | free_uid(mmp->user); |
| 1163 | } |
| 1164 | } |
Sowmini Varadhan | 6f89dbc | 2018-02-15 10:49:32 -0800 | [diff] [blame] | 1165 | EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages); |
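
/* Sketch of the intended pairing (hypothetical caller): charge the user's
 * RLIMIT_MEMLOCK budget before pinning pages, and undo it on teardown.
 * The mmpin must start zeroed (user == NULL) so that the first accounting
 * call latches the current user.
 */
static int example_charge_and_release(struct mmpin *mmp, size_t bytes)
{
	int err;

	err = mm_account_pinned_pages(mmp, bytes);	/* -ENOBUFS if over limit */
	if (err)
		return err;
	/* ... pin and use the pages ... */
	mm_unaccount_pinned_pages(mmp);			/* release the charge */
	return 0;
}
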
Willem de Bruijn | a91dbff | 2017-08-03 16:29:43 -0400 | [diff] [blame] | 1166 | |
Jonathan Lemon | 8c79382 | 2021-01-06 14:18:37 -0800 | [diff] [blame] | 1167 | struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size) |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1168 | { |
| 1169 | struct ubuf_info *uarg; |
| 1170 | struct sk_buff *skb; |
| 1171 | |
| 1172 | WARN_ON_ONCE(!in_task()); |
| 1173 | |
| 1174 | skb = sock_omalloc(sk, 0, GFP_KERNEL); |
| 1175 | if (!skb) |
| 1176 | return NULL; |
| 1177 | |
| 1178 | BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb)); |
| 1179 | uarg = (void *)skb->cb; |
Willem de Bruijn | a91dbff | 2017-08-03 16:29:43 -0400 | [diff] [blame] | 1180 | uarg->mmp.user = NULL; |
| 1181 | |
| 1182 | if (mm_account_pinned_pages(&uarg->mmp, size)) { |
| 1183 | kfree_skb(skb); |
| 1184 | return NULL; |
| 1185 | } |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1186 | |
Jonathan Lemon | 8c79382 | 2021-01-06 14:18:37 -0800 | [diff] [blame] | 1187 | uarg->callback = msg_zerocopy_callback; |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1188 | uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1; |
| 1189 | uarg->len = 1; |
| 1190 | uarg->bytelen = size; |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1191 | uarg->zerocopy = 1; |
Jonathan Lemon | 04c2d33 | 2021-01-06 14:18:39 -0800 | [diff] [blame] | 1192 | uarg->flags = SKBFL_ZEROCOPY_FRAG; |
Eric Dumazet | c1d1b43 | 2017-08-31 16:48:22 -0700 | [diff] [blame] | 1193 | refcount_set(&uarg->refcnt, 1); |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1194 | sock_hold(sk); |
| 1195 | |
| 1196 | return uarg; |
| 1197 | } |
Jonathan Lemon | 8c79382 | 2021-01-06 14:18:37 -0800 | [diff] [blame] | 1198 | EXPORT_SYMBOL_GPL(msg_zerocopy_alloc); |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1199 | |
| 1200 | static inline struct sk_buff *skb_from_uarg(struct ubuf_info *uarg) |
| 1201 | { |
| 1202 | return container_of((void *)uarg, struct sk_buff, cb); |
| 1203 | } |
| 1204 | |
Jonathan Lemon | 8c79382 | 2021-01-06 14:18:37 -0800 | [diff] [blame] | 1205 | struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size, |
| 1206 | struct ubuf_info *uarg) |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1207 | { |
| 1208 | if (uarg) { |
| 1209 | const u32 byte_limit = 1 << 19; /* limit to a few TSO */ |
| 1210 | u32 bytelen, next; |
| 1211 | |
| 1212 | /* realloc only when socket is locked (TCP, UDP cork), |
| 1213 | * so uarg->len and sk_zckey access is serialized |
| 1214 | */ |
| 1215 | if (!sock_owned_by_user(sk)) { |
| 1216 | WARN_ON_ONCE(1); |
| 1217 | return NULL; |
| 1218 | } |
| 1219 | |
| 1220 | bytelen = uarg->bytelen + size; |
| 1221 | if (uarg->len == USHRT_MAX - 1 || bytelen > byte_limit) { |
| 1222 | /* TCP can create new skb to attach new uarg */ |
| 1223 | if (sk->sk_type == SOCK_STREAM) |
| 1224 | goto new_alloc; |
| 1225 | return NULL; |
| 1226 | } |
| 1227 | |
| 1228 | next = (u32)atomic_read(&sk->sk_zckey); |
| 1229 | if ((u32)(uarg->id + uarg->len) == next) { |
Willem de Bruijn | a91dbff | 2017-08-03 16:29:43 -0400 | [diff] [blame] | 1230 | if (mm_account_pinned_pages(&uarg->mmp, size)) |
| 1231 | return NULL; |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1232 | uarg->len++; |
| 1233 | uarg->bytelen = bytelen; |
| 1234 | atomic_set(&sk->sk_zckey, ++next); |
Willem de Bruijn | 100f6d8 | 2019-05-30 18:01:21 -0400 | [diff] [blame] | 1235 | |
| 1236 | /* no extra ref when appending to datagram (MSG_MORE) */ |
| 1237 | if (sk->sk_type == SOCK_STREAM) |
Jonathan Lemon | 8e04491 | 2021-01-06 14:18:41 -0800 | [diff] [blame] | 1238 | net_zcopy_get(uarg); |
Willem de Bruijn | 100f6d8 | 2019-05-30 18:01:21 -0400 | [diff] [blame] | 1239 | |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1240 | return uarg; |
| 1241 | } |
| 1242 | } |
| 1243 | |
| 1244 | new_alloc: |
Jonathan Lemon | 8c79382 | 2021-01-06 14:18:37 -0800 | [diff] [blame] | 1245 | return msg_zerocopy_alloc(sk, size); |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1246 | } |
Jonathan Lemon | 8c79382 | 2021-01-06 14:18:37 -0800 | [diff] [blame] | 1247 | EXPORT_SYMBOL_GPL(msg_zerocopy_realloc); |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1248 | |
| 1249 | static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len) |
| 1250 | { |
| 1251 | struct sock_exterr_skb *serr = SKB_EXT_ERR(skb); |
| 1252 | u32 old_lo, old_hi; |
| 1253 | u64 sum_len; |
| 1254 | |
| 1255 | old_lo = serr->ee.ee_info; |
| 1256 | old_hi = serr->ee.ee_data; |
| 1257 | sum_len = old_hi - old_lo + 1ULL + len; |
| 1258 | |
| 1259 | if (sum_len >= (1ULL << 32)) |
| 1260 | return false; |
| 1261 | |
| 1262 | if (lo != old_hi + 1) |
| 1263 | return false; |
| 1264 | |
| 1265 | serr->ee.ee_data += len; |
| 1266 | return true; |
| 1267 | } |
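
/* Worked example of the checks above: if the queued notification covers
 * ids [ee_info, ee_data] = [1, 3], a new completion with lo = 4 and
 * len = 2 is contiguous (4 == 3 + 1) and the combined span
 * sum_len = 3 - 1 + 1 + 2 = 5 fits in 32 bits, so the tail skb is
 * extended in place to cover [1, 5] rather than queueing a second
 * notification.
 */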
| 1268 | |
Jonathan Lemon | 8c79382 | 2021-01-06 14:18:37 -0800 | [diff] [blame] | 1269 | static void __msg_zerocopy_callback(struct ubuf_info *uarg) |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1270 | { |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1271 | struct sk_buff *tail, *skb = skb_from_uarg(uarg); |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1272 | struct sock_exterr_skb *serr; |
| 1273 | struct sock *sk = skb->sk; |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1274 | struct sk_buff_head *q; |
| 1275 | unsigned long flags; |
Willem de Bruijn | 3bdd5ee | 2021-06-09 18:41:57 -0400 | [diff] [blame] | 1276 | bool is_zerocopy; |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1277 | u32 lo, hi; |
| 1278 | u16 len; |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1279 | |
Willem de Bruijn | ccaffff | 2017-08-09 19:09:43 -0400 | [diff] [blame] | 1280 | mm_unaccount_pinned_pages(&uarg->mmp); |
| 1281 | |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1282 | /* if !len, there was only 1 call, and it was aborted |
| 1283 | * so do not queue a completion notification |
| 1284 | */ |
| 1285 | if (!uarg->len || sock_flag(sk, SOCK_DEAD)) |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1286 | goto release; |
| 1287 | |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1288 | len = uarg->len; |
| 1289 | lo = uarg->id; |
| 1290 | hi = uarg->id + len - 1; |
Willem de Bruijn | 3bdd5ee | 2021-06-09 18:41:57 -0400 | [diff] [blame] | 1291 | is_zerocopy = uarg->zerocopy; |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1292 | |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1293 | serr = SKB_EXT_ERR(skb); |
| 1294 | memset(serr, 0, sizeof(*serr)); |
| 1295 | serr->ee.ee_errno = 0; |
| 1296 | serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY; |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1297 | serr->ee.ee_data = hi; |
| 1298 | serr->ee.ee_info = lo; |
Willem de Bruijn | 3bdd5ee | 2021-06-09 18:41:57 -0400 | [diff] [blame] | 1299 | if (!is_zerocopy) |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1300 | serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED; |
| 1301 | |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1302 | q = &sk->sk_error_queue; |
| 1303 | spin_lock_irqsave(&q->lock, flags); |
| 1304 | tail = skb_peek_tail(q); |
| 1305 | if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY || |
| 1306 | !skb_zerocopy_notify_extend(tail, lo, len)) { |
| 1307 | __skb_queue_tail(q, skb); |
| 1308 | skb = NULL; |
| 1309 | } |
| 1310 | spin_unlock_irqrestore(&q->lock, flags); |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1311 | |
Alexander Aring | e3ae236 | 2021-06-27 18:48:21 -0400 | [diff] [blame] | 1312 | sk_error_report(sk); |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1313 | |
| 1314 | release: |
| 1315 | consume_skb(skb); |
| 1316 | sock_put(sk); |
| 1317 | } |
Jonathan Lemon | 7551885 | 2021-01-06 14:18:31 -0800 | [diff] [blame] | 1318 | |
Jonathan Lemon | 8c79382 | 2021-01-06 14:18:37 -0800 | [diff] [blame] | 1319 | void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg, |
| 1320 | bool success) |
Jonathan Lemon | 7551885 | 2021-01-06 14:18:31 -0800 | [diff] [blame] | 1321 | { |
| 1322 | uarg->zerocopy = uarg->zerocopy & success; |
| 1323 | |
| 1324 | if (refcount_dec_and_test(&uarg->refcnt)) |
Jonathan Lemon | 8c79382 | 2021-01-06 14:18:37 -0800 | [diff] [blame] | 1325 | __msg_zerocopy_callback(uarg); |
Jonathan Lemon | 7551885 | 2021-01-06 14:18:31 -0800 | [diff] [blame] | 1326 | } |
Jonathan Lemon | 8c79382 | 2021-01-06 14:18:37 -0800 | [diff] [blame] | 1327 | EXPORT_SYMBOL_GPL(msg_zerocopy_callback); |
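
/* For reference, a minimal user-space sketch (not kernel code; assumes a
 * socket 'fd' with SO_ZEROCOPY enabled and data sent with MSG_ZEROCOPY)
 * showing how the notifications built above are read: each one carries
 * the completed send ids in [ee_info, ee_data].
 */
#if 0	/* user-space example only */
#include <sys/socket.h>
#include <linux/errqueue.h>
#include <string.h>

static void read_zerocopy_completions(int fd)
{
	char control[128];
	struct msghdr msg = { 0 };
	struct sock_extended_err *serr;
	struct cmsghdr *cm;

	msg.msg_control = control;
	msg.msg_controllen = sizeof(control);
	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
		return;

	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		serr = (void *)CMSG_DATA(cm);
		if (serr->ee_origin != SO_EE_ORIGIN_ZEROCOPY)
			continue;
		/* sends ee_info..ee_data are done; SO_EE_CODE_ZEROCOPY_COPIED
		 * in ee_code means the kernel fell back to copying.
		 */
	}
}
#endif
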
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1328 | |
Jonathan Lemon | 8c79382 | 2021-01-06 14:18:37 -0800 | [diff] [blame] | 1329 | void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref) |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1330 | { |
Jonathan Lemon | 236a6b1 | 2021-01-06 14:18:35 -0800 | [diff] [blame] | 1331 | struct sock *sk = skb_from_uarg(uarg)->sk; |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1332 | |
Jonathan Lemon | 236a6b1 | 2021-01-06 14:18:35 -0800 | [diff] [blame] | 1333 | atomic_dec(&sk->sk_zckey); |
| 1334 | uarg->len--; |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1335 | |
Jonathan Lemon | 236a6b1 | 2021-01-06 14:18:35 -0800 | [diff] [blame] | 1336 | if (have_uref) |
Jonathan Lemon | 8c79382 | 2021-01-06 14:18:37 -0800 | [diff] [blame] | 1337 | msg_zerocopy_callback(NULL, uarg, true); |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1338 | } |
Jonathan Lemon | 8c79382 | 2021-01-06 14:18:37 -0800 | [diff] [blame] | 1339 | EXPORT_SYMBOL_GPL(msg_zerocopy_put_abort); |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1340 | |
Willem de Bruijn | b5947e5 | 2018-11-30 15:32:39 -0500 | [diff] [blame] | 1341 | int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len) |
| 1342 | { |
| 1343 | return __zerocopy_sg_from_iter(skb->sk, skb, &msg->msg_iter, len); |
| 1344 | } |
| 1345 | EXPORT_SYMBOL_GPL(skb_zerocopy_iter_dgram); |
| 1346 | |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1347 | int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, |
| 1348 | struct msghdr *msg, int len, |
| 1349 | struct ubuf_info *uarg) |
| 1350 | { |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1351 | struct ubuf_info *orig_uarg = skb_zcopy(skb); |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1352 | struct iov_iter orig_iter = msg->msg_iter; |
| 1353 | int err, orig_len = skb->len; |
| 1354 | |
Willem de Bruijn | 4ab6c99 | 2017-08-03 16:29:42 -0400 | [diff] [blame] | 1355 | /* An skb can only point to one uarg. This edge case happens when |
| 1356 | * TCP appends to an skb, but zerocopy_realloc triggered a new alloc. |
| 1357 | */ |
| 1358 | if (orig_uarg && uarg != orig_uarg) |
| 1359 | return -EEXIST; |
| 1360 | |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1361 | err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len); |
| 1362 | if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) { |
Willem de Bruijn | 54d43117 | 2017-10-19 12:40:39 -0400 | [diff] [blame] | 1363 | struct sock *save_sk = skb->sk; |
| 1364 | |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1365 | /* Streams do not free skb on error. Reset to prev state. */ |
| 1366 | msg->msg_iter = orig_iter; |
Willem de Bruijn | 54d43117 | 2017-10-19 12:40:39 -0400 | [diff] [blame] | 1367 | skb->sk = sk; |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1368 | ___pskb_trim(skb, orig_len); |
Willem de Bruijn | 54d43117 | 2017-10-19 12:40:39 -0400 | [diff] [blame] | 1369 | skb->sk = save_sk; |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1370 | return err; |
| 1371 | } |
| 1372 | |
Willem de Bruijn | 52900d2 | 2018-11-30 15:32:40 -0500 | [diff] [blame] | 1373 | skb_zcopy_set(skb, uarg, NULL); |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1374 | return skb->len - orig_len; |
| 1375 | } |
| 1376 | EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream); |
| 1377 | |
Willem de Bruijn | 1f8b977 | 2017-08-03 16:29:41 -0400 | [diff] [blame] | 1378 | static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig, |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1379 | gfp_t gfp_mask) |
| 1380 | { |
| 1381 | if (skb_zcopy(orig)) { |
| 1382 | if (skb_zcopy(nskb)) { |
| 1383 | 			/* callers passing !gfp_mask are verified to pass !skb_zcopy(nskb) */ |
| 1384 | if (!gfp_mask) { |
| 1385 | WARN_ON_ONCE(1); |
| 1386 | return -ENOMEM; |
| 1387 | } |
| 1388 | if (skb_uarg(nskb) == skb_uarg(orig)) |
| 1389 | return 0; |
| 1390 | if (skb_copy_ubufs(nskb, GFP_ATOMIC)) |
| 1391 | return -EIO; |
| 1392 | } |
Willem de Bruijn | 52900d2 | 2018-11-30 15:32:40 -0500 | [diff] [blame] | 1393 | skb_zcopy_set(nskb, skb_uarg(orig), NULL); |
Willem de Bruijn | 5226779 | 2017-08-03 16:29:39 -0400 | [diff] [blame] | 1394 | } |
| 1395 | return 0; |
| 1396 | } |
| 1397 | |
Ben Hutchings | 2c53040 | 2012-07-10 10:55:09 +0000 | [diff] [blame] | 1398 | /** |
| 1399 | * skb_copy_ubufs - copy userspace skb frags buffers to kernel |
Michael S. Tsirkin | 48c8301 | 2011-08-31 08:03:29 +0000 | [diff] [blame] | 1400 | * @skb: the skb to modify |
| 1401 | * @gfp_mask: allocation priority |
| 1402 | * |
Jonathan Lemon | 06b4feb | 2021-01-06 14:18:38 -0800 | [diff] [blame] | 1403 |  *	This must be called on an skb with SKBFL_ZEROCOPY_ENABLE. |
Michael S. Tsirkin | 48c8301 | 2011-08-31 08:03:29 +0000 | [diff] [blame] | 1404 |  *	It will copy all frags into kernel memory and drop the reference |
| 1405 |  *	to the userspace pages. |
| 1406 | * |
| 1407 |  *	If this function is called from an interrupt, @gfp_mask must be |
| 1408 |  *	%GFP_ATOMIC. |
| 1409 | * |
| 1410 | * Returns 0 on success or a negative error code on failure |
| 1411 | * to allocate kernel memory to copy to. |
| 1412 | */ |
| 1413 | int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1414 | { |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1415 | int num_frags = skb_shinfo(skb)->nr_frags; |
| 1416 | struct page *page, *head = NULL; |
Willem de Bruijn | 3ece782 | 2017-08-03 16:29:38 -0400 | [diff] [blame] | 1417 | int i, new_frags; |
| 1418 | u32 d_off; |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1419 | |
Willem de Bruijn | 3ece782 | 2017-08-03 16:29:38 -0400 | [diff] [blame] | 1420 | if (skb_shared(skb) || skb_unclone(skb, gfp_mask)) |
| 1421 | return -EINVAL; |
| 1422 | |
Willem de Bruijn | f72c4ac | 2017-12-28 12:38:13 -0500 | [diff] [blame] | 1423 | if (!num_frags) |
| 1424 | goto release; |
| 1425 | |
Willem de Bruijn | 3ece782 | 2017-08-03 16:29:38 -0400 | [diff] [blame] | 1426 | new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT; |
| 1427 | for (i = 0; i < new_frags; i++) { |
Krishna Kumar | 02756ed | 2012-07-17 02:05:29 +0000 | [diff] [blame] | 1428 | page = alloc_page(gfp_mask); |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1429 | if (!page) { |
| 1430 | while (head) { |
Sunghan Suh | 40dadff | 2013-07-12 16:17:23 +0900 | [diff] [blame] | 1431 | struct page *next = (struct page *)page_private(head); |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1432 | put_page(head); |
| 1433 | head = next; |
| 1434 | } |
| 1435 | return -ENOMEM; |
| 1436 | } |
Willem de Bruijn | 3ece782 | 2017-08-03 16:29:38 -0400 | [diff] [blame] | 1437 | set_page_private(page, (unsigned long)head); |
| 1438 | head = page; |
| 1439 | } |
| 1440 | |
| 1441 | page = head; |
| 1442 | d_off = 0; |
| 1443 | for (i = 0; i < num_frags; i++) { |
| 1444 | skb_frag_t *f = &skb_shinfo(skb)->frags[i]; |
| 1445 | u32 p_off, p_len, copied; |
| 1446 | struct page *p; |
| 1447 | u8 *vaddr; |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 1448 | |
Jonathan Lemon | b54c9d5 | 2019-07-30 07:40:33 -0700 | [diff] [blame] | 1449 | skb_frag_foreach_page(f, skb_frag_off(f), skb_frag_size(f), |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 1450 | p, p_off, p_len, copied) { |
Willem de Bruijn | 3ece782 | 2017-08-03 16:29:38 -0400 | [diff] [blame] | 1451 | u32 copy, done = 0; |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 1452 | vaddr = kmap_atomic(p); |
Willem de Bruijn | 3ece782 | 2017-08-03 16:29:38 -0400 | [diff] [blame] | 1453 | |
| 1454 | while (done < p_len) { |
| 1455 | if (d_off == PAGE_SIZE) { |
| 1456 | d_off = 0; |
| 1457 | page = (struct page *)page_private(page); |
| 1458 | } |
| 1459 | copy = min_t(u32, PAGE_SIZE - d_off, p_len - done); |
| 1460 | memcpy(page_address(page) + d_off, |
| 1461 | vaddr + p_off + done, copy); |
| 1462 | done += copy; |
| 1463 | d_off += copy; |
| 1464 | } |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 1465 | kunmap_atomic(vaddr); |
| 1466 | } |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1467 | } |
| 1468 | |
| 1469 | /* skb frags release userspace buffers */ |
Krishna Kumar | 02756ed | 2012-07-17 02:05:29 +0000 | [diff] [blame] | 1470 | for (i = 0; i < num_frags; i++) |
Ian Campbell | a8605c6 | 2011-10-19 23:01:49 +0000 | [diff] [blame] | 1471 | skb_frag_unref(skb, i); |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1472 | |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1473 | /* skb frags point to kernel buffers */ |
Willem de Bruijn | 3ece782 | 2017-08-03 16:29:38 -0400 | [diff] [blame] | 1474 | for (i = 0; i < new_frags - 1; i++) { |
| 1475 | __skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE); |
Sunghan Suh | 40dadff | 2013-07-12 16:17:23 +0900 | [diff] [blame] | 1476 | head = (struct page *)page_private(head); |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1477 | } |
Willem de Bruijn | 3ece782 | 2017-08-03 16:29:38 -0400 | [diff] [blame] | 1478 | __skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off); |
| 1479 | skb_shinfo(skb)->nr_frags = new_frags; |
Michael S. Tsirkin | 48c8301 | 2011-08-31 08:03:29 +0000 | [diff] [blame] | 1480 | |
Willem de Bruijn | b90ddd5 | 2017-12-20 17:37:50 -0500 | [diff] [blame] | 1481 | release: |
Willem de Bruijn | 1f8b977 | 2017-08-03 16:29:41 -0400 | [diff] [blame] | 1482 | skb_zcopy_clear(skb, false); |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1483 | return 0; |
| 1484 | } |
Michael S. Tsirkin | dcc0fb7 | 2012-07-20 09:23:20 +0000 | [diff] [blame] | 1485 | EXPORT_SYMBOL_GPL(skb_copy_ubufs); |
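
/* Sketch of the usual guard (hypothetical helper): most callers reach
 * skb_copy_ubufs() through wrappers such as skb_orphan_frags(), which
 * only invoke it when the skb really holds user-space fragments.
 */
static int example_make_frags_kernel_owned(struct sk_buff *skb)
{
	if (!skb_zcopy(skb))		/* kernel-owned frags: nothing to do */
		return 0;
	return skb_copy_ubufs(skb, GFP_ATOMIC);
}
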
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1486 | |
Herbert Xu | e0053ec | 2007-10-14 00:37:52 -0700 | [diff] [blame] | 1487 | /** |
| 1488 | * skb_clone - duplicate an sk_buff |
| 1489 | * @skb: buffer to clone |
| 1490 | * @gfp_mask: allocation priority |
| 1491 | * |
| 1492 | * Duplicate an &sk_buff. The new one is not owned by a socket. Both |
| 1493 | * copies share the same packet data but not structure. The new |
| 1494 | * buffer has a reference count of 1. If the allocation fails the |
| 1495 | * function returns %NULL otherwise the new buffer is returned. |
| 1496 | * |
| 1497 |  *	If this function is called from an interrupt, @gfp_mask must be |
| 1498 |  *	%GFP_ATOMIC. |
| 1499 | */ |
| 1500 | |
| 1501 | struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) |
| 1502 | { |
Eric Dumazet | d0bf4a9 | 2014-09-29 13:29:15 -0700 | [diff] [blame] | 1503 | struct sk_buff_fclones *fclones = container_of(skb, |
| 1504 | struct sk_buff_fclones, |
| 1505 | skb1); |
Eric Dumazet | 6ffe75eb | 2014-12-03 17:04:39 -0800 | [diff] [blame] | 1506 | struct sk_buff *n; |
Herbert Xu | e0053ec | 2007-10-14 00:37:52 -0700 | [diff] [blame] | 1507 | |
Michael S. Tsirkin | 70008aa | 2012-07-20 09:23:10 +0000 | [diff] [blame] | 1508 | if (skb_orphan_frags(skb, gfp_mask)) |
| 1509 | return NULL; |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1510 | |
Herbert Xu | e0053ec | 2007-10-14 00:37:52 -0700 | [diff] [blame] | 1511 | if (skb->fclone == SKB_FCLONE_ORIG && |
Reshetova, Elena | 2638595 | 2017-06-30 13:07:59 +0300 | [diff] [blame] | 1512 | refcount_read(&fclones->fclone_ref) == 1) { |
Eric Dumazet | 6ffe75eb | 2014-12-03 17:04:39 -0800 | [diff] [blame] | 1513 | n = &fclones->skb2; |
Reshetova, Elena | 2638595 | 2017-06-30 13:07:59 +0300 | [diff] [blame] | 1514 | refcount_set(&fclones->fclone_ref, 2); |
Herbert Xu | e0053ec | 2007-10-14 00:37:52 -0700 | [diff] [blame] | 1515 | } else { |
Mel Gorman | c93bdd0 | 2012-07-31 16:44:19 -0700 | [diff] [blame] | 1516 | if (skb_pfmemalloc(skb)) |
| 1517 | gfp_mask |= __GFP_MEMALLOC; |
| 1518 | |
Herbert Xu | e0053ec | 2007-10-14 00:37:52 -0700 | [diff] [blame] | 1519 | n = kmem_cache_alloc(skbuff_head_cache, gfp_mask); |
| 1520 | if (!n) |
| 1521 | return NULL; |
Vegard Nossum | fe55f6d | 2008-08-30 12:16:35 +0200 | [diff] [blame] | 1522 | |
Herbert Xu | e0053ec | 2007-10-14 00:37:52 -0700 | [diff] [blame] | 1523 | n->fclone = SKB_FCLONE_UNAVAILABLE; |
| 1524 | } |
| 1525 | |
| 1526 | return __skb_clone(n, skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1527 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1528 | EXPORT_SYMBOL(skb_clone); |
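
/* Sketch (hypothetical helper): a clone gives a second path its own
 * struct sk_buff over the same read-only payload; writers need
 * skb_copy()/pskb_copy() instead (see below).
 */
static int example_tee(struct sk_buff *skb, struct sk_buff **out)
{
	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

	if (!clone)
		return -ENOMEM;
	*out = clone;	/* each skb is freed independently later */
	return 0;
}
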
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1529 | |
Toshiaki Makita | b0768a8 | 2018-08-03 16:58:09 +0900 | [diff] [blame] | 1530 | void skb_headers_offset_update(struct sk_buff *skb, int off) |
Pravin B Shelar | f5b1729 | 2013-03-07 13:21:40 +0000 | [diff] [blame] | 1531 | { |
Eric Dumazet | 030737b | 2013-10-19 11:42:54 -0700 | [diff] [blame] | 1532 | /* Only adjust this if it actually is csum_start rather than csum */ |
| 1533 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
| 1534 | skb->csum_start += off; |
Pravin B Shelar | f5b1729 | 2013-03-07 13:21:40 +0000 | [diff] [blame] | 1535 | /* {transport,network,mac}_header and tail are relative to skb->head */ |
| 1536 | skb->transport_header += off; |
| 1537 | skb->network_header += off; |
| 1538 | if (skb_mac_header_was_set(skb)) |
| 1539 | skb->mac_header += off; |
| 1540 | skb->inner_transport_header += off; |
| 1541 | skb->inner_network_header += off; |
Pravin B Shelar | aefbd2b | 2013-03-07 13:21:46 +0000 | [diff] [blame] | 1542 | skb->inner_mac_header += off; |
Pravin B Shelar | f5b1729 | 2013-03-07 13:21:40 +0000 | [diff] [blame] | 1543 | } |
Toshiaki Makita | b0768a8 | 2018-08-03 16:58:09 +0900 | [diff] [blame] | 1544 | EXPORT_SYMBOL(skb_headers_offset_update); |
Pravin B Shelar | f5b1729 | 2013-03-07 13:21:40 +0000 | [diff] [blame] | 1545 | |
Ilya Lesokhin | 08303c1 | 2018-04-30 10:16:11 +0300 | [diff] [blame] | 1546 | void skb_copy_header(struct sk_buff *new, const struct sk_buff *old) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1547 | { |
Herbert Xu | dec1881 | 2007-10-14 00:37:30 -0700 | [diff] [blame] | 1548 | __copy_skb_header(new, old); |
| 1549 | |
Herbert Xu | 7967168 | 2006-06-22 02:40:14 -0700 | [diff] [blame] | 1550 | skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size; |
| 1551 | skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; |
| 1552 | skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1553 | } |
Ilya Lesokhin | 08303c1 | 2018-04-30 10:16:11 +0300 | [diff] [blame] | 1554 | EXPORT_SYMBOL(skb_copy_header); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1555 | |
Mel Gorman | c93bdd0 | 2012-07-31 16:44:19 -0700 | [diff] [blame] | 1556 | static inline int skb_alloc_rx_flag(const struct sk_buff *skb) |
| 1557 | { |
| 1558 | if (skb_pfmemalloc(skb)) |
| 1559 | return SKB_ALLOC_RX; |
| 1560 | return 0; |
| 1561 | } |
| 1562 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1563 | /** |
| 1564 | * skb_copy - create private copy of an sk_buff |
| 1565 | * @skb: buffer to copy |
| 1566 | * @gfp_mask: allocation priority |
| 1567 | * |
| 1568 | * Make a copy of both an &sk_buff and its data. This is used when the |
| 1569 | * caller wishes to modify the data and needs a private copy of the |
| 1570 | * data to alter. Returns %NULL on failure or the pointer to the buffer |
| 1571 | * on success. The returned buffer has a reference count of 1. |
| 1572 | * |
| 1573 |  *	As a by-product, this function converts a non-linear &sk_buff into a |
| 1574 |  *	linear one, so that the &sk_buff becomes completely private and the |
| 1575 |  *	caller is allowed to modify all the data of the returned buffer. This |
| 1576 |  *	means that this function is not recommended for use in circumstances |
| 1577 |  *	when only the header is going to be modified. Use pskb_copy() instead. |
| 1578 | */ |
| 1579 | |
Al Viro | dd0fc66 | 2005-10-07 07:46:04 +0100 | [diff] [blame] | 1580 | struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1581 | { |
Eric Dumazet | 6602ceb | 2010-09-01 05:25:10 +0000 | [diff] [blame] | 1582 | int headerlen = skb_headroom(skb); |
Alexander Duyck | ec47ea8 | 2012-05-04 14:26:56 +0000 | [diff] [blame] | 1583 | unsigned int size = skb_end_offset(skb) + skb->data_len; |
Mel Gorman | c93bdd0 | 2012-07-31 16:44:19 -0700 | [diff] [blame] | 1584 | struct sk_buff *n = __alloc_skb(size, gfp_mask, |
| 1585 | skb_alloc_rx_flag(skb), NUMA_NO_NODE); |
Eric Dumazet | 6602ceb | 2010-09-01 05:25:10 +0000 | [diff] [blame] | 1586 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1587 | if (!n) |
| 1588 | return NULL; |
| 1589 | |
| 1590 | /* Set the data pointer */ |
| 1591 | skb_reserve(n, headerlen); |
| 1592 | /* Set the tail pointer and length */ |
| 1593 | skb_put(n, skb->len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1594 | |
Tim Hansen | 9f77fad | 2017-10-09 11:37:59 -0400 | [diff] [blame] | 1595 | BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1596 | |
Ilya Lesokhin | 08303c1 | 2018-04-30 10:16:11 +0300 | [diff] [blame] | 1597 | skb_copy_header(n, skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1598 | return n; |
| 1599 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1600 | EXPORT_SYMBOL(skb_copy); |
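
/* Sketch of the choice spelled out above (hypothetical helper): take a
 * full private copy only when the payload itself will be edited;
 * header-only edits should prefer pskb_copy(), which keeps fragments
 * shared.
 */
static struct sk_buff *example_writable_copy(struct sk_buff *skb,
					     bool edit_payload)
{
	if (edit_payload)
		return skb_copy(skb, GFP_ATOMIC);	/* also linearizes */
	return pskb_copy(skb, GFP_ATOMIC);	/* private header only */
}
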
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1601 | |
| 1602 | /** |
Octavian Purdila | bad93e9 | 2014-06-12 01:36:26 +0300 | [diff] [blame] | 1603 | * __pskb_copy_fclone - create copy of an sk_buff with private head. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1604 | * @skb: buffer to copy |
Eric Dumazet | 117632e | 2011-12-03 21:39:53 +0000 | [diff] [blame] | 1605 | * @headroom: headroom of new skb |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1606 | * @gfp_mask: allocation priority |
Octavian Purdila | bad93e9 | 2014-06-12 01:36:26 +0300 | [diff] [blame] | 1607 | * @fclone: if true allocate the copy of the skb from the fclone |
| 1608 | * cache instead of the head cache; it is recommended to set this |
| 1609 | * to true for the cases where the copy will likely be cloned |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1610 | * |
| 1611 |  *	Make a copy of both an &sk_buff and part of its data, located |
| 1612 |  *	in the header. Fragmented data remains shared. This is used when |
| 1613 |  *	the caller wishes to modify only the header of the &sk_buff and |
| 1614 |  *	needs a private copy of the header to alter. Returns %NULL on |
| 1615 |  *	failure or the pointer to the buffer on success. |
| 1616 | * The returned buffer has a reference count of 1. |
| 1617 | */ |
| 1618 | |
Octavian Purdila | bad93e9 | 2014-06-12 01:36:26 +0300 | [diff] [blame] | 1619 | struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, |
| 1620 | gfp_t gfp_mask, bool fclone) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1621 | { |
Eric Dumazet | 117632e | 2011-12-03 21:39:53 +0000 | [diff] [blame] | 1622 | unsigned int size = skb_headlen(skb) + headroom; |
Octavian Purdila | bad93e9 | 2014-06-12 01:36:26 +0300 | [diff] [blame] | 1623 | int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0); |
| 1624 | struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE); |
Eric Dumazet | 6602ceb | 2010-09-01 05:25:10 +0000 | [diff] [blame] | 1625 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1626 | if (!n) |
| 1627 | goto out; |
| 1628 | |
| 1629 | /* Set the data pointer */ |
Eric Dumazet | 117632e | 2011-12-03 21:39:53 +0000 | [diff] [blame] | 1630 | skb_reserve(n, headroom); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1631 | /* Set the tail pointer and length */ |
| 1632 | skb_put(n, skb_headlen(skb)); |
| 1633 | /* Copy the bytes */ |
Arnaldo Carvalho de Melo | d626f62 | 2007-03-27 18:55:52 -0300 | [diff] [blame] | 1634 | skb_copy_from_linear_data(skb, n->data, n->len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1635 | |
Herbert Xu | 25f484a | 2006-11-07 14:57:15 -0800 | [diff] [blame] | 1636 | n->truesize += skb->data_len; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1637 | n->data_len = skb->data_len; |
| 1638 | n->len = skb->len; |
| 1639 | |
| 1640 | if (skb_shinfo(skb)->nr_frags) { |
| 1641 | int i; |
| 1642 | |
Willem de Bruijn | 1f8b977 | 2017-08-03 16:29:41 -0400 | [diff] [blame] | 1643 | if (skb_orphan_frags(skb, gfp_mask) || |
| 1644 | skb_zerocopy_clone(n, skb, gfp_mask)) { |
Michael S. Tsirkin | 70008aa | 2012-07-20 09:23:10 +0000 | [diff] [blame] | 1645 | kfree_skb(n); |
| 1646 | n = NULL; |
| 1647 | goto out; |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1648 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1649 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
| 1650 | skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 1651 | skb_frag_ref(skb, i); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1652 | } |
| 1653 | skb_shinfo(n)->nr_frags = i; |
| 1654 | } |
| 1655 | |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 1656 | if (skb_has_frag_list(skb)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1657 | skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; |
| 1658 | skb_clone_fraglist(n); |
| 1659 | } |
| 1660 | |
Ilya Lesokhin | 08303c1 | 2018-04-30 10:16:11 +0300 | [diff] [blame] | 1661 | skb_copy_header(n, skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1662 | out: |
| 1663 | return n; |
| 1664 | } |
Octavian Purdila | bad93e9 | 2014-06-12 01:36:26 +0300 | [diff] [blame] | 1665 | EXPORT_SYMBOL(__pskb_copy_fclone); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1666 | |
| 1667 | /** |
| 1668 | * pskb_expand_head - reallocate header of &sk_buff |
| 1669 | * @skb: buffer to reallocate |
| 1670 | * @nhead: room to add at head |
| 1671 | * @ntail: room to add at tail |
| 1672 | * @gfp_mask: allocation priority |
| 1673 | * |
Mathias Krause | bc32383 | 2013-11-07 14:18:26 +0100 | [diff] [blame] | 1674 |  *	Expands (or creates an identical copy, if @nhead and @ntail are zero) the |
| 1675 |  *	header of @skb. The &sk_buff itself is not changed. The &sk_buff MUST have |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1676 |  *	a reference count of 1. Returns zero on success, or a negative error code |
| 1677 |  *	if expansion failed. In the latter case, the &sk_buff is not changed. |
| 1678 | * |
| 1679 | * All the pointers pointing into skb header may change and must be |
| 1680 | * reloaded after call to this function. |
| 1681 | */ |
| 1682 | |
Victor Fusco | 86a76ca | 2005-07-08 14:57:47 -0700 | [diff] [blame] | 1683 | int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, |
Al Viro | dd0fc66 | 2005-10-07 07:46:04 +0100 | [diff] [blame] | 1684 | gfp_t gfp_mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1685 | { |
Eric Dumazet | 158f323 | 2017-01-27 07:11:27 -0800 | [diff] [blame] | 1686 | int i, osize = skb_end_offset(skb); |
| 1687 | int size = osize + nhead + ntail; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1688 | long off; |
Eric Dumazet | 158f323 | 2017-01-27 07:11:27 -0800 | [diff] [blame] | 1689 | u8 *data; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1690 | |
Herbert Xu | 4edd87a | 2008-10-01 07:09:38 -0700 | [diff] [blame] | 1691 | BUG_ON(nhead < 0); |
| 1692 | |
Tim Hansen | 9f77fad | 2017-10-09 11:37:59 -0400 | [diff] [blame] | 1693 | BUG_ON(skb_shared(skb)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1694 | |
| 1695 | size = SKB_DATA_ALIGN(size); |
| 1696 | |
Mel Gorman | c93bdd0 | 2012-07-31 16:44:19 -0700 | [diff] [blame] | 1697 | if (skb_pfmemalloc(skb)) |
| 1698 | gfp_mask |= __GFP_MEMALLOC; |
| 1699 | data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), |
| 1700 | gfp_mask, NUMA_NO_NODE, NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1701 | if (!data) |
| 1702 | goto nodata; |
Eric Dumazet | 87151b8 | 2012-04-10 20:08:39 +0000 | [diff] [blame] | 1703 | size = SKB_WITH_OVERHEAD(ksize(data)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1704 | |
| 1705 | /* Copy only real data... and, alas, header. This should be |
Eric Dumazet | 6602ceb | 2010-09-01 05:25:10 +0000 | [diff] [blame] | 1706 | 	 * optimized for the case when the header is void. |
| 1707 | */ |
| 1708 | memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head); |
| 1709 | |
| 1710 | memcpy((struct skb_shared_info *)(data + size), |
| 1711 | skb_shinfo(skb), |
Eric Dumazet | fed6638 | 2010-07-22 19:09:08 +0000 | [diff] [blame] | 1712 | offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1713 | |
Alexander Duyck | 3e24591 | 2012-05-04 14:26:51 +0000 | [diff] [blame] | 1714 | /* |
| 1715 | 	 * If shinfo is shared we must drop the old head gracefully, but if it |
| 1716 | 	 * is not, we can just drop the old head and let the existing refcount |
| 1717 | 	 * be, since all we did is relocate the values. |
| 1718 | */ |
| 1719 | if (skb_cloned(skb)) { |
Michael S. Tsirkin | 70008aa | 2012-07-20 09:23:10 +0000 | [diff] [blame] | 1720 | if (skb_orphan_frags(skb, gfp_mask)) |
| 1721 | goto nofrags; |
Willem de Bruijn | 1f8b977 | 2017-08-03 16:29:41 -0400 | [diff] [blame] | 1722 | if (skb_zcopy(skb)) |
Eric Dumazet | c1d1b43 | 2017-08-31 16:48:22 -0700 | [diff] [blame] | 1723 | refcount_inc(&skb_uarg(skb)->refcnt); |
Eric Dumazet | 1fd6304 | 2010-09-02 23:09:32 +0000 | [diff] [blame] | 1724 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 1725 | skb_frag_ref(skb, i); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1726 | |
Eric Dumazet | 1fd6304 | 2010-09-02 23:09:32 +0000 | [diff] [blame] | 1727 | if (skb_has_frag_list(skb)) |
| 1728 | skb_clone_fraglist(skb); |
| 1729 | |
| 1730 | skb_release_data(skb); |
Alexander Duyck | 3e24591 | 2012-05-04 14:26:51 +0000 | [diff] [blame] | 1731 | } else { |
| 1732 | skb_free_head(skb); |
Eric Dumazet | 1fd6304 | 2010-09-02 23:09:32 +0000 | [diff] [blame] | 1733 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1734 | off = (data + nhead) - skb->head; |
| 1735 | |
| 1736 | skb->head = data; |
Eric Dumazet | d3836f2 | 2012-04-27 00:33:38 +0000 | [diff] [blame] | 1737 | skb->head_frag = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1738 | skb->data += off; |
Arnaldo Carvalho de Melo | 4305b54 | 2007-04-19 20:43:29 -0700 | [diff] [blame] | 1739 | #ifdef NET_SKBUFF_DATA_USES_OFFSET |
| 1740 | skb->end = size; |
Patrick McHardy | 56eb888 | 2007-04-09 11:45:04 -0700 | [diff] [blame] | 1741 | off = nhead; |
Arnaldo Carvalho de Melo | 4305b54 | 2007-04-19 20:43:29 -0700 | [diff] [blame] | 1742 | #else |
| 1743 | skb->end = skb->head + size; |
Patrick McHardy | 56eb888 | 2007-04-09 11:45:04 -0700 | [diff] [blame] | 1744 | #endif |
Arnaldo Carvalho de Melo | 27a884d | 2007-04-19 20:29:13 -0700 | [diff] [blame] | 1745 | skb->tail += off; |
Peter Pan(潘卫平) | b41abb4 | 2013-06-06 21:27:21 +0800 | [diff] [blame] | 1746 | skb_headers_offset_update(skb, nhead); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1747 | skb->cloned = 0; |
Patrick McHardy | 334a813 | 2007-06-25 04:35:20 -0700 | [diff] [blame] | 1748 | skb->hdr_len = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1749 | skb->nohdr = 0; |
| 1750 | atomic_set(&skb_shinfo(skb)->dataref, 1); |
Eric Dumazet | 158f323 | 2017-01-27 07:11:27 -0800 | [diff] [blame] | 1751 | |
Daniel Borkmann | de8f3a8 | 2017-09-25 02:25:51 +0200 | [diff] [blame] | 1752 | skb_metadata_clear(skb); |
| 1753 | |
Eric Dumazet | 158f323 | 2017-01-27 07:11:27 -0800 | [diff] [blame] | 1754 | /* It is not generally safe to change skb->truesize. |
| 1755 | 	 * For the moment, we only really care about the rx path, or |
| 1756 | 	 * the case when the skb is orphaned (not attached to a socket). |
| 1757 | */ |
| 1758 | if (!skb->sk || skb->destructor == sock_edemux) |
| 1759 | skb->truesize += size - osize; |
| 1760 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1761 | return 0; |
| 1762 | |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1763 | nofrags: |
| 1764 | kfree(data); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1765 | nodata: |
| 1766 | return -ENOMEM; |
| 1767 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1768 | EXPORT_SYMBOL(pskb_expand_head); |
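
/* Sketch of the caveat documented above (hypothetical caller): the skb
 * must be unshared, and every cached pointer into the old header is
 * stale after a successful call.
 */
static int example_grow_headroom(struct sk_buff *skb, int needed)
{
	if (pskb_expand_head(skb, SKB_DATA_ALIGN(needed), 0, GFP_ATOMIC))
		return -ENOMEM;
	/* skb->head and skb->data may have moved: recompute any header
	 * pointers (e.g. via skb_network_header()) from here on.
	 */
	return 0;
}
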
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1769 | |
| 1770 | /* Make private copy of skb with writable head and some headroom */ |
| 1771 | |
| 1772 | struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) |
| 1773 | { |
| 1774 | struct sk_buff *skb2; |
| 1775 | int delta = headroom - skb_headroom(skb); |
| 1776 | |
| 1777 | if (delta <= 0) |
| 1778 | skb2 = pskb_copy(skb, GFP_ATOMIC); |
| 1779 | else { |
| 1780 | skb2 = skb_clone(skb, GFP_ATOMIC); |
| 1781 | if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0, |
| 1782 | GFP_ATOMIC)) { |
| 1783 | kfree_skb(skb2); |
| 1784 | skb2 = NULL; |
| 1785 | } |
| 1786 | } |
| 1787 | return skb2; |
| 1788 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1789 | EXPORT_SYMBOL(skb_realloc_headroom); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1790 | |
| 1791 | /** |
Vasily Averin | f1260ff | 2021-08-02 11:52:15 +0300 | [diff] [blame] | 1792 | * skb_expand_head - reallocate header of &sk_buff |
| 1793 | * @skb: buffer to reallocate |
| 1794 | * @headroom: needed headroom |
| 1795 | * |
| 1796 |  *	Unlike skb_realloc_headroom(), this one avoids allocating a new skb |
| 1797 |  *	when possible; it copies skb->sk to the new skb as needed |
| 1798 |  *	and frees the original skb in case of failure. |
| 1799 |  * |
| 1800 |  *	It expects an increased headroom and generates a warning otherwise. |
| 1801 | */ |
| 1802 | |
| 1803 | struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom) |
| 1804 | { |
| 1805 | int delta = headroom - skb_headroom(skb); |
Vasily Averin | 7f678de | 2021-10-22 13:28:37 +0300 | [diff] [blame] | 1806 | int osize = skb_end_offset(skb); |
| 1807 | struct sock *sk = skb->sk; |
Vasily Averin | f1260ff | 2021-08-02 11:52:15 +0300 | [diff] [blame] | 1808 | |
| 1809 | if (WARN_ONCE(delta <= 0, |
| 1810 | "%s is expecting an increase in the headroom", __func__)) |
| 1811 | return skb; |
| 1812 | |
Vasily Averin | 7f678de | 2021-10-22 13:28:37 +0300 | [diff] [blame] | 1813 | delta = SKB_DATA_ALIGN(delta); |
| 1814 | 	/* pskb_expand_head() might crash if the skb is shared. */ |
| 1815 | if (skb_shared(skb) || !is_skb_wmem(skb)) { |
Vasily Averin | f1260ff | 2021-08-02 11:52:15 +0300 | [diff] [blame] | 1816 | struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); |
| 1817 | |
Vasily Averin | 7f678de | 2021-10-22 13:28:37 +0300 | [diff] [blame] | 1818 | if (unlikely(!nskb)) |
| 1819 | goto fail; |
| 1820 | |
| 1821 | if (sk) |
| 1822 | skb_set_owner_w(nskb, sk); |
| 1823 | consume_skb(skb); |
Vasily Averin | f1260ff | 2021-08-02 11:52:15 +0300 | [diff] [blame] | 1824 | skb = nskb; |
| 1825 | } |
Vasily Averin | 7f678de | 2021-10-22 13:28:37 +0300 | [diff] [blame] | 1826 | if (pskb_expand_head(skb, delta, 0, GFP_ATOMIC)) |
| 1827 | goto fail; |
| 1828 | |
| 1829 | if (sk && is_skb_wmem(skb)) { |
| 1830 | delta = skb_end_offset(skb) - osize; |
| 1831 | refcount_add(delta, &sk->sk_wmem_alloc); |
| 1832 | skb->truesize += delta; |
Vasily Averin | f1260ff | 2021-08-02 11:52:15 +0300 | [diff] [blame] | 1833 | } |
| 1834 | return skb; |
Vasily Averin | 7f678de | 2021-10-22 13:28:37 +0300 | [diff] [blame] | 1835 | |
| 1836 | fail: |
| 1837 | kfree_skb(skb); |
| 1838 | return NULL; |
Vasily Averin | f1260ff | 2021-08-02 11:52:15 +0300 | [diff] [blame] | 1839 | } |
| 1840 | EXPORT_SYMBOL(skb_expand_head); |
| 1841 | |
| 1842 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1843 | * skb_copy_expand - copy and expand sk_buff |
| 1844 | * @skb: buffer to copy |
| 1845 | * @newheadroom: new free bytes at head |
| 1846 | * @newtailroom: new free bytes at tail |
| 1847 | * @gfp_mask: allocation priority |
| 1848 | * |
| 1849 |  *	Make a copy of both an &sk_buff and its data and, while doing so, |
| 1850 |  *	allocate additional space. |
| 1851 | * |
| 1852 | * This is used when the caller wishes to modify the data and needs a |
| 1853 | * private copy of the data to alter as well as more space for new fields. |
| 1854 | * Returns %NULL on failure or the pointer to the buffer |
| 1855 | * on success. The returned buffer has a reference count of 1. |
| 1856 | * |
| 1857 | * You must pass %GFP_ATOMIC as the allocation priority if this function |
| 1858 | * is called from an interrupt. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1859 | */ |
| 1860 | struct sk_buff *skb_copy_expand(const struct sk_buff *skb, |
Victor Fusco | 86a76ca | 2005-07-08 14:57:47 -0700 | [diff] [blame] | 1861 | int newheadroom, int newtailroom, |
Al Viro | dd0fc66 | 2005-10-07 07:46:04 +0100 | [diff] [blame] | 1862 | gfp_t gfp_mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1863 | { |
| 1864 | /* |
| 1865 | * Allocate the copy buffer |
| 1866 | */ |
Mel Gorman | c93bdd0 | 2012-07-31 16:44:19 -0700 | [diff] [blame] | 1867 | struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom, |
| 1868 | gfp_mask, skb_alloc_rx_flag(skb), |
| 1869 | NUMA_NO_NODE); |
Patrick McHardy | efd1e8d | 2007-04-10 18:30:09 -0700 | [diff] [blame] | 1870 | int oldheadroom = skb_headroom(skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1871 | int head_copy_len, head_copy_off; |
| 1872 | |
| 1873 | if (!n) |
| 1874 | return NULL; |
| 1875 | |
| 1876 | skb_reserve(n, newheadroom); |
| 1877 | |
| 1878 | /* Set the tail pointer and length */ |
| 1879 | skb_put(n, skb->len); |
| 1880 | |
Patrick McHardy | efd1e8d | 2007-04-10 18:30:09 -0700 | [diff] [blame] | 1881 | head_copy_len = oldheadroom; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1882 | head_copy_off = 0; |
| 1883 | if (newheadroom <= head_copy_len) |
| 1884 | head_copy_len = newheadroom; |
| 1885 | else |
| 1886 | head_copy_off = newheadroom - head_copy_len; |
| 1887 | |
| 1888 | /* Copy the linear header and data. */ |
Tim Hansen | 9f77fad | 2017-10-09 11:37:59 -0400 | [diff] [blame] | 1889 | BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, |
| 1890 | skb->len + head_copy_len)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1891 | |
Ilya Lesokhin | 08303c1 | 2018-04-30 10:16:11 +0300 | [diff] [blame] | 1892 | skb_copy_header(n, skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1893 | |
Eric Dumazet | 030737b | 2013-10-19 11:42:54 -0700 | [diff] [blame] | 1894 | skb_headers_offset_update(n, newheadroom - oldheadroom); |
Patrick McHardy | efd1e8d | 2007-04-10 18:30:09 -0700 | [diff] [blame] | 1895 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1896 | return n; |
| 1897 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1898 | EXPORT_SYMBOL(skb_copy_expand); |
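/* A hedged sketch of skb_copy_expand() in an encapsulation path; the
 * function name and hdr_len parameter below are assumptions for
 * illustration only.
 */
#if 0
static struct sk_buff *encap_copy_sketch(const struct sk_buff *skb,
					 unsigned int hdr_len)
{
	struct sk_buff *n;

	/* Private copy with hdr_len extra bytes of headroom for a new
	 * outer header; the original skb is left untouched.
	 */
	n = skb_copy_expand(skb, skb_headroom(skb) + hdr_len, 0, GFP_ATOMIC);
	if (!n)
		return NULL;
	skb_push(n, hdr_len);
	/* ... write the outer header at n->data ... */
	return n;
}
#endif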
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1899 | |
| 1900 | /** |
Florian Fainelli | cd0a137 | 2017-08-22 15:12:14 -0700 | [diff] [blame] | 1901 | * __skb_pad - zero pad the tail of an skb |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1902 | * @skb: buffer to pad |
| 1903 | * @pad: space to pad |
Florian Fainelli | cd0a137 | 2017-08-22 15:12:14 -0700 | [diff] [blame] | 1904 | * @free_on_error: free buffer on error |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1905 | * |
| 1906 | * Ensure that a buffer is followed by a padding area that is zero |
| 1907 | * filled. Used by network drivers which may DMA or transfer data |
| 1908 | * beyond the buffer end onto the wire. |
| 1909 | * |
Florian Fainelli | cd0a137 | 2017-08-22 15:12:14 -0700 | [diff] [blame] | 1910 | * May return an error in out-of-memory cases. The skb is freed on error
| 1911 | * if @free_on_error is true. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1912 | */ |
YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 1913 | |
Florian Fainelli | cd0a137 | 2017-08-22 15:12:14 -0700 | [diff] [blame] | 1914 | int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1915 | { |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1916 | int err; |
| 1917 | int ntail; |
YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 1918 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1919 | /* If the skbuff is non-linear, tailroom is always zero. */
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1920 | if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1921 | memset(skb->data+skb->len, 0, pad); |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1922 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1923 | } |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1924 | |
Arnaldo Carvalho de Melo | 4305b54 | 2007-04-19 20:43:29 -0700 | [diff] [blame] | 1925 | ntail = skb->data_len + pad - (skb->end - skb->tail); |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1926 | if (likely(skb_cloned(skb) || ntail > 0)) { |
| 1927 | err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); |
| 1928 | if (unlikely(err)) |
| 1929 | goto free_skb; |
| 1930 | } |
| 1931 | |
| 1932 | /* FIXME: The use of this function with non-linear skb's really needs |
| 1933 | * to be audited. |
| 1934 | */ |
| 1935 | err = skb_linearize(skb); |
| 1936 | if (unlikely(err)) |
| 1937 | goto free_skb; |
| 1938 | |
| 1939 | memset(skb->data + skb->len, 0, pad); |
| 1940 | return 0; |
| 1941 | |
| 1942 | free_skb: |
Florian Fainelli | cd0a137 | 2017-08-22 15:12:14 -0700 | [diff] [blame] | 1943 | if (free_on_error) |
| 1944 | kfree_skb(skb); |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1945 | return err; |
YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 1946 | } |
Florian Fainelli | cd0a137 | 2017-08-22 15:12:14 -0700 | [diff] [blame] | 1947 | EXPORT_SYMBOL(__skb_pad); |
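/* Drivers usually reach __skb_pad() through helpers such as
 * skb_put_padto(); a sketch (illustrative names, ETH_ZLEN from
 * <linux/if_ether.h>), excluded from the build:
 */
#if 0
static int pad_runt_sketch(struct sk_buff *skb)
{
	int err;

	/* Zero-pad short frames to the 60-byte Ethernet minimum before
	 * handing them to hardware; on error the skb is already freed,
	 * since skb_put_padto() passes free_on_error == true.
	 */
	err = skb_put_padto(skb, ETH_ZLEN);
	if (err)
		return err;
	/* ... queue the frame for DMA ... */
	return 0;
}
#endif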
YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 1948 | |
Ilpo Järvinen | 0dde3e1 | 2008-03-27 17:43:41 -0700 | [diff] [blame] | 1949 | /** |
Mathias Krause | 0c7ddf3 | 2013-11-07 14:18:24 +0100 | [diff] [blame] | 1950 | * pskb_put - add data to the tail of a potentially fragmented buffer |
| 1951 | * @skb: start of the buffer to use |
| 1952 | * @tail: tail fragment of the buffer to use |
| 1953 | * @len: amount of data to add |
| 1954 | * |
| 1955 | * This function extends the used data area of the potentially |
| 1956 | * fragmented buffer. @tail must be the last fragment of @skb -- or |
| 1957 | * @skb itself. If this would exceed the total buffer size the kernel |
| 1958 | * will panic. A pointer to the first byte of the extra data is |
| 1959 | * returned. |
| 1960 | */ |
| 1961 | |
Johannes Berg | 4df864c | 2017-06-16 14:29:21 +0200 | [diff] [blame] | 1962 | void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) |
Mathias Krause | 0c7ddf3 | 2013-11-07 14:18:24 +0100 | [diff] [blame] | 1963 | { |
| 1964 | if (tail != skb) { |
| 1965 | skb->data_len += len; |
| 1966 | skb->len += len; |
| 1967 | } |
| 1968 | return skb_put(tail, len); |
| 1969 | } |
| 1970 | EXPORT_SYMBOL_GPL(pskb_put); |
| 1971 | |
| 1972 | /** |
Ilpo Järvinen | 0dde3e1 | 2008-03-27 17:43:41 -0700 | [diff] [blame] | 1973 | * skb_put - add data to a buffer |
| 1974 | * @skb: buffer to use |
| 1975 | * @len: amount of data to add |
| 1976 | * |
| 1977 | * This function extends the used data area of the buffer. If this would |
| 1978 | * exceed the total buffer size the kernel will panic. A pointer to the |
| 1979 | * first byte of the extra data is returned. |
| 1980 | */ |
Johannes Berg | 4df864c | 2017-06-16 14:29:21 +0200 | [diff] [blame] | 1981 | void *skb_put(struct sk_buff *skb, unsigned int len) |
Ilpo Järvinen | 0dde3e1 | 2008-03-27 17:43:41 -0700 | [diff] [blame] | 1982 | { |
Johannes Berg | 4df864c | 2017-06-16 14:29:21 +0200 | [diff] [blame] | 1983 | void *tmp = skb_tail_pointer(skb); |
Ilpo Järvinen | 0dde3e1 | 2008-03-27 17:43:41 -0700 | [diff] [blame] | 1984 | SKB_LINEAR_ASSERT(skb); |
| 1985 | skb->tail += len; |
| 1986 | skb->len += len; |
| 1987 | if (unlikely(skb->tail > skb->end)) |
| 1988 | skb_over_panic(skb, len, __builtin_return_address(0)); |
| 1989 | return tmp; |
| 1990 | } |
| 1991 | EXPORT_SYMBOL(skb_put); |
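/* The canonical allocate/reserve/put pattern, as a sketch kept out of the
 * build; the 128-byte headroom and function name are arbitrary choices.
 */
#if 0
static struct sk_buff *build_pkt_sketch(const void *payload, unsigned int len)
{
	struct sk_buff *skb = alloc_skb(128 + len, GFP_ATOMIC);

	if (!skb)
		return NULL;
	skb_reserve(skb, 128);			/* headroom for later headers */
	skb_put_data(skb, payload, len);	/* extends tail and copies */
	return skb;
}
#endif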
| 1992 | |
Ilpo Järvinen | 6be8ac2 | 2008-03-27 17:47:24 -0700 | [diff] [blame] | 1993 | /** |
Ilpo Järvinen | c2aa270 | 2008-03-27 17:52:40 -0700 | [diff] [blame] | 1994 | * skb_push - add data to the start of a buffer |
| 1995 | * @skb: buffer to use |
| 1996 | * @len: amount of data to add |
| 1997 | * |
| 1998 | * This function extends the used data area of the buffer at the buffer |
| 1999 | * start. If this would exceed the total buffer headroom the kernel will |
| 2000 | * panic. A pointer to the first byte of the extra data is returned. |
| 2001 | */ |
Johannes Berg | d58ff35 | 2017-06-16 14:29:23 +0200 | [diff] [blame] | 2002 | void *skb_push(struct sk_buff *skb, unsigned int len) |
Ilpo Järvinen | c2aa270 | 2008-03-27 17:52:40 -0700 | [diff] [blame] | 2003 | { |
| 2004 | skb->data -= len; |
| 2005 | skb->len += len; |
Ganesh Goudar | 9aba2f8 | 2018-08-02 15:34:52 +0530 | [diff] [blame] | 2006 | if (unlikely(skb->data < skb->head)) |
Ilpo Järvinen | c2aa270 | 2008-03-27 17:52:40 -0700 | [diff] [blame] | 2007 | skb_under_panic(skb, len, __builtin_return_address(0)); |
| 2008 | return skb->data; |
| 2009 | } |
| 2010 | EXPORT_SYMBOL(skb_push); |
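/* A sketch of prepending a header with skb_push(); struct demo_hdr and the
 * EtherType value are hypothetical, for illustration only.
 */
#if 0
struct demo_hdr {		/* hypothetical 4-byte header */
	__be16 proto;
	__be16 len;
};

static void prepend_hdr_sketch(struct sk_buff *skb)
{
	struct demo_hdr *h;

	/* The caller must have reserved >= sizeof(*h) bytes of headroom,
	 * e.g. via skb_reserve() at allocation time.
	 */
	h = skb_push(skb, sizeof(*h));
	h->proto = htons(0x88b5);	/* local experimental EtherType */
	h->len   = htons(skb->len);
}
#endif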
| 2011 | |
| 2012 | /** |
Ilpo Järvinen | 6be8ac2 | 2008-03-27 17:47:24 -0700 | [diff] [blame] | 2013 | * skb_pull - remove data from the start of a buffer |
| 2014 | * @skb: buffer to use |
| 2015 | * @len: amount of data to remove |
| 2016 | * |
| 2017 | * This function removes data from the start of a buffer, returning |
| 2018 | * the memory to the headroom. A pointer to the next data in the buffer |
| 2019 | * is returned. Once the data has been pulled future pushes will overwrite |
| 2020 | * the old data. |
| 2021 | */ |
Johannes Berg | af72868 | 2017-06-16 14:29:22 +0200 | [diff] [blame] | 2022 | void *skb_pull(struct sk_buff *skb, unsigned int len) |
Ilpo Järvinen | 6be8ac2 | 2008-03-27 17:47:24 -0700 | [diff] [blame] | 2023 | { |
David S. Miller | 47d2964 | 2010-05-02 02:21:44 -0700 | [diff] [blame] | 2024 | return skb_pull_inline(skb, len); |
Ilpo Järvinen | 6be8ac2 | 2008-03-27 17:47:24 -0700 | [diff] [blame] | 2025 | } |
| 2026 | EXPORT_SYMBOL(skb_pull); |
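/* The usual receive-side pairing of pskb_may_pull() and skb_pull(),
 * sketched with a hypothetical 4-byte tag:
 */
#if 0
static int strip_vtag_sketch(struct sk_buff *skb)
{
	/* Make sure the 4 tag bytes are linear, look at them, then
	 * advance skb->data past the tag.
	 */
	if (!pskb_may_pull(skb, 4))
		return -EINVAL;
	/* ... inspect skb->data[0..3] ... */
	skb_pull(skb, 4);
	return 0;
}
#endif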
| 2027 | |
Ilpo Järvinen | 419ae74 | 2008-03-27 17:54:01 -0700 | [diff] [blame] | 2028 | /** |
Luiz Augusto von Dentz | 13244cc | 2021-12-01 10:54:52 -0800 | [diff] [blame] | 2029 | * skb_pull_data - remove data from the start of a buffer, returning its original position
| 2031 | * @skb: buffer to use |
| 2032 | * @len: amount of data to remove |
| 2033 | * |
| 2034 | * This function removes data from the start of a buffer, returning |
| 2035 | * the memory to the headroom. A pointer to the original data in the buffer |
| 2036 | * is returned after checking if there is enough data to pull. Once the |
| 2037 | * data has been pulled future pushes will overwrite the old data. |
| 2038 | */ |
| 2039 | void *skb_pull_data(struct sk_buff *skb, size_t len) |
| 2040 | { |
| 2041 | void *data = skb->data; |
| 2042 | |
| 2043 | if (skb->len < len) |
| 2044 | return NULL; |
| 2045 | |
| 2046 | skb_pull(skb, len); |
| 2047 | |
| 2048 | return data; |
| 2049 | } |
| 2050 | EXPORT_SYMBOL(skb_pull_data); |
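/* A sketch contrasting skb_pull_data() with skb_pull(): the length check
 * and the pointer to the pulled bytes come in one call. Names are
 * illustrative; it assumes the first 4 bytes are already linear
 * (e.g. after a successful pskb_may_pull(skb, 4)).
 */
#if 0
static int read_tag_sketch(struct sk_buff *skb)
{
	u8 *tag;

	tag = skb_pull_data(skb, 4);
	if (!tag)
		return -EINVAL;	/* fewer than 4 bytes in the skb */
	/* tag still points at the four bytes, now part of the headroom */
	return 0;
}
#endif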
| 2051 | |
| 2052 | /** |
Ilpo Järvinen | 419ae74 | 2008-03-27 17:54:01 -0700 | [diff] [blame] | 2053 | * skb_trim - remove end from a buffer |
| 2054 | * @skb: buffer to alter |
| 2055 | * @len: new length |
| 2056 | * |
| 2057 | * Cut the length of a buffer down by removing data from the tail. If |
| 2058 | * the buffer is already under the length specified it is not modified. |
| 2059 | * The skb must be linear. |
| 2060 | */ |
| 2061 | void skb_trim(struct sk_buff *skb, unsigned int len) |
| 2062 | { |
| 2063 | if (skb->len > len) |
| 2064 | __skb_trim(skb, len); |
| 2065 | } |
| 2066 | EXPORT_SYMBOL(skb_trim); |
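/* A sketch of choosing between skb_trim() and pskb_trim() on receive,
 * e.g. to drop link-layer padding; the function name is hypothetical.
 */
#if 0
static int rx_trim_sketch(struct sk_buff *skb, unsigned int data_len)
{
	if (!skb_is_nonlinear(skb)) {
		skb_trim(skb, data_len);	/* linear: cannot fail */
		return 0;
	}
	return pskb_trim(skb, data_len);	/* may reallocate, can fail */
}
#endif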
| 2067 | |
Herbert Xu | 3cc0e87 | 2006-06-09 16:13:38 -0700 | [diff] [blame] | 2068 | /* Trims skb to length len. It can change skb pointers. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2069 | */ |
| 2070 | |
Herbert Xu | 3cc0e87 | 2006-06-09 16:13:38 -0700 | [diff] [blame] | 2071 | int ___pskb_trim(struct sk_buff *skb, unsigned int len) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2072 | { |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 2073 | struct sk_buff **fragp; |
| 2074 | struct sk_buff *frag; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2075 | int offset = skb_headlen(skb); |
| 2076 | int nfrags = skb_shinfo(skb)->nr_frags; |
| 2077 | int i; |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 2078 | int err; |
| 2079 | |
| 2080 | if (skb_cloned(skb) && |
| 2081 | unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) |
| 2082 | return err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2083 | |
Herbert Xu | f4d26fb | 2006-07-30 20:20:28 -0700 | [diff] [blame] | 2084 | i = 0; |
| 2085 | if (offset >= len) |
| 2086 | goto drop_pages; |
| 2087 | |
| 2088 | for (; i < nfrags; i++) { |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2089 | int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 2090 | |
| 2091 | if (end < len) { |
| 2092 | offset = end; |
| 2093 | continue; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2094 | } |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 2095 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2096 | skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 2097 | |
Herbert Xu | f4d26fb | 2006-07-30 20:20:28 -0700 | [diff] [blame] | 2098 | drop_pages: |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 2099 | skb_shinfo(skb)->nr_frags = i; |
| 2100 | |
| 2101 | for (; i < nfrags; i++) |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 2102 | skb_frag_unref(skb, i); |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 2103 | |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 2104 | if (skb_has_frag_list(skb)) |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 2105 | skb_drop_fraglist(skb); |
Herbert Xu | f4d26fb | 2006-07-30 20:20:28 -0700 | [diff] [blame] | 2106 | goto done; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2107 | } |
| 2108 | |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 2109 | for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); |
| 2110 | fragp = &frag->next) { |
| 2111 | int end = offset + frag->len; |
| 2112 | |
| 2113 | if (skb_shared(frag)) { |
| 2114 | struct sk_buff *nfrag; |
| 2115 | |
| 2116 | nfrag = skb_clone(frag, GFP_ATOMIC); |
| 2117 | if (unlikely(!nfrag)) |
| 2118 | return -ENOMEM; |
| 2119 | |
| 2120 | nfrag->next = frag->next; |
Eric Dumazet | 85bb2a6 | 2012-04-19 02:24:53 +0000 | [diff] [blame] | 2121 | consume_skb(frag); |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 2122 | frag = nfrag; |
| 2123 | *fragp = frag; |
| 2124 | } |
| 2125 | |
| 2126 | if (end < len) { |
| 2127 | offset = end; |
| 2128 | continue; |
| 2129 | } |
| 2130 | |
| 2131 | if (end > len && |
| 2132 | unlikely((err = pskb_trim(frag, len - offset)))) |
| 2133 | return err; |
| 2134 | |
| 2135 | if (frag->next) |
| 2136 | skb_drop_list(&frag->next); |
| 2137 | break; |
| 2138 | } |
| 2139 | |
Herbert Xu | f4d26fb | 2006-07-30 20:20:28 -0700 | [diff] [blame] | 2140 | done: |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 2141 | if (len > skb_headlen(skb)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2142 | skb->data_len -= skb->len - len; |
| 2143 | skb->len = len; |
| 2144 | } else { |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 2145 | skb->len = len; |
| 2146 | skb->data_len = 0; |
Arnaldo Carvalho de Melo | 27a884d | 2007-04-19 20:29:13 -0700 | [diff] [blame] | 2147 | skb_set_tail_pointer(skb, len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2148 | } |
| 2149 | |
Eric Dumazet | c21b48c | 2017-04-26 09:07:46 -0700 | [diff] [blame] | 2150 | if (!skb->sk || skb->destructor == sock_edemux) |
| 2151 | skb_condense(skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2152 | return 0; |
| 2153 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2154 | EXPORT_SYMBOL(___pskb_trim); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2155 | |
Eric Dumazet | 88078d9 | 2018-04-18 11:43:15 -0700 | [diff] [blame] | 2156 | /* Note: use pskb_trim_rcsum() instead of calling this directly
| 2157 | */ |
| 2158 | int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len) |
| 2159 | { |
| 2160 | if (skb->ip_summed == CHECKSUM_COMPLETE) { |
| 2161 | int delta = skb->len - len; |
| 2162 | |
Dimitris Michailidis | d55bef50 | 2018-10-19 17:07:13 -0700 | [diff] [blame] | 2163 | skb->csum = csum_block_sub(skb->csum, |
| 2164 | skb_checksum(skb, len, delta, 0), |
| 2165 | len); |
Vasily Averin | 54970a2 | 2020-12-14 22:07:39 +0300 | [diff] [blame] | 2166 | } else if (skb->ip_summed == CHECKSUM_PARTIAL) { |
| 2167 | int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len; |
| 2168 | int offset = skb_checksum_start_offset(skb) + skb->csum_offset; |
| 2169 | |
| 2170 | if (offset + sizeof(__sum16) > hdlen) |
| 2171 | return -EINVAL; |
Eric Dumazet | 88078d9 | 2018-04-18 11:43:15 -0700 | [diff] [blame] | 2172 | } |
| 2173 | return __pskb_trim(skb, len); |
| 2174 | } |
| 2175 | EXPORT_SYMBOL(pskb_trim_rcsum_slow); |
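/* A sketch of the intended entry point, pskb_trim_rcsum(), trimming a
 * received packet to the length claimed by its network header; the
 * function name and tot_len parameter are illustrative.
 */
#if 0
static int rx_trim_csum_sketch(struct sk_buff *skb, unsigned int tot_len)
{
	/* Drop trailing padding while keeping skb->csum consistent: for
	 * CHECKSUM_COMPLETE the slow path above subtracts the checksum
	 * of the trimmed bytes.
	 */
	return pskb_trim_rcsum(skb, tot_len);
}
#endif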
| 2176 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2177 | /** |
| 2178 | * __pskb_pull_tail - advance tail of skb header |
| 2179 | * @skb: buffer to reallocate |
| 2180 | * @delta: number of bytes to advance tail |
| 2181 | * |
| 2182 | * This function only makes sense on a fragmented &sk_buff:
| 2183 | * it expands the header, moving its tail forward and copying the
| 2184 | * necessary data from the fragmented part.
| 2185 | * |
| 2186 | * &sk_buff MUST have reference count of 1. |
| 2187 | * |
| 2188 | * Returns %NULL (and the &sk_buff is unchanged) if the pull failed,
| 2189 | * or the value of the new tail of the skb on success.
| 2190 | * |
| 2191 | * All the pointers pointing into skb header may change and must be |
| 2192 | * reloaded after call to this function. |
| 2193 | */ |
| 2194 | |
| 2195 | /* Moves the tail of the skb head forward, copying data from the
| 2196 | * fragmented part when necessary.
| 2197 | * 1. It may fail due to allocation failure.
| 2198 | * 2. It may change skb pointers. |
| 2199 | * |
| 2200 | * It is pretty complicated. Luckily, it is called only in exceptional cases. |
| 2201 | */ |
Johannes Berg | af72868 | 2017-06-16 14:29:22 +0200 | [diff] [blame] | 2202 | void *__pskb_pull_tail(struct sk_buff *skb, int delta) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2203 | { |
| 2204 | /* If the skb does not have enough free space at the tail, get a new
| 2205 | * one plus 128 bytes for future expansions. If we have enough room
| 2206 | * at the tail, reallocate without expansion only if the skb is cloned.
| 2207 | */ |
Arnaldo Carvalho de Melo | 4305b54 | 2007-04-19 20:43:29 -0700 | [diff] [blame] | 2208 | int i, k, eat = (skb->tail + delta) - skb->end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2209 | |
| 2210 | if (eat > 0 || skb_cloned(skb)) { |
| 2211 | if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, |
| 2212 | GFP_ATOMIC)) |
| 2213 | return NULL; |
| 2214 | } |
| 2215 | |
Tim Hansen | 9f77fad | 2017-10-09 11:37:59 -0400 | [diff] [blame] | 2216 | BUG_ON(skb_copy_bits(skb, skb_headlen(skb), |
| 2217 | skb_tail_pointer(skb), delta)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2218 | |
| 2219 | /* Optimization: no fragments, no reason to pre-estimate the
| 2220 | * size of pulled pages. Superb.
| 2221 | */ |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 2222 | if (!skb_has_frag_list(skb)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2223 | goto pull_pages; |
| 2224 | |
| 2225 | /* Estimate size of pulled pages. */ |
| 2226 | eat = delta; |
| 2227 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2228 | int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); |
| 2229 | |
| 2230 | if (size >= eat) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2231 | goto pull_pages; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2232 | eat -= size; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2233 | } |
| 2234 | |
| 2235 | /* If we need to update the frag list, we are in trouble.
Wenhua Shi | 09001b0 | 2017-10-14 18:51:36 +0200 | [diff] [blame] | 2236 | * Certainly, it is possible to add an offset to the skb data,
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2237 | * but given that pulling is expected to be a very rare
| 2238 | * operation, it is worth fighting against further bloating of the
| 2239 | * skb head and crucifying ourselves here instead.
| 2240 | * Pure masochism, indeed. 8)8)
| 2241 | */ |
| 2242 | if (eat) { |
| 2243 | struct sk_buff *list = skb_shinfo(skb)->frag_list; |
| 2244 | struct sk_buff *clone = NULL; |
| 2245 | struct sk_buff *insp = NULL; |
| 2246 | |
| 2247 | do { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2248 | if (list->len <= eat) { |
| 2249 | /* Eaten as a whole. */
| 2250 | eat -= list->len; |
| 2251 | list = list->next; |
| 2252 | insp = list; |
| 2253 | } else { |
| 2254 | /* Eaten partially. */ |
| 2255 | |
| 2256 | if (skb_shared(list)) { |
| 2257 | /* Sucks! We need to fork list. :-( */ |
| 2258 | clone = skb_clone(list, GFP_ATOMIC); |
| 2259 | if (!clone) |
| 2260 | return NULL; |
| 2261 | insp = list->next; |
| 2262 | list = clone; |
| 2263 | } else { |
| 2264 | /* This may be pulled without |
| 2265 | * problems. */ |
| 2266 | insp = list; |
| 2267 | } |
| 2268 | if (!pskb_pull(list, eat)) { |
Wei Yongjun | f3fbbe0 | 2009-02-25 00:37:32 +0000 | [diff] [blame] | 2269 | kfree_skb(clone); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2270 | return NULL; |
| 2271 | } |
| 2272 | break; |
| 2273 | } |
| 2274 | } while (eat); |
| 2275 | |
| 2276 | /* Free pulled out fragments. */ |
| 2277 | while ((list = skb_shinfo(skb)->frag_list) != insp) { |
| 2278 | skb_shinfo(skb)->frag_list = list->next; |
| 2279 | kfree_skb(list); |
| 2280 | } |
| 2281 | /* And insert new clone at head. */ |
| 2282 | if (clone) { |
| 2283 | clone->next = list; |
| 2284 | skb_shinfo(skb)->frag_list = clone; |
| 2285 | } |
| 2286 | } |
| 2287 | /* Success! Now we may commit changes to skb data. */ |
| 2288 | |
| 2289 | pull_pages: |
| 2290 | eat = delta; |
| 2291 | k = 0; |
| 2292 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2293 | int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); |
| 2294 | |
| 2295 | if (size <= eat) { |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 2296 | skb_frag_unref(skb, i); |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2297 | eat -= size; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2298 | } else { |
Jonathan Lemon | b54c9d5 | 2019-07-30 07:40:33 -0700 | [diff] [blame] | 2299 | skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; |
| 2300 | |
| 2301 | *frag = skb_shinfo(skb)->frags[i]; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2302 | if (eat) { |
Jonathan Lemon | b54c9d5 | 2019-07-30 07:40:33 -0700 | [diff] [blame] | 2303 | skb_frag_off_add(frag, eat); |
| 2304 | skb_frag_size_sub(frag, eat); |
linzhang | 3ccc6c6 | 2017-07-17 17:25:02 +0800 | [diff] [blame] | 2305 | if (!i) |
| 2306 | goto end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2307 | eat = 0; |
| 2308 | } |
| 2309 | k++; |
| 2310 | } |
| 2311 | } |
| 2312 | skb_shinfo(skb)->nr_frags = k; |
| 2313 | |
linzhang | 3ccc6c6 | 2017-07-17 17:25:02 +0800 | [diff] [blame] | 2314 | end: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2315 | skb->tail += delta; |
| 2316 | skb->data_len -= delta; |
| 2317 | |
Willem de Bruijn | 1f8b977 | 2017-08-03 16:29:41 -0400 | [diff] [blame] | 2318 | if (!skb->data_len) |
| 2319 | skb_zcopy_clear(skb, false); |
| 2320 | |
Arnaldo Carvalho de Melo | 27a884d | 2007-04-19 20:29:13 -0700 | [diff] [blame] | 2321 | return skb_tail_pointer(skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2322 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2323 | EXPORT_SYMBOL(__pskb_pull_tail); |
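/* Callers rarely invoke __pskb_pull_tail() directly; pskb_may_pull() calls
 * it when the requested bytes are not yet linear. A hedged sketch of a
 * header-parsing path (names assumed):
 */
#if 0
static int parse_hdrs_sketch(struct sk_buff *skb, unsigned int hdrlen)
{
	if (!pskb_may_pull(skb, hdrlen))
		return -EINVAL;
	/* Any previously cached pointers into the header are now stale
	 * and must be re-derived from skb->data.
	 */
	return 0;
}
#endif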
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2324 | |
Eric Dumazet | 22019b1 | 2011-07-29 18:37:31 +0000 | [diff] [blame] | 2325 | /** |
| 2326 | * skb_copy_bits - copy bits from skb to kernel buffer |
| 2327 | * @skb: source skb |
| 2328 | * @offset: offset in source |
| 2329 | * @to: destination buffer |
| 2330 | * @len: number of bytes to copy |
| 2331 | * |
| 2332 | * Copy the specified number of bytes from the source skb to the |
| 2333 | * destination buffer. |
| 2334 | * |
| 2335 | * CAUTION!
| 2336 | * If its prototype is ever changed, |
| 2337 | * check arch/{*}/net/{*}.S files, |
| 2338 | * since it is called from BPF assembly code. |
| 2339 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2340 | int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) |
| 2341 | { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2342 | int start = skb_headlen(skb); |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2343 | struct sk_buff *frag_iter; |
| 2344 | int i, copy; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2345 | |
| 2346 | if (offset > (int)skb->len - len) |
| 2347 | goto fault; |
| 2348 | |
| 2349 | /* Copy header. */ |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2350 | if ((copy = start - offset) > 0) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2351 | if (copy > len) |
| 2352 | copy = len; |
Arnaldo Carvalho de Melo | d626f62 | 2007-03-27 18:55:52 -0300 | [diff] [blame] | 2353 | skb_copy_from_linear_data_offset(skb, offset, to, copy); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2354 | if ((len -= copy) == 0) |
| 2355 | return 0; |
| 2356 | offset += copy; |
| 2357 | to += copy; |
| 2358 | } |
| 2359 | |
| 2360 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2361 | int end; |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 2362 | skb_frag_t *f = &skb_shinfo(skb)->frags[i]; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2363 | |
Ilpo Järvinen | 547b792 | 2008-07-25 21:43:18 -0700 | [diff] [blame] | 2364 | WARN_ON(start > offset + len); |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2365 | |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 2366 | end = start + skb_frag_size(f); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2367 | if ((copy = end - offset) > 0) { |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 2368 | u32 p_off, p_len, copied; |
| 2369 | struct page *p; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2370 | u8 *vaddr; |
| 2371 | |
| 2372 | if (copy > len) |
| 2373 | copy = len; |
| 2374 | |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 2375 | skb_frag_foreach_page(f, |
Jonathan Lemon | b54c9d5 | 2019-07-30 07:40:33 -0700 | [diff] [blame] | 2376 | skb_frag_off(f) + offset - start, |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 2377 | copy, p, p_off, p_len, copied) { |
| 2378 | vaddr = kmap_atomic(p); |
| 2379 | memcpy(to + copied, vaddr + p_off, p_len); |
| 2380 | kunmap_atomic(vaddr); |
| 2381 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2382 | |
| 2383 | if ((len -= copy) == 0) |
| 2384 | return 0; |
| 2385 | offset += copy; |
| 2386 | to += copy; |
| 2387 | } |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2388 | start = end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2389 | } |
| 2390 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2391 | skb_walk_frags(skb, frag_iter) { |
| 2392 | int end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2393 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2394 | WARN_ON(start > offset + len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2395 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2396 | end = start + frag_iter->len; |
| 2397 | if ((copy = end - offset) > 0) { |
| 2398 | if (copy > len) |
| 2399 | copy = len; |
| 2400 | if (skb_copy_bits(frag_iter, offset - start, to, copy)) |
| 2401 | goto fault; |
| 2402 | if ((len -= copy) == 0) |
| 2403 | return 0; |
| 2404 | offset += copy; |
| 2405 | to += copy; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2406 | } |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2407 | start = end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2408 | } |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 2409 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2410 | if (!len) |
| 2411 | return 0; |
| 2412 | |
| 2413 | fault: |
| 2414 | return -EFAULT; |
| 2415 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2416 | EXPORT_SYMBOL(skb_copy_bits); |
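/* A sketch of gathering payload bytes with skb_copy_bits(); the function
 * name is illustrative, and it assumes the transport header offset has
 * already been set on the skb.
 */
#if 0
static int copy_payload_sketch(const struct sk_buff *skb, void *buf,
			       unsigned int len)
{
	int off = skb_transport_offset(skb);

	/* Gather len payload bytes into a flat buffer no matter how the
	 * skb is split between head, page frags and the frag list.
	 */
	return skb_copy_bits(skb, off, buf, len);
}
#endif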
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2417 | |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2418 | /* |
| 2419 | * Callback from splice_to_pipe(), in case we need to release some
| 2420 | * pages at the end of the spd because we errored out while filling the pipe.
| 2421 | */ |
| 2422 | static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) |
| 2423 | { |
Jarek Poplawski | 8b9d372 | 2009-01-19 17:03:56 -0800 | [diff] [blame] | 2424 | put_page(spd->pages[i]); |
| 2425 | } |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2426 | |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2427 | static struct page *linear_to_page(struct page *page, unsigned int *len, |
| 2428 | unsigned int *offset, |
Eric Dumazet | 18aafc6 | 2013-01-11 14:46:37 +0000 | [diff] [blame] | 2429 | struct sock *sk) |
Jarek Poplawski | 8b9d372 | 2009-01-19 17:03:56 -0800 | [diff] [blame] | 2430 | { |
Eric Dumazet | 5640f76 | 2012-09-23 23:04:42 +0000 | [diff] [blame] | 2431 | struct page_frag *pfrag = sk_page_frag(sk); |
Jarek Poplawski | 8b9d372 | 2009-01-19 17:03:56 -0800 | [diff] [blame] | 2432 | |
Eric Dumazet | 5640f76 | 2012-09-23 23:04:42 +0000 | [diff] [blame] | 2433 | if (!sk_page_frag_refill(sk, pfrag)) |
| 2434 | return NULL; |
Jarek Poplawski | 4fb6699 | 2009-02-01 00:41:42 -0800 | [diff] [blame] | 2435 | |
Eric Dumazet | 5640f76 | 2012-09-23 23:04:42 +0000 | [diff] [blame] | 2436 | *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset); |
Jarek Poplawski | 4fb6699 | 2009-02-01 00:41:42 -0800 | [diff] [blame] | 2437 | |
Eric Dumazet | 5640f76 | 2012-09-23 23:04:42 +0000 | [diff] [blame] | 2438 | memcpy(page_address(pfrag->page) + pfrag->offset, |
| 2439 | page_address(page) + *offset, *len); |
| 2440 | *offset = pfrag->offset; |
| 2441 | pfrag->offset += *len; |
Jarek Poplawski | 4fb6699 | 2009-02-01 00:41:42 -0800 | [diff] [blame] | 2442 | |
Eric Dumazet | 5640f76 | 2012-09-23 23:04:42 +0000 | [diff] [blame] | 2443 | return pfrag->page; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2444 | } |
| 2445 | |
Eric Dumazet | 41c73a0 | 2012-04-22 12:26:16 +0000 | [diff] [blame] | 2446 | static bool spd_can_coalesce(const struct splice_pipe_desc *spd, |
| 2447 | struct page *page, |
| 2448 | unsigned int offset) |
| 2449 | { |
| 2450 | return spd->nr_pages && |
| 2451 | spd->pages[spd->nr_pages - 1] == page && |
| 2452 | (spd->partial[spd->nr_pages - 1].offset + |
| 2453 | spd->partial[spd->nr_pages - 1].len == offset); |
| 2454 | } |
| 2455 | |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2456 | /* |
| 2457 | * Fill page/offset/length into spd if it can hold more pages.
| 2458 | */ |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2459 | static bool spd_fill_page(struct splice_pipe_desc *spd, |
| 2460 | struct pipe_inode_info *pipe, struct page *page, |
| 2461 | unsigned int *len, unsigned int offset, |
Eric Dumazet | 18aafc6 | 2013-01-11 14:46:37 +0000 | [diff] [blame] | 2462 | bool linear, |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2463 | struct sock *sk) |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2464 | { |
Eric Dumazet | 41c73a0 | 2012-04-22 12:26:16 +0000 | [diff] [blame] | 2465 | if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2466 | return true; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2467 | |
Jarek Poplawski | 8b9d372 | 2009-01-19 17:03:56 -0800 | [diff] [blame] | 2468 | if (linear) { |
Eric Dumazet | 18aafc6 | 2013-01-11 14:46:37 +0000 | [diff] [blame] | 2469 | page = linear_to_page(page, len, &offset, sk); |
Jarek Poplawski | 8b9d372 | 2009-01-19 17:03:56 -0800 | [diff] [blame] | 2470 | if (!page) |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2471 | return true; |
Eric Dumazet | 41c73a0 | 2012-04-22 12:26:16 +0000 | [diff] [blame] | 2472 | } |
| 2473 | if (spd_can_coalesce(spd, page, offset)) { |
| 2474 | spd->partial[spd->nr_pages - 1].len += *len; |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2475 | return false; |
Eric Dumazet | 41c73a0 | 2012-04-22 12:26:16 +0000 | [diff] [blame] | 2476 | } |
| 2477 | get_page(page); |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2478 | spd->pages[spd->nr_pages] = page; |
Jarek Poplawski | 4fb6699 | 2009-02-01 00:41:42 -0800 | [diff] [blame] | 2479 | spd->partial[spd->nr_pages].len = *len; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2480 | spd->partial[spd->nr_pages].offset = offset; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2481 | spd->nr_pages++; |
Jarek Poplawski | 8b9d372 | 2009-01-19 17:03:56 -0800 | [diff] [blame] | 2482 | |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2483 | return false; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2484 | } |
| 2485 | |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2486 | static bool __splice_segment(struct page *page, unsigned int poff, |
| 2487 | unsigned int plen, unsigned int *off, |
Eric Dumazet | 18aafc6 | 2013-01-11 14:46:37 +0000 | [diff] [blame] | 2488 | unsigned int *len, |
Eric Dumazet | d7ccf7c | 2012-04-23 23:35:04 -0400 | [diff] [blame] | 2489 | struct splice_pipe_desc *spd, bool linear, |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2490 | struct sock *sk, |
| 2491 | struct pipe_inode_info *pipe) |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 2492 | { |
| 2493 | if (!*len) |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2494 | return true; |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 2495 | |
| 2496 | /* skip this segment if already processed */ |
| 2497 | if (*off >= plen) { |
| 2498 | *off -= plen; |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2499 | return false; |
Octavian Purdila | db43a28 | 2008-06-27 17:27:21 -0700 | [diff] [blame] | 2500 | } |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2501 | |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 2502 | /* ignore any bits we already processed */ |
Eric Dumazet | 9ca1b22 | 2013-01-05 21:31:18 +0000 | [diff] [blame] | 2503 | poff += *off; |
| 2504 | plen -= *off; |
| 2505 | *off = 0; |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 2506 | |
Eric Dumazet | 18aafc6 | 2013-01-11 14:46:37 +0000 | [diff] [blame] | 2507 | do { |
| 2508 | unsigned int flen = min(*len, plen); |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 2509 | |
Eric Dumazet | 18aafc6 | 2013-01-11 14:46:37 +0000 | [diff] [blame] | 2510 | if (spd_fill_page(spd, pipe, page, &flen, poff, |
| 2511 | linear, sk)) |
| 2512 | return true; |
| 2513 | poff += flen; |
| 2514 | plen -= flen; |
| 2515 | *len -= flen; |
| 2516 | } while (*len && plen); |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 2517 | |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2518 | return false; |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 2519 | } |
| 2520 | |
| 2521 | /* |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2522 | * Map linear and fragment data from the skb to spd. It reports true if the |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 2523 | * pipe is full or if we already spliced the requested length. |
| 2524 | */ |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2525 | static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, |
| 2526 | unsigned int *offset, unsigned int *len, |
| 2527 | struct splice_pipe_desc *spd, struct sock *sk) |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 2528 | { |
| 2529 | int seg; |
Tom Herbert | fa9835e | 2016-03-07 14:11:04 -0800 | [diff] [blame] | 2530 | struct sk_buff *iter; |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 2531 | |
Eric Dumazet | 1d0c0b3 | 2012-04-27 02:10:03 +0000 | [diff] [blame] | 2532 | /* map the linear part:
Alexander Duyck | 2996d31 | 2012-05-02 18:18:42 +0000 | [diff] [blame] | 2533 | * If skb->head_frag is set, this 'linear' part is backed by a |
| 2534 | * fragment, and if the head is not shared with any clones then |
| 2535 | * we can avoid a copy since we own the head portion of this page. |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2536 | */ |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 2537 | if (__splice_segment(virt_to_page(skb->data), |
| 2538 | (unsigned long) skb->data & (PAGE_SIZE - 1), |
| 2539 | skb_headlen(skb), |
Eric Dumazet | 18aafc6 | 2013-01-11 14:46:37 +0000 | [diff] [blame] | 2540 | offset, len, spd, |
Alexander Duyck | 3a7c1ee4 | 2012-05-03 01:09:42 +0000 | [diff] [blame] | 2541 | skb_head_is_locked(skb), |
Eric Dumazet | 1d0c0b3 | 2012-04-27 02:10:03 +0000 | [diff] [blame] | 2542 | sk, pipe)) |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2543 | return true; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2544 | |
| 2545 | /* |
| 2546 | * then map the fragments |
| 2547 | */ |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2548 | for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { |
| 2549 | const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; |
| 2550 | |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 2551 | if (__splice_segment(skb_frag_page(f), |
Jonathan Lemon | b54c9d5 | 2019-07-30 07:40:33 -0700 | [diff] [blame] | 2552 | skb_frag_off(f), skb_frag_size(f), |
Eric Dumazet | 18aafc6 | 2013-01-11 14:46:37 +0000 | [diff] [blame] | 2553 | offset, len, spd, false, sk, pipe)) |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2554 | return true; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2555 | } |
| 2556 | |
Tom Herbert | fa9835e | 2016-03-07 14:11:04 -0800 | [diff] [blame] | 2557 | skb_walk_frags(skb, iter) { |
| 2558 | if (*offset >= iter->len) { |
| 2559 | *offset -= iter->len; |
| 2560 | continue; |
| 2561 | } |
| 2562 | /* __skb_splice_bits() only fails if the output has no room |
| 2563 | * left, so no point in going over the frag_list for the error |
| 2564 | * case. |
| 2565 | */ |
| 2566 | if (__skb_splice_bits(iter, pipe, offset, len, spd, sk)) |
| 2567 | return true; |
| 2568 | } |
| 2569 | |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 2570 | return false; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2571 | } |
| 2572 | |
| 2573 | /* |
| 2574 | * Map data from the skb to a pipe. Handles the linear part,
Tom Herbert | fa9835e | 2016-03-07 14:11:04 -0800 | [diff] [blame] | 2575 | * the fragments, and the frag list.
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2576 | */ |
Hannes Frederic Sowa | a60e3cc | 2015-05-21 17:00:00 +0200 | [diff] [blame] | 2577 | int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2578 | struct pipe_inode_info *pipe, unsigned int tlen, |
Al Viro | 2586926 | 2016-09-17 21:02:10 -0400 | [diff] [blame] | 2579 | unsigned int flags) |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2580 | { |
Eric Dumazet | 41c73a0 | 2012-04-22 12:26:16 +0000 | [diff] [blame] | 2581 | struct partial_page partial[MAX_SKB_FRAGS]; |
| 2582 | struct page *pages[MAX_SKB_FRAGS]; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2583 | struct splice_pipe_desc spd = { |
| 2584 | .pages = pages, |
| 2585 | .partial = partial, |
Eric Dumazet | 047fe36 | 2012-06-12 15:24:40 +0200 | [diff] [blame] | 2586 | .nr_pages_max = MAX_SKB_FRAGS, |
Miklos Szeredi | 28a625c | 2014-01-22 19:36:57 +0100 | [diff] [blame] | 2587 | .ops = &nosteal_pipe_buf_ops, |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2588 | .spd_release = sock_spd_release, |
| 2589 | }; |
Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 2590 | int ret = 0; |
| 2591 | |
Tom Herbert | fa9835e | 2016-03-07 14:11:04 -0800 | [diff] [blame] | 2592 | __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk); |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2593 | |
Hannes Frederic Sowa | a60e3cc | 2015-05-21 17:00:00 +0200 | [diff] [blame] | 2594 | if (spd.nr_pages) |
Al Viro | 2586926 | 2016-09-17 21:02:10 -0400 | [diff] [blame] | 2595 | ret = splice_to_pipe(pipe, &spd); |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2596 | |
Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 2597 | return ret; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2598 | } |
Hannes Frederic Sowa | 2b51457 | 2015-05-21 17:00:01 +0200 | [diff] [blame] | 2599 | EXPORT_SYMBOL_GPL(skb_splice_bits); |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 2600 | |
Cong Wang | 0739cd2 | 2021-03-30 19:32:24 -0700 | [diff] [blame] | 2601 | static int sendmsg_unlocked(struct sock *sk, struct msghdr *msg, |
| 2602 | struct kvec *vec, size_t num, size_t size) |
| 2603 | { |
| 2604 | struct socket *sock = sk->sk_socket; |
| 2605 | |
| 2606 | if (!sock) |
| 2607 | return -EINVAL; |
| 2608 | return kernel_sendmsg(sock, msg, vec, num, size); |
| 2609 | } |
| 2610 | |
| 2611 | static int sendpage_unlocked(struct sock *sk, struct page *page, int offset, |
| 2612 | size_t size, int flags) |
| 2613 | { |
| 2614 | struct socket *sock = sk->sk_socket; |
| 2615 | |
| 2616 | if (!sock) |
| 2617 | return -EINVAL; |
| 2618 | return kernel_sendpage(sock, page, offset, size, flags); |
| 2619 | } |
| 2620 | |
| 2621 | typedef int (*sendmsg_func)(struct sock *sk, struct msghdr *msg, |
| 2622 | struct kvec *vec, size_t num, size_t size); |
| 2623 | typedef int (*sendpage_func)(struct sock *sk, struct page *page, int offset, |
| 2624 | size_t size, int flags); |
| 2625 | static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, |
| 2626 | int len, sendmsg_func sendmsg, sendpage_func sendpage) |
Tom Herbert | 20bf50d | 2017-07-28 16:22:42 -0700 | [diff] [blame] | 2627 | { |
| 2628 | unsigned int orig_len = len; |
| 2629 | struct sk_buff *head = skb; |
| 2630 | unsigned short fragidx; |
| 2631 | int slen, ret; |
| 2632 | |
| 2633 | do_frag_list: |
| 2634 | |
| 2635 | /* Deal with head data */ |
| 2636 | while (offset < skb_headlen(skb) && len) { |
| 2637 | struct kvec kv; |
| 2638 | struct msghdr msg; |
| 2639 | |
| 2640 | slen = min_t(int, len, skb_headlen(skb) - offset); |
| 2641 | kv.iov_base = skb->data + offset; |
John Fastabend | db5980d | 2017-08-15 22:31:34 -0700 | [diff] [blame] | 2642 | kv.iov_len = slen; |
Tom Herbert | 20bf50d | 2017-07-28 16:22:42 -0700 | [diff] [blame] | 2643 | memset(&msg, 0, sizeof(msg)); |
John Fastabend | bd95e678 | 2019-05-24 08:01:00 -0700 | [diff] [blame] | 2644 | msg.msg_flags = MSG_DONTWAIT; |
Tom Herbert | 20bf50d | 2017-07-28 16:22:42 -0700 | [diff] [blame] | 2645 | |
Cong Wang | 0739cd2 | 2021-03-30 19:32:24 -0700 | [diff] [blame] | 2646 | ret = INDIRECT_CALL_2(sendmsg, kernel_sendmsg_locked, |
| 2647 | sendmsg_unlocked, sk, &msg, &kv, 1, slen); |
Tom Herbert | 20bf50d | 2017-07-28 16:22:42 -0700 | [diff] [blame] | 2648 | if (ret <= 0) |
| 2649 | goto error; |
| 2650 | |
| 2651 | offset += ret; |
| 2652 | len -= ret; |
| 2653 | } |
| 2654 | |
| 2655 | /* Was all of the data in the skb head? */
| 2656 | if (!len) |
| 2657 | goto out; |
| 2658 | |
| 2659 | /* Make offset relative to start of frags */ |
| 2660 | offset -= skb_headlen(skb); |
| 2661 | |
| 2662 | /* Find where we are in frag list */ |
| 2663 | for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { |
| 2664 | skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; |
| 2665 | |
Matthew Wilcox (Oracle) | d8e18a5 | 2019-07-22 20:08:26 -0700 | [diff] [blame] | 2666 | if (offset < skb_frag_size(frag)) |
Tom Herbert | 20bf50d | 2017-07-28 16:22:42 -0700 | [diff] [blame] | 2667 | break; |
| 2668 | |
Matthew Wilcox (Oracle) | d8e18a5 | 2019-07-22 20:08:26 -0700 | [diff] [blame] | 2669 | offset -= skb_frag_size(frag); |
Tom Herbert | 20bf50d | 2017-07-28 16:22:42 -0700 | [diff] [blame] | 2670 | } |
| 2671 | |
| 2672 | for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { |
| 2673 | skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; |
| 2674 | |
Matthew Wilcox (Oracle) | d8e18a5 | 2019-07-22 20:08:26 -0700 | [diff] [blame] | 2675 | slen = min_t(size_t, len, skb_frag_size(frag) - offset); |
Tom Herbert | 20bf50d | 2017-07-28 16:22:42 -0700 | [diff] [blame] | 2676 | |
| 2677 | while (slen) { |
Cong Wang | 0739cd2 | 2021-03-30 19:32:24 -0700 | [diff] [blame] | 2678 | ret = INDIRECT_CALL_2(sendpage, kernel_sendpage_locked, |
| 2679 | sendpage_unlocked, sk, |
| 2680 | skb_frag_page(frag), |
| 2681 | skb_frag_off(frag) + offset, |
| 2682 | slen, MSG_DONTWAIT); |
Tom Herbert | 20bf50d | 2017-07-28 16:22:42 -0700 | [diff] [blame] | 2683 | if (ret <= 0) |
| 2684 | goto error; |
| 2685 | |
| 2686 | len -= ret; |
| 2687 | offset += ret; |
| 2688 | slen -= ret; |
| 2689 | } |
| 2690 | |
| 2691 | offset = 0; |
| 2692 | } |
| 2693 | |
| 2694 | if (len) { |
| 2695 | /* Process any frag lists */ |
| 2696 | |
| 2697 | if (skb == head) { |
| 2698 | if (skb_has_frag_list(skb)) { |
| 2699 | skb = skb_shinfo(skb)->frag_list; |
| 2700 | goto do_frag_list; |
| 2701 | } |
| 2702 | } else if (skb->next) { |
| 2703 | skb = skb->next; |
| 2704 | goto do_frag_list; |
| 2705 | } |
| 2706 | } |
| 2707 | |
| 2708 | out: |
| 2709 | return orig_len - len; |
| 2710 | |
| 2711 | error: |
| 2712 | return orig_len == len ? ret : orig_len - len; |
| 2713 | } |
Cong Wang | 0739cd2 | 2021-03-30 19:32:24 -0700 | [diff] [blame] | 2714 | |
| 2715 | /* Send skb data on a socket. Socket must be locked. */ |
| 2716 | int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, |
| 2717 | int len) |
| 2718 | { |
| 2719 | return __skb_send_sock(sk, skb, offset, len, kernel_sendmsg_locked, |
| 2720 | kernel_sendpage_locked); |
| 2721 | } |
Tom Herbert | 20bf50d | 2017-07-28 16:22:42 -0700 | [diff] [blame] | 2722 | EXPORT_SYMBOL_GPL(skb_send_sock_locked); |
| 2723 | |
Cong Wang | 0739cd2 | 2021-03-30 19:32:24 -0700 | [diff] [blame] | 2724 | /* Send skb data on a socket. Socket must be unlocked. */ |
| 2725 | int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len) |
| 2726 | { |
| 2727 | return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked, |
| 2728 | sendpage_unlocked); |
| 2729 | } |
| 2730 | |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2731 | /** |
| 2732 | * skb_store_bits - store bits from kernel buffer to skb |
| 2733 | * @skb: destination buffer |
| 2734 | * @offset: offset in destination |
| 2735 | * @from: source buffer |
| 2736 | * @len: number of bytes to copy |
| 2737 | * |
| 2738 | * Copy the specified number of bytes from the source buffer to the |
| 2739 | * destination skb. This function handles all the messy bits of |
| 2740 | * traversing fragment lists and such. |
| 2741 | */ |
| 2742 | |
Stephen Hemminger | 0c6fcc8 | 2007-04-20 16:40:01 -0700 | [diff] [blame] | 2743 | int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2744 | { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2745 | int start = skb_headlen(skb); |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2746 | struct sk_buff *frag_iter; |
| 2747 | int i, copy; |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2748 | |
| 2749 | if (offset > (int)skb->len - len) |
| 2750 | goto fault; |
| 2751 | |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2752 | if ((copy = start - offset) > 0) { |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2753 | if (copy > len) |
| 2754 | copy = len; |
Arnaldo Carvalho de Melo | 27d7ff4 | 2007-03-31 11:55:19 -0300 | [diff] [blame] | 2755 | skb_copy_to_linear_data_offset(skb, offset, from, copy); |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2756 | if ((len -= copy) == 0) |
| 2757 | return 0; |
| 2758 | offset += copy; |
| 2759 | from += copy; |
| 2760 | } |
| 2761 | |
| 2762 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
| 2763 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2764 | int end; |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2765 | |
Ilpo Järvinen | 547b792 | 2008-07-25 21:43:18 -0700 | [diff] [blame] | 2766 | WARN_ON(start > offset + len); |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2767 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2768 | end = start + skb_frag_size(frag); |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2769 | if ((copy = end - offset) > 0) { |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 2770 | u32 p_off, p_len, copied; |
| 2771 | struct page *p; |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2772 | u8 *vaddr; |
| 2773 | |
| 2774 | if (copy > len) |
| 2775 | copy = len; |
| 2776 | |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 2777 | skb_frag_foreach_page(frag, |
Jonathan Lemon | b54c9d5 | 2019-07-30 07:40:33 -0700 | [diff] [blame] | 2778 | skb_frag_off(frag) + offset - start, |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 2779 | copy, p, p_off, p_len, copied) { |
| 2780 | vaddr = kmap_atomic(p); |
| 2781 | memcpy(vaddr + p_off, from + copied, p_len); |
| 2782 | kunmap_atomic(vaddr); |
| 2783 | } |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2784 | |
| 2785 | if ((len -= copy) == 0) |
| 2786 | return 0; |
| 2787 | offset += copy; |
| 2788 | from += copy; |
| 2789 | } |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2790 | start = end; |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2791 | } |
| 2792 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2793 | skb_walk_frags(skb, frag_iter) { |
| 2794 | int end; |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2795 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2796 | WARN_ON(start > offset + len); |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2797 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2798 | end = start + frag_iter->len; |
| 2799 | if ((copy = end - offset) > 0) { |
| 2800 | if (copy > len) |
| 2801 | copy = len; |
| 2802 | if (skb_store_bits(frag_iter, offset - start, |
| 2803 | from, copy)) |
| 2804 | goto fault; |
| 2805 | if ((len -= copy) == 0) |
| 2806 | return 0; |
| 2807 | offset += copy; |
| 2808 | from += copy; |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2809 | } |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2810 | start = end; |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2811 | } |
| 2812 | if (!len) |
| 2813 | return 0; |
| 2814 | |
| 2815 | fault: |
| 2816 | return -EFAULT; |
| 2817 | } |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2818 | EXPORT_SYMBOL(skb_store_bits); |
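/* A sketch of writing into an skb in place; the function name is
 * hypothetical. skb_store_bits() walks the fragments but does not
 * copy-on-write, so the region is made private and writable first.
 */
#if 0
static int rewrite_bytes_sketch(struct sk_buff *skb, int offset,
				const void *buf, int len)
{
	int err;

	err = skb_ensure_writable(skb, offset + len);
	if (err)
		return err;
	return skb_store_bits(skb, offset, buf, len);
}
#endif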
| 2819 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2820 | /* Checksum skb data. */ |
Daniel Borkmann | 2817a33 | 2013-10-30 11:50:51 +0100 | [diff] [blame] | 2821 | __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, |
| 2822 | __wsum csum, const struct skb_checksum_ops *ops) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2823 | { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2824 | int start = skb_headlen(skb); |
| 2825 | int i, copy = start - offset; |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2826 | struct sk_buff *frag_iter; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2827 | int pos = 0; |
| 2828 | |
| 2829 | /* Checksum header. */ |
| 2830 | if (copy > 0) { |
| 2831 | if (copy > len) |
| 2832 | copy = len; |
Matteo Croce | 2544af0 | 2019-05-29 17:13:48 +0200 | [diff] [blame] | 2833 | csum = INDIRECT_CALL_1(ops->update, csum_partial_ext, |
| 2834 | skb->data + offset, copy, csum); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2835 | if ((len -= copy) == 0) |
| 2836 | return csum; |
| 2837 | offset += copy; |
| 2838 | pos = copy; |
| 2839 | } |
| 2840 | |
| 2841 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2842 | int end; |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 2843 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2844 | |
Ilpo Järvinen | 547b792 | 2008-07-25 21:43:18 -0700 | [diff] [blame] | 2845 | WARN_ON(start > offset + len); |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2846 | |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 2847 | end = start + skb_frag_size(frag); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2848 | if ((copy = end - offset) > 0) { |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 2849 | u32 p_off, p_len, copied; |
| 2850 | struct page *p; |
Al Viro | 44bb936 | 2006-11-14 21:36:14 -0800 | [diff] [blame] | 2851 | __wsum csum2; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2852 | u8 *vaddr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2853 | |
| 2854 | if (copy > len) |
| 2855 | copy = len; |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 2856 | |
| 2857 | skb_frag_foreach_page(frag, |
Jonathan Lemon | b54c9d5 | 2019-07-30 07:40:33 -0700 | [diff] [blame] | 2858 | skb_frag_off(frag) + offset - start, |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 2859 | copy, p, p_off, p_len, copied) { |
| 2860 | vaddr = kmap_atomic(p); |
Matteo Croce | 2544af0 | 2019-05-29 17:13:48 +0200 | [diff] [blame] | 2861 | csum2 = INDIRECT_CALL_1(ops->update, |
| 2862 | csum_partial_ext, |
| 2863 | vaddr + p_off, p_len, 0); |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 2864 | kunmap_atomic(vaddr); |
Matteo Croce | 2544af0 | 2019-05-29 17:13:48 +0200 | [diff] [blame] | 2865 | csum = INDIRECT_CALL_1(ops->combine, |
| 2866 | csum_block_add_ext, csum, |
| 2867 | csum2, pos, p_len); |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 2868 | pos += p_len; |
| 2869 | } |
| 2870 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2871 | if (!(len -= copy)) |
| 2872 | return csum; |
| 2873 | offset += copy; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2874 | } |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2875 | start = end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2876 | } |
| 2877 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2878 | skb_walk_frags(skb, frag_iter) { |
| 2879 | int end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2880 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2881 | WARN_ON(start > offset + len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2882 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2883 | end = start + frag_iter->len; |
| 2884 | if ((copy = end - offset) > 0) { |
| 2885 | __wsum csum2; |
| 2886 | if (copy > len) |
| 2887 | copy = len; |
Daniel Borkmann | 2817a33 | 2013-10-30 11:50:51 +0100 | [diff] [blame] | 2888 | csum2 = __skb_checksum(frag_iter, offset - start, |
| 2889 | copy, 0, ops); |
Matteo Croce | 2544af0 | 2019-05-29 17:13:48 +0200 | [diff] [blame] | 2890 | csum = INDIRECT_CALL_1(ops->combine, csum_block_add_ext, |
| 2891 | csum, csum2, pos, copy); |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2892 | if ((len -= copy) == 0) |
| 2893 | return csum; |
| 2894 | offset += copy; |
| 2895 | pos += copy; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2896 | } |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2897 | start = end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2898 | } |
Kris Katterjohn | 09a6266 | 2006-01-08 22:24:28 -0800 | [diff] [blame] | 2899 | BUG_ON(len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2900 | |
| 2901 | return csum; |
| 2902 | } |
Daniel Borkmann | 2817a33 | 2013-10-30 11:50:51 +0100 | [diff] [blame] | 2903 | EXPORT_SYMBOL(__skb_checksum); |
| 2904 | |
| 2905 | __wsum skb_checksum(const struct sk_buff *skb, int offset, |
| 2906 | int len, __wsum csum) |
| 2907 | { |
| 2908 | const struct skb_checksum_ops ops = { |
Daniel Borkmann | cea80ea | 2013-11-04 17:10:25 +0100 | [diff] [blame] | 2909 | .update = csum_partial_ext, |
Daniel Borkmann | 2817a33 | 2013-10-30 11:50:51 +0100 | [diff] [blame] | 2910 | .combine = csum_block_add_ext, |
| 2911 | }; |
| 2912 | |
| 2913 | return __skb_checksum(skb, offset, len, csum, &ops); |
| 2914 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2915 | EXPORT_SYMBOL(skb_checksum); |
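
/*
 * Example (editor's addition, not part of skbuff.c): a minimal sketch of
 * plugging custom helpers into __skb_checksum().  These crc32c ops mirror
 * what SCTP wires up through crc32c_csum_stub; the example_ names are
 * hypothetical, and <linux/crc32.h> / <linux/crc32c.h> are assumed to be
 * available in this translation unit.
 */
static __wsum example_crc32c_update(const void *mem, int len, __wsum wsum)
{
	/* fold the next block into the running crc32c value */
	return (__force __wsum)crc32c((__force u32)wsum, mem, len);
}

static __wsum example_crc32c_combine(__wsum csum, __wsum csum2,
				     int offset, int len)
{
	/* combine two partial crc32c values; 'offset' is unused for crc32c */
	return (__force __wsum)__crc32c_le_combine((__force u32)csum,
						   (__force u32)csum2, len);
}

static const struct skb_checksum_ops example_crc32c_ops = {
	.update  = example_crc32c_update,
	.combine = example_crc32c_combine,
};

static __wsum example_skb_crc32c(const struct sk_buff *skb, int len)
{
	/* crc32c conventionally starts from an all-ones seed */
	return __skb_checksum(skb, 0, len, ~(__force __wsum)0,
			      &example_crc32c_ops);
}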
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2916 | |
| 2917 | /* Both of the above in one pass: copy the data and checksum it. */ |
| 2918 | |
Al Viro | 81d7766 | 2006-11-14 21:37:33 -0800 | [diff] [blame] | 2919 | __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, |
Al Viro | 8d5930d | 2020-07-10 20:07:10 -0400 | [diff] [blame] | 2920 | u8 *to, int len) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2921 | { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2922 | int start = skb_headlen(skb); |
| 2923 | int i, copy = start - offset; |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2924 | struct sk_buff *frag_iter; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2925 | int pos = 0; |
Al Viro | 8d5930d | 2020-07-10 20:07:10 -0400 | [diff] [blame] | 2926 | __wsum csum = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2927 | |
| 2928 | /* Copy header. */ |
| 2929 | if (copy > 0) { |
| 2930 | if (copy > len) |
| 2931 | copy = len; |
| 2932 | csum = csum_partial_copy_nocheck(skb->data + offset, to, |
Al Viro | cc44c17 | 2020-07-11 00:12:07 -0400 | [diff] [blame] | 2933 | copy); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2934 | if ((len -= copy) == 0) |
| 2935 | return csum; |
| 2936 | offset += copy; |
| 2937 | to += copy; |
| 2938 | pos = copy; |
| 2939 | } |
| 2940 | |
| 2941 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2942 | int end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2943 | |
Ilpo Järvinen | 547b792 | 2008-07-25 21:43:18 -0700 | [diff] [blame] | 2944 | WARN_ON(start > offset + len); |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2945 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2946 | end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2947 | if ((copy = end - offset) > 0) { |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 2948 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
| 2949 | u32 p_off, p_len, copied; |
| 2950 | struct page *p; |
Al Viro | 5084205 | 2006-11-14 21:36:34 -0800 | [diff] [blame] | 2951 | __wsum csum2; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2952 | u8 *vaddr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2953 | |
| 2954 | if (copy > len) |
| 2955 | copy = len; |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 2956 | |
| 2957 | skb_frag_foreach_page(frag, |
Jonathan Lemon | b54c9d5 | 2019-07-30 07:40:33 -0700 | [diff] [blame] | 2958 | skb_frag_off(frag) + offset - start, |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 2959 | copy, p, p_off, p_len, copied) { |
| 2960 | vaddr = kmap_atomic(p); |
| 2961 | csum2 = csum_partial_copy_nocheck(vaddr + p_off, |
| 2962 | to + copied, |
Al Viro | cc44c17 | 2020-07-11 00:12:07 -0400 | [diff] [blame] | 2963 | p_len); |
Willem de Bruijn | c613c20 | 2017-07-31 08:15:47 -0400 | [diff] [blame] | 2964 | kunmap_atomic(vaddr); |
| 2965 | csum = csum_block_add(csum, csum2, pos); |
| 2966 | pos += p_len; |
| 2967 | } |
| 2968 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2969 | if (!(len -= copy)) |
| 2970 | return csum; |
| 2971 | offset += copy; |
| 2972 | to += copy; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2973 | } |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2974 | start = end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2975 | } |
| 2976 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2977 | skb_walk_frags(skb, frag_iter) { |
| 2978 | __wsum csum2; |
| 2979 | int end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2980 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2981 | WARN_ON(start > offset + len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2982 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2983 | end = start + frag_iter->len; |
| 2984 | if ((copy = end - offset) > 0) { |
| 2985 | if (copy > len) |
| 2986 | copy = len; |
| 2987 | csum2 = skb_copy_and_csum_bits(frag_iter, |
| 2988 | offset - start, |
Al Viro | 8d5930d | 2020-07-10 20:07:10 -0400 | [diff] [blame] | 2989 | to, copy); |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2990 | csum = csum_block_add(csum, csum2, pos); |
| 2991 | if ((len -= copy) == 0) |
| 2992 | return csum; |
| 2993 | offset += copy; |
| 2994 | to += copy; |
| 2995 | pos += copy; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2996 | } |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2997 | start = end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2998 | } |
Kris Katterjohn | 09a6266 | 2006-01-08 22:24:28 -0800 | [diff] [blame] | 2999 | BUG_ON(len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3000 | return csum; |
| 3001 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3002 | EXPORT_SYMBOL(skb_copy_and_csum_bits); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3003 | |
Cong Wang | 49f8e83 | 2018-11-08 14:05:42 -0800 | [diff] [blame] | 3004 | __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len) |
| 3005 | { |
| 3006 | __sum16 sum; |
| 3007 | |
| 3008 | sum = csum_fold(skb_checksum(skb, 0, len, skb->csum)); |
Cong Wang | 1464193 | 2018-11-26 09:31:26 -0800 | [diff] [blame] | 3009 | /* See comments in __skb_checksum_complete(). */ |
Cong Wang | 49f8e83 | 2018-11-08 14:05:42 -0800 | [diff] [blame] | 3010 | if (likely(!sum)) { |
| 3011 | if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && |
| 3012 | !skb->csum_complete_sw) |
Cong Wang | 7fe50ac | 2018-11-12 14:47:18 -0800 | [diff] [blame] | 3013 | netdev_rx_csum_fault(skb->dev, skb); |
Cong Wang | 49f8e83 | 2018-11-08 14:05:42 -0800 | [diff] [blame] | 3014 | } |
| 3015 | if (!skb_shared(skb)) |
| 3016 | skb->csum_valid = !sum; |
| 3017 | return sum; |
| 3018 | } |
| 3019 | EXPORT_SYMBOL(__skb_checksum_complete_head); |
| 3020 | |
Cong Wang | 1464193 | 2018-11-26 09:31:26 -0800 | [diff] [blame] | 3021 | /* This function assumes skb->csum already holds pseudo header's checksum, |
| 3022 | * which has been changed from the hardware checksum, for example, by |
| 3023 | * __skb_checksum_validate_complete(). Also, the original skb->csum must |
| 3024 | * have failed validation for the CHECKSUM_COMPLETE case. |
| 3025 | * |
| 3026 | * It returns non-zero if the recomputed checksum is still invalid, otherwise |
| 3027 | * zero. The new checksum is stored back into skb->csum unless the skb is |
| 3028 | * shared. |
| 3029 | */ |
Cong Wang | 49f8e83 | 2018-11-08 14:05:42 -0800 | [diff] [blame] | 3030 | __sum16 __skb_checksum_complete(struct sk_buff *skb) |
| 3031 | { |
| 3032 | __wsum csum; |
| 3033 | __sum16 sum; |
| 3034 | |
| 3035 | csum = skb_checksum(skb, 0, skb->len, 0); |
| 3036 | |
Cong Wang | 49f8e83 | 2018-11-08 14:05:42 -0800 | [diff] [blame] | 3037 | sum = csum_fold(csum_add(skb->csum, csum)); |
Cong Wang | 1464193 | 2018-11-26 09:31:26 -0800 | [diff] [blame] | 3038 | /* This check is inverted, because we already knew the hardware |
| 3039 | * checksum is invalid before calling this function. So, if the |
| 3040 | * re-computed checksum is valid instead, then we have a mismatch |
| 3041 | * between the original skb->csum and skb_checksum(). This means either |
| 3042 | * the original hardware checksum is incorrect or we screwed up skb->csum |
| 3043 | * when moving skb->data around. |
| 3044 | */ |
Cong Wang | 49f8e83 | 2018-11-08 14:05:42 -0800 | [diff] [blame] | 3045 | if (likely(!sum)) { |
| 3046 | if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && |
| 3047 | !skb->csum_complete_sw) |
Cong Wang | 7fe50ac | 2018-11-12 14:47:18 -0800 | [diff] [blame] | 3048 | netdev_rx_csum_fault(skb->dev, skb); |
Cong Wang | 49f8e83 | 2018-11-08 14:05:42 -0800 | [diff] [blame] | 3049 | } |
| 3050 | |
| 3051 | if (!skb_shared(skb)) { |
| 3052 | /* Save full packet checksum */ |
| 3053 | skb->csum = csum; |
| 3054 | skb->ip_summed = CHECKSUM_COMPLETE; |
| 3055 | skb->csum_complete_sw = 1; |
| 3056 | skb->csum_valid = !sum; |
| 3057 | } |
| 3058 | |
| 3059 | return sum; |
| 3060 | } |
| 3061 | EXPORT_SYMBOL(__skb_checksum_complete); |
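
/*
 * Example (editor's addition): how a receive path typically consumes the
 * helper above.  A zero folded sum means the packet is intact; the
 * example_ wrapper is hypothetical.
 */
static bool example_skb_csum_ok(struct sk_buff *skb)
{
	/* hardware already vouched for the checksum? nothing to do */
	if (skb_csum_unnecessary(skb))
		return true;

	/* a non-zero folded sum means the packet is corrupt */
	return __skb_checksum_complete(skb) == 0;
}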
| 3062 | |
Davide Caratti | 9617813 | 2017-05-18 15:44:37 +0200 | [diff] [blame] | 3063 | static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum) |
| 3064 | { |
| 3065 | net_warn_ratelimited( |
| 3066 | "%s: attempt to compute crc32c without libcrc32c.ko\n", |
| 3067 | __func__); |
| 3068 | return 0; |
| 3069 | } |
| 3070 | |
| 3071 | static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2, |
| 3072 | int offset, int len) |
| 3073 | { |
| 3074 | net_warn_ratelimited( |
| 3075 | "%s: attempt to compute crc32c without libcrc32c.ko\n", |
| 3076 | __func__); |
| 3077 | return 0; |
| 3078 | } |
| 3079 | |
| 3080 | static const struct skb_checksum_ops default_crc32c_ops = { |
| 3081 | .update = warn_crc32c_csum_update, |
| 3082 | .combine = warn_crc32c_csum_combine, |
| 3083 | }; |
| 3084 | |
| 3085 | const struct skb_checksum_ops *crc32c_csum_stub __read_mostly = |
| 3086 | &default_crc32c_ops; |
| 3087 | EXPORT_SYMBOL(crc32c_csum_stub); |
| 3088 | |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 3089 | /** |
| 3090 | * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy() |
| 3091 | * @from: source buffer |
| 3092 | * |
| 3093 | * Calculates the amount of linear headroom needed in the 'to' skb passed |
| 3094 | * into skb_zerocopy(). |
| 3095 | */ |
| 3096 | unsigned int |
| 3097 | skb_zerocopy_headlen(const struct sk_buff *from) |
| 3098 | { |
| 3099 | unsigned int hlen = 0; |
| 3100 | |
| 3101 | if (!from->head_frag || |
| 3102 | skb_headlen(from) < L1_CACHE_BYTES || |
Pravin B Shelar | a17ad09 | 2021-07-15 16:59:00 -0700 | [diff] [blame] | 3103 | skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) { |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 3104 | hlen = skb_headlen(from); |
Pravin B Shelar | a17ad09 | 2021-07-15 16:59:00 -0700 | [diff] [blame] | 3105 | if (!hlen) |
| 3106 | hlen = from->len; |
| 3107 | } |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 3108 | |
| 3109 | if (skb_has_frag_list(from)) |
| 3110 | hlen = from->len; |
| 3111 | |
| 3112 | return hlen; |
| 3113 | } |
| 3114 | EXPORT_SYMBOL_GPL(skb_zerocopy_headlen); |
| 3115 | |
| 3116 | /** |
| 3117 | * skb_zerocopy - Zero copy skb to skb |
| 3118 | * @to: destination buffer |
Masanari Iida | 7fceb4d | 2014-01-29 01:05:28 +0900 | [diff] [blame] | 3119 | * @from: source buffer |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 3120 | * @len: number of bytes to copy from source buffer |
| 3121 | * @hlen: size of linear headroom in destination buffer |
| 3122 | * |
| 3123 | * Copies up to @len bytes from @from to @to by creating references |
| 3124 | * to the frags in the source buffer. |
| 3125 | * |
| 3126 | * The @hlen as calculated by skb_zerocopy_headlen() specifies the |
| 3127 | * headroom in the @to buffer. |
Zoltan Kiss | 36d5fe6 | 2014-03-26 22:37:45 +0000 | [diff] [blame] | 3128 | * |
| 3129 | * Return value: |
| 3130 | * 0: everything is OK |
| 3131 | * -ENOMEM: couldn't orphan frags of @from due to lack of memory |
| 3132 | * -EFAULT: skb_copy_bits() found some problem with skb geometry |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 3133 | */ |
Zoltan Kiss | 36d5fe6 | 2014-03-26 22:37:45 +0000 | [diff] [blame] | 3134 | int |
| 3135 | skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen) |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 3136 | { |
| 3137 | int i, j = 0; |
| 3138 | int plen = 0; /* length of skb->head fragment */ |
Zoltan Kiss | 36d5fe6 | 2014-03-26 22:37:45 +0000 | [diff] [blame] | 3139 | int ret; |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 3140 | struct page *page; |
| 3141 | unsigned int offset; |
| 3142 | |
| 3143 | BUG_ON(!from->head_frag && !hlen); |
| 3144 | |
| 3145 | /* dont bother with small payloads */ |
Zoltan Kiss | 36d5fe6 | 2014-03-26 22:37:45 +0000 | [diff] [blame] | 3146 | if (len <= skb_tailroom(to)) |
| 3147 | return skb_copy_bits(from, 0, skb_put(to, len), len); |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 3148 | |
| 3149 | if (hlen) { |
Zoltan Kiss | 36d5fe6 | 2014-03-26 22:37:45 +0000 | [diff] [blame] | 3150 | ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen); |
| 3151 | if (unlikely(ret)) |
| 3152 | return ret; |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 3153 | len -= hlen; |
| 3154 | } else { |
| 3155 | plen = min_t(int, skb_headlen(from), len); |
| 3156 | if (plen) { |
| 3157 | page = virt_to_head_page(from->head); |
| 3158 | offset = from->data - (unsigned char *)page_address(page); |
| 3159 | __skb_fill_page_desc(to, 0, page, offset, plen); |
| 3160 | get_page(page); |
| 3161 | j = 1; |
| 3162 | len -= plen; |
| 3163 | } |
| 3164 | } |
| 3165 | |
| 3166 | to->truesize += len + plen; |
| 3167 | to->len += len + plen; |
| 3168 | to->data_len += len + plen; |
| 3169 | |
Zoltan Kiss | 36d5fe6 | 2014-03-26 22:37:45 +0000 | [diff] [blame] | 3170 | if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) { |
| 3171 | skb_tx_error(from); |
| 3172 | return -ENOMEM; |
| 3173 | } |
Willem de Bruijn | 1f8b977 | 2017-08-03 16:29:41 -0400 | [diff] [blame] | 3174 | skb_zerocopy_clone(to, from, GFP_ATOMIC); |
Zoltan Kiss | 36d5fe6 | 2014-03-26 22:37:45 +0000 | [diff] [blame] | 3175 | |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 3176 | for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { |
Matthew Wilcox (Oracle) | d8e18a5 | 2019-07-22 20:08:26 -0700 | [diff] [blame] | 3177 | int size; |
| 3178 | |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 3179 | if (!len) |
| 3180 | break; |
| 3181 | skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i]; |
Matthew Wilcox (Oracle) | d8e18a5 | 2019-07-22 20:08:26 -0700 | [diff] [blame] | 3182 | size = min_t(int, skb_frag_size(&skb_shinfo(to)->frags[j]), |
| 3183 | len); |
| 3184 | skb_frag_size_set(&skb_shinfo(to)->frags[j], size); |
| 3185 | len -= size; |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 3186 | skb_frag_ref(to, j); |
| 3187 | j++; |
| 3188 | } |
| 3189 | skb_shinfo(to)->nr_frags = j; |
Zoltan Kiss | 36d5fe6 | 2014-03-26 22:37:45 +0000 | [diff] [blame] | 3190 | |
| 3191 | return 0; |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 3192 | } |
| 3193 | EXPORT_SYMBOL_GPL(skb_zerocopy); |
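
/*
 * Example (editor's addition): the usual pairing of skb_zerocopy_headlen()
 * and skb_zerocopy(), similar in spirit to callers like nfnetlink_queue.
 * Allocation sizing is a sketch, and error handling is collapsed to NULL.
 */
static struct sk_buff *example_zerocopy_clone(struct sk_buff *from, int len)
{
	unsigned int hlen = skb_zerocopy_headlen(from);
	struct sk_buff *to;

	to = alloc_skb(hlen, GFP_ATOMIC);
	if (!to)
		return NULL;

	/* copies up to hlen linearly, then takes references on the frags */
	if (skb_zerocopy(to, from, len, hlen)) {	/* -ENOMEM or -EFAULT */
		kfree_skb(to);
		return NULL;
	}
	return to;
}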
| 3194 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3195 | void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) |
| 3196 | { |
Al Viro | d3bc23e | 2006-11-14 21:24:49 -0800 | [diff] [blame] | 3197 | __wsum csum; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3198 | long csstart; |
| 3199 | |
Patrick McHardy | 84fa793 | 2006-08-29 16:44:56 -0700 | [diff] [blame] | 3200 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
Michał Mirosław | 55508d6 | 2010-12-14 15:24:08 +0000 | [diff] [blame] | 3201 | csstart = skb_checksum_start_offset(skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3202 | else |
| 3203 | csstart = skb_headlen(skb); |
| 3204 | |
Kris Katterjohn | 09a6266 | 2006-01-08 22:24:28 -0800 | [diff] [blame] | 3205 | BUG_ON(csstart > skb_headlen(skb)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3206 | |
Arnaldo Carvalho de Melo | d626f62 | 2007-03-27 18:55:52 -0300 | [diff] [blame] | 3207 | skb_copy_from_linear_data(skb, to, csstart); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3208 | |
| 3209 | csum = 0; |
| 3210 | if (csstart != skb->len) |
| 3211 | csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, |
Al Viro | 8d5930d | 2020-07-10 20:07:10 -0400 | [diff] [blame] | 3212 | skb->len - csstart); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3213 | |
Patrick McHardy | 84fa793 | 2006-08-29 16:44:56 -0700 | [diff] [blame] | 3214 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
Al Viro | ff1dcad | 2006-11-20 18:07:29 -0800 | [diff] [blame] | 3215 | long csstuff = csstart + skb->csum_offset; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3216 | |
Al Viro | d3bc23e | 2006-11-14 21:24:49 -0800 | [diff] [blame] | 3217 | *((__sum16 *)(to + csstuff)) = csum_fold(csum); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3218 | } |
| 3219 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3220 | EXPORT_SYMBOL(skb_copy_and_csum_dev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3221 | |
| 3222 | /** |
| 3223 | * skb_dequeue - remove from the head of the queue |
| 3224 | * @list: list to dequeue from |
| 3225 | * |
| 3226 | * Remove the head of the list. The list lock is taken so the function |
| 3227 | * may be used safely with other locking list functions. The head item is |
| 3228 | * returned or %NULL if the list is empty. |
| 3229 | */ |
| 3230 | |
| 3231 | struct sk_buff *skb_dequeue(struct sk_buff_head *list) |
| 3232 | { |
| 3233 | unsigned long flags; |
| 3234 | struct sk_buff *result; |
| 3235 | |
| 3236 | spin_lock_irqsave(&list->lock, flags); |
| 3237 | result = __skb_dequeue(list); |
| 3238 | spin_unlock_irqrestore(&list->lock, flags); |
| 3239 | return result; |
| 3240 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3241 | EXPORT_SYMBOL(skb_dequeue); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3242 | |
| 3243 | /** |
| 3244 | * skb_dequeue_tail - remove from the tail of the queue |
| 3245 | * @list: list to dequeue from |
| 3246 | * |
| 3247 | * Remove the tail of the list. The list lock is taken so the function |
| 3248 | * may be used safely with other locking list functions. The tail item is |
| 3249 | * returned or %NULL if the list is empty. |
| 3250 | */ |
| 3251 | struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list) |
| 3252 | { |
| 3253 | unsigned long flags; |
| 3254 | struct sk_buff *result; |
| 3255 | |
| 3256 | spin_lock_irqsave(&list->lock, flags); |
| 3257 | result = __skb_dequeue_tail(list); |
| 3258 | spin_unlock_irqrestore(&list->lock, flags); |
| 3259 | return result; |
| 3260 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3261 | EXPORT_SYMBOL(skb_dequeue_tail); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3262 | |
| 3263 | /** |
| 3264 | * skb_queue_purge - empty a list |
| 3265 | * @list: list to empty |
| 3266 | * |
| 3267 | * Delete all buffers on an &sk_buff list. Each buffer is removed from |
| 3268 | * the list and one reference dropped. This function takes the list |
| 3269 | * lock and is atomic with respect to other list locking functions. |
| 3270 | */ |
| 3271 | void skb_queue_purge(struct sk_buff_head *list) |
| 3272 | { |
| 3273 | struct sk_buff *skb; |
| 3274 | while ((skb = skb_dequeue(list)) != NULL) |
| 3275 | kfree_skb(skb); |
| 3276 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3277 | EXPORT_SYMBOL(skb_queue_purge); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3278 | |
| 3279 | /** |
Yaogong Wang | 9f5afea | 2016-09-07 14:49:28 -0700 | [diff] [blame] | 3280 | * skb_rbtree_purge - empty a skb rbtree |
| 3281 | * @root: root of the rbtree to empty |
Peter Oskolkov | 385114d | 2018-08-02 23:34:38 +0000 | [diff] [blame] | 3282 | * Return: the sum of truesizes of all purged skbs. |
Yaogong Wang | 9f5afea | 2016-09-07 14:49:28 -0700 | [diff] [blame] | 3283 | * |
| 3284 | * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from |
| 3285 | * the rbtree and one reference dropped. This function does not take |
| 3286 | * any lock. Synchronization should be handled by the caller (e.g., TCP |
| 3287 | * out-of-order queue is protected by the socket lock). |
| 3288 | */ |
Peter Oskolkov | 385114d | 2018-08-02 23:34:38 +0000 | [diff] [blame] | 3289 | unsigned int skb_rbtree_purge(struct rb_root *root) |
Yaogong Wang | 9f5afea | 2016-09-07 14:49:28 -0700 | [diff] [blame] | 3290 | { |
Eric Dumazet | 7c90584 | 2017-09-23 12:39:12 -0700 | [diff] [blame] | 3291 | struct rb_node *p = rb_first(root); |
Peter Oskolkov | 385114d | 2018-08-02 23:34:38 +0000 | [diff] [blame] | 3292 | unsigned int sum = 0; |
Yaogong Wang | 9f5afea | 2016-09-07 14:49:28 -0700 | [diff] [blame] | 3293 | |
Eric Dumazet | 7c90584 | 2017-09-23 12:39:12 -0700 | [diff] [blame] | 3294 | while (p) { |
| 3295 | struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode); |
| 3296 | |
| 3297 | p = rb_next(p); |
| 3298 | rb_erase(&skb->rbnode, root); |
Peter Oskolkov | 385114d | 2018-08-02 23:34:38 +0000 | [diff] [blame] | 3299 | sum += skb->truesize; |
Yaogong Wang | 9f5afea | 2016-09-07 14:49:28 -0700 | [diff] [blame] | 3300 | kfree_skb(skb); |
Eric Dumazet | 7c90584 | 2017-09-23 12:39:12 -0700 | [diff] [blame] | 3301 | } |
Peter Oskolkov | 385114d | 2018-08-02 23:34:38 +0000 | [diff] [blame] | 3302 | return sum; |
Yaogong Wang | 9f5afea | 2016-09-07 14:49:28 -0700 | [diff] [blame] | 3303 | } |
| 3304 | |
| 3305 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3306 | * skb_queue_head - queue a buffer at the list head |
| 3307 | * @list: list to use |
| 3308 | * @newsk: buffer to queue |
| 3309 | * |
| 3310 | * Queue a buffer at the start of the list. This function takes the |
| 3311 | * list lock and can be used safely with other locking &sk_buff |
| 3312 | * functions. |
| 3313 | * |
| 3314 | * A buffer cannot be placed on two lists at the same time. |
| 3315 | */ |
| 3316 | void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) |
| 3317 | { |
| 3318 | unsigned long flags; |
| 3319 | |
| 3320 | spin_lock_irqsave(&list->lock, flags); |
| 3321 | __skb_queue_head(list, newsk); |
| 3322 | spin_unlock_irqrestore(&list->lock, flags); |
| 3323 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3324 | EXPORT_SYMBOL(skb_queue_head); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3325 | |
| 3326 | /** |
| 3327 | * skb_queue_tail - queue a buffer at the list tail |
| 3328 | * @list: list to use |
| 3329 | * @newsk: buffer to queue |
| 3330 | * |
| 3331 | * Queue a buffer at the tail of the list. This function takes the |
| 3332 | * list lock and can be used safely with other locking &sk_buff |
| 3333 | * functions. |
| 3334 | * |
| 3335 | * A buffer cannot be placed on two lists at the same time. |
| 3336 | */ |
| 3337 | void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) |
| 3338 | { |
| 3339 | unsigned long flags; |
| 3340 | |
| 3341 | spin_lock_irqsave(&list->lock, flags); |
| 3342 | __skb_queue_tail(list, newsk); |
| 3343 | spin_unlock_irqrestore(&list->lock, flags); |
| 3344 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3345 | EXPORT_SYMBOL(skb_queue_tail); |
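
/*
 * Example (editor's addition): the producer/consumer pattern these
 * helpers are built for.  Because skb_queue_tail() and skb_dequeue()
 * take the list lock themselves, no extra locking is needed here.
 */
static void example_produce(struct sk_buff_head *q, struct sk_buff *skb)
{
	skb_queue_tail(q, skb);			/* append under q->lock */
}

static void example_drain(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(q)) != NULL)
		kfree_skb(skb);			/* or hand off for processing */
}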
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 3346 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3347 | /** |
| 3348 | * skb_unlink - remove a buffer from a list |
| 3349 | * @skb: buffer to remove |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 3350 | * @list: list to use |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3351 | * |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 3352 | * Remove a packet from a list. The list locks are taken and this |
| 3353 | * function is atomic with respect to other list locked calls. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3354 | * |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 3355 | * You must know what list the SKB is on. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3356 | */ |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 3357 | void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3358 | { |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 3359 | unsigned long flags; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3360 | |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 3361 | spin_lock_irqsave(&list->lock, flags); |
| 3362 | __skb_unlink(skb, list); |
| 3363 | spin_unlock_irqrestore(&list->lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3364 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3365 | EXPORT_SYMBOL(skb_unlink); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3366 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3367 | /** |
| 3368 | * skb_append - append a buffer |
| 3369 | * @old: buffer to insert after |
| 3370 | * @newsk: buffer to insert |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 3371 | * @list: list to use |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3372 | * |
| 3373 | * Place a packet after a given packet in a list. The list locks are taken |
| 3374 | * and this function is atomic with respect to other list locked calls. |
| 3375 | * A buffer cannot be placed on two lists at the same time. |
| 3376 | */ |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 3377 | void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3378 | { |
| 3379 | unsigned long flags; |
| 3380 | |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 3381 | spin_lock_irqsave(&list->lock, flags); |
Gerrit Renker | 7de6c03 | 2008-04-14 00:05:09 -0700 | [diff] [blame] | 3382 | __skb_queue_after(list, old, newsk); |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 3383 | spin_unlock_irqrestore(&list->lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3384 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3385 | EXPORT_SYMBOL(skb_append); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3386 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3387 | static inline void skb_split_inside_header(struct sk_buff *skb, |
| 3388 | struct sk_buff* skb1, |
| 3389 | const u32 len, const int pos) |
| 3390 | { |
| 3391 | int i; |
| 3392 | |
Arnaldo Carvalho de Melo | d626f62 | 2007-03-27 18:55:52 -0300 | [diff] [blame] | 3393 | skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), |
| 3394 | pos - len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3395 | /* And move data appendix as is. */ |
| 3396 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) |
| 3397 | skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; |
| 3398 | |
| 3399 | skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; |
| 3400 | skb_shinfo(skb)->nr_frags = 0; |
| 3401 | skb1->data_len = skb->data_len; |
| 3402 | skb1->len += skb1->data_len; |
| 3403 | skb->data_len = 0; |
| 3404 | skb->len = len; |
Arnaldo Carvalho de Melo | 27a884d | 2007-04-19 20:29:13 -0700 | [diff] [blame] | 3405 | skb_set_tail_pointer(skb, len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3406 | } |
| 3407 | |
| 3408 | static inline void skb_split_no_header(struct sk_buff *skb, |
| 3409 | struct sk_buff* skb1, |
| 3410 | const u32 len, int pos) |
| 3411 | { |
| 3412 | int i, k = 0; |
| 3413 | const int nfrags = skb_shinfo(skb)->nr_frags; |
| 3414 | |
| 3415 | skb_shinfo(skb)->nr_frags = 0; |
| 3416 | skb1->len = skb1->data_len = skb->len - len; |
| 3417 | skb->len = len; |
| 3418 | skb->data_len = len - pos; |
| 3419 | |
| 3420 | for (i = 0; i < nfrags; i++) { |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 3421 | int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3422 | |
| 3423 | if (pos + size > len) { |
| 3424 | skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; |
| 3425 | |
| 3426 | if (pos < len) { |
| 3427 | /* Split frag. |
| 3428 | * We have two variants in this case: |
| 3429 | * 1. Move the whole frag to the second |
| 3430 | * part if possible, e.g. where |
| 3431 | * splitting is expensive (as it was |
| 3432 | * for TUX). |
| 3433 | * 2. Split the frag exactly, as done here. |
| 3434 | */ |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 3435 | skb_frag_ref(skb, i); |
Jonathan Lemon | b54c9d5 | 2019-07-30 07:40:33 -0700 | [diff] [blame] | 3436 | skb_frag_off_add(&skb_shinfo(skb1)->frags[0], len - pos); |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 3437 | skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos); |
| 3438 | skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3439 | skb_shinfo(skb)->nr_frags++; |
| 3440 | } |
| 3441 | k++; |
| 3442 | } else |
| 3443 | skb_shinfo(skb)->nr_frags++; |
| 3444 | pos += size; |
| 3445 | } |
| 3446 | skb_shinfo(skb1)->nr_frags = k; |
| 3447 | } |
| 3448 | |
| 3449 | /** |
| 3450 | * skb_split - Split fragmented skb to two parts at length len. |
| 3451 | * @skb: the buffer to split |
| 3452 | * @skb1: the buffer to receive the second part |
| 3453 | * @len: new length for skb |
| 3454 | */ |
| 3455 | void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) |
| 3456 | { |
| 3457 | int pos = skb_headlen(skb); |
Talal Ahmad | 9b65b17 | 2021-11-02 22:58:44 -0400 | [diff] [blame] | 3458 | const int zc_flags = SKBFL_SHARED_FRAG | SKBFL_PURE_ZEROCOPY; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3459 | |
Talal Ahmad | 9b65b17 | 2021-11-02 22:58:44 -0400 | [diff] [blame] | 3460 | skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & zc_flags; |
Willem de Bruijn | 1f8b977 | 2017-08-03 16:29:41 -0400 | [diff] [blame] | 3461 | skb_zerocopy_clone(skb1, skb, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3462 | if (len < pos) /* Split line is inside header. */ |
| 3463 | skb_split_inside_header(skb, skb1, len, pos); |
| 3464 | else /* Second chunk has no header, nothing to copy. */ |
| 3465 | skb_split_no_header(skb, skb1, len, pos); |
| 3466 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3467 | EXPORT_SYMBOL(skb_split); |
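
/*
 * Example (editor's addition): splitting a packet at an MSS-like
 * boundary, loosely modeled on TCP fragmentation.  The tailroom sizing
 * is a sketch; the second buffer carries payload only, so any protocol
 * headers must be rebuilt by the caller.
 */
static struct sk_buff *example_split_at(struct sk_buff *skb, u32 len)
{
	struct sk_buff *rest;

	/* tailroom must cover the linear bytes moved when len < headlen */
	rest = alloc_skb(skb_headlen(skb), GFP_ATOMIC);
	if (!rest)
		return NULL;

	skb_split(skb, rest, len);	/* skb keeps bytes [0, len) */
	return rest;			/* rest holds everything past len */
}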
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3468 | |
Ilpo Järvinen | 9f782db | 2008-11-25 13:57:01 -0800 | [diff] [blame] | 3469 | /* Shifting from/to a cloned skb is a no-go. |
| 3470 | * |
| 3471 | * Caller cannot keep skb_shinfo related pointers past calling here! |
| 3472 | */ |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3473 | static int skb_prepare_for_shift(struct sk_buff *skb) |
| 3474 | { |
Eric Dumazet | c4777ef | 2021-11-01 17:45:55 -0700 | [diff] [blame] | 3475 | return skb_unclone_keeptruesize(skb, GFP_ATOMIC); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3476 | } |
| 3477 | |
| 3478 | /** |
| 3479 | * skb_shift - Shifts paged data partially from skb to another |
| 3480 | * @tgt: buffer into which tail data gets added |
| 3481 | * @skb: buffer from which the paged data comes from |
| 3482 | * @shiftlen: shift up to this many bytes |
| 3483 | * |
| 3484 | * Attempts to shift up to shiftlen worth of bytes, which may be less than |
Feng King | 20e994a | 2011-11-21 01:47:11 +0000 | [diff] [blame] | 3485 | * the length of the skb, from skb to tgt. Returns the number of bytes shifted. |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3486 | * It's up to caller to free skb if everything was shifted. |
| 3487 | * |
| 3488 | * If @tgt runs out of frags, the whole operation is aborted. |
| 3489 | * |
| 3490 | * The skb must contain nothing but paged data, while tgt is allowed |
| 3491 | * to have non-paged (linear) data as well. |
| 3492 | * |
| 3493 | * TODO: full sized shift could be optimized but that would need |
| 3494 | * specialized skb free'er to handle frags without up-to-date nr_frags. |
| 3495 | */ |
| 3496 | int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) |
| 3497 | { |
| 3498 | int from, to, merge, todo; |
Matthew Wilcox (Oracle) | d8e18a5 | 2019-07-22 20:08:26 -0700 | [diff] [blame] | 3499 | skb_frag_t *fragfrom, *fragto; |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3500 | |
| 3501 | BUG_ON(shiftlen > skb->len); |
Eric Dumazet | f8071cd | 2016-11-15 12:51:50 -0800 | [diff] [blame] | 3502 | |
| 3503 | if (skb_headlen(skb)) |
| 3504 | return 0; |
Willem de Bruijn | 1f8b977 | 2017-08-03 16:29:41 -0400 | [diff] [blame] | 3505 | if (skb_zcopy(tgt) || skb_zcopy(skb)) |
| 3506 | return 0; |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3507 | |
| 3508 | todo = shiftlen; |
| 3509 | from = 0; |
| 3510 | to = skb_shinfo(tgt)->nr_frags; |
| 3511 | fragfrom = &skb_shinfo(skb)->frags[from]; |
| 3512 | |
| 3513 | /* Actual merge is delayed until the point when we know we can |
| 3514 | * commit all, so that we don't have to undo partial changes |
| 3515 | */ |
| 3516 | if (!to || |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 3517 | !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom), |
Jonathan Lemon | b54c9d5 | 2019-07-30 07:40:33 -0700 | [diff] [blame] | 3518 | skb_frag_off(fragfrom))) { |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3519 | merge = -1; |
| 3520 | } else { |
| 3521 | merge = to - 1; |
| 3522 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 3523 | todo -= skb_frag_size(fragfrom); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3524 | if (todo < 0) { |
| 3525 | if (skb_prepare_for_shift(skb) || |
| 3526 | skb_prepare_for_shift(tgt)) |
| 3527 | return 0; |
| 3528 | |
Ilpo Järvinen | 9f782db | 2008-11-25 13:57:01 -0800 | [diff] [blame] | 3529 | /* All previous frag pointers might be stale! */ |
| 3530 | fragfrom = &skb_shinfo(skb)->frags[from]; |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3531 | fragto = &skb_shinfo(tgt)->frags[merge]; |
| 3532 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 3533 | skb_frag_size_add(fragto, shiftlen); |
| 3534 | skb_frag_size_sub(fragfrom, shiftlen); |
Jonathan Lemon | b54c9d5 | 2019-07-30 07:40:33 -0700 | [diff] [blame] | 3535 | skb_frag_off_add(fragfrom, shiftlen); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3536 | |
| 3537 | goto onlymerged; |
| 3538 | } |
| 3539 | |
| 3540 | from++; |
| 3541 | } |
| 3542 | |
| 3543 | /* Skip full, not-fitting skb to avoid expensive operations */ |
| 3544 | if ((shiftlen == skb->len) && |
| 3545 | (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) |
| 3546 | return 0; |
| 3547 | |
| 3548 | if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) |
| 3549 | return 0; |
| 3550 | |
| 3551 | while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { |
| 3552 | if (to == MAX_SKB_FRAGS) |
| 3553 | return 0; |
| 3554 | |
| 3555 | fragfrom = &skb_shinfo(skb)->frags[from]; |
| 3556 | fragto = &skb_shinfo(tgt)->frags[to]; |
| 3557 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 3558 | if (todo >= skb_frag_size(fragfrom)) { |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3559 | *fragto = *fragfrom; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 3560 | todo -= skb_frag_size(fragfrom); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3561 | from++; |
| 3562 | to++; |
| 3563 | |
| 3564 | } else { |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 3565 | __skb_frag_ref(fragfrom); |
Jonathan Lemon | b54c9d5 | 2019-07-30 07:40:33 -0700 | [diff] [blame] | 3566 | skb_frag_page_copy(fragto, fragfrom); |
| 3567 | skb_frag_off_copy(fragto, fragfrom); |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 3568 | skb_frag_size_set(fragto, todo); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3569 | |
Jonathan Lemon | b54c9d5 | 2019-07-30 07:40:33 -0700 | [diff] [blame] | 3570 | skb_frag_off_add(fragfrom, todo); |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 3571 | skb_frag_size_sub(fragfrom, todo); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3572 | todo = 0; |
| 3573 | |
| 3574 | to++; |
| 3575 | break; |
| 3576 | } |
| 3577 | } |
| 3578 | |
| 3579 | /* Ready to "commit" this state change to tgt */ |
| 3580 | skb_shinfo(tgt)->nr_frags = to; |
| 3581 | |
| 3582 | if (merge >= 0) { |
| 3583 | fragfrom = &skb_shinfo(skb)->frags[0]; |
| 3584 | fragto = &skb_shinfo(tgt)->frags[merge]; |
| 3585 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 3586 | skb_frag_size_add(fragto, skb_frag_size(fragfrom)); |
Ilias Apalodimas | 6a5bcd8 | 2021-06-07 21:02:38 +0200 | [diff] [blame] | 3587 | __skb_frag_unref(fragfrom, skb->pp_recycle); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 3588 | } |
| 3589 | |
| 3590 | /* Reposition in the original skb */ |
| 3591 | to = 0; |
| 3592 | while (from < skb_shinfo(skb)->nr_frags) |
| 3593 | skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; |
| 3594 | skb_shinfo(skb)->nr_frags = to; |
| 3595 | |
| 3596 | BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); |
| 3597 | |
| 3598 | onlymerged: |
| 3599 | /* Most likely the tgt won't ever need its checksum anymore; skb, on |
| 3600 | * the other hand, might need it if it needs to be resent. |
| 3601 | */ |
| 3602 | tgt->ip_summed = CHECKSUM_PARTIAL; |
| 3603 | skb->ip_summed = CHECKSUM_PARTIAL; |
| 3604 | |
| 3605 | /* Yak, is it really working this way? Some helper please? */ |
| 3606 | skb->len -= shiftlen; |
| 3607 | skb->data_len -= shiftlen; |
| 3608 | skb->truesize -= shiftlen; |
| 3609 | tgt->len += shiftlen; |
| 3610 | tgt->data_len += shiftlen; |
| 3611 | tgt->truesize += shiftlen; |
| 3612 | |
| 3613 | return shiftlen; |
| 3614 | } |
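
/*
 * Example (editor's addition): trying to coalesce all paged data from
 * one skb into the previous buffer, in the spirit of
 * tcp_shift_skb_data().  A partial shift (return value below len)
 * leaves the remainder in the source skb, which this sketch reports.
 */
static bool example_shift_all(struct sk_buff *tgt, struct sk_buff *skb)
{
	int len = skb->len;

	/* returns 0 if nothing could move, e.g. tgt ran out of frags */
	if (skb_shift(tgt, skb, len) < len)
		return false;

	/* fully drained; freeing the empty skb is the caller's job here */
	kfree_skb(skb);
	return true;
}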
| 3615 | |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3616 | /** |
| 3617 | * skb_prepare_seq_read - Prepare a sequential read of skb data |
| 3618 | * @skb: the buffer to read |
| 3619 | * @from: lower offset of data to be read |
| 3620 | * @to: upper offset of data to be read |
| 3621 | * @st: state variable |
| 3622 | * |
| 3623 | * Initializes the specified state variable. Must be called before |
| 3624 | * invoking skb_seq_read() for the first time. |
| 3625 | */ |
| 3626 | void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, |
| 3627 | unsigned int to, struct skb_seq_state *st) |
| 3628 | { |
| 3629 | st->lower_offset = from; |
| 3630 | st->upper_offset = to; |
| 3631 | st->root_skb = st->cur_skb = skb; |
| 3632 | st->frag_idx = st->stepped_offset = 0; |
| 3633 | st->frag_data = NULL; |
Willem de Bruijn | 97550f6 | 2021-01-09 17:18:33 -0500 | [diff] [blame] | 3634 | st->frag_off = 0; |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3635 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3636 | EXPORT_SYMBOL(skb_prepare_seq_read); |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3637 | |
| 3638 | /** |
| 3639 | * skb_seq_read - Sequentially read skb data |
| 3640 | * @consumed: number of bytes consumed by the caller so far |
| 3641 | * @data: destination pointer for data to be returned |
| 3642 | * @st: state variable |
| 3643 | * |
Mathias Krause | bc32383 | 2013-11-07 14:18:26 +0100 | [diff] [blame] | 3644 | * Reads a block of skb data at @consumed relative to the |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3645 | * lower offset specified to skb_prepare_seq_read(). Assigns |
Mathias Krause | bc32383 | 2013-11-07 14:18:26 +0100 | [diff] [blame] | 3646 | * the head of the data block to @data and returns the length |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3647 | * of the block or 0 if the end of the skb data or the upper |
| 3648 | * offset has been reached. |
| 3649 | * |
| 3650 | * The caller is not required to consume all of the data |
Mathias Krause | bc32383 | 2013-11-07 14:18:26 +0100 | [diff] [blame] | 3651 | * returned, i.e. @consumed is typically set to the number |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3652 | * of bytes already consumed and the next call to |
| 3653 | * skb_seq_read() will return the remaining part of the block. |
| 3654 | * |
Lucas De Marchi | 25985ed | 2011-03-30 22:57:33 -0300 | [diff] [blame] | 3655 | * Note 1: The size of each block of data returned can be arbitrary; |
Masanari Iida | e793c0f | 2014-09-04 23:44:36 +0900 | [diff] [blame] | 3656 | * this limitation is the cost of zerocopy sequential |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3657 | * reads of potentially non-linear data. |
| 3658 | * |
Randy Dunlap | bc2cda1 | 2008-02-13 15:03:25 -0800 | [diff] [blame] | 3659 | * Note 2: Fragment lists within fragments are not implemented |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3660 | * at the moment, state->root_skb could be replaced with |
| 3661 | * a stack for this purpose. |
| 3662 | */ |
| 3663 | unsigned int skb_seq_read(unsigned int consumed, const u8 **data, |
| 3664 | struct skb_seq_state *st) |
| 3665 | { |
| 3666 | unsigned int block_limit, abs_offset = consumed + st->lower_offset; |
| 3667 | skb_frag_t *frag; |
| 3668 | |
Wedson Almeida Filho | aeb193e | 2013-06-23 23:33:48 -0700 | [diff] [blame] | 3669 | if (unlikely(abs_offset >= st->upper_offset)) { |
| 3670 | if (st->frag_data) { |
| 3671 | kunmap_atomic(st->frag_data); |
| 3672 | st->frag_data = NULL; |
| 3673 | } |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3674 | return 0; |
Wedson Almeida Filho | aeb193e | 2013-06-23 23:33:48 -0700 | [diff] [blame] | 3675 | } |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3676 | |
| 3677 | next_skb: |
Herbert Xu | 95e3b24 | 2009-01-29 16:07:52 -0800 | [diff] [blame] | 3678 | block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3679 | |
Thomas Chenault | 995b337 | 2009-05-18 21:43:27 -0700 | [diff] [blame] | 3680 | if (abs_offset < block_limit && !st->frag_data) { |
Herbert Xu | 95e3b24 | 2009-01-29 16:07:52 -0800 | [diff] [blame] | 3681 | *data = st->cur_skb->data + (abs_offset - st->stepped_offset); |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3682 | return block_limit - abs_offset; |
| 3683 | } |
| 3684 | |
| 3685 | if (st->frag_idx == 0 && !st->frag_data) |
| 3686 | st->stepped_offset += skb_headlen(st->cur_skb); |
| 3687 | |
| 3688 | while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { |
Willem de Bruijn | 97550f6 | 2021-01-09 17:18:33 -0500 | [diff] [blame] | 3689 | unsigned int pg_idx, pg_off, pg_sz; |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3690 | |
Willem de Bruijn | 97550f6 | 2021-01-09 17:18:33 -0500 | [diff] [blame] | 3691 | frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; |
| 3692 | |
| 3693 | pg_idx = 0; |
| 3694 | pg_off = skb_frag_off(frag); |
| 3695 | pg_sz = skb_frag_size(frag); |
| 3696 | |
| 3697 | if (skb_frag_must_loop(skb_frag_page(frag))) { |
| 3698 | pg_idx = (pg_off + st->frag_off) >> PAGE_SHIFT; |
| 3699 | pg_off = offset_in_page(pg_off + st->frag_off); |
| 3700 | pg_sz = min_t(unsigned int, pg_sz - st->frag_off, |
| 3701 | PAGE_SIZE - pg_off); |
| 3702 | } |
| 3703 | |
| 3704 | block_limit = pg_sz + st->stepped_offset; |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3705 | if (abs_offset < block_limit) { |
| 3706 | if (!st->frag_data) |
Willem de Bruijn | 97550f6 | 2021-01-09 17:18:33 -0500 | [diff] [blame] | 3707 | st->frag_data = kmap_atomic(skb_frag_page(frag) + pg_idx); |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3708 | |
Willem de Bruijn | 97550f6 | 2021-01-09 17:18:33 -0500 | [diff] [blame] | 3709 | *data = (u8 *)st->frag_data + pg_off + |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3710 | (abs_offset - st->stepped_offset); |
| 3711 | |
| 3712 | return block_limit - abs_offset; |
| 3713 | } |
| 3714 | |
| 3715 | if (st->frag_data) { |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 3716 | kunmap_atomic(st->frag_data); |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3717 | st->frag_data = NULL; |
| 3718 | } |
| 3719 | |
Willem de Bruijn | 97550f6 | 2021-01-09 17:18:33 -0500 | [diff] [blame] | 3720 | st->stepped_offset += pg_sz; |
| 3721 | st->frag_off += pg_sz; |
| 3722 | if (st->frag_off == skb_frag_size(frag)) { |
| 3723 | st->frag_off = 0; |
| 3724 | st->frag_idx++; |
| 3725 | } |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3726 | } |
| 3727 | |
Olaf Kirch | 5b5a60d | 2007-06-23 23:11:52 -0700 | [diff] [blame] | 3728 | if (st->frag_data) { |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 3729 | kunmap_atomic(st->frag_data); |
Olaf Kirch | 5b5a60d | 2007-06-23 23:11:52 -0700 | [diff] [blame] | 3730 | st->frag_data = NULL; |
| 3731 | } |
| 3732 | |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 3733 | if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) { |
Shyam Iyer | 71b3346 | 2009-01-29 16:12:42 -0800 | [diff] [blame] | 3734 | st->cur_skb = skb_shinfo(st->root_skb)->frag_list; |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3735 | st->frag_idx = 0; |
| 3736 | goto next_skb; |
Shyam Iyer | 71b3346 | 2009-01-29 16:12:42 -0800 | [diff] [blame] | 3737 | } else if (st->cur_skb->next) { |
| 3738 | st->cur_skb = st->cur_skb->next; |
Herbert Xu | 95e3b24 | 2009-01-29 16:07:52 -0800 | [diff] [blame] | 3739 | st->frag_idx = 0; |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3740 | goto next_skb; |
| 3741 | } |
| 3742 | |
| 3743 | return 0; |
| 3744 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3745 | EXPORT_SYMBOL(skb_seq_read); |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3746 | |
| 3747 | /** |
| 3748 | * skb_abort_seq_read - Abort a sequential read of skb data |
| 3749 | * @st: state variable |
| 3750 | * |
| 3751 | * Must be called if skb_seq_read() was not called until it |
| 3752 | * returned 0. |
| 3753 | */ |
| 3754 | void skb_abort_seq_read(struct skb_seq_state *st) |
| 3755 | { |
| 3756 | if (st->frag_data) |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 3757 | kunmap_atomic(st->frag_data); |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3758 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3759 | EXPORT_SYMBOL(skb_abort_seq_read); |
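
/*
 * Example (editor's addition): the canonical prepare/read loop for the
 * sequential-read API above.  Running skb_seq_read() until it returns 0
 * unmaps any fragment state, so skb_abort_seq_read() is only needed on
 * an early exit from the loop.
 */
static unsigned int example_walk_skb(struct sk_buff *skb)
{
	struct skb_seq_state st;
	unsigned int consumed = 0, len;
	const u8 *data;

	skb_prepare_seq_read(skb, 0, skb->len, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		/* 'data' points at 'len' contiguous bytes; inspect them */
		consumed += len;
	}
	return consumed;
}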
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 3760 | |
Thomas Graf | 3fc7e8a | 2005-06-23 21:00:17 -0700 | [diff] [blame] | 3761 | #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) |
| 3762 | |
| 3763 | static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, |
| 3764 | struct ts_config *conf, |
| 3765 | struct ts_state *state) |
| 3766 | { |
| 3767 | return skb_seq_read(offset, text, TS_SKB_CB(state)); |
| 3768 | } |
| 3769 | |
| 3770 | static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) |
| 3771 | { |
| 3772 | skb_abort_seq_read(TS_SKB_CB(state)); |
| 3773 | } |
| 3774 | |
| 3775 | /** |
| 3776 | * skb_find_text - Find a text pattern in skb data |
| 3777 | * @skb: the buffer to look in |
| 3778 | * @from: search offset |
| 3779 | * @to: search limit |
| 3780 | * @config: textsearch configuration |
Thomas Graf | 3fc7e8a | 2005-06-23 21:00:17 -0700 | [diff] [blame] | 3781 | * |
| 3782 | * Finds a pattern in the skb data according to the specified |
| 3783 | * textsearch configuration. Use textsearch_next() to retrieve |
| 3784 | * subsequent occurrences of the pattern. Returns the offset |
| 3785 | * to the first occurrence or UINT_MAX if no match was found. |
| 3786 | */ |
| 3787 | unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, |
Bojan Prtvar | 059a244 | 2015-02-22 11:46:35 +0100 | [diff] [blame] | 3788 | unsigned int to, struct ts_config *config) |
Thomas Graf | 3fc7e8a | 2005-06-23 21:00:17 -0700 | [diff] [blame] | 3789 | { |
Bojan Prtvar | 059a244 | 2015-02-22 11:46:35 +0100 | [diff] [blame] | 3790 | struct ts_state state; |
Phil Oester | f72b948 | 2006-06-26 00:00:57 -0700 | [diff] [blame] | 3791 | unsigned int ret; |
| 3792 | |
Willem de Bruijn | b228c9b | 2021-03-01 15:09:44 +0000 | [diff] [blame] | 3793 | BUILD_BUG_ON(sizeof(struct skb_seq_state) > sizeof(state.cb)); |
| 3794 | |
Thomas Graf | 3fc7e8a | 2005-06-23 21:00:17 -0700 | [diff] [blame] | 3795 | config->get_next_block = skb_ts_get_next_block; |
| 3796 | config->finish = skb_ts_finish; |
| 3797 | |
Bojan Prtvar | 059a244 | 2015-02-22 11:46:35 +0100 | [diff] [blame] | 3798 | skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state)); |
Thomas Graf | 3fc7e8a | 2005-06-23 21:00:17 -0700 | [diff] [blame] | 3799 | |
Bojan Prtvar | 059a244 | 2015-02-22 11:46:35 +0100 | [diff] [blame] | 3800 | ret = textsearch_find(config, &state); |
Phil Oester | f72b948 | 2006-06-26 00:00:57 -0700 | [diff] [blame] | 3801 | return (ret <= to - from ? ret : UINT_MAX); |
Thomas Graf | 3fc7e8a | 2005-06-23 21:00:17 -0700 | [diff] [blame] | 3802 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3803 | EXPORT_SYMBOL(skb_find_text); |
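
/*
 * Example (editor's addition): a one-shot pattern search built on
 * skb_find_text().  The "kmp" algorithm name and TS_AUTOLOAD flag come
 * from the textsearch API in <linux/textsearch.h>.
 */
static unsigned int example_find_get(struct sk_buff *skb)
{
	struct ts_config *conf;
	unsigned int pos;

	conf = textsearch_prepare("kmp", "GET ", 4, GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return UINT_MAX;

	pos = skb_find_text(skb, 0, skb->len, conf); /* UINT_MAX: no match */
	textsearch_destroy(conf);
	return pos;
}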
Thomas Graf | 3fc7e8a | 2005-06-23 21:00:17 -0700 | [diff] [blame] | 3804 | |
Hannes Frederic Sowa | be12a1f | 2015-05-21 16:59:58 +0200 | [diff] [blame] | 3805 | int skb_append_pagefrags(struct sk_buff *skb, struct page *page, |
| 3806 | int offset, size_t size) |
| 3807 | { |
| 3808 | int i = skb_shinfo(skb)->nr_frags; |
| 3809 | |
| 3810 | if (skb_can_coalesce(skb, i, page, offset)) { |
| 3811 | skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size); |
| 3812 | } else if (i < MAX_SKB_FRAGS) { |
| 3813 | get_page(page); |
| 3814 | skb_fill_page_desc(skb, i, page, offset, size); |
| 3815 | } else { |
| 3816 | return -EMSGSIZE; |
| 3817 | } |
| 3818 | |
| 3819 | return 0; |
| 3820 | } |
| 3821 | EXPORT_SYMBOL_GPL(skb_append_pagefrags); |
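
/*
 * Example (editor's addition): appending page data and keeping the byte
 * counters coherent, mirroring what callers such as af_unix do after a
 * successful return.  The example_ wrapper is hypothetical.
 */
static int example_append_page(struct sk_buff *skb, struct page *page,
			       int offset, size_t size)
{
	int err = skb_append_pagefrags(skb, page, offset, size);

	if (err)
		return err;		/* -EMSGSIZE: frag array is full */

	/* skb_append_pagefrags() only touches the frag array itself */
	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;
	return 0;
}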
| 3822 | |
Herbert Xu | cbb042f | 2006-03-20 22:43:56 -0800 | [diff] [blame] | 3823 | /** |
| 3824 | * skb_pull_rcsum - pull skb and update receive checksum |
| 3825 | * @skb: buffer to update |
Herbert Xu | cbb042f | 2006-03-20 22:43:56 -0800 | [diff] [blame] | 3826 | * @len: length of data pulled |
| 3827 | * |
| 3828 | * This function performs an skb_pull on the packet and updates |
Urs Thuermann | fee54fa | 2008-02-12 22:03:25 -0800 | [diff] [blame] | 3829 | * the CHECKSUM_COMPLETE checksum. It should be used on |
Patrick McHardy | 84fa793 | 2006-08-29 16:44:56 -0700 | [diff] [blame] | 3830 | * receive path processing instead of skb_pull unless you know |
| 3831 | * that the checksum difference is zero (e.g., a valid IP header) |
| 3832 | * or you are setting ip_summed to CHECKSUM_NONE. |
Herbert Xu | cbb042f | 2006-03-20 22:43:56 -0800 | [diff] [blame] | 3833 | */ |
Johannes Berg | af72868 | 2017-06-16 14:29:22 +0200 | [diff] [blame] | 3834 | void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) |
Herbert Xu | cbb042f | 2006-03-20 22:43:56 -0800 | [diff] [blame] | 3835 | { |
Pravin B Shelar | 31b33df | 2015-09-28 17:24:25 -0700 | [diff] [blame] | 3836 | unsigned char *data = skb->data; |
| 3837 | |
Herbert Xu | cbb042f | 2006-03-20 22:43:56 -0800 | [diff] [blame] | 3838 | BUG_ON(len > skb->len); |
Pravin B Shelar | 31b33df | 2015-09-28 17:24:25 -0700 | [diff] [blame] | 3839 | __skb_pull(skb, len); |
| 3840 | skb_postpull_rcsum(skb, data, len); |
| 3841 | return skb->data; |
Herbert Xu | cbb042f | 2006-03-20 22:43:56 -0800 | [diff] [blame] | 3842 | } |
Arnaldo Carvalho de Melo | f94691a | 2006-03-20 22:47:55 -0800 | [diff] [blame] | 3843 | EXPORT_SYMBOL_GPL(skb_pull_rcsum); |
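
/*
 * Example (editor's addition): stripping a hypothetical 4-byte
 * encapsulation header on the receive path while keeping a
 * CHECKSUM_COMPLETE value coherent.  The header length is illustrative.
 */
static int example_strip_encap(struct sk_buff *skb)
{
	const unsigned int hlen = 4;	/* hypothetical header size */

	if (!pskb_may_pull(skb, hlen))
		return -EINVAL;

	skb_pull_rcsum(skb, hlen);	/* adjusts skb->csum while pulling */
	return 0;
}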
| 3844 | |
Yonghong Song | 13acc94 | 2018-03-21 16:31:03 -0700 | [diff] [blame] | 3845 | static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb) |
| 3846 | { |
| 3847 | skb_frag_t head_frag; |
| 3848 | struct page *page; |
| 3849 | |
| 3850 | page = virt_to_head_page(frag_skb->head); |
Matthew Wilcox (Oracle) | d8e18a5 | 2019-07-22 20:08:26 -0700 | [diff] [blame] | 3851 | __skb_frag_set_page(&head_frag, page); |
Jonathan Lemon | b54c9d5 | 2019-07-30 07:40:33 -0700 | [diff] [blame] | 3852 | skb_frag_off_set(&head_frag, frag_skb->data - |
| 3853 | (unsigned char *)page_address(page)); |
Matthew Wilcox (Oracle) | d8e18a5 | 2019-07-22 20:08:26 -0700 | [diff] [blame] | 3854 | skb_frag_size_set(&head_frag, skb_headlen(frag_skb)); |
Yonghong Song | 13acc94 | 2018-03-21 16:31:03 -0700 | [diff] [blame] | 3855 | return head_frag; |
| 3856 | } |
| 3857 | |
Steffen Klassert | 3a1296a | 2020-01-25 11:26:44 +0100 | [diff] [blame] | 3858 | struct sk_buff *skb_segment_list(struct sk_buff *skb, |
| 3859 | netdev_features_t features, |
| 3860 | unsigned int offset) |
| 3861 | { |
| 3862 | struct sk_buff *list_skb = skb_shinfo(skb)->frag_list; |
| 3863 | unsigned int tnl_hlen = skb_tnl_header_len(skb); |
| 3864 | unsigned int delta_truesize = 0; |
| 3865 | unsigned int delta_len = 0; |
| 3866 | struct sk_buff *tail = NULL; |
Dongseok Yi | 53475c5 | 2021-01-08 11:28:38 +0900 | [diff] [blame] | 3867 | struct sk_buff *nskb, *tmp; |
| 3868 | int err; |
Steffen Klassert | 3a1296a | 2020-01-25 11:26:44 +0100 | [diff] [blame] | 3869 | |
| 3870 | skb_push(skb, -skb_network_offset(skb) + offset); |
| 3871 | |
| 3872 | skb_shinfo(skb)->frag_list = NULL; |
| 3873 | |
| 3874 | do { |
| 3875 | nskb = list_skb; |
| 3876 | list_skb = list_skb->next; |
| 3877 | |
Dongseok Yi | 53475c5 | 2021-01-08 11:28:38 +0900 | [diff] [blame] | 3878 | err = 0; |
| 3879 | if (skb_shared(nskb)) { |
| 3880 | tmp = skb_clone(nskb, GFP_ATOMIC); |
| 3881 | if (tmp) { |
| 3882 | consume_skb(nskb); |
| 3883 | nskb = tmp; |
| 3884 | err = skb_unclone(nskb, GFP_ATOMIC); |
| 3885 | } else { |
| 3886 | err = -ENOMEM; |
| 3887 | } |
| 3888 | } |
| 3889 | |
Steffen Klassert | 3a1296a | 2020-01-25 11:26:44 +0100 | [diff] [blame] | 3890 | if (!tail) |
| 3891 | skb->next = nskb; |
| 3892 | else |
| 3893 | tail->next = nskb; |
| 3894 | |
Dongseok Yi | 53475c5 | 2021-01-08 11:28:38 +0900 | [diff] [blame] | 3895 | if (unlikely(err)) { |
| 3896 | nskb->next = list_skb; |
| 3897 | goto err_linearize; |
| 3898 | } |
| 3899 | |
Steffen Klassert | 3a1296a | 2020-01-25 11:26:44 +0100 | [diff] [blame] | 3900 | tail = nskb; |
| 3901 | |
| 3902 | delta_len += nskb->len; |
| 3903 | delta_truesize += nskb->truesize; |
| 3904 | |
| 3905 | skb_push(nskb, -skb_network_offset(nskb) + offset); |
| 3906 | |
Florian Westphal | cf673ed | 2020-03-30 18:51:29 +0200 | [diff] [blame] | 3907 | skb_release_head_state(nskb); |
Colin Ian King | c645fe9b | 2021-09-02 23:56:23 +0100 | [diff] [blame] | 3908 | __copy_skb_header(nskb, skb); |
Steffen Klassert | 3a1296a | 2020-01-25 11:26:44 +0100 | [diff] [blame] | 3909 | |
| 3910 | skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb)); |
| 3911 | skb_copy_from_linear_data_offset(skb, -tnl_hlen, |
| 3912 | nskb->data - tnl_hlen, |
| 3913 | offset + tnl_hlen); |
| 3914 | |
| 3915 | if (skb_needs_linearize(nskb, features) && |
| 3916 | __skb_linearize(nskb)) |
| 3917 | goto err_linearize; |
| 3918 | |
| 3919 | } while (list_skb); |
| 3920 | |
 | 3921 | skb->truesize -= delta_truesize; |
 | 3922 | skb->data_len -= delta_len; |
 | 3923 | skb->len -= delta_len; |
| 3924 | |
| 3925 | skb_gso_reset(skb); |
| 3926 | |
| 3927 | skb->prev = tail; |
| 3928 | |
| 3929 | if (skb_needs_linearize(skb, features) && |
| 3930 | __skb_linearize(skb)) |
| 3931 | goto err_linearize; |
| 3932 | |
| 3933 | skb_get(skb); |
| 3934 | |
| 3935 | return skb; |
| 3936 | |
| 3937 | err_linearize: |
| 3938 | kfree_skb_list(skb->next); |
| 3939 | skb->next = NULL; |
| 3940 | return ERR_PTR(-ENOMEM); |
| 3941 | } |
| 3942 | EXPORT_SYMBOL_GPL(skb_segment_list); |
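/* Illustrative sketch, not part of the original file: undo frag_list
 * based coalescing. @offset is the length of the headers shared by all
 * segments; using the MAC header length here mirrors the UDP fraglist
 * GRO path. On failure an ERR_PTR is returned, as from
 * skb_segment_list() itself.
 */
static struct sk_buff *__maybe_unused
skb_example_segment_list(struct sk_buff *skb, netdev_features_t features)
{
        return skb_segment_list(skb, features, skb_mac_header_len(skb));
}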
| 3943 | |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3944 | /** |
| 3945 | * skb_segment - Perform protocol segmentation on skb. |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 3946 | * @head_skb: buffer to segment |
Herbert Xu | 576a30e | 2006-06-27 13:22:38 -0700 | [diff] [blame] | 3947 | * @features: features for the output path (see dev->features) |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3948 | * |
| 3949 | * This function performs segmentation on the given skb. It returns |
Ben Hutchings | 4c821d7 | 2008-04-13 21:52:48 -0700 | [diff] [blame] | 3950 | * a pointer to the first in a list of new skbs for the segments. |
| 3951 | * In case of error it returns ERR_PTR(err). |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3952 | */ |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 3953 | struct sk_buff *skb_segment(struct sk_buff *head_skb, |
| 3954 | netdev_features_t features) |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3955 | { |
| 3956 | struct sk_buff *segs = NULL; |
| 3957 | struct sk_buff *tail = NULL; |
Michael S. Tsirkin | 1a4ceda | 2014-03-10 19:27:59 +0200 | [diff] [blame] | 3958 | struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list; |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 3959 | skb_frag_t *frag = skb_shinfo(head_skb)->frags; |
| 3960 | unsigned int mss = skb_shinfo(head_skb)->gso_size; |
| 3961 | unsigned int doffset = head_skb->data - skb_mac_header(head_skb); |
Michael S. Tsirkin | 1fd819e | 2014-03-10 19:28:08 +0200 | [diff] [blame] | 3962 | struct sk_buff *frag_skb = head_skb; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3963 | unsigned int offset = doffset; |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 3964 | unsigned int tnl_hlen = skb_tnl_header_len(head_skb); |
Alexander Duyck | 802ab55 | 2016-04-10 21:45:03 -0400 | [diff] [blame] | 3965 | unsigned int partial_segs = 0; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3966 | unsigned int headroom; |
Alexander Duyck | 802ab55 | 2016-04-10 21:45:03 -0400 | [diff] [blame] | 3967 | unsigned int len = head_skb->len; |
Pravin B Shelar | ec5f061 | 2013-03-07 09:28:01 +0000 | [diff] [blame] | 3968 | __be16 proto; |
Alexander Duyck | 36c9838 | 2016-05-02 09:38:18 -0700 | [diff] [blame] | 3969 | bool csum, sg; |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 3970 | int nfrags = skb_shinfo(head_skb)->nr_frags; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3971 | int err = -ENOMEM; |
| 3972 | int i = 0; |
| 3973 | int pos; |
| 3974 | |
Shmulik Ladkani | 3dcbdb1 | 2019-09-06 12:23:50 +0300 | [diff] [blame] | 3975 | if (list_skb && !list_skb->head_frag && skb_headlen(list_skb) && |
| 3976 | (skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY)) { |
| 3977 | /* gso_size is untrusted, and we have a frag_list with a linear |
| 3978 | * non head_frag head. |
| 3979 | * |
 | 3980 | * (we assume checking the first list_skb member suffices; |
 | 3981 | * i.e. if any of the list_skb members has a non-head_frag |
 | 3982 | * head, then the first one does too). |
 | 3983 | * |
 | 3984 | * If head_skb's headlen does not fit the requested gso_size, it |
 | 3985 | * means that the frag_list members do NOT terminate on exact |
 | 3986 | * gso_size boundaries. Hence we cannot perform skb_frag_t page |
 | 3987 | * sharing. Therefore we must fall back to copying the frag_list |
 | 3988 | * skbs; we do so by disabling SG. |
| 3989 | */ |
| 3990 | if (mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) |
| 3991 | features &= ~NETIF_F_SG; |
| 3992 | } |
| 3993 | |
Wei-Chun Chao | 5882a07 | 2014-06-08 23:48:54 -0700 | [diff] [blame] | 3994 | __skb_push(head_skb, doffset); |
Miaohe Lin | 2f63113 | 2020-08-01 17:36:05 +0800 | [diff] [blame] | 3995 | proto = skb_network_protocol(head_skb, NULL); |
Pravin B Shelar | ec5f061 | 2013-03-07 09:28:01 +0000 | [diff] [blame] | 3996 | if (unlikely(!proto)) |
| 3997 | return ERR_PTR(-EINVAL); |
| 3998 | |
Alexander Duyck | 36c9838 | 2016-05-02 09:38:18 -0700 | [diff] [blame] | 3999 | sg = !!(features & NETIF_F_SG); |
Alexander Duyck | f245d07 | 2016-02-05 15:28:26 -0800 | [diff] [blame] | 4000 | csum = !!can_checksum_protocol(features, proto); |
Tom Herbert | 7e2b10c | 2014-06-04 17:20:02 -0700 | [diff] [blame] | 4001 | |
Steffen Klassert | 07b26c9 | 2016-09-19 12:58:47 +0200 | [diff] [blame] | 4002 | if (sg && csum && (mss != GSO_BY_FRAGS)) { |
| 4003 | if (!(features & NETIF_F_GSO_PARTIAL)) { |
| 4004 | struct sk_buff *iter; |
Ilan Tayari | 43170c4 | 2017-04-19 21:26:07 +0300 | [diff] [blame] | 4005 | unsigned int frag_len; |
Steffen Klassert | 07b26c9 | 2016-09-19 12:58:47 +0200 | [diff] [blame] | 4006 | |
| 4007 | if (!list_skb || |
| 4008 | !net_gso_ok(features, skb_shinfo(head_skb)->gso_type)) |
| 4009 | goto normal; |
| 4010 | |
Ilan Tayari | 43170c4 | 2017-04-19 21:26:07 +0300 | [diff] [blame] | 4011 | /* If we get here then all the required |
| 4012 | * GSO features except frag_list are supported. |
 | 4013 | * Try to split the SKB into multiple GSO SKBs |
| 4014 | * with no frag_list. |
| 4015 | * Currently we can do that only when the buffers don't |
| 4016 | * have a linear part and all the buffers except |
| 4017 | * the last are of the same length. |
Steffen Klassert | 07b26c9 | 2016-09-19 12:58:47 +0200 | [diff] [blame] | 4018 | */ |
Ilan Tayari | 43170c4 | 2017-04-19 21:26:07 +0300 | [diff] [blame] | 4019 | frag_len = list_skb->len; |
Steffen Klassert | 07b26c9 | 2016-09-19 12:58:47 +0200 | [diff] [blame] | 4020 | skb_walk_frags(head_skb, iter) { |
Ilan Tayari | 43170c4 | 2017-04-19 21:26:07 +0300 | [diff] [blame] | 4021 | if (frag_len != iter->len && iter->next) |
| 4022 | goto normal; |
Ilan Tayari | eaffadb | 2017-04-08 02:07:08 +0300 | [diff] [blame] | 4023 | if (skb_headlen(iter) && !iter->head_frag) |
Steffen Klassert | 07b26c9 | 2016-09-19 12:58:47 +0200 | [diff] [blame] | 4024 | goto normal; |
| 4025 | |
| 4026 | len -= iter->len; |
| 4027 | } |
Ilan Tayari | 43170c4 | 2017-04-19 21:26:07 +0300 | [diff] [blame] | 4028 | |
| 4029 | if (len != frag_len) |
| 4030 | goto normal; |
Steffen Klassert | 07b26c9 | 2016-09-19 12:58:47 +0200 | [diff] [blame] | 4031 | } |
| 4032 | |
| 4033 | /* GSO partial only requires that we trim off any excess that |
| 4034 | * doesn't fit into an MSS sized block, so take care of that |
| 4035 | * now. |
| 4036 | */ |
Alexander Duyck | 802ab55 | 2016-04-10 21:45:03 -0400 | [diff] [blame] | 4037 | partial_segs = len / mss; |
Alexander Duyck | d7fb5a8 | 2016-05-02 09:38:12 -0700 | [diff] [blame] | 4038 | if (partial_segs > 1) |
| 4039 | mss *= partial_segs; |
| 4040 | else |
| 4041 | partial_segs = 0; |
Alexander Duyck | 802ab55 | 2016-04-10 21:45:03 -0400 | [diff] [blame] | 4042 | } |
| 4043 | |
Steffen Klassert | 07b26c9 | 2016-09-19 12:58:47 +0200 | [diff] [blame] | 4044 | normal: |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 4045 | headroom = skb_headroom(head_skb); |
| 4046 | pos = skb_headlen(head_skb); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 4047 | |
| 4048 | do { |
| 4049 | struct sk_buff *nskb; |
Michael S. Tsirkin | 8cb1990 | 2014-03-10 18:29:04 +0200 | [diff] [blame] | 4050 | skb_frag_t *nskb_frag; |
Herbert Xu | c8884ed | 2006-10-29 15:59:41 -0800 | [diff] [blame] | 4051 | int hsize; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 4052 | int size; |
| 4053 | |
Marcelo Ricardo Leitner | 3953c46 | 2016-06-02 15:05:40 -0300 | [diff] [blame] | 4054 | if (unlikely(mss == GSO_BY_FRAGS)) { |
| 4055 | len = list_skb->len; |
| 4056 | } else { |
| 4057 | len = head_skb->len - offset; |
| 4058 | if (len > mss) |
| 4059 | len = mss; |
| 4060 | } |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 4061 | |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 4062 | hsize = skb_headlen(head_skb) - offset; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 4063 | |
Xin Long | dbd50f2 | 2021-01-15 17:36:38 +0800 | [diff] [blame] | 4064 | if (hsize <= 0 && i >= nfrags && skb_headlen(list_skb) && |
Michael S. Tsirkin | 1a4ceda | 2014-03-10 19:27:59 +0200 | [diff] [blame] | 4065 | (skb_headlen(list_skb) == len || sg)) { |
| 4066 | BUG_ON(skb_headlen(list_skb) > len); |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 4067 | |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 4068 | i = 0; |
Michael S. Tsirkin | 1a4ceda | 2014-03-10 19:27:59 +0200 | [diff] [blame] | 4069 | nfrags = skb_shinfo(list_skb)->nr_frags; |
| 4070 | frag = skb_shinfo(list_skb)->frags; |
Michael S. Tsirkin | 1fd819e | 2014-03-10 19:28:08 +0200 | [diff] [blame] | 4071 | frag_skb = list_skb; |
Michael S. Tsirkin | 1a4ceda | 2014-03-10 19:27:59 +0200 | [diff] [blame] | 4072 | pos += skb_headlen(list_skb); |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 4073 | |
| 4074 | while (pos < offset + len) { |
| 4075 | BUG_ON(i >= nfrags); |
| 4076 | |
Michael S. Tsirkin | 4e1beba | 2014-03-10 18:29:14 +0200 | [diff] [blame] | 4077 | size = skb_frag_size(frag); |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 4078 | if (pos + size > offset + len) |
| 4079 | break; |
| 4080 | |
| 4081 | i++; |
| 4082 | pos += size; |
Michael S. Tsirkin | 4e1beba | 2014-03-10 18:29:14 +0200 | [diff] [blame] | 4083 | frag++; |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 4084 | } |
| 4085 | |
Michael S. Tsirkin | 1a4ceda | 2014-03-10 19:27:59 +0200 | [diff] [blame] | 4086 | nskb = skb_clone(list_skb, GFP_ATOMIC); |
| 4087 | list_skb = list_skb->next; |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 4088 | |
| 4089 | if (unlikely(!nskb)) |
| 4090 | goto err; |
| 4091 | |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 4092 | if (unlikely(pskb_trim(nskb, len))) { |
| 4093 | kfree_skb(nskb); |
| 4094 | goto err; |
| 4095 | } |
| 4096 | |
Alexander Duyck | ec47ea8 | 2012-05-04 14:26:56 +0000 | [diff] [blame] | 4097 | hsize = skb_end_offset(nskb); |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 4098 | if (skb_cow_head(nskb, doffset + headroom)) { |
| 4099 | kfree_skb(nskb); |
| 4100 | goto err; |
| 4101 | } |
| 4102 | |
Alexander Duyck | ec47ea8 | 2012-05-04 14:26:56 +0000 | [diff] [blame] | 4103 | nskb->truesize += skb_end_offset(nskb) - hsize; |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 4104 | skb_release_head_state(nskb); |
| 4105 | __skb_push(nskb, doffset); |
| 4106 | } else { |
Paolo Abeni | 00b229f | 2021-01-19 17:56:56 +0100 | [diff] [blame] | 4107 | if (hsize < 0) |
| 4108 | hsize = 0; |
Xin Long | dbd50f2 | 2021-01-15 17:36:38 +0800 | [diff] [blame] | 4109 | if (hsize > len || !sg) |
| 4110 | hsize = len; |
Xin Long | dbd50f2 | 2021-01-15 17:36:38 +0800 | [diff] [blame] | 4111 | |
Mel Gorman | c93bdd0 | 2012-07-31 16:44:19 -0700 | [diff] [blame] | 4112 | nskb = __alloc_skb(hsize + doffset + headroom, |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 4113 | GFP_ATOMIC, skb_alloc_rx_flag(head_skb), |
Mel Gorman | c93bdd0 | 2012-07-31 16:44:19 -0700 | [diff] [blame] | 4114 | NUMA_NO_NODE); |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 4115 | |
| 4116 | if (unlikely(!nskb)) |
| 4117 | goto err; |
| 4118 | |
| 4119 | skb_reserve(nskb, headroom); |
| 4120 | __skb_put(nskb, doffset); |
| 4121 | } |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 4122 | |
| 4123 | if (segs) |
| 4124 | tail->next = nskb; |
| 4125 | else |
| 4126 | segs = nskb; |
| 4127 | tail = nskb; |
| 4128 | |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 4129 | __copy_skb_header(nskb, head_skb); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 4130 | |
Eric Dumazet | 030737b | 2013-10-19 11:42:54 -0700 | [diff] [blame] | 4131 | skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom); |
Vlad Yasevich | fcdfe3a | 2014-07-31 10:33:06 -0400 | [diff] [blame] | 4132 | skb_reset_mac_len(nskb); |
Pravin B Shelar | 68c3316 | 2013-02-14 14:02:41 +0000 | [diff] [blame] | 4133 | |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 4134 | skb_copy_from_linear_data_offset(head_skb, -tnl_hlen, |
Pravin B Shelar | 68c3316 | 2013-02-14 14:02:41 +0000 | [diff] [blame] | 4135 | nskb->data - tnl_hlen, |
| 4136 | doffset + tnl_hlen); |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 4137 | |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 4138 | if (nskb->len == len + doffset) |
Simon Horman | 1cdbcb7 | 2013-05-19 15:46:49 +0000 | [diff] [blame] | 4139 | goto perform_csum_check; |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 4140 | |
Alexander Duyck | 7fbeffe | 2016-02-05 15:27:43 -0800 | [diff] [blame] | 4141 | if (!sg) { |
Yadu Kishore | 1454c9f | 2020-03-17 14:08:38 +0530 | [diff] [blame] | 4142 | if (!csum) { |
| 4143 | if (!nskb->remcsum_offload) |
| 4144 | nskb->ip_summed = CHECKSUM_NONE; |
| 4145 | SKB_GSO_CB(nskb)->csum = |
| 4146 | skb_copy_and_csum_bits(head_skb, offset, |
| 4147 | skb_put(nskb, |
| 4148 | len), |
Al Viro | 8d5930d | 2020-07-10 20:07:10 -0400 | [diff] [blame] | 4149 | len); |
Yadu Kishore | 1454c9f | 2020-03-17 14:08:38 +0530 | [diff] [blame] | 4150 | SKB_GSO_CB(nskb)->csum_start = |
| 4151 | skb_headroom(nskb) + doffset; |
| 4152 | } else { |
| 4153 | skb_copy_bits(head_skb, offset, |
| 4154 | skb_put(nskb, len), |
| 4155 | len); |
| 4156 | } |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 4157 | continue; |
| 4158 | } |
| 4159 | |
Michael S. Tsirkin | 8cb1990 | 2014-03-10 18:29:04 +0200 | [diff] [blame] | 4160 | nskb_frag = skb_shinfo(nskb)->frags; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 4161 | |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 4162 | skb_copy_from_linear_data_offset(head_skb, offset, |
Arnaldo Carvalho de Melo | d626f62 | 2007-03-27 18:55:52 -0300 | [diff] [blame] | 4163 | skb_put(nskb, hsize), hsize); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 4164 | |
Jonathan Lemon | 06b4feb | 2021-01-06 14:18:38 -0800 | [diff] [blame] | 4165 | skb_shinfo(nskb)->flags |= skb_shinfo(head_skb)->flags & |
| 4166 | SKBFL_SHARED_FRAG; |
Eric Dumazet | cef401d | 2013-01-25 20:34:37 +0000 | [diff] [blame] | 4167 | |
Willem de Bruijn | bf5c25d | 2017-12-22 19:00:17 -0500 | [diff] [blame] | 4168 | if (skb_orphan_frags(frag_skb, GFP_ATOMIC) || |
| 4169 | skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC)) |
| 4170 | goto err; |
| 4171 | |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 4172 | while (pos < offset + len) { |
| 4173 | if (i >= nfrags) { |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 4174 | i = 0; |
Michael S. Tsirkin | 1a4ceda | 2014-03-10 19:27:59 +0200 | [diff] [blame] | 4175 | nfrags = skb_shinfo(list_skb)->nr_frags; |
| 4176 | frag = skb_shinfo(list_skb)->frags; |
Michael S. Tsirkin | 1fd819e | 2014-03-10 19:28:08 +0200 | [diff] [blame] | 4177 | frag_skb = list_skb; |
Yonghong Song | 13acc94 | 2018-03-21 16:31:03 -0700 | [diff] [blame] | 4178 | if (!skb_headlen(list_skb)) { |
| 4179 | BUG_ON(!nfrags); |
| 4180 | } else { |
| 4181 | BUG_ON(!list_skb->head_frag); |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 4182 | |
Yonghong Song | 13acc94 | 2018-03-21 16:31:03 -0700 | [diff] [blame] | 4183 | /* to make room for head_frag. */ |
| 4184 | i--; |
| 4185 | frag--; |
| 4186 | } |
Willem de Bruijn | bf5c25d | 2017-12-22 19:00:17 -0500 | [diff] [blame] | 4187 | if (skb_orphan_frags(frag_skb, GFP_ATOMIC) || |
| 4188 | skb_zerocopy_clone(nskb, frag_skb, |
| 4189 | GFP_ATOMIC)) |
| 4190 | goto err; |
| 4191 | |
Michael S. Tsirkin | 1a4ceda | 2014-03-10 19:27:59 +0200 | [diff] [blame] | 4192 | list_skb = list_skb->next; |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 4193 | } |
| 4194 | |
| 4195 | if (unlikely(skb_shinfo(nskb)->nr_frags >= |
| 4196 | MAX_SKB_FRAGS)) { |
| 4197 | net_warn_ratelimited( |
| 4198 | "skb_segment: too many frags: %u %u\n", |
| 4199 | pos, mss); |
Eric Dumazet | ff907a1 | 2018-07-19 16:04:38 -0700 | [diff] [blame] | 4200 | err = -EINVAL; |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 4201 | goto err; |
| 4202 | } |
| 4203 | |
Yonghong Song | 13acc94 | 2018-03-21 16:31:03 -0700 | [diff] [blame] | 4204 | *nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag; |
Michael S. Tsirkin | 8cb1990 | 2014-03-10 18:29:04 +0200 | [diff] [blame] | 4205 | __skb_frag_ref(nskb_frag); |
| 4206 | size = skb_frag_size(nskb_frag); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 4207 | |
| 4208 | if (pos < offset) { |
Jonathan Lemon | b54c9d5 | 2019-07-30 07:40:33 -0700 | [diff] [blame] | 4209 | skb_frag_off_add(nskb_frag, offset - pos); |
Michael S. Tsirkin | 8cb1990 | 2014-03-10 18:29:04 +0200 | [diff] [blame] | 4210 | skb_frag_size_sub(nskb_frag, offset - pos); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 4211 | } |
| 4212 | |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 4213 | skb_shinfo(nskb)->nr_frags++; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 4214 | |
| 4215 | if (pos + size <= offset + len) { |
| 4216 | i++; |
Michael S. Tsirkin | 4e1beba | 2014-03-10 18:29:14 +0200 | [diff] [blame] | 4217 | frag++; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 4218 | pos += size; |
| 4219 | } else { |
Michael S. Tsirkin | 8cb1990 | 2014-03-10 18:29:04 +0200 | [diff] [blame] | 4220 | skb_frag_size_sub(nskb_frag, pos + size - (offset + len)); |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 4221 | goto skip_fraglist; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 4222 | } |
| 4223 | |
Michael S. Tsirkin | 8cb1990 | 2014-03-10 18:29:04 +0200 | [diff] [blame] | 4224 | nskb_frag++; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 4225 | } |
| 4226 | |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 4227 | skip_fraglist: |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 4228 | nskb->data_len = len - hsize; |
| 4229 | nskb->len += nskb->data_len; |
| 4230 | nskb->truesize += nskb->data_len; |
Pravin B Shelar | ec5f061 | 2013-03-07 09:28:01 +0000 | [diff] [blame] | 4231 | |
Simon Horman | 1cdbcb7 | 2013-05-19 15:46:49 +0000 | [diff] [blame] | 4232 | perform_csum_check: |
Alexander Duyck | 7fbeffe | 2016-02-05 15:27:43 -0800 | [diff] [blame] | 4233 | if (!csum) { |
Eric Dumazet | ff907a1 | 2018-07-19 16:04:38 -0700 | [diff] [blame] | 4234 | if (skb_has_shared_frag(nskb) && |
| 4235 | __skb_linearize(nskb)) |
| 4236 | goto err; |
| 4237 | |
Alexander Duyck | 7fbeffe | 2016-02-05 15:27:43 -0800 | [diff] [blame] | 4238 | if (!nskb->remcsum_offload) |
| 4239 | nskb->ip_summed = CHECKSUM_NONE; |
Alexander Duyck | 7644345 | 2016-02-05 15:27:37 -0800 | [diff] [blame] | 4240 | SKB_GSO_CB(nskb)->csum = |
| 4241 | skb_checksum(nskb, doffset, |
| 4242 | nskb->len - doffset, 0); |
Tom Herbert | 7e2b10c | 2014-06-04 17:20:02 -0700 | [diff] [blame] | 4243 | SKB_GSO_CB(nskb)->csum_start = |
Alexander Duyck | 7644345 | 2016-02-05 15:27:37 -0800 | [diff] [blame] | 4244 | skb_headroom(nskb) + doffset; |
Pravin B Shelar | ec5f061 | 2013-03-07 09:28:01 +0000 | [diff] [blame] | 4245 | } |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 4246 | } while ((offset += len) < head_skb->len); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 4247 | |
Eric Dumazet | bec3cfd | 2014-10-03 20:59:19 -0700 | [diff] [blame] | 4248 | /* Some callers want to get the end of the list. |
| 4249 | * Put it in segs->prev to avoid walking the list. |
| 4250 | * (see validate_xmit_skb_list() for example) |
| 4251 | */ |
| 4252 | segs->prev = tail; |
Toshiaki Makita | 432c856 | 2014-10-27 10:30:51 -0700 | [diff] [blame] | 4253 | |
Alexander Duyck | 802ab55 | 2016-04-10 21:45:03 -0400 | [diff] [blame] | 4254 | if (partial_segs) { |
Steffen Klassert | 07b26c9 | 2016-09-19 12:58:47 +0200 | [diff] [blame] | 4255 | struct sk_buff *iter; |
Alexander Duyck | 802ab55 | 2016-04-10 21:45:03 -0400 | [diff] [blame] | 4256 | int type = skb_shinfo(head_skb)->gso_type; |
Steffen Klassert | 07b26c9 | 2016-09-19 12:58:47 +0200 | [diff] [blame] | 4257 | unsigned short gso_size = skb_shinfo(head_skb)->gso_size; |
Alexander Duyck | 802ab55 | 2016-04-10 21:45:03 -0400 | [diff] [blame] | 4258 | |
| 4259 | /* Update type to add partial and then remove dodgy if set */ |
Steffen Klassert | 07b26c9 | 2016-09-19 12:58:47 +0200 | [diff] [blame] | 4260 | type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL; |
Alexander Duyck | 802ab55 | 2016-04-10 21:45:03 -0400 | [diff] [blame] | 4261 | type &= ~SKB_GSO_DODGY; |
| 4262 | |
| 4263 | /* Update GSO info and prepare to start updating headers on |
| 4264 | * our way back down the stack of protocols. |
| 4265 | */ |
Steffen Klassert | 07b26c9 | 2016-09-19 12:58:47 +0200 | [diff] [blame] | 4266 | for (iter = segs; iter; iter = iter->next) { |
| 4267 | skb_shinfo(iter)->gso_size = gso_size; |
| 4268 | skb_shinfo(iter)->gso_segs = partial_segs; |
| 4269 | skb_shinfo(iter)->gso_type = type; |
| 4270 | SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset; |
| 4271 | } |
| 4272 | |
| 4273 | if (tail->len - doffset <= gso_size) |
| 4274 | skb_shinfo(tail)->gso_size = 0; |
| 4275 | else if (tail != segs) |
| 4276 | skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size); |
Alexander Duyck | 802ab55 | 2016-04-10 21:45:03 -0400 | [diff] [blame] | 4277 | } |
| 4278 | |
Toshiaki Makita | 432c856 | 2014-10-27 10:30:51 -0700 | [diff] [blame] | 4279 | /* The following permits correct backpressure for protocols |
 | 4280 | * using skb_set_owner_w(). |
 | 4281 | * The idea is to transfer ownership from head_skb to the last segment. |
| 4282 | */ |
| 4283 | if (head_skb->destructor == sock_wfree) { |
| 4284 | swap(tail->truesize, head_skb->truesize); |
| 4285 | swap(tail->destructor, head_skb->destructor); |
| 4286 | swap(tail->sk, head_skb->sk); |
| 4287 | } |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 4288 | return segs; |
| 4289 | |
| 4290 | err: |
Eric Dumazet | 289dccb | 2013-12-20 14:29:08 -0800 | [diff] [blame] | 4291 | kfree_skb_list(segs); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 4292 | return ERR_PTR(err); |
| 4293 | } |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 4294 | EXPORT_SYMBOL_GPL(skb_segment); |
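/* Illustrative sketch, not part of the original file: software-segment
 * a GSO skb when the device lacks the required features, as the GSO
 * layer does; error handling is abbreviated.
 */
static struct sk_buff *__maybe_unused
skb_example_sw_segment(struct sk_buff *skb, netdev_features_t features)
{
        struct sk_buff *segs = skb_segment(skb, features);

        if (IS_ERR(segs))
                return NULL;

        consume_skb(skb);       /* the segments now carry the data */
        return segs;            /* linked via ->next; tail in segs->prev */
}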
| 4295 | |
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 4296 | #ifdef CONFIG_SKB_EXTENSIONS |
| 4297 | #define SKB_EXT_ALIGN_VALUE 8 |
| 4298 | #define SKB_EXT_CHUNKSIZEOF(x) (ALIGN((sizeof(x)), SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE) |
| 4299 | |
| 4300 | static const u8 skb_ext_type_len[] = { |
| 4301 | #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) |
| 4302 | [SKB_EXT_BRIDGE_NF] = SKB_EXT_CHUNKSIZEOF(struct nf_bridge_info), |
| 4303 | #endif |
Florian Westphal | 4165079 | 2018-12-18 17:15:27 +0100 | [diff] [blame] | 4304 | #ifdef CONFIG_XFRM |
| 4305 | [SKB_EXT_SEC_PATH] = SKB_EXT_CHUNKSIZEOF(struct sec_path), |
| 4306 | #endif |
Paul Blakey | 95a7233 | 2019-09-04 16:56:37 +0300 | [diff] [blame] | 4307 | #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) |
| 4308 | [TC_SKB_EXT] = SKB_EXT_CHUNKSIZEOF(struct tc_skb_ext), |
| 4309 | #endif |
Mat Martineau | 3ee17bc | 2020-01-09 07:59:19 -0800 | [diff] [blame] | 4310 | #if IS_ENABLED(CONFIG_MPTCP) |
| 4311 | [SKB_EXT_MPTCP] = SKB_EXT_CHUNKSIZEOF(struct mptcp_ext), |
| 4312 | #endif |
Jeremy Kerr | 78476d3 | 2021-10-29 11:01:44 +0800 | [diff] [blame] | 4313 | #if IS_ENABLED(CONFIG_MCTP_FLOWS) |
| 4314 | [SKB_EXT_MCTP] = SKB_EXT_CHUNKSIZEOF(struct mctp_flow), |
| 4315 | #endif |
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 4316 | }; |
| 4317 | |
| 4318 | static __always_inline unsigned int skb_ext_total_length(void) |
| 4319 | { |
| 4320 | return SKB_EXT_CHUNKSIZEOF(struct skb_ext) + |
| 4321 | #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) |
| 4322 | skb_ext_type_len[SKB_EXT_BRIDGE_NF] + |
| 4323 | #endif |
Florian Westphal | 4165079 | 2018-12-18 17:15:27 +0100 | [diff] [blame] | 4324 | #ifdef CONFIG_XFRM |
| 4325 | skb_ext_type_len[SKB_EXT_SEC_PATH] + |
| 4326 | #endif |
Paul Blakey | 95a7233 | 2019-09-04 16:56:37 +0300 | [diff] [blame] | 4327 | #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) |
| 4328 | skb_ext_type_len[TC_SKB_EXT] + |
| 4329 | #endif |
Mat Martineau | 3ee17bc | 2020-01-09 07:59:19 -0800 | [diff] [blame] | 4330 | #if IS_ENABLED(CONFIG_MPTCP) |
| 4331 | skb_ext_type_len[SKB_EXT_MPTCP] + |
| 4332 | #endif |
Jeremy Kerr | 78476d3 | 2021-10-29 11:01:44 +0800 | [diff] [blame] | 4333 | #if IS_ENABLED(CONFIG_MCTP_FLOWS) |
| 4334 | skb_ext_type_len[SKB_EXT_MCTP] + |
| 4335 | #endif |
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 4336 | 0; |
| 4337 | } |
| 4338 | |
| 4339 | static void skb_extensions_init(void) |
| 4340 | { |
| 4341 | BUILD_BUG_ON(SKB_EXT_NUM >= 8); |
| 4342 | BUILD_BUG_ON(skb_ext_total_length() > 255); |
| 4343 | |
| 4344 | skbuff_ext_cache = kmem_cache_create("skbuff_ext_cache", |
| 4345 | SKB_EXT_ALIGN_VALUE * skb_ext_total_length(), |
| 4346 | 0, |
| 4347 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, |
| 4348 | NULL); |
| 4349 | } |
| 4350 | #else |
| 4351 | static void skb_extensions_init(void) {} |
| 4352 | #endif |
| 4353 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4354 | void __init skb_init(void) |
| 4355 | { |
Kees Cook | 79a8a64 | 2018-02-07 17:44:38 -0800 | [diff] [blame] | 4356 | skbuff_head_cache = kmem_cache_create_usercopy("skbuff_head_cache", |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4357 | sizeof(struct sk_buff), |
| 4358 | 0, |
Alexey Dobriyan | e5d679f33 | 2006-08-26 19:25:52 -0700 | [diff] [blame] | 4359 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, |
Kees Cook | 79a8a64 | 2018-02-07 17:44:38 -0800 | [diff] [blame] | 4360 | offsetof(struct sk_buff, cb), |
| 4361 | sizeof_field(struct sk_buff, cb), |
Paul Mundt | 20c2df8 | 2007-07-20 10:11:58 +0900 | [diff] [blame] | 4362 | NULL); |
David S. Miller | d179cd1 | 2005-08-17 14:57:30 -0700 | [diff] [blame] | 4363 | skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", |
Eric Dumazet | d0bf4a9 | 2014-09-29 13:29:15 -0700 | [diff] [blame] | 4364 | sizeof(struct sk_buff_fclones), |
David S. Miller | d179cd1 | 2005-08-17 14:57:30 -0700 | [diff] [blame] | 4365 | 0, |
Alexey Dobriyan | e5d679f33 | 2006-08-26 19:25:52 -0700 | [diff] [blame] | 4366 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, |
Paul Mundt | 20c2df8 | 2007-07-20 10:11:58 +0900 | [diff] [blame] | 4367 | NULL); |
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 4368 | skb_extensions_init(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4369 | } |
| 4370 | |
David S. Miller | 51c739d | 2007-10-30 21:29:29 -0700 | [diff] [blame] | 4371 | static int |
Jason A. Donenfeld | 48a1df6 | 2017-06-04 04:16:22 +0200 | [diff] [blame] | 4372 | __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len, |
| 4373 | unsigned int recursion_level) |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4374 | { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 4375 | int start = skb_headlen(skb); |
| 4376 | int i, copy = start - offset; |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 4377 | struct sk_buff *frag_iter; |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4378 | int elt = 0; |
| 4379 | |
Jason A. Donenfeld | 48a1df6 | 2017-06-04 04:16:22 +0200 | [diff] [blame] | 4380 | if (unlikely(recursion_level >= 24)) |
| 4381 | return -EMSGSIZE; |
| 4382 | |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4383 | if (copy > 0) { |
| 4384 | if (copy > len) |
| 4385 | copy = len; |
Jens Axboe | 642f14903 | 2007-10-24 11:20:47 +0200 | [diff] [blame] | 4386 | sg_set_buf(sg, skb->data + offset, copy); |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4387 | elt++; |
| 4388 | if ((len -= copy) == 0) |
| 4389 | return elt; |
| 4390 | offset += copy; |
| 4391 | } |
| 4392 | |
| 4393 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 4394 | int end; |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4395 | |
Ilpo Järvinen | 547b792 | 2008-07-25 21:43:18 -0700 | [diff] [blame] | 4396 | WARN_ON(start > offset + len); |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 4397 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 4398 | end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4399 | if ((copy = end - offset) > 0) { |
| 4400 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
Jason A. Donenfeld | 48a1df6 | 2017-06-04 04:16:22 +0200 | [diff] [blame] | 4401 | if (unlikely(elt && sg_is_last(&sg[elt - 1]))) |
| 4402 | return -EMSGSIZE; |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4403 | |
| 4404 | if (copy > len) |
| 4405 | copy = len; |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 4406 | sg_set_page(&sg[elt], skb_frag_page(frag), copy, |
Jonathan Lemon | b54c9d5 | 2019-07-30 07:40:33 -0700 | [diff] [blame] | 4407 | skb_frag_off(frag) + offset - start); |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4408 | elt++; |
| 4409 | if (!(len -= copy)) |
| 4410 | return elt; |
| 4411 | offset += copy; |
| 4412 | } |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 4413 | start = end; |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4414 | } |
| 4415 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 4416 | skb_walk_frags(skb, frag_iter) { |
Jason A. Donenfeld | 48a1df6 | 2017-06-04 04:16:22 +0200 | [diff] [blame] | 4417 | int end, ret; |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4418 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 4419 | WARN_ON(start > offset + len); |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4420 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 4421 | end = start + frag_iter->len; |
| 4422 | if ((copy = end - offset) > 0) { |
Jason A. Donenfeld | 48a1df6 | 2017-06-04 04:16:22 +0200 | [diff] [blame] | 4423 | if (unlikely(elt && sg_is_last(&sg[elt - 1]))) |
| 4424 | return -EMSGSIZE; |
| 4425 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 4426 | if (copy > len) |
| 4427 | copy = len; |
Jason A. Donenfeld | 48a1df6 | 2017-06-04 04:16:22 +0200 | [diff] [blame] | 4428 | ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start, |
| 4429 | copy, recursion_level + 1); |
| 4430 | if (unlikely(ret < 0)) |
| 4431 | return ret; |
| 4432 | elt += ret; |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 4433 | if ((len -= copy) == 0) |
| 4434 | return elt; |
| 4435 | offset += copy; |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4436 | } |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 4437 | start = end; |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4438 | } |
| 4439 | BUG_ON(len); |
| 4440 | return elt; |
| 4441 | } |
| 4442 | |
Jason A. Donenfeld | 48a1df6 | 2017-06-04 04:16:22 +0200 | [diff] [blame] | 4443 | /** |
| 4444 | * skb_to_sgvec - Fill a scatter-gather list from a socket buffer |
| 4445 | * @skb: Socket buffer containing the buffers to be mapped |
| 4446 | * @sg: The scatter-gather list to map into |
| 4447 | * @offset: The offset into the buffer's contents to start mapping |
| 4448 | * @len: Length of buffer space to be mapped |
| 4449 | * |
| 4450 | * Fill the specified scatter-gather list with mappings/pointers into a |
| 4451 | * region of the buffer space attached to a socket buffer. Returns either |
| 4452 | * the number of scatterlist items used, or -EMSGSIZE if the contents |
| 4453 | * could not fit. |
| 4454 | */ |
| 4455 | int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) |
| 4456 | { |
| 4457 | int nsg = __skb_to_sgvec(skb, sg, offset, len, 0); |
| 4458 | |
| 4459 | if (nsg <= 0) |
| 4460 | return nsg; |
| 4461 | |
| 4462 | sg_mark_end(&sg[nsg - 1]); |
| 4463 | |
| 4464 | return nsg; |
| 4465 | } |
| 4466 | EXPORT_SYMBOL_GPL(skb_to_sgvec); |
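/* Illustrative sketch, not part of the original file: map an skb's data
 * into a scatterlist, e.g. for a crypto or DMA operation. The on-stack
 * table size is an example bound; skbs with a frag_list may need more
 * entries.
 */
static int __maybe_unused skb_example_map_to_sg(struct sk_buff *skb)
{
        struct scatterlist sg[MAX_SKB_FRAGS + 1];
        int nsg;

        sg_init_table(sg, ARRAY_SIZE(sg));
        nsg = skb_to_sgvec(skb, sg, 0, skb->len);
        if (nsg < 0)
                return nsg;     /* -EMSGSIZE: the skb did not fit */

        /* sg[0..nsg-1] now point into the skb; the last entry is marked. */
        return nsg;
}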
| 4467 | |
Fan Du | 25a91d8 | 2014-01-18 09:54:23 +0800 | [diff] [blame] | 4468 | /* As compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the |
 | 4469 | * given sglist without marking the sg entry that contains the last skb data |
 | 4470 | * as the end. So the caller can manipulate the sg list at will when appending |
 | 4471 | * new data after the first call, without calling sg_unmark_end to extend it. |
| 4472 | * |
| 4473 | * Scenario to use skb_to_sgvec_nomark: |
| 4474 | * 1. sg_init_table |
| 4475 | * 2. skb_to_sgvec_nomark(payload1) |
| 4476 | * 3. skb_to_sgvec_nomark(payload2) |
| 4477 | * |
| 4478 | * This is equivalent to: |
| 4479 | * 1. sg_init_table |
| 4480 | * 2. skb_to_sgvec(payload1) |
| 4481 | * 3. sg_unmark_end |
| 4482 | * 4. skb_to_sgvec(payload2) |
| 4483 | * |
 | 4484 | * When mapping multiple payloads conditionally, skb_to_sgvec_nomark |
 | 4485 | * is preferable. |
| 4486 | */ |
| 4487 | int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, |
| 4488 | int offset, int len) |
| 4489 | { |
Jason A. Donenfeld | 48a1df6 | 2017-06-04 04:16:22 +0200 | [diff] [blame] | 4490 | return __skb_to_sgvec(skb, sg, offset, len, 0); |
Fan Du | 25a91d8 | 2014-01-18 09:54:23 +0800 | [diff] [blame] | 4491 | } |
| 4492 | EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark); |
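/* Illustrative sketch, not part of the original file, of the scenario
 * above: map an skb followed by a separate trailer buffer into one
 * sglist, marking the end only once. The caller is assumed to provide a
 * table with room for the skb's entries plus one (nents).
 */
static int __maybe_unused skb_example_map_with_trailer(struct sk_buff *skb,
                                                       struct scatterlist *sg,
                                                       int nents,
                                                       void *trailer, int len)
{
        int nsg;

        sg_init_table(sg, nents);
        nsg = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
        if (nsg < 0)
                return nsg;

        sg_set_buf(&sg[nsg], trailer, len);
        sg_mark_end(&sg[nsg]);
        return nsg + 1;
}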
| 4493 | |
David S. Miller | 51c739d | 2007-10-30 21:29:29 -0700 | [diff] [blame] | 4494 | |
David S. Miller | 51c739d | 2007-10-30 21:29:29 -0700 | [diff] [blame] | 4495 | |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4496 | /** |
| 4497 | * skb_cow_data - Check that a socket buffer's data buffers are writable |
| 4498 | * @skb: The socket buffer to check. |
| 4499 | * @tailbits: Amount of trailing space to be added |
| 4500 | * @trailer: Returned pointer to the skb where the @tailbits space begins |
| 4501 | * |
| 4502 | * Make sure that the data buffers attached to a socket buffer are |
| 4503 | * writable. If they are not, private copies are made of the data buffers |
| 4504 | * and the socket buffer is set to use these instead. |
| 4505 | * |
| 4506 | * If @tailbits is given, make sure that there is space to write @tailbits |
| 4507 | * bytes of data beyond current end of socket buffer. @trailer will be |
| 4508 | * set to point to the skb in which this space begins. |
| 4509 | * |
| 4510 | * The number of scatterlist elements required to completely map the |
| 4511 | * COW'd and extended socket buffer will be returned. |
| 4512 | */ |
| 4513 | int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) |
| 4514 | { |
| 4515 | int copyflag; |
| 4516 | int elt; |
| 4517 | struct sk_buff *skb1, **skb_p; |
| 4518 | |
| 4519 | /* If skb is cloned or its head is paged, reallocate |
| 4520 | * head pulling out all the pages (pages are considered not writable |
| 4521 | * at the moment even if they are anonymous). |
| 4522 | */ |
| 4523 | if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && |
Miaohe Lin | c15fc19 | 2020-08-01 17:30:23 +0800 | [diff] [blame] | 4524 | !__pskb_pull_tail(skb, __skb_pagelen(skb))) |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4525 | return -ENOMEM; |
| 4526 | |
| 4527 | /* Easy case. Most of packets will go this way. */ |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 4528 | if (!skb_has_frag_list(skb)) { |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4529 | /* A little trouble: not enough space for the trailer. |
 | 4530 | * This should not happen when the stack is tuned to generate |
 | 4531 | * good frames. OK, on a miss we reallocate and reserve even more |
 | 4532 | * space; 128 bytes is fair. */ |
| 4533 | |
| 4534 | if (skb_tailroom(skb) < tailbits && |
| 4535 | pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) |
| 4536 | return -ENOMEM; |
| 4537 | |
| 4538 | /* Voila! */ |
| 4539 | *trailer = skb; |
| 4540 | return 1; |
| 4541 | } |
| 4542 | |
 | 4543 | /* Misery. We are in trouble, going to mince fragments... */ |
| 4544 | |
| 4545 | elt = 1; |
| 4546 | skb_p = &skb_shinfo(skb)->frag_list; |
| 4547 | copyflag = 0; |
| 4548 | |
| 4549 | while ((skb1 = *skb_p) != NULL) { |
| 4550 | int ntail = 0; |
| 4551 | |
 | 4552 | /* The fragment is partially pulled by someone; |
 | 4553 | * this can happen on input. Copy it and everything |
 | 4554 | * after it. */ |
| 4555 | |
| 4556 | if (skb_shared(skb1)) |
| 4557 | copyflag = 1; |
| 4558 | |
| 4559 | /* If the skb is the last, worry about trailer. */ |
| 4560 | |
| 4561 | if (skb1->next == NULL && tailbits) { |
| 4562 | if (skb_shinfo(skb1)->nr_frags || |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 4563 | skb_has_frag_list(skb1) || |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4564 | skb_tailroom(skb1) < tailbits) |
| 4565 | ntail = tailbits + 128; |
| 4566 | } |
| 4567 | |
| 4568 | if (copyflag || |
| 4569 | skb_cloned(skb1) || |
| 4570 | ntail || |
| 4571 | skb_shinfo(skb1)->nr_frags || |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 4572 | skb_has_frag_list(skb1)) { |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4573 | struct sk_buff *skb2; |
| 4574 | |
| 4575 | /* Fuck, we are miserable poor guys... */ |
| 4576 | if (ntail == 0) |
| 4577 | skb2 = skb_copy(skb1, GFP_ATOMIC); |
| 4578 | else |
| 4579 | skb2 = skb_copy_expand(skb1, |
| 4580 | skb_headroom(skb1), |
| 4581 | ntail, |
| 4582 | GFP_ATOMIC); |
| 4583 | if (unlikely(skb2 == NULL)) |
| 4584 | return -ENOMEM; |
| 4585 | |
| 4586 | if (skb1->sk) |
| 4587 | skb_set_owner_w(skb2, skb1->sk); |
| 4588 | |
| 4589 | /* Looking around. Are we still alive? |
| 4590 | * OK, link new skb, drop old one */ |
| 4591 | |
| 4592 | skb2->next = skb1->next; |
| 4593 | *skb_p = skb2; |
| 4594 | kfree_skb(skb1); |
| 4595 | skb1 = skb2; |
| 4596 | } |
| 4597 | elt++; |
| 4598 | *trailer = skb1; |
| 4599 | skb_p = &skb1->next; |
| 4600 | } |
| 4601 | |
| 4602 | return elt; |
| 4603 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 4604 | EXPORT_SYMBOL_GPL(skb_cow_data); |
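/* Illustrative sketch, not part of the original file: make an skb
 * writable and reserve room for a trailer, as IPsec transforms do
 * before in-place crypto; tailen is assumed to be the trailer size.
 */
static int __maybe_unused skb_example_cow_for_trailer(struct sk_buff *skb,
                                                      int tailen)
{
        struct sk_buff *trailer;
        int nfrags = skb_cow_data(skb, tailen, &trailer);

        if (nfrags < 0)
                return nfrags;

        /* Extend the chain by tailen bytes inside the trailer skb. */
        pskb_put(skb, trailer, tailen);
        return nfrags;  /* sg entries needed to map the result */
}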
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 4605 | |
Eric Dumazet | b1faf56 | 2010-05-31 23:44:05 -0700 | [diff] [blame] | 4606 | static void sock_rmem_free(struct sk_buff *skb) |
| 4607 | { |
| 4608 | struct sock *sk = skb->sk; |
| 4609 | |
| 4610 | atomic_sub(skb->truesize, &sk->sk_rmem_alloc); |
| 4611 | } |
| 4612 | |
Soheil Hassas Yeganeh | 8605330a | 2017-03-18 17:02:59 -0400 | [diff] [blame] | 4613 | static void skb_set_err_queue(struct sk_buff *skb) |
| 4614 | { |
| 4615 | /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING. |
| 4616 | * So, it is safe to (mis)use it to mark skbs on the error queue. |
| 4617 | */ |
| 4618 | skb->pkt_type = PACKET_OUTGOING; |
| 4619 | BUILD_BUG_ON(PACKET_OUTGOING == 0); |
| 4620 | } |
| 4621 | |
Eric Dumazet | b1faf56 | 2010-05-31 23:44:05 -0700 | [diff] [blame] | 4622 | /* |
 | 4623 | * Note: we don't mem-charge error packets (no sk_forward_alloc changes) |
| 4624 | */ |
| 4625 | int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) |
| 4626 | { |
| 4627 | if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= |
Eric Dumazet | ebb3b78 | 2019-10-10 20:17:44 -0700 | [diff] [blame] | 4628 | (unsigned int)READ_ONCE(sk->sk_rcvbuf)) |
Eric Dumazet | b1faf56 | 2010-05-31 23:44:05 -0700 | [diff] [blame] | 4629 | return -ENOMEM; |
| 4630 | |
| 4631 | skb_orphan(skb); |
| 4632 | skb->sk = sk; |
| 4633 | skb->destructor = sock_rmem_free; |
| 4634 | atomic_add(skb->truesize, &sk->sk_rmem_alloc); |
Soheil Hassas Yeganeh | 8605330a | 2017-03-18 17:02:59 -0400 | [diff] [blame] | 4635 | skb_set_err_queue(skb); |
Eric Dumazet | b1faf56 | 2010-05-31 23:44:05 -0700 | [diff] [blame] | 4636 | |
Eric Dumazet | abb57ea | 2011-05-18 02:21:31 -0400 | [diff] [blame] | 4637 | /* before exiting rcu section, make sure dst is refcounted */ |
| 4638 | skb_dst_force(skb); |
| 4639 | |
Eric Dumazet | b1faf56 | 2010-05-31 23:44:05 -0700 | [diff] [blame] | 4640 | skb_queue_tail(&sk->sk_error_queue, skb); |
| 4641 | if (!sock_flag(sk, SOCK_DEAD)) |
Alexander Aring | e3ae236 | 2021-06-27 18:48:21 -0400 | [diff] [blame] | 4642 | sk_error_report(sk); |
Eric Dumazet | b1faf56 | 2010-05-31 23:44:05 -0700 | [diff] [blame] | 4643 | return 0; |
| 4644 | } |
| 4645 | EXPORT_SYMBOL(sock_queue_err_skb); |
| 4646 | |
Soheil Hassas Yeganeh | 83a1a1a | 2016-11-30 14:01:08 -0500 | [diff] [blame] | 4647 | static bool is_icmp_err_skb(const struct sk_buff *skb) |
| 4648 | { |
| 4649 | return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP || |
| 4650 | SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6); |
| 4651 | } |
| 4652 | |
Willem de Bruijn | 364a9e9 | 2014-08-31 21:30:27 -0400 | [diff] [blame] | 4653 | struct sk_buff *sock_dequeue_err_skb(struct sock *sk) |
| 4654 | { |
| 4655 | struct sk_buff_head *q = &sk->sk_error_queue; |
Soheil Hassas Yeganeh | 83a1a1a | 2016-11-30 14:01:08 -0500 | [diff] [blame] | 4656 | struct sk_buff *skb, *skb_next = NULL; |
| 4657 | bool icmp_next = false; |
Eric Dumazet | 997d5c3 | 2015-02-18 05:47:55 -0800 | [diff] [blame] | 4658 | unsigned long flags; |
Willem de Bruijn | 364a9e9 | 2014-08-31 21:30:27 -0400 | [diff] [blame] | 4659 | |
Eric Dumazet | 997d5c3 | 2015-02-18 05:47:55 -0800 | [diff] [blame] | 4660 | spin_lock_irqsave(&q->lock, flags); |
Willem de Bruijn | 364a9e9 | 2014-08-31 21:30:27 -0400 | [diff] [blame] | 4661 | skb = __skb_dequeue(q); |
Soheil Hassas Yeganeh | 38b2579 | 2017-06-02 12:38:22 -0400 | [diff] [blame] | 4662 | if (skb && (skb_next = skb_peek(q))) { |
Soheil Hassas Yeganeh | 83a1a1a | 2016-11-30 14:01:08 -0500 | [diff] [blame] | 4663 | icmp_next = is_icmp_err_skb(skb_next); |
Soheil Hassas Yeganeh | 38b2579 | 2017-06-02 12:38:22 -0400 | [diff] [blame] | 4664 | if (icmp_next) |
Willem de Bruijn | 985f733 | 2020-11-26 10:12:20 -0500 | [diff] [blame] | 4665 | sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_errno; |
Soheil Hassas Yeganeh | 38b2579 | 2017-06-02 12:38:22 -0400 | [diff] [blame] | 4666 | } |
Eric Dumazet | 997d5c3 | 2015-02-18 05:47:55 -0800 | [diff] [blame] | 4667 | spin_unlock_irqrestore(&q->lock, flags); |
Willem de Bruijn | 364a9e9 | 2014-08-31 21:30:27 -0400 | [diff] [blame] | 4668 | |
Soheil Hassas Yeganeh | 83a1a1a | 2016-11-30 14:01:08 -0500 | [diff] [blame] | 4669 | if (is_icmp_err_skb(skb) && !icmp_next) |
| 4670 | sk->sk_err = 0; |
| 4671 | |
| 4672 | if (skb_next) |
Alexander Aring | e3ae236 | 2021-06-27 18:48:21 -0400 | [diff] [blame] | 4673 | sk_error_report(sk); |
Willem de Bruijn | 364a9e9 | 2014-08-31 21:30:27 -0400 | [diff] [blame] | 4674 | |
| 4675 | return skb; |
| 4676 | } |
| 4677 | EXPORT_SYMBOL(sock_dequeue_err_skb); |
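/* Illustrative sketch, not part of the original file: drain one entry
 * from the error queue, as a protocol's MSG_ERRQUEUE recvmsg path
 * would, and return its extended-error code.
 */
static int __maybe_unused skb_example_drain_errqueue(struct sock *sk)
{
        struct sk_buff *skb = sock_dequeue_err_skb(sk);
        int err;

        if (!skb)
                return -EAGAIN;

        err = SKB_EXT_ERR(skb)->ee.ee_errno;
        kfree_skb(skb);
        return err;
}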
| 4678 | |
Alexander Duyck | cab41c4 | 2014-09-10 18:05:26 -0400 | [diff] [blame] | 4679 | /** |
| 4680 | * skb_clone_sk - create clone of skb, and take reference to socket |
| 4681 | * @skb: the skb to clone |
| 4682 | * |
| 4683 | * This function creates a clone of a buffer that holds a reference on |
| 4684 | * sk_refcnt. Buffers created via this function are meant to be |
| 4685 | * returned using sock_queue_err_skb, or free via kfree_skb. |
 | 4686 | * returned using sock_queue_err_skb, or freed via kfree_skb. |
| 4687 | * When passing buffers allocated with this function to sock_queue_err_skb |
| 4688 | * it is necessary to wrap the call with sock_hold/sock_put in order to |
| 4689 | * prevent the socket from being released prior to being enqueued on |
| 4690 | * the sk_error_queue. |
| 4691 | */ |
Alexander Duyck | 62bccb8 | 2014-09-04 13:31:35 -0400 | [diff] [blame] | 4692 | struct sk_buff *skb_clone_sk(struct sk_buff *skb) |
| 4693 | { |
| 4694 | struct sock *sk = skb->sk; |
| 4695 | struct sk_buff *clone; |
| 4696 | |
Reshetova, Elena | 41c6d65 | 2017-06-30 13:08:01 +0300 | [diff] [blame] | 4697 | if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt)) |
Alexander Duyck | 62bccb8 | 2014-09-04 13:31:35 -0400 | [diff] [blame] | 4698 | return NULL; |
| 4699 | |
| 4700 | clone = skb_clone(skb, GFP_ATOMIC); |
| 4701 | if (!clone) { |
| 4702 | sock_put(sk); |
| 4703 | return NULL; |
| 4704 | } |
| 4705 | |
| 4706 | clone->sk = sk; |
| 4707 | clone->destructor = sock_efree; |
| 4708 | |
| 4709 | return clone; |
| 4710 | } |
| 4711 | EXPORT_SYMBOL(skb_clone_sk); |
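/* Illustrative sketch, not part of the original file, of the
 * sock_hold/sock_put wrapping described above: clone an skb onto its
 * socket's error queue without racing with socket release.
 */
static void __maybe_unused skb_example_queue_err_clone(struct sk_buff *skb)
{
        struct sk_buff *clone = skb_clone_sk(skb);
        struct sock *sk;

        if (!clone)
                return;

        sk = clone->sk;
        sock_hold(sk);
        if (sock_queue_err_skb(sk, clone))
                kfree_skb(clone);
        sock_put(sk);
}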
| 4712 | |
Alexander Duyck | 37846ef | 2014-09-04 13:31:10 -0400 | [diff] [blame] | 4713 | static void __skb_complete_tx_timestamp(struct sk_buff *skb, |
| 4714 | struct sock *sk, |
Soheil Hassas Yeganeh | 4ef1b28 | 2017-03-18 17:03:00 -0400 | [diff] [blame] | 4715 | int tstype, |
| 4716 | bool opt_stats) |
Patrick Ohly | ac45f60 | 2009-02-12 05:03:37 +0000 | [diff] [blame] | 4717 | { |
Patrick Ohly | ac45f60 | 2009-02-12 05:03:37 +0000 | [diff] [blame] | 4718 | struct sock_exterr_skb *serr; |
Patrick Ohly | ac45f60 | 2009-02-12 05:03:37 +0000 | [diff] [blame] | 4719 | int err; |
| 4720 | |
Soheil Hassas Yeganeh | 4ef1b28 | 2017-03-18 17:03:00 -0400 | [diff] [blame] | 4721 | BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb)); |
| 4722 | |
Patrick Ohly | ac45f60 | 2009-02-12 05:03:37 +0000 | [diff] [blame] | 4723 | serr = SKB_EXT_ERR(skb); |
| 4724 | memset(serr, 0, sizeof(*serr)); |
| 4725 | serr->ee.ee_errno = ENOMSG; |
| 4726 | serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; |
Willem de Bruijn | e7fd288 | 2014-08-04 22:11:48 -0400 | [diff] [blame] | 4727 | serr->ee.ee_info = tstype; |
Soheil Hassas Yeganeh | 4ef1b28 | 2017-03-18 17:03:00 -0400 | [diff] [blame] | 4728 | serr->opt_stats = opt_stats; |
Willem de Bruijn | 1862d62 | 2017-04-12 19:24:35 -0400 | [diff] [blame] | 4729 | serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0; |
Willem de Bruijn | 4ed2d76 | 2014-08-04 22:11:49 -0400 | [diff] [blame] | 4730 | if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) { |
Willem de Bruijn | 09c2d25 | 2014-08-04 22:11:47 -0400 | [diff] [blame] | 4731 | serr->ee.ee_data = skb_shinfo(skb)->tskey; |
Eric Dumazet | 42f67ee | 2021-11-15 11:02:33 -0800 | [diff] [blame] | 4732 | if (sk_is_tcp(sk)) |
Willem de Bruijn | 4ed2d76 | 2014-08-04 22:11:49 -0400 | [diff] [blame] | 4733 | serr->ee.ee_data -= sk->sk_tskey; |
| 4734 | } |
Eric Dumazet | 2903037 | 2010-05-29 00:20:48 -0700 | [diff] [blame] | 4735 | |
Patrick Ohly | ac45f60 | 2009-02-12 05:03:37 +0000 | [diff] [blame] | 4736 | err = sock_queue_err_skb(sk, skb); |
Eric Dumazet | 2903037 | 2010-05-29 00:20:48 -0700 | [diff] [blame] | 4737 | |
Patrick Ohly | ac45f60 | 2009-02-12 05:03:37 +0000 | [diff] [blame] | 4738 | if (err) |
| 4739 | kfree_skb(skb); |
| 4740 | } |
Alexander Duyck | 37846ef | 2014-09-04 13:31:10 -0400 | [diff] [blame] | 4741 | |
Willem de Bruijn | b245be1 | 2015-01-30 13:29:32 -0500 | [diff] [blame] | 4742 | static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly) |
| 4743 | { |
| 4744 | bool ret; |
| 4745 | |
| 4746 | if (likely(sysctl_tstamp_allow_data || tsonly)) |
| 4747 | return true; |
| 4748 | |
| 4749 | read_lock_bh(&sk->sk_callback_lock); |
| 4750 | ret = sk->sk_socket && sk->sk_socket->file && |
| 4751 | file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW); |
| 4752 | read_unlock_bh(&sk->sk_callback_lock); |
| 4753 | return ret; |
| 4754 | } |
| 4755 | |
Alexander Duyck | 37846ef | 2014-09-04 13:31:10 -0400 | [diff] [blame] | 4756 | void skb_complete_tx_timestamp(struct sk_buff *skb, |
| 4757 | struct skb_shared_hwtstamps *hwtstamps) |
| 4758 | { |
| 4759 | struct sock *sk = skb->sk; |
| 4760 | |
Willem de Bruijn | b245be1 | 2015-01-30 13:29:32 -0500 | [diff] [blame] | 4761 | if (!skb_may_tx_timestamp(sk, false)) |
Willem de Bruijn | 35b99df | 2017-12-13 14:41:06 -0500 | [diff] [blame] | 4762 | goto err; |
Willem de Bruijn | b245be1 | 2015-01-30 13:29:32 -0500 | [diff] [blame] | 4763 | |
Eric Dumazet | 9ac25fc | 2017-03-03 21:01:03 -0800 | [diff] [blame] | 4764 | /* Take a reference to prevent skb_orphan() from freeing the socket, |
| 4765 | * but only if the socket refcount is not zero. |
| 4766 | */ |
Reshetova, Elena | 41c6d65 | 2017-06-30 13:08:01 +0300 | [diff] [blame] | 4767 | if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { |
Eric Dumazet | 9ac25fc | 2017-03-03 21:01:03 -0800 | [diff] [blame] | 4768 | *skb_hwtstamps(skb) = *hwtstamps; |
Soheil Hassas Yeganeh | 4ef1b28 | 2017-03-18 17:03:00 -0400 | [diff] [blame] | 4769 | __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false); |
Eric Dumazet | 9ac25fc | 2017-03-03 21:01:03 -0800 | [diff] [blame] | 4770 | sock_put(sk); |
Willem de Bruijn | 35b99df | 2017-12-13 14:41:06 -0500 | [diff] [blame] | 4771 | return; |
Eric Dumazet | 9ac25fc | 2017-03-03 21:01:03 -0800 | [diff] [blame] | 4772 | } |
Willem de Bruijn | 35b99df | 2017-12-13 14:41:06 -0500 | [diff] [blame] | 4773 | |
| 4774 | err: |
| 4775 | kfree_skb(skb); |
Alexander Duyck | 37846ef | 2014-09-04 13:31:10 -0400 | [diff] [blame] | 4776 | } |
| 4777 | EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); |
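/* Illustrative sketch, not part of the original file: report a hardware
 * TX timestamp from a driver completion handler. @clone is assumed to
 * come from skb_clone_sk() at transmit time and @ns from the NIC; the
 * call consumes the clone.
 */
static void __maybe_unused skb_example_complete_hw_tstamp(struct sk_buff *clone,
                                                          u64 ns)
{
        struct skb_shared_hwtstamps hwts = {
                .hwtstamp = ns_to_ktime(ns),
        };

        skb_complete_tx_timestamp(clone, &hwts);
}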
| 4778 | |
| 4779 | void __skb_tstamp_tx(struct sk_buff *orig_skb, |
Yousuk Seung | e7ed11e | 2021-01-20 12:41:55 -0800 | [diff] [blame] | 4780 | const struct sk_buff *ack_skb, |
Alexander Duyck | 37846ef | 2014-09-04 13:31:10 -0400 | [diff] [blame] | 4781 | struct skb_shared_hwtstamps *hwtstamps, |
| 4782 | struct sock *sk, int tstype) |
| 4783 | { |
| 4784 | struct sk_buff *skb; |
Soheil Hassas Yeganeh | 4ef1b28 | 2017-03-18 17:03:00 -0400 | [diff] [blame] | 4785 | bool tsonly, opt_stats = false; |
Alexander Duyck | 37846ef | 2014-09-04 13:31:10 -0400 | [diff] [blame] | 4786 | |
Willem de Bruijn | 3a8dd97 | 2015-03-11 15:43:55 -0400 | [diff] [blame] | 4787 | if (!sk) |
| 4788 | return; |
| 4789 | |
Miroslav Lichvar | b50a5c7 | 2017-05-19 17:52:40 +0200 | [diff] [blame] | 4790 | if (!hwtstamps && !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) && |
| 4791 | skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS) |
| 4792 | return; |
| 4793 | |
Willem de Bruijn | 3a8dd97 | 2015-03-11 15:43:55 -0400 | [diff] [blame] | 4794 | tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY; |
| 4795 | if (!skb_may_tx_timestamp(sk, tsonly)) |
Alexander Duyck | 37846ef | 2014-09-04 13:31:10 -0400 | [diff] [blame] | 4796 | return; |
| 4797 | |
Francis Yan | 1c88580 | 2016-11-27 23:07:18 -0800 | [diff] [blame] | 4798 | if (tsonly) { |
| 4799 | #ifdef CONFIG_INET |
| 4800 | if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) && |
Eric Dumazet | 42f67ee | 2021-11-15 11:02:33 -0800 | [diff] [blame] | 4801 | sk_is_tcp(sk)) { |
Yousuk Seung | e7ed11e | 2021-01-20 12:41:55 -0800 | [diff] [blame] | 4802 | skb = tcp_get_timestamping_opt_stats(sk, orig_skb, |
| 4803 | ack_skb); |
Soheil Hassas Yeganeh | 4ef1b28 | 2017-03-18 17:03:00 -0400 | [diff] [blame] | 4804 | opt_stats = true; |
| 4805 | } else |
Francis Yan | 1c88580 | 2016-11-27 23:07:18 -0800 | [diff] [blame] | 4806 | #endif |
| 4807 | skb = alloc_skb(0, GFP_ATOMIC); |
| 4808 | } else { |
Willem de Bruijn | 49ca0d8 | 2015-01-30 13:29:31 -0500 | [diff] [blame] | 4809 | skb = skb_clone(orig_skb, GFP_ATOMIC); |
Francis Yan | 1c88580 | 2016-11-27 23:07:18 -0800 | [diff] [blame] | 4810 | } |
Alexander Duyck | 37846ef | 2014-09-04 13:31:10 -0400 | [diff] [blame] | 4811 | if (!skb) |
| 4812 | return; |
| 4813 | |
Willem de Bruijn | 49ca0d8 | 2015-01-30 13:29:31 -0500 | [diff] [blame] | 4814 | if (tsonly) { |
Willem de Bruijn | fff8803 | 2017-06-08 11:35:03 -0400 | [diff] [blame] | 4815 | skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags & |
| 4816 | SKBTX_ANY_TSTAMP; |
Willem de Bruijn | 49ca0d8 | 2015-01-30 13:29:31 -0500 | [diff] [blame] | 4817 | skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; |
| 4818 | } |
| 4819 | |
| 4820 | if (hwtstamps) |
| 4821 | *skb_hwtstamps(skb) = *hwtstamps; |
| 4822 | else |
| 4823 | skb->tstamp = ktime_get_real(); |
| 4824 | |
Soheil Hassas Yeganeh | 4ef1b28 | 2017-03-18 17:03:00 -0400 | [diff] [blame] | 4825 | __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats); |
Alexander Duyck | 37846ef | 2014-09-04 13:31:10 -0400 | [diff] [blame] | 4826 | } |
Willem de Bruijn | e7fd288 | 2014-08-04 22:11:48 -0400 | [diff] [blame] | 4827 | EXPORT_SYMBOL_GPL(__skb_tstamp_tx); |
| 4828 | |
| 4829 | void skb_tstamp_tx(struct sk_buff *orig_skb, |
| 4830 | struct skb_shared_hwtstamps *hwtstamps) |
| 4831 | { |
Yousuk Seung | e7ed11e | 2021-01-20 12:41:55 -0800 | [diff] [blame] | 4832 | return __skb_tstamp_tx(orig_skb, NULL, hwtstamps, orig_skb->sk, |
Willem de Bruijn | e7fd288 | 2014-08-04 22:11:48 -0400 | [diff] [blame] | 4833 | SCM_TSTAMP_SND); |
| 4834 | } |
Patrick Ohly | ac45f60 | 2009-02-12 05:03:37 +0000 | [diff] [blame] | 4835 | EXPORT_SYMBOL_GPL(skb_tstamp_tx); |
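/*
 * Illustrative sketch (not part of the original file): how a NIC driver's
 * TX completion handler might feed a hardware timestamp back to the stack
 * via skb_tstamp_tx(). The hw_ns value is an assumption standing in for a
 * timestamp read from a completion descriptor and already converted to
 * nanoseconds; SKBTX_IN_PROGRESS and struct skb_shared_hwtstamps are the
 * real APIs.
 */
static void example_tx_complete(struct sk_buff *skb, u64 hw_ns)
{
        if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
                struct skb_shared_hwtstamps hwts = {
                        .hwtstamp = ns_to_ktime(hw_ns),
                };

                skb_tstamp_tx(skb, &hwts);      /* clones skb onto the error queue */
        }
        dev_kfree_skb_any(skb);
}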
| 4836 | |
Johannes Berg | 6e3e939 | 2011-11-09 10:15:42 +0100 | [diff] [blame] | 4837 | void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) |
| 4838 | { |
| 4839 | struct sock *sk = skb->sk; |
| 4840 | struct sock_exterr_skb *serr; |
Eric Dumazet | dd4f107 | 2017-03-03 21:01:02 -0800 | [diff] [blame] | 4841 | int err = 1; |
Johannes Berg | 6e3e939 | 2011-11-09 10:15:42 +0100 | [diff] [blame] | 4842 | |
| 4843 | skb->wifi_acked_valid = 1; |
| 4844 | skb->wifi_acked = acked; |
| 4845 | |
| 4846 | serr = SKB_EXT_ERR(skb); |
| 4847 | memset(serr, 0, sizeof(*serr)); |
| 4848 | serr->ee.ee_errno = ENOMSG; |
| 4849 | serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; |
| 4850 | |
Eric Dumazet | dd4f107 | 2017-03-03 21:01:02 -0800 | [diff] [blame] | 4851 | /* Take a reference to prevent skb_orphan() from freeing the socket, |
| 4852 | * but only if the socket refcount is not zero. |
| 4853 | */ |
Reshetova, Elena | 41c6d65 | 2017-06-30 13:08:01 +0300 | [diff] [blame] | 4854 | if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { |
Eric Dumazet | dd4f107 | 2017-03-03 21:01:02 -0800 | [diff] [blame] | 4855 | err = sock_queue_err_skb(sk, skb); |
| 4856 | sock_put(sk); |
| 4857 | } |
Johannes Berg | 6e3e939 | 2011-11-09 10:15:42 +0100 | [diff] [blame] | 4858 | if (err) |
| 4859 | kfree_skb(skb); |
| 4860 | } |
| 4861 | EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); |
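/*
 * Illustrative sketch (not part of the original file): a wireless driver
 * reporting TX ACK status for a frame whose sender requested it. The
 * acked flag is assumed to come from the device's status report;
 * skb_complete_wifi_ack() consumes the skb either way.
 */
static void example_wifi_tx_status(struct sk_buff *skb, bool acked)
{
        if (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS)
                skb_complete_wifi_ack(skb, acked);
        else
                dev_kfree_skb_any(skb);
}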
| 4862 | |
Rusty Russell | f35d9d8 | 2008-02-04 23:49:54 -0500 | [diff] [blame] | 4863 | /** |
| 4864 | * skb_partial_csum_set - set up and verify partial csum values for packet |
| 4865 | * @skb: the skb to set |
| 4866 | * @start: the number of bytes after skb->data to start checksumming. |
| 4867 | * @off: the offset from start to place the checksum. |
| 4868 | * |
| 4869 | * For untrusted partially-checksummed packets, we need to make sure the values |
| 4870 | * for skb->csum_start and skb->csum_offset are valid so we don't oops. |
| 4871 | * |
| 4872 | * This function checks and sets those values and skb->ip_summed: if this |
| 4873 | * returns false you should drop the packet. |
| 4874 | */ |
| 4875 | bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) |
| 4876 | { |
Eric Dumazet | 52b5d6f | 2018-10-10 06:59:35 -0700 | [diff] [blame] | 4877 | u32 csum_end = (u32)start + (u32)off + sizeof(__sum16); |
| 4878 | u32 csum_start = skb_headroom(skb) + (u32)start; |
| 4879 | |
| 4880 | if (unlikely(csum_start > U16_MAX || csum_end > skb_headlen(skb))) { |
| 4881 | net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n", |
| 4882 | start, off, skb_headroom(skb), skb_headlen(skb)); |
Rusty Russell | f35d9d8 | 2008-02-04 23:49:54 -0500 | [diff] [blame] | 4883 | return false; |
| 4884 | } |
| 4885 | skb->ip_summed = CHECKSUM_PARTIAL; |
Eric Dumazet | 52b5d6f | 2018-10-10 06:59:35 -0700 | [diff] [blame] | 4886 | skb->csum_start = csum_start; |
Rusty Russell | f35d9d8 | 2008-02-04 23:49:54 -0500 | [diff] [blame] | 4887 | skb->csum_offset = off; |
Jason Wang | e5d5dec | 2013-03-26 23:11:20 +0000 | [diff] [blame] | 4888 | skb_set_transport_header(skb, start); |
Rusty Russell | f35d9d8 | 2008-02-04 23:49:54 -0500 | [diff] [blame] | 4889 | return true; |
| 4890 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 4891 | EXPORT_SYMBOL_GPL(skb_partial_csum_set); |
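/*
 * Illustrative sketch (not part of the original file): applying checksum
 * metadata taken from an untrusted source, e.g. a virtio-net style header.
 * csum_start and csum_off are assumed to be attacker-controlled and must
 * be validated before the skb is allowed into the stack.
 */
static int example_apply_untrusted_csum(struct sk_buff *skb,
                                        u16 csum_start, u16 csum_off)
{
        if (!skb_partial_csum_set(skb, csum_start, csum_off))
                return -EINVAL; /* caller must drop the packet */
        return 0;
}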
Rusty Russell | f35d9d8 | 2008-02-04 23:49:54 -0500 | [diff] [blame] | 4892 | |
Paul Durrant | ed1f50c | 2014-01-09 10:02:46 +0000 | [diff] [blame] | 4893 | static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, |
| 4894 | unsigned int max) |
| 4895 | { |
| 4896 | if (skb_headlen(skb) >= len) |
| 4897 | return 0; |
| 4898 | |
| 4899 | /* If we need to pull up, then pull up to the max so we |
| 4900 | * won't need to do it again. |
| 4901 | */ |
| 4902 | if (max > skb->len) |
| 4903 | max = skb->len; |
| 4904 | |
| 4905 | if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) |
| 4906 | return -ENOMEM; |
| 4907 | |
| 4908 | if (skb_headlen(skb) < len) |
| 4909 | return -EPROTO; |
| 4910 | |
| 4911 | return 0; |
| 4912 | } |
| 4913 | |
Jan Beulich | f9708b4 | 2014-03-11 13:56:05 +0000 | [diff] [blame] | 4914 | #define MAX_TCP_HDR_LEN (15 * 4) |
| 4915 | |
| 4916 | static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb, |
| 4917 | typeof(IPPROTO_IP) proto, |
| 4918 | unsigned int off) |
| 4919 | { |
Kees Cook | 161d179 | 2020-02-19 22:23:04 -0800 | [diff] [blame] | 4920 | int err; |
Jan Beulich | f9708b4 | 2014-03-11 13:56:05 +0000 | [diff] [blame] | 4921 | |
Kees Cook | 161d179 | 2020-02-19 22:23:04 -0800 | [diff] [blame] | 4922 | switch (proto) { |
Jan Beulich | f9708b4 | 2014-03-11 13:56:05 +0000 | [diff] [blame] | 4923 | case IPPROTO_TCP: |
| 4924 | err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr), |
| 4925 | off + MAX_TCP_HDR_LEN); |
| 4926 | if (!err && !skb_partial_csum_set(skb, off, |
| 4927 | offsetof(struct tcphdr, |
| 4928 | check))) |
| 4929 | err = -EPROTO; |
| 4930 | return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; |
| 4931 | |
| 4932 | case IPPROTO_UDP: |
| 4933 | err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr), |
| 4934 | off + sizeof(struct udphdr)); |
| 4935 | if (!err && !skb_partial_csum_set(skb, off, |
| 4936 | offsetof(struct udphdr, |
| 4937 | check))) |
| 4938 | err = -EPROTO; |
| 4939 | return err ? ERR_PTR(err) : &udp_hdr(skb)->check; |
| 4940 | } |
| 4941 | |
| 4942 | return ERR_PTR(-EPROTO); |
| 4943 | } |
| 4944 | |
Paul Durrant | ed1f50c | 2014-01-09 10:02:46 +0000 | [diff] [blame] | 4945 | /* This value should be large enough to cover a tagged ethernet header plus |
| 4946 | * maximally sized IP and TCP or UDP headers. |
| 4947 | */ |
| 4948 | #define MAX_IP_HDR_LEN 128 |
| 4949 | |
Jan Beulich | f9708b4 | 2014-03-11 13:56:05 +0000 | [diff] [blame] | 4950 | static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate) |
Paul Durrant | ed1f50c | 2014-01-09 10:02:46 +0000 | [diff] [blame] | 4951 | { |
| 4952 | unsigned int off; |
| 4953 | bool fragment; |
Jan Beulich | f9708b4 | 2014-03-11 13:56:05 +0000 | [diff] [blame] | 4954 | __sum16 *csum; |
Paul Durrant | ed1f50c | 2014-01-09 10:02:46 +0000 | [diff] [blame] | 4955 | int err; |
| 4956 | |
| 4957 | fragment = false; |
| 4958 | |
| 4959 | err = skb_maybe_pull_tail(skb, |
| 4960 | sizeof(struct iphdr), |
| 4961 | MAX_IP_HDR_LEN); |
| 4962 | if (err < 0) |
| 4963 | goto out; |
| 4964 | |
Miaohe Lin | 11f920d | 2020-08-06 19:57:18 +0800 | [diff] [blame] | 4965 | if (ip_is_fragment(ip_hdr(skb))) |
Paul Durrant | ed1f50c | 2014-01-09 10:02:46 +0000 | [diff] [blame] | 4966 | fragment = true; |
| 4967 | |
| 4968 | off = ip_hdrlen(skb); |
| 4969 | |
| 4970 | err = -EPROTO; |
| 4971 | |
| 4972 | if (fragment) |
| 4973 | goto out; |
| 4974 | |
Jan Beulich | f9708b4 | 2014-03-11 13:56:05 +0000 | [diff] [blame] | 4975 | csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off); |
| 4976 | if (IS_ERR(csum)) |
| 4977 | return PTR_ERR(csum); |
Paul Durrant | ed1f50c | 2014-01-09 10:02:46 +0000 | [diff] [blame] | 4978 | |
Jan Beulich | f9708b4 | 2014-03-11 13:56:05 +0000 | [diff] [blame] | 4979 | if (recalculate) |
| 4980 | *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, |
| 4981 | ip_hdr(skb)->daddr, |
| 4982 | skb->len - off, |
| 4983 | ip_hdr(skb)->protocol, 0); |
Paul Durrant | ed1f50c | 2014-01-09 10:02:46 +0000 | [diff] [blame] | 4984 | err = 0; |
| 4985 | |
| 4986 | out: |
| 4987 | return err; |
| 4988 | } |
| 4989 | |
| 4990 | /* This value should be large enough to cover a tagged ethernet header plus |
| 4991 | * an IPv6 header, all options, and a maximal TCP or UDP header. |
| 4992 | */ |
| 4993 | #define MAX_IPV6_HDR_LEN 256 |
| 4994 | |
| 4995 | #define OPT_HDR(type, skb, off) \ |
| 4996 | (type *)(skb_network_header(skb) + (off)) |
| 4997 | |
| 4998 | static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) |
| 4999 | { |
| 5000 | int err; |
| 5001 | u8 nexthdr; |
| 5002 | unsigned int off; |
| 5003 | unsigned int len; |
| 5004 | bool fragment; |
| 5005 | bool done; |
Jan Beulich | f9708b4 | 2014-03-11 13:56:05 +0000 | [diff] [blame] | 5006 | __sum16 *csum; |
Paul Durrant | ed1f50c | 2014-01-09 10:02:46 +0000 | [diff] [blame] | 5007 | |
| 5008 | fragment = false; |
| 5009 | done = false; |
| 5010 | |
| 5011 | off = sizeof(struct ipv6hdr); |
| 5012 | |
| 5013 | err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); |
| 5014 | if (err < 0) |
| 5015 | goto out; |
| 5016 | |
| 5017 | nexthdr = ipv6_hdr(skb)->nexthdr; |
| 5018 | |
| 5019 | len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); |
| 5020 | while (off <= len && !done) { |
| 5021 | switch (nexthdr) { |
| 5022 | case IPPROTO_DSTOPTS: |
| 5023 | case IPPROTO_HOPOPTS: |
| 5024 | case IPPROTO_ROUTING: { |
| 5025 | struct ipv6_opt_hdr *hp; |
| 5026 | |
| 5027 | err = skb_maybe_pull_tail(skb, |
| 5028 | off + |
| 5029 | sizeof(struct ipv6_opt_hdr), |
| 5030 | MAX_IPV6_HDR_LEN); |
| 5031 | if (err < 0) |
| 5032 | goto out; |
| 5033 | |
| 5034 | hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); |
| 5035 | nexthdr = hp->nexthdr; |
| 5036 | off += ipv6_optlen(hp); |
| 5037 | break; |
| 5038 | } |
| 5039 | case IPPROTO_AH: { |
| 5040 | struct ip_auth_hdr *hp; |
| 5041 | |
| 5042 | err = skb_maybe_pull_tail(skb, |
| 5043 | off + |
| 5044 | sizeof(struct ip_auth_hdr), |
| 5045 | MAX_IPV6_HDR_LEN); |
| 5046 | if (err < 0) |
| 5047 | goto out; |
| 5048 | |
| 5049 | hp = OPT_HDR(struct ip_auth_hdr, skb, off); |
| 5050 | nexthdr = hp->nexthdr; |
| 5051 | off += ipv6_authlen(hp); |
| 5052 | break; |
| 5053 | } |
| 5054 | case IPPROTO_FRAGMENT: { |
| 5055 | struct frag_hdr *hp; |
| 5056 | |
| 5057 | err = skb_maybe_pull_tail(skb, |
| 5058 | off + |
| 5059 | sizeof(struct frag_hdr), |
| 5060 | MAX_IPV6_HDR_LEN); |
| 5061 | if (err < 0) |
| 5062 | goto out; |
| 5063 | |
| 5064 | hp = OPT_HDR(struct frag_hdr, skb, off); |
| 5065 | |
| 5066 | if (hp->frag_off & htons(IP6_OFFSET | IP6_MF)) |
| 5067 | fragment = true; |
| 5068 | |
| 5069 | nexthdr = hp->nexthdr; |
| 5070 | off += sizeof(struct frag_hdr); |
| 5071 | break; |
| 5072 | } |
| 5073 | default: |
| 5074 | done = true; |
| 5075 | break; |
| 5076 | } |
| 5077 | } |
| 5078 | |
| 5079 | err = -EPROTO; |
| 5080 | |
| 5081 | if (!done || fragment) |
| 5082 | goto out; |
| 5083 | |
Jan Beulich | f9708b4 | 2014-03-11 13:56:05 +0000 | [diff] [blame] | 5084 | csum = skb_checksum_setup_ip(skb, nexthdr, off); |
| 5085 | if (IS_ERR(csum)) |
| 5086 | return PTR_ERR(csum); |
Paul Durrant | ed1f50c | 2014-01-09 10:02:46 +0000 | [diff] [blame] | 5087 | |
Jan Beulich | f9708b4 | 2014-03-11 13:56:05 +0000 | [diff] [blame] | 5088 | if (recalculate) |
| 5089 | *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, |
| 5090 | &ipv6_hdr(skb)->daddr, |
| 5091 | skb->len - off, nexthdr, 0); |
Paul Durrant | ed1f50c | 2014-01-09 10:02:46 +0000 | [diff] [blame] | 5092 | err = 0; |
| 5093 | |
| 5094 | out: |
| 5095 | return err; |
| 5096 | } |
| 5097 | |
| 5098 | /** |
| 5099 | * skb_checksum_setup - set up partial checksum offset |
| 5100 | * @skb: the skb to set up |
| 5101 | * @recalculate: if true the pseudo-header checksum will be recalculated |
| 5102 | */ |
| 5103 | int skb_checksum_setup(struct sk_buff *skb, bool recalculate) |
| 5104 | { |
| 5105 | int err; |
| 5106 | |
| 5107 | switch (skb->protocol) { |
| 5108 | case htons(ETH_P_IP): |
Jan Beulich | f9708b4 | 2014-03-11 13:56:05 +0000 | [diff] [blame] | 5109 | err = skb_checksum_setup_ipv4(skb, recalculate); |
Paul Durrant | ed1f50c | 2014-01-09 10:02:46 +0000 | [diff] [blame] | 5110 | break; |
| 5111 | |
| 5112 | case htons(ETH_P_IPV6): |
| 5113 | err = skb_checksum_setup_ipv6(skb, recalculate); |
| 5114 | break; |
| 5115 | |
| 5116 | default: |
| 5117 | err = -EPROTO; |
| 5118 | break; |
| 5119 | } |
| 5120 | |
| 5121 | return err; |
| 5122 | } |
| 5123 | EXPORT_SYMBOL(skb_checksum_setup); |
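/*
 * Illustrative sketch (not part of the original file): a backend driver
 * (in the style of xen-netback) re-deriving checksum metadata for a
 * CHECKSUM_PARTIAL packet received from an untrusted frontend before
 * injecting it into the stack.
 */
static int example_rx_checksum_fixup(struct sk_buff *skb)
{
        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return 0;

        /* recalculate=true also rewrites the pseudo-header checksum */
        return skb_checksum_setup(skb, true);
}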
| 5124 | |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 5125 | /** |
| 5126 | * skb_checksum_maybe_trim - maybe trims the given skb |
| 5127 | * @skb: the skb to check |
| 5128 | * @transport_len: the data length beyond the network header |
| 5129 | * |
| 5130 | * Checks whether the given skb has data beyond the given transport length. |
| 5131 | * If so, returns a cloned skb trimmed to this transport length. |
| 5132 | * Otherwise returns the provided skb. Returns NULL in error cases |
| 5133 | * (e.g. transport_len exceeds skb length or out-of-memory). |
| 5134 | * |
Linus Lüssing | a516993 | 2015-08-13 05:54:07 +0200 | [diff] [blame] | 5135 | * Caller needs to set the skb transport header and free any returned skb if it |
| 5136 | * differs from the provided skb. |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 5137 | */ |
| 5138 | static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb, |
| 5139 | unsigned int transport_len) |
| 5140 | { |
| 5141 | struct sk_buff *skb_chk; |
| 5142 | unsigned int len = skb_transport_offset(skb) + transport_len; |
| 5143 | int ret; |
| 5144 | |
Linus Lüssing | a516993 | 2015-08-13 05:54:07 +0200 | [diff] [blame] | 5145 | if (skb->len < len) |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 5146 | return NULL; |
Linus Lüssing | a516993 | 2015-08-13 05:54:07 +0200 | [diff] [blame] | 5147 | else if (skb->len == len) |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 5148 | return skb; |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 5149 | |
| 5150 | skb_chk = skb_clone(skb, GFP_ATOMIC); |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 5151 | if (!skb_chk) |
| 5152 | return NULL; |
| 5153 | |
| 5154 | ret = pskb_trim_rcsum(skb_chk, len); |
| 5155 | if (ret) { |
| 5156 | kfree_skb(skb_chk); |
| 5157 | return NULL; |
| 5158 | } |
| 5159 | |
| 5160 | return skb_chk; |
| 5161 | } |
| 5162 | |
| 5163 | /** |
| 5164 | * skb_checksum_trimmed - validate checksum of an skb |
| 5165 | * @skb: the skb to check |
| 5166 | * @transport_len: the data length beyond the network header |
| 5167 | * @skb_chkf: checksum function to use |
| 5168 | * |
| 5169 | * Applies the given checksum function skb_chkf to the provided skb. |
| 5170 | * Returns a checked and maybe trimmed skb. Returns NULL on error. |
| 5171 | * |
| 5172 | * If the skb has data beyond the given transport length, then a |
| 5173 | * trimmed & cloned skb is checked and returned. |
| 5174 | * |
Linus Lüssing | a516993 | 2015-08-13 05:54:07 +0200 | [diff] [blame] | 5175 | * Caller needs to set the skb transport header and free any returned skb if it |
| 5176 | * differs from the provided skb. |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 5177 | */ |
| 5178 | struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, |
| 5179 | unsigned int transport_len, |
| 5180 | __sum16(*skb_chkf)(struct sk_buff *skb)) |
| 5181 | { |
| 5182 | struct sk_buff *skb_chk; |
| 5183 | unsigned int offset = skb_transport_offset(skb); |
Linus Lüssing | fcba67c | 2015-05-05 00:19:35 +0200 | [diff] [blame] | 5184 | __sum16 ret; |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 5185 | |
| 5186 | skb_chk = skb_checksum_maybe_trim(skb, transport_len); |
| 5187 | if (!skb_chk) |
Linus Lüssing | a516993 | 2015-08-13 05:54:07 +0200 | [diff] [blame] | 5188 | goto err; |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 5189 | |
Linus Lüssing | a516993 | 2015-08-13 05:54:07 +0200 | [diff] [blame] | 5190 | if (!pskb_may_pull(skb_chk, offset)) |
| 5191 | goto err; |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 5192 | |
Linus Lüssing | 9b36881 | 2016-02-24 04:21:42 +0100 | [diff] [blame] | 5193 | skb_pull_rcsum(skb_chk, offset); |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 5194 | ret = skb_chkf(skb_chk); |
Linus Lüssing | 9b36881 | 2016-02-24 04:21:42 +0100 | [diff] [blame] | 5195 | skb_push_rcsum(skb_chk, offset); |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 5196 | |
Linus Lüssing | a516993 | 2015-08-13 05:54:07 +0200 | [diff] [blame] | 5197 | if (ret) |
| 5198 | goto err; |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 5199 | |
| 5200 | return skb_chk; |
Linus Lüssing | a516993 | 2015-08-13 05:54:07 +0200 | [diff] [blame] | 5201 | |
| 5202 | err: |
| 5203 | if (skb_chk && skb_chk != skb) |
| 5204 | kfree_skb(skb_chk); |
| 5205 | |
| 5206 | return NULL; |
| 5207 | |
Linus Lüssing | 9afd85c | 2015-05-02 14:01:07 +0200 | [diff] [blame] | 5208 | } |
| 5209 | EXPORT_SYMBOL(skb_checksum_trimmed); |
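/*
 * Illustrative sketch (not part of the original file): validating a
 * transport checksum while tolerating trailing padding, in the style of
 * the IGMP/MLD checkers. The transport header is assumed to be set
 * already; skb_checksum_simple_validate() stands in here for a
 * protocol-specific checksum routine.
 */
static __sum16 example_chkf(struct sk_buff *skb)
{
        return skb_checksum_simple_validate(skb);
}

static bool example_csum_ok(struct sk_buff *skb, unsigned int transport_len)
{
        struct sk_buff *skb_chk;

        skb_chk = skb_checksum_trimmed(skb, transport_len, example_chkf);
        if (!skb_chk)
                return false;
        if (skb_chk != skb)
                kfree_skb(skb_chk);     /* only the verdict was needed */
        return true;
}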
| 5210 | |
Ben Hutchings | 4497b07 | 2008-06-19 16:22:28 -0700 | [diff] [blame] | 5211 | void __skb_warn_lro_forwarding(const struct sk_buff *skb) |
| 5212 | { |
Joe Perches | e87cc47 | 2012-05-13 21:56:26 +0000 | [diff] [blame] | 5213 | net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n", |
| 5214 | skb->dev->name); |
Ben Hutchings | 4497b07 | 2008-06-19 16:22:28 -0700 | [diff] [blame] | 5215 | } |
Ben Hutchings | 4497b07 | 2008-06-19 16:22:28 -0700 | [diff] [blame] | 5216 | EXPORT_SYMBOL(__skb_warn_lro_forwarding); |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 5217 | |
| 5218 | void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) |
| 5219 | { |
Eric Dumazet | 3d861f6 | 2012-10-22 09:03:40 +0000 | [diff] [blame] | 5220 | if (head_stolen) { |
| 5221 | skb_release_head_state(skb); |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 5222 | kmem_cache_free(skbuff_head_cache, skb); |
Eric Dumazet | 3d861f6 | 2012-10-22 09:03:40 +0000 | [diff] [blame] | 5223 | } else { |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 5224 | __kfree_skb(skb); |
Eric Dumazet | 3d861f6 | 2012-10-22 09:03:40 +0000 | [diff] [blame] | 5225 | } |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 5226 | } |
| 5227 | EXPORT_SYMBOL(kfree_skb_partial); |
| 5228 | |
| 5229 | /** |
| 5230 | * skb_try_coalesce - try to merge skb to prior one |
| 5231 | * @to: prior buffer |
| 5232 | * @from: buffer to add |
| 5233 | * @fragstolen: pointer to boolean |
Randy Dunlap | c6c4b97 | 2012-06-08 14:01:44 +0000 | [diff] [blame] | 5234 | * @delta_truesize: how much more was allocated than was requested |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 5235 | */ |
| 5236 | bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, |
| 5237 | bool *fragstolen, int *delta_truesize) |
| 5238 | { |
Eric Dumazet | c818fa9 | 2017-10-04 10:48:35 -0700 | [diff] [blame] | 5239 | struct skb_shared_info *to_shinfo, *from_shinfo; |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 5240 | int i, delta, len = from->len; |
| 5241 | |
| 5242 | *fragstolen = false; |
| 5243 | |
| 5244 | if (skb_cloned(to)) |
| 5245 | return false; |
| 5246 | |
Ilias Apalodimas | 6a5bcd8 | 2021-06-07 21:02:38 +0200 | [diff] [blame] | 5247 | /* The page_pool signature of struct page will eventually tell us |
| 5248 | * which pages can be recycled and which cannot, but for now prohibit |
| 5249 | * slab-allocated and page_pool-allocated SKBs from being coalesced. |
| 5250 | */ |
| 5251 | if (to->pp_recycle != from->pp_recycle) |
| 5252 | return false; |
| 5253 | |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 5254 | if (len <= skb_tailroom(to)) { |
Eric Dumazet | e93a043 | 2014-09-15 04:19:52 -0700 | [diff] [blame] | 5255 | if (len) |
| 5256 | BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 5257 | *delta_truesize = 0; |
| 5258 | return true; |
| 5259 | } |
| 5260 | |
Eric Dumazet | c818fa9 | 2017-10-04 10:48:35 -0700 | [diff] [blame] | 5261 | to_shinfo = skb_shinfo(to); |
| 5262 | from_shinfo = skb_shinfo(from); |
| 5263 | if (to_shinfo->frag_list || from_shinfo->frag_list) |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 5264 | return false; |
Willem de Bruijn | 1f8b977 | 2017-08-03 16:29:41 -0400 | [diff] [blame] | 5265 | if (skb_zcopy(to) || skb_zcopy(from)) |
| 5266 | return false; |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 5267 | |
| 5268 | if (skb_headlen(from) != 0) { |
| 5269 | struct page *page; |
| 5270 | unsigned int offset; |
| 5271 | |
Eric Dumazet | c818fa9 | 2017-10-04 10:48:35 -0700 | [diff] [blame] | 5272 | if (to_shinfo->nr_frags + |
| 5273 | from_shinfo->nr_frags >= MAX_SKB_FRAGS) |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 5274 | return false; |
| 5275 | |
| 5276 | if (skb_head_is_locked(from)) |
| 5277 | return false; |
| 5278 | |
| 5279 | delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); |
| 5280 | |
| 5281 | page = virt_to_head_page(from->head); |
| 5282 | offset = from->data - (unsigned char *)page_address(page); |
| 5283 | |
Eric Dumazet | c818fa9 | 2017-10-04 10:48:35 -0700 | [diff] [blame] | 5284 | skb_fill_page_desc(to, to_shinfo->nr_frags, |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 5285 | page, offset, skb_headlen(from)); |
| 5286 | *fragstolen = true; |
| 5287 | } else { |
Eric Dumazet | c818fa9 | 2017-10-04 10:48:35 -0700 | [diff] [blame] | 5288 | if (to_shinfo->nr_frags + |
| 5289 | from_shinfo->nr_frags > MAX_SKB_FRAGS) |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 5290 | return false; |
| 5291 | |
Weiping Pan | f4b549a | 2012-09-28 20:15:30 +0000 | [diff] [blame] | 5292 | delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from)); |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 5293 | } |
| 5294 | |
| 5295 | WARN_ON_ONCE(delta < len); |
| 5296 | |
Eric Dumazet | c818fa9 | 2017-10-04 10:48:35 -0700 | [diff] [blame] | 5297 | memcpy(to_shinfo->frags + to_shinfo->nr_frags, |
| 5298 | from_shinfo->frags, |
| 5299 | from_shinfo->nr_frags * sizeof(skb_frag_t)); |
| 5300 | to_shinfo->nr_frags += from_shinfo->nr_frags; |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 5301 | |
| 5302 | if (!skb_cloned(from)) |
Eric Dumazet | c818fa9 | 2017-10-04 10:48:35 -0700 | [diff] [blame] | 5303 | from_shinfo->nr_frags = 0; |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 5304 | |
Li RongQing | 8ea853f | 2012-09-18 16:53:21 +0000 | [diff] [blame] | 5305 | /* if the skb is not cloned this does nothing |
| 5306 | * since we set nr_frags to 0. |
| 5307 | */ |
Eric Dumazet | c818fa9 | 2017-10-04 10:48:35 -0700 | [diff] [blame] | 5308 | for (i = 0; i < from_shinfo->nr_frags; i++) |
| 5309 | __skb_frag_ref(&from_shinfo->frags[i]); |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 5310 | |
| 5311 | to->truesize += delta; |
| 5312 | to->len += len; |
| 5313 | to->data_len += len; |
| 5314 | |
| 5315 | *delta_truesize = delta; |
| 5316 | return true; |
| 5317 | } |
| 5318 | EXPORT_SYMBOL(skb_try_coalesce); |
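/*
 * Illustrative sketch (not part of the original file): receive-queue
 * coalescing in the style of TCP's tcp_try_coalesce(). tail is assumed to
 * be the last skb already on a queue; on success the source skb is
 * released with kfree_skb_partial() so a stolen head is not freed twice,
 * and delta feeds the queue's truesize accounting.
 */
static bool example_try_queue_coalesce(struct sk_buff *tail,
                                       struct sk_buff *skb, int *acct)
{
        bool fragstolen;
        int delta;

        if (!skb_try_coalesce(tail, skb, &fragstolen, &delta))
                return false;   /* caller queues skb as its own buffer */

        kfree_skb_partial(skb, fragstolen);
        *acct += delta;
        return true;
}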
Nicolas Dichtel | 621e84d | 2013-06-26 16:11:27 +0200 | [diff] [blame] | 5319 | |
| 5320 | /** |
Nicolas Dichtel | 8b27f27 | 2013-09-02 15:34:56 +0200 | [diff] [blame] | 5321 | * skb_scrub_packet - scrub an skb |
Nicolas Dichtel | 621e84d | 2013-06-26 16:11:27 +0200 | [diff] [blame] | 5322 | * |
| 5323 | * @skb: buffer to clean |
Nicolas Dichtel | 8b27f27 | 2013-09-02 15:34:56 +0200 | [diff] [blame] | 5324 | * @xnet: packet is crossing netns |
Nicolas Dichtel | 621e84d | 2013-06-26 16:11:27 +0200 | [diff] [blame] | 5325 | * |
Nicolas Dichtel | 8b27f27 | 2013-09-02 15:34:56 +0200 | [diff] [blame] | 5326 | * skb_scrub_packet can be used after encapsulating or decapsulating a packet |
| 5327 | * into/from a tunnel. Some information has to be cleared during these |
| 5328 | * operations. |
| 5329 | * skb_scrub_packet can also be used to clean an skb before injecting it into |
| 5330 | * another namespace (@xnet == true). We have to clear all information in the |
| 5331 | * skb that could impact namespace isolation. |
Nicolas Dichtel | 621e84d | 2013-06-26 16:11:27 +0200 | [diff] [blame] | 5332 | */ |
Nicolas Dichtel | 8b27f27 | 2013-09-02 15:34:56 +0200 | [diff] [blame] | 5333 | void skb_scrub_packet(struct sk_buff *skb, bool xnet) |
Nicolas Dichtel | 621e84d | 2013-06-26 16:11:27 +0200 | [diff] [blame] | 5334 | { |
Nicolas Dichtel | 621e84d | 2013-06-26 16:11:27 +0200 | [diff] [blame] | 5335 | skb->pkt_type = PACKET_HOST; |
| 5336 | skb->skb_iif = 0; |
WANG Cong | 60ff746 | 2014-05-04 16:39:18 -0700 | [diff] [blame] | 5337 | skb->ignore_df = 0; |
Nicolas Dichtel | 621e84d | 2013-06-26 16:11:27 +0200 | [diff] [blame] | 5338 | skb_dst_drop(skb); |
Florian Westphal | 174e238 | 2019-09-26 20:37:05 +0200 | [diff] [blame] | 5339 | skb_ext_reset(skb); |
Florian Westphal | 895b5c9 | 2019-09-29 20:54:03 +0200 | [diff] [blame] | 5340 | nf_reset_ct(skb); |
Nicolas Dichtel | 621e84d | 2013-06-26 16:11:27 +0200 | [diff] [blame] | 5341 | nf_reset_trace(skb); |
Herbert Xu | 213dd74 | 2015-04-16 09:03:27 +0800 | [diff] [blame] | 5342 | |
Petr Machata | 6f9a506 | 2018-11-19 16:11:07 +0000 | [diff] [blame] | 5343 | #ifdef CONFIG_NET_SWITCHDEV |
| 5344 | skb->offload_fwd_mark = 0; |
Ido Schimmel | 875e893 | 2018-12-04 08:15:10 +0000 | [diff] [blame] | 5345 | skb->offload_l3_fwd_mark = 0; |
Petr Machata | 6f9a506 | 2018-11-19 16:11:07 +0000 | [diff] [blame] | 5346 | #endif |
| 5347 | |
Herbert Xu | 213dd74 | 2015-04-16 09:03:27 +0800 | [diff] [blame] | 5348 | if (!xnet) |
| 5349 | return; |
| 5350 | |
Ye Yin | 2b5ec1a | 2017-10-26 16:57:05 +0800 | [diff] [blame] | 5351 | ipvs_reset(skb); |
Herbert Xu | 213dd74 | 2015-04-16 09:03:27 +0800 | [diff] [blame] | 5352 | skb->mark = 0; |
Jesus Sanchez-Palencia | c47d8c2 | 2018-07-03 15:42:47 -0700 | [diff] [blame] | 5353 | skb->tstamp = 0; |
Nicolas Dichtel | 621e84d | 2013-06-26 16:11:27 +0200 | [diff] [blame] | 5354 | } |
| 5355 | EXPORT_SYMBOL_GPL(skb_scrub_packet); |
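/*
 * Illustrative sketch (not part of the original file): a tunnel receive
 * path scrubbing the decapsulated packet, requesting the deeper cleanup
 * only when the packet is about to cross into another netns. dev is the
 * assumed destination device.
 */
static void example_tunnel_rx_scrub(struct sk_buff *skb,
                                    struct net_device *dev)
{
        skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev)));
}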
Florian Westphal | de960aa | 2014-01-26 10:58:16 +0100 | [diff] [blame] | 5356 | |
| 5357 | /** |
| 5358 | * skb_gso_transport_seglen - Return length of individual segments of a gso packet |
| 5359 | * |
| 5360 | * @skb: GSO skb |
| 5361 | * |
| 5362 | * skb_gso_transport_seglen is used to determine the real size of the |
| 5363 | * individual segments, including Layer4 headers (TCP/UDP). |
| 5364 | * |
| 5365 | * The MAC/L2 or network (IP, IPv6) headers are not accounted for. |
| 5366 | */ |
Daniel Axtens | a4a7771 | 2018-03-01 17:13:40 +1100 | [diff] [blame] | 5367 | static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) |
Florian Westphal | de960aa | 2014-01-26 10:58:16 +0100 | [diff] [blame] | 5368 | { |
| 5369 | const struct skb_shared_info *shinfo = skb_shinfo(skb); |
Florian Westphal | f993bc2 | 2014-10-20 13:49:18 +0200 | [diff] [blame] | 5370 | unsigned int thlen = 0; |
Florian Westphal | de960aa | 2014-01-26 10:58:16 +0100 | [diff] [blame] | 5371 | |
Florian Westphal | f993bc2 | 2014-10-20 13:49:18 +0200 | [diff] [blame] | 5372 | if (skb->encapsulation) { |
| 5373 | thlen = skb_inner_transport_header(skb) - |
| 5374 | skb_transport_header(skb); |
Florian Westphal | 6d39d58 | 2014-04-09 10:28:50 +0200 | [diff] [blame] | 5375 | |
Florian Westphal | f993bc2 | 2014-10-20 13:49:18 +0200 | [diff] [blame] | 5376 | if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) |
| 5377 | thlen += inner_tcp_hdrlen(skb); |
| 5378 | } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) { |
| 5379 | thlen = tcp_hdrlen(skb); |
Daniel Axtens | 1dd27cd | 2018-03-09 14:06:09 +1100 | [diff] [blame] | 5380 | } else if (unlikely(skb_is_gso_sctp(skb))) { |
Marcelo Ricardo Leitner | 90017ac | 2016-06-02 15:05:43 -0300 | [diff] [blame] | 5381 | thlen = sizeof(struct sctphdr); |
Willem de Bruijn | ee80d1e | 2018-04-26 13:42:16 -0400 | [diff] [blame] | 5382 | } else if (shinfo->gso_type & SKB_GSO_UDP_L4) { |
| 5383 | thlen = sizeof(struct udphdr); |
Florian Westphal | f993bc2 | 2014-10-20 13:49:18 +0200 | [diff] [blame] | 5384 | } |
Florian Westphal | 6d39d58 | 2014-04-09 10:28:50 +0200 | [diff] [blame] | 5385 | /* UFO sets gso_size to the size of the fragmentation |
| 5386 | * payload, i.e. the size of the L4 (UDP) header is already |
| 5387 | * accounted for. |
| 5388 | */ |
Florian Westphal | f993bc2 | 2014-10-20 13:49:18 +0200 | [diff] [blame] | 5389 | return thlen + shinfo->gso_size; |
Florian Westphal | de960aa | 2014-01-26 10:58:16 +0100 | [diff] [blame] | 5390 | } |
Daniel Axtens | a4a7771 | 2018-03-01 17:13:40 +1100 | [diff] [blame] | 5391 | |
| 5392 | /** |
| 5393 | * skb_gso_network_seglen - Return length of individual segments of a gso packet |
| 5394 | * |
| 5395 | * @skb: GSO skb |
| 5396 | * |
| 5397 | * skb_gso_network_seglen is used to determine the real size of the |
| 5398 | * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP). |
| 5399 | * |
| 5400 | * The MAC/L2 header is not accounted for. |
| 5401 | */ |
| 5402 | static unsigned int skb_gso_network_seglen(const struct sk_buff *skb) |
| 5403 | { |
| 5404 | unsigned int hdr_len = skb_transport_header(skb) - |
| 5405 | skb_network_header(skb); |
| 5406 | |
| 5407 | return hdr_len + skb_gso_transport_seglen(skb); |
| 5408 | } |
| 5409 | |
| 5410 | /** |
| 5411 | * skb_gso_mac_seglen - Return length of individual segments of a gso packet |
| 5412 | * |
| 5413 | * @skb: GSO skb |
| 5414 | * |
| 5415 | * skb_gso_mac_seglen is used to determine the real size of the |
| 5416 | * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4 |
| 5417 | * headers (TCP/UDP). |
| 5418 | */ |
| 5419 | static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb) |
| 5420 | { |
| 5421 | unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb); |
| 5422 | |
| 5423 | return hdr_len + skb_gso_transport_seglen(skb); |
| 5424 | } |
Vlad Yasevich | 0d5501c | 2014-08-08 14:42:13 -0400 | [diff] [blame] | 5425 | |
Marcelo Ricardo Leitner | ae7ef81 | 2016-06-02 15:05:41 -0300 | [diff] [blame] | 5426 | /** |
Daniel Axtens | 2b16f04 | 2018-01-31 14:15:33 +1100 | [diff] [blame] | 5427 | * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS |
| 5428 | * |
| 5429 | * There are a couple of instances where we have a GSO skb, and we |
| 5430 | * want to determine what size it would be after it is segmented. |
| 5431 | * |
| 5432 | * We might want to check: |
| 5433 | * - L3+L4+payload size (e.g. IP forwarding) |
| 5434 | * - L2+L3+L4+payload size (e.g. sanity check before passing to driver) |
| 5435 | * |
| 5436 | * This is a helper to do that correctly considering GSO_BY_FRAGS. |
| 5437 | * |
Mathieu Malaterre | 49682bf | 2018-10-31 13:16:58 +0100 | [diff] [blame] | 5438 | * @skb: GSO skb |
| 5439 | * |
Daniel Axtens | 2b16f04 | 2018-01-31 14:15:33 +1100 | [diff] [blame] | 5440 | * @seg_len: The segmented length (from skb_gso_*_seglen). In the |
| 5441 | * GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS]. |
| 5442 | * |
| 5443 | * @max_len: The maximum permissible length. |
| 5444 | * |
| 5445 | * Returns true if the segmented length <= max length. |
| 5446 | */ |
| 5447 | static inline bool skb_gso_size_check(const struct sk_buff *skb, |
| 5448 | unsigned int seg_len, |
| 5449 | unsigned int max_len) { |
| 5450 | const struct skb_shared_info *shinfo = skb_shinfo(skb); |
| 5451 | const struct sk_buff *iter; |
| 5452 | |
| 5453 | if (shinfo->gso_size != GSO_BY_FRAGS) |
| 5454 | return seg_len <= max_len; |
| 5455 | |
| 5456 | /* Undo this so we can re-use header sizes */ |
| 5457 | seg_len -= GSO_BY_FRAGS; |
| 5458 | |
| 5459 | skb_walk_frags(skb, iter) { |
| 5460 | if (seg_len + skb_headlen(iter) > max_len) |
| 5461 | return false; |
| 5462 | } |
| 5463 | |
| 5464 | return true; |
| 5465 | } |
| 5466 | |
| 5467 | /** |
Daniel Axtens | 779b793 | 2018-03-01 17:13:37 +1100 | [diff] [blame] | 5468 | * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU? |
Marcelo Ricardo Leitner | ae7ef81 | 2016-06-02 15:05:41 -0300 | [diff] [blame] | 5469 | * |
| 5470 | * @skb: GSO skb |
David S. Miller | 76f21b9 | 2016-06-03 22:56:28 -0700 | [diff] [blame] | 5471 | * @mtu: MTU to validate against |
Marcelo Ricardo Leitner | ae7ef81 | 2016-06-02 15:05:41 -0300 | [diff] [blame] | 5472 | * |
Daniel Axtens | 779b793 | 2018-03-01 17:13:37 +1100 | [diff] [blame] | 5473 | * skb_gso_validate_network_len validates if a given skb will fit a |
| 5474 | * wanted MTU once split. It considers L3 headers, L4 headers, and the |
| 5475 | * payload. |
Marcelo Ricardo Leitner | ae7ef81 | 2016-06-02 15:05:41 -0300 | [diff] [blame] | 5476 | */ |
Daniel Axtens | 779b793 | 2018-03-01 17:13:37 +1100 | [diff] [blame] | 5477 | bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu) |
Marcelo Ricardo Leitner | ae7ef81 | 2016-06-02 15:05:41 -0300 | [diff] [blame] | 5478 | { |
Daniel Axtens | 2b16f04 | 2018-01-31 14:15:33 +1100 | [diff] [blame] | 5479 | return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu); |
Marcelo Ricardo Leitner | ae7ef81 | 2016-06-02 15:05:41 -0300 | [diff] [blame] | 5480 | } |
Daniel Axtens | 779b793 | 2018-03-01 17:13:37 +1100 | [diff] [blame] | 5481 | EXPORT_SYMBOL_GPL(skb_gso_validate_network_len); |
Marcelo Ricardo Leitner | ae7ef81 | 2016-06-02 15:05:41 -0300 | [diff] [blame] | 5482 | |
Daniel Axtens | 2b16f04 | 2018-01-31 14:15:33 +1100 | [diff] [blame] | 5483 | /** |
| 5484 | * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length? |
| 5485 | * |
| 5486 | * @skb: GSO skb |
| 5487 | * @len: length to validate against |
| 5488 | * |
| 5489 | * skb_gso_validate_mac_len validates if a given skb will fit a wanted |
| 5490 | * length once split, including L2, L3 and L4 headers and the payload. |
| 5491 | */ |
| 5492 | bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len) |
| 5493 | { |
| 5494 | return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len); |
| 5495 | } |
| 5496 | EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len); |
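/*
 * Illustrative sketch (not part of the original file): pre-transmit
 * sanity checks in the spirit of the IP forwarding path and qdisc length
 * checks. mtu and max_frame are assumed limits; both validators handle
 * GSO_BY_FRAGS skbs via skb_gso_size_check() above.
 */
static bool example_gso_fits(const struct sk_buff *skb,
                             unsigned int mtu, unsigned int max_frame)
{
        if (!skb_is_gso(skb))
                return skb->len <= mtu;

        /* L3+L4+payload against the route MTU, L2+L3+L4+payload against
         * the device's maximum frame size.
         */
        return skb_gso_validate_network_len(skb, mtu) &&
               skb_gso_validate_mac_len(skb, max_frame);
}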
| 5497 | |
Vlad Yasevich | 0d5501c | 2014-08-08 14:42:13 -0400 | [diff] [blame] | 5498 | static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) |
| 5499 | { |
Yuya Kusakabe | d85e8be | 2019-04-16 10:22:28 +0900 | [diff] [blame] | 5500 | int mac_len, meta_len; |
| 5501 | void *meta; |
Toshiaki Makita | 4bbb3e0 | 2018-03-13 14:51:27 +0900 | [diff] [blame] | 5502 | |
Vlad Yasevich | 0d5501c | 2014-08-08 14:42:13 -0400 | [diff] [blame] | 5503 | if (skb_cow(skb, skb_headroom(skb)) < 0) { |
| 5504 | kfree_skb(skb); |
| 5505 | return NULL; |
| 5506 | } |
| 5507 | |
Toshiaki Makita | 4bbb3e0 | 2018-03-13 14:51:27 +0900 | [diff] [blame] | 5508 | mac_len = skb->data - skb_mac_header(skb); |
Toshiaki Makita | ae47457 | 2018-03-29 19:05:29 +0900 | [diff] [blame] | 5509 | if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) { |
| 5510 | memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), |
| 5511 | mac_len - VLAN_HLEN - ETH_TLEN); |
| 5512 | } |
Yuya Kusakabe | d85e8be | 2019-04-16 10:22:28 +0900 | [diff] [blame] | 5513 | |
| 5514 | meta_len = skb_metadata_len(skb); |
| 5515 | if (meta_len) { |
| 5516 | meta = skb_metadata_end(skb) - meta_len; |
| 5517 | memmove(meta + VLAN_HLEN, meta, meta_len); |
| 5518 | } |
| 5519 | |
Vlad Yasevich | 0d5501c | 2014-08-08 14:42:13 -0400 | [diff] [blame] | 5520 | skb->mac_header += VLAN_HLEN; |
| 5521 | return skb; |
| 5522 | } |
| 5523 | |
| 5524 | struct sk_buff *skb_vlan_untag(struct sk_buff *skb) |
| 5525 | { |
| 5526 | struct vlan_hdr *vhdr; |
| 5527 | u16 vlan_tci; |
| 5528 | |
Jiri Pirko | df8a39d | 2015-01-13 17:13:44 +0100 | [diff] [blame] | 5529 | if (unlikely(skb_vlan_tag_present(skb))) { |
Vlad Yasevich | 0d5501c | 2014-08-08 14:42:13 -0400 | [diff] [blame] | 5530 | /* vlan_tci is already set up, so leave this for another time */ |
| 5531 | return skb; |
| 5532 | } |
| 5533 | |
| 5534 | skb = skb_share_check(skb, GFP_ATOMIC); |
| 5535 | if (unlikely(!skb)) |
| 5536 | goto err_free; |
Miaohe Lin | 55eff0e | 2020-08-15 04:44:31 -0400 | [diff] [blame] | 5537 | /* We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). */ |
| 5538 | if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short)))) |
Vlad Yasevich | 0d5501c | 2014-08-08 14:42:13 -0400 | [diff] [blame] | 5539 | goto err_free; |
| 5540 | |
| 5541 | vhdr = (struct vlan_hdr *)skb->data; |
| 5542 | vlan_tci = ntohs(vhdr->h_vlan_TCI); |
| 5543 | __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); |
| 5544 | |
| 5545 | skb_pull_rcsum(skb, VLAN_HLEN); |
| 5546 | vlan_set_encap_proto(skb, vhdr); |
| 5547 | |
| 5548 | skb = skb_reorder_vlan_header(skb); |
| 5549 | if (unlikely(!skb)) |
| 5550 | goto err_free; |
| 5551 | |
| 5552 | skb_reset_network_header(skb); |
Alexander Lobakin | 8be33ec | 2020-11-09 23:47:23 +0000 | [diff] [blame] | 5553 | if (!skb_transport_header_was_set(skb)) |
| 5554 | skb_reset_transport_header(skb); |
Vlad Yasevich | 0d5501c | 2014-08-08 14:42:13 -0400 | [diff] [blame] | 5555 | skb_reset_mac_len(skb); |
| 5556 | |
| 5557 | return skb; |
| 5558 | |
| 5559 | err_free: |
| 5560 | kfree_skb(skb); |
| 5561 | return NULL; |
| 5562 | } |
| 5563 | EXPORT_SYMBOL(skb_vlan_untag); |
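/*
 * Illustrative sketch (not part of the original file): an rx path
 * normalizing frames whose VLAN tag was not stripped by hardware, so
 * later code can rely on skb_vlan_tag_present(). skb_vlan_untag() may
 * free and replace the skb, or return NULL on error (skb already freed).
 */
static struct sk_buff *example_normalize_vlan(struct sk_buff *skb)
{
        if (eth_type_vlan(skb->protocol))
                skb = skb_vlan_untag(skb);
        return skb;
}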
Eric Dumazet | 2e4e441 | 2014-09-17 04:49:49 -0700 | [diff] [blame] | 5564 | |
Jiri Pirko | e219512 | 2014-11-19 14:05:01 +0100 | [diff] [blame] | 5565 | int skb_ensure_writable(struct sk_buff *skb, int write_len) |
| 5566 | { |
| 5567 | if (!pskb_may_pull(skb, write_len)) |
| 5568 | return -ENOMEM; |
| 5569 | |
| 5570 | if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) |
| 5571 | return 0; |
| 5572 | |
| 5573 | return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); |
| 5574 | } |
| 5575 | EXPORT_SYMBOL(skb_ensure_writable); |
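/*
 * Illustrative sketch (not part of the original file): making the IPv4
 * header private before rewriting the DS field, as packet manglers do.
 * The length is counted from skb->data, which is assumed to sit at the
 * network header; ipv4_change_dsfield() (from <net/dsfield.h>) also
 * fixes up the header checksum.
 */
static int example_set_dsfield(struct sk_buff *skb, u8 dsfield)
{
        int err = skb_ensure_writable(skb, sizeof(struct iphdr));

        if (err)
                return err;
        ipv4_change_dsfield(ip_hdr(skb), 0, dsfield);
        return 0;
}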
| 5576 | |
Shmulik Ladkani | bfca4c5 | 2016-09-19 19:11:09 +0300 | [diff] [blame] | 5577 | /* Remove the VLAN header from the packet and update the csum accordingly. |
| 5578 | * Expects an skb without skb_vlan_tag_present, i.e. with the VLAN tag still |
| 5579 | * in the payload. |
| 5579 | */ |
| 5580 | int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci) |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5581 | { |
| 5582 | struct vlan_hdr *vhdr; |
Shmulik Ladkani | b6a7920 | 2016-09-29 12:10:41 +0300 | [diff] [blame] | 5583 | int offset = skb->data - skb_mac_header(skb); |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5584 | int err; |
| 5585 | |
Shmulik Ladkani | b6a7920 | 2016-09-29 12:10:41 +0300 | [diff] [blame] | 5586 | if (WARN_ONCE(offset, |
| 5587 | "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n", |
| 5588 | offset)) { |
| 5589 | return -EINVAL; |
| 5590 | } |
| 5591 | |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5592 | err = skb_ensure_writable(skb, VLAN_ETH_HLEN); |
| 5593 | if (unlikely(err)) |
Shmulik Ladkani | b6a7920 | 2016-09-29 12:10:41 +0300 | [diff] [blame] | 5594 | return err; |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5595 | |
| 5596 | skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); |
| 5597 | |
| 5598 | vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); |
| 5599 | *vlan_tci = ntohs(vhdr->h_vlan_TCI); |
| 5600 | |
| 5601 | memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN); |
| 5602 | __skb_pull(skb, VLAN_HLEN); |
| 5603 | |
| 5604 | vlan_set_encap_proto(skb, vhdr); |
| 5605 | skb->mac_header += VLAN_HLEN; |
| 5606 | |
| 5607 | if (skb_network_offset(skb) < ETH_HLEN) |
| 5608 | skb_set_network_header(skb, ETH_HLEN); |
| 5609 | |
| 5610 | skb_reset_mac_len(skb); |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5611 | |
| 5612 | return err; |
| 5613 | } |
Shmulik Ladkani | bfca4c5 | 2016-09-19 19:11:09 +0300 | [diff] [blame] | 5614 | EXPORT_SYMBOL(__skb_vlan_pop); |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5615 | |
Shmulik Ladkani | b6a7920 | 2016-09-29 12:10:41 +0300 | [diff] [blame] | 5616 | /* Pop a vlan tag either from hwaccel or from payload. |
| 5617 | * Expects skb->data at mac header. |
| 5618 | */ |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5619 | int skb_vlan_pop(struct sk_buff *skb) |
| 5620 | { |
| 5621 | u16 vlan_tci; |
| 5622 | __be16 vlan_proto; |
| 5623 | int err; |
| 5624 | |
Jiri Pirko | df8a39d | 2015-01-13 17:13:44 +0100 | [diff] [blame] | 5625 | if (likely(skb_vlan_tag_present(skb))) { |
Michał Mirosław | b1817524 | 2018-11-09 00:18:02 +0100 | [diff] [blame] | 5626 | __vlan_hwaccel_clear_tag(skb); |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5627 | } else { |
Shmulik Ladkani | ecf4ee4 | 2016-09-20 12:48:37 +0300 | [diff] [blame] | 5628 | if (unlikely(!eth_type_vlan(skb->protocol))) |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5629 | return 0; |
| 5630 | |
| 5631 | err = __skb_vlan_pop(skb, &vlan_tci); |
| 5632 | if (err) |
| 5633 | return err; |
| 5634 | } |
| 5635 | /* move next vlan tag to hw accel tag */ |
Shmulik Ladkani | ecf4ee4 | 2016-09-20 12:48:37 +0300 | [diff] [blame] | 5636 | if (likely(!eth_type_vlan(skb->protocol))) |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5637 | return 0; |
| 5638 | |
| 5639 | vlan_proto = skb->protocol; |
| 5640 | err = __skb_vlan_pop(skb, &vlan_tci); |
| 5641 | if (unlikely(err)) |
| 5642 | return err; |
| 5643 | |
| 5644 | __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); |
| 5645 | return 0; |
| 5646 | } |
| 5647 | EXPORT_SYMBOL(skb_vlan_pop); |
| 5648 | |
Shmulik Ladkani | b6a7920 | 2016-09-29 12:10:41 +0300 | [diff] [blame] | 5649 | /* Push a vlan tag either into hwaccel or, if a hwaccel tag is already |
| 5650 | * present, into the payload. Expects skb->data at mac header. |
| 5651 | */ |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5652 | int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) |
| 5653 | { |
Jiri Pirko | df8a39d | 2015-01-13 17:13:44 +0100 | [diff] [blame] | 5654 | if (skb_vlan_tag_present(skb)) { |
Shmulik Ladkani | b6a7920 | 2016-09-29 12:10:41 +0300 | [diff] [blame] | 5655 | int offset = skb->data - skb_mac_header(skb); |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5656 | int err; |
| 5657 | |
Shmulik Ladkani | b6a7920 | 2016-09-29 12:10:41 +0300 | [diff] [blame] | 5658 | if (WARN_ONCE(offset, |
| 5659 | "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n", |
| 5660 | offset)) { |
| 5661 | return -EINVAL; |
| 5662 | } |
| 5663 | |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5664 | err = __vlan_insert_tag(skb, skb->vlan_proto, |
Jiri Pirko | df8a39d | 2015-01-13 17:13:44 +0100 | [diff] [blame] | 5665 | skb_vlan_tag_get(skb)); |
Shmulik Ladkani | b6a7920 | 2016-09-29 12:10:41 +0300 | [diff] [blame] | 5666 | if (err) |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5667 | return err; |
Daniel Borkmann | 9241e2d | 2016-04-16 02:27:58 +0200 | [diff] [blame] | 5668 | |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5669 | skb->protocol = skb->vlan_proto; |
| 5670 | skb->mac_len += VLAN_HLEN; |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5671 | |
Daniel Borkmann | 6b83d28 | 2016-02-20 00:29:30 +0100 | [diff] [blame] | 5672 | skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); |
Jiri Pirko | 93515d5 | 2014-11-19 14:05:02 +0100 | [diff] [blame] | 5673 | } |
| 5674 | __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); |
| 5675 | return 0; |
| 5676 | } |
| 5677 | EXPORT_SYMBOL(skb_vlan_push); |
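/*
 * Illustrative sketch (not part of the original file): rewriting the
 * outermost VLAN tag in the style of act_vlan's modify action - pop the
 * current tag, then push the new one. Both helpers require skb->data to
 * sit at the mac header.
 */
static int example_vlan_modify(struct sk_buff *skb, __be16 proto, u16 tci)
{
        int err = skb_vlan_pop(skb);

        if (err)
                return err;
        return skb_vlan_push(skb, proto, tci);
}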
| 5678 | |
Guillaume Nault | 19fbcb3 | 2020-10-03 00:44:28 +0200 | [diff] [blame] | 5679 | /** |
| 5680 | * skb_eth_pop() - Drop the Ethernet header at the head of a packet |
| 5681 | * |
| 5682 | * @skb: Socket buffer to modify |
| 5683 | * |
| 5684 | * Drop the Ethernet header of @skb. |
| 5685 | * |
| 5686 | * Expects that skb->data points to the mac header and that no VLAN tags are |
| 5687 | * present. |
| 5688 | * |
| 5689 | * Returns 0 on success, -errno otherwise. |
| 5690 | */ |
| 5691 | int skb_eth_pop(struct sk_buff *skb) |
| 5692 | { |
| 5693 | if (!pskb_may_pull(skb, ETH_HLEN) || skb_vlan_tagged(skb) || |
| 5694 | skb_network_offset(skb) < ETH_HLEN) |
| 5695 | return -EPROTO; |
| 5696 | |
| 5697 | skb_pull_rcsum(skb, ETH_HLEN); |
| 5698 | skb_reset_mac_header(skb); |
| 5699 | skb_reset_mac_len(skb); |
| 5700 | |
| 5701 | return 0; |
| 5702 | } |
| 5703 | EXPORT_SYMBOL(skb_eth_pop); |
| 5704 | |
| 5705 | /** |
| 5706 | * skb_eth_push() - Add a new Ethernet header at the head of a packet |
| 5707 | * |
| 5708 | * @skb: Socket buffer to modify |
| 5709 | * @dst: Destination MAC address of the new header |
| 5710 | * @src: Source MAC address of the new header |
| 5711 | * |
| 5712 | * Prepend @skb with a new Ethernet header. |
| 5713 | * |
| 5714 | * Expects that skb->data points to the mac header, which must be empty. |
| 5715 | * |
| 5716 | * Returns 0 on success, -errno otherwise. |
| 5717 | */ |
| 5718 | int skb_eth_push(struct sk_buff *skb, const unsigned char *dst, |
| 5719 | const unsigned char *src) |
| 5720 | { |
| 5721 | struct ethhdr *eth; |
| 5722 | int err; |
| 5723 | |
| 5724 | if (skb_network_offset(skb) || skb_vlan_tag_present(skb)) |
| 5725 | return -EPROTO; |
| 5726 | |
| 5727 | err = skb_cow_head(skb, sizeof(*eth)); |
| 5728 | if (err < 0) |
| 5729 | return err; |
| 5730 | |
| 5731 | skb_push(skb, sizeof(*eth)); |
| 5732 | skb_reset_mac_header(skb); |
| 5733 | skb_reset_mac_len(skb); |
| 5734 | |
| 5735 | eth = eth_hdr(skb); |
| 5736 | ether_addr_copy(eth->h_dest, dst); |
| 5737 | ether_addr_copy(eth->h_source, src); |
| 5738 | eth->h_proto = skb->protocol; |
| 5739 | |
| 5740 | skb_postpush_rcsum(skb, eth, sizeof(*eth)); |
| 5741 | |
| 5742 | return 0; |
| 5743 | } |
| 5744 | EXPORT_SYMBOL(skb_eth_push); |
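/*
 * Illustrative sketch (not part of the original file): stripping the L2
 * header before L3 processing and prepending a fresh one afterwards, in
 * the style of openvswitch's pop_eth/push_eth actions. dst and src are
 * assumed to come from control-plane state.
 */
static int example_eth_rewrite(struct sk_buff *skb,
                               const unsigned char *dst,
                               const unsigned char *src)
{
        int err = skb_eth_pop(skb);     /* strip the old L2 header */

        if (err)
                return err;
        /* ... L3 processing would happen here ... */
        return skb_eth_push(skb, dst, src);     /* prepend a fresh one */
}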
| 5745 | |
John Hurley | 8822e27 | 2019-07-07 15:01:54 +0100 | [diff] [blame] | 5746 | /* Update the ethertype of hdr and the skb csum value if required. */ |
| 5747 | static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr, |
| 5748 | __be16 ethertype) |
| 5749 | { |
| 5750 | if (skb->ip_summed == CHECKSUM_COMPLETE) { |
| 5751 | __be16 diff[] = { ~hdr->h_proto, ethertype }; |
| 5752 | |
| 5753 | skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); |
| 5754 | } |
| 5755 | |
| 5756 | hdr->h_proto = ethertype; |
| 5757 | } |
| 5758 | |
| 5759 | /** |
Martin Varghese | e7dbfed | 2019-12-21 08:50:01 +0530 | [diff] [blame] | 5760 | * skb_mpls_push() - push a new MPLS header after mac_len bytes from start of |
| 5761 | * the packet |
John Hurley | 8822e27 | 2019-07-07 15:01:54 +0100 | [diff] [blame] | 5762 | * |
| 5763 | * @skb: buffer |
| 5764 | * @mpls_lse: MPLS label stack entry to push |
| 5765 | * @mpls_proto: ethertype of the new MPLS header (expects 0x8847 or 0x8848) |
Davide Caratti | fa4e0f8 | 2019-10-12 13:55:07 +0200 | [diff] [blame] | 5766 | * @mac_len: length of the MAC header |
Martin Varghese | e7dbfed | 2019-12-21 08:50:01 +0530 | [diff] [blame] | 5767 | * @ethernet: flag to indicate if the resulting packet after skb_mpls_push is |
| 5768 | * ethernet |
John Hurley | 8822e27 | 2019-07-07 15:01:54 +0100 | [diff] [blame] | 5769 | * |
| 5770 | * Expects skb->data at mac header. |
| 5771 | * |
| 5772 | * Returns 0 on success, -errno otherwise. |
| 5773 | */ |
Davide Caratti | fa4e0f8 | 2019-10-12 13:55:07 +0200 | [diff] [blame] | 5774 | int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto, |
Martin Varghese | d04ac22 | 2019-12-05 05:57:22 +0530 | [diff] [blame] | 5775 | int mac_len, bool ethernet) |
John Hurley | 8822e27 | 2019-07-07 15:01:54 +0100 | [diff] [blame] | 5776 | { |
| 5777 | struct mpls_shim_hdr *lse; |
| 5778 | int err; |
| 5779 | |
| 5780 | if (unlikely(!eth_p_mpls(mpls_proto))) |
| 5781 | return -EINVAL; |
| 5782 | |
| 5783 | /* Networking stack does not allow simultaneous Tunnel and MPLS GSO. */ |
| 5784 | if (skb->encapsulation) |
| 5785 | return -EINVAL; |
| 5786 | |
| 5787 | err = skb_cow_head(skb, MPLS_HLEN); |
| 5788 | if (unlikely(err)) |
| 5789 | return err; |
| 5790 | |
| 5791 | if (!skb->inner_protocol) { |
Martin Varghese | e7dbfed | 2019-12-21 08:50:01 +0530 | [diff] [blame] | 5792 | skb_set_inner_network_header(skb, skb_network_offset(skb)); |
John Hurley | 8822e27 | 2019-07-07 15:01:54 +0100 | [diff] [blame] | 5793 | skb_set_inner_protocol(skb, skb->protocol); |
| 5794 | } |
| 5795 | |
| 5796 | skb_push(skb, MPLS_HLEN); |
| 5797 | memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb), |
Davide Caratti | fa4e0f8 | 2019-10-12 13:55:07 +0200 | [diff] [blame] | 5798 | mac_len); |
John Hurley | 8822e27 | 2019-07-07 15:01:54 +0100 | [diff] [blame] | 5799 | skb_reset_mac_header(skb); |
Davide Caratti | fa4e0f8 | 2019-10-12 13:55:07 +0200 | [diff] [blame] | 5800 | skb_set_network_header(skb, mac_len); |
Martin Varghese | e7dbfed | 2019-12-21 08:50:01 +0530 | [diff] [blame] | 5801 | skb_reset_mac_len(skb); |
John Hurley | 8822e27 | 2019-07-07 15:01:54 +0100 | [diff] [blame] | 5802 | |
| 5803 | lse = mpls_hdr(skb); |
| 5804 | lse->label_stack_entry = mpls_lse; |
| 5805 | skb_postpush_rcsum(skb, lse, MPLS_HLEN); |
| 5806 | |
Guillaume Nault | 4296adc | 2020-10-02 21:53:08 +0200 | [diff] [blame] | 5807 | if (ethernet && mac_len >= ETH_HLEN) |
John Hurley | 8822e27 | 2019-07-07 15:01:54 +0100 | [diff] [blame] | 5808 | skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto); |
| 5809 | skb->protocol = mpls_proto; |
| 5810 | |
| 5811 | return 0; |
| 5812 | } |
| 5813 | EXPORT_SYMBOL_GPL(skb_mpls_push); |
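/*
 * Illustrative sketch (not part of the original file): an OVS-style
 * push_mpls action adding a bottom-of-stack label to an Ethernet frame.
 * label and ttl are assumed inputs; the MPLS_LS_* shift macros come from
 * <uapi/linux/mpls.h> and ETH_P_MPLS_UC is the unicast MPLS ethertype.
 */
static int example_push_mpls(struct sk_buff *skb, u32 label, u8 ttl)
{
        __be32 lse = cpu_to_be32((label << MPLS_LS_LABEL_SHIFT) |
                                 (1 << MPLS_LS_S_SHIFT) |
                                 ((u32)ttl << MPLS_LS_TTL_SHIFT));

        return skb_mpls_push(skb, lse, htons(ETH_P_MPLS_UC),
                             skb->mac_len, true);
}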
| 5814 | |
Eric Dumazet | 2e4e441 | 2014-09-17 04:49:49 -0700 | [diff] [blame] | 5815 | /** |
John Hurley | ed246ce | 2019-07-07 15:01:55 +0100 | [diff] [blame] | 5816 | * skb_mpls_pop() - pop the outermost MPLS header |
| 5817 | * |
| 5818 | * @skb: buffer |
| 5819 | * @next_proto: ethertype of header after popped MPLS header |
Davide Caratti | fa4e0f8 | 2019-10-12 13:55:07 +0200 | [diff] [blame] | 5820 | * @mac_len: length of the MAC header |
Martin Varghese | 76f99f9 | 2019-12-21 08:50:23 +0530 | [diff] [blame] | 5821 | * @ethernet: flag to indicate if the packet is ethernet |
John Hurley | ed246ce | 2019-07-07 15:01:55 +0100 | [diff] [blame] | 5822 | * |
| 5823 | * Expects skb->data at mac header. |
| 5824 | * |
| 5825 | * Returns 0 on success, -errno otherwise. |
| 5826 | */ |
Martin Varghese | 040b5cf | 2019-12-02 10:49:51 +0530 | [diff] [blame] | 5827 | int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len, |
| 5828 | bool ethernet) |
John Hurley | ed246ce | 2019-07-07 15:01:55 +0100 | [diff] [blame] | 5829 | { |
| 5830 | int err; |
| 5831 | |
| 5832 | if (unlikely(!eth_p_mpls(skb->protocol))) |
Davide Caratti | dedc5a0 | 2019-10-12 13:55:06 +0200 | [diff] [blame] | 5833 | return 0; |
John Hurley | ed246ce | 2019-07-07 15:01:55 +0100 | [diff] [blame] | 5834 | |
Davide Caratti | fa4e0f8 | 2019-10-12 13:55:07 +0200 | [diff] [blame] | 5835 | err = skb_ensure_writable(skb, mac_len + MPLS_HLEN); |
John Hurley | ed246ce | 2019-07-07 15:01:55 +0100 | [diff] [blame] | 5836 | if (unlikely(err)) |
| 5837 | return err; |
| 5838 | |
| 5839 | skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN); |
| 5840 | memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb), |
Davide Caratti | fa4e0f8 | 2019-10-12 13:55:07 +0200 | [diff] [blame] | 5841 | mac_len); |
John Hurley | ed246ce | 2019-07-07 15:01:55 +0100 | [diff] [blame] | 5842 | |
| 5843 | __skb_pull(skb, MPLS_HLEN); |
| 5844 | skb_reset_mac_header(skb); |
Davide Caratti | fa4e0f8 | 2019-10-12 13:55:07 +0200 | [diff] [blame] | 5845 | skb_set_network_header(skb, mac_len); |
John Hurley | ed246ce | 2019-07-07 15:01:55 +0100 | [diff] [blame] | 5846 | |
Guillaume Nault | 4296adc | 2020-10-02 21:53:08 +0200 | [diff] [blame] | 5847 | if (ethernet && mac_len >= ETH_HLEN) { |
John Hurley | ed246ce | 2019-07-07 15:01:55 +0100 | [diff] [blame] | 5848 | struct ethhdr *hdr; |
| 5849 | |
| 5850 | /* use mpls_hdr() to locate the ethertype field, accounting for VLANs. */ |
| 5851 | hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN); |
| 5852 | skb_mod_eth_type(skb, hdr, next_proto); |
| 5853 | } |
| 5854 | skb->protocol = next_proto; |
| 5855 | |
| 5856 | return 0; |
| 5857 | } |
| 5858 | EXPORT_SYMBOL_GPL(skb_mpls_pop); |
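/*
 * Illustrative sketch (not part of the original file): popping the last
 * label from an Ethernet frame when control-plane state says the inner
 * packet is IPv4. For a bare L3 packet, ethernet would be false.
 */
static int example_pop_last_label(struct sk_buff *skb)
{
        return skb_mpls_pop(skb, htons(ETH_P_IP), skb->mac_len, true);
}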
| 5859 | |
| 5860 | /** |
John Hurley | d27cf5c | 2019-07-07 15:01:56 +0100 | [diff] [blame] | 5861 | * skb_mpls_update_lse() - modify outermost MPLS header and update csum |
| 5862 | * |
| 5863 | * @skb: buffer |
| 5864 | * @mpls_lse: new MPLS label stack entry to update to |
| 5865 | * |
| 5866 | * Expects skb->data at mac header. |
| 5867 | * |
| 5868 | * Returns 0 on success, -errno otherwise. |
| 5869 | */ |
| 5870 | int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse) |
| 5871 | { |
| 5872 | int err; |
| 5873 | |
| 5874 | if (unlikely(!eth_p_mpls(skb->protocol))) |
| 5875 | return -EINVAL; |
| 5876 | |
| 5877 | err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); |
| 5878 | if (unlikely(err)) |
| 5879 | return err; |
| 5880 | |
| 5881 | if (skb->ip_summed == CHECKSUM_COMPLETE) { |
| 5882 | __be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse }; |
| 5883 | |
| 5884 | skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); |
| 5885 | } |
| 5886 | |
| 5887 | mpls_hdr(skb)->label_stack_entry = mpls_lse; |
| 5888 | |
| 5889 | return 0; |
| 5890 | } |
| 5891 | EXPORT_SYMBOL_GPL(skb_mpls_update_lse); |
| 5892 | |
| 5893 | /** |
John Hurley | 2a2ea50 | 2019-07-07 15:01:57 +0100 | [diff] [blame] | 5894 | * skb_mpls_dec_ttl() - decrement the TTL of the outermost MPLS header |
| 5895 | * |
| 5896 | * @skb: buffer |
| 5897 | * |
| 5898 | * Expects skb->data at mac header. |
| 5899 | * |
| 5900 | * Returns 0 on success, -errno otherwise. |
| 5901 | */ |
| 5902 | int skb_mpls_dec_ttl(struct sk_buff *skb) |
| 5903 | { |
| 5904 | u32 lse; |
| 5905 | u8 ttl; |
| 5906 | |
| 5907 | if (unlikely(!eth_p_mpls(skb->protocol))) |
| 5908 | return -EINVAL; |
| 5909 | |
Davide Caratti | 13de4ed | 2020-12-03 10:58:21 +0100 | [diff] [blame] | 5910 | if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN)) |
| 5911 | return -ENOMEM; |
| 5912 | |
John Hurley | 2a2ea50 | 2019-07-07 15:01:57 +0100 | [diff] [blame] | 5913 | lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry); |
| 5914 | ttl = (lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT; |
| 5915 | if (!--ttl) |
| 5916 | return -EINVAL; |
| 5917 | |
| 5918 | lse &= ~MPLS_LS_TTL_MASK; |
| 5919 | lse |= ttl << MPLS_LS_TTL_SHIFT; |
| 5920 | |
| 5921 | return skb_mpls_update_lse(skb, cpu_to_be32(lse)); |
| 5922 | } |
| 5923 | EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl); |
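/*
 * Illustrative sketch (not part of the original file): an MPLS forwarding
 * step decrementing the TTL and dropping expired packets, as a
 * dec_mpls_ttl action would.
 */
static int example_mpls_ttl_step(struct sk_buff *skb)
{
        int err = skb_mpls_dec_ttl(skb);

        if (err)
                kfree_skb(skb); /* TTL hit zero or header not pullable */
        return err;
}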
| 5924 | |
| 5925 | /** |
Eric Dumazet | 2e4e441 | 2014-09-17 04:49:49 -0700 | [diff] [blame] | 5926 | * alloc_skb_with_frags - allocate skb with page frags |
| 5927 | * |
Masanari Iida | de3f0d0 | 2014-10-09 12:58:08 +0900 | [diff] [blame] | 5928 | * @header_len: size of linear part |
| 5929 | * @data_len: needed length in frags |
| 5930 | * @max_page_order: max page order desired. |
| 5931 | * @errcode: pointer to error code if any |
| 5932 | * @gfp_mask: allocation mask |
Eric Dumazet | 2e4e441 | 2014-09-17 04:49:49 -0700 | [diff] [blame] | 5933 | * |
| 5934 | * This can be used to allocate a paged skb, given a maximal order for frags. |
| 5935 | */ |
| 5936 | struct sk_buff *alloc_skb_with_frags(unsigned long header_len, |
| 5937 | unsigned long data_len, |
| 5938 | int max_page_order, |
| 5939 | int *errcode, |
| 5940 | gfp_t gfp_mask) |
| 5941 | { |
| 5942 | int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT; |
| 5943 | unsigned long chunk; |
| 5944 | struct sk_buff *skb; |
| 5945 | struct page *page; |
Eric Dumazet | 2e4e441 | 2014-09-17 04:49:49 -0700 | [diff] [blame] | 5946 | int i; |
| 5947 | |
| 5948 | *errcode = -EMSGSIZE; |
| 5949 | /* Note this test could be relaxed if we succeed in allocating |
| 5950 | * high-order pages... |
| 5951 | */ |
| 5952 | if (npages > MAX_SKB_FRAGS) |
| 5953 | return NULL; |
| 5954 | |
Eric Dumazet | 2e4e441 | 2014-09-17 04:49:49 -0700 | [diff] [blame] | 5955 | *errcode = -ENOBUFS; |
David Rientjes | f8c468e | 2019-01-02 13:01:43 -0800 | [diff] [blame] | 5956 | skb = alloc_skb(header_len, gfp_mask); |
Eric Dumazet | 2e4e441 | 2014-09-17 04:49:49 -0700 | [diff] [blame] | 5957 | if (!skb) |
| 5958 | return NULL; |
| 5959 | |
| 5960 | skb->truesize += npages << PAGE_SHIFT; |
| 5961 | |
| 5962 | for (i = 0; npages > 0; i++) { |
| 5963 | int order = max_page_order; |
| 5964 | |
| 5965 | while (order) { |
| 5966 | if (npages >= 1 << order) { |
Mel Gorman | d0164ad | 2015-11-06 16:28:21 -0800 | [diff] [blame] | 5967 | page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) | |
Eric Dumazet | 2e4e441 | 2014-09-17 04:49:49 -0700 | [diff] [blame] | 5968 | __GFP_COMP | |
Michal Hocko | d14b56f | 2018-06-28 17:53:06 +0200 | [diff] [blame] | 5969 | __GFP_NOWARN, |
Eric Dumazet | 2e4e441 | 2014-09-17 04:49:49 -0700 | [diff] [blame] | 5970 | order); |
| 5971 | if (page) |
| 5972 | goto fill_page; |
| 5973 | /* Do not retry other high order allocations */ |
| 5974 | order = 1; |
| 5975 | max_page_order = 0; |
| 5976 | } |
| 5977 | order--; |
| 5978 | } |
| 5979 | page = alloc_page(gfp_mask); |
| 5980 | if (!page) |
| 5981 | goto failure; |
| 5982 | fill_page: |
| 5983 | chunk = min_t(unsigned long, data_len, |
| 5984 | PAGE_SIZE << order); |
| 5985 | skb_fill_page_desc(skb, i, page, 0, chunk); |
| 5986 | data_len -= chunk; |
| 5987 | npages -= 1 << order; |
| 5988 | } |
| 5989 | return skb; |
| 5990 | |
| 5991 | failure: |
| 5992 | kfree_skb(skb); |
| 5993 | return NULL; |
| 5994 | } |
| 5995 | EXPORT_SYMBOL(alloc_skb_with_frags); |
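| | /* Usage sketch (hypothetical sizes; the page order and error handling |
| | * mirror what callers such as sock_alloc_send_pskb() do): |
| | * |
| | *	int errcode; |
| | *	struct sk_buff *skb; |
| | * |
| | *	skb = alloc_skb_with_frags(128, 32 * 1024, |
| | *				   PAGE_ALLOC_COSTLY_ORDER, |
| | *				   &errcode, GFP_KERNEL); |
| | *	if (!skb) |
| | *		return ERR_PTR(errcode);	// -EMSGSIZE or -ENOBUFS |
| | */ |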
Sowmini Varadhan | 6fa01cc | 2016-04-22 18:36:35 -0700 | [diff] [blame] | 5996 | |
| 5997 | /* carve out the first off bytes from skb when off < headlen */ |
| 5998 | static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off, |
| 5999 | const int headlen, gfp_t gfp_mask) |
| 6000 | { |
| 6001 | int i; |
| 6002 | int size = skb_end_offset(skb); |
| 6003 | int new_hlen = headlen - off; |
| 6004 | u8 *data; |
Sowmini Varadhan | 6fa01cc | 2016-04-22 18:36:35 -0700 | [diff] [blame] | 6005 | |
| 6006 | size = SKB_DATA_ALIGN(size); |
| 6007 | |
| 6008 | if (skb_pfmemalloc(skb)) |
| 6009 | gfp_mask |= __GFP_MEMALLOC; |
| 6010 | data = kmalloc_reserve(size + |
| 6011 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), |
| 6012 | gfp_mask, NUMA_NO_NODE, NULL); |
| 6013 | if (!data) |
| 6014 | return -ENOMEM; |
| 6015 | |
| 6016 | size = SKB_WITH_OVERHEAD(ksize(data)); |
| 6017 | |
| 6018 | /* Copy real data, and all frags */ |
| 6019 | skb_copy_from_linear_data_offset(skb, off, data, new_hlen); |
| 6020 | skb->len -= off; |
| 6021 | |
| 6022 | memcpy((struct skb_shared_info *)(data + size), |
| 6023 | skb_shinfo(skb), |
| 6024 | offsetof(struct skb_shared_info, |
| 6025 | frags[skb_shinfo(skb)->nr_frags])); |
| 6026 | if (skb_cloned(skb)) { |
| 6027 | /* drop the old head gracefully */ |
| 6028 | if (skb_orphan_frags(skb, gfp_mask)) { |
| 6029 | kfree(data); |
| 6030 | return -ENOMEM; |
| 6031 | } |
| 6032 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) |
| 6033 | skb_frag_ref(skb, i); |
| 6034 | if (skb_has_frag_list(skb)) |
| 6035 | skb_clone_fraglist(skb); |
| 6036 | skb_release_data(skb); |
| 6037 | } else { |
| 6038 | /* we can reuse the existing refcount - all we did was |
| 6039 | * relocate the values |
| 6040 | */ |
| 6041 | skb_free_head(skb); |
| 6042 | } |
| 6043 | |
Sowmini Varadhan | 6fa01cc | 2016-04-22 18:36:35 -0700 | [diff] [blame] | 6044 | skb->head = data; |
| 6045 | skb->data = data; |
| 6046 | skb->head_frag = 0; |
| 6047 | #ifdef NET_SKBUFF_DATA_USES_OFFSET |
| 6048 | skb->end = size; |
Sowmini Varadhan | 6fa01cc | 2016-04-22 18:36:35 -0700 | [diff] [blame] | 6049 | #else |
| 6050 | skb->end = skb->head + size; |
| 6051 | #endif |
| 6052 | skb_set_tail_pointer(skb, skb_headlen(skb)); |
| 6053 | skb_headers_offset_update(skb, 0); |
| 6054 | skb->cloned = 0; |
| 6055 | skb->hdr_len = 0; |
| 6056 | skb->nohdr = 0; |
| 6057 | atomic_set(&skb_shinfo(skb)->dataref, 1); |
| 6058 | |
| 6059 | return 0; |
| 6060 | } |
| 6061 | |
| 6062 | static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp); |
| 6063 | |
| 6064 | /* carve out the first eat bytes from skb's frag_list. May recurse into |
| 6065 | * pskb_carve() |
| 6066 | */ |
| 6067 | static int pskb_carve_frag_list(struct sk_buff *skb, |
| 6068 | struct skb_shared_info *shinfo, int eat, |
| 6069 | gfp_t gfp_mask) |
| 6070 | { |
| 6071 | struct sk_buff *list = shinfo->frag_list; |
| 6072 | struct sk_buff *clone = NULL; |
| 6073 | struct sk_buff *insp = NULL; |
| 6074 | |
| 6075 | do { |
| 6076 | if (!list) { |
| 6077 | pr_err("Not enough bytes to eat. Want %d\n", eat); |
| 6078 | return -EFAULT; |
| 6079 | } |
| 6080 | if (list->len <= eat) { |
| 6081 | /* Eaten as whole. */ |
| 6082 | eat -= list->len; |
| 6083 | list = list->next; |
| 6084 | insp = list; |
| 6085 | } else { |
| 6086 | /* Eaten partially. */ |
| 6087 | if (skb_shared(list)) { |
| 6088 | clone = skb_clone(list, gfp_mask); |
| 6089 | if (!clone) |
| 6090 | return -ENOMEM; |
| 6091 | insp = list->next; |
| 6092 | list = clone; |
| 6093 | } else { |
| 6094 | /* This may be pulled without problems. */ |
| 6095 | insp = list; |
| 6096 | } |
| 6097 | if (pskb_carve(list, eat, gfp_mask) < 0) { |
| 6098 | kfree_skb(clone); |
| 6099 | return -ENOMEM; |
| 6100 | } |
| 6101 | break; |
| 6102 | } |
| 6103 | } while (eat); |
| 6104 | |
| 6105 | /* Free pulled out fragments. */ |
| 6106 | while ((list = shinfo->frag_list) != insp) { |
| 6107 | shinfo->frag_list = list->next; |
| 6108 | kfree_skb(list); |
| 6109 | } |
| 6110 | /* And insert new clone at head. */ |
| 6111 | if (clone) { |
| 6112 | clone->next = list; |
| 6113 | shinfo->frag_list = clone; |
| 6114 | } |
| 6115 | return 0; |
| 6116 | } |
| 6117 | |
| 6118 | /* carve off the first off bytes from skb. The split line (off) is in |
| 6119 | * the non-linear part of the skb |
| 6120 | */ |
| 6121 | static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off, |
| 6122 | int pos, gfp_t gfp_mask) |
| 6123 | { |
| 6124 | int i, k = 0; |
| 6125 | int size = skb_end_offset(skb); |
| 6126 | u8 *data; |
| 6127 | const int nfrags = skb_shinfo(skb)->nr_frags; |
| 6128 | struct skb_shared_info *shinfo; |
Sowmini Varadhan | 6fa01cc | 2016-04-22 18:36:35 -0700 | [diff] [blame] | 6129 | |
| 6130 | size = SKB_DATA_ALIGN(size); |
| 6131 | |
| 6132 | if (skb_pfmemalloc(skb)) |
| 6133 | gfp_mask |= __GFP_MEMALLOC; |
| 6134 | data = kmalloc_reserve(size + |
| 6135 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), |
| 6136 | gfp_mask, NUMA_NO_NODE, NULL); |
| 6137 | if (!data) |
| 6138 | return -ENOMEM; |
| 6139 | |
| 6140 | size = SKB_WITH_OVERHEAD(ksize(data)); |
| 6141 | |
| 6142 | memcpy((struct skb_shared_info *)(data + size), |
Miaohe Lin | e3ec1e8 | 2020-08-15 04:48:53 -0400 | [diff] [blame] | 6143 | skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0])); |
Sowmini Varadhan | 6fa01cc | 2016-04-22 18:36:35 -0700 | [diff] [blame] | 6144 | if (skb_orphan_frags(skb, gfp_mask)) { |
| 6145 | kfree(data); |
| 6146 | return -ENOMEM; |
| 6147 | } |
| 6148 | shinfo = (struct skb_shared_info *)(data + size); |
| 6149 | for (i = 0; i < nfrags; i++) { |
| 6150 | int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]); |
| 6151 | |
| 6152 | if (pos + fsize > off) { |
| 6153 | shinfo->frags[k] = skb_shinfo(skb)->frags[i]; |
| 6154 | |
| 6155 | if (pos < off) { |
| 6156 | /* Split frag. |
| 6157 | * Two options in this case: |
| 6158 | * 1. Move the whole frag to the second |
| 6159 | * part, if possible (mandatory e.g. for |
| 6160 | * TUX, where splitting is expensive). |
| 6161 | * 2. Split the frag accurately at off, |
| 6162 | * which is what we do here. |
| 6163 | */ |
Jonathan Lemon | b54c9d5 | 2019-07-30 07:40:33 -0700 | [diff] [blame] | 6164 | skb_frag_off_add(&shinfo->frags[0], off - pos); |
Sowmini Varadhan | 6fa01cc | 2016-04-22 18:36:35 -0700 | [diff] [blame] | 6165 | skb_frag_size_sub(&shinfo->frags[0], off - pos); |
| 6166 | } |
| 6167 | skb_frag_ref(skb, i); |
| 6168 | k++; |
| 6169 | } |
| 6170 | pos += fsize; |
| 6171 | } |
| 6172 | shinfo->nr_frags = k; |
| 6173 | if (skb_has_frag_list(skb)) |
| 6174 | skb_clone_fraglist(skb); |
| 6175 | |
Miaohe Lin | eabe861 | 2020-08-15 04:46:41 -0400 | [diff] [blame] | 6176 | /* split line is in frag list */ |
| 6177 | if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) { |
| 6178 | /* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */ |
| 6179 | if (skb_has_frag_list(skb)) |
| 6180 | kfree_skb_list(skb_shinfo(skb)->frag_list); |
| 6181 | kfree(data); |
| 6182 | return -ENOMEM; |
Sowmini Varadhan | 6fa01cc | 2016-04-22 18:36:35 -0700 | [diff] [blame] | 6183 | } |
| 6184 | skb_release_data(skb); |
| 6185 | |
Sowmini Varadhan | 6fa01cc | 2016-04-22 18:36:35 -0700 | [diff] [blame] | 6186 | skb->head = data; |
| 6187 | skb->head_frag = 0; |
| 6188 | skb->data = data; |
| 6189 | #ifdef NET_SKBUFF_DATA_USES_OFFSET |
| 6190 | skb->end = size; |
Sowmini Varadhan | 6fa01cc | 2016-04-22 18:36:35 -0700 | [diff] [blame] | 6191 | #else |
| 6192 | skb->end = skb->head + size; |
| 6193 | #endif |
| 6194 | skb_reset_tail_pointer(skb); |
| 6195 | skb_headers_offset_update(skb, 0); |
| 6196 | skb->cloned = 0; |
| 6197 | skb->hdr_len = 0; |
| 6198 | skb->nohdr = 0; |
| 6199 | skb->len -= off; |
| 6200 | skb->data_len = skb->len; |
| 6201 | atomic_set(&skb_shinfo(skb)->dataref, 1); |
| 6202 | return 0; |
| 6203 | } |
| 6204 | |
| 6205 | /* remove len bytes from the beginning of the skb */ |
| 6206 | static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp) |
| 6207 | { |
| 6208 | int headlen = skb_headlen(skb); |
| 6209 | |
| 6210 | if (len < headlen) |
| 6211 | return pskb_carve_inside_header(skb, len, headlen, gfp); |
| 6212 | else |
| 6213 | return pskb_carve_inside_nonlinear(skb, len, headlen, gfp); |
| 6214 | } |
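| | /* Worked example (illustrative numbers): with skb_headlen(skb) == 64, |
| | * pskb_carve(skb, 40, gfp) trims inside the linear header, while |
| | * pskb_carve(skb, 200, gfp) rebuilds the head and walks the frags |
| | * (and, if the split line lies beyond them, the frag_list). |
| | */ |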
| 6215 | |
| 6216 | /* Extract to_copy bytes starting at off from skb, and return them in |
| 6217 | * a new skb |
| 6218 | */ |
| 6219 | struct sk_buff *pskb_extract(struct sk_buff *skb, int off, |
| 6220 | int to_copy, gfp_t gfp) |
| 6221 | { |
| 6222 | struct sk_buff *clone = skb_clone(skb, gfp); |
| 6223 | |
| 6224 | if (!clone) |
| 6225 | return NULL; |
| 6226 | |
| 6227 | if (pskb_carve(clone, off, gfp) < 0 || |
| 6228 | pskb_trim(clone, to_copy)) { |
| 6229 | kfree_skb(clone); |
| 6230 | return NULL; |
| 6231 | } |
| 6232 | return clone; |
| 6233 | } |
| 6234 | EXPORT_SYMBOL(pskb_extract); |
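| | /* Usage sketch (simplified from the RDS-over-TCP receive path): peel |
| | * one protocol segment out of a larger skb; payload pages are shared |
| | * via refcounts, not copied. |
| | * |
| | *	struct sk_buff *seg; |
| | * |
| | *	seg = pskb_extract(skb, offset, seg_len, GFP_ATOMIC); |
| | *	if (!seg) |
| | *		return -ENOMEM;	// original skb is left untouched |
| | */ |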
Eric Dumazet | c8c8b12 | 2016-12-07 09:19:33 -0800 | [diff] [blame] | 6235 | |
| 6236 | /** |
| 6237 | * skb_condense - try to get rid of fragments/frag_list if possible |
| 6238 | * @skb: buffer |
| 6239 | * |
| 6240 | * Can be used to save memory before skb is added to a busy queue. |
| 6241 | * If the packet has bytes in frags and enough tail room in skb->head, |
| 6242 | * pull all of them, so that we can free the frags right now and adjust |
| 6243 | * truesize. |
| 6244 | * Notes: |
| 6245 | * We do not reallocate skb->head, thus this cannot fail. |
| 6246 | * Caller must re-evaluate skb->truesize if needed. |
| 6247 | */ |
| 6248 | void skb_condense(struct sk_buff *skb) |
| 6249 | { |
Eric Dumazet | 3174fed | 2016-12-09 08:02:05 -0800 | [diff] [blame] | 6250 | if (skb->data_len) { |
| 6251 | if (skb->data_len > skb->end - skb->tail || |
| 6252 | skb_cloned(skb)) |
| 6253 | return; |
Eric Dumazet | c8c8b12 | 2016-12-07 09:19:33 -0800 | [diff] [blame] | 6254 | |
Eric Dumazet | 3174fed | 2016-12-09 08:02:05 -0800 | [diff] [blame] | 6255 | /* Nice, we can free page frag(s) right now */ |
| 6256 | __pskb_pull_tail(skb, skb->data_len); |
| 6257 | } |
| 6258 | /* At this point, skb->truesize might be overestimated, |
| 6259 | * because the skb had fragments, and fragments do not record |
| 6260 | * their truesize. |
| 6261 | * When we pulled their content into skb->head, the fragments |
| 6262 | * were freed, but __pskb_pull_tail() could not possibly |
| 6263 | * adjust skb->truesize, not knowing the frag truesize. |
Eric Dumazet | c8c8b12 | 2016-12-07 09:19:33 -0800 | [diff] [blame] | 6264 | */ |
| 6265 | skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); |
| 6266 | } |
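| | /* Usage sketch (in the spirit of the TCP receive path): condense |
| | * before queueing, then account the possibly reduced truesize. |
| | * |
| | *	skb_condense(skb); |
| | *	skb_set_owner_r(skb, sk);	// charges skb->truesize to sk |
| | */ |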
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 6267 | |
| 6268 | #ifdef CONFIG_SKB_EXTENSIONS |
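| | /* Extension storage layout: a single allocation holds struct skb_ext |
| | * followed by the payloads of the active extension ids. ext->offset[id] |
| | * is the payload offset in SKB_EXT_ALIGN_VALUE units; ext->chunks is |
| | * the total size in the same units. |
| | */ |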
| 6269 | static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id) |
| 6270 | { |
| 6271 | return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE); |
| 6272 | } |
| 6273 | |
Paolo Abeni | 8b69a80 | 2020-01-09 07:59:24 -0800 | [diff] [blame] | 6274 | /** |
| 6275 | * __skb_ext_alloc - allocate a new skb extensions storage |
| 6276 | * |
Florian Westphal | 4930f48 | 2020-05-16 10:46:23 +0200 | [diff] [blame] | 6277 | * @flags: See kmalloc(). |
| 6278 | * |
Paolo Abeni | 8b69a80 | 2020-01-09 07:59:24 -0800 | [diff] [blame] | 6279 | * Returns the newly allocated pointer. The pointer can later be attached |
| 6280 | * to an skb via __skb_ext_set(). |
| 6281 | * Note: the caller must handle the skb_ext as opaque data. |
| 6282 | */ |
Florian Westphal | 4930f48 | 2020-05-16 10:46:23 +0200 | [diff] [blame] | 6283 | struct skb_ext *__skb_ext_alloc(gfp_t flags) |
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 6284 | { |
Florian Westphal | 4930f48 | 2020-05-16 10:46:23 +0200 | [diff] [blame] | 6285 | struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, flags); |
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 6286 | |
| 6287 | if (new) { |
| 6288 | memset(new->offset, 0, sizeof(new->offset)); |
| 6289 | refcount_set(&new->refcnt, 1); |
| 6290 | } |
| 6291 | |
| 6292 | return new; |
| 6293 | } |
| 6294 | |
Florian Westphal | 4165079 | 2018-12-18 17:15:27 +0100 | [diff] [blame] | 6295 | static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old, |
| 6296 | unsigned int old_active) |
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 6297 | { |
| 6298 | struct skb_ext *new; |
| 6299 | |
| 6300 | if (refcount_read(&old->refcnt) == 1) |
| 6301 | return old; |
| 6302 | |
| 6303 | new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC); |
| 6304 | if (!new) |
| 6305 | return NULL; |
| 6306 | |
| 6307 | memcpy(new, old, old->chunks * SKB_EXT_ALIGN_VALUE); |
| 6308 | refcount_set(&new->refcnt, 1); |
| 6309 | |
Florian Westphal | 4165079 | 2018-12-18 17:15:27 +0100 | [diff] [blame] | 6310 | #ifdef CONFIG_XFRM |
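| | 	/* The memcpy() above duplicated the sec_path's xfrm_state |
| | 	 * pointers; take a hold on each state so the new copy owns |
| | 	 * its own references (dropped later via skb_ext_put_sp()). |
| | 	 */ |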
| 6311 | if (old_active & (1 << SKB_EXT_SEC_PATH)) { |
| 6312 | struct sec_path *sp = skb_ext_get_ptr(old, SKB_EXT_SEC_PATH); |
| 6313 | unsigned int i; |
| 6314 | |
| 6315 | for (i = 0; i < sp->len; i++) |
| 6316 | xfrm_state_hold(sp->xvec[i]); |
| 6317 | } |
| 6318 | #endif |
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 6319 | __skb_ext_put(old); |
| 6320 | return new; |
| 6321 | } |
| 6322 | |
| 6323 | /** |
Paolo Abeni | 8b69a80 | 2020-01-09 07:59:24 -0800 | [diff] [blame] | 6324 | * __skb_ext_set - attach the specified extension storage to this skb |
| 6325 | * @skb: buffer |
| 6326 | * @id: extension id |
| 6327 | * @ext: extension storage previously allocated via __skb_ext_alloc() |
| 6328 | * |
| 6329 | * Existing extensions, if any, are cleared. |
| 6330 | * |
| 6331 | * Returns the pointer to the extension. |
| 6332 | */ |
| 6333 | void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id, |
| 6334 | struct skb_ext *ext) |
| 6335 | { |
| 6336 | unsigned int newlen, newoff = SKB_EXT_CHUNKSIZEOF(*ext); |
| 6337 | |
| 6338 | skb_ext_put(skb); |
| 6339 | newlen = newoff + skb_ext_type_len[id]; |
| 6340 | ext->chunks = newlen; |
| 6341 | ext->offset[id] = newoff; |
| 6342 | skb->extensions = ext; |
| 6343 | skb->active_extensions = 1 << id; |
| 6344 | return skb_ext_get_ptr(ext, id); |
| 6345 | } |
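| | /* Usage sketch (preallocating outside atomic context; SKB_EXT_MPTCP |
| | * stands in for any id from enum skb_ext_id): |
| | * |
| | *	struct skb_ext *ext = __skb_ext_alloc(GFP_KERNEL); |
| | * |
| | *	if (!ext) |
| | *		return -ENOMEM; |
| | *	opts = __skb_ext_set(skb, SKB_EXT_MPTCP, ext); |
| | */ |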
| 6346 | |
| 6347 | /** |
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 6348 | * skb_ext_add - allocate space for given extension, COW if needed |
| 6349 | * @skb: buffer |
| 6350 | * @id: extension to allocate space for |
| 6351 | * |
| 6352 | * Allocates enough space for the given extension. |
| 6353 | * If the extension is already present, a pointer to that extension |
| 6354 | * is returned. |
| 6355 | * |
| 6356 | * If the skb was cloned, COW applies and the returned memory can be |
| 6357 | * modified without changing the extension space of cloned buffers. |
| 6358 | * |
| 6359 | * Returns pointer to the extension or NULL on allocation failure. |
| 6360 | */ |
| 6361 | void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id) |
| 6362 | { |
| 6363 | struct skb_ext *new, *old = NULL; |
| 6364 | unsigned int newlen, newoff; |
| 6365 | |
| 6366 | if (skb->active_extensions) { |
| 6367 | old = skb->extensions; |
| 6368 | |
Florian Westphal | 4165079 | 2018-12-18 17:15:27 +0100 | [diff] [blame] | 6369 | new = skb_ext_maybe_cow(old, skb->active_extensions); |
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 6370 | if (!new) |
| 6371 | return NULL; |
| 6372 | |
Paolo Abeni | 682ec85 | 2018-12-21 19:03:15 +0100 | [diff] [blame] | 6373 | if (__skb_ext_exist(new, id)) |
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 6374 | goto set_active; |
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 6375 | |
Paolo Abeni | e94e50b | 2018-12-21 19:03:13 +0100 | [diff] [blame] | 6376 | newoff = new->chunks; |
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 6377 | } else { |
| 6378 | newoff = SKB_EXT_CHUNKSIZEOF(*new); |
| 6379 | |
Florian Westphal | 4930f48 | 2020-05-16 10:46:23 +0200 | [diff] [blame] | 6380 | new = __skb_ext_alloc(GFP_ATOMIC); |
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 6381 | if (!new) |
| 6382 | return NULL; |
| 6383 | } |
| 6384 | |
| 6385 | newlen = newoff + skb_ext_type_len[id]; |
| 6386 | new->chunks = newlen; |
| 6387 | new->offset[id] = newoff; |
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 6388 | set_active: |
Paolo Abeni | b0999f3 | 2021-07-28 18:24:01 +0200 | [diff] [blame] | 6389 | skb->slow_gro = 1; |
Paolo Abeni | 682ec85 | 2018-12-21 19:03:15 +0100 | [diff] [blame] | 6390 | skb->extensions = new; |
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 6391 | skb->active_extensions |= 1 << id; |
| 6392 | return skb_ext_get_ptr(new, id); |
| 6393 | } |
| 6394 | EXPORT_SYMBOL(skb_ext_add); |
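| | /* Usage sketch (this is essentially what secpath_set() does): |
| | * |
| | *	struct sec_path *sp = skb_ext_add(skb, SKB_EXT_SEC_PATH); |
| | * |
| | *	if (!sp) |
| | *		return NULL;	// allocation failed, skb unchanged |
| | */ |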
| 6395 | |
Florian Westphal | 4165079 | 2018-12-18 17:15:27 +0100 | [diff] [blame] | 6396 | #ifdef CONFIG_XFRM |
| 6397 | static void skb_ext_put_sp(struct sec_path *sp) |
| 6398 | { |
| 6399 | unsigned int i; |
| 6400 | |
| 6401 | for (i = 0; i < sp->len; i++) |
| 6402 | xfrm_state_put(sp->xvec[i]); |
| 6403 | } |
| 6404 | #endif |
| 6405 | |
Jeremy Kerr | 78476d3 | 2021-10-29 11:01:44 +0800 | [diff] [blame] | 6406 | #ifdef CONFIG_MCTP_FLOWS |
| 6407 | static void skb_ext_put_mctp(struct mctp_flow *flow) |
| 6408 | { |
| 6409 | if (flow->key) |
| 6410 | mctp_key_unref(flow->key); |
| 6411 | } |
| 6412 | #endif |
| 6413 | |
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 6414 | void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id) |
| 6415 | { |
| 6416 | struct skb_ext *ext = skb->extensions; |
| 6417 | |
| 6418 | skb->active_extensions &= ~(1 << id); |
| 6419 | if (skb->active_extensions == 0) { |
| 6420 | skb->extensions = NULL; |
| 6421 | __skb_ext_put(ext); |
Florian Westphal | 4165079 | 2018-12-18 17:15:27 +0100 | [diff] [blame] | 6422 | #ifdef CONFIG_XFRM |
| 6423 | } else if (id == SKB_EXT_SEC_PATH && |
| 6424 | refcount_read(&ext->refcnt) == 1) { |
| 6425 | struct sec_path *sp = skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH); |
| 6426 | |
| 6427 | skb_ext_put_sp(sp); |
| 6428 | sp->len = 0; |
| 6429 | #endif |
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 6430 | } |
| 6431 | } |
| 6432 | EXPORT_SYMBOL(__skb_ext_del); |
| 6433 | |
| 6434 | void __skb_ext_put(struct skb_ext *ext) |
| 6435 | { |
| 6436 | /* If this is the last reference, nothing can increment |
| 6437 | * it after the check passes. Avoids one atomic op. |
| 6438 | */ |
| 6439 | if (refcount_read(&ext->refcnt) == 1) |
| 6440 | goto free_now; |
| 6441 | |
| 6442 | if (!refcount_dec_and_test(&ext->refcnt)) |
| 6443 | return; |
| 6444 | free_now: |
Florian Westphal | 4165079 | 2018-12-18 17:15:27 +0100 | [diff] [blame] | 6445 | #ifdef CONFIG_XFRM |
| 6446 | if (__skb_ext_exist(ext, SKB_EXT_SEC_PATH)) |
| 6447 | skb_ext_put_sp(skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH)); |
| 6448 | #endif |
Jeremy Kerr | 78476d3 | 2021-10-29 11:01:44 +0800 | [diff] [blame] | 6449 | #ifdef CONFIG_MCTP_FLOWS |
| 6450 | if (__skb_ext_exist(ext, SKB_EXT_MCTP)) |
| 6451 | skb_ext_put_mctp(skb_ext_get_ptr(ext, SKB_EXT_MCTP)); |
| 6452 | #endif |
Florian Westphal | 4165079 | 2018-12-18 17:15:27 +0100 | [diff] [blame] | 6453 | |
Florian Westphal | df5042f | 2018-12-18 17:15:16 +0100 | [diff] [blame] | 6454 | kmem_cache_free(skbuff_ext_cache, ext); |
| 6455 | } |
| 6456 | EXPORT_SYMBOL(__skb_ext_put); |
| 6457 | #endif /* CONFIG_SKB_EXTENSIONS */ |