/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>

struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;

/**
 *	skb_panic - private function for out-of-line support
 *	@skb:	buffer
 *	@sz:	size
 *	@addr:	address
 *	@msg:	skb_over_panic or skb_under_panic
 *
 *	Out-of-line support for skb_put() and skb_push().
 *	Called via the wrapper skb_over_panic() or skb_under_panic().
 *	Keep out of line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
		      const char msg[])
{
	pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
		 msg, addr, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If they are,
 * and the socket is later found to be SOCK_MEMALLOC, then PFMEMALLOC
 * reserves may be used. Otherwise, the packet data may be discarded until
 * enough memory is free.
 */
#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
	__kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)

static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
			       unsigned long ip, bool *pfmemalloc)
{
	void *obj;
	bool ret_pfmemalloc = false;

	/*
	 * Try a regular allocation; when that fails and we're not entitled
	 * to the reserves, fail.
	 */
	obj = kmalloc_node_track_caller(size,
					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					node);
	if (obj || !(gfp_pfmemalloc_allowed(flags)))
		goto out;

	/* Try again but now we are using pfmemalloc reserves */
	ret_pfmemalloc = true;
	obj = kmalloc_node_track_caller(size, flags, node);

out:
	if (pfmemalloc)
		*pfmemalloc = ret_pfmemalloc;

	return obj;
}
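
/*
 * Illustrative use of the helper above (a sketch only; kmalloc_reserve is
 * file-local, and the variable names here are hypothetical). The caller
 * records whether the emergency reserves were tapped, exactly as
 * __alloc_skb() does below with skb->pfmemalloc:
 *
 *	bool pfmemalloc;
 *	void *data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
 *
 *	if (data)
 *		skb->pfmemalloc = pfmemalloc;
 */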

/*	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	[BEEP] leaks.
 *
 */

struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node)
{
	struct sk_buff *skb;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(skbuff_head_cache,
				    gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->head = NULL;
	skb->truesize = sizeof(struct sk_buff);
	atomic_set(&skb->users, 1);

	skb->mac_header = (typeof(skb->mac_header))~0U;
out:
	return skb;
}

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *		instead of head cache and allocate a cloned (child) skb.
 *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *		allocations in case the data is required for writeback
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of at least size bytes. The object has a reference count
 *	of one. The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int flags, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;
	bool pfmemalloc;

	cache = (flags & SKB_ALLOC_FCLONE)
		? skbuff_fclone_cache : skbuff_head_cache;

	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_DATA_ALIGN(size);
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
	if (!data)
		goto nodata;
	/* kmalloc(size) might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	size = SKB_WITH_OVERHEAD(ksize(data));
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	/* Account for allocated memory : skb + skb->head */
	skb->truesize = SKB_TRUESIZE(size);
	skb->pfmemalloc = pfmemalloc;
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	if (flags & SKB_ALLOC_FCLONE) {
		struct sk_buff_fclones *fclones;

		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		kmemcheck_annotate_bitfield(&fclones->skb2, flags1);
		skb->fclone = SKB_FCLONE_ORIG;
		atomic_set(&fclones->fclone_ref, 1);

		fclones->skb2.fclone = SKB_FCLONE_CLONE;
		fclones->skb2.pfmemalloc = pfmemalloc;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);
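
/*
 * Example: a typical atomic allocation with headroom reserved by the
 * caller (an illustrative sketch, not code used in this file; "payload"
 * and "len" are hypothetical). alloc_skb() is the common wrapper around
 * __alloc_skb() with flags == 0 and NUMA_NO_NODE:
 *
 *	struct sk_buff *skb = alloc_skb(len + NET_SKB_PAD, GFP_ATOMIC);
 *
 *	if (skb) {
 *		skb_reserve(skb, NET_SKB_PAD);
 *		memcpy(skb_put(skb, len), payload, len);
 *	}
 */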

/**
 * __build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc() only if
 * @frag_size is 0, otherwise data should come from the page allocator
 * or vmalloc().
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 * Before IO, driver allocates only data buffer where NIC put incoming frame
 * Driver should add room at head (NET_SKB_PAD) and
 * MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
 * After IO, driver calls build_skb(), to allocate sk_buff and populate it
 * before giving packet to stack.
 * RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	unsigned int size = frag_size ? : ksize(data);

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (!skb)
		return NULL;

	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = SKB_TRUESIZE(size);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	return skb;
}

/* build_skb() is a wrapper over __build_skb() that specifically
 * takes care of skb->head_frag and skb->pfmemalloc.
 * This means that if @frag_size is not zero, then @data must be backed
 * by a page fragment, not kmalloc() or vmalloc()
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __build_skb(data, frag_size);

	if (skb && frag_size) {
		skb->head_frag = 1;
		if (virt_to_head_page(data)->pfmemalloc)
			skb->pfmemalloc = 1;
	}
	return skb;
}
EXPORT_SYMBOL(build_skb);
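
/*
 * Example: the receive pattern the notes above describe (a hedged sketch;
 * "rx_frag_sz", "buf" and "pkt_len" are hypothetical driver-side names):
 *
 *	unsigned int sz = SKB_DATA_ALIGN(rx_frag_sz + NET_SKB_PAD) +
 *			  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	void *buf = netdev_alloc_frag(sz);		// before IO
 *	...
 *	struct sk_buff *skb = build_skb(buf, sz);	// after IO
 *
 *	if (skb) {
 *		skb_reserve(skb, NET_SKB_PAD);
 *		skb_put(skb, pkt_len);
 *	}
 */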

struct netdev_alloc_cache {
	struct page_frag	frag;
	/* we maintain a pagecount bias, so that we don't dirty the cache
	 * line containing page->_count every time we allocate a fragment.
	 */
	unsigned int		pagecnt_bias;
};
static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct netdev_alloc_cache, napi_alloc_cache);

static struct page *__page_frag_refill(struct netdev_alloc_cache *nc,
				       gfp_t gfp_mask)
{
	const unsigned int order = NETDEV_FRAG_PAGE_MAX_ORDER;
	struct page *page = NULL;
	gfp_t gfp = gfp_mask;

	if (order) {
		gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
			    __GFP_NOMEMALLOC;
		page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
		nc->frag.size = PAGE_SIZE << (page ? order : 0);
	}

	if (unlikely(!page))
		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);

	nc->frag.page = page;

	return page;
}

static void *__alloc_page_frag(struct netdev_alloc_cache __percpu *cache,
			       unsigned int fragsz, gfp_t gfp_mask)
{
	struct netdev_alloc_cache *nc = this_cpu_ptr(cache);
	struct page *page = nc->frag.page;
	unsigned int size;
	int offset;

	if (unlikely(!page)) {
refill:
		page = __page_frag_refill(nc, gfp_mask);
		if (!page)
			return NULL;

		/* if size can vary use frag.size else just use PAGE_SIZE */
		size = NETDEV_FRAG_PAGE_MAX_ORDER ? nc->frag.size : PAGE_SIZE;

		/* Even if we own the page, we do not use atomic_set().
		 * This would break get_page_unless_zero() users.
		 */
		atomic_add(size - 1, &page->_count);

		/* reset page count bias and offset to start of new frag */
		nc->pagecnt_bias = size;
		nc->frag.offset = size;
	}

	offset = nc->frag.offset - fragsz;
	if (unlikely(offset < 0)) {
		if (!atomic_sub_and_test(nc->pagecnt_bias, &page->_count))
			goto refill;

		/* if size can vary use frag.size else just use PAGE_SIZE */
		size = NETDEV_FRAG_PAGE_MAX_ORDER ? nc->frag.size : PAGE_SIZE;

		/* OK, page count is 0, we can safely set it */
		atomic_set(&page->_count, size);

		/* reset page count bias and offset to start of new frag */
		nc->pagecnt_bias = size;
		offset = size - fragsz;
	}

	nc->pagecnt_bias--;
	nc->frag.offset = offset;

	return page_address(page) + offset;
}

static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
	unsigned long flags;
	void *data;

	local_irq_save(flags);
	data = __alloc_page_frag(&netdev_alloc_cache, fragsz, gfp_mask);
	local_irq_restore(flags);
	return data;
}

/**
 * netdev_alloc_frag - allocate a page fragment
 * @fragsz: fragment size
 *
 * Allocates a frag from a page for receive buffer.
 * Uses GFP_ATOMIC allocations.
 */
void *netdev_alloc_frag(unsigned int fragsz)
{
	return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
}
EXPORT_SYMBOL(netdev_alloc_frag);
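
/*
 * Example: a driver refilling an RX ring (a sketch; the "rx_buf" array
 * and "truesize" are hypothetical):
 *
 *	void *frag = netdev_alloc_frag(truesize);
 *
 *	if (frag)
 *		rx_buf[i] = frag;	// later handed to build_skb()
 *
 * Fragments come from a per-cpu page; netdev_alloc_frag() disables IRQs
 * around the cache, so it is safe from any context, while napi_alloc_frag()
 * below skips that and must only be called from softirq (NAPI) context.
 */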

static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
	return __alloc_page_frag(&napi_alloc_cache, fragsz, gfp_mask);
}

void *napi_alloc_frag(unsigned int fragsz)
{
	return __napi_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
}
EXPORT_SYMBOL(napi_alloc_frag);

/**
 *	__alloc_rx_skb - allocate an skbuff for rx
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *	@flags: If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *		allocations in case we have to fall back to __alloc_skb()
 *		If SKB_ALLOC_NAPI is set, page fragment will be allocated
 *		from napi_cache instead of netdev_cache.
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
static struct sk_buff *__alloc_rx_skb(unsigned int length, gfp_t gfp_mask,
				      int flags)
{
	struct sk_buff *skb = NULL;
	unsigned int fragsz = SKB_DATA_ALIGN(length) +
			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) {
		void *data;

		if (sk_memalloc_socks())
			gfp_mask |= __GFP_MEMALLOC;

		data = (flags & SKB_ALLOC_NAPI) ?
			__napi_alloc_frag(fragsz, gfp_mask) :
			__netdev_alloc_frag(fragsz, gfp_mask);

		if (likely(data)) {
			skb = build_skb(data, fragsz);
			if (unlikely(!skb))
				put_page(virt_to_head_page(data));
		}
	} else {
		skb = __alloc_skb(length, gfp_mask,
				  SKB_ALLOC_RX, NUMA_NO_NODE);
	}
	return skb;
}

/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has NET_SKB_PAD headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
				   unsigned int length, gfp_t gfp_mask)
{
	struct sk_buff *skb;

	length += NET_SKB_PAD;
	skb = __alloc_rx_skb(length, gfp_mask, 0);

	if (likely(skb)) {
		skb_reserve(skb, NET_SKB_PAD);
		skb->dev = dev;
	}

	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);
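
/*
 * Example: the usual driver call (a sketch; "netdev" and the sizing are
 * illustrative). netdev_alloc_skb() wraps __netdev_alloc_skb() with
 * GFP_ATOMIC, and NET_SKB_PAD headroom is already reserved on return:
 *
 *	struct sk_buff *skb = netdev_alloc_skb(netdev, mtu + ETH_HLEN);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	// ... DMA the frame into skb->data, then:
 *	skb_put(skb, pkt_len);
 *	skb->protocol = eth_type_trans(skb, netdev);
 *	netif_rx(skb);
 */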

/**
 *	__napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
 *	@napi: napi instance this buffer was allocated for
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
 *
 *	Allocate a new sk_buff for use in NAPI receive. This function will
 *	attempt to allocate the head from a special reserved region used
 *	only for NAPI Rx allocation. By doing this we can save several
 *	CPU cycles by avoiding having to disable and re-enable IRQs.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
				 unsigned int length, gfp_t gfp_mask)
{
	struct sk_buff *skb;

	length += NET_SKB_PAD + NET_IP_ALIGN;
	skb = __alloc_rx_skb(length, gfp_mask, SKB_ALLOC_NAPI);

	if (likely(skb)) {
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
		skb->dev = napi->dev;
	}

	return skb;
}
EXPORT_SYMBOL(__napi_alloc_skb);
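
/*
 * Example: allocating inside a NAPI poll handler (a sketch; "adapter" and
 * "rx_data" are hypothetical). napi_alloc_skb() wraps this with GFP_ATOMIC:
 *
 *	struct sk_buff *skb = napi_alloc_skb(&adapter->napi, pkt_len);
 *
 *	if (skb) {
 *		memcpy(skb_put(skb, pkt_len), rx_data, pkt_len);
 *		napi_gro_receive(&adapter->napi, skb);
 *	}
 *
 * Because the napi cache is touched without disabling IRQs, this path is
 * only valid from softirq context.
 */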

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);
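
/*
 * Example: attaching a received page to an skb as frag 0 (a sketch; the
 * page, offset and length variables are hypothetical):
 *
 *	skb_add_rx_frag(skb, 0, page, offset, pkt_len, PAGE_SIZE);
 *
 * @size grows skb->len, while @truesize should reflect the full buffer
 * actually consumed (here a whole page), so memory accounting stays
 * honest even when the frame is shorter than the buffer.
 */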

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	skb_frag_size_add(frag, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_coalesce_rx_frag);

static void skb_drop_list(struct sk_buff **listp)
{
	kfree_skb_list(*listp);
	*listp = NULL;
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_free_head(struct sk_buff *skb)
{
	if (skb->head_frag)
		put_page(virt_to_head_page(skb->head));
	else
		kfree(skb->head);
}

static void skb_release_data(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int i;

	if (skb->cloned &&
	    atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			      &shinfo->dataref))
		return;

	for (i = 0; i < shinfo->nr_frags; i++)
		__skb_frag_unref(&shinfo->frags[i]);

	/*
	 * If the skb buffer came from userspace, we need to notify the
	 * caller that the lower device's DMA has completed;
	 */
	if (shinfo->tx_flags & SKBTX_DEV_ZEROCOPY) {
		struct ubuf_info *uarg;

		uarg = shinfo->destructor_arg;
		if (uarg->callback)
			uarg->callback(uarg, true);
	}

	if (shinfo->frag_list)
		kfree_skb_list(shinfo->frag_list);

	skb_free_head(skb);
}

/*
 *	Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff_fclones *fclones;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		return;

	case SKB_FCLONE_ORIG:
		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		/* We usually free the clone (TX completion) before the
		 * original skb. This test would have no chance to be true
		 * for the clone, while here, branch prediction will be good.
		 */
		if (atomic_read(&fclones->fclone_ref) == 1)
			goto fastpath;
		break;

	default: /* SKB_FCLONE_CLONE */
		fclones = container_of(skb, struct sk_buff_fclones, skb2);
		break;
	}
	if (!atomic_dec_and_test(&fclones->fclone_ref))
		return;
fastpath:
	kmem_cache_free(skbuff_fclone_cache, fclones);
}

static void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb->nfct);
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	nf_bridge_put(skb->nf_bridge);
#endif
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	if (likely(skb->head))
		skb_release_data(skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

void kfree_skb_list(struct sk_buff *segs)
{
	while (segs) {
		struct sk_buff *next = segs->next;

		kfree_skb(segs);
		segs = next;
	}
}
EXPORT_SYMBOL(kfree_skb_list);

/**
 *	skb_tx_error - report an sk_buff xmit error
 *	@skb: buffer that triggered an error
 *
 *	Report xmit error if a device callback is tracking this skb.
 *	skb must be freed afterwards.
 */
void skb_tx_error(struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		struct ubuf_info *uarg;

		uarg = skb_shinfo(skb)->destructor_arg;
		if (uarg->callback)
			uarg->callback(uarg, false);
		skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
	}
}
EXPORT_SYMBOL(skb_tx_error);

/**
 *	consume_skb - free an skbuff
 *	@skb: buffer to free
 *
 *	Drop a ref to the buffer and free it if the usage count has hit zero.
 *	Functions identically to kfree_skb, but kfree_skb assumes that the
 *	frame is being dropped after a failure and notes that via the
 *	kfree_skb trace event, whereas consume_skb marks a normal,
 *	successful consumption.
 */
void consume_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_consume_skb(skb);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
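
/*
 * Example: choosing between the two free paths (a sketch; the error
 * condition is hypothetical). Drop-monitoring tools key off the
 * kfree_skb trace event, so the distinction matters:
 *
 *	if (unlikely(err))
 *		kfree_skb(skb);		// counted as a drop
 *	else
 *		consume_skb(skb);	// normal end of life, not a drop
 */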

/* Make sure a field is enclosed inside headers_start/headers_end section */
#define CHECK_SKB_FIELD(field) \
	BUILD_BUG_ON(offsetof(struct sk_buff, field) <		\
		     offsetof(struct sk_buff, headers_start));	\
	BUILD_BUG_ON(offsetof(struct sk_buff, field) >		\
		     offsetof(struct sk_buff, headers_end));	\

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp = old->tstamp;
	/* We do not copy old->sk */
	new->dev = old->dev;
	memcpy(new->cb, old->cb, sizeof(old->cb));
	skb_dst_copy(new, old);
#ifdef CONFIG_XFRM
	new->sp = secpath_get(old->sp);
#endif
	__nf_copy(new, old, false);

	/* Note : this field could be in headers_start/headers_end section
	 * It is not yet because we do not want to have a 16 bit hole
	 */
	new->queue_mapping = old->queue_mapping;

	memcpy(&new->headers_start, &old->headers_start,
	       offsetof(struct sk_buff, headers_end) -
	       offsetof(struct sk_buff, headers_start));
	CHECK_SKB_FIELD(protocol);
	CHECK_SKB_FIELD(csum);
	CHECK_SKB_FIELD(hash);
	CHECK_SKB_FIELD(priority);
	CHECK_SKB_FIELD(skb_iif);
	CHECK_SKB_FIELD(vlan_proto);
	CHECK_SKB_FIELD(vlan_tci);
	CHECK_SKB_FIELD(transport_header);
	CHECK_SKB_FIELD(network_header);
	CHECK_SKB_FIELD(mac_header);
	CHECK_SKB_FIELD(inner_protocol);
	CHECK_SKB_FIELD(inner_transport_header);
	CHECK_SKB_FIELD(inner_network_header);
	CHECK_SKB_FIELD(inner_mac_header);
	CHECK_SKB_FIELD(mark);
#ifdef CONFIG_NETWORK_SECMARK
	CHECK_SKB_FIELD(secmark);
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	CHECK_SKB_FIELD(napi_id);
#endif
#ifdef CONFIG_XPS
	CHECK_SKB_FIELD(sender_cpu);
#endif
#ifdef CONFIG_NET_SCHED
	CHECK_SKB_FIELD(tc_index);
#ifdef CONFIG_NET_CLS_ACT
	CHECK_SKB_FIELD(tc_verd);
#endif
#endif

}

/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(head_frag);
	C(data);
	C(truesize);
	atomic_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 *	skb_morph - morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);
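
/*
 * Example: reusing an existing skb shell to take over another skb's data
 * (a sketch; this mirrors how the IP defragmenter reuses a queued skb):
 *
 *	skb_morph(head, frag);	// head now shares frag's data
 *	consume_skb(frag);	// drop the now-redundant source reference
 *
 * Note that skb_morph() first releases everything @dst held, so any data
 * the destination pointed at beforehand is freed.
 */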

/**
 *	skb_copy_ubufs	-	copy userspace skb frags buffers to kernel
 *	@skb: the skb to modify
 *	@gfp_mask: allocation priority
 *
 *	This must be called on an skb with SKBTX_DEV_ZEROCOPY set.
 *	It will copy all frags into kernel memory and drop the reference
 *	to the userspace pages.
 *
 *	If this function is called from an interrupt @gfp_mask must be
 *	%GFP_ATOMIC.
 *
 *	Returns 0 on success or a negative error code on failure
 *	to allocate kernel memory to copy to.
 */
| 924 | int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 925 | { |
| 926 | int i; |
| 927 | int num_frags = skb_shinfo(skb)->nr_frags; |
| 928 | struct page *page, *head = NULL; |
| 929 | struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg; |
| 930 | |
| 931 | for (i = 0; i < num_frags; i++) { |
| 932 | u8 *vaddr; |
| 933 | skb_frag_t *f = &skb_shinfo(skb)->frags[i]; |
| 934 | |
Krishna Kumar | 02756ed | 2012-07-17 02:05:29 +0000 | [diff] [blame] | 935 | page = alloc_page(gfp_mask); |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 936 | if (!page) { |
| 937 | while (head) { |
Sunghan Suh | 40dadff | 2013-07-12 16:17:23 +0900 | [diff] [blame] | 938 | struct page *next = (struct page *)page_private(head); |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 939 | put_page(head); |
| 940 | head = next; |
| 941 | } |
| 942 | return -ENOMEM; |
| 943 | } |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 944 | vaddr = kmap_atomic(skb_frag_page(f)); |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 945 | memcpy(page_address(page), |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 946 | vaddr + f->page_offset, skb_frag_size(f)); |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 947 | kunmap_atomic(vaddr); |
Sunghan Suh | 40dadff | 2013-07-12 16:17:23 +0900 | [diff] [blame] | 948 | set_page_private(page, (unsigned long)head); |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 949 | head = page; |
| 950 | } |
| 951 | |
| 952 | /* skb frags release userspace buffers */ |
Krishna Kumar | 02756ed | 2012-07-17 02:05:29 +0000 | [diff] [blame] | 953 | for (i = 0; i < num_frags; i++) |
Ian Campbell | a8605c6 | 2011-10-19 23:01:49 +0000 | [diff] [blame] | 954 | skb_frag_unref(skb, i); |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 955 | |
Michael S. Tsirkin | e19d676 | 2012-11-01 09:16:22 +0000 | [diff] [blame] | 956 | uarg->callback(uarg, false); |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 957 | |
| 958 | /* skb frags point to kernel buffers */ |
Krishna Kumar | 02756ed | 2012-07-17 02:05:29 +0000 | [diff] [blame] | 959 | for (i = num_frags - 1; i >= 0; i--) { |
| 960 | __skb_fill_page_desc(skb, i, head, 0, |
| 961 | skb_shinfo(skb)->frags[i].size); |
Sunghan Suh | 40dadff | 2013-07-12 16:17:23 +0900 | [diff] [blame] | 962 | head = (struct page *)page_private(head); |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 963 | } |
Michael S. Tsirkin | 48c8301 | 2011-08-31 08:03:29 +0000 | [diff] [blame] | 964 | |
| 965 | skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY; |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 966 | return 0; |
| 967 | } |
Michael S. Tsirkin | dcc0fb7 | 2012-07-20 09:23:20 +0000 | [diff] [blame] | 968 | EXPORT_SYMBOL_GPL(skb_copy_ubufs); |
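/* A minimal, hypothetical sketch (not part of the original file): before
 * a kernel path writes into the frag pages of an skb, zerocopy frags
 * that still reference userspace pages must be replaced with private
 * kernel copies.  This mirrors what the skb_orphan_frags() helper does.
 */
static int example_make_frags_writable(struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
		return skb_copy_ubufs(skb, GFP_ATOMIC);
	return 0;
}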
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 969 | |
Herbert Xu | e0053ec | 2007-10-14 00:37:52 -0700 | [diff] [blame] | 970 | /** |
| 971 | * skb_clone - duplicate an sk_buff |
| 972 | * @skb: buffer to clone |
| 973 | * @gfp_mask: allocation priority |
| 974 | * |
| 975 | * Duplicate an &sk_buff. The new one is not owned by a socket. Both |
| 976 | * copies share the same packet data but not structure. The new |
| 977 |  * buffer has a reference count of 1. If the allocation fails, the |
| 978 |  * function returns %NULL; otherwise the new buffer is returned. |
| 979 |  * |
| 980 |  * If this function is called from an interrupt, @gfp_mask must be |
| 981 |  * %GFP_ATOMIC. |
| 982 | */ |
| 983 | |
| 984 | struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) |
| 985 | { |
Eric Dumazet | d0bf4a9 | 2014-09-29 13:29:15 -0700 | [diff] [blame] | 986 | struct sk_buff_fclones *fclones = container_of(skb, |
| 987 | struct sk_buff_fclones, |
| 988 | skb1); |
Eric Dumazet | 6ffe75eb | 2014-12-03 17:04:39 -0800 | [diff] [blame] | 989 | struct sk_buff *n; |
Herbert Xu | e0053ec | 2007-10-14 00:37:52 -0700 | [diff] [blame] | 990 | |
Michael S. Tsirkin | 70008aa | 2012-07-20 09:23:10 +0000 | [diff] [blame] | 991 | if (skb_orphan_frags(skb, gfp_mask)) |
| 992 | return NULL; |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 993 | |
Herbert Xu | e0053ec | 2007-10-14 00:37:52 -0700 | [diff] [blame] | 994 | if (skb->fclone == SKB_FCLONE_ORIG && |
Eric Dumazet | 6ffe75eb | 2014-12-03 17:04:39 -0800 | [diff] [blame] | 995 | atomic_read(&fclones->fclone_ref) == 1) { |
| 996 | n = &fclones->skb2; |
| 997 | atomic_set(&fclones->fclone_ref, 2); |
Herbert Xu | e0053ec | 2007-10-14 00:37:52 -0700 | [diff] [blame] | 998 | } else { |
Mel Gorman | c93bdd0 | 2012-07-31 16:44:19 -0700 | [diff] [blame] | 999 | if (skb_pfmemalloc(skb)) |
| 1000 | gfp_mask |= __GFP_MEMALLOC; |
| 1001 | |
Herbert Xu | e0053ec | 2007-10-14 00:37:52 -0700 | [diff] [blame] | 1002 | n = kmem_cache_alloc(skbuff_head_cache, gfp_mask); |
| 1003 | if (!n) |
| 1004 | return NULL; |
Vegard Nossum | fe55f6d | 2008-08-30 12:16:35 +0200 | [diff] [blame] | 1005 | |
| 1006 | kmemcheck_annotate_bitfield(n, flags1); |
Herbert Xu | e0053ec | 2007-10-14 00:37:52 -0700 | [diff] [blame] | 1007 | n->fclone = SKB_FCLONE_UNAVAILABLE; |
| 1008 | } |
| 1009 | |
| 1010 | return __skb_clone(n, skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1011 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1012 | EXPORT_SYMBOL(skb_clone); |
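/* A minimal, hypothetical sketch (not part of the original file): hand
 * the same packet to a second consumer.  The clone shares the packet
 * data, so neither side may modify the payload without copying first.
 */
static int example_deliver_to_second_consumer(struct sk_buff *skb)
{
	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

	if (!clone)
		return -ENOMEM;
	/* ... queue 'clone' to the second consumer here ... */
	kfree_skb(clone);	/* placeholder for the real hand-off */
	return 0;
}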
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1013 | |
Pravin B Shelar | f5b1729 | 2013-03-07 13:21:40 +0000 | [diff] [blame] | 1014 | static void skb_headers_offset_update(struct sk_buff *skb, int off) |
| 1015 | { |
Eric Dumazet | 030737b | 2013-10-19 11:42:54 -0700 | [diff] [blame] | 1016 | /* Only adjust this if it actually is csum_start rather than csum */ |
| 1017 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
| 1018 | skb->csum_start += off; |
Pravin B Shelar | f5b1729 | 2013-03-07 13:21:40 +0000 | [diff] [blame] | 1019 | /* {transport,network,mac}_header and tail are relative to skb->head */ |
| 1020 | skb->transport_header += off; |
| 1021 | skb->network_header += off; |
| 1022 | if (skb_mac_header_was_set(skb)) |
| 1023 | skb->mac_header += off; |
| 1024 | skb->inner_transport_header += off; |
| 1025 | skb->inner_network_header += off; |
Pravin B Shelar | aefbd2b | 2013-03-07 13:21:46 +0000 | [diff] [blame] | 1026 | skb->inner_mac_header += off; |
Pravin B Shelar | f5b1729 | 2013-03-07 13:21:40 +0000 | [diff] [blame] | 1027 | } |
| 1028 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1029 | static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) |
| 1030 | { |
Herbert Xu | dec1881 | 2007-10-14 00:37:30 -0700 | [diff] [blame] | 1031 | __copy_skb_header(new, old); |
| 1032 | |
Herbert Xu | 7967168 | 2006-06-22 02:40:14 -0700 | [diff] [blame] | 1033 | skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size; |
| 1034 | skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; |
| 1035 | skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1036 | } |
| 1037 | |
Mel Gorman | c93bdd0 | 2012-07-31 16:44:19 -0700 | [diff] [blame] | 1038 | static inline int skb_alloc_rx_flag(const struct sk_buff *skb) |
| 1039 | { |
| 1040 | if (skb_pfmemalloc(skb)) |
| 1041 | return SKB_ALLOC_RX; |
| 1042 | return 0; |
| 1043 | } |
| 1044 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1045 | /** |
| 1046 | * skb_copy - create private copy of an sk_buff |
| 1047 | * @skb: buffer to copy |
| 1048 | * @gfp_mask: allocation priority |
| 1049 | * |
| 1050 | * Make a copy of both an &sk_buff and its data. This is used when the |
| 1051 | * caller wishes to modify the data and needs a private copy of the |
| 1052 | * data to alter. Returns %NULL on failure or the pointer to the buffer |
| 1053 | * on success. The returned buffer has a reference count of 1. |
| 1054 | * |
| 1055 |  * As a by-product, this function converts a non-linear &sk_buff into |
| 1056 |  * a linear one, so the &sk_buff becomes completely private and the |
| 1057 |  * caller may modify all the data of the returned buffer. This means |
| 1058 |  * the function is not recommended when only the header is going |
| 1059 |  * to be modified. Use pskb_copy() instead. |
| 1060 | */ |
| 1061 | |
Al Viro | dd0fc66 | 2005-10-07 07:46:04 +0100 | [diff] [blame] | 1062 | struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1063 | { |
Eric Dumazet | 6602ceb | 2010-09-01 05:25:10 +0000 | [diff] [blame] | 1064 | int headerlen = skb_headroom(skb); |
Alexander Duyck | ec47ea8 | 2012-05-04 14:26:56 +0000 | [diff] [blame] | 1065 | unsigned int size = skb_end_offset(skb) + skb->data_len; |
Mel Gorman | c93bdd0 | 2012-07-31 16:44:19 -0700 | [diff] [blame] | 1066 | struct sk_buff *n = __alloc_skb(size, gfp_mask, |
| 1067 | skb_alloc_rx_flag(skb), NUMA_NO_NODE); |
Eric Dumazet | 6602ceb | 2010-09-01 05:25:10 +0000 | [diff] [blame] | 1068 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1069 | if (!n) |
| 1070 | return NULL; |
| 1071 | |
| 1072 | /* Set the data pointer */ |
| 1073 | skb_reserve(n, headerlen); |
| 1074 | /* Set the tail pointer and length */ |
| 1075 | skb_put(n, skb->len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1076 | |
| 1077 | if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)) |
| 1078 | BUG(); |
| 1079 | |
| 1080 | copy_skb_header(n, skb); |
| 1081 | return n; |
| 1082 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1083 | EXPORT_SYMBOL(skb_copy); |
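/* A minimal, hypothetical sketch (not part of the original file): take
 * a fully private, linear copy before rewriting payload bytes that the
 * (possibly shared) original must not see change.
 */
static struct sk_buff *example_private_copy_for_edit(struct sk_buff *skb)
{
	struct sk_buff *n = skb_copy(skb, GFP_ATOMIC);

	if (n)	/* the copy is linear, so n->data covers all n->len bytes */
		memset(n->data, 0, min_t(unsigned int, n->len, 4));
	return n;
}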
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1084 | |
| 1085 | /** |
Octavian Purdila | bad93e9 | 2014-06-12 01:36:26 +0300 | [diff] [blame] | 1086 | * __pskb_copy_fclone - create copy of an sk_buff with private head. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1087 | * @skb: buffer to copy |
Eric Dumazet | 117632e | 2011-12-03 21:39:53 +0000 | [diff] [blame] | 1088 | * @headroom: headroom of new skb |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1089 | * @gfp_mask: allocation priority |
Octavian Purdila | bad93e9 | 2014-06-12 01:36:26 +0300 | [diff] [blame] | 1090 | * @fclone: if true allocate the copy of the skb from the fclone |
| 1091 | * cache instead of the head cache; it is recommended to set this |
| 1092 | * to true for the cases where the copy will likely be cloned |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1093 | * |
| 1094 |  * Make a copy of both an &sk_buff and part of its data, located |
| 1095 |  * in the header. Fragmented data remain shared. This is used when |
| 1096 |  * the caller wishes to modify only the header of the &sk_buff and |
| 1097 |  * needs a private copy of the header to alter. Returns %NULL on |
| 1098 |  * failure or the pointer to the buffer on success. |
| 1099 | * The returned buffer has a reference count of 1. |
| 1100 | */ |
| 1101 | |
Octavian Purdila | bad93e9 | 2014-06-12 01:36:26 +0300 | [diff] [blame] | 1102 | struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, |
| 1103 | gfp_t gfp_mask, bool fclone) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1104 | { |
Eric Dumazet | 117632e | 2011-12-03 21:39:53 +0000 | [diff] [blame] | 1105 | unsigned int size = skb_headlen(skb) + headroom; |
Octavian Purdila | bad93e9 | 2014-06-12 01:36:26 +0300 | [diff] [blame] | 1106 | int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0); |
| 1107 | struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE); |
Eric Dumazet | 6602ceb | 2010-09-01 05:25:10 +0000 | [diff] [blame] | 1108 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1109 | if (!n) |
| 1110 | goto out; |
| 1111 | |
| 1112 | /* Set the data pointer */ |
Eric Dumazet | 117632e | 2011-12-03 21:39:53 +0000 | [diff] [blame] | 1113 | skb_reserve(n, headroom); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1114 | /* Set the tail pointer and length */ |
| 1115 | skb_put(n, skb_headlen(skb)); |
| 1116 | /* Copy the bytes */ |
Arnaldo Carvalho de Melo | d626f62 | 2007-03-27 18:55:52 -0300 | [diff] [blame] | 1117 | skb_copy_from_linear_data(skb, n->data, n->len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1118 | |
Herbert Xu | 25f484a | 2006-11-07 14:57:15 -0800 | [diff] [blame] | 1119 | n->truesize += skb->data_len; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1120 | n->data_len = skb->data_len; |
| 1121 | n->len = skb->len; |
| 1122 | |
| 1123 | if (skb_shinfo(skb)->nr_frags) { |
| 1124 | int i; |
| 1125 | |
Michael S. Tsirkin | 70008aa | 2012-07-20 09:23:10 +0000 | [diff] [blame] | 1126 | if (skb_orphan_frags(skb, gfp_mask)) { |
| 1127 | kfree_skb(n); |
| 1128 | n = NULL; |
| 1129 | goto out; |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1130 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1131 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
| 1132 | skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 1133 | skb_frag_ref(skb, i); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1134 | } |
| 1135 | skb_shinfo(n)->nr_frags = i; |
| 1136 | } |
| 1137 | |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 1138 | if (skb_has_frag_list(skb)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1139 | skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; |
| 1140 | skb_clone_fraglist(n); |
| 1141 | } |
| 1142 | |
| 1143 | copy_skb_header(n, skb); |
| 1144 | out: |
| 1145 | return n; |
| 1146 | } |
Octavian Purdila | bad93e9 | 2014-06-12 01:36:26 +0300 | [diff] [blame] | 1147 | EXPORT_SYMBOL(__pskb_copy_fclone); |
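/* A minimal, hypothetical sketch (not part of the original file): when
 * only header bytes will change, pskb_copy() is the cheaper choice; the
 * fragments stay shared while the linear header becomes private.
 */
static struct sk_buff *example_copy_for_header_edit(struct sk_buff *skb)
{
	struct sk_buff *n = pskb_copy(skb, GFP_ATOMIC);

	if (n && pskb_may_pull(n, ETH_HLEN))
		memset(n->data, 0, ETH_ALEN);	/* e.g. rewrite dest MAC */
	return n;
}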
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1148 | |
| 1149 | /** |
| 1150 | * pskb_expand_head - reallocate header of &sk_buff |
| 1151 | * @skb: buffer to reallocate |
| 1152 | * @nhead: room to add at head |
| 1153 | * @ntail: room to add at tail |
| 1154 | * @gfp_mask: allocation priority |
| 1155 | * |
Mathias Krause | bc32383 | 2013-11-07 14:18:26 +0100 | [diff] [blame] | 1156 |  * Expands (or creates an identical copy, if @nhead and @ntail are zero) |
| 1157 |  * the header of @skb. The &sk_buff itself is not changed and MUST have |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1158 |  * a reference count of 1. Returns zero on success or a negative error |
| 1159 |  * code if expansion failed; in the latter case, the &sk_buff is unchanged. |
| 1160 | * |
| 1161 | * All the pointers pointing into skb header may change and must be |
| 1162 | * reloaded after call to this function. |
| 1163 | */ |
| 1164 | |
Victor Fusco | 86a76ca | 2005-07-08 14:57:47 -0700 | [diff] [blame] | 1165 | int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, |
Al Viro | dd0fc66 | 2005-10-07 07:46:04 +0100 | [diff] [blame] | 1166 | gfp_t gfp_mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1167 | { |
| 1168 | int i; |
| 1169 | u8 *data; |
Alexander Duyck | ec47ea8 | 2012-05-04 14:26:56 +0000 | [diff] [blame] | 1170 | int size = nhead + skb_end_offset(skb) + ntail; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1171 | long off; |
| 1172 | |
Herbert Xu | 4edd87a | 2008-10-01 07:09:38 -0700 | [diff] [blame] | 1173 | BUG_ON(nhead < 0); |
| 1174 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1175 | if (skb_shared(skb)) |
| 1176 | BUG(); |
| 1177 | |
| 1178 | size = SKB_DATA_ALIGN(size); |
| 1179 | |
Mel Gorman | c93bdd0 | 2012-07-31 16:44:19 -0700 | [diff] [blame] | 1180 | if (skb_pfmemalloc(skb)) |
| 1181 | gfp_mask |= __GFP_MEMALLOC; |
| 1182 | data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), |
| 1183 | gfp_mask, NUMA_NO_NODE, NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1184 | if (!data) |
| 1185 | goto nodata; |
Eric Dumazet | 87151b8 | 2012-04-10 20:08:39 +0000 | [diff] [blame] | 1186 | size = SKB_WITH_OVERHEAD(ksize(data)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1187 | |
| 1188 | 	/* Copy only real data... and, alas, the header. This should be |
Eric Dumazet | 6602ceb | 2010-09-01 05:25:10 +0000 | [diff] [blame] | 1189 | 	 * optimized for the case when the header is empty. |
| 1190 | 	 */ |
| 1191 | memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head); |
| 1192 | |
| 1193 | memcpy((struct skb_shared_info *)(data + size), |
| 1194 | skb_shinfo(skb), |
Eric Dumazet | fed6638 | 2010-07-22 19:09:08 +0000 | [diff] [blame] | 1195 | offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1196 | |
Alexander Duyck | 3e24591 | 2012-05-04 14:26:51 +0000 | [diff] [blame] | 1197 | 	/* |
| 1198 | 	 * If shinfo is shared we must drop the old head gracefully, but if |
| 1199 | 	 * it is not we can just free the old head and leave the existing |
| 1200 | 	 * refcount alone, since all we did was relocate the values |
| 1201 | 	 */ |
| 1202 | if (skb_cloned(skb)) { |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1203 | /* copy this zero copy skb frags */ |
Michael S. Tsirkin | 70008aa | 2012-07-20 09:23:10 +0000 | [diff] [blame] | 1204 | if (skb_orphan_frags(skb, gfp_mask)) |
| 1205 | goto nofrags; |
Eric Dumazet | 1fd6304 | 2010-09-02 23:09:32 +0000 | [diff] [blame] | 1206 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 1207 | skb_frag_ref(skb, i); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1208 | |
Eric Dumazet | 1fd6304 | 2010-09-02 23:09:32 +0000 | [diff] [blame] | 1209 | if (skb_has_frag_list(skb)) |
| 1210 | skb_clone_fraglist(skb); |
| 1211 | |
| 1212 | skb_release_data(skb); |
Alexander Duyck | 3e24591 | 2012-05-04 14:26:51 +0000 | [diff] [blame] | 1213 | } else { |
| 1214 | skb_free_head(skb); |
Eric Dumazet | 1fd6304 | 2010-09-02 23:09:32 +0000 | [diff] [blame] | 1215 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1216 | off = (data + nhead) - skb->head; |
| 1217 | |
| 1218 | skb->head = data; |
Eric Dumazet | d3836f2 | 2012-04-27 00:33:38 +0000 | [diff] [blame] | 1219 | skb->head_frag = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1220 | skb->data += off; |
Arnaldo Carvalho de Melo | 4305b54 | 2007-04-19 20:43:29 -0700 | [diff] [blame] | 1221 | #ifdef NET_SKBUFF_DATA_USES_OFFSET |
| 1222 | skb->end = size; |
Patrick McHardy | 56eb888 | 2007-04-09 11:45:04 -0700 | [diff] [blame] | 1223 | off = nhead; |
Arnaldo Carvalho de Melo | 4305b54 | 2007-04-19 20:43:29 -0700 | [diff] [blame] | 1224 | #else |
| 1225 | skb->end = skb->head + size; |
Patrick McHardy | 56eb888 | 2007-04-09 11:45:04 -0700 | [diff] [blame] | 1226 | #endif |
Arnaldo Carvalho de Melo | 27a884d | 2007-04-19 20:29:13 -0700 | [diff] [blame] | 1227 | skb->tail += off; |
Peter Pan(潘卫平) | b41abb4 | 2013-06-06 21:27:21 +0800 | [diff] [blame] | 1228 | skb_headers_offset_update(skb, nhead); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1229 | skb->cloned = 0; |
Patrick McHardy | 334a813 | 2007-06-25 04:35:20 -0700 | [diff] [blame] | 1230 | skb->hdr_len = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1231 | skb->nohdr = 0; |
| 1232 | atomic_set(&skb_shinfo(skb)->dataref, 1); |
| 1233 | return 0; |
| 1234 | |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1235 | nofrags: |
| 1236 | kfree(data); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1237 | nodata: |
| 1238 | return -ENOMEM; |
| 1239 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1240 | EXPORT_SYMBOL(pskb_expand_head); |
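/* A minimal, hypothetical sketch (not part of the original file): grow
 * the headroom of a non-shared skb in place before pushing an extra
 * header.  Every pointer into the old header is stale afterwards.
 */
static int example_grow_headroom(struct sk_buff *skb, unsigned int needed)
{
	if (skb_headroom(skb) >= needed)
		return 0;
	/* pskb_expand_head() requires that the skb is not shared */
	return pskb_expand_head(skb, needed - skb_headroom(skb), 0,
				GFP_ATOMIC);
}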
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1241 | |
| 1242 | /* Make private copy of skb with writable head and some headroom */ |
| 1243 | |
| 1244 | struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) |
| 1245 | { |
| 1246 | struct sk_buff *skb2; |
| 1247 | int delta = headroom - skb_headroom(skb); |
| 1248 | |
| 1249 | if (delta <= 0) |
| 1250 | skb2 = pskb_copy(skb, GFP_ATOMIC); |
| 1251 | else { |
| 1252 | skb2 = skb_clone(skb, GFP_ATOMIC); |
| 1253 | if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0, |
| 1254 | GFP_ATOMIC)) { |
| 1255 | kfree_skb(skb2); |
| 1256 | skb2 = NULL; |
| 1257 | } |
| 1258 | } |
| 1259 | return skb2; |
| 1260 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1261 | EXPORT_SYMBOL(skb_realloc_headroom); |
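/* A minimal, hypothetical sketch (not part of the original file): the
 * usual caller pattern before encapsulation, roughly what tunnel code
 * does: obtain a writable skb with at least 'hlen' bytes of headroom
 * and release the original on success.
 */
static struct sk_buff *example_prepare_encap(struct sk_buff *skb,
					     unsigned int hlen)
{
	struct sk_buff *nskb = skb_realloc_headroom(skb, hlen);

	if (nskb)
		consume_skb(skb);	/* caller continues with 'nskb' */
	return nskb;
}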
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1262 | |
| 1263 | /** |
| 1264 | * skb_copy_expand - copy and expand sk_buff |
| 1265 | * @skb: buffer to copy |
| 1266 | * @newheadroom: new free bytes at head |
| 1267 | * @newtailroom: new free bytes at tail |
| 1268 | * @gfp_mask: allocation priority |
| 1269 | * |
| 1270 | * Make a copy of both an &sk_buff and its data and while doing so |
| 1271 | * allocate additional space. |
| 1272 | * |
| 1273 | * This is used when the caller wishes to modify the data and needs a |
| 1274 | * private copy of the data to alter as well as more space for new fields. |
| 1275 | * Returns %NULL on failure or the pointer to the buffer |
| 1276 | * on success. The returned buffer has a reference count of 1. |
| 1277 | * |
| 1278 | * You must pass %GFP_ATOMIC as the allocation priority if this function |
| 1279 | * is called from an interrupt. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1280 | */ |
| 1281 | struct sk_buff *skb_copy_expand(const struct sk_buff *skb, |
Victor Fusco | 86a76ca | 2005-07-08 14:57:47 -0700 | [diff] [blame] | 1282 | int newheadroom, int newtailroom, |
Al Viro | dd0fc66 | 2005-10-07 07:46:04 +0100 | [diff] [blame] | 1283 | gfp_t gfp_mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1284 | { |
| 1285 | /* |
| 1286 | * Allocate the copy buffer |
| 1287 | */ |
Mel Gorman | c93bdd0 | 2012-07-31 16:44:19 -0700 | [diff] [blame] | 1288 | struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom, |
| 1289 | gfp_mask, skb_alloc_rx_flag(skb), |
| 1290 | NUMA_NO_NODE); |
Patrick McHardy | efd1e8d | 2007-04-10 18:30:09 -0700 | [diff] [blame] | 1291 | int oldheadroom = skb_headroom(skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1292 | int head_copy_len, head_copy_off; |
| 1293 | |
| 1294 | if (!n) |
| 1295 | return NULL; |
| 1296 | |
| 1297 | skb_reserve(n, newheadroom); |
| 1298 | |
| 1299 | /* Set the tail pointer and length */ |
| 1300 | skb_put(n, skb->len); |
| 1301 | |
Patrick McHardy | efd1e8d | 2007-04-10 18:30:09 -0700 | [diff] [blame] | 1302 | head_copy_len = oldheadroom; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1303 | head_copy_off = 0; |
| 1304 | if (newheadroom <= head_copy_len) |
| 1305 | head_copy_len = newheadroom; |
| 1306 | else |
| 1307 | head_copy_off = newheadroom - head_copy_len; |
| 1308 | |
| 1309 | /* Copy the linear header and data. */ |
| 1310 | if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, |
| 1311 | skb->len + head_copy_len)) |
| 1312 | BUG(); |
| 1313 | |
| 1314 | copy_skb_header(n, skb); |
| 1315 | |
Eric Dumazet | 030737b | 2013-10-19 11:42:54 -0700 | [diff] [blame] | 1316 | skb_headers_offset_update(n, newheadroom - oldheadroom); |
Patrick McHardy | efd1e8d | 2007-04-10 18:30:09 -0700 | [diff] [blame] | 1317 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1318 | return n; |
| 1319 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1320 | EXPORT_SYMBOL(skb_copy_expand); |
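/* A minimal, hypothetical sketch (not part of the original file): a
 * fully private copy that keeps the old headroom, adding room for a
 * VLAN tag at the head and a small hypothetical trailer at the tail.
 */
static struct sk_buff *example_copy_with_room(const struct sk_buff *skb)
{
	return skb_copy_expand(skb, skb_headroom(skb) + VLAN_HLEN,
			       4 /* hypothetical trailer */, GFP_ATOMIC);
}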
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1321 | |
| 1322 | /** |
| 1323 | * skb_pad - zero pad the tail of an skb |
| 1324 | * @skb: buffer to pad |
| 1325 | * @pad: space to pad |
| 1326 | * |
| 1327 | * Ensure that a buffer is followed by a padding area that is zero |
| 1328 | * filled. Used by network drivers which may DMA or transfer data |
| 1329 | * beyond the buffer end onto the wire. |
| 1330 | * |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1331 |  * May return an error in out-of-memory cases. The skb is freed on error. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1332 | */ |
YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 1333 | |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1334 | int skb_pad(struct sk_buff *skb, int pad) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1335 | { |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1336 | int err; |
| 1337 | int ntail; |
YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 1338 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1339 | 	/* If the skbuff is non-linear, tailroom is always zero. */ |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1340 | if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1341 | memset(skb->data+skb->len, 0, pad); |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1342 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1343 | } |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1344 | |
Arnaldo Carvalho de Melo | 4305b54 | 2007-04-19 20:43:29 -0700 | [diff] [blame] | 1345 | ntail = skb->data_len + pad - (skb->end - skb->tail); |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1346 | if (likely(skb_cloned(skb) || ntail > 0)) { |
| 1347 | err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); |
| 1348 | if (unlikely(err)) |
| 1349 | goto free_skb; |
| 1350 | } |
| 1351 | |
| 1352 | 	/* FIXME: The use of this function with non-linear skbs really needs |
| 1353 | * to be audited. |
| 1354 | */ |
| 1355 | err = skb_linearize(skb); |
| 1356 | if (unlikely(err)) |
| 1357 | goto free_skb; |
| 1358 | |
| 1359 | memset(skb->data + skb->len, 0, pad); |
| 1360 | return 0; |
| 1361 | |
| 1362 | free_skb: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1363 | kfree_skb(skb); |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1364 | return err; |
YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 1365 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1366 | EXPORT_SYMBOL(skb_pad); |
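/* A minimal, hypothetical sketch (not part of the original file): pad a
 * short Ethernet frame up to the 60-byte minimum before handing it to
 * hardware.  skb_pad() frees the skb on failure, so the caller must not
 * touch it after an error.
 */
static int example_pad_min_eth(struct sk_buff *skb)
{
	if (skb->len < ETH_ZLEN)
		return skb_pad(skb, ETH_ZLEN - skb->len);
	return 0;
}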
YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 1367 | |
Ilpo Järvinen | 0dde3e1 | 2008-03-27 17:43:41 -0700 | [diff] [blame] | 1368 | /** |
Mathias Krause | 0c7ddf3 | 2013-11-07 14:18:24 +0100 | [diff] [blame] | 1369 | * pskb_put - add data to the tail of a potentially fragmented buffer |
| 1370 | * @skb: start of the buffer to use |
| 1371 | * @tail: tail fragment of the buffer to use |
| 1372 | * @len: amount of data to add |
| 1373 | * |
| 1374 | * This function extends the used data area of the potentially |
| 1375 | * fragmented buffer. @tail must be the last fragment of @skb -- or |
| 1376 | * @skb itself. If this would exceed the total buffer size the kernel |
| 1377 | * will panic. A pointer to the first byte of the extra data is |
| 1378 | * returned. |
| 1379 | */ |
| 1380 | |
| 1381 | unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) |
| 1382 | { |
| 1383 | if (tail != skb) { |
| 1384 | skb->data_len += len; |
| 1385 | skb->len += len; |
| 1386 | } |
| 1387 | return skb_put(tail, len); |
| 1388 | } |
| 1389 | EXPORT_SYMBOL_GPL(pskb_put); |
| 1390 | |
| 1391 | /** |
Ilpo Järvinen | 0dde3e1 | 2008-03-27 17:43:41 -0700 | [diff] [blame] | 1392 | * skb_put - add data to a buffer |
| 1393 | * @skb: buffer to use |
| 1394 | * @len: amount of data to add |
| 1395 | * |
| 1396 | * This function extends the used data area of the buffer. If this would |
| 1397 | * exceed the total buffer size the kernel will panic. A pointer to the |
| 1398 | * first byte of the extra data is returned. |
| 1399 | */ |
| 1400 | unsigned char *skb_put(struct sk_buff *skb, unsigned int len) |
| 1401 | { |
| 1402 | unsigned char *tmp = skb_tail_pointer(skb); |
| 1403 | SKB_LINEAR_ASSERT(skb); |
| 1404 | skb->tail += len; |
| 1405 | skb->len += len; |
| 1406 | if (unlikely(skb->tail > skb->end)) |
| 1407 | skb_over_panic(skb, len, __builtin_return_address(0)); |
| 1408 | return tmp; |
| 1409 | } |
| 1410 | EXPORT_SYMBOL(skb_put); |
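/* A minimal, hypothetical sketch (not part of the original file): build
 * a small packet by reserving headroom and appending the payload with
 * skb_put().
 */
static struct sk_buff *example_build_payload(const void *data,
					     unsigned int len)
{
	struct sk_buff *skb = alloc_skb(ETH_HLEN + len, GFP_ATOMIC);

	if (!skb)
		return NULL;
	skb_reserve(skb, ETH_HLEN);		/* room for a header later */
	memcpy(skb_put(skb, len), data, len);	/* append the payload */
	return skb;
}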
| 1411 | |
Ilpo Järvinen | 6be8ac2 | 2008-03-27 17:47:24 -0700 | [diff] [blame] | 1412 | /** |
Ilpo Järvinen | c2aa270 | 2008-03-27 17:52:40 -0700 | [diff] [blame] | 1413 | * skb_push - add data to the start of a buffer |
| 1414 | * @skb: buffer to use |
| 1415 | * @len: amount of data to add |
| 1416 | * |
| 1417 | * This function extends the used data area of the buffer at the buffer |
| 1418 | * start. If this would exceed the total buffer headroom the kernel will |
| 1419 | * panic. A pointer to the first byte of the extra data is returned. |
| 1420 | */ |
| 1421 | unsigned char *skb_push(struct sk_buff *skb, unsigned int len) |
| 1422 | { |
| 1423 | skb->data -= len; |
| 1424 | skb->len += len; |
| 1425 | if (unlikely(skb->data<skb->head)) |
| 1426 | skb_under_panic(skb, len, __builtin_return_address(0)); |
| 1427 | return skb->data; |
| 1428 | } |
| 1429 | EXPORT_SYMBOL(skb_push); |
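/* A minimal, hypothetical sketch (not part of the original file):
 * prepend an Ethernet header into previously reserved headroom.
 */
static void example_push_eth(struct sk_buff *skb, const u8 *dst,
			     const u8 *src)
{
	struct ethhdr *eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);

	memcpy(eth->h_dest, dst, ETH_ALEN);
	memcpy(eth->h_source, src, ETH_ALEN);
	eth->h_proto = htons(ETH_P_IP);	/* hypothetical payload type */
}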
| 1430 | |
| 1431 | /** |
Ilpo Järvinen | 6be8ac2 | 2008-03-27 17:47:24 -0700 | [diff] [blame] | 1432 | * skb_pull - remove data from the start of a buffer |
| 1433 | * @skb: buffer to use |
| 1434 | * @len: amount of data to remove |
| 1435 | * |
| 1436 | * This function removes data from the start of a buffer, returning |
| 1437 | * the memory to the headroom. A pointer to the next data in the buffer |
| 1438 |  * is returned. Once the data has been pulled, future pushes will overwrite |
| 1439 | * the old data. |
| 1440 | */ |
| 1441 | unsigned char *skb_pull(struct sk_buff *skb, unsigned int len) |
| 1442 | { |
David S. Miller | 47d2964 | 2010-05-02 02:21:44 -0700 | [diff] [blame] | 1443 | return skb_pull_inline(skb, len); |
Ilpo Järvinen | 6be8ac2 | 2008-03-27 17:47:24 -0700 | [diff] [blame] | 1444 | } |
| 1445 | EXPORT_SYMBOL(skb_pull); |
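/* A minimal, hypothetical sketch (not part of the original file): strip
 * an outer header after making sure it sits in the linear area.
 */
static int example_strip_outer_header(struct sk_buff *skb,
				      unsigned int hlen)
{
	if (!pskb_may_pull(skb, hlen))
		return -EINVAL;
	skb_pull(skb, hlen);	/* data now points at the inner packet */
	return 0;
}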
| 1446 | |
Ilpo Järvinen | 419ae74 | 2008-03-27 17:54:01 -0700 | [diff] [blame] | 1447 | /** |
| 1448 | * skb_trim - remove end from a buffer |
| 1449 | * @skb: buffer to alter |
| 1450 | * @len: new length |
| 1451 | * |
| 1452 | * Cut the length of a buffer down by removing data from the tail. If |
| 1453 | * the buffer is already under the length specified it is not modified. |
| 1454 | * The skb must be linear. |
| 1455 | */ |
| 1456 | void skb_trim(struct sk_buff *skb, unsigned int len) |
| 1457 | { |
| 1458 | if (skb->len > len) |
| 1459 | __skb_trim(skb, len); |
| 1460 | } |
| 1461 | EXPORT_SYMBOL(skb_trim); |
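/* A minimal, hypothetical sketch (not part of the original file): drop
 * a 4-byte hardware FCS from a received frame.  pskb_trim() is used so
 * the call is also safe on non-linear skbs.
 */
static int example_strip_fcs(struct sk_buff *skb)
{
	if (skb->len < 4)
		return -EINVAL;
	return pskb_trim(skb, skb->len - 4);
}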
| 1462 | |
Herbert Xu | 3cc0e87 | 2006-06-09 16:13:38 -0700 | [diff] [blame] | 1463 | /* Trims skb to length len. It can change skb pointers. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1464 | */ |
| 1465 | |
Herbert Xu | 3cc0e87 | 2006-06-09 16:13:38 -0700 | [diff] [blame] | 1466 | int ___pskb_trim(struct sk_buff *skb, unsigned int len) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1467 | { |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1468 | struct sk_buff **fragp; |
| 1469 | struct sk_buff *frag; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1470 | int offset = skb_headlen(skb); |
| 1471 | int nfrags = skb_shinfo(skb)->nr_frags; |
| 1472 | int i; |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1473 | int err; |
| 1474 | |
| 1475 | if (skb_cloned(skb) && |
| 1476 | unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) |
| 1477 | return err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1478 | |
Herbert Xu | f4d26fb | 2006-07-30 20:20:28 -0700 | [diff] [blame] | 1479 | i = 0; |
| 1480 | if (offset >= len) |
| 1481 | goto drop_pages; |
| 1482 | |
| 1483 | for (; i < nfrags; i++) { |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 1484 | int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1485 | |
| 1486 | if (end < len) { |
| 1487 | offset = end; |
| 1488 | continue; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1489 | } |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1490 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 1491 | skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1492 | |
Herbert Xu | f4d26fb | 2006-07-30 20:20:28 -0700 | [diff] [blame] | 1493 | drop_pages: |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1494 | skb_shinfo(skb)->nr_frags = i; |
| 1495 | |
| 1496 | for (; i < nfrags; i++) |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 1497 | skb_frag_unref(skb, i); |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1498 | |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 1499 | if (skb_has_frag_list(skb)) |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1500 | skb_drop_fraglist(skb); |
Herbert Xu | f4d26fb | 2006-07-30 20:20:28 -0700 | [diff] [blame] | 1501 | goto done; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1502 | } |
| 1503 | |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1504 | for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); |
| 1505 | fragp = &frag->next) { |
| 1506 | int end = offset + frag->len; |
| 1507 | |
| 1508 | if (skb_shared(frag)) { |
| 1509 | struct sk_buff *nfrag; |
| 1510 | |
| 1511 | nfrag = skb_clone(frag, GFP_ATOMIC); |
| 1512 | if (unlikely(!nfrag)) |
| 1513 | return -ENOMEM; |
| 1514 | |
| 1515 | nfrag->next = frag->next; |
Eric Dumazet | 85bb2a6 | 2012-04-19 02:24:53 +0000 | [diff] [blame] | 1516 | consume_skb(frag); |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1517 | frag = nfrag; |
| 1518 | *fragp = frag; |
| 1519 | } |
| 1520 | |
| 1521 | if (end < len) { |
| 1522 | offset = end; |
| 1523 | continue; |
| 1524 | } |
| 1525 | |
| 1526 | if (end > len && |
| 1527 | unlikely((err = pskb_trim(frag, len - offset)))) |
| 1528 | return err; |
| 1529 | |
| 1530 | if (frag->next) |
| 1531 | skb_drop_list(&frag->next); |
| 1532 | break; |
| 1533 | } |
| 1534 | |
Herbert Xu | f4d26fb | 2006-07-30 20:20:28 -0700 | [diff] [blame] | 1535 | done: |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1536 | if (len > skb_headlen(skb)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1537 | skb->data_len -= skb->len - len; |
| 1538 | skb->len = len; |
| 1539 | } else { |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1540 | skb->len = len; |
| 1541 | skb->data_len = 0; |
Arnaldo Carvalho de Melo | 27a884d | 2007-04-19 20:29:13 -0700 | [diff] [blame] | 1542 | skb_set_tail_pointer(skb, len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1543 | } |
| 1544 | |
| 1545 | return 0; |
| 1546 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1547 | EXPORT_SYMBOL(___pskb_trim); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1548 | |
| 1549 | /** |
| 1550 | * __pskb_pull_tail - advance tail of skb header |
| 1551 | * @skb: buffer to reallocate |
| 1552 | * @delta: number of bytes to advance tail |
| 1553 | * |
| 1554 |  * The function makes sense only on a fragmented &sk_buff: it expands |
| 1555 |  * the header, moving its tail forward and copying the necessary |
| 1556 |  * data from the fragmented part. |
| 1557 | * |
| 1558 | * &sk_buff MUST have reference count of 1. |
| 1559 | * |
| 1560 |  * Returns %NULL (and the &sk_buff is unchanged) if the pull failed, |
| 1561 |  * or the value of the new tail of the skb on success. |
| 1562 | * |
| 1563 | * All the pointers pointing into skb header may change and must be |
| 1564 | * reloaded after call to this function. |
| 1565 | */ |
| 1566 | |
| 1567 | /* Moves tail of skb head forward, copying data from fragmented part, |
| 1568 | * when it is necessary. |
| 1569 | * 1. It may fail due to malloc failure. |
| 1570 | * 2. It may change skb pointers. |
| 1571 | * |
| 1572 | * It is pretty complicated. Luckily, it is called only in exceptional cases. |
| 1573 | */ |
| 1574 | unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta) |
| 1575 | { |
| 1576 | 	/* If the skb does not have enough free space at the tail, get a new |
| 1577 | 	 * one plus 128 bytes for future expansions. If we have enough room |
| 1578 | 	 * at the tail, reallocate without expansion only if the skb is cloned. |
| 1579 | */ |
Arnaldo Carvalho de Melo | 4305b54 | 2007-04-19 20:43:29 -0700 | [diff] [blame] | 1580 | int i, k, eat = (skb->tail + delta) - skb->end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1581 | |
| 1582 | if (eat > 0 || skb_cloned(skb)) { |
| 1583 | if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, |
| 1584 | GFP_ATOMIC)) |
| 1585 | return NULL; |
| 1586 | } |
| 1587 | |
Arnaldo Carvalho de Melo | 27a884d | 2007-04-19 20:29:13 -0700 | [diff] [blame] | 1588 | if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1589 | BUG(); |
| 1590 | |
| 1591 | 	/* Optimization: no fragments, no reason to pre-estimate the |
| 1592 | 	 * size of pulled pages. Superb. |
| 1593 | */ |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 1594 | if (!skb_has_frag_list(skb)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1595 | goto pull_pages; |
| 1596 | |
| 1597 | /* Estimate size of pulled pages. */ |
| 1598 | eat = delta; |
| 1599 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 1600 | int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); |
| 1601 | |
| 1602 | if (size >= eat) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1603 | goto pull_pages; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 1604 | eat -= size; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1605 | } |
| 1606 | |
| 1607 | 	/* If we need to update the frag list, we are in trouble. |
| 1608 | 	 * Certainly, it is possible to add an offset to the skb data, |
| 1609 | 	 * but taking into account that pulling is expected to |
| 1610 | 	 * be a very rare operation, it is worth fighting against |
| 1611 | 	 * further bloating of the skb head and crucifying ourselves here instead. |
| 1612 | 	 * Pure masochism, indeed. 8)8) |
| 1613 | */ |
| 1614 | if (eat) { |
| 1615 | struct sk_buff *list = skb_shinfo(skb)->frag_list; |
| 1616 | struct sk_buff *clone = NULL; |
| 1617 | struct sk_buff *insp = NULL; |
| 1618 | |
| 1619 | do { |
Kris Katterjohn | 09a6266 | 2006-01-08 22:24:28 -0800 | [diff] [blame] | 1620 | BUG_ON(!list); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1621 | |
| 1622 | if (list->len <= eat) { |
| 1623 | /* Eaten as whole. */ |
| 1624 | eat -= list->len; |
| 1625 | list = list->next; |
| 1626 | insp = list; |
| 1627 | } else { |
| 1628 | /* Eaten partially. */ |
| 1629 | |
| 1630 | if (skb_shared(list)) { |
| 1631 | /* Sucks! We need to fork list. :-( */ |
| 1632 | clone = skb_clone(list, GFP_ATOMIC); |
| 1633 | if (!clone) |
| 1634 | return NULL; |
| 1635 | insp = list->next; |
| 1636 | list = clone; |
| 1637 | } else { |
| 1638 | /* This may be pulled without |
| 1639 | * problems. */ |
| 1640 | insp = list; |
| 1641 | } |
| 1642 | if (!pskb_pull(list, eat)) { |
Wei Yongjun | f3fbbe0 | 2009-02-25 00:37:32 +0000 | [diff] [blame] | 1643 | kfree_skb(clone); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1644 | return NULL; |
| 1645 | } |
| 1646 | break; |
| 1647 | } |
| 1648 | } while (eat); |
| 1649 | |
| 1650 | /* Free pulled out fragments. */ |
| 1651 | while ((list = skb_shinfo(skb)->frag_list) != insp) { |
| 1652 | skb_shinfo(skb)->frag_list = list->next; |
| 1653 | kfree_skb(list); |
| 1654 | } |
| 1655 | /* And insert new clone at head. */ |
| 1656 | if (clone) { |
| 1657 | clone->next = list; |
| 1658 | skb_shinfo(skb)->frag_list = clone; |
| 1659 | } |
| 1660 | } |
| 1661 | /* Success! Now we may commit changes to skb data. */ |
| 1662 | |
| 1663 | pull_pages: |
| 1664 | eat = delta; |
| 1665 | k = 0; |
| 1666 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 1667 | int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); |
| 1668 | |
| 1669 | if (size <= eat) { |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 1670 | skb_frag_unref(skb, i); |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 1671 | eat -= size; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1672 | } else { |
| 1673 | skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i]; |
| 1674 | if (eat) { |
| 1675 | skb_shinfo(skb)->frags[k].page_offset += eat; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 1676 | skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1677 | eat = 0; |
| 1678 | } |
| 1679 | k++; |
| 1680 | } |
| 1681 | } |
| 1682 | skb_shinfo(skb)->nr_frags = k; |
| 1683 | |
| 1684 | skb->tail += delta; |
| 1685 | skb->data_len -= delta; |
| 1686 | |
Arnaldo Carvalho de Melo | 27a884d | 2007-04-19 20:29:13 -0700 | [diff] [blame] | 1687 | return skb_tail_pointer(skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1688 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1689 | EXPORT_SYMBOL(__pskb_pull_tail); |
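/* A minimal, hypothetical sketch (not part of the original file): make
 * the first 'len' bytes linear so a header that may start in the frags
 * can be read directly.  This is essentially what pskb_may_pull() does
 * before skb->data is dereferenced.
 */
static void *example_linearize_header(struct sk_buff *skb, unsigned int len)
{
	if (len > skb->len)
		return NULL;
	if (skb_headlen(skb) < len &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	return skb->data;
}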
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1690 | |
Eric Dumazet | 22019b1 | 2011-07-29 18:37:31 +0000 | [diff] [blame] | 1691 | /** |
| 1692 | * skb_copy_bits - copy bits from skb to kernel buffer |
| 1693 | * @skb: source skb |
| 1694 | * @offset: offset in source |
| 1695 | * @to: destination buffer |
| 1696 | * @len: number of bytes to copy |
| 1697 | * |
| 1698 | * Copy the specified number of bytes from the source skb to the |
| 1699 | * destination buffer. |
| 1700 | * |
| 1701 |  * CAUTION: |
| 1702 | * If its prototype is ever changed, |
| 1703 | * check arch/{*}/net/{*}.S files, |
| 1704 | * since it is called from BPF assembly code. |
| 1705 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1706 | int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) |
| 1707 | { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1708 | int start = skb_headlen(skb); |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1709 | struct sk_buff *frag_iter; |
| 1710 | int i, copy; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1711 | |
| 1712 | if (offset > (int)skb->len - len) |
| 1713 | goto fault; |
| 1714 | |
| 1715 | /* Copy header. */ |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1716 | if ((copy = start - offset) > 0) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1717 | if (copy > len) |
| 1718 | copy = len; |
Arnaldo Carvalho de Melo | d626f62 | 2007-03-27 18:55:52 -0300 | [diff] [blame] | 1719 | skb_copy_from_linear_data_offset(skb, offset, to, copy); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1720 | if ((len -= copy) == 0) |
| 1721 | return 0; |
| 1722 | offset += copy; |
| 1723 | to += copy; |
| 1724 | } |
| 1725 | |
| 1726 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1727 | int end; |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 1728 | skb_frag_t *f = &skb_shinfo(skb)->frags[i]; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1729 | |
Ilpo Järvinen | 547b792 | 2008-07-25 21:43:18 -0700 | [diff] [blame] | 1730 | WARN_ON(start > offset + len); |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1731 | |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 1732 | end = start + skb_frag_size(f); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1733 | if ((copy = end - offset) > 0) { |
| 1734 | u8 *vaddr; |
| 1735 | |
| 1736 | if (copy > len) |
| 1737 | copy = len; |
| 1738 | |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 1739 | vaddr = kmap_atomic(skb_frag_page(f)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1740 | memcpy(to, |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 1741 | vaddr + f->page_offset + offset - start, |
| 1742 | copy); |
| 1743 | kunmap_atomic(vaddr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1744 | |
| 1745 | if ((len -= copy) == 0) |
| 1746 | return 0; |
| 1747 | offset += copy; |
| 1748 | to += copy; |
| 1749 | } |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1750 | start = end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1751 | } |
| 1752 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1753 | skb_walk_frags(skb, frag_iter) { |
| 1754 | int end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1755 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1756 | WARN_ON(start > offset + len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1757 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1758 | end = start + frag_iter->len; |
| 1759 | if ((copy = end - offset) > 0) { |
| 1760 | if (copy > len) |
| 1761 | copy = len; |
| 1762 | if (skb_copy_bits(frag_iter, offset - start, to, copy)) |
| 1763 | goto fault; |
| 1764 | if ((len -= copy) == 0) |
| 1765 | return 0; |
| 1766 | offset += copy; |
| 1767 | to += copy; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1768 | } |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1769 | start = end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1770 | } |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1771 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1772 | if (!len) |
| 1773 | return 0; |
| 1774 | |
| 1775 | fault: |
| 1776 | return -EFAULT; |
| 1777 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1778 | EXPORT_SYMBOL(skb_copy_bits); |
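/* A minimal, hypothetical sketch (not part of the original file): peek
 * at two bytes that may live in the frags without modifying the skb,
 * the non-destructive alternative to pulling.  'thoff' is assumed to be
 * the transport header offset; the destination port sits 2 bytes into
 * a UDP header.
 */
static int example_peek_udp_dport(const struct sk_buff *skb,
				  unsigned int thoff, __be16 *dport)
{
	return skb_copy_bits(skb, thoff + 2, dport, sizeof(*dport));
}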
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1779 | |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1780 | /* |
| 1781 | * Callback from splice_to_pipe(), if we need to release some pages |
| 1782 |  * at the end of the spd in case we errored out while filling the pipe. |
| 1783 | */ |
| 1784 | static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) |
| 1785 | { |
Jarek Poplawski | 8b9d372 | 2009-01-19 17:03:56 -0800 | [diff] [blame] | 1786 | put_page(spd->pages[i]); |
| 1787 | } |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1788 | |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 1789 | static struct page *linear_to_page(struct page *page, unsigned int *len, |
| 1790 | unsigned int *offset, |
Eric Dumazet | 18aafc6 | 2013-01-11 14:46:37 +0000 | [diff] [blame] | 1791 | struct sock *sk) |
Jarek Poplawski | 8b9d372 | 2009-01-19 17:03:56 -0800 | [diff] [blame] | 1792 | { |
Eric Dumazet | 5640f76 | 2012-09-23 23:04:42 +0000 | [diff] [blame] | 1793 | struct page_frag *pfrag = sk_page_frag(sk); |
Jarek Poplawski | 8b9d372 | 2009-01-19 17:03:56 -0800 | [diff] [blame] | 1794 | |
Eric Dumazet | 5640f76 | 2012-09-23 23:04:42 +0000 | [diff] [blame] | 1795 | if (!sk_page_frag_refill(sk, pfrag)) |
| 1796 | return NULL; |
Jarek Poplawski | 4fb6699 | 2009-02-01 00:41:42 -0800 | [diff] [blame] | 1797 | |
Eric Dumazet | 5640f76 | 2012-09-23 23:04:42 +0000 | [diff] [blame] | 1798 | *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset); |
Jarek Poplawski | 4fb6699 | 2009-02-01 00:41:42 -0800 | [diff] [blame] | 1799 | |
Eric Dumazet | 5640f76 | 2012-09-23 23:04:42 +0000 | [diff] [blame] | 1800 | memcpy(page_address(pfrag->page) + pfrag->offset, |
| 1801 | page_address(page) + *offset, *len); |
| 1802 | *offset = pfrag->offset; |
| 1803 | pfrag->offset += *len; |
Jarek Poplawski | 4fb6699 | 2009-02-01 00:41:42 -0800 | [diff] [blame] | 1804 | |
Eric Dumazet | 5640f76 | 2012-09-23 23:04:42 +0000 | [diff] [blame] | 1805 | return pfrag->page; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1806 | } |
| 1807 | |
Eric Dumazet | 41c73a0 | 2012-04-22 12:26:16 +0000 | [diff] [blame] | 1808 | static bool spd_can_coalesce(const struct splice_pipe_desc *spd, |
| 1809 | struct page *page, |
| 1810 | unsigned int offset) |
| 1811 | { |
| 1812 | return spd->nr_pages && |
| 1813 | spd->pages[spd->nr_pages - 1] == page && |
| 1814 | (spd->partial[spd->nr_pages - 1].offset + |
| 1815 | spd->partial[spd->nr_pages - 1].len == offset); |
| 1816 | } |
| 1817 | |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1818 | /* |
| 1819 | * Fill page/offset/length into spd, if it can hold more pages. |
| 1820 | */ |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 1821 | static bool spd_fill_page(struct splice_pipe_desc *spd, |
| 1822 | struct pipe_inode_info *pipe, struct page *page, |
| 1823 | unsigned int *len, unsigned int offset, |
Eric Dumazet | 18aafc6 | 2013-01-11 14:46:37 +0000 | [diff] [blame] | 1824 | bool linear, |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 1825 | struct sock *sk) |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1826 | { |
Eric Dumazet | 41c73a0 | 2012-04-22 12:26:16 +0000 | [diff] [blame] | 1827 | if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 1828 | return true; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1829 | |
Jarek Poplawski | 8b9d372 | 2009-01-19 17:03:56 -0800 | [diff] [blame] | 1830 | if (linear) { |
Eric Dumazet | 18aafc6 | 2013-01-11 14:46:37 +0000 | [diff] [blame] | 1831 | page = linear_to_page(page, len, &offset, sk); |
Jarek Poplawski | 8b9d372 | 2009-01-19 17:03:56 -0800 | [diff] [blame] | 1832 | if (!page) |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 1833 | return true; |
Eric Dumazet | 41c73a0 | 2012-04-22 12:26:16 +0000 | [diff] [blame] | 1834 | } |
| 1835 | if (spd_can_coalesce(spd, page, offset)) { |
| 1836 | spd->partial[spd->nr_pages - 1].len += *len; |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 1837 | return false; |
Eric Dumazet | 41c73a0 | 2012-04-22 12:26:16 +0000 | [diff] [blame] | 1838 | } |
| 1839 | get_page(page); |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1840 | spd->pages[spd->nr_pages] = page; |
Jarek Poplawski | 4fb6699 | 2009-02-01 00:41:42 -0800 | [diff] [blame] | 1841 | spd->partial[spd->nr_pages].len = *len; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1842 | spd->partial[spd->nr_pages].offset = offset; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1843 | spd->nr_pages++; |
Jarek Poplawski | 8b9d372 | 2009-01-19 17:03:56 -0800 | [diff] [blame] | 1844 | |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 1845 | return false; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1846 | } |
| 1847 | |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 1848 | static bool __splice_segment(struct page *page, unsigned int poff, |
| 1849 | unsigned int plen, unsigned int *off, |
Eric Dumazet | 18aafc6 | 2013-01-11 14:46:37 +0000 | [diff] [blame] | 1850 | unsigned int *len, |
Eric Dumazet | d7ccf7c | 2012-04-23 23:35:04 -0400 | [diff] [blame] | 1851 | struct splice_pipe_desc *spd, bool linear, |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 1852 | struct sock *sk, |
| 1853 | struct pipe_inode_info *pipe) |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 1854 | { |
| 1855 | if (!*len) |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 1856 | return true; |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 1857 | |
| 1858 | /* skip this segment if already processed */ |
| 1859 | if (*off >= plen) { |
| 1860 | *off -= plen; |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 1861 | return false; |
Octavian Purdila | db43a28 | 2008-06-27 17:27:21 -0700 | [diff] [blame] | 1862 | } |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1863 | |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 1864 | /* ignore any bits we already processed */ |
Eric Dumazet | 9ca1b22 | 2013-01-05 21:31:18 +0000 | [diff] [blame] | 1865 | poff += *off; |
| 1866 | plen -= *off; |
| 1867 | *off = 0; |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 1868 | |
Eric Dumazet | 18aafc6 | 2013-01-11 14:46:37 +0000 | [diff] [blame] | 1869 | do { |
| 1870 | unsigned int flen = min(*len, plen); |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 1871 | |
Eric Dumazet | 18aafc6 | 2013-01-11 14:46:37 +0000 | [diff] [blame] | 1872 | if (spd_fill_page(spd, pipe, page, &flen, poff, |
| 1873 | linear, sk)) |
| 1874 | return true; |
| 1875 | poff += flen; |
| 1876 | plen -= flen; |
| 1877 | *len -= flen; |
| 1878 | } while (*len && plen); |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 1879 | |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 1880 | return false; |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 1881 | } |
| 1882 | |
| 1883 | /* |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 1884 | * Map linear and fragment data from the skb to spd. It reports true if the |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 1885 | * pipe is full or if we already spliced the requested length. |
| 1886 | */ |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 1887 | static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, |
| 1888 | unsigned int *offset, unsigned int *len, |
| 1889 | struct splice_pipe_desc *spd, struct sock *sk) |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 1890 | { |
| 1891 | int seg; |
| 1892 | |
Eric Dumazet | 1d0c0b3 | 2012-04-27 02:10:03 +0000 | [diff] [blame] | 1893 | 	/* map the linear part: |
Alexander Duyck | 2996d31 | 2012-05-02 18:18:42 +0000 | [diff] [blame] | 1894 | * If skb->head_frag is set, this 'linear' part is backed by a |
| 1895 | * fragment, and if the head is not shared with any clones then |
| 1896 | * we can avoid a copy since we own the head portion of this page. |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1897 | */ |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 1898 | if (__splice_segment(virt_to_page(skb->data), |
| 1899 | (unsigned long) skb->data & (PAGE_SIZE - 1), |
| 1900 | skb_headlen(skb), |
Eric Dumazet | 18aafc6 | 2013-01-11 14:46:37 +0000 | [diff] [blame] | 1901 | offset, len, spd, |
Alexander Duyck | 3a7c1ee4 | 2012-05-03 01:09:42 +0000 | [diff] [blame] | 1902 | skb_head_is_locked(skb), |
Eric Dumazet | 1d0c0b3 | 2012-04-27 02:10:03 +0000 | [diff] [blame] | 1903 | sk, pipe)) |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 1904 | return true; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1905 | |
| 1906 | /* |
| 1907 | * then map the fragments |
| 1908 | */ |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1909 | for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { |
| 1910 | const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; |
| 1911 | |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 1912 | if (__splice_segment(skb_frag_page(f), |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 1913 | f->page_offset, skb_frag_size(f), |
Eric Dumazet | 18aafc6 | 2013-01-11 14:46:37 +0000 | [diff] [blame] | 1914 | offset, len, spd, false, sk, pipe)) |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 1915 | return true; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1916 | } |
| 1917 | |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 1918 | return false; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1919 | } |
| 1920 | |
| 1921 | /* |
| 1922 |  * Map data from the skb to a pipe. Should handle the linear part, |
| 1923 | * the fragments, and the frag list. It does NOT handle frag lists within |
| 1924 | * the frag list, if such a thing exists. We'd probably need to recurse to |
| 1925 | * handle that cleanly. |
| 1926 | */ |
Jarek Poplawski | 8b9d372 | 2009-01-19 17:03:56 -0800 | [diff] [blame] | 1927 | int skb_splice_bits(struct sk_buff *skb, unsigned int offset, |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1928 | struct pipe_inode_info *pipe, unsigned int tlen, |
| 1929 | unsigned int flags) |
| 1930 | { |
Eric Dumazet | 41c73a0 | 2012-04-22 12:26:16 +0000 | [diff] [blame] | 1931 | struct partial_page partial[MAX_SKB_FRAGS]; |
| 1932 | struct page *pages[MAX_SKB_FRAGS]; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1933 | struct splice_pipe_desc spd = { |
| 1934 | .pages = pages, |
| 1935 | .partial = partial, |
Eric Dumazet | 047fe36 | 2012-06-12 15:24:40 +0200 | [diff] [blame] | 1936 | .nr_pages_max = MAX_SKB_FRAGS, |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1937 | .flags = flags, |
Miklos Szeredi | 28a625c | 2014-01-22 19:36:57 +0100 | [diff] [blame] | 1938 | .ops = &nosteal_pipe_buf_ops, |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1939 | .spd_release = sock_spd_release, |
| 1940 | }; |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1941 | struct sk_buff *frag_iter; |
Jarek Poplawski | 7a67e56 | 2009-04-30 05:41:19 -0700 | [diff] [blame] | 1942 | struct sock *sk = skb->sk; |
Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 1943 | int ret = 0; |
| 1944 | |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1945 | /* |
| 1946 | * __skb_splice_bits() only fails if the output has no room left, |
| 1947 | * so no point in going over the frag_list for the error case. |
| 1948 | */ |
Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 1949 | if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk)) |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1950 | goto done; |
| 1951 | else if (!tlen) |
| 1952 | goto done; |
| 1953 | |
| 1954 | /* |
| 1955 | * now see if we have a frag_list to map |
| 1956 | */ |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1957 | skb_walk_frags(skb, frag_iter) { |
| 1958 | if (!tlen) |
| 1959 | break; |
Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 1960 | if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk)) |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1961 | break; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1962 | } |
| 1963 | |
| 1964 | done: |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1965 | if (spd.nr_pages) { |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1966 | /* |
| 1967 | * Drop the socket lock, otherwise we have reverse |
| 1968 | * locking dependencies between sk_lock and i_mutex |
| 1969 | * here as compared to sendfile(). We enter here |
| 1970 | * with the socket lock held, and splice_to_pipe() will |
| 1971 | * grab the pipe inode lock. For sendfile() emulation, |
| 1972 | * we call into ->sendpage() with the i_mutex lock held |
| 1973 | * and networking will grab the socket lock. |
| 1974 | */ |
Octavian Purdila | 293ad60 | 2008-06-04 15:45:58 -0700 | [diff] [blame] | 1975 | release_sock(sk); |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1976 | ret = splice_to_pipe(pipe, &spd); |
Octavian Purdila | 293ad60 | 2008-06-04 15:45:58 -0700 | [diff] [blame] | 1977 | lock_sock(sk); |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1978 | } |
| 1979 | |
Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 1980 | return ret; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1981 | } |
| 1982 | |
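/* Illustrative usage sketch of skb_splice_bits(), not a definitive caller:
 * splicing one skb from a protocol's ->splice_read() path. 'offset', 'pipe',
 * 'tlen' and 'flags' are assumed inputs, and the socket lock is assumed held
 * on entry, since skb_splice_bits() drops and re-takes it internally:
 *
 *	int spliced;
 *
 *	spliced = skb_splice_bits(skb, offset, pipe, tlen, flags);
 *	if (spliced > 0)
 *		tlen -= spliced;
 */
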
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 1983 | /** |
| 1984 | * skb_store_bits - store bits from kernel buffer to skb |
| 1985 | * @skb: destination buffer |
| 1986 | * @offset: offset in destination |
| 1987 | * @from: source buffer |
| 1988 | * @len: number of bytes to copy |
| 1989 | * |
| 1990 | * Copy the specified number of bytes from the source buffer to the |
| 1991 | * destination skb. This function handles all the messy bits of |
| 1992 | * traversing fragment lists and such. |
| 1993 | */ |
| 1994 | |
Stephen Hemminger | 0c6fcc8 | 2007-04-20 16:40:01 -0700 | [diff] [blame] | 1995 | int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 1996 | { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1997 | int start = skb_headlen(skb); |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1998 | struct sk_buff *frag_iter; |
| 1999 | int i, copy; |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2000 | |
| 2001 | if (offset > (int)skb->len - len) |
| 2002 | goto fault; |
| 2003 | |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2004 | if ((copy = start - offset) > 0) { |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2005 | if (copy > len) |
| 2006 | copy = len; |
Arnaldo Carvalho de Melo | 27d7ff4 | 2007-03-31 11:55:19 -0300 | [diff] [blame] | 2007 | skb_copy_to_linear_data_offset(skb, offset, from, copy); |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2008 | if ((len -= copy) == 0) |
| 2009 | return 0; |
| 2010 | offset += copy; |
| 2011 | from += copy; |
| 2012 | } |
| 2013 | |
| 2014 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
| 2015 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2016 | int end; |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2017 | |
Ilpo Järvinen | 547b792 | 2008-07-25 21:43:18 -0700 | [diff] [blame] | 2018 | WARN_ON(start > offset + len); |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2019 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2020 | end = start + skb_frag_size(frag); |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2021 | if ((copy = end - offset) > 0) { |
| 2022 | u8 *vaddr; |
| 2023 | |
| 2024 | if (copy > len) |
| 2025 | copy = len; |
| 2026 | |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 2027 | vaddr = kmap_atomic(skb_frag_page(frag)); |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2028 | memcpy(vaddr + frag->page_offset + offset - start, |
| 2029 | from, copy); |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 2030 | kunmap_atomic(vaddr); |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2031 | |
| 2032 | if ((len -= copy) == 0) |
| 2033 | return 0; |
| 2034 | offset += copy; |
| 2035 | from += copy; |
| 2036 | } |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2037 | start = end; |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2038 | } |
| 2039 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2040 | skb_walk_frags(skb, frag_iter) { |
| 2041 | int end; |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2042 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2043 | WARN_ON(start > offset + len); |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2044 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2045 | end = start + frag_iter->len; |
| 2046 | if ((copy = end - offset) > 0) { |
| 2047 | if (copy > len) |
| 2048 | copy = len; |
| 2049 | if (skb_store_bits(frag_iter, offset - start, |
| 2050 | from, copy)) |
| 2051 | goto fault; |
| 2052 | if ((len -= copy) == 0) |
| 2053 | return 0; |
| 2054 | offset += copy; |
| 2055 | from += copy; |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2056 | } |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2057 | start = end; |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2058 | } |
| 2059 | if (!len) |
| 2060 | return 0; |
| 2061 | |
| 2062 | fault: |
| 2063 | return -EFAULT; |
| 2064 | } |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 2065 | EXPORT_SYMBOL(skb_store_bits); |
| 2066 | |
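/* Illustrative sketch of skb_store_bits(): rewrite the UDP destination port
 * of a packet whose transport header has been set. 'skb' and the port value
 * are assumed inputs; a real caller must also ensure the data is writable
 * (e.g. not shared with clones) before storing into it:
 *
 *	__be16 new_port = htons(4000);
 *	int off = skb_transport_offset(skb) + offsetof(struct udphdr, dest);
 *
 *	if (skb_store_bits(skb, off, &new_port, sizeof(new_port)))
 *		return -EFAULT;
 */
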
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2067 | /* Checksum skb data. */ |
Daniel Borkmann | 2817a33 | 2013-10-30 11:50:51 +0100 | [diff] [blame] | 2068 | __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, |
| 2069 | __wsum csum, const struct skb_checksum_ops *ops) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2070 | { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2071 | int start = skb_headlen(skb); |
| 2072 | int i, copy = start - offset; |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2073 | struct sk_buff *frag_iter; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2074 | int pos = 0; |
| 2075 | |
| 2076 | /* Checksum header. */ |
| 2077 | if (copy > 0) { |
| 2078 | if (copy > len) |
| 2079 | copy = len; |
Daniel Borkmann | 2817a33 | 2013-10-30 11:50:51 +0100 | [diff] [blame] | 2080 | csum = ops->update(skb->data + offset, copy, csum); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2081 | if ((len -= copy) == 0) |
| 2082 | return csum; |
| 2083 | offset += copy; |
| 2084 | pos = copy; |
| 2085 | } |
| 2086 | |
| 2087 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2088 | int end; |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 2089 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2090 | |
Ilpo Järvinen | 547b792 | 2008-07-25 21:43:18 -0700 | [diff] [blame] | 2091 | WARN_ON(start > offset + len); |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2092 | |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 2093 | end = start + skb_frag_size(frag); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2094 | if ((copy = end - offset) > 0) { |
Al Viro | 44bb936 | 2006-11-14 21:36:14 -0800 | [diff] [blame] | 2095 | __wsum csum2; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2096 | u8 *vaddr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2097 | |
| 2098 | if (copy > len) |
| 2099 | copy = len; |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 2100 | vaddr = kmap_atomic(skb_frag_page(frag)); |
Daniel Borkmann | 2817a33 | 2013-10-30 11:50:51 +0100 | [diff] [blame] | 2101 | csum2 = ops->update(vaddr + frag->page_offset + |
| 2102 | offset - start, copy, 0); |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 2103 | kunmap_atomic(vaddr); |
Daniel Borkmann | 2817a33 | 2013-10-30 11:50:51 +0100 | [diff] [blame] | 2104 | csum = ops->combine(csum, csum2, pos, copy); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2105 | if (!(len -= copy)) |
| 2106 | return csum; |
| 2107 | offset += copy; |
| 2108 | pos += copy; |
| 2109 | } |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2110 | start = end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2111 | } |
| 2112 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2113 | skb_walk_frags(skb, frag_iter) { |
| 2114 | int end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2115 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2116 | WARN_ON(start > offset + len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2117 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2118 | end = start + frag_iter->len; |
| 2119 | if ((copy = end - offset) > 0) { |
| 2120 | __wsum csum2; |
| 2121 | if (copy > len) |
| 2122 | copy = len; |
Daniel Borkmann | 2817a33 | 2013-10-30 11:50:51 +0100 | [diff] [blame] | 2123 | csum2 = __skb_checksum(frag_iter, offset - start, |
| 2124 | copy, 0, ops); |
| 2125 | csum = ops->combine(csum, csum2, pos, copy); |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2126 | if ((len -= copy) == 0) |
| 2127 | return csum; |
| 2128 | offset += copy; |
| 2129 | pos += copy; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2130 | } |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2131 | start = end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2132 | } |
Kris Katterjohn | 09a6266 | 2006-01-08 22:24:28 -0800 | [diff] [blame] | 2133 | BUG_ON(len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2134 | |
| 2135 | return csum; |
| 2136 | } |
Daniel Borkmann | 2817a33 | 2013-10-30 11:50:51 +0100 | [diff] [blame] | 2137 | EXPORT_SYMBOL(__skb_checksum); |
| 2138 | |
| 2139 | __wsum skb_checksum(const struct sk_buff *skb, int offset, |
| 2140 | int len, __wsum csum) |
| 2141 | { |
| 2142 | const struct skb_checksum_ops ops = { |
Daniel Borkmann | cea80ea | 2013-11-04 17:10:25 +0100 | [diff] [blame] | 2143 | .update = csum_partial_ext, |
Daniel Borkmann | 2817a33 | 2013-10-30 11:50:51 +0100 | [diff] [blame] | 2144 | .combine = csum_block_add_ext, |
| 2145 | }; |
| 2146 | |
| 2147 | return __skb_checksum(skb, offset, len, csum, &ops); |
| 2148 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2149 | EXPORT_SYMBOL(skb_checksum); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2150 | |
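/* Illustrative sketch of skb_checksum(): verify a datagram checksum by hand.
 * 'psum' is an assumed input holding the pseudo-header sum; a zero
 * csum_fold() result means the checksum is valid:
 *
 *	int off = skb_transport_offset(skb);
 *	__wsum csum = skb_checksum(skb, off, skb->len - off, psum);
 *
 *	if (csum_fold(csum))
 *		goto csum_error;
 */
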
| 2151 | /* Both of above in one bottle. */ |
| 2152 | |
Al Viro | 81d7766 | 2006-11-14 21:37:33 -0800 | [diff] [blame] | 2153 | __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, |
| 2154 | u8 *to, int len, __wsum csum) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2155 | { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2156 | int start = skb_headlen(skb); |
| 2157 | int i, copy = start - offset; |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2158 | struct sk_buff *frag_iter; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2159 | int pos = 0; |
| 2160 | |
| 2161 | /* Copy header. */ |
| 2162 | if (copy > 0) { |
| 2163 | if (copy > len) |
| 2164 | copy = len; |
| 2165 | csum = csum_partial_copy_nocheck(skb->data + offset, to, |
| 2166 | copy, csum); |
| 2167 | if ((len -= copy) == 0) |
| 2168 | return csum; |
| 2169 | offset += copy; |
| 2170 | to += copy; |
| 2171 | pos = copy; |
| 2172 | } |
| 2173 | |
| 2174 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2175 | int end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2176 | |
Ilpo Järvinen | 547b792 | 2008-07-25 21:43:18 -0700 | [diff] [blame] | 2177 | WARN_ON(start > offset + len); |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2178 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2179 | end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2180 | if ((copy = end - offset) > 0) { |
Al Viro | 5084205 | 2006-11-14 21:36:34 -0800 | [diff] [blame] | 2181 | __wsum csum2; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2182 | u8 *vaddr; |
| 2183 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
| 2184 | |
| 2185 | if (copy > len) |
| 2186 | copy = len; |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 2187 | vaddr = kmap_atomic(skb_frag_page(frag)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2188 | csum2 = csum_partial_copy_nocheck(vaddr + |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2189 | frag->page_offset + |
| 2190 | offset - start, to, |
| 2191 | copy, 0); |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 2192 | kunmap_atomic(vaddr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2193 | csum = csum_block_add(csum, csum2, pos); |
| 2194 | if (!(len -= copy)) |
| 2195 | return csum; |
| 2196 | offset += copy; |
| 2197 | to += copy; |
| 2198 | pos += copy; |
| 2199 | } |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2200 | start = end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2201 | } |
| 2202 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2203 | skb_walk_frags(skb, frag_iter) { |
| 2204 | __wsum csum2; |
| 2205 | int end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2206 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2207 | WARN_ON(start > offset + len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2208 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2209 | end = start + frag_iter->len; |
| 2210 | if ((copy = end - offset) > 0) { |
| 2211 | if (copy > len) |
| 2212 | copy = len; |
| 2213 | csum2 = skb_copy_and_csum_bits(frag_iter, |
| 2214 | offset - start, |
| 2215 | to, copy, 0); |
| 2216 | csum = csum_block_add(csum, csum2, pos); |
| 2217 | if ((len -= copy) == 0) |
| 2218 | return csum; |
| 2219 | offset += copy; |
| 2220 | to += copy; |
| 2221 | pos += copy; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2222 | } |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2223 | start = end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2224 | } |
Kris Katterjohn | 09a6266 | 2006-01-08 22:24:28 -0800 | [diff] [blame] | 2225 | BUG_ON(len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2226 | return csum; |
| 2227 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2228 | EXPORT_SYMBOL(skb_copy_and_csum_bits); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2229 | |
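/* Illustrative sketch of skb_copy_and_csum_bits(): copy an skb's payload
 * into a linear buffer while accumulating its checksum in the same pass, as
 * e.g. the ICMP output path does. 'buf' is an assumed buffer with room for
 * skb->len bytes:
 *
 *	__wsum csum = skb_copy_and_csum_bits(skb, 0, buf, skb->len, 0);
 *	__sum16 folded = csum_fold(csum);
 */
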
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 2230 | /** |
| 2231 | * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy() |
| 2232 | * @from: source buffer |
| 2233 | * |
| 2234 | * Calculates the amount of linear headroom needed in the 'to' skb passed |
| 2235 | * into skb_zerocopy(). |
| 2236 | */ |
| 2237 | unsigned int |
| 2238 | skb_zerocopy_headlen(const struct sk_buff *from) |
| 2239 | { |
| 2240 | unsigned int hlen = 0; |
| 2241 | |
| 2242 | if (!from->head_frag || |
| 2243 | skb_headlen(from) < L1_CACHE_BYTES || |
| 2244 | skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) |
| 2245 | hlen = skb_headlen(from); |
| 2246 | |
| 2247 | if (skb_has_frag_list(from)) |
| 2248 | hlen = from->len; |
| 2249 | |
| 2250 | return hlen; |
| 2251 | } |
| 2252 | EXPORT_SYMBOL_GPL(skb_zerocopy_headlen); |
| 2253 | |
| 2254 | /** |
| 2255 | * skb_zerocopy - Zero copy skb to skb |
| 2256 | * @to: destination buffer |
Masanari Iida | 7fceb4d | 2014-01-29 01:05:28 +0900 | [diff] [blame] | 2257 | * @from: source buffer |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 2258 | * @len: number of bytes to copy from source buffer |
| 2259 | * @hlen: size of linear headroom in destination buffer |
| 2260 | * |
| 2261 | * Copies up to `len` bytes from `from` to `to` by creating references |
| 2262 | * to the frags in the source buffer. |
| 2263 | * |
| 2264 | * The `hlen` as calculated by skb_zerocopy_headlen() specifies the |
| 2265 | * headroom in the `to` buffer. |
Zoltan Kiss | 36d5fe6 | 2014-03-26 22:37:45 +0000 | [diff] [blame] | 2266 | * |
| 2267 | * Return value: |
| 2268 | * 0: everything is OK |
| 2269 | * -ENOMEM: couldn't orphan frags of @from due to lack of memory |
| 2270 | * -EFAULT: skb_copy_bits() found some problem with skb geometry |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 2271 | */ |
Zoltan Kiss | 36d5fe6 | 2014-03-26 22:37:45 +0000 | [diff] [blame] | 2272 | int |
| 2273 | skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen) |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 2274 | { |
| 2275 | int i, j = 0; |
| 2276 | int plen = 0; /* length of skb->head fragment */ |
Zoltan Kiss | 36d5fe6 | 2014-03-26 22:37:45 +0000 | [diff] [blame] | 2277 | int ret; |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 2278 | struct page *page; |
| 2279 | unsigned int offset; |
| 2280 | |
| 2281 | BUG_ON(!from->head_frag && !hlen); |
| 2282 | |
| 2283 | /* don't bother with small payloads */ |
Zoltan Kiss | 36d5fe6 | 2014-03-26 22:37:45 +0000 | [diff] [blame] | 2284 | if (len <= skb_tailroom(to)) |
| 2285 | return skb_copy_bits(from, 0, skb_put(to, len), len); |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 2286 | |
| 2287 | if (hlen) { |
Zoltan Kiss | 36d5fe6 | 2014-03-26 22:37:45 +0000 | [diff] [blame] | 2288 | ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen); |
| 2289 | if (unlikely(ret)) |
| 2290 | return ret; |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 2291 | len -= hlen; |
| 2292 | } else { |
| 2293 | plen = min_t(int, skb_headlen(from), len); |
| 2294 | if (plen) { |
| 2295 | page = virt_to_head_page(from->head); |
| 2296 | offset = from->data - (unsigned char *)page_address(page); |
| 2297 | __skb_fill_page_desc(to, 0, page, offset, plen); |
| 2298 | get_page(page); |
| 2299 | j = 1; |
| 2300 | len -= plen; |
| 2301 | } |
| 2302 | } |
| 2303 | |
| 2304 | to->truesize += len + plen; |
| 2305 | to->len += len + plen; |
| 2306 | to->data_len += len + plen; |
| 2307 | |
Zoltan Kiss | 36d5fe6 | 2014-03-26 22:37:45 +0000 | [diff] [blame] | 2308 | if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) { |
| 2309 | skb_tx_error(from); |
| 2310 | return -ENOMEM; |
| 2311 | } |
| 2312 | |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 2313 | for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { |
| 2314 | if (!len) |
| 2315 | break; |
| 2316 | skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i]; |
| 2317 | skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len); |
| 2318 | len -= skb_shinfo(to)->frags[j].size; |
| 2319 | skb_frag_ref(to, j); |
| 2320 | j++; |
| 2321 | } |
| 2322 | skb_shinfo(to)->nr_frags = j; |
Zoltan Kiss | 36d5fe6 | 2014-03-26 22:37:45 +0000 | [diff] [blame] | 2323 | |
| 2324 | return 0; |
Thomas Graf | af2806f | 2013-12-13 15:22:17 +0100 | [diff] [blame] | 2325 | } |
| 2326 | EXPORT_SYMBOL_GPL(skb_zerocopy); |
| 2327 | |
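/* Illustrative sketch of the intended pairing with skb_zerocopy_headlen();
 * 'from' and 'len' are assumed inputs and error handling is trimmed:
 *
 *	unsigned int hlen = skb_zerocopy_headlen(from);
 *	struct sk_buff *to = alloc_skb(hlen, GFP_ATOMIC);
 *	int err;
 *
 *	if (!to)
 *		return -ENOMEM;
 *	err = skb_zerocopy(to, from, len, hlen);
 *	if (err) {
 *		kfree_skb(to);
 *		return err;
 *	}
 */
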
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2328 | void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) |
| 2329 | { |
Al Viro | d3bc23e | 2006-11-14 21:24:49 -0800 | [diff] [blame] | 2330 | __wsum csum; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2331 | long csstart; |
| 2332 | |
Patrick McHardy | 84fa793 | 2006-08-29 16:44:56 -0700 | [diff] [blame] | 2333 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
Michał Mirosław | 55508d6 | 2010-12-14 15:24:08 +0000 | [diff] [blame] | 2334 | csstart = skb_checksum_start_offset(skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2335 | else |
| 2336 | csstart = skb_headlen(skb); |
| 2337 | |
Kris Katterjohn | 09a6266 | 2006-01-08 22:24:28 -0800 | [diff] [blame] | 2338 | BUG_ON(csstart > skb_headlen(skb)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2339 | |
Arnaldo Carvalho de Melo | d626f62 | 2007-03-27 18:55:52 -0300 | [diff] [blame] | 2340 | skb_copy_from_linear_data(skb, to, csstart); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2341 | |
| 2342 | csum = 0; |
| 2343 | if (csstart != skb->len) |
| 2344 | csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, |
| 2345 | skb->len - csstart, 0); |
| 2346 | |
Patrick McHardy | 84fa793 | 2006-08-29 16:44:56 -0700 | [diff] [blame] | 2347 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
Al Viro | ff1dcad | 2006-11-20 18:07:29 -0800 | [diff] [blame] | 2348 | long csstuff = csstart + skb->csum_offset; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2349 | |
Al Viro | d3bc23e | 2006-11-14 21:24:49 -0800 | [diff] [blame] | 2350 | *((__sum16 *)(to + csstuff)) = csum_fold(csum); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2351 | } |
| 2352 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2353 | EXPORT_SYMBOL(skb_copy_and_csum_dev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2354 | |
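/* Illustrative sketch of skb_copy_and_csum_dev() in a driver's transmit
 * path, copying the whole packet into a bounce buffer and letting the helper
 * fold in a CHECKSUM_PARTIAL checksum. 'bounce' is an assumed buffer with
 * room for skb->len bytes:
 *
 *	skb_copy_and_csum_dev(skb, bounce);
 *	/* then hand 'bounce' and skb->len to the hardware */
 */
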
| 2355 | /** |
| 2356 | * skb_dequeue - remove from the head of the queue |
| 2357 | * @list: list to dequeue from |
| 2358 | * |
| 2359 | * Remove the head of the list. The list lock is taken so the function |
| 2360 | * may be used safely with other locking list functions. The head item is |
| 2361 | * returned or %NULL if the list is empty. |
| 2362 | */ |
| 2363 | |
| 2364 | struct sk_buff *skb_dequeue(struct sk_buff_head *list) |
| 2365 | { |
| 2366 | unsigned long flags; |
| 2367 | struct sk_buff *result; |
| 2368 | |
| 2369 | spin_lock_irqsave(&list->lock, flags); |
| 2370 | result = __skb_dequeue(list); |
| 2371 | spin_unlock_irqrestore(&list->lock, flags); |
| 2372 | return result; |
| 2373 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2374 | EXPORT_SYMBOL(skb_dequeue); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2375 | |
| 2376 | /** |
| 2377 | * skb_dequeue_tail - remove from the tail of the queue |
| 2378 | * @list: list to dequeue from |
| 2379 | * |
| 2380 | * Remove the tail of the list. The list lock is taken so the function |
| 2381 | * may be used safely with other locking list functions. The tail item is |
| 2382 | * returned or %NULL if the list is empty. |
| 2383 | */ |
| 2384 | struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list) |
| 2385 | { |
| 2386 | unsigned long flags; |
| 2387 | struct sk_buff *result; |
| 2388 | |
| 2389 | spin_lock_irqsave(&list->lock, flags); |
| 2390 | result = __skb_dequeue_tail(list); |
| 2391 | spin_unlock_irqrestore(&list->lock, flags); |
| 2392 | return result; |
| 2393 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2394 | EXPORT_SYMBOL(skb_dequeue_tail); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2395 | |
| 2396 | /** |
| 2397 | * skb_queue_purge - empty a list |
| 2398 | * @list: list to empty |
| 2399 | * |
| 2400 | * Delete all buffers on an &sk_buff list. Each buffer is removed from |
| 2401 | * the list and one reference dropped. This function takes the list |
| 2402 | * lock and is atomic with respect to other list locking functions. |
| 2403 | */ |
| 2404 | void skb_queue_purge(struct sk_buff_head *list) |
| 2405 | { |
| 2406 | struct sk_buff *skb; |
| 2407 | while ((skb = skb_dequeue(list)) != NULL) |
| 2408 | kfree_skb(skb); |
| 2409 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2410 | EXPORT_SYMBOL(skb_queue_purge); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2411 | |
| 2412 | /** |
| 2413 | * skb_queue_head - queue a buffer at the list head |
| 2414 | * @list: list to use |
| 2415 | * @newsk: buffer to queue |
| 2416 | * |
| 2417 | * Queue a buffer at the start of the list. This function takes the |
| 2418 | * list lock and can be used safely with other locking &sk_buff |
| 2419 | * functions. |
| 2420 | * |
| 2421 | * A buffer cannot be placed on two lists at the same time. |
| 2422 | */ |
| 2423 | void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) |
| 2424 | { |
| 2425 | unsigned long flags; |
| 2426 | |
| 2427 | spin_lock_irqsave(&list->lock, flags); |
| 2428 | __skb_queue_head(list, newsk); |
| 2429 | spin_unlock_irqrestore(&list->lock, flags); |
| 2430 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2431 | EXPORT_SYMBOL(skb_queue_head); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2432 | |
| 2433 | /** |
| 2434 | * skb_queue_tail - queue a buffer at the list tail |
| 2435 | * @list: list to use |
| 2436 | * @newsk: buffer to queue |
| 2437 | * |
| 2438 | * Queue a buffer at the tail of the list. This function takes the |
| 2439 | * list lock and can be used safely with other locking &sk_buff |
| 2440 | * functions. |
| 2441 | * |
| 2442 | * A buffer cannot be placed on two lists at the same time. |
| 2443 | */ |
| 2444 | void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) |
| 2445 | { |
| 2446 | unsigned long flags; |
| 2447 | |
| 2448 | spin_lock_irqsave(&list->lock, flags); |
| 2449 | __skb_queue_tail(list, newsk); |
| 2450 | spin_unlock_irqrestore(&list->lock, flags); |
| 2451 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2452 | EXPORT_SYMBOL(skb_queue_tail); |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2453 | |
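/* Illustrative sketch of the locked queue API: a producer (any context,
 * e.g. an interrupt handler) feeding a consumer. 'rxq' is an assumed
 * sk_buff_head initialised with skb_queue_head_init(), and 'deliver()' is a
 * placeholder for per-packet work:
 *
 *	skb_queue_tail(&rxq, skb);			// producer
 *
 *	while ((skb = skb_dequeue(&rxq)) != NULL)	// consumer
 *		deliver(skb);
 */
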
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2454 | /** |
| 2455 | * skb_unlink - remove a buffer from a list |
| 2456 | * @skb: buffer to remove |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2457 | * @list: list to use |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2458 | * |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2459 | * Remove a packet from a list. The list locks are taken and this |
| 2460 | * function is atomic with respect to other list locked calls. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2461 | * |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2462 | * You must know what list the SKB is on. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2463 | */ |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2464 | void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2465 | { |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2466 | unsigned long flags; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2467 | |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2468 | spin_lock_irqsave(&list->lock, flags); |
| 2469 | __skb_unlink(skb, list); |
| 2470 | spin_unlock_irqrestore(&list->lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2471 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2472 | EXPORT_SYMBOL(skb_unlink); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2473 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2474 | /** |
| 2475 | * skb_append - append a buffer |
| 2476 | * @old: buffer to insert after |
| 2477 | * @newsk: buffer to insert |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2478 | * @list: list to use |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2479 | * |
| 2480 | * Place a packet after a given packet in a list. The list locks are taken |
| 2481 | * and this function is atomic with respect to other list locked calls. |
| 2482 | * A buffer cannot be placed on two lists at the same time. |
| 2483 | */ |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2484 | void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2485 | { |
| 2486 | unsigned long flags; |
| 2487 | |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2488 | spin_lock_irqsave(&list->lock, flags); |
Gerrit Renker | 7de6c03 | 2008-04-14 00:05:09 -0700 | [diff] [blame] | 2489 | __skb_queue_after(list, old, newsk); |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2490 | spin_unlock_irqrestore(&list->lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2491 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2492 | EXPORT_SYMBOL(skb_append); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2493 | |
| 2494 | /** |
| 2495 | * skb_insert - insert a buffer |
| 2496 | * @old: buffer to insert before |
| 2497 | * @newsk: buffer to insert |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2498 | * @list: list to use |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2499 | * |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2500 | * Place a packet before a given packet in a list. The list locks are |
| 2501 | * taken and this function is atomic with respect to other list locked |
| 2502 | * calls. |
| 2503 | * |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2504 | * A buffer cannot be placed on two lists at the same time. |
| 2505 | */ |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2506 | void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2507 | { |
| 2508 | unsigned long flags; |
| 2509 | |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2510 | spin_lock_irqsave(&list->lock, flags); |
| 2511 | __skb_insert(newsk, old->prev, old, list); |
| 2512 | spin_unlock_irqrestore(&list->lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2513 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2514 | EXPORT_SYMBOL(skb_insert); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2515 | |
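/* Illustrative sketch of skb_append()/skb_unlink(): replace a buffer known
 * to be on 'q' with its successor 'newsk'. Each call takes the list lock
 * only for its own step, so serialising the pair against other writers is
 * assumed to be the caller's problem:
 *
 *	skb_append(old, newsk, &q);
 *	skb_unlink(old, &q);
 *	kfree_skb(old);
 */
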
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2516 | static inline void skb_split_inside_header(struct sk_buff *skb, |
| 2517 | struct sk_buff* skb1, |
| 2518 | const u32 len, const int pos) |
| 2519 | { |
| 2520 | int i; |
| 2521 | |
Arnaldo Carvalho de Melo | d626f62 | 2007-03-27 18:55:52 -0300 | [diff] [blame] | 2522 | skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), |
| 2523 | pos - len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2524 | /* And move data appendix as is. */ |
| 2525 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) |
| 2526 | skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; |
| 2527 | |
| 2528 | skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; |
| 2529 | skb_shinfo(skb)->nr_frags = 0; |
| 2530 | skb1->data_len = skb->data_len; |
| 2531 | skb1->len += skb1->data_len; |
| 2532 | skb->data_len = 0; |
| 2533 | skb->len = len; |
Arnaldo Carvalho de Melo | 27a884d | 2007-04-19 20:29:13 -0700 | [diff] [blame] | 2534 | skb_set_tail_pointer(skb, len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2535 | } |
| 2536 | |
| 2537 | static inline void skb_split_no_header(struct sk_buff *skb, |
| 2538 | struct sk_buff* skb1, |
| 2539 | const u32 len, int pos) |
| 2540 | { |
| 2541 | int i, k = 0; |
| 2542 | const int nfrags = skb_shinfo(skb)->nr_frags; |
| 2543 | |
| 2544 | skb_shinfo(skb)->nr_frags = 0; |
| 2545 | skb1->len = skb1->data_len = skb->len - len; |
| 2546 | skb->len = len; |
| 2547 | skb->data_len = len - pos; |
| 2548 | |
| 2549 | for (i = 0; i < nfrags; i++) { |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2550 | int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2551 | |
| 2552 | if (pos + size > len) { |
| 2553 | skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; |
| 2554 | |
| 2555 | if (pos < len) { |
| 2556 | /* Split frag. |
| 2557 | * We have two options in this case: |
| 2558 | * 1. Move the whole frag to the second |
| 2559 | * part, if possible. E.g. this |
| 2560 | * approach is mandatory for TUX, |
| 2561 | * where splitting is expensive. |
| 2562 | * 2. Split accurately; this is what we do. |
| 2563 | */ |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 2564 | skb_frag_ref(skb, i); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2565 | skb_shinfo(skb1)->frags[0].page_offset += len - pos; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2566 | skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos); |
| 2567 | skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2568 | skb_shinfo(skb)->nr_frags++; |
| 2569 | } |
| 2570 | k++; |
| 2571 | } else |
| 2572 | skb_shinfo(skb)->nr_frags++; |
| 2573 | pos += size; |
| 2574 | } |
| 2575 | skb_shinfo(skb1)->nr_frags = k; |
| 2576 | } |
| 2577 | |
| 2578 | /** |
| 2579 | * skb_split - Split fragmented skb to two parts at length len. |
| 2580 | * @skb: the buffer to split |
| 2581 | * @skb1: the buffer to receive the second part |
| 2582 | * @len: new length for skb |
| 2583 | */ |
| 2584 | void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) |
| 2585 | { |
| 2586 | int pos = skb_headlen(skb); |
| 2587 | |
Amerigo Wang | 68534c6 | 2013-02-19 22:51:30 +0000 | [diff] [blame] | 2588 | skb_shinfo(skb1)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2589 | if (len < pos) /* Split line is inside header. */ |
| 2590 | skb_split_inside_header(skb, skb1, len, pos); |
| 2591 | else /* Second chunk has no header, nothing to copy. */ |
| 2592 | skb_split_no_header(skb, skb1, len, pos); |
| 2593 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2594 | EXPORT_SYMBOL(skb_split); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2595 | |
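/* Illustrative sketch of skb_split(), in the spirit of the TCP/TSO path:
 * keep the first 'mss' bytes in 'skb' and move the rest to a new buffer.
 * 'mss' is an assumed input; alloc_skb(0, ...) only suffices when the split
 * point is not inside the linear header, otherwise 'skb1' needs tailroom for
 * the linear remainder:
 *
 *	struct sk_buff *skb1 = alloc_skb(0, GFP_ATOMIC);
 *
 *	if (!skb1)
 *		return -ENOMEM;
 *	skb_split(skb, skb1, mss);
 */
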
Ilpo Järvinen | 9f782db | 2008-11-25 13:57:01 -0800 | [diff] [blame] | 2596 | /* Shifting from/to a cloned skb is a no-go. |
| 2597 | * |
| 2598 | * Caller cannot keep skb_shinfo related pointers past calling here! |
| 2599 | */ |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 2600 | static int skb_prepare_for_shift(struct sk_buff *skb) |
| 2601 | { |
Ilpo Järvinen | 0ace285 | 2008-11-24 21:30:21 -0800 | [diff] [blame] | 2602 | return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 2603 | } |
| 2604 | |
| 2605 | /** |
| 2606 | * skb_shift - Shifts paged data partially from skb to another |
| 2607 | * @tgt: buffer into which tail data gets added |
| 2608 | * @skb: buffer from which the paged data comes from |
| 2609 | * @shiftlen: shift up to this many bytes |
| 2610 | * |
| 2611 | * Attempts to shift up to shiftlen worth of bytes, which may be less than |
Feng King | 20e994a | 2011-11-21 01:47:11 +0000 | [diff] [blame] | 2612 | * the length of the skb, from skb to tgt. Returns the number of bytes |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 2613 | * shifted. It's up to the caller to free the skb if everything was shifted. |
| 2614 | * |
| 2615 | * If @tgt runs out of frags, the whole operation is aborted. |
| 2616 | * |
| 2617 | * The skb cannot include anything but paged data, while tgt is allowed |
| 2618 | * to have non-paged data as well. |
| 2619 | * |
| 2620 | * TODO: full sized shift could be optimized but that would need |
| 2621 | * specialized skb free'er to handle frags without up-to-date nr_frags. |
| 2622 | */ |
| 2623 | int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) |
| 2624 | { |
| 2625 | int from, to, merge, todo; |
| 2626 | struct skb_frag_struct *fragfrom, *fragto; |
| 2627 | |
| 2628 | BUG_ON(shiftlen > skb->len); |
| 2629 | BUG_ON(skb_headlen(skb)); /* Would corrupt stream */ |
| 2630 | |
| 2631 | todo = shiftlen; |
| 2632 | from = 0; |
| 2633 | to = skb_shinfo(tgt)->nr_frags; |
| 2634 | fragfrom = &skb_shinfo(skb)->frags[from]; |
| 2635 | |
| 2636 | /* Actual merge is delayed until the point when we know we can |
| 2637 | * commit all, so that we don't have to undo partial changes |
| 2638 | */ |
| 2639 | if (!to || |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 2640 | !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom), |
| 2641 | fragfrom->page_offset)) { |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 2642 | merge = -1; |
| 2643 | } else { |
| 2644 | merge = to - 1; |
| 2645 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2646 | todo -= skb_frag_size(fragfrom); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 2647 | if (todo < 0) { |
| 2648 | if (skb_prepare_for_shift(skb) || |
| 2649 | skb_prepare_for_shift(tgt)) |
| 2650 | return 0; |
| 2651 | |
Ilpo Järvinen | 9f782db | 2008-11-25 13:57:01 -0800 | [diff] [blame] | 2652 | /* All previous frag pointers might be stale! */ |
| 2653 | fragfrom = &skb_shinfo(skb)->frags[from]; |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 2654 | fragto = &skb_shinfo(tgt)->frags[merge]; |
| 2655 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2656 | skb_frag_size_add(fragto, shiftlen); |
| 2657 | skb_frag_size_sub(fragfrom, shiftlen); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 2658 | fragfrom->page_offset += shiftlen; |
| 2659 | |
| 2660 | goto onlymerged; |
| 2661 | } |
| 2662 | |
| 2663 | from++; |
| 2664 | } |
| 2665 | |
| 2666 | /* Skip full, not-fitting skb to avoid expensive operations */ |
| 2667 | if ((shiftlen == skb->len) && |
| 2668 | (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) |
| 2669 | return 0; |
| 2670 | |
| 2671 | if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) |
| 2672 | return 0; |
| 2673 | |
| 2674 | while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { |
| 2675 | if (to == MAX_SKB_FRAGS) |
| 2676 | return 0; |
| 2677 | |
| 2678 | fragfrom = &skb_shinfo(skb)->frags[from]; |
| 2679 | fragto = &skb_shinfo(tgt)->frags[to]; |
| 2680 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2681 | if (todo >= skb_frag_size(fragfrom)) { |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 2682 | *fragto = *fragfrom; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2683 | todo -= skb_frag_size(fragfrom); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 2684 | from++; |
| 2685 | to++; |
| 2686 | |
| 2687 | } else { |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 2688 | __skb_frag_ref(fragfrom); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 2689 | fragto->page = fragfrom->page; |
| 2690 | fragto->page_offset = fragfrom->page_offset; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2691 | skb_frag_size_set(fragto, todo); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 2692 | |
| 2693 | fragfrom->page_offset += todo; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2694 | skb_frag_size_sub(fragfrom, todo); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 2695 | todo = 0; |
| 2696 | |
| 2697 | to++; |
| 2698 | break; |
| 2699 | } |
| 2700 | } |
| 2701 | |
| 2702 | /* Ready to "commit" this state change to tgt */ |
| 2703 | skb_shinfo(tgt)->nr_frags = to; |
| 2704 | |
| 2705 | if (merge >= 0) { |
| 2706 | fragfrom = &skb_shinfo(skb)->frags[0]; |
| 2707 | fragto = &skb_shinfo(tgt)->frags[merge]; |
| 2708 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2709 | skb_frag_size_add(fragto, skb_frag_size(fragfrom)); |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 2710 | __skb_frag_unref(fragfrom); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 2711 | } |
| 2712 | |
| 2713 | /* Reposition in the original skb */ |
| 2714 | to = 0; |
| 2715 | while (from < skb_shinfo(skb)->nr_frags) |
| 2716 | skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; |
| 2717 | skb_shinfo(skb)->nr_frags = to; |
| 2718 | |
| 2719 | BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); |
| 2720 | |
| 2721 | onlymerged: |
| 2722 | /* Most likely the tgt won't ever need its checksum anymore; the skb, |
| 2723 | * on the other hand, might need it if it has to be resent. |
| 2724 | */ |
| 2725 | tgt->ip_summed = CHECKSUM_PARTIAL; |
| 2726 | skb->ip_summed = CHECKSUM_PARTIAL; |
| 2727 | |
| 2728 | /* Yak, is it really working this way? Some helper please? */ |
| 2729 | skb->len -= shiftlen; |
| 2730 | skb->data_len -= shiftlen; |
| 2731 | skb->truesize -= shiftlen; |
| 2732 | tgt->len += shiftlen; |
| 2733 | tgt->data_len += shiftlen; |
| 2734 | tgt->truesize += shiftlen; |
| 2735 | |
| 2736 | return shiftlen; |
| 2737 | } |
| 2738 | |
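/* Illustrative sketch of skb_shift(), echoing its use in TCP SACK
 * processing: try to move 'shiftlen' bytes into the preceding buffer and
 * drop 'skb' once emptied. 'prev', 'shiftlen' and 'queue' are assumed
 * inputs, and 'skb' is assumed to carry only paged data, as the function
 * requires:
 *
 *	if (!skb_shift(prev, skb, shiftlen))
 *		goto fallback;		// tgt out of frags, nothing moved
 *	if (!skb->len) {		// fully shifted, skb is empty now
 *		__skb_unlink(skb, queue);
 *		kfree_skb(skb);
 *	}
 */
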
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2739 | /** |
| 2740 | * skb_prepare_seq_read - Prepare a sequential read of skb data |
| 2741 | * @skb: the buffer to read |
| 2742 | * @from: lower offset of data to be read |
| 2743 | * @to: upper offset of data to be read |
| 2744 | * @st: state variable |
| 2745 | * |
| 2746 | * Initializes the specified state variable. Must be called before |
| 2747 | * invoking skb_seq_read() for the first time. |
| 2748 | */ |
| 2749 | void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, |
| 2750 | unsigned int to, struct skb_seq_state *st) |
| 2751 | { |
| 2752 | st->lower_offset = from; |
| 2753 | st->upper_offset = to; |
| 2754 | st->root_skb = st->cur_skb = skb; |
| 2755 | st->frag_idx = st->stepped_offset = 0; |
| 2756 | st->frag_data = NULL; |
| 2757 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2758 | EXPORT_SYMBOL(skb_prepare_seq_read); |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2759 | |
| 2760 | /** |
| 2761 | * skb_seq_read - Sequentially read skb data |
| 2762 | * @consumed: number of bytes consumed by the caller so far |
| 2763 | * @data: destination pointer for data to be returned |
| 2764 | * @st: state variable |
| 2765 | * |
Mathias Krause | bc32383 | 2013-11-07 14:18:26 +0100 | [diff] [blame] | 2766 | * Reads a block of skb data at @consumed relative to the |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2767 | * lower offset specified to skb_prepare_seq_read(). Assigns |
Mathias Krause | bc32383 | 2013-11-07 14:18:26 +0100 | [diff] [blame] | 2768 | * the head of the data block to @data and returns the length |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2769 | * of the block or 0 if the end of the skb data or the upper |
| 2770 | * offset has been reached. |
| 2771 | * |
| 2772 | * The caller is not required to consume all of the data |
Mathias Krause | bc32383 | 2013-11-07 14:18:26 +0100 | [diff] [blame] | 2773 | * returned, i.e. @consumed is typically set to the number |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2774 | * of bytes already consumed and the next call to |
| 2775 | * skb_seq_read() will return the remaining part of the block. |
| 2776 | * |
Lucas De Marchi | 25985ed | 2011-03-30 22:57:33 -0300 | [diff] [blame] | 2777 | * Note 1: The size of each block of data returned can be arbitrary; |
Masanari Iida | e793c0f | 2014-09-04 23:44:36 +0900 | [diff] [blame] | 2778 | * this limitation is the cost of zerocopy sequential |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2779 | * reads of potentially non-linear data. |
| 2780 | * |
Randy Dunlap | bc2cda1 | 2008-02-13 15:03:25 -0800 | [diff] [blame] | 2781 | * Note 2: Fragment lists within fragments are not implemented |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2782 | * at the moment, state->root_skb could be replaced with |
| 2783 | * a stack for this purpose. |
| 2784 | */ |
| 2785 | unsigned int skb_seq_read(unsigned int consumed, const u8 **data, |
| 2786 | struct skb_seq_state *st) |
| 2787 | { |
| 2788 | unsigned int block_limit, abs_offset = consumed + st->lower_offset; |
| 2789 | skb_frag_t *frag; |
| 2790 | |
Wedson Almeida Filho | aeb193e | 2013-06-23 23:33:48 -0700 | [diff] [blame] | 2791 | if (unlikely(abs_offset >= st->upper_offset)) { |
| 2792 | if (st->frag_data) { |
| 2793 | kunmap_atomic(st->frag_data); |
| 2794 | st->frag_data = NULL; |
| 2795 | } |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2796 | return 0; |
Wedson Almeida Filho | aeb193e | 2013-06-23 23:33:48 -0700 | [diff] [blame] | 2797 | } |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2798 | |
| 2799 | next_skb: |
Herbert Xu | 95e3b24 | 2009-01-29 16:07:52 -0800 | [diff] [blame] | 2800 | block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2801 | |
Thomas Chenault | 995b337 | 2009-05-18 21:43:27 -0700 | [diff] [blame] | 2802 | if (abs_offset < block_limit && !st->frag_data) { |
Herbert Xu | 95e3b24 | 2009-01-29 16:07:52 -0800 | [diff] [blame] | 2803 | *data = st->cur_skb->data + (abs_offset - st->stepped_offset); |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2804 | return block_limit - abs_offset; |
| 2805 | } |
| 2806 | |
| 2807 | if (st->frag_idx == 0 && !st->frag_data) |
| 2808 | st->stepped_offset += skb_headlen(st->cur_skb); |
| 2809 | |
| 2810 | while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { |
| 2811 | frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2812 | block_limit = skb_frag_size(frag) + st->stepped_offset; |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2813 | |
| 2814 | if (abs_offset < block_limit) { |
| 2815 | if (!st->frag_data) |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 2816 | st->frag_data = kmap_atomic(skb_frag_page(frag)); |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2817 | |
| 2818 | *data = (u8 *) st->frag_data + frag->page_offset + |
| 2819 | (abs_offset - st->stepped_offset); |
| 2820 | |
| 2821 | return block_limit - abs_offset; |
| 2822 | } |
| 2823 | |
| 2824 | if (st->frag_data) { |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 2825 | kunmap_atomic(st->frag_data); |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2826 | st->frag_data = NULL; |
| 2827 | } |
| 2828 | |
| 2829 | st->frag_idx++; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2830 | st->stepped_offset += skb_frag_size(frag); |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2831 | } |
| 2832 | |
Olaf Kirch | 5b5a60d | 2007-06-23 23:11:52 -0700 | [diff] [blame] | 2833 | if (st->frag_data) { |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 2834 | kunmap_atomic(st->frag_data); |
Olaf Kirch | 5b5a60d | 2007-06-23 23:11:52 -0700 | [diff] [blame] | 2835 | st->frag_data = NULL; |
| 2836 | } |
| 2837 | |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 2838 | if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) { |
Shyam Iyer | 71b3346 | 2009-01-29 16:12:42 -0800 | [diff] [blame] | 2839 | st->cur_skb = skb_shinfo(st->root_skb)->frag_list; |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2840 | st->frag_idx = 0; |
| 2841 | goto next_skb; |
Shyam Iyer | 71b3346 | 2009-01-29 16:12:42 -0800 | [diff] [blame] | 2842 | } else if (st->cur_skb->next) { |
| 2843 | st->cur_skb = st->cur_skb->next; |
Herbert Xu | 95e3b24 | 2009-01-29 16:07:52 -0800 | [diff] [blame] | 2844 | st->frag_idx = 0; |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2845 | goto next_skb; |
| 2846 | } |
| 2847 | |
| 2848 | return 0; |
| 2849 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2850 | EXPORT_SYMBOL(skb_seq_read); |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2851 | |
| 2852 | /** |
| 2853 | * skb_abort_seq_read - Abort a sequential read of skb data |
| 2854 | * @st: state variable |
| 2855 | * |
| 2856 | * Must be called if the read is abandoned before skb_seq_read() |
| 2857 | * has returned 0. |
| 2858 | */ |
| 2859 | void skb_abort_seq_read(struct skb_seq_state *st) |
| 2860 | { |
| 2861 | if (st->frag_data) |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 2862 | kunmap_atomic(st->frag_data); |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2863 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2864 | EXPORT_SYMBOL(skb_abort_seq_read); |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2865 | |
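/* Illustrative sketch: the canonical consumer loop for the sequential read
 * API ('process()' is a placeholder for per-block work). No
 * skb_abort_seq_read() is needed here because the loop runs until
 * skb_seq_read() returns 0:
 *
 *	struct skb_seq_state st;
 *	unsigned int consumed = 0, len;
 *	const u8 *data;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
 *		process(data, len);
 *		consumed += len;
 *	}
 */
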
Thomas Graf | 3fc7e8a | 2005-06-23 21:00:17 -0700 | [diff] [blame] | 2866 | #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) |
| 2867 | |
| 2868 | static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, |
| 2869 | struct ts_config *conf, |
| 2870 | struct ts_state *state) |
| 2871 | { |
| 2872 | return skb_seq_read(offset, text, TS_SKB_CB(state)); |
| 2873 | } |
| 2874 | |
| 2875 | static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) |
| 2876 | { |
| 2877 | skb_abort_seq_read(TS_SKB_CB(state)); |
| 2878 | } |
| 2879 | |
| 2880 | /** |
| 2881 | * skb_find_text - Find a text pattern in skb data |
| 2882 | * @skb: the buffer to look in |
| 2883 | * @from: search offset |
| 2884 | * @to: search limit |
| 2885 | * @config: textsearch configuration |
Thomas Graf | 3fc7e8a | 2005-06-23 21:00:17 -0700 | [diff] [blame] | 2886 | * |
| 2887 | * Finds a pattern in the skb data according to the specified |
| 2888 | * textsearch configuration. Use textsearch_next() to retrieve |
| 2889 | * subsequent occurrences of the pattern. Returns the offset |
| 2890 | * to the first occurrence or UINT_MAX if no match was found. |
| 2891 | */ |
| 2892 | unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, |
Bojan Prtvar | 059a244 | 2015-02-22 11:46:35 +0100 | [diff] [blame] | 2893 | unsigned int to, struct ts_config *config) |
Thomas Graf | 3fc7e8a | 2005-06-23 21:00:17 -0700 | [diff] [blame] | 2894 | { |
Bojan Prtvar | 059a244 | 2015-02-22 11:46:35 +0100 | [diff] [blame] | 2895 | struct ts_state state; |
Phil Oester | f72b948 | 2006-06-26 00:00:57 -0700 | [diff] [blame] | 2896 | unsigned int ret; |
| 2897 | |
Thomas Graf | 3fc7e8a | 2005-06-23 21:00:17 -0700 | [diff] [blame] | 2898 | config->get_next_block = skb_ts_get_next_block; |
| 2899 | config->finish = skb_ts_finish; |
| 2900 | |
Bojan Prtvar | 059a244 | 2015-02-22 11:46:35 +0100 | [diff] [blame] | 2901 | skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state)); |
Thomas Graf | 3fc7e8a | 2005-06-23 21:00:17 -0700 | [diff] [blame] | 2902 | |
Bojan Prtvar | 059a244 | 2015-02-22 11:46:35 +0100 | [diff] [blame] | 2903 | ret = textsearch_find(config, &state); |
Phil Oester | f72b948 | 2006-06-26 00:00:57 -0700 | [diff] [blame] | 2904 | return (ret <= to - from ? ret : UINT_MAX); |
Thomas Graf | 3fc7e8a | 2005-06-23 21:00:17 -0700 | [diff] [blame] | 2905 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2906 | EXPORT_SYMBOL(skb_find_text); |
Thomas Graf | 3fc7e8a | 2005-06-23 21:00:17 -0700 | [diff] [blame] | 2907 | |
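/* Illustrative sketch of skb_find_text() with the textsearch API; the
 * pattern and algorithm ("kmp") are arbitrary examples:
 *
 *	struct ts_config *conf;
 *	unsigned int pos;
 *
 *	conf = textsearch_prepare("kmp", "HTTP/", 5, GFP_KERNEL, TS_AUTOLOAD);
 *	if (IS_ERR(conf))
 *		return PTR_ERR(conf);
 *	pos = skb_find_text(skb, 0, skb->len, conf);
 *	if (pos != UINT_MAX)
 *		pr_debug("match at offset %u\n", pos);
 *	textsearch_destroy(conf);
 */
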
Ananda Raju | e89e9cf | 2005-10-18 15:46:41 -0700 | [diff] [blame] | 2908 | /** |
Ben Hutchings | 2c53040 | 2012-07-10 10:55:09 +0000 | [diff] [blame] | 2909 | * skb_append_datato_frags - append the user data to a skb |
Ananda Raju | e89e9cf | 2005-10-18 15:46:41 -0700 | [diff] [blame] | 2910 | * @sk: sock structure |
Masanari Iida | e793c0f | 2014-09-04 23:44:36 +0900 | [diff] [blame] | 2911 | * @skb: skb structure to append the user data to. |
Ananda Raju | e89e9cf | 2005-10-18 15:46:41 -0700 | [diff] [blame] | 2912 | * @getfrag: call back function to be used for getting the user data |
| 2913 | * @from: pointer to user message iov |
| 2914 | * @length: length of the iov message |
| 2915 | * |
| 2916 | * Description: This procedure appends the user data to the fragment part |
| 2917 | * of the skb. If any page allocation fails, it returns -ENOMEM. |
| 2918 | */ |
| 2919 | int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, |
Martin Waitz | dab9630 | 2005-12-05 13:40:12 -0800 | [diff] [blame] | 2920 | int (*getfrag)(void *from, char *to, int offset, |
Ananda Raju | e89e9cf | 2005-10-18 15:46:41 -0700 | [diff] [blame] | 2921 | int len, int odd, struct sk_buff *skb), |
| 2922 | void *from, int length) |
| 2923 | { |
Eric Dumazet | b211172 | 2012-12-28 06:06:37 +0000 | [diff] [blame] | 2924 | int frg_cnt = skb_shinfo(skb)->nr_frags; |
| 2925 | int copy; |
Ananda Raju | e89e9cf | 2005-10-18 15:46:41 -0700 | [diff] [blame] | 2926 | int offset = 0; |
| 2927 | int ret; |
Eric Dumazet | b211172 | 2012-12-28 06:06:37 +0000 | [diff] [blame] | 2928 | struct page_frag *pfrag = ¤t->task_frag; |
Ananda Raju | e89e9cf | 2005-10-18 15:46:41 -0700 | [diff] [blame] | 2929 | |
| 2930 | do { |
| 2931 | /* Return error if we don't have space for new frag */ |
Ananda Raju | e89e9cf | 2005-10-18 15:46:41 -0700 | [diff] [blame] | 2932 | if (frg_cnt >= MAX_SKB_FRAGS) |
Eric Dumazet | b211172 | 2012-12-28 06:06:37 +0000 | [diff] [blame] | 2933 | return -EMSGSIZE; |
Ananda Raju | e89e9cf | 2005-10-18 15:46:41 -0700 | [diff] [blame] | 2934 | |
Eric Dumazet | b211172 | 2012-12-28 06:06:37 +0000 | [diff] [blame] | 2935 | if (!sk_page_frag_refill(sk, pfrag)) |
Ananda Raju | e89e9cf | 2005-10-18 15:46:41 -0700 | [diff] [blame] | 2936 | return -ENOMEM; |
| 2937 | |
Ananda Raju | e89e9cf | 2005-10-18 15:46:41 -0700 | [diff] [blame] | 2938 | /* copy the user data to page */ |
Eric Dumazet | b211172 | 2012-12-28 06:06:37 +0000 | [diff] [blame] | 2939 | copy = min_t(int, length, pfrag->size - pfrag->offset); |
Ananda Raju | e89e9cf | 2005-10-18 15:46:41 -0700 | [diff] [blame] | 2940 | |
Eric Dumazet | b211172 | 2012-12-28 06:06:37 +0000 | [diff] [blame] | 2941 | ret = getfrag(from, page_address(pfrag->page) + pfrag->offset, |
| 2942 | offset, copy, 0, skb); |
Ananda Raju | e89e9cf | 2005-10-18 15:46:41 -0700 | [diff] [blame] | 2943 | if (ret < 0) |
| 2944 | return -EFAULT; |
| 2945 | |
| 2946 | /* copy was successful so update the size parameters */ |
Eric Dumazet | b211172 | 2012-12-28 06:06:37 +0000 | [diff] [blame] | 2947 | skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset, |
| 2948 | copy); |
| 2949 | frg_cnt++; |
| 2950 | pfrag->offset += copy; |
| 2951 | get_page(pfrag->page); |
| 2952 | |
| 2953 | skb->truesize += copy; |
| 2954 | atomic_add(copy, &sk->sk_wmem_alloc); |
Ananda Raju | e89e9cf | 2005-10-18 15:46:41 -0700 | [diff] [blame] | 2955 | skb->len += copy; |
| 2956 | skb->data_len += copy; |
| 2957 | offset += copy; |
| 2958 | length -= copy; |
| 2959 | |
| 2960 | } while (length > 0); |
| 2961 | |
| 2962 | return 0; |
| 2963 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2964 | EXPORT_SYMBOL(skb_append_datato_frags); |
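
/* Sketch of a caller-supplied getfrag callback with the signature expected
 * above; this hypothetical variant just copies from a linear kernel buffer.
 * The stack itself typically passes ip_generic_getfrag() here.
 */
static int example_kbuf_getfrag(void *from, char *to, int offset,
				int len, int odd, struct sk_buff *skb)
{
	/* 'from' points at the start of the source buffer */
	memcpy(to, (char *)from + offset, len);
	return 0;
}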
Ananda Raju | e89e9cf | 2005-10-18 15:46:41 -0700 | [diff] [blame] | 2965 | |
Herbert Xu | cbb042f | 2006-03-20 22:43:56 -0800 | [diff] [blame] | 2966 | /** |
| 2967 | * skb_pull_rcsum - pull skb and update receive checksum |
| 2968 | * @skb: buffer to update |
Herbert Xu | cbb042f | 2006-03-20 22:43:56 -0800 | [diff] [blame] | 2969 | * @len: length of data pulled |
| 2970 | * |
| 2971 | * This function performs an skb_pull on the packet and updates |
Urs Thuermann | fee54fa | 2008-02-12 22:03:25 -0800 | [diff] [blame] | 2972 | * the CHECKSUM_COMPLETE checksum. It should be used on |
Patrick McHardy | 84fa793 | 2006-08-29 16:44:56 -0700 | [diff] [blame] | 2973 | * receive path processing instead of skb_pull unless you know |
| 2974 | * that the checksum difference is zero (e.g., a valid IP header) |
| 2975 | * or you are setting ip_summed to CHECKSUM_NONE. |
Herbert Xu | cbb042f | 2006-03-20 22:43:56 -0800 | [diff] [blame] | 2976 | */ |
| 2977 | unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) |
| 2978 | { |
| 2979 | BUG_ON(len > skb->len); |
| 2980 | skb->len -= len; |
| 2981 | BUG_ON(skb->len < skb->data_len); |
| 2982 | skb_postpull_rcsum(skb, skb->data, len); |
| 2983 | return skb->data += len; |
| 2984 | } |
Arnaldo Carvalho de Melo | f94691a | 2006-03-20 22:47:55 -0800 | [diff] [blame] | 2985 | EXPORT_SYMBOL_GPL(skb_pull_rcsum); |
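
/* Sketch: pulling a 4-byte VLAN header on the receive path while keeping a
 * CHECKSUM_COMPLETE value consistent, much as skb_vlan_untag() does.
 */
static int example_pull_vlan(struct sk_buff *skb)
{
	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		return -ENOMEM;
	skb_pull_rcsum(skb, VLAN_HLEN);
	return 0;
}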
| 2986 | |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2987 | /** |
| 2988 | * skb_segment - Perform protocol segmentation on skb. |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 2989 | * @head_skb: buffer to segment |
Herbert Xu | 576a30e | 2006-06-27 13:22:38 -0700 | [diff] [blame] | 2990 | * @features: features for the output path (see dev->features) |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2991 | * |
| 2992 | * This function performs segmentation on the given skb. It returns |
Ben Hutchings | 4c821d7 | 2008-04-13 21:52:48 -0700 | [diff] [blame] | 2993 | * a pointer to the first in a list of new skbs for the segments. |
| 2994 | * In case of error it returns ERR_PTR(err). |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2995 | */ |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 2996 | struct sk_buff *skb_segment(struct sk_buff *head_skb, |
| 2997 | netdev_features_t features) |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2998 | { |
| 2999 | struct sk_buff *segs = NULL; |
| 3000 | struct sk_buff *tail = NULL; |
Michael S. Tsirkin | 1a4ceda | 2014-03-10 19:27:59 +0200 | [diff] [blame] | 3001 | struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list; |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 3002 | skb_frag_t *frag = skb_shinfo(head_skb)->frags; |
| 3003 | unsigned int mss = skb_shinfo(head_skb)->gso_size; |
| 3004 | unsigned int doffset = head_skb->data - skb_mac_header(head_skb); |
Michael S. Tsirkin | 1fd819e | 2014-03-10 19:28:08 +0200 | [diff] [blame] | 3005 | struct sk_buff *frag_skb = head_skb; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3006 | unsigned int offset = doffset; |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 3007 | unsigned int tnl_hlen = skb_tnl_header_len(head_skb); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3008 | unsigned int headroom; |
| 3009 | unsigned int len; |
Pravin B Shelar | ec5f061 | 2013-03-07 09:28:01 +0000 | [diff] [blame] | 3010 | __be16 proto; |
| 3011 | bool csum; |
Michał Mirosław | 04ed3e7 | 2011-01-24 15:32:47 -0800 | [diff] [blame] | 3012 | int sg = !!(features & NETIF_F_SG); |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 3013 | int nfrags = skb_shinfo(head_skb)->nr_frags; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3014 | int err = -ENOMEM; |
| 3015 | int i = 0; |
| 3016 | int pos; |
Vlad Yasevich | 53d6471 | 2014-03-27 17:26:18 -0400 | [diff] [blame] | 3017 | int dummy; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3018 | |
Wei-Chun Chao | 5882a07 | 2014-06-08 23:48:54 -0700 | [diff] [blame] | 3019 | __skb_push(head_skb, doffset); |
Vlad Yasevich | 53d6471 | 2014-03-27 17:26:18 -0400 | [diff] [blame] | 3020 | proto = skb_network_protocol(head_skb, &dummy); |
Pravin B Shelar | ec5f061 | 2013-03-07 09:28:01 +0000 | [diff] [blame] | 3021 | if (unlikely(!proto)) |
| 3022 | return ERR_PTR(-EINVAL); |
| 3023 | |
Tom Herbert | 7e2b10c | 2014-06-04 17:20:02 -0700 | [diff] [blame] | 3024 | csum = !head_skb->encap_hdr_csum && |
| 3025 | !!can_checksum_protocol(features, proto); |
| 3026 | |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 3027 | headroom = skb_headroom(head_skb); |
| 3028 | pos = skb_headlen(head_skb); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3029 | |
| 3030 | do { |
| 3031 | struct sk_buff *nskb; |
Michael S. Tsirkin | 8cb1990 | 2014-03-10 18:29:04 +0200 | [diff] [blame] | 3032 | skb_frag_t *nskb_frag; |
Herbert Xu | c8884ed | 2006-10-29 15:59:41 -0800 | [diff] [blame] | 3033 | int hsize; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3034 | int size; |
| 3035 | |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 3036 | len = head_skb->len - offset; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3037 | if (len > mss) |
| 3038 | len = mss; |
| 3039 | |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 3040 | hsize = skb_headlen(head_skb) - offset; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3041 | if (hsize < 0) |
| 3042 | hsize = 0; |
Herbert Xu | c8884ed | 2006-10-29 15:59:41 -0800 | [diff] [blame] | 3043 | if (hsize > len || !sg) |
| 3044 | hsize = len; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3045 | |
Michael S. Tsirkin | 1a4ceda | 2014-03-10 19:27:59 +0200 | [diff] [blame] | 3046 | if (!hsize && i >= nfrags && skb_headlen(list_skb) && |
| 3047 | (skb_headlen(list_skb) == len || sg)) { |
| 3048 | BUG_ON(skb_headlen(list_skb) > len); |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 3049 | |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 3050 | i = 0; |
Michael S. Tsirkin | 1a4ceda | 2014-03-10 19:27:59 +0200 | [diff] [blame] | 3051 | nfrags = skb_shinfo(list_skb)->nr_frags; |
| 3052 | frag = skb_shinfo(list_skb)->frags; |
Michael S. Tsirkin | 1fd819e | 2014-03-10 19:28:08 +0200 | [diff] [blame] | 3053 | frag_skb = list_skb; |
Michael S. Tsirkin | 1a4ceda | 2014-03-10 19:27:59 +0200 | [diff] [blame] | 3054 | pos += skb_headlen(list_skb); |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 3055 | |
| 3056 | while (pos < offset + len) { |
| 3057 | BUG_ON(i >= nfrags); |
| 3058 | |
Michael S. Tsirkin | 4e1beba | 2014-03-10 18:29:14 +0200 | [diff] [blame] | 3059 | size = skb_frag_size(frag); |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 3060 | if (pos + size > offset + len) |
| 3061 | break; |
| 3062 | |
| 3063 | i++; |
| 3064 | pos += size; |
Michael S. Tsirkin | 4e1beba | 2014-03-10 18:29:14 +0200 | [diff] [blame] | 3065 | frag++; |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 3066 | } |
| 3067 | |
Michael S. Tsirkin | 1a4ceda | 2014-03-10 19:27:59 +0200 | [diff] [blame] | 3068 | nskb = skb_clone(list_skb, GFP_ATOMIC); |
| 3069 | list_skb = list_skb->next; |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 3070 | |
| 3071 | if (unlikely(!nskb)) |
| 3072 | goto err; |
| 3073 | |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 3074 | if (unlikely(pskb_trim(nskb, len))) { |
| 3075 | kfree_skb(nskb); |
| 3076 | goto err; |
| 3077 | } |
| 3078 | |
Alexander Duyck | ec47ea8 | 2012-05-04 14:26:56 +0000 | [diff] [blame] | 3079 | hsize = skb_end_offset(nskb); |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 3080 | if (skb_cow_head(nskb, doffset + headroom)) { |
| 3081 | kfree_skb(nskb); |
| 3082 | goto err; |
| 3083 | } |
| 3084 | |
Alexander Duyck | ec47ea8 | 2012-05-04 14:26:56 +0000 | [diff] [blame] | 3085 | nskb->truesize += skb_end_offset(nskb) - hsize; |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 3086 | skb_release_head_state(nskb); |
| 3087 | __skb_push(nskb, doffset); |
| 3088 | } else { |
Mel Gorman | c93bdd0 | 2012-07-31 16:44:19 -0700 | [diff] [blame] | 3089 | nskb = __alloc_skb(hsize + doffset + headroom, |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 3090 | GFP_ATOMIC, skb_alloc_rx_flag(head_skb), |
Mel Gorman | c93bdd0 | 2012-07-31 16:44:19 -0700 | [diff] [blame] | 3091 | NUMA_NO_NODE); |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 3092 | |
| 3093 | if (unlikely(!nskb)) |
| 3094 | goto err; |
| 3095 | |
| 3096 | skb_reserve(nskb, headroom); |
| 3097 | __skb_put(nskb, doffset); |
| 3098 | } |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3099 | |
| 3100 | if (segs) |
| 3101 | tail->next = nskb; |
| 3102 | else |
| 3103 | segs = nskb; |
| 3104 | tail = nskb; |
| 3105 | |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 3106 | __copy_skb_header(nskb, head_skb); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3107 | |
Eric Dumazet | 030737b | 2013-10-19 11:42:54 -0700 | [diff] [blame] | 3108 | skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom); |
Vlad Yasevich | fcdfe3a | 2014-07-31 10:33:06 -0400 | [diff] [blame] | 3109 | skb_reset_mac_len(nskb); |
Pravin B Shelar | 68c3316 | 2013-02-14 14:02:41 +0000 | [diff] [blame] | 3110 | |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 3111 | skb_copy_from_linear_data_offset(head_skb, -tnl_hlen, |
Pravin B Shelar | 68c3316 | 2013-02-14 14:02:41 +0000 | [diff] [blame] | 3112 | nskb->data - tnl_hlen, |
| 3113 | doffset + tnl_hlen); |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 3114 | |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 3115 | if (nskb->len == len + doffset) |
Simon Horman | 1cdbcb7 | 2013-05-19 15:46:49 +0000 | [diff] [blame] | 3116 | goto perform_csum_check; |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 3117 | |
Tom Herbert | e585f23 | 2014-11-04 09:06:54 -0800 | [diff] [blame] | 3118 | if (!sg && !nskb->remcsum_offload) { |
Herbert Xu | 6f85a12 | 2008-08-15 14:55:02 -0700 | [diff] [blame] | 3119 | nskb->ip_summed = CHECKSUM_NONE; |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 3120 | nskb->csum = skb_copy_and_csum_bits(head_skb, offset, |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3121 | skb_put(nskb, len), |
| 3122 | len, 0); |
Tom Herbert | 7e2b10c | 2014-06-04 17:20:02 -0700 | [diff] [blame] | 3123 | SKB_GSO_CB(nskb)->csum_start = |
Tom Herbert | de84372 | 2014-06-25 12:51:01 -0700 | [diff] [blame] | 3124 | skb_headroom(nskb) + doffset; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3125 | continue; |
| 3126 | } |
| 3127 | |
Michael S. Tsirkin | 8cb1990 | 2014-03-10 18:29:04 +0200 | [diff] [blame] | 3128 | nskb_frag = skb_shinfo(nskb)->frags; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3129 | |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 3130 | skb_copy_from_linear_data_offset(head_skb, offset, |
Arnaldo Carvalho de Melo | d626f62 | 2007-03-27 18:55:52 -0300 | [diff] [blame] | 3131 | skb_put(nskb, hsize), hsize); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3132 | |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 3133 | skb_shinfo(nskb)->tx_flags = skb_shinfo(head_skb)->tx_flags & |
| 3134 | SKBTX_SHARED_FRAG; |
Eric Dumazet | cef401d | 2013-01-25 20:34:37 +0000 | [diff] [blame] | 3135 | |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 3136 | while (pos < offset + len) { |
| 3137 | if (i >= nfrags) { |
Michael S. Tsirkin | 1a4ceda | 2014-03-10 19:27:59 +0200 | [diff] [blame] | 3138 | BUG_ON(skb_headlen(list_skb)); |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 3139 | |
| 3140 | i = 0; |
Michael S. Tsirkin | 1a4ceda | 2014-03-10 19:27:59 +0200 | [diff] [blame] | 3141 | nfrags = skb_shinfo(list_skb)->nr_frags; |
| 3142 | frag = skb_shinfo(list_skb)->frags; |
Michael S. Tsirkin | 1fd819e | 2014-03-10 19:28:08 +0200 | [diff] [blame] | 3143 | frag_skb = list_skb; |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 3144 | |
| 3145 | BUG_ON(!nfrags); |
| 3146 | |
Michael S. Tsirkin | 1a4ceda | 2014-03-10 19:27:59 +0200 | [diff] [blame] | 3147 | list_skb = list_skb->next; |
Herbert Xu | 9d8506c | 2013-11-21 11:10:04 -0800 | [diff] [blame] | 3148 | } |
| 3149 | |
| 3150 | if (unlikely(skb_shinfo(nskb)->nr_frags >= |
| 3151 | MAX_SKB_FRAGS)) { |
| 3152 | net_warn_ratelimited( |
| 3153 | "skb_segment: too many frags: %u %u\n", |
| 3154 | pos, mss); |
| 3155 | goto err; |
| 3156 | } |
| 3157 | |
Michael S. Tsirkin | 1fd819e | 2014-03-10 19:28:08 +0200 | [diff] [blame] | 3158 | if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC))) |
| 3159 | goto err; |
| 3160 | |
Michael S. Tsirkin | 4e1beba | 2014-03-10 18:29:14 +0200 | [diff] [blame] | 3161 | *nskb_frag = *frag; |
Michael S. Tsirkin | 8cb1990 | 2014-03-10 18:29:04 +0200 | [diff] [blame] | 3162 | __skb_frag_ref(nskb_frag); |
| 3163 | size = skb_frag_size(nskb_frag); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3164 | |
| 3165 | if (pos < offset) { |
Michael S. Tsirkin | 8cb1990 | 2014-03-10 18:29:04 +0200 | [diff] [blame] | 3166 | nskb_frag->page_offset += offset - pos; |
| 3167 | skb_frag_size_sub(nskb_frag, offset - pos); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3168 | } |
| 3169 | |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 3170 | skb_shinfo(nskb)->nr_frags++; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3171 | |
| 3172 | if (pos + size <= offset + len) { |
| 3173 | i++; |
Michael S. Tsirkin | 4e1beba | 2014-03-10 18:29:14 +0200 | [diff] [blame] | 3174 | frag++; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3175 | pos += size; |
| 3176 | } else { |
Michael S. Tsirkin | 8cb1990 | 2014-03-10 18:29:04 +0200 | [diff] [blame] | 3177 | skb_frag_size_sub(nskb_frag, pos + size - (offset + len)); |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 3178 | goto skip_fraglist; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3179 | } |
| 3180 | |
Michael S. Tsirkin | 8cb1990 | 2014-03-10 18:29:04 +0200 | [diff] [blame] | 3181 | nskb_frag++; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3182 | } |
| 3183 | |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 3184 | skip_fraglist: |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3185 | nskb->data_len = len - hsize; |
| 3186 | nskb->len += nskb->data_len; |
| 3187 | nskb->truesize += nskb->data_len; |
Pravin B Shelar | ec5f061 | 2013-03-07 09:28:01 +0000 | [diff] [blame] | 3188 | |
Simon Horman | 1cdbcb7 | 2013-05-19 15:46:49 +0000 | [diff] [blame] | 3189 | perform_csum_check: |
Tom Herbert | e585f23 | 2014-11-04 09:06:54 -0800 | [diff] [blame] | 3190 | if (!csum && !nskb->remcsum_offload) { |
Pravin B Shelar | ec5f061 | 2013-03-07 09:28:01 +0000 | [diff] [blame] | 3191 | nskb->csum = skb_checksum(nskb, doffset, |
| 3192 | nskb->len - doffset, 0); |
| 3193 | nskb->ip_summed = CHECKSUM_NONE; |
Tom Herbert | 7e2b10c | 2014-06-04 17:20:02 -0700 | [diff] [blame] | 3194 | SKB_GSO_CB(nskb)->csum_start = |
| 3195 | skb_headroom(nskb) + doffset; |
Pravin B Shelar | ec5f061 | 2013-03-07 09:28:01 +0000 | [diff] [blame] | 3196 | } |
Michael S. Tsirkin | df5771f | 2014-03-10 18:29:19 +0200 | [diff] [blame] | 3197 | } while ((offset += len) < head_skb->len); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3198 | |
Eric Dumazet | bec3cfd | 2014-10-03 20:59:19 -0700 | [diff] [blame] | 3199 | /* Some callers want to get the end of the list. |
| 3200 | * Put it in segs->prev to avoid walking the list. |
| 3201 | * (see validate_xmit_skb_list() for example) |
| 3202 | */ |
| 3203 | segs->prev = tail; |
Toshiaki Makita | 432c856 | 2014-10-27 10:30:51 -0700 | [diff] [blame] | 3204 | |
| 3205 | /* The following permits correct backpressure for protocols |
| 3206 | * using skb_set_owner_w(). |
| 3207 | * The idea is to transfer ownership from head_skb to the last segment. |
| 3208 | */ |
| 3209 | if (head_skb->destructor == sock_wfree) { |
| 3210 | swap(tail->truesize, head_skb->truesize); |
| 3211 | swap(tail->destructor, head_skb->destructor); |
| 3212 | swap(tail->sk, head_skb->sk); |
| 3213 | } |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3214 | return segs; |
| 3215 | |
| 3216 | err: |
Eric Dumazet | 289dccb | 2013-12-20 14:29:08 -0800 | [diff] [blame] | 3217 | kfree_skb_list(segs); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3218 | return ERR_PTR(err); |
| 3219 | } |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 3220 | EXPORT_SYMBOL_GPL(skb_segment); |
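
/* Usage sketch, loosely modeled on the GSO transmit path: segment a skb
 * and hand each piece to a caller-supplied single-skb transmit routine
 * (the xmit_one parameter is hypothetical).
 */
static int example_segment_and_xmit(struct sk_buff *skb,
				    int (*xmit_one)(struct sk_buff *))
{
	struct sk_buff *segs, *nskb;

	segs = skb_segment(skb, netif_skb_features(skb));
	if (IS_ERR(segs))
		return PTR_ERR(segs);

	consume_skb(skb);	/* the original GSO skb is no longer needed */

	while (segs) {
		nskb = segs->next;
		segs->next = NULL;	/* unlink before handing it out */
		xmit_one(segs);
		segs = nskb;
	}
	return 0;
}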
| 3221 | |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 3222 | int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) |
| 3223 | { |
Eric Dumazet | 8a29111 | 2013-10-08 09:02:23 -0700 | [diff] [blame] | 3224 | struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb); |
Herbert Xu | 67147ba | 2009-05-26 18:50:22 +0000 | [diff] [blame] | 3225 | unsigned int offset = skb_gro_offset(skb); |
| 3226 | unsigned int headlen = skb_headlen(skb); |
Eric Dumazet | 8a29111 | 2013-10-08 09:02:23 -0700 | [diff] [blame] | 3227 | unsigned int len = skb_gro_len(skb); |
Eric Dumazet | 58025e4 | 2015-03-05 13:47:48 -0800 | [diff] [blame] | 3228 | struct sk_buff *lp, *p = *head; |
Eric Dumazet | 715dc1f | 2012-05-02 23:33:21 +0000 | [diff] [blame] | 3229 | unsigned int delta_truesize; |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 3230 | |
Eric Dumazet | 8a29111 | 2013-10-08 09:02:23 -0700 | [diff] [blame] | 3231 | if (unlikely(p->len + len >= 65536)) |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 3232 | return -E2BIG; |
| 3233 | |
Eric Dumazet | 29e9824 | 2014-05-16 11:34:37 -0700 | [diff] [blame] | 3234 | lp = NAPI_GRO_CB(p)->last; |
Eric Dumazet | 8a29111 | 2013-10-08 09:02:23 -0700 | [diff] [blame] | 3235 | pinfo = skb_shinfo(lp); |
| 3236 | |
| 3237 | if (headlen <= offset) { |
Herbert Xu | 42da699 | 2009-05-26 18:50:19 +0000 | [diff] [blame] | 3238 | skb_frag_t *frag; |
Herbert Xu | 66e92fc | 2009-05-26 18:50:32 +0000 | [diff] [blame] | 3239 | skb_frag_t *frag2; |
Herbert Xu | 9aaa156 | 2009-05-26 18:50:33 +0000 | [diff] [blame] | 3240 | int i = skbinfo->nr_frags; |
| 3241 | int nr_frags = pinfo->nr_frags + i; |
Herbert Xu | 42da699 | 2009-05-26 18:50:19 +0000 | [diff] [blame] | 3242 | |
Herbert Xu | 66e92fc | 2009-05-26 18:50:32 +0000 | [diff] [blame] | 3243 | if (nr_frags > MAX_SKB_FRAGS) |
Eric Dumazet | 8a29111 | 2013-10-08 09:02:23 -0700 | [diff] [blame] | 3244 | goto merge; |
Herbert Xu | 81705ad | 2009-01-29 14:19:51 +0000 | [diff] [blame] | 3245 | |
Eric Dumazet | 8a29111 | 2013-10-08 09:02:23 -0700 | [diff] [blame] | 3246 | offset -= headlen; |
Herbert Xu | 9aaa156 | 2009-05-26 18:50:33 +0000 | [diff] [blame] | 3247 | pinfo->nr_frags = nr_frags; |
| 3248 | skbinfo->nr_frags = 0; |
Herbert Xu | f557206 | 2009-01-14 20:40:03 -0800 | [diff] [blame] | 3249 | |
Herbert Xu | 9aaa156 | 2009-05-26 18:50:33 +0000 | [diff] [blame] | 3250 | frag = pinfo->frags + nr_frags; |
| 3251 | frag2 = skbinfo->frags + i; |
Herbert Xu | 66e92fc | 2009-05-26 18:50:32 +0000 | [diff] [blame] | 3252 | do { |
| 3253 | *--frag = *--frag2; |
| 3254 | } while (--i); |
| 3255 | |
| 3256 | frag->page_offset += offset; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 3257 | skb_frag_size_sub(frag, offset); |
Herbert Xu | 66e92fc | 2009-05-26 18:50:32 +0000 | [diff] [blame] | 3258 | |
Eric Dumazet | 715dc1f | 2012-05-02 23:33:21 +0000 | [diff] [blame] | 3259 | /* all fragments' truesize: remove (head size + sk_buff) */ |
Alexander Duyck | ec47ea8 | 2012-05-04 14:26:56 +0000 | [diff] [blame] | 3260 | delta_truesize = skb->truesize - |
| 3261 | SKB_TRUESIZE(skb_end_offset(skb)); |
Eric Dumazet | 715dc1f | 2012-05-02 23:33:21 +0000 | [diff] [blame] | 3262 | |
Herbert Xu | f557206 | 2009-01-14 20:40:03 -0800 | [diff] [blame] | 3263 | skb->truesize -= skb->data_len; |
| 3264 | skb->len -= skb->data_len; |
| 3265 | skb->data_len = 0; |
| 3266 | |
Eric Dumazet | 715dc1f | 2012-05-02 23:33:21 +0000 | [diff] [blame] | 3267 | NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE; |
Herbert Xu | 5d38a07 | 2009-01-04 16:13:40 -0800 | [diff] [blame] | 3268 | goto done; |
Eric Dumazet | d7e8883 | 2012-04-30 08:10:34 +0000 | [diff] [blame] | 3269 | } else if (skb->head_frag) { |
| 3270 | int nr_frags = pinfo->nr_frags; |
| 3271 | skb_frag_t *frag = pinfo->frags + nr_frags; |
| 3272 | struct page *page = virt_to_head_page(skb->head); |
| 3273 | unsigned int first_size = headlen - offset; |
| 3274 | unsigned int first_offset; |
| 3275 | |
| 3276 | if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS) |
Eric Dumazet | 8a29111 | 2013-10-08 09:02:23 -0700 | [diff] [blame] | 3277 | goto merge; |
Eric Dumazet | d7e8883 | 2012-04-30 08:10:34 +0000 | [diff] [blame] | 3278 | |
| 3279 | first_offset = skb->data - |
| 3280 | (unsigned char *)page_address(page) + |
| 3281 | offset; |
| 3282 | |
| 3283 | pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags; |
| 3284 | |
| 3285 | frag->page.p = page; |
| 3286 | frag->page_offset = first_offset; |
| 3287 | skb_frag_size_set(frag, first_size); |
| 3288 | |
| 3289 | memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags); |
| 3290 | /* We don't need to clear skbinfo->nr_frags here */ |
| 3291 | |
Eric Dumazet | 715dc1f | 2012-05-02 23:33:21 +0000 | [diff] [blame] | 3292 | delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); |
Eric Dumazet | d7e8883 | 2012-04-30 08:10:34 +0000 | [diff] [blame] | 3293 | NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; |
| 3294 | goto done; |
Eric Dumazet | 8a29111 | 2013-10-08 09:02:23 -0700 | [diff] [blame] | 3295 | } |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 3296 | |
| 3297 | merge: |
Eric Dumazet | 715dc1f | 2012-05-02 23:33:21 +0000 | [diff] [blame] | 3298 | delta_truesize = skb->truesize; |
Herbert Xu | 67147ba | 2009-05-26 18:50:22 +0000 | [diff] [blame] | 3299 | if (offset > headlen) { |
Michal Schmidt | d1dc7ab | 2011-01-24 12:08:48 +0000 | [diff] [blame] | 3300 | unsigned int eat = offset - headlen; |
| 3301 | |
| 3302 | skbinfo->frags[0].page_offset += eat; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 3303 | skb_frag_size_sub(&skbinfo->frags[0], eat); |
Michal Schmidt | d1dc7ab | 2011-01-24 12:08:48 +0000 | [diff] [blame] | 3304 | skb->data_len -= eat; |
| 3305 | skb->len -= eat; |
Herbert Xu | 67147ba | 2009-05-26 18:50:22 +0000 | [diff] [blame] | 3306 | offset = headlen; |
Herbert Xu | 5603502 | 2009-02-05 21:26:52 -0800 | [diff] [blame] | 3307 | } |
| 3308 | |
Herbert Xu | 67147ba | 2009-05-26 18:50:22 +0000 | [diff] [blame] | 3309 | __skb_pull(skb, offset); |
Herbert Xu | 5603502 | 2009-02-05 21:26:52 -0800 | [diff] [blame] | 3310 | |
Eric Dumazet | 29e9824 | 2014-05-16 11:34:37 -0700 | [diff] [blame] | 3311 | if (NAPI_GRO_CB(p)->last == p) |
Eric Dumazet | 8a29111 | 2013-10-08 09:02:23 -0700 | [diff] [blame] | 3312 | skb_shinfo(p)->frag_list = skb; |
| 3313 | else |
| 3314 | NAPI_GRO_CB(p)->last->next = skb; |
Eric Dumazet | c3c7c25 | 2012-12-06 13:54:59 +0000 | [diff] [blame] | 3315 | NAPI_GRO_CB(p)->last = skb; |
Eric Dumazet | f4a775d | 2014-09-22 16:29:32 -0700 | [diff] [blame] | 3316 | __skb_header_release(skb); |
Eric Dumazet | 8a29111 | 2013-10-08 09:02:23 -0700 | [diff] [blame] | 3317 | lp = p; |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 3318 | |
Herbert Xu | 5d38a07 | 2009-01-04 16:13:40 -0800 | [diff] [blame] | 3319 | done: |
| 3320 | NAPI_GRO_CB(p)->count++; |
Herbert Xu | 37fe473 | 2009-01-17 19:48:13 +0000 | [diff] [blame] | 3321 | p->data_len += len; |
Eric Dumazet | 715dc1f | 2012-05-02 23:33:21 +0000 | [diff] [blame] | 3322 | p->truesize += delta_truesize; |
Herbert Xu | 37fe473 | 2009-01-17 19:48:13 +0000 | [diff] [blame] | 3323 | p->len += len; |
Eric Dumazet | 8a29111 | 2013-10-08 09:02:23 -0700 | [diff] [blame] | 3324 | if (lp != p) { |
| 3325 | lp->data_len += len; |
| 3326 | lp->truesize += delta_truesize; |
| 3327 | lp->len += len; |
| 3328 | } |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 3329 | NAPI_GRO_CB(skb)->same_flow = 1; |
| 3330 | return 0; |
| 3331 | } |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 3332 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3333 | void __init skb_init(void) |
| 3334 | { |
| 3335 | skbuff_head_cache = kmem_cache_create("skbuff_head_cache", |
| 3336 | sizeof(struct sk_buff), |
| 3337 | 0, |
Alexey Dobriyan | e5d679f33 | 2006-08-26 19:25:52 -0700 | [diff] [blame] | 3338 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, |
Paul Mundt | 20c2df8 | 2007-07-20 10:11:58 +0900 | [diff] [blame] | 3339 | NULL); |
David S. Miller | d179cd1 | 2005-08-17 14:57:30 -0700 | [diff] [blame] | 3340 | skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", |
Eric Dumazet | d0bf4a9 | 2014-09-29 13:29:15 -0700 | [diff] [blame] | 3341 | sizeof(struct sk_buff_fclones), |
David S. Miller | d179cd1 | 2005-08-17 14:57:30 -0700 | [diff] [blame] | 3342 | 0, |
Alexey Dobriyan | e5d679f33 | 2006-08-26 19:25:52 -0700 | [diff] [blame] | 3343 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, |
Paul Mundt | 20c2df8 | 2007-07-20 10:11:58 +0900 | [diff] [blame] | 3344 | NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3345 | } |
| 3346 | |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3347 | /** |
| 3348 | * skb_to_sgvec - Fill a scatter-gather list from a socket buffer |
| 3349 | * @skb: Socket buffer containing the buffers to be mapped |
| 3350 | * @sg: The scatter-gather list to map into |
| 3351 | * @offset: The offset into the buffer's contents to start mapping |
| 3352 | * @len: Length of buffer space to be mapped |
| 3353 | * |
| 3354 | * Fill the specified scatter-gather list with mappings/pointers into a |
| 3355 | * region of the buffer space attached to a socket buffer. |
| 3356 | */ |
David S. Miller | 51c739d | 2007-10-30 21:29:29 -0700 | [diff] [blame] | 3357 | static int |
| 3358 | __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3359 | { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 3360 | int start = skb_headlen(skb); |
| 3361 | int i, copy = start - offset; |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 3362 | struct sk_buff *frag_iter; |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3363 | int elt = 0; |
| 3364 | |
| 3365 | if (copy > 0) { |
| 3366 | if (copy > len) |
| 3367 | copy = len; |
Jens Axboe | 642f14903 | 2007-10-24 11:20:47 +0200 | [diff] [blame] | 3368 | sg_set_buf(sg, skb->data + offset, copy); |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3369 | elt++; |
| 3370 | if ((len -= copy) == 0) |
| 3371 | return elt; |
| 3372 | offset += copy; |
| 3373 | } |
| 3374 | |
| 3375 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 3376 | int end; |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3377 | |
Ilpo Järvinen | 547b792 | 2008-07-25 21:43:18 -0700 | [diff] [blame] | 3378 | WARN_ON(start > offset + len); |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 3379 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 3380 | end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3381 | if ((copy = end - offset) > 0) { |
| 3382 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
| 3383 | |
| 3384 | if (copy > len) |
| 3385 | copy = len; |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 3386 | sg_set_page(&sg[elt], skb_frag_page(frag), copy, |
Jens Axboe | 642f14903 | 2007-10-24 11:20:47 +0200 | [diff] [blame] | 3387 | frag->page_offset+offset-start); |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3388 | elt++; |
| 3389 | if (!(len -= copy)) |
| 3390 | return elt; |
| 3391 | offset += copy; |
| 3392 | } |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 3393 | start = end; |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3394 | } |
| 3395 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 3396 | skb_walk_frags(skb, frag_iter) { |
| 3397 | int end; |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3398 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 3399 | WARN_ON(start > offset + len); |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3400 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 3401 | end = start + frag_iter->len; |
| 3402 | if ((copy = end - offset) > 0) { |
| 3403 | if (copy > len) |
| 3404 | copy = len; |
| 3405 | elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start, |
| 3406 | copy); |
| 3407 | if ((len -= copy) == 0) |
| 3408 | return elt; |
| 3409 | offset += copy; |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3410 | } |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 3411 | start = end; |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3412 | } |
| 3413 | BUG_ON(len); |
| 3414 | return elt; |
| 3415 | } |
| 3416 | |
Fan Du | 25a91d8 | 2014-01-18 09:54:23 +0800 | [diff] [blame] | 3417 | /* As compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the |
| 3418 | * given sglist without marking the sg entry that contains the last skb data |
| 3419 | * as the end. So the caller can manipulate the sg list at will when appending |
| 3420 | * new data after the first call, without calling sg_unmark_end to extend it. |
| 3421 | * |
| 3422 | * Scenario to use skb_to_sgvec_nomark: |
| 3423 | * 1. sg_init_table |
| 3424 | * 2. skb_to_sgvec_nomark(payload1) |
| 3425 | * 3. skb_to_sgvec_nomark(payload2) |
| 3426 | * |
| 3427 | * This is equivalent to: |
| 3428 | * 1. sg_init_table |
| 3429 | * 2. skb_to_sgvec(payload1) |
| 3430 | * 3. sg_unmark_end |
| 3431 | * 4. skb_to_sgvec(payload2) |
| 3432 | * |
| 3433 | * When mapping multiple payloads conditionally, skb_to_sgvec_nomark |
| 3434 | * is preferable. |
| 3435 | */ |
| 3436 | int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, |
| 3437 | int offset, int len) |
| 3438 | { |
| 3439 | return __skb_to_sgvec(skb, sg, offset, len); |
| 3440 | } |
| 3441 | EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark); |
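
/* Sketch of the scenario above: map two payloads into one sg table and
 * terminate it once at the end. skb1 and skb2 are hypothetical payloads.
 */
static void example_sgvec_nomark(struct sk_buff *skb1, struct sk_buff *skb2,
				 struct scatterlist *sg, int nents)
{
	int n;

	sg_init_table(sg, nents);
	n = skb_to_sgvec_nomark(skb1, sg, 0, skb1->len);
	n += skb_to_sgvec_nomark(skb2, sg + n, 0, skb2->len);
	sg_mark_end(&sg[n - 1]);	/* single end mark after the last payload */
}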
| 3442 | |
David S. Miller | 51c739d | 2007-10-30 21:29:29 -0700 | [diff] [blame] | 3443 | int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) |
| 3444 | { |
| 3445 | int nsg = __skb_to_sgvec(skb, sg, offset, len); |
| 3446 | |
Jens Axboe | c46f233 | 2007-10-31 12:06:37 +0100 | [diff] [blame] | 3447 | sg_mark_end(&sg[nsg - 1]); |
David S. Miller | 51c739d | 2007-10-30 21:29:29 -0700 | [diff] [blame] | 3448 | |
| 3449 | return nsg; |
| 3450 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3451 | EXPORT_SYMBOL_GPL(skb_to_sgvec); |
David S. Miller | 51c739d | 2007-10-30 21:29:29 -0700 | [diff] [blame] | 3452 | |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3453 | /** |
| 3454 | * skb_cow_data - Check that a socket buffer's data buffers are writable |
| 3455 | * @skb: The socket buffer to check. |
| 3456 | * @tailbits: Amount of trailing space to be added |
| 3457 | * @trailer: Returned pointer to the skb where the @tailbits space begins |
| 3458 | * |
| 3459 | * Make sure that the data buffers attached to a socket buffer are |
| 3460 | * writable. If they are not, private copies are made of the data buffers |
| 3461 | * and the socket buffer is set to use these instead. |
| 3462 | * |
| 3463 | * If @tailbits is given, make sure that there is space to write @tailbits |
| 3464 | * bytes of data beyond current end of socket buffer. @trailer will be |
| 3465 | * set to point to the skb in which this space begins. |
| 3466 | * |
| 3467 | * The number of scatterlist elements required to completely map the |
| 3468 | * COW'd and extended socket buffer will be returned. |
| 3469 | */ |
| 3470 | int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) |
| 3471 | { |
| 3472 | int copyflag; |
| 3473 | int elt; |
| 3474 | struct sk_buff *skb1, **skb_p; |
| 3475 | |
| 3476 | /* If skb is cloned or its head is paged, reallocate |
| 3477 | * head pulling out all the pages (pages are considered not writable |
| 3478 | * at the moment even if they are anonymous). |
| 3479 | */ |
| 3480 | if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && |
| 3481 | __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL) |
| 3482 | return -ENOMEM; |
| 3483 | |
| 3484 | /* Easy case. Most packets will go this way. */ |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 3485 | if (!skb_has_frag_list(skb)) { |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3486 | /* A little trouble here: not enough space for the trailer. |
| 3487 | * This should not happen when the stack is tuned to generate |
| 3488 | * good frames. OK, on a miss we reallocate and reserve even more |
| 3489 | * space; 128 bytes is fair. */ |
| 3490 | |
| 3491 | if (skb_tailroom(skb) < tailbits && |
| 3492 | pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) |
| 3493 | return -ENOMEM; |
| 3494 | |
| 3495 | /* Voila! */ |
| 3496 | *trailer = skb; |
| 3497 | return 1; |
| 3498 | } |
| 3499 | |
| 3500 | /* Misery. We are in trouble, going to mince the fragments... */ |
| 3501 | |
| 3502 | elt = 1; |
| 3503 | skb_p = &skb_shinfo(skb)->frag_list; |
| 3504 | copyflag = 0; |
| 3505 | |
| 3506 | while ((skb1 = *skb_p) != NULL) { |
| 3507 | int ntail = 0; |
| 3508 | |
| 3509 | /* The fragment is partially pulled by someone; |
| 3510 | * this can happen on input. Copy it and everything |
| 3511 | * after it. */ |
| 3512 | |
| 3513 | if (skb_shared(skb1)) |
| 3514 | copyflag = 1; |
| 3515 | |
| 3516 | /* If the skb is the last, worry about trailer. */ |
| 3517 | |
| 3518 | if (skb1->next == NULL && tailbits) { |
| 3519 | if (skb_shinfo(skb1)->nr_frags || |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 3520 | skb_has_frag_list(skb1) || |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3521 | skb_tailroom(skb1) < tailbits) |
| 3522 | ntail = tailbits + 128; |
| 3523 | } |
| 3524 | |
| 3525 | if (copyflag || |
| 3526 | skb_cloned(skb1) || |
| 3527 | ntail || |
| 3528 | skb_shinfo(skb1)->nr_frags || |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 3529 | skb_has_frag_list(skb1)) { |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3530 | struct sk_buff *skb2; |
| 3531 | |
| 3532 | /* Fuck, we are miserable poor guys... */ |
| 3533 | if (ntail == 0) |
| 3534 | skb2 = skb_copy(skb1, GFP_ATOMIC); |
| 3535 | else |
| 3536 | skb2 = skb_copy_expand(skb1, |
| 3537 | skb_headroom(skb1), |
| 3538 | ntail, |
| 3539 | GFP_ATOMIC); |
| 3540 | if (unlikely(skb2 == NULL)) |
| 3541 | return -ENOMEM; |
| 3542 | |
| 3543 | if (skb1->sk) |
| 3544 | skb_set_owner_w(skb2, skb1->sk); |
| 3545 | |
| 3546 | /* Looking around. Are we still alive? |
| 3547 | * OK, link new skb, drop old one */ |
| 3548 | |
| 3549 | skb2->next = skb1->next; |
| 3550 | *skb_p = skb2; |
| 3551 | kfree_skb(skb1); |
| 3552 | skb1 = skb2; |
| 3553 | } |
| 3554 | elt++; |
| 3555 | *trailer = skb1; |
| 3556 | skb_p = &skb1->next; |
| 3557 | } |
| 3558 | |
| 3559 | return elt; |
| 3560 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3561 | EXPORT_SYMBOL_GPL(skb_cow_data); |
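
/* Sketch of the usual IPsec-style caller: make the skb writable, then map
 * it for crypto. 'padlen' (trailer bytes the caller plans to append) is an
 * assumption, as is the throwaway sg allocation.
 */
static int example_cow_and_map(struct sk_buff *skb, int padlen)
{
	struct sk_buff *trailer;
	struct scatterlist *sg;
	int nsg;

	nsg = skb_cow_data(skb, padlen, &trailer);
	if (nsg < 0)
		return nsg;

	sg = kmalloc(nsg * sizeof(*sg), GFP_ATOMIC);
	if (!sg)
		return -ENOMEM;

	sg_init_table(sg, nsg);
	skb_to_sgvec(skb, sg, 0, skb->len);
	/* ... hand 'sg' to the crypto layer here ... */
	kfree(sg);
	return 0;
}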
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3562 | |
Eric Dumazet | b1faf56 | 2010-05-31 23:44:05 -0700 | [diff] [blame] | 3563 | static void sock_rmem_free(struct sk_buff *skb) |
| 3564 | { |
| 3565 | struct sock *sk = skb->sk; |
| 3566 | |
| 3567 | atomic_sub(skb->truesize, &sk->sk_rmem_alloc); |
| 3568 | } |
| 3569 | |
| 3570 | /* |
| 3571 | * Note: We don't mem-charge error packets (no sk_forward_alloc changes) |
| 3572 | */ |
| 3573 | int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) |
| 3574 | { |
| 3575 | if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= |
Eric Dumazet | 95c9617 | 2012-04-15 05:58:06 +0000 | [diff] [blame] | 3576 | (unsigned int)sk->sk_rcvbuf) |
Eric Dumazet | b1faf56 | 2010-05-31 23:44:05 -0700 | [diff] [blame] | 3577 | return -ENOMEM; |
| 3578 | |
| 3579 | skb_orphan(skb); |
| 3580 | skb->sk = sk; |
| 3581 | skb->destructor = sock_rmem_free; |
| 3582 | atomic_add(skb->truesize, &sk->sk_rmem_alloc); |
| 3583 | |
Eric Dumazet | abb57ea | 2011-05-18 02:21:31 -0400 | [diff] [blame] | 3584 | /* before exiting rcu section, make sure dst is refcounted */ |
| 3585 | skb_dst_force(skb); |
| 3586 | |
Eric Dumazet | b1faf56 | 2010-05-31 23:44:05 -0700 | [diff] [blame] | 3587 | skb_queue_tail(&sk->sk_error_queue, skb); |
| 3588 | if (!sock_flag(sk, SOCK_DEAD)) |
David S. Miller | 676d236 | 2014-04-11 16:15:36 -0400 | [diff] [blame] | 3589 | sk->sk_data_ready(sk); |
Eric Dumazet | b1faf56 | 2010-05-31 23:44:05 -0700 | [diff] [blame] | 3590 | return 0; |
| 3591 | } |
| 3592 | EXPORT_SYMBOL(sock_queue_err_skb); |
| 3593 | |
Willem de Bruijn | 364a9e9 | 2014-08-31 21:30:27 -0400 | [diff] [blame] | 3594 | struct sk_buff *sock_dequeue_err_skb(struct sock *sk) |
| 3595 | { |
| 3596 | struct sk_buff_head *q = &sk->sk_error_queue; |
| 3597 | struct sk_buff *skb, *skb_next; |
Eric Dumazet | 997d5c3 | 2015-02-18 05:47:55 -0800 | [diff] [blame] | 3598 | unsigned long flags; |
Willem de Bruijn | 364a9e9 | 2014-08-31 21:30:27 -0400 | [diff] [blame] | 3599 | int err = 0; |
| 3600 | |
Eric Dumazet | 997d5c3 | 2015-02-18 05:47:55 -0800 | [diff] [blame] | 3601 | spin_lock_irqsave(&q->lock, flags); |
Willem de Bruijn | 364a9e9 | 2014-08-31 21:30:27 -0400 | [diff] [blame] | 3602 | skb = __skb_dequeue(q); |
| 3603 | if (skb && (skb_next = skb_peek(q))) |
| 3604 | err = SKB_EXT_ERR(skb_next)->ee.ee_errno; |
Eric Dumazet | 997d5c3 | 2015-02-18 05:47:55 -0800 | [diff] [blame] | 3605 | spin_unlock_irqrestore(&q->lock, flags); |
Willem de Bruijn | 364a9e9 | 2014-08-31 21:30:27 -0400 | [diff] [blame] | 3606 | |
| 3607 | sk->sk_err = err; |
| 3608 | if (err) |
| 3609 | sk->sk_error_report(sk); |
| 3610 | |
| 3611 | return skb; |
| 3612 | } |
| 3613 | EXPORT_SYMBOL(sock_dequeue_err_skb); |
| 3614 | |
Alexander Duyck | cab41c4 | 2014-09-10 18:05:26 -0400 | [diff] [blame] | 3615 | /** |
| 3616 | * skb_clone_sk - create clone of skb, and take reference to socket |
| 3617 | * @skb: the skb to clone |
| 3618 | * |
| 3619 | * This function creates a clone of a buffer that holds a reference on |
| 3620 | * sk_refcnt. Buffers created via this function are meant to be |
| 3621 | * returned using sock_queue_err_skb, or freed via kfree_skb. |
| 3622 | * |
| 3623 | * When passing buffers allocated with this function to sock_queue_err_skb |
| 3624 | * it is necessary to wrap the call with sock_hold/sock_put in order to |
| 3625 | * prevent the socket from being released prior to being enqueued on |
| 3626 | * the sk_error_queue. |
| 3627 | */ |
Alexander Duyck | 62bccb8 | 2014-09-04 13:31:35 -0400 | [diff] [blame] | 3628 | struct sk_buff *skb_clone_sk(struct sk_buff *skb) |
| 3629 | { |
| 3630 | struct sock *sk = skb->sk; |
| 3631 | struct sk_buff *clone; |
| 3632 | |
| 3633 | if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt)) |
| 3634 | return NULL; |
| 3635 | |
| 3636 | clone = skb_clone(skb, GFP_ATOMIC); |
| 3637 | if (!clone) { |
| 3638 | sock_put(sk); |
| 3639 | return NULL; |
| 3640 | } |
| 3641 | |
| 3642 | clone->sk = sk; |
| 3643 | clone->destructor = sock_efree; |
| 3644 | |
| 3645 | return clone; |
| 3646 | } |
| 3647 | EXPORT_SYMBOL(skb_clone_sk); |
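
/* Sketch of the sock_hold/sock_put wrapping described above when feeding a
 * clone into the error queue:
 */
static void example_queue_clone(struct sk_buff *skb)
{
	struct sk_buff *clone = skb_clone_sk(skb);
	struct sock *sk;

	if (!clone)
		return;

	sk = clone->sk;
	sock_hold(sk);		/* keep sk alive across the queueing call */
	if (sock_queue_err_skb(sk, clone))
		kfree_skb(clone);
	sock_put(sk);
}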
| 3648 | |
Alexander Duyck | 37846ef | 2014-09-04 13:31:10 -0400 | [diff] [blame] | 3649 | static void __skb_complete_tx_timestamp(struct sk_buff *skb, |
| 3650 | struct sock *sk, |
| 3651 | int tstype) |
Patrick Ohly | ac45f60 | 2009-02-12 05:03:37 +0000 | [diff] [blame] | 3652 | { |
Patrick Ohly | ac45f60 | 2009-02-12 05:03:37 +0000 | [diff] [blame] | 3653 | struct sock_exterr_skb *serr; |
Patrick Ohly | ac45f60 | 2009-02-12 05:03:37 +0000 | [diff] [blame] | 3654 | int err; |
| 3655 | |
Patrick Ohly | ac45f60 | 2009-02-12 05:03:37 +0000 | [diff] [blame] | 3656 | serr = SKB_EXT_ERR(skb); |
| 3657 | memset(serr, 0, sizeof(*serr)); |
| 3658 | serr->ee.ee_errno = ENOMSG; |
| 3659 | serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; |
Willem de Bruijn | e7fd288 | 2014-08-04 22:11:48 -0400 | [diff] [blame] | 3660 | serr->ee.ee_info = tstype; |
Willem de Bruijn | 4ed2d76 | 2014-08-04 22:11:49 -0400 | [diff] [blame] | 3661 | if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) { |
Willem de Bruijn | 09c2d25 | 2014-08-04 22:11:47 -0400 | [diff] [blame] | 3662 | serr->ee.ee_data = skb_shinfo(skb)->tskey; |
Willem de Bruijn | 4ed2d76 | 2014-08-04 22:11:49 -0400 | [diff] [blame] | 3663 | if (sk->sk_protocol == IPPROTO_TCP) |
| 3664 | serr->ee.ee_data -= sk->sk_tskey; |
| 3665 | } |
Eric Dumazet | 2903037 | 2010-05-29 00:20:48 -0700 | [diff] [blame] | 3666 | |
Patrick Ohly | ac45f60 | 2009-02-12 05:03:37 +0000 | [diff] [blame] | 3667 | err = sock_queue_err_skb(sk, skb); |
Eric Dumazet | 2903037 | 2010-05-29 00:20:48 -0700 | [diff] [blame] | 3668 | |
Patrick Ohly | ac45f60 | 2009-02-12 05:03:37 +0000 | [diff] [blame] | 3669 | if (err) |
| 3670 | kfree_skb(skb); |
| 3671 | } |
Alexander Duyck | 37846ef | 2014-09-04 13:31:10 -0400 | [diff] [blame] | 3672 | |
Willem de Bruijn | b245be1 | 2015-01-30 13:29:32 -0500 | [diff] [blame] | 3673 | static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly) |
| 3674 | { |
| 3675 | bool ret; |
| 3676 | |
| 3677 | if (likely(sysctl_tstamp_allow_data || tsonly)) |
| 3678 | return true; |
| 3679 | |
| 3680 | read_lock_bh(&sk->sk_callback_lock); |
| 3681 | ret = sk->sk_socket && sk->sk_socket->file && |
| 3682 | file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW); |
| 3683 | read_unlock_bh(&sk->sk_callback_lock); |
| 3684 | return ret; |
| 3685 | } |
| 3686 | |
Alexander Duyck | 37846ef | 2014-09-04 13:31:10 -0400 | [diff] [blame] | 3687 | void skb_complete_tx_timestamp(struct sk_buff *skb, |
| 3688 | struct skb_shared_hwtstamps *hwtstamps) |
| 3689 | { |
| 3690 | struct sock *sk = skb->sk; |
| 3691 | |
Willem de Bruijn | b245be1 | 2015-01-30 13:29:32 -0500 | [diff] [blame] | 3692 | if (!skb_may_tx_timestamp(sk, false)) |
| 3693 | return; |
| 3694 | |
Alexander Duyck | 62bccb8 | 2014-09-04 13:31:35 -0400 | [diff] [blame] | 3695 | /* take a reference to prevent skb_orphan() from freeing the socket */ |
| 3696 | sock_hold(sk); |
Alexander Duyck | 37846ef | 2014-09-04 13:31:10 -0400 | [diff] [blame] | 3697 | |
Alexander Duyck | 62bccb8 | 2014-09-04 13:31:35 -0400 | [diff] [blame] | 3698 | *skb_hwtstamps(skb) = *hwtstamps; |
| 3699 | __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND); |
Alexander Duyck | 37846ef | 2014-09-04 13:31:10 -0400 | [diff] [blame] | 3700 | |
| 3701 | sock_put(sk); |
| 3702 | } |
| 3703 | EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); |
| 3704 | |
| 3705 | void __skb_tstamp_tx(struct sk_buff *orig_skb, |
| 3706 | struct skb_shared_hwtstamps *hwtstamps, |
| 3707 | struct sock *sk, int tstype) |
| 3708 | { |
| 3709 | struct sk_buff *skb; |
Willem de Bruijn | 3a8dd97 | 2015-03-11 15:43:55 -0400 | [diff] [blame] | 3710 | bool tsonly; |
Alexander Duyck | 37846ef | 2014-09-04 13:31:10 -0400 | [diff] [blame] | 3711 | |
Willem de Bruijn | 3a8dd97 | 2015-03-11 15:43:55 -0400 | [diff] [blame] | 3712 | if (!sk) |
| 3713 | return; |
| 3714 | |
| 3715 | tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY; |
| 3716 | if (!skb_may_tx_timestamp(sk, tsonly)) |
Alexander Duyck | 37846ef | 2014-09-04 13:31:10 -0400 | [diff] [blame] | 3717 | return; |
| 3718 | |
Willem de Bruijn | 49ca0d8 | 2015-01-30 13:29:31 -0500 | [diff] [blame] | 3719 | if (tsonly) |
| 3720 | skb = alloc_skb(0, GFP_ATOMIC); |
Alexander Duyck | 37846ef | 2014-09-04 13:31:10 -0400 | [diff] [blame] | 3721 | else |
Willem de Bruijn | 49ca0d8 | 2015-01-30 13:29:31 -0500 | [diff] [blame] | 3722 | skb = skb_clone(orig_skb, GFP_ATOMIC); |
Alexander Duyck | 37846ef | 2014-09-04 13:31:10 -0400 | [diff] [blame] | 3723 | if (!skb) |
| 3724 | return; |
| 3725 | |
Willem de Bruijn | 49ca0d8 | 2015-01-30 13:29:31 -0500 | [diff] [blame] | 3726 | if (tsonly) { |
| 3727 | skb_shinfo(skb)->tx_flags = skb_shinfo(orig_skb)->tx_flags; |
| 3728 | skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; |
| 3729 | } |
| 3730 | |
| 3731 | if (hwtstamps) |
| 3732 | *skb_hwtstamps(skb) = *hwtstamps; |
| 3733 | else |
| 3734 | skb->tstamp = ktime_get_real(); |
| 3735 | |
Alexander Duyck | 37846ef | 2014-09-04 13:31:10 -0400 | [diff] [blame] | 3736 | __skb_complete_tx_timestamp(skb, sk, tstype); |
| 3737 | } |
Willem de Bruijn | e7fd288 | 2014-08-04 22:11:48 -0400 | [diff] [blame] | 3738 | EXPORT_SYMBOL_GPL(__skb_tstamp_tx); |
| 3739 | |
| 3740 | void skb_tstamp_tx(struct sk_buff *orig_skb, |
| 3741 | struct skb_shared_hwtstamps *hwtstamps) |
| 3742 | { |
| 3743 | return __skb_tstamp_tx(orig_skb, hwtstamps, orig_skb->sk, |
| 3744 | SCM_TSTAMP_SND); |
| 3745 | } |
Patrick Ohly | ac45f60 | 2009-02-12 05:03:37 +0000 | [diff] [blame] | 3746 | EXPORT_SYMBOL_GPL(skb_tstamp_tx); |
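
/* Driver-side sketch: report a hardware TX timestamp once the NIC has
 * completed the packet. 'hw_ns' (nanoseconds read from the device clock)
 * is an assumption.
 */
static void example_report_hwtstamp(struct sk_buff *skb, u64 hw_ns)
{
	struct skb_shared_hwtstamps hwts;

	if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		return;

	memset(&hwts, 0, sizeof(hwts));
	hwts.hwtstamp = ns_to_ktime(hw_ns);
	skb_tstamp_tx(skb, &hwts);
}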
| 3747 | |
Johannes Berg | 6e3e939 | 2011-11-09 10:15:42 +0100 | [diff] [blame] | 3748 | void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) |
| 3749 | { |
| 3750 | struct sock *sk = skb->sk; |
| 3751 | struct sock_exterr_skb *serr; |
| 3752 | int err; |
| 3753 | |
| 3754 | skb->wifi_acked_valid = 1; |
| 3755 | skb->wifi_acked = acked; |
| 3756 | |
| 3757 | serr = SKB_EXT_ERR(skb); |
| 3758 | memset(serr, 0, sizeof(*serr)); |
| 3759 | serr->ee.ee_errno = ENOMSG; |
| 3760 | serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; |
| 3761 | |
Alexander Duyck | bf7fa55 | 2014-09-10 18:05:42 -0400 | [diff] [blame] | 3762 | /* take a reference to prevent skb_orphan() from freeing the socket */ |
| 3763 | sock_hold(sk); |
| 3764 | |
Johannes Berg | 6e3e939 | 2011-11-09 10:15:42 +0100 | [diff] [blame] | 3765 | err = sock_queue_err_skb(sk, skb); |
| 3766 | if (err) |
| 3767 | kfree_skb(skb); |
Alexander Duyck | bf7fa55 | 2014-09-10 18:05:42 -0400 | [diff] [blame] | 3768 | |
| 3769 | sock_put(sk); |
Johannes Berg | 6e3e939 | 2011-11-09 10:15:42 +0100 | [diff] [blame] | 3770 | } |
| 3771 | EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); |
| 3772 | |
Rusty Russell | f35d9d8 | 2008-02-04 23:49:54 -0500 | [diff] [blame] | 3773 | /** |
| 3774 | * skb_partial_csum_set - set up and verify partial csum values for packet |
| 3775 | * @skb: the skb to set |
| 3776 | * @start: the number of bytes after skb->data to start checksumming. |
| 3777 | * @off: the offset from start to place the checksum. |
| 3778 | * |
| 3779 | * For untrusted partially-checksummed packets, we need to make sure the values |
| 3780 | * for skb->csum_start and skb->csum_offset are valid so we don't oops. |
| 3781 | * |
| 3782 | * This function checks and sets those values and skb->ip_summed: if this |
| 3783 | * returns false you should drop the packet. |
| 3784 | */ |
| 3785 | bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) |
| 3786 | { |
Herbert Xu | 5ff8dda | 2009-06-04 01:22:01 +0000 | [diff] [blame] | 3787 | if (unlikely(start > skb_headlen(skb)) || |
| 3788 | unlikely((int)start + off > skb_headlen(skb) - 2)) { |
Joe Perches | e87cc47 | 2012-05-13 21:56:26 +0000 | [diff] [blame] | 3789 | net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n", |
| 3790 | start, off, skb_headlen(skb)); |
Rusty Russell | f35d9d8 | 2008-02-04 23:49:54 -0500 | [diff] [blame] | 3791 | return false; |
| 3792 | } |
| 3793 | skb->ip_summed = CHECKSUM_PARTIAL; |
| 3794 | skb->csum_start = skb_headroom(skb) + start; |
| 3795 | skb->csum_offset = off; |
Jason Wang | e5d5dec | 2013-03-26 23:11:20 +0000 | [diff] [blame] | 3796 | skb_set_transport_header(skb, start); |
Rusty Russell | f35d9d8 | 2008-02-04 23:49:54 -0500 | [diff] [blame] | 3797 | return true; |
| 3798 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3799 | EXPORT_SYMBOL_GPL(skb_partial_csum_set); |
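
/* Sketch: validating an untrusted (e.g. virtio-style) checksum request,
 * where 'start' and 'off' are assumed to come from a guest-supplied header.
 * On failure the caller should drop the packet, as noted above.
 */
static int example_set_partial_csum(struct sk_buff *skb, u16 start, u16 off)
{
	if (!skb_partial_csum_set(skb, start, off))
		return -EINVAL;	/* caller drops the skb */
	return 0;
}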
Rusty Russell | f35d9d8 | 2008-02-04 23:49:54 -0500 | [diff] [blame] | 3800 | |
Paul Durrant | ed1f50c | 2014-01-09 10:02:46 +0000 | [diff] [blame] | 3801 | static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, |
| 3802 | unsigned int max) |
| 3803 | { |
| 3804 | if (skb_headlen(skb) >= len) |
| 3805 | return 0; |
| 3806 | |
| 3807 | /* If we need to pull up, then pull up to the max so we |
| 3808 | * won't need to do it again. |
| 3809 | */ |
| 3810 | if (max > skb->len) |
| 3811 | max = skb->len; |
| 3812 | |
| 3813 | if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) |
| 3814 | return -ENOMEM; |
| 3815 | |
| 3816 | if (skb_headlen(skb) < len) |
| 3817 | return -EPROTO; |
| 3818 | |
| 3819 | return 0; |
| 3820 | } |
| 3821 | |
Jan Beulich | f9708b4 | 2014-03-11 13:56:05 +0000 | [diff] [blame] | 3822 | #define MAX_TCP_HDR_LEN (15 * 4) |
| 3823 | |
| 3824 | static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb, |
| 3825 | typeof(IPPROTO_IP) proto, |
| 3826 | unsigned int off) |
| 3827 | { |
| 3828 | switch (proto) { |
| 3829 | int err; |
| 3830 | |
| 3831 | case IPPROTO_TCP: |
| 3832 | err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr), |
| 3833 | off + MAX_TCP_HDR_LEN); |
| 3834 | if (!err && !skb_partial_csum_set(skb, off, |
| 3835 | offsetof(struct tcphdr, |
| 3836 | check))) |
| 3837 | err = -EPROTO; |
| 3838 | return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; |
| 3839 | |
| 3840 | case IPPROTO_UDP: |
| 3841 | err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr), |
| 3842 | off + sizeof(struct udphdr)); |
| 3843 | if (!err && !skb_partial_csum_set(skb, off, |
| 3844 | offsetof(struct udphdr, |
| 3845 | check))) |
| 3846 | err = -EPROTO; |
| 3847 | return err ? ERR_PTR(err) : &udp_hdr(skb)->check; |
| 3848 | } |
| 3849 | |
| 3850 | return ERR_PTR(-EPROTO); |
| 3851 | } |

/* This value should be large enough to cover a tagged ethernet header plus
 * maximally sized IP and TCP or UDP headers.
 */
#define MAX_IP_HDR_LEN 128

static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate)
{
	unsigned int off;
	bool fragment;
	__sum16 *csum;
	int err;

	fragment = false;

	err = skb_maybe_pull_tail(skb,
				  sizeof(struct iphdr),
				  MAX_IP_HDR_LEN);
	if (err < 0)
		goto out;

	if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
		fragment = true;

	off = ip_hdrlen(skb);

	err = -EPROTO;

	if (fragment)
		goto out;

	csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off);
	if (IS_ERR(csum))
		return PTR_ERR(csum);

	if (recalculate)
		*csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					   ip_hdr(skb)->daddr,
					   skb->len - off,
					   ip_hdr(skb)->protocol, 0);
	err = 0;

out:
	return err;
}

/* This value should be large enough to cover a tagged ethernet header plus
 * an IPv6 header, all options, and a maximal TCP or UDP header.
 */
#define MAX_IPV6_HDR_LEN 256

#define OPT_HDR(type, skb, off) \
	(type *)(skb_network_header(skb) + (off))

static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
{
	int err;
	u8 nexthdr;
	unsigned int off;
	unsigned int len;
	bool fragment;
	bool done;
	__sum16 *csum;

	fragment = false;
	done = false;

	off = sizeof(struct ipv6hdr);

	err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
	if (err < 0)
		goto out;

	nexthdr = ipv6_hdr(skb)->nexthdr;

	len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
	while (off <= len && !done) {
		switch (nexthdr) {
		case IPPROTO_DSTOPTS:
		case IPPROTO_HOPOPTS:
		case IPPROTO_ROUTING: {
			struct ipv6_opt_hdr *hp;

			err = skb_maybe_pull_tail(skb,
						  off +
						  sizeof(struct ipv6_opt_hdr),
						  MAX_IPV6_HDR_LEN);
			if (err < 0)
				goto out;

			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
			nexthdr = hp->nexthdr;
			off += ipv6_optlen(hp);
			break;
		}
		case IPPROTO_AH: {
			struct ip_auth_hdr *hp;

			err = skb_maybe_pull_tail(skb,
						  off +
						  sizeof(struct ip_auth_hdr),
						  MAX_IPV6_HDR_LEN);
			if (err < 0)
				goto out;

			hp = OPT_HDR(struct ip_auth_hdr, skb, off);
			nexthdr = hp->nexthdr;
			off += ipv6_authlen(hp);
			break;
		}
		case IPPROTO_FRAGMENT: {
			struct frag_hdr *hp;

			err = skb_maybe_pull_tail(skb,
						  off +
						  sizeof(struct frag_hdr),
						  MAX_IPV6_HDR_LEN);
			if (err < 0)
				goto out;

			hp = OPT_HDR(struct frag_hdr, skb, off);

			if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
				fragment = true;

			nexthdr = hp->nexthdr;
			off += sizeof(struct frag_hdr);
			break;
		}
		default:
			done = true;
			break;
		}
	}

	err = -EPROTO;

	if (!done || fragment)
		goto out;

	csum = skb_checksum_setup_ip(skb, nexthdr, off);
	if (IS_ERR(csum))
		return PTR_ERR(csum);

	if (recalculate)
		*csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					 &ipv6_hdr(skb)->daddr,
					 skb->len - off, nexthdr, 0);
	err = 0;

out:
	return err;
}

/**
 * skb_checksum_setup - set up partial checksum offset
 * @skb: the skb to set up
 * @recalculate: if true the pseudo-header checksum will be recalculated
 */
int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
{
	int err;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		err = skb_checksum_setup_ipv4(skb, recalculate);
		break;

	case htons(ETH_P_IPV6):
		err = skb_checksum_setup_ipv6(skb, recalculate);
		break;

	default:
		err = -EPROTO;
		break;
	}

	return err;
}
EXPORT_SYMBOL(skb_checksum_setup);
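
/* Usage sketch (illustrative only; example_checksum_fixup() is a
 * hypothetical helper, not part of this file's API): a paravirtual
 * backend that receives a CHECKSUM_PARTIAL packet from a guest could
 * call skb_checksum_setup() to locate the checksum field and refresh
 * the pseudo-header sum before injecting the skb into the stack.
 */
static int __maybe_unused example_checksum_fixup(struct sk_buff *skb)
{
	/* recalculate the pseudo-header checksum too, since the sender
	 * may not have filled it in
	 */
	int err = skb_checksum_setup(skb, true);

	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return err;
	}
	return netif_rx(skb);
}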

/**
 * skb_checksum_maybe_trim - maybe trims the given skb
 * @skb: the skb to check
 * @transport_len: the data length beyond the network header
 *
 * Checks whether the given skb has data beyond the given transport length.
 * If so, returns a cloned skb trimmed to this transport length.
 * Otherwise returns the provided skb. Returns NULL in error cases
 * (e.g. transport_len exceeds skb length or out-of-memory).
 *
 * Caller needs to set the skb transport header and release the returned skb.
 * Provided skb is consumed.
 */
static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
					       unsigned int transport_len)
{
	struct sk_buff *skb_chk;
	unsigned int len = skb_transport_offset(skb) + transport_len;
	int ret;

	if (skb->len < len) {
		kfree_skb(skb);
		return NULL;
	} else if (skb->len == len) {
		return skb;
	}

	skb_chk = skb_clone(skb, GFP_ATOMIC);
	kfree_skb(skb);

	if (!skb_chk)
		return NULL;

	ret = pskb_trim_rcsum(skb_chk, len);
	if (ret) {
		kfree_skb(skb_chk);
		return NULL;
	}

	return skb_chk;
}

/**
 * skb_checksum_trimmed - validate checksum of an skb
 * @skb: the skb to check
 * @transport_len: the data length beyond the network header
 * @skb_chkf: checksum function to use
 *
 * Applies the given checksum function skb_chkf to the provided skb.
 * Returns a checked and maybe trimmed skb. Returns NULL on error.
 *
 * If the skb has data beyond the given transport length, then a
 * trimmed & cloned skb is checked and returned.
 *
 * Caller needs to set the skb transport header and release the returned skb.
 * Provided skb is consumed.
 */
struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
				     unsigned int transport_len,
				     __sum16(*skb_chkf)(struct sk_buff *skb))
{
	struct sk_buff *skb_chk;
	unsigned int offset = skb_transport_offset(skb);
	int ret;

	skb_chk = skb_checksum_maybe_trim(skb, transport_len);
	if (!skb_chk)
		return NULL;

	if (!pskb_may_pull(skb_chk, offset)) {
		kfree_skb(skb_chk);
		return NULL;
	}

	__skb_pull(skb_chk, offset);
	ret = skb_chkf(skb_chk);
	__skb_push(skb_chk, offset);

	if (ret) {
		kfree_skb(skb_chk);
		return NULL;
	}

	return skb_chk;
}
EXPORT_SYMBOL(skb_checksum_trimmed);
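
/* Usage sketch (the example_* names are hypothetical): trim a message
 * to the length advertised by its network header and validate a plain
 * Internet checksum over it, much as IGMP/MLD snooping callers do with
 * protocol-specific checksum functions. The transport header offset
 * must already be set by the caller.
 */
static __sum16 example_simple_csum(struct sk_buff *skb)
{
	return skb_checksum_simple_validate(skb);
}

static struct sk_buff *__maybe_unused
example_trim_and_check(struct sk_buff *skb, unsigned int transport_len)
{
	return skb_checksum_trimmed(skb, transport_len, example_simple_csum);
}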

void __skb_warn_lro_forwarding(const struct sk_buff *skb)
{
	net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
			     skb->dev->name);
}
EXPORT_SYMBOL(__skb_warn_lro_forwarding);

void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
{
	if (head_stolen) {
		skb_release_head_state(skb);
		kmem_cache_free(skbuff_head_cache, skb);
	} else {
		__kfree_skb(skb);
	}
}
EXPORT_SYMBOL(kfree_skb_partial);

/**
 * skb_try_coalesce - try to merge skb to prior one
 * @to: prior buffer
 * @from: buffer to add
 * @fragstolen: pointer to boolean
 * @delta_truesize: how much more was allocated than was requested
 */
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
		      bool *fragstolen, int *delta_truesize)
{
	int i, delta, len = from->len;

	*fragstolen = false;

	if (skb_cloned(to))
		return false;

	if (len <= skb_tailroom(to)) {
		if (len)
			BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
		*delta_truesize = 0;
		return true;
	}

	if (skb_has_frag_list(to) || skb_has_frag_list(from))
		return false;

	if (skb_headlen(from) != 0) {
		struct page *page;
		unsigned int offset;

		if (skb_shinfo(to)->nr_frags +
		    skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
			return false;

		if (skb_head_is_locked(from))
			return false;

		delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));

		page = virt_to_head_page(from->head);
		offset = from->data - (unsigned char *)page_address(page);

		skb_fill_page_desc(to, skb_shinfo(to)->nr_frags,
				   page, offset, skb_headlen(from));
		*fragstolen = true;
	} else {
		if (skb_shinfo(to)->nr_frags +
		    skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS)
			return false;

		delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
	}

	WARN_ON_ONCE(delta < len);

	memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags,
	       skb_shinfo(from)->frags,
	       skb_shinfo(from)->nr_frags * sizeof(skb_frag_t));
	skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags;

	if (!skb_cloned(from))
		skb_shinfo(from)->nr_frags = 0;

	/* if the skb is not cloned this does nothing
	 * since we set nr_frags to 0.
	 */
	for (i = 0; i < skb_shinfo(from)->nr_frags; i++)
		skb_frag_ref(from, i);

	to->truesize += delta;
	to->len += len;
	to->data_len += len;

	*delta_truesize = delta;
	return true;
}
EXPORT_SYMBOL(skb_try_coalesce);
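
/* Usage sketch modeled on receive-queue coalescing (tcp_try_coalesce()
 * follows this pattern; the wrapper below is hypothetical): on success
 * the page fragments of @from now belong to @to, so only what @from
 * still owns is released, honouring whether its head was stolen.
 */
static bool __maybe_unused example_coalesce(struct sk_buff *to,
					    struct sk_buff *from)
{
	bool fragstolen;
	int delta_truesize;

	if (!skb_try_coalesce(to, from, &fragstolen, &delta_truesize))
		return false;

	/* real callers charge delta_truesize to their memory accounting */
	kfree_skb_partial(from, fragstolen);
	return true;
}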

/**
 * skb_scrub_packet - scrub an skb
 *
 * @skb: buffer to clean
 * @xnet: packet is crossing netns
 *
 * skb_scrub_packet can be used after encapsulating or decapsulating a packet
 * into/from a tunnel. Some information has to be cleared during these
 * operations.
 * skb_scrub_packet can also be used to clean an skb before injecting it into
 * another namespace (@xnet == true). We have to clear all information in the
 * skb that could impact namespace isolation.
 */
void skb_scrub_packet(struct sk_buff *skb, bool xnet)
{
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->skb_iif = 0;
	skb->ignore_df = 0;
	skb_dst_drop(skb);
	skb_sender_cpu_clear(skb);
	secpath_reset(skb);
	nf_reset(skb);
	nf_reset_trace(skb);

	if (!xnet)
		return;

	skb_orphan(skb);
	skb->mark = 0;
}
EXPORT_SYMBOL_GPL(skb_scrub_packet);
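
/* Usage sketch (hypothetical forwarding helper): tunnel drivers derive
 * the @xnet argument by comparing the namespaces of the ingress and
 * egress devices, as below; assumes skb->dev and @out_dev are non-NULL.
 */
static void __maybe_unused example_scrub(struct sk_buff *skb,
					 struct net_device *out_dev)
{
	skb_scrub_packet(skb, !net_eq(dev_net(skb->dev), dev_net(out_dev)));
}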

/**
 * skb_gso_transport_seglen - Return length of individual segments of a gso packet
 *
 * @skb: GSO skb
 *
 * skb_gso_transport_seglen is used to determine the real size of the
 * individual segments, including Layer4 headers (TCP/UDP).
 *
 * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
 */
unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	unsigned int thlen = 0;

	if (skb->encapsulation) {
		thlen = skb_inner_transport_header(skb) -
			skb_transport_header(skb);

		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
			thlen += inner_tcp_hdrlen(skb);
	} else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
		thlen = tcp_hdrlen(skb);
	}
	/* UFO sets gso_size to the size of the fragmentation
	 * payload, i.e. the size of the L4 (UDP) header is already
	 * accounted for.
	 */
	return thlen + shinfo->gso_size;
}
EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
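
/* Usage sketch (hypothetical): since the network header is not included
 * in the transport seglen, a forwarding check can add it back to see
 * whether each segment of a GSO packet would fit the egress MTU.
 */
static bool __maybe_unused example_gso_fits(const struct sk_buff *skb,
					    unsigned int mtu)
{
	unsigned int seglen = skb_network_header_len(skb) +
			      skb_gso_transport_seglen(skb);

	return seglen <= mtu;
}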

static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
{
	if (skb_cow(skb, skb_headroom(skb)) < 0) {
		kfree_skb(skb);
		return NULL;
	}

	memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
	skb->mac_header += VLAN_HLEN;
	return skb;
}

struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
{
	struct vlan_hdr *vhdr;
	u16 vlan_tci;

	if (unlikely(skb_vlan_tag_present(skb))) {
		/* vlan_tci is already set-up so leave this for another time */
		return skb;
	}

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto err_free;

	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		goto err_free;

	vhdr = (struct vlan_hdr *)skb->data;
	vlan_tci = ntohs(vhdr->h_vlan_TCI);
	__vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);

	skb_pull_rcsum(skb, VLAN_HLEN);
	vlan_set_encap_proto(skb, vhdr);

	skb = skb_reorder_vlan_header(skb);
	if (unlikely(!skb))
		goto err_free;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	return skb;

err_free:
	kfree_skb(skb);
	return NULL;
}
EXPORT_SYMBOL(skb_vlan_untag);
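
/* Usage sketch (hypothetical wrapper): the software receive path falls
 * back to skb_vlan_untag() when the NIC left the VLAN header in-line
 * instead of stripping it into the hw-accel tag.
 */
static struct sk_buff *__maybe_unused example_maybe_untag(struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_8021Q) ||
	    skb->protocol == htons(ETH_P_8021AD))
		skb = skb_vlan_untag(skb); /* NULL on error; skb consumed */
	return skb;
}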

int skb_ensure_writable(struct sk_buff *skb, int write_len)
{
	if (!pskb_may_pull(skb, write_len))
		return -ENOMEM;

	if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
		return 0;

	return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}
EXPORT_SYMBOL(skb_ensure_writable);
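
/* Usage sketch (hypothetical, in the spirit of flow-action helpers):
 * make the IPv4 header private and writable before editing it. A real
 * implementation must also update the IP header checksum, e.g. with
 * csum_replace2(); that step is elided here.
 */
static int __maybe_unused example_set_ttl(struct sk_buff *skb, u8 ttl)
{
	int err = skb_ensure_writable(skb, skb_network_offset(skb) +
					   sizeof(struct iphdr));

	if (unlikely(err))
		return err;

	ip_hdr(skb)->ttl = ttl;
	return 0;
}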

/* remove VLAN header from packet and update csum accordingly. */
static int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
{
	struct vlan_hdr *vhdr;
	unsigned int offset = skb->data - skb_mac_header(skb);
	int err;

	__skb_push(skb, offset);
	err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
	if (unlikely(err))
		goto pull;

	skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);

	vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
	*vlan_tci = ntohs(vhdr->h_vlan_TCI);

	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
	__skb_pull(skb, VLAN_HLEN);

	vlan_set_encap_proto(skb, vhdr);
	skb->mac_header += VLAN_HLEN;

	if (skb_network_offset(skb) < ETH_HLEN)
		skb_set_network_header(skb, ETH_HLEN);

	skb_reset_mac_len(skb);
pull:
	__skb_pull(skb, offset);

	return err;
}

int skb_vlan_pop(struct sk_buff *skb)
{
	u16 vlan_tci;
	__be16 vlan_proto;
	int err;

	if (likely(skb_vlan_tag_present(skb))) {
		skb->vlan_tci = 0;
	} else {
		if (unlikely((skb->protocol != htons(ETH_P_8021Q) &&
			      skb->protocol != htons(ETH_P_8021AD)) ||
			     skb->len < VLAN_ETH_HLEN))
			return 0;

		err = __skb_vlan_pop(skb, &vlan_tci);
		if (err)
			return err;
	}
	/* move next vlan tag to hw accel tag */
	if (likely((skb->protocol != htons(ETH_P_8021Q) &&
		    skb->protocol != htons(ETH_P_8021AD)) ||
		   skb->len < VLAN_ETH_HLEN))
		return 0;

	vlan_proto = skb->protocol;
	err = __skb_vlan_pop(skb, &vlan_tci);
	if (unlikely(err))
		return err;

	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
	return 0;
}
EXPORT_SYMBOL(skb_vlan_pop);

int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
{
	if (skb_vlan_tag_present(skb)) {
		unsigned int offset = skb->data - skb_mac_header(skb);
		int err;

		/* __vlan_insert_tag expects skb->data to point at the mac
		 * header, so move skb->data there before calling it and
		 * restore the original position afterwards.
		 */
		__skb_push(skb, offset);
		err = __vlan_insert_tag(skb, skb->vlan_proto,
					skb_vlan_tag_get(skb));
		if (err)
			return err;
		skb->protocol = skb->vlan_proto;
		skb->mac_len += VLAN_HLEN;
		__skb_pull(skb, offset);

		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->csum = csum_add(skb->csum, csum_partial(skb->data
					+ (2 * ETH_ALEN), VLAN_HLEN, 0));
	}
	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
	return 0;
}
EXPORT_SYMBOL(skb_vlan_push);
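
/* Usage sketch (hypothetical action pair): rewrite the outermost VLAN
 * tag by popping it and pushing a replacement, roughly what a tc or
 * Open vSwitch vlan action sequence amounts to.
 */
static int __maybe_unused example_retag(struct sk_buff *skb, u16 new_tci)
{
	int err = skb_vlan_pop(skb);

	if (unlikely(err))
		return err;
	return skb_vlan_push(skb, htons(ETH_P_8021Q), new_tci);
}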

/**
 * alloc_skb_with_frags - allocate skb with page frags
 *
 * @header_len: size of linear part
 * @data_len: needed length in frags
 * @max_page_order: max page order desired.
 * @errcode: pointer to error code if any
 * @gfp_mask: allocation mask
 *
 * This can be used to allocate a paged skb, given a maximal order for frags.
 */
struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
				     unsigned long data_len,
				     int max_page_order,
				     int *errcode,
				     gfp_t gfp_mask)
{
	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	unsigned long chunk;
	struct sk_buff *skb;
	struct page *page;
	gfp_t gfp_head;
	int i;

	*errcode = -EMSGSIZE;
	/* Note this test could be relaxed, if we succeed in allocating
	 * high order pages...
	 */
	if (npages > MAX_SKB_FRAGS)
		return NULL;

	gfp_head = gfp_mask;
	if (gfp_head & __GFP_WAIT)
		gfp_head |= __GFP_REPEAT;

	*errcode = -ENOBUFS;
	skb = alloc_skb(header_len, gfp_head);
	if (!skb)
		return NULL;

	skb->truesize += npages << PAGE_SHIFT;

	for (i = 0; npages > 0; i++) {
		int order = max_page_order;

		while (order) {
			if (npages >= 1 << order) {
				page = alloc_pages(gfp_mask |
						   __GFP_COMP |
						   __GFP_NOWARN |
						   __GFP_NORETRY,
						   order);
				if (page)
					goto fill_page;
				/* Do not retry other high order allocations */
				order = 1;
				max_page_order = 0;
			}
			order--;
		}
		page = alloc_page(gfp_mask);
		if (!page)
			goto failure;
fill_page:
		chunk = min_t(unsigned long, data_len,
			      PAGE_SIZE << order);
		skb_fill_page_desc(skb, i, page, 0, chunk);
		data_len -= chunk;
		npages -= 1 << order;
	}
	return skb;

failure:
	kfree_skb(skb);
	return NULL;
}
EXPORT_SYMBOL(alloc_skb_with_frags);
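
/* Usage sketch (illustrative parameters): build a mostly-paged skb for
 * a large buffer, keeping at most one page of linear data, in the
 * spirit of sock_alloc_send_pskb().
 */
static __maybe_unused struct sk_buff *example_alloc(unsigned long size)
{
	unsigned long linear = min_t(unsigned long, size, PAGE_SIZE);
	int errcode;

	return alloc_skb_with_frags(linear, size - linear,
				    PAGE_ALLOC_COSTLY_ORDER, &errcode,
				    GFP_KERNEL);
}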