/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
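/*
 * Illustrative sketch (not part of the original file): one way a caller can
 * satisfy the NOTE above is to hold the queue's own lock around a lockless
 * __skb_ variant, e.g.:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&list->lock, flags);
 *	__skb_queue_tail(list, skb);
 *	spin_unlock_irqrestore(&list->lock, flags);
 *
 * which is essentially what the locked skb_queue_tail() helper does.
 */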

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <trace/events/skb.h>

#include "kmap_skb.h"

static struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;

static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	put_page(buf->page);
}

static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	get_page(buf->page);
}

static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
			       struct pipe_buffer *buf)
{
	return 1;
}


/* Pipe buffer operations for a socket. */
static const struct pipe_buf_operations sock_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = sock_pipe_buf_release,
	.steal = sock_pipe_buf_steal,
	.get = sock_pipe_buf_get,
};

/*
 *	Keep out-of-line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always
 *	reliable.
 */

/**
 *	skb_over_panic	- private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_put(). Not user callable.
 */
static void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/**
 *	skb_under_panic	- private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_push(). Not user callable.
 */

static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/*	Allocate a new skbuff. We do this ourselves so we can
 *	fill in a few 'private' fields and also do memory statistics
 *	to find all the [BEEP] leaks.
 *
 */

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@fclone: allocate from fclone cache instead of head cache
 *		and allocate a cloned (child) skb
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of size bytes. The object has a reference count of one.
 *	The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int fclone, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;

	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_DATA_ALIGN(size);
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc_node_track_caller(size, gfp_mask, node);
	if (!data)
		goto nodata;
	/* kmalloc(size) might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	size = SKB_WITH_OVERHEAD(ksize(data));
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	/* Account for allocated memory : skb + skb->head */
	skb->truesize = SKB_TRUESIZE(size);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->mac_header = ~0U;
#endif

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	if (fclone) {
		struct sk_buff *child = skb + 1;
		atomic_t *fclone_ref = (atomic_t *) (child + 1);

		kmemcheck_annotate_bitfield(child, flags1);
		kmemcheck_annotate_bitfield(child, flags2);
		skb->fclone = SKB_FCLONE_ORIG;
		atomic_set(fclone_ref, 1);

		child->fclone = SKB_FCLONE_UNAVAILABLE;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);
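/*
 * Illustrative sketch (not part of the original file): most callers reach
 * this through the alloc_skb() wrapper rather than __alloc_skb() directly.
 * With caller-chosen header_len and data_len it might look like:
 *
 *	struct sk_buff *skb = alloc_skb(header_len + data_len, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOBUFS;
 *	skb_reserve(skb, header_len);
 *	memcpy(skb_put(skb, data_len), data, data_len);
 */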

/**
 * build_skb - build a network buffer
 * @data: data buffer provided by caller
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc().
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 * Before IO, the driver allocates only the data buffer, where the NIC
 * puts the incoming frame.
 * Driver should add room at head (NET_SKB_PAD) and
 * MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
 * After IO, the driver calls build_skb() to allocate the sk_buff and
 * populate it before giving the packet to the stack.
 * RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *build_skb(void *data)
{
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	unsigned int size;

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (!skb)
		return NULL;

	size = ksize(data) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = SKB_TRUESIZE(size);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->mac_header = ~0U;
#endif

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	return skb;
}
EXPORT_SYMBOL(build_skb);
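/*
 * Illustrative sketch (not part of the original file): an RX path built
 * around build_skb() might look roughly like this, where frame_len and the
 * surrounding refill logic are the driver's own business:
 *
 *	void *buf = kmalloc(NET_SKB_PAD + frame_len +
 *			    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
 *			    GFP_ATOMIC);
 *
 *	... hardware DMAs the frame to buf + NET_SKB_PAD ...
 *
 *	skb = build_skb(buf);
 *	if (!skb) {
 *		kfree(buf);
 *		return;
 *	}
 *	skb_reserve(skb, NET_SKB_PAD);
 *	skb_put(skb, frame_len);
 */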

/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
				   unsigned int length, gfp_t gfp_mask)
{
	struct sk_buff *skb;

	skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE);
	if (likely(skb)) {
		skb_reserve(skb, NET_SKB_PAD);
		skb->dev = dev;
	}
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);
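/*
 * Illustrative sketch (not part of the original file): drivers usually call
 * this through the netdev_alloc_skb() wrapper, which supplies GFP_ATOMIC:
 *
 *	skb = netdev_alloc_skb(dev, pkt_len);
 *	if (unlikely(!skb)) {
 *		dev->stats.rx_dropped++;
 *		return;
 *	}
 */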

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;
}
EXPORT_SYMBOL(skb_add_rx_frag);
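/*
 * Illustrative sketch (not part of the original file): a paged-RX driver
 * can append a freshly filled page fragment in the next free slot:
 *
 *	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len);
 *
 * after which skb->len, skb->data_len and skb->truesize have all grown
 * by len.
 */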

/**
 *	dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
struct sk_buff *dev_alloc_skb(unsigned int length)
{
	/*
	 * There is more code here than it seems:
	 * __dev_alloc_skb is an inline
	 */
	return __dev_alloc_skb(length, GFP_ATOMIC);
}
EXPORT_SYMBOL(dev_alloc_skb);

static void skb_drop_list(struct sk_buff **listp)
{
	struct sk_buff *list = *listp;

	*listp = NULL;

	do {
		struct sk_buff *this = list;
		list = list->next;
		kfree_skb(this);
	} while (list);
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_release_data(struct sk_buff *skb)
{
	if (!skb->cloned ||
	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			       &skb_shinfo(skb)->dataref)) {
		if (skb_shinfo(skb)->nr_frags) {
			int i;
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
				skb_frag_unref(skb, i);
		}

		/*
		 * If the skb's buffer is from userspace, we need to notify
		 * the caller that the lower device's DMA is done.
		 */
		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
			struct ubuf_info *uarg;

			uarg = skb_shinfo(skb)->destructor_arg;
			if (uarg->callback)
				uarg->callback(uarg);
		}

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);

		kfree(skb->head);
	}
}

/*
 *	Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff *other;
	atomic_t *fclone_ref;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		break;

	case SKB_FCLONE_ORIG:
		fclone_ref = (atomic_t *) (skb + 2);
		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, skb);
		break;

	case SKB_FCLONE_CLONE:
		fclone_ref = (atomic_t *) (skb + 1);
		other = skb - 1;

		/* The clone portion is available for
		 * fast-cloning again.
		 */
		skb->fclone = SKB_FCLONE_UNAVAILABLE;

		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, other);
		break;
	}
}

static void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb->nfct);
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(skb->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif
#endif
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	skb_release_data(skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

/**
 *	consume_skb - free an skbuff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero. Functions identically to kfree_skb, but kfree_skb is meant
 *	for frames being dropped after a failure and notes that in a
 *	tracepoint, while consume_skb is for frames consumed normally.
 */
void consume_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_consume_skb(skb);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
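/*
 * Illustrative sketch (not part of the original file): the two free paths
 * differ only in the tracepoint they fire, so callers choose by intent:
 *
 *	if (unlikely(err))
 *		kfree_skb(skb);		error path: traced as a drop
 *	else
 *		consume_skb(skb);	normal completion: not a drop
 */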

/**
 *	skb_recycle - clean up an skb for reuse
 *	@skb: buffer
 *
 *	Recycles the skb to be reused as a receive buffer. This
 *	function does any necessary reference count dropping, and
 *	cleans up the skbuff as if it just came from __alloc_skb().
 */
void skb_recycle(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo;

	skb_release_head_state(skb);

	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->data = skb->head + NET_SKB_PAD;
	skb_reset_tail_pointer(skb);
}
EXPORT_SYMBOL(skb_recycle);

/**
 *	skb_recycle_check - check if skb can be reused for receive
 *	@skb: buffer
 *	@skb_size: minimum receive buffer size
 *
 *	Checks that the skb passed in is not shared or cloned, and
 *	that it is linear and its head portion at least as large as
 *	skb_size so that it can be recycled as a receive buffer.
 *	If these conditions are met, this function does any necessary
 *	reference count dropping and cleans up the skbuff as if it
 *	just came from __alloc_skb().
 */
bool skb_recycle_check(struct sk_buff *skb, int skb_size)
{
	if (!skb_is_recycleable(skb, skb_size))
		return false;

	skb_recycle(skb);

	return true;
}
EXPORT_SYMBOL(skb_recycle_check);
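/*
 * Illustrative sketch (not part of the original file): a driver's TX
 * completion handler might try to reuse a just-freed skb for its RX ring,
 * where rx_buf_size and refill_rx_ring() are hypothetical driver-side
 * pieces:
 *
 *	if (skb_recycle_check(skb, rx_buf_size))
 *		refill_rx_ring(priv, skb);
 *	else
 *		dev_kfree_skb_any(skb);
 */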

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp = old->tstamp;
	new->dev = old->dev;
	new->transport_header = old->transport_header;
	new->network_header = old->network_header;
	new->mac_header = old->mac_header;
	skb_dst_copy(new, old);
	new->rxhash = old->rxhash;
	new->ooo_okay = old->ooo_okay;
	new->l4_rxhash = old->l4_rxhash;
#ifdef CONFIG_XFRM
	new->sp = secpath_get(old->sp);
#endif
	memcpy(new->cb, old->cb, sizeof(old->cb));
	new->csum = old->csum;
	new->local_df = old->local_df;
	new->pkt_type = old->pkt_type;
	new->ip_summed = old->ip_summed;
	skb_copy_queue_mapping(new, old);
	new->priority = old->priority;
#if IS_ENABLED(CONFIG_IP_VS)
	new->ipvs_property = old->ipvs_property;
#endif
	new->protocol = old->protocol;
	new->mark = old->mark;
	new->skb_iif = old->skb_iif;
	__nf_copy(new, old);
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
	new->nf_trace = old->nf_trace;
#endif
#ifdef CONFIG_NET_SCHED
	new->tc_index = old->tc_index;
#ifdef CONFIG_NET_CLS_ACT
	new->tc_verd = old->tc_verd;
#endif
#endif
	new->vlan_tci = old->vlan_tci;

	skb_copy_secmark(new, old);
}

/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(data);
	C(truesize);
	atomic_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 *	skb_morph - morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);
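/*
 * Illustrative sketch (not part of the original file): skb_morph() lets an
 * existing skb take over another's data; for example, IP fragment
 * reassembly uses it so the queue's head skb ends up carrying the first
 * fragment's contents:
 *
 *	skb_morph(head, first_frag);
 *
 * after which head shares first_frag's data and head's old state has been
 * released.
 */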

/* skb_copy_ubufs - copy userspace skb frag buffers to kernel
 * @skb: the skb to modify
 * @gfp_mask: allocation priority
 *
 * This must be called on SKBTX_DEV_ZEROCOPY skb.
 * It will copy all frags into kernel memory and drop the reference
 * to the userspace pages.
 *
 * If this function is called from an interrupt, @gfp_mask must be
 * %GFP_ATOMIC.
 *
 * Returns 0 on success or a negative error code on failure
 * to allocate kernel memory to copy to.
 */
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
{
	int i;
	int num_frags = skb_shinfo(skb)->nr_frags;
	struct page *page, *head = NULL;
	struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg;

	for (i = 0; i < num_frags; i++) {
		u8 *vaddr;
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		page = alloc_page(gfp_mask);
		if (!page) {
			while (head) {
				struct page *next = (struct page *)head->private;
				put_page(head);
				head = next;
			}
			return -ENOMEM;
		}
		vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
		memcpy(page_address(page),
		       vaddr + f->page_offset, skb_frag_size(f));
		kunmap_skb_frag(vaddr);
		page->private = (unsigned long)head;
		head = page;
	}

	/* skb frags release userspace buffers */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		skb_frag_unref(skb, i);

	uarg->callback(uarg);

	/* skb frags point to kernel buffers */
	for (i = skb_shinfo(skb)->nr_frags; i > 0; i--) {
		__skb_fill_page_desc(skb, i - 1, head, 0,
				     skb_shinfo(skb)->frags[i - 1].size);
		head = (struct page *)head->private;
	}

	skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
	return 0;
}


/**
 *	skb_clone - duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt, @gfp_mask must be
 *	%GFP_ATOMIC.
 */

struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n;

	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		if (skb_copy_ubufs(skb, gfp_mask))
			return NULL;
	}

	n = skb + 1;
	if (skb->fclone == SKB_FCLONE_ORIG &&
	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
		atomic_t *fclone_ref = (atomic_t *) (n + 1);
		n->fclone = SKB_FCLONE_CLONE;
		atomic_inc(fclone_ref);
	} else {
		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;

		kmemcheck_annotate_bitfield(n, flags1);
		kmemcheck_annotate_bitfield(n, flags2);
		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

	return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);
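/*
 * Illustrative sketch (not part of the original file): clones share payload,
 * so a caller wanting to send the same data down two paths might do:
 *
 *	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
 *
 *	if (clone)
 *		dev_queue_xmit(clone);
 *	dev_queue_xmit(skb);
 *
 * Writing through either skb's data would be a bug without a prior
 * skb_cow() / pskb_expand_head(), since the bytes are shared.
 */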
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 778 | |
| 779 | static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) |
| 780 | { |
Arnaldo Carvalho de Melo | 2e07fa9 | 2007-04-10 21:22:35 -0700 | [diff] [blame] | 781 | #ifndef NET_SKBUFF_DATA_USES_OFFSET |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 782 | /* |
| 783 | * Shift between the two data areas in bytes |
| 784 | */ |
| 785 | unsigned long offset = new->data - old->data; |
Arnaldo Carvalho de Melo | 2e07fa9 | 2007-04-10 21:22:35 -0700 | [diff] [blame] | 786 | #endif |
Herbert Xu | dec1881 | 2007-10-14 00:37:30 -0700 | [diff] [blame] | 787 | |
| 788 | __copy_skb_header(new, old); |
| 789 | |
Arnaldo Carvalho de Melo | 2e07fa9 | 2007-04-10 21:22:35 -0700 | [diff] [blame] | 790 | #ifndef NET_SKBUFF_DATA_USES_OFFSET |
| 791 | /* {transport,network,mac}_header are relative to skb->head */ |
| 792 | new->transport_header += offset; |
| 793 | new->network_header += offset; |
Stephen Hemminger | 603a8bb | 2009-06-17 12:17:34 +0000 | [diff] [blame] | 794 | if (skb_mac_header_was_set(new)) |
| 795 | new->mac_header += offset; |
Arnaldo Carvalho de Melo | 2e07fa9 | 2007-04-10 21:22:35 -0700 | [diff] [blame] | 796 | #endif |
Herbert Xu | 7967168 | 2006-06-22 02:40:14 -0700 | [diff] [blame] | 797 | skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size; |
| 798 | skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; |
| 799 | skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 800 | } |
| 801 | |
/**
 *	skb_copy - create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As a by-product this function converts a non-linear &sk_buff into a
 *	linear one, so the &sk_buff becomes completely private and the
 *	caller is allowed to modify all the data of the returned buffer.
 *	This means that this function is not recommended for use in
 *	circumstances when only the header is going to be modified.
 *	Use pskb_copy() instead.
 */

struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb_headroom(skb);
	unsigned int size = (skb_end_pointer(skb) - skb->head) + skb->data_len;
	struct sk_buff *n = alloc_skb(size, gfp_mask);

	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
		BUG();

	copy_skb_header(n, skb);
	return n;
}
EXPORT_SYMBOL(skb_copy);
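/*
 * Illustrative sketch (not part of the original file): when the payload
 * itself must be edited, a private linear copy is the safe route:
 *
 *	struct sk_buff *priv = skb_copy(skb, GFP_ATOMIC);
 *
 *	if (!priv)
 *		return -ENOMEM;
 *	priv->data[0] ^= 0x01;	now safe: nothing is shared with skb
 */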
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 840 | |
| 841 | /** |
Eric Dumazet | 117632e | 2011-12-03 21:39:53 +0000 | [diff] [blame] | 842 | * __pskb_copy - create copy of an sk_buff with private head. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 843 | * @skb: buffer to copy |
Eric Dumazet | 117632e | 2011-12-03 21:39:53 +0000 | [diff] [blame] | 844 | * @headroom: headroom of new skb |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 845 | * @gfp_mask: allocation priority |
| 846 | * |
| 847 | * Make a copy of both an &sk_buff and part of its data, located |
| 848 | * in header. Fragmented data remain shared. This is used when |
| 849 | * the caller wishes to modify only header of &sk_buff and needs |
| 850 | * private copy of the header to alter. Returns %NULL on failure |
| 851 | * or the pointer to the buffer on success. |
| 852 | * The returned buffer has a reference count of 1. |
| 853 | */ |
| 854 | |
Eric Dumazet | 117632e | 2011-12-03 21:39:53 +0000 | [diff] [blame] | 855 | struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 856 | { |
Eric Dumazet | 117632e | 2011-12-03 21:39:53 +0000 | [diff] [blame] | 857 | unsigned int size = skb_headlen(skb) + headroom; |
Eric Dumazet | 6602ceb | 2010-09-01 05:25:10 +0000 | [diff] [blame] | 858 | struct sk_buff *n = alloc_skb(size, gfp_mask); |
| 859 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 860 | if (!n) |
| 861 | goto out; |
| 862 | |
| 863 | /* Set the data pointer */ |
Eric Dumazet | 117632e | 2011-12-03 21:39:53 +0000 | [diff] [blame] | 864 | skb_reserve(n, headroom); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 865 | /* Set the tail pointer and length */ |
| 866 | skb_put(n, skb_headlen(skb)); |
| 867 | /* Copy the bytes */ |
Arnaldo Carvalho de Melo | d626f62 | 2007-03-27 18:55:52 -0300 | [diff] [blame] | 868 | skb_copy_from_linear_data(skb, n->data, n->len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 869 | |
Herbert Xu | 25f484a | 2006-11-07 14:57:15 -0800 | [diff] [blame] | 870 | n->truesize += skb->data_len; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 871 | n->data_len = skb->data_len; |
| 872 | n->len = skb->len; |
| 873 | |
| 874 | if (skb_shinfo(skb)->nr_frags) { |
| 875 | int i; |
| 876 | |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 877 | if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { |
| 878 | if (skb_copy_ubufs(skb, gfp_mask)) { |
Dan Carpenter | 1511022 | 2011-07-19 22:51:49 +0000 | [diff] [blame] | 879 | kfree_skb(n); |
| 880 | n = NULL; |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 881 | goto out; |
| 882 | } |
| 883 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 884 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
| 885 | skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 886 | skb_frag_ref(skb, i); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 887 | } |
| 888 | skb_shinfo(n)->nr_frags = i; |
| 889 | } |
| 890 | |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 891 | if (skb_has_frag_list(skb)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 892 | skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; |
| 893 | skb_clone_fraglist(n); |
| 894 | } |
| 895 | |
| 896 | copy_skb_header(n, skb); |
| 897 | out: |
| 898 | return n; |
| 899 | } |
Eric Dumazet | 117632e | 2011-12-03 21:39:53 +0000 | [diff] [blame] | 900 | EXPORT_SYMBOL(__pskb_copy); |
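/*
 * Illustrative sketch (not part of the original file): callers normally use
 * the pskb_copy() wrapper, which keeps the original headroom; only the
 * linear header area of the result is writable, the paged frags remain
 * shared:
 *
 *	struct sk_buff *hdr_copy = pskb_copy(skb, GFP_ATOMIC);
 *
 *	if (hdr_copy) {
 *		... edit bytes in the linear header via hdr_copy->data ...
 *	}
 */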

/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates identical copy, if @nhead and @ntail are zero)
 *	header of skb. &sk_buff itself is not changed. &sk_buff MUST have
 *	reference count of 1. Returns zero in the case of success, or a
 *	negative error code if expansion failed. In the latter case,
 *	&sk_buff is not changed.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	int i;
	u8 *data;
	int size = nhead + (skb_end_pointer(skb) - skb->head) + ntail;
	long off;
	bool fastpath;

	BUG_ON(nhead < 0);

	if (skb_shared(skb))
		BUG();

	size = SKB_DATA_ALIGN(size);

	/* Check if we can avoid taking references on fragments if we own
	 * the last reference on skb->head. (see skb_release_data())
	 */
	if (!skb->cloned)
		fastpath = true;
	else {
		int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
		fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta;
	}

	if (fastpath &&
	    size + sizeof(struct skb_shared_info) <= ksize(skb->head)) {
		memmove(skb->head + size, skb_shinfo(skb),
			offsetof(struct skb_shared_info,
				 frags[skb_shinfo(skb)->nr_frags]));
		memmove(skb->head + nhead, skb->head,
			skb_tail_pointer(skb) - skb->head);
		off = nhead;
		goto adjust_others;
	}

	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
	if (!data)
		goto nodata;

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void.
	 */
	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));

	if (fastpath) {
		kfree(skb->head);
	} else {
		/* copy the userspace frags of this zero-copy skb */
		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
			if (skb_copy_ubufs(skb, gfp_mask))
				goto nofrags;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			skb_frag_ref(skb, i);

		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);

		skb_release_data(skb);
	}
	off = (data + nhead) - skb->head;

	skb->head = data;
adjust_others:
	skb->data += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end = size;
	off = nhead;
#else
	skb->end = skb->head + size;
#endif
	/* {transport,network,mac}_header and tail are relative to skb->head */
	skb->tail += off;
	skb->transport_header += off;
	skb->network_header += off;
	if (skb_mac_header_was_set(skb))
		skb->mac_header += off;
	/* Only adjust this if it actually is csum_start rather than csum */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += nhead;
	skb->cloned = 0;
	skb->hdr_len = 0;
	skb->nohdr = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;

nofrags:
	kfree(data);
nodata:
	return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);
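/*
 * Illustrative sketch (not part of the original file): a tunnel driver
 * needing extra headroom for encapsulation might do the following, where
 * needed_headroom is its own requirement:
 *
 *	if (skb_headroom(skb) < needed_headroom &&
 *	    pskb_expand_head(skb, needed_headroom - skb_headroom(skb),
 *			     0, GFP_ATOMIC)) {
 *		kfree_skb(skb);
 *		return -ENOMEM;
 *	}
 *
 * Any cached pointers into the old header (ip_hdr() etc.) must be
 * re-derived afterwards, as the doc comment above warns.
 */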
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1016 | |
| 1017 | /* Make private copy of skb with writable head and some headroom */ |
| 1018 | |
| 1019 | struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) |
| 1020 | { |
| 1021 | struct sk_buff *skb2; |
| 1022 | int delta = headroom - skb_headroom(skb); |
| 1023 | |
| 1024 | if (delta <= 0) |
| 1025 | skb2 = pskb_copy(skb, GFP_ATOMIC); |
| 1026 | else { |
| 1027 | skb2 = skb_clone(skb, GFP_ATOMIC); |
| 1028 | if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0, |
| 1029 | GFP_ATOMIC)) { |
| 1030 | kfree_skb(skb2); |
| 1031 | skb2 = NULL; |
| 1032 | } |
| 1033 | } |
| 1034 | return skb2; |
| 1035 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1036 | EXPORT_SYMBOL(skb_realloc_headroom); |
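/*
 * Editor's illustrative sketch, not part of skbuff.c: the usual driver
 * pattern around skb_realloc_headroom(). Note that the helper does NOT
 * free the original skb, so the caller drops it explicitly.
 * EXAMPLE_HEADROOM is a made-up placeholder.
 */
#if 0
#define EXAMPLE_HEADROOM 32

static struct sk_buff *example_ensure_headroom(struct sk_buff *skb)
{
	struct sk_buff *nskb;

	if (skb_headroom(skb) >= EXAMPLE_HEADROOM)
		return skb;
	nskb = skb_realloc_headroom(skb, EXAMPLE_HEADROOM);
	kfree_skb(skb);		/* drop the original in either case */
	return nskb;		/* NULL on allocation failure */
}
#endif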
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1037 | |
| 1038 | /** |
| 1039 | * skb_copy_expand - copy and expand sk_buff |
| 1040 | * @skb: buffer to copy |
| 1041 | * @newheadroom: new free bytes at head |
| 1042 | * @newtailroom: new free bytes at tail |
| 1043 | * @gfp_mask: allocation priority |
| 1044 | * |
| 1045 | * Make a copy of both an &sk_buff and its data, and while doing so |
| 1046 | * allocate additional space. |
| 1047 | * |
| 1048 | * This is used when the caller wishes to modify the data and needs a |
| 1049 | * private copy of the data to alter as well as more space for new fields. |
| 1050 | * Returns %NULL on failure or the pointer to the buffer |
| 1051 | * on success. The returned buffer has a reference count of 1. |
| 1052 | * |
| 1053 | * You must pass %GFP_ATOMIC as the allocation priority if this function |
| 1054 | * is called from an interrupt. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1055 | */ |
| 1056 | struct sk_buff *skb_copy_expand(const struct sk_buff *skb, |
Victor Fusco | 86a76ca | 2005-07-08 14:57:47 -0700 | [diff] [blame] | 1057 | int newheadroom, int newtailroom, |
Al Viro | dd0fc66 | 2005-10-07 07:46:04 +0100 | [diff] [blame] | 1058 | gfp_t gfp_mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1059 | { |
| 1060 | /* |
| 1061 | * Allocate the copy buffer |
| 1062 | */ |
| 1063 | struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom, |
| 1064 | gfp_mask); |
Patrick McHardy | efd1e8d | 2007-04-10 18:30:09 -0700 | [diff] [blame] | 1065 | int oldheadroom = skb_headroom(skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1066 | int head_copy_len, head_copy_off; |
Herbert Xu | 5288605 | 2007-09-16 16:32:11 -0700 | [diff] [blame] | 1067 | int off; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1068 | |
| 1069 | if (!n) |
| 1070 | return NULL; |
| 1071 | |
| 1072 | skb_reserve(n, newheadroom); |
| 1073 | |
| 1074 | /* Set the tail pointer and length */ |
| 1075 | skb_put(n, skb->len); |
| 1076 | |
Patrick McHardy | efd1e8d | 2007-04-10 18:30:09 -0700 | [diff] [blame] | 1077 | head_copy_len = oldheadroom; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1078 | head_copy_off = 0; |
| 1079 | if (newheadroom <= head_copy_len) |
| 1080 | head_copy_len = newheadroom; |
| 1081 | else |
| 1082 | head_copy_off = newheadroom - head_copy_len; |
| 1083 | |
| 1084 | /* Copy the linear header and data. */ |
| 1085 | if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, |
| 1086 | skb->len + head_copy_len)) |
| 1087 | BUG(); |
| 1088 | |
| 1089 | copy_skb_header(n, skb); |
| 1090 | |
Patrick McHardy | efd1e8d | 2007-04-10 18:30:09 -0700 | [diff] [blame] | 1091 | off = newheadroom - oldheadroom; |
David S. Miller | be2b6e6 | 2010-07-22 13:27:09 -0700 | [diff] [blame] | 1092 | if (n->ip_summed == CHECKSUM_PARTIAL) |
| 1093 | n->csum_start += off; |
Herbert Xu | 5288605 | 2007-09-16 16:32:11 -0700 | [diff] [blame] | 1094 | #ifdef NET_SKBUFF_DATA_USES_OFFSET |
Patrick McHardy | efd1e8d | 2007-04-10 18:30:09 -0700 | [diff] [blame] | 1095 | n->transport_header += off; |
| 1096 | n->network_header += off; |
Stephen Hemminger | 603a8bb | 2009-06-17 12:17:34 +0000 | [diff] [blame] | 1097 | if (skb_mac_header_was_set(skb)) |
| 1098 | n->mac_header += off; |
Herbert Xu | 5288605 | 2007-09-16 16:32:11 -0700 | [diff] [blame] | 1099 | #endif |
Patrick McHardy | efd1e8d | 2007-04-10 18:30:09 -0700 | [diff] [blame] | 1100 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1101 | return n; |
| 1102 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1103 | EXPORT_SYMBOL(skb_copy_expand); |
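/*
 * Editor's illustrative sketch, not part of skbuff.c: taking a private,
 * writable copy with 16 extra bytes of headroom and tailroom, e.g.
 * before adding an encapsulation header and trailer. @newheadroom and
 * @newtailroom are absolute sizes for the copy, so the current rooms
 * are added in.
 */
#if 0
static struct sk_buff *example_copy_with_room(const struct sk_buff *skb)
{
	return skb_copy_expand(skb, skb_headroom(skb) + 16,
			       skb_tailroom(skb) + 16, GFP_ATOMIC);
}
#endif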
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1104 | |
| 1105 | /** |
| 1106 | * skb_pad - zero pad the tail of an skb |
| 1107 | * @skb: buffer to pad |
| 1108 | * @pad: space to pad |
| 1109 | * |
| 1110 | * Ensure that a buffer is followed by a padding area that is zero |
| 1111 | * filled. Used by network drivers which may DMA or transfer data |
| 1112 | * beyond the buffer end onto the wire. |
| 1113 | * |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1114 | * May return an error in out-of-memory cases. The skb is freed on error. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1115 | */ |
YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 1116 | |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1117 | int skb_pad(struct sk_buff *skb, int pad) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1118 | { |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1119 | int err; |
| 1120 | int ntail; |
YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 1121 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1122 | /* If the skbuff is non-linear, tailroom is always zero. */ |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1123 | if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1124 | memset(skb->data + skb->len, 0, pad); |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1125 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1126 | } |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1127 | |
Arnaldo Carvalho de Melo | 4305b54 | 2007-04-19 20:43:29 -0700 | [diff] [blame] | 1128 | ntail = skb->data_len + pad - (skb->end - skb->tail); |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1129 | if (likely(skb_cloned(skb) || ntail > 0)) { |
| 1130 | err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); |
| 1131 | if (unlikely(err)) |
| 1132 | goto free_skb; |
| 1133 | } |
| 1134 | |
| 1135 | /* FIXME: The use of this function with non-linear skb's really needs |
| 1136 | * to be audited. |
| 1137 | */ |
| 1138 | err = skb_linearize(skb); |
| 1139 | if (unlikely(err)) |
| 1140 | goto free_skb; |
| 1141 | |
| 1142 | memset(skb->data + skb->len, 0, pad); |
| 1143 | return 0; |
| 1144 | |
| 1145 | free_skb: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1146 | kfree_skb(skb); |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1147 | return err; |
YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 1148 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1149 | EXPORT_SYMBOL(skb_pad); |
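/*
 * Editor's illustrative sketch, not part of skbuff.c: zero-padding a
 * runt Ethernet frame up to ETH_ZLEN (from <linux/if_ether.h>) before
 * transmission. skb_pad() only zeroes the tailroom; skb->len is left
 * unchanged, so the driver transmits ETH_ZLEN bytes itself. On failure
 * the skb has already been freed.
 */
#if 0
static int example_pad_runt(struct sk_buff *skb)
{
	if (skb->len >= ETH_ZLEN)
		return 0;
	if (skb_pad(skb, ETH_ZLEN - skb->len))
		return -ENOMEM;		/* skb_pad() freed the skb */
	return 0;
}
#endif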
YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 1150 | |
Ilpo Järvinen | 0dde3e1 | 2008-03-27 17:43:41 -0700 | [diff] [blame] | 1151 | /** |
| 1152 | * skb_put - add data to a buffer |
| 1153 | * @skb: buffer to use |
| 1154 | * @len: amount of data to add |
| 1155 | * |
| 1156 | * This function extends the used data area of the buffer. If this would |
| 1157 | * exceed the total buffer size the kernel will panic. A pointer to the |
| 1158 | * first byte of the extra data is returned. |
| 1159 | */ |
| 1160 | unsigned char *skb_put(struct sk_buff *skb, unsigned int len) |
| 1161 | { |
| 1162 | unsigned char *tmp = skb_tail_pointer(skb); |
| 1163 | SKB_LINEAR_ASSERT(skb); |
| 1164 | skb->tail += len; |
| 1165 | skb->len += len; |
| 1166 | if (unlikely(skb->tail > skb->end)) |
| 1167 | skb_over_panic(skb, len, __builtin_return_address(0)); |
| 1168 | return tmp; |
| 1169 | } |
| 1170 | EXPORT_SYMBOL(skb_put); |
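/*
 * Editor's illustrative sketch, not part of skbuff.c: the canonical
 * alloc/reserve/put sequence for building an outgoing packet. The
 * 128-byte headroom is an arbitrary placeholder.
 */
#if 0
static struct sk_buff *example_build(const void *payload, unsigned int len)
{
	struct sk_buff *skb = alloc_skb(128 + len, GFP_ATOMIC);

	if (!skb)
		return NULL;
	skb_reserve(skb, 128);				/* headroom for headers */
	memcpy(skb_put(skb, len), payload, len);	/* append the payload */
	return skb;
}
#endif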
| 1171 | |
Ilpo Järvinen | 6be8ac2 | 2008-03-27 17:47:24 -0700 | [diff] [blame] | 1172 | /** |
Ilpo Järvinen | c2aa270 | 2008-03-27 17:52:40 -0700 | [diff] [blame] | 1173 | * skb_push - add data to the start of a buffer |
| 1174 | * @skb: buffer to use |
| 1175 | * @len: amount of data to add |
| 1176 | * |
| 1177 | * This function extends the used data area of the buffer at the buffer |
| 1178 | * start. If this would exceed the total buffer headroom the kernel will |
| 1179 | * panic. A pointer to the first byte of the extra data is returned. |
| 1180 | */ |
| 1181 | unsigned char *skb_push(struct sk_buff *skb, unsigned int len) |
| 1182 | { |
| 1183 | skb->data -= len; |
| 1184 | skb->len += len; |
| 1185 | if (unlikely(skb->data < skb->head)) |
| 1186 | skb_under_panic(skb, len, __builtin_return_address(0)); |
| 1187 | return skb->data; |
| 1188 | } |
| 1189 | EXPORT_SYMBOL(skb_push); |
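/*
 * Editor's illustrative sketch, not part of skbuff.c: prepending a
 * header into previously reserved headroom. "struct example_hdr" is
 * hypothetical; the headroom must already exist, or skb_push() will
 * panic as documented above.
 */
#if 0
struct example_hdr {
	__be16 proto;
};

static void example_push_header(struct sk_buff *skb, __be16 proto)
{
	struct example_hdr *hdr;

	hdr = (struct example_hdr *)skb_push(skb, sizeof(*hdr));
	hdr->proto = proto;
}
#endif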
| 1190 | |
| 1191 | /** |
Ilpo Järvinen | 6be8ac2 | 2008-03-27 17:47:24 -0700 | [diff] [blame] | 1192 | * skb_pull - remove data from the start of a buffer |
| 1193 | * @skb: buffer to use |
| 1194 | * @len: amount of data to remove |
| 1195 | * |
| 1196 | * This function removes data from the start of a buffer, returning |
| 1197 | * the memory to the headroom. A pointer to the next data in the buffer |
| 1198 | * is returned. Once the data has been pulled, future pushes will overwrite |
| 1199 | * the old data. |
| 1200 | */ |
| 1201 | unsigned char *skb_pull(struct sk_buff *skb, unsigned int len) |
| 1202 | { |
David S. Miller | 47d2964 | 2010-05-02 02:21:44 -0700 | [diff] [blame] | 1203 | return skb_pull_inline(skb, len); |
Ilpo Järvinen | 6be8ac2 | 2008-03-27 17:47:24 -0700 | [diff] [blame] | 1204 | } |
| 1205 | EXPORT_SYMBOL(skb_pull); |
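/*
 * Editor's illustrative sketch, not part of skbuff.c: advancing past a
 * fixed 4-byte tag on receive. pskb_may_pull() guards the access in
 * case the skb is non-linear.
 */
#if 0
static int example_strip_tag(struct sk_buff *skb)
{
	if (!pskb_may_pull(skb, 4))
		return -EINVAL;		/* packet too short */
	skb_pull(skb, 4);		/* data now points past the tag */
	return 0;
}
#endif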
| 1206 | |
Ilpo Järvinen | 419ae74 | 2008-03-27 17:54:01 -0700 | [diff] [blame] | 1207 | /** |
| 1208 | * skb_trim - remove end from a buffer |
| 1209 | * @skb: buffer to alter |
| 1210 | * @len: new length |
| 1211 | * |
| 1212 | * Cut the length of a buffer down by removing data from the tail. If |
| 1213 | * the buffer is already under the specified length, it is not modified. |
| 1214 | * The skb must be linear. |
| 1215 | */ |
| 1216 | void skb_trim(struct sk_buff *skb, unsigned int len) |
| 1217 | { |
| 1218 | if (skb->len > len) |
| 1219 | __skb_trim(skb, len); |
| 1220 | } |
| 1221 | EXPORT_SYMBOL(skb_trim); |
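/*
 * Editor's illustrative sketch, not part of skbuff.c: chopping a
 * 4-byte hardware-appended trailer (such as an FCS) off a linear skb.
 * Non-linear buffers need pskb_trim() instead.
 */
#if 0
static void example_drop_fcs(struct sk_buff *skb)
{
	if (skb->len > 4)
		skb_trim(skb, skb->len - 4);
}
#endif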
| 1222 | |
Herbert Xu | 3cc0e87 | 2006-06-09 16:13:38 -0700 | [diff] [blame] | 1223 | /* Trims skb to length len. It can change skb pointers. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1224 | */ |
| 1225 | |
Herbert Xu | 3cc0e87 | 2006-06-09 16:13:38 -0700 | [diff] [blame] | 1226 | int ___pskb_trim(struct sk_buff *skb, unsigned int len) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1227 | { |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1228 | struct sk_buff **fragp; |
| 1229 | struct sk_buff *frag; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1230 | int offset = skb_headlen(skb); |
| 1231 | int nfrags = skb_shinfo(skb)->nr_frags; |
| 1232 | int i; |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1233 | int err; |
| 1234 | |
| 1235 | if (skb_cloned(skb) && |
| 1236 | unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) |
| 1237 | return err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1238 | |
Herbert Xu | f4d26fb | 2006-07-30 20:20:28 -0700 | [diff] [blame] | 1239 | i = 0; |
| 1240 | if (offset >= len) |
| 1241 | goto drop_pages; |
| 1242 | |
| 1243 | for (; i < nfrags; i++) { |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 1244 | int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1245 | |
| 1246 | if (end < len) { |
| 1247 | offset = end; |
| 1248 | continue; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1249 | } |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1250 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 1251 | skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1252 | |
Herbert Xu | f4d26fb | 2006-07-30 20:20:28 -0700 | [diff] [blame] | 1253 | drop_pages: |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1254 | skb_shinfo(skb)->nr_frags = i; |
| 1255 | |
| 1256 | for (; i < nfrags; i++) |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 1257 | skb_frag_unref(skb, i); |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1258 | |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 1259 | if (skb_has_frag_list(skb)) |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1260 | skb_drop_fraglist(skb); |
Herbert Xu | f4d26fb | 2006-07-30 20:20:28 -0700 | [diff] [blame] | 1261 | goto done; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1262 | } |
| 1263 | |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1264 | for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); |
| 1265 | fragp = &frag->next) { |
| 1266 | int end = offset + frag->len; |
| 1267 | |
| 1268 | if (skb_shared(frag)) { |
| 1269 | struct sk_buff *nfrag; |
| 1270 | |
| 1271 | nfrag = skb_clone(frag, GFP_ATOMIC); |
| 1272 | if (unlikely(!nfrag)) |
| 1273 | return -ENOMEM; |
| 1274 | |
| 1275 | nfrag->next = frag->next; |
Herbert Xu | f4d26fb | 2006-07-30 20:20:28 -0700 | [diff] [blame] | 1276 | kfree_skb(frag); |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1277 | frag = nfrag; |
| 1278 | *fragp = frag; |
| 1279 | } |
| 1280 | |
| 1281 | if (end < len) { |
| 1282 | offset = end; |
| 1283 | continue; |
| 1284 | } |
| 1285 | |
| 1286 | if (end > len && |
| 1287 | unlikely((err = pskb_trim(frag, len - offset)))) |
| 1288 | return err; |
| 1289 | |
| 1290 | if (frag->next) |
| 1291 | skb_drop_list(&frag->next); |
| 1292 | break; |
| 1293 | } |
| 1294 | |
Herbert Xu | f4d26fb | 2006-07-30 20:20:28 -0700 | [diff] [blame] | 1295 | done: |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1296 | if (len > skb_headlen(skb)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1297 | skb->data_len -= skb->len - len; |
| 1298 | skb->len = len; |
| 1299 | } else { |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1300 | skb->len = len; |
| 1301 | skb->data_len = 0; |
Arnaldo Carvalho de Melo | 27a884d | 2007-04-19 20:29:13 -0700 | [diff] [blame] | 1302 | skb_set_tail_pointer(skb, len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1303 | } |
| 1304 | |
| 1305 | return 0; |
| 1306 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1307 | EXPORT_SYMBOL(___pskb_trim); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1308 | |
| 1309 | /** |
| 1310 | * __pskb_pull_tail - advance tail of skb header |
| 1311 | * @skb: buffer to reallocate |
| 1312 | * @delta: number of bytes to advance tail |
| 1313 | * |
| 1314 | * The function makes sense only on a fragmented &sk_buff: |
| 1315 | * it expands the header, moving its tail forward and copying the |
| 1316 | * necessary data from the fragmented part. |
| 1317 | * |
| 1318 | * The &sk_buff MUST have a reference count of 1. |
| 1319 | * |
| 1320 | * Returns %NULL (and the &sk_buff does not change) if the pull |
| 1321 | * failed, or the value of the new tail of the skb on success. |
| 1322 | * |
| 1323 | * All the pointers pointing into skb header may change and must be |
| 1324 | * reloaded after call to this function. |
| 1325 | */ |
| 1326 | |
| 1327 | /* Moves the tail of the skb head forward, copying data from the |
| 1328 | * fragmented part when necessary. |
| 1329 | * 1. It may fail due to allocation failure. |
| 1330 | * 2. It may change skb pointers. |
| 1331 | * |
| 1332 | * It is pretty complicated. Luckily, it is called only in exceptional cases. |
| 1333 | */ |
| 1334 | unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta) |
| 1335 | { |
| 1336 | /* If the skb does not have enough free space at the tail, get a new |
| 1337 | * one plus 128 bytes for future expansions. If we have enough |
| 1338 | * room at the tail, reallocate without expansion only if the skb is cloned. |
| 1339 | */ |
Arnaldo Carvalho de Melo | 4305b54 | 2007-04-19 20:43:29 -0700 | [diff] [blame] | 1340 | int i, k, eat = (skb->tail + delta) - skb->end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1341 | |
| 1342 | if (eat > 0 || skb_cloned(skb)) { |
| 1343 | if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, |
| 1344 | GFP_ATOMIC)) |
| 1345 | return NULL; |
| 1346 | } |
| 1347 | |
Arnaldo Carvalho de Melo | 27a884d | 2007-04-19 20:29:13 -0700 | [diff] [blame] | 1348 | if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1349 | BUG(); |
| 1350 | |
| 1351 | /* Optimization: no fragments, no reason to pre-estimate the |
| 1352 | * size of the pulled pages. Superb. |
| 1353 | */ |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 1354 | if (!skb_has_frag_list(skb)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1355 | goto pull_pages; |
| 1356 | |
| 1357 | /* Estimate size of pulled pages. */ |
| 1358 | eat = delta; |
| 1359 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 1360 | int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); |
| 1361 | |
| 1362 | if (size >= eat) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1363 | goto pull_pages; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 1364 | eat -= size; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1365 | } |
| 1366 | |
| 1367 | /* If we need to update the frag list, we are in trouble. |
| 1368 | * Certainly, it is possible to add an offset to the skb data, |
| 1369 | * but taking into account that pulling is expected to |
| 1370 | * be a very rare operation, it is worth fighting against |
| 1371 | * further bloating of the skb head and crucifying ourselves here instead. |
| 1372 | * Pure masochism, indeed. 8)8) |
| 1373 | */ |
| 1374 | if (eat) { |
| 1375 | struct sk_buff *list = skb_shinfo(skb)->frag_list; |
| 1376 | struct sk_buff *clone = NULL; |
| 1377 | struct sk_buff *insp = NULL; |
| 1378 | |
| 1379 | do { |
Kris Katterjohn | 09a6266 | 2006-01-08 22:24:28 -0800 | [diff] [blame] | 1380 | BUG_ON(!list); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1381 | |
| 1382 | if (list->len <= eat) { |
| 1383 | /* Eaten as a whole. */ |
| 1384 | eat -= list->len; |
| 1385 | list = list->next; |
| 1386 | insp = list; |
| 1387 | } else { |
| 1388 | /* Eaten partially. */ |
| 1389 | |
| 1390 | if (skb_shared(list)) { |
| 1391 | /* Sucks! We need to fork the list. :-( */ |
| 1392 | clone = skb_clone(list, GFP_ATOMIC); |
| 1393 | if (!clone) |
| 1394 | return NULL; |
| 1395 | insp = list->next; |
| 1396 | list = clone; |
| 1397 | } else { |
| 1398 | /* This may be pulled without |
| 1399 | * problems. */ |
| 1400 | insp = list; |
| 1401 | } |
| 1402 | if (!pskb_pull(list, eat)) { |
Wei Yongjun | f3fbbe0 | 2009-02-25 00:37:32 +0000 | [diff] [blame] | 1403 | kfree_skb(clone); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1404 | return NULL; |
| 1405 | } |
| 1406 | break; |
| 1407 | } |
| 1408 | } while (eat); |
| 1409 | |
| 1410 | /* Free pulled out fragments. */ |
| 1411 | while ((list = skb_shinfo(skb)->frag_list) != insp) { |
| 1412 | skb_shinfo(skb)->frag_list = list->next; |
| 1413 | kfree_skb(list); |
| 1414 | } |
| 1415 | /* And insert new clone at head. */ |
| 1416 | if (clone) { |
| 1417 | clone->next = list; |
| 1418 | skb_shinfo(skb)->frag_list = clone; |
| 1419 | } |
| 1420 | } |
| 1421 | /* Success! Now we may commit changes to skb data. */ |
| 1422 | |
| 1423 | pull_pages: |
| 1424 | eat = delta; |
| 1425 | k = 0; |
| 1426 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 1427 | int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); |
| 1428 | |
| 1429 | if (size <= eat) { |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 1430 | skb_frag_unref(skb, i); |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 1431 | eat -= size; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1432 | } else { |
| 1433 | skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i]; |
| 1434 | if (eat) { |
| 1435 | skb_shinfo(skb)->frags[k].page_offset += eat; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 1436 | skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1437 | eat = 0; |
| 1438 | } |
| 1439 | k++; |
| 1440 | } |
| 1441 | } |
| 1442 | skb_shinfo(skb)->nr_frags = k; |
| 1443 | |
| 1444 | skb->tail += delta; |
| 1445 | skb->data_len -= delta; |
| 1446 | |
Arnaldo Carvalho de Melo | 27a884d | 2007-04-19 20:29:13 -0700 | [diff] [blame] | 1447 | return skb_tail_pointer(skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1448 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1449 | EXPORT_SYMBOL(__pskb_pull_tail); |
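/*
 * Editor's illustrative sketch, not part of skbuff.c: __pskb_pull_tail()
 * is normally reached through pskb_may_pull(), which makes sure at
 * least this many bytes are linear before a header is read. Any cached
 * pointers into the skb must be reloaded afterwards. Assumes
 * <linux/ip.h> for struct iphdr.
 */
#if 0
static int example_ip_proto(struct sk_buff *skb)
{
	const struct iphdr *iph;

	if (!pskb_may_pull(skb, sizeof(*iph)))
		return -EINVAL;			/* truncated packet */
	iph = (const struct iphdr *)skb->data;	/* reload after the pull */
	return iph->protocol;
}
#endif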
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1450 | |
Eric Dumazet | 22019b1 | 2011-07-29 18:37:31 +0000 | [diff] [blame] | 1451 | /** |
| 1452 | * skb_copy_bits - copy bits from skb to kernel buffer |
| 1453 | * @skb: source skb |
| 1454 | * @offset: offset in source |
| 1455 | * @to: destination buffer |
| 1456 | * @len: number of bytes to copy |
| 1457 | * |
| 1458 | * Copy the specified number of bytes from the source skb to the |
| 1459 | * destination buffer. |
| 1460 | * |
| 1461 | * CAUTION!: |
| 1462 | * If its prototype is ever changed, |
| 1463 | * check arch/{*}/net/{*}.S files, |
| 1464 | * since it is called from BPF assembly code. |
| 1465 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1466 | int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) |
| 1467 | { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1468 | int start = skb_headlen(skb); |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1469 | struct sk_buff *frag_iter; |
| 1470 | int i, copy; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1471 | |
| 1472 | if (offset > (int)skb->len - len) |
| 1473 | goto fault; |
| 1474 | |
| 1475 | /* Copy header. */ |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1476 | if ((copy = start - offset) > 0) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1477 | if (copy > len) |
| 1478 | copy = len; |
Arnaldo Carvalho de Melo | d626f62 | 2007-03-27 18:55:52 -0300 | [diff] [blame] | 1479 | skb_copy_from_linear_data_offset(skb, offset, to, copy); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1480 | if ((len -= copy) == 0) |
| 1481 | return 0; |
| 1482 | offset += copy; |
| 1483 | to += copy; |
| 1484 | } |
| 1485 | |
| 1486 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1487 | int end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1488 | |
Ilpo Järvinen | 547b792 | 2008-07-25 21:43:18 -0700 | [diff] [blame] | 1489 | WARN_ON(start > offset + len); |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1490 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 1491 | end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1492 | if ((copy = end - offset) > 0) { |
| 1493 | u8 *vaddr; |
| 1494 | |
| 1495 | if (copy > len) |
| 1496 | copy = len; |
| 1497 | |
| 1498 | vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]); |
| 1499 | memcpy(to, |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1500 | vaddr + skb_shinfo(skb)->frags[i].page_offset + |
| 1501 | offset - start, copy); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1502 | kunmap_skb_frag(vaddr); |
| 1503 | |
| 1504 | if ((len -= copy) == 0) |
| 1505 | return 0; |
| 1506 | offset += copy; |
| 1507 | to += copy; |
| 1508 | } |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1509 | start = end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1510 | } |
| 1511 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1512 | skb_walk_frags(skb, frag_iter) { |
| 1513 | int end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1514 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1515 | WARN_ON(start > offset + len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1516 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1517 | end = start + frag_iter->len; |
| 1518 | if ((copy = end - offset) > 0) { |
| 1519 | if (copy > len) |
| 1520 | copy = len; |
| 1521 | if (skb_copy_bits(frag_iter, offset - start, to, copy)) |
| 1522 | goto fault; |
| 1523 | if ((len -= copy) == 0) |
| 1524 | return 0; |
| 1525 | offset += copy; |
| 1526 | to += copy; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1527 | } |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1528 | start = end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1529 | } |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1530 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1531 | if (!len) |
| 1532 | return 0; |
| 1533 | |
| 1534 | fault: |
| 1535 | return -EFAULT; |
| 1536 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1537 | EXPORT_SYMBOL(skb_copy_bits); |
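/*
 * Editor's illustrative sketch, not part of skbuff.c: pulling a fixed
 * region into a local buffer without caring how the skb is split
 * between linear data, page frags and the frag list.
 */
#if 0
static int example_peek(const struct sk_buff *skb, int offset,
			void *buf, int len)
{
	return skb_copy_bits(skb, offset, buf, len);	/* 0 or -EFAULT */
}
#endif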
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1538 | |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1539 | /* |
| 1540 | * Callback from splice_to_pipe(), if we need to release some pages |
| 1541 | * at the end of the spd in case we errored out while filling the pipe. |
| 1542 | */ |
| 1543 | static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) |
| 1544 | { |
Jarek Poplawski | 8b9d372 | 2009-01-19 17:03:56 -0800 | [diff] [blame] | 1545 | put_page(spd->pages[i]); |
| 1546 | } |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1547 | |
Jarek Poplawski | 4fb6699 | 2009-02-01 00:41:42 -0800 | [diff] [blame] | 1548 | static inline struct page *linear_to_page(struct page *page, unsigned int *len, |
| 1549 | unsigned int *offset, |
Jarek Poplawski | 7a67e56 | 2009-04-30 05:41:19 -0700 | [diff] [blame] | 1550 | struct sk_buff *skb, struct sock *sk) |
Jarek Poplawski | 8b9d372 | 2009-01-19 17:03:56 -0800 | [diff] [blame] | 1551 | { |
Jarek Poplawski | 4fb6699 | 2009-02-01 00:41:42 -0800 | [diff] [blame] | 1552 | struct page *p = sk->sk_sndmsg_page; |
| 1553 | unsigned int off; |
Jarek Poplawski | 8b9d372 | 2009-01-19 17:03:56 -0800 | [diff] [blame] | 1554 | |
Jarek Poplawski | 4fb6699 | 2009-02-01 00:41:42 -0800 | [diff] [blame] | 1555 | if (!p) { |
| 1556 | new_page: |
| 1557 | p = sk->sk_sndmsg_page = alloc_pages(sk->sk_allocation, 0); |
| 1558 | if (!p) |
| 1559 | return NULL; |
| 1560 | |
| 1561 | off = sk->sk_sndmsg_off = 0; |
| 1562 | /* hold one ref to this page until it's full */ |
| 1563 | } else { |
| 1564 | unsigned int mlen; |
| 1565 | |
| 1566 | off = sk->sk_sndmsg_off; |
| 1567 | mlen = PAGE_SIZE - off; |
| 1568 | if (mlen < 64 && mlen < *len) { |
| 1569 | put_page(p); |
| 1570 | goto new_page; |
| 1571 | } |
| 1572 | |
| 1573 | *len = min_t(unsigned int, *len, mlen); |
| 1574 | } |
| 1575 | |
| 1576 | memcpy(page_address(p) + off, page_address(page) + *offset, *len); |
| 1577 | sk->sk_sndmsg_off += *len; |
| 1578 | *offset = off; |
| 1579 | get_page(p); |
Jarek Poplawski | 8b9d372 | 2009-01-19 17:03:56 -0800 | [diff] [blame] | 1580 | |
| 1581 | return p; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1582 | } |
| 1583 | |
| 1584 | /* |
| 1585 | * Fill page/offset/length into spd, if it can hold more pages. |
| 1586 | */ |
Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 1587 | static inline int spd_fill_page(struct splice_pipe_desc *spd, |
| 1588 | struct pipe_inode_info *pipe, struct page *page, |
Jarek Poplawski | 4fb6699 | 2009-02-01 00:41:42 -0800 | [diff] [blame] | 1589 | unsigned int *len, unsigned int offset, |
Jarek Poplawski | 7a67e56 | 2009-04-30 05:41:19 -0700 | [diff] [blame] | 1590 | struct sk_buff *skb, int linear, |
| 1591 | struct sock *sk) |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1592 | { |
Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 1593 | if (unlikely(spd->nr_pages == pipe->buffers)) |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1594 | return 1; |
| 1595 | |
Jarek Poplawski | 8b9d372 | 2009-01-19 17:03:56 -0800 | [diff] [blame] | 1596 | if (linear) { |
Jarek Poplawski | 7a67e56 | 2009-04-30 05:41:19 -0700 | [diff] [blame] | 1597 | page = linear_to_page(page, len, &offset, skb, sk); |
Jarek Poplawski | 8b9d372 | 2009-01-19 17:03:56 -0800 | [diff] [blame] | 1598 | if (!page) |
| 1599 | return 1; |
| 1600 | } else |
| 1601 | get_page(page); |
| 1602 | |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1603 | spd->pages[spd->nr_pages] = page; |
Jarek Poplawski | 4fb6699 | 2009-02-01 00:41:42 -0800 | [diff] [blame] | 1604 | spd->partial[spd->nr_pages].len = *len; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1605 | spd->partial[spd->nr_pages].offset = offset; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1606 | spd->nr_pages++; |
Jarek Poplawski | 8b9d372 | 2009-01-19 17:03:56 -0800 | [diff] [blame] | 1607 | |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1608 | return 0; |
| 1609 | } |
| 1610 | |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 1611 | static inline void __segment_seek(struct page **page, unsigned int *poff, |
| 1612 | unsigned int *plen, unsigned int off) |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1613 | { |
Jarek Poplawski | ce3dd39 | 2009-02-12 16:51:43 -0800 | [diff] [blame] | 1614 | unsigned long n; |
| 1615 | |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 1616 | *poff += off; |
Jarek Poplawski | ce3dd39 | 2009-02-12 16:51:43 -0800 | [diff] [blame] | 1617 | n = *poff / PAGE_SIZE; |
| 1618 | if (n) |
| 1619 | *page = nth_page(*page, n); |
| 1620 | |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 1621 | *poff = *poff % PAGE_SIZE; |
| 1622 | *plen -= off; |
| 1623 | } |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1624 | |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 1625 | static inline int __splice_segment(struct page *page, unsigned int poff, |
| 1626 | unsigned int plen, unsigned int *off, |
| 1627 | unsigned int *len, struct sk_buff *skb, |
Jarek Poplawski | 7a67e56 | 2009-04-30 05:41:19 -0700 | [diff] [blame] | 1628 | struct splice_pipe_desc *spd, int linear, |
Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 1629 | struct sock *sk, |
| 1630 | struct pipe_inode_info *pipe) |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 1631 | { |
| 1632 | if (!*len) |
| 1633 | return 1; |
| 1634 | |
| 1635 | /* skip this segment if already processed */ |
| 1636 | if (*off >= plen) { |
| 1637 | *off -= plen; |
| 1638 | return 0; |
Octavian Purdila | db43a28 | 2008-06-27 17:27:21 -0700 | [diff] [blame] | 1639 | } |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1640 | |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 1641 | /* ignore any bits we already processed */ |
| 1642 | if (*off) { |
| 1643 | __segment_seek(&page, &poff, &plen, *off); |
| 1644 | *off = 0; |
| 1645 | } |
| 1646 | |
| 1647 | do { |
| 1648 | unsigned int flen = min(*len, plen); |
| 1649 | |
| 1650 | /* the linear region may spread across several pages */ |
| 1651 | flen = min_t(unsigned int, flen, PAGE_SIZE - poff); |
| 1652 | |
Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 1653 | if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk)) |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 1654 | return 1; |
| 1655 | |
| 1656 | __segment_seek(&page, &poff, &plen, flen); |
| 1657 | *len -= flen; |
| 1658 | |
| 1659 | } while (*len && plen); |
| 1660 | |
| 1661 | return 0; |
| 1662 | } |
| 1663 | |
| 1664 | /* |
| 1665 | * Map linear and fragment data from the skb to spd. It reports failure if the |
| 1666 | * pipe is full or if we already spliced the requested length. |
| 1667 | */ |
Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 1668 | static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, |
| 1669 | unsigned int *offset, unsigned int *len, |
| 1670 | struct splice_pipe_desc *spd, struct sock *sk) |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 1671 | { |
| 1672 | int seg; |
| 1673 | |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1674 | /* |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 1675 | * map the linear part |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1676 | */ |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 1677 | if (__splice_segment(virt_to_page(skb->data), |
| 1678 | (unsigned long) skb->data & (PAGE_SIZE - 1), |
| 1679 | skb_headlen(skb), |
Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 1680 | offset, len, skb, spd, 1, sk, pipe)) |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 1681 | return 1; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1682 | |
| 1683 | /* |
| 1684 | * then map the fragments |
| 1685 | */ |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1686 | for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { |
| 1687 | const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; |
| 1688 | |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 1689 | if (__splice_segment(skb_frag_page(f), |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 1690 | f->page_offset, skb_frag_size(f), |
Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 1691 | offset, len, skb, spd, 0, sk, pipe)) |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 1692 | return 1; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1693 | } |
| 1694 | |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 1695 | return 0; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1696 | } |
| 1697 | |
| 1698 | /* |
| 1699 | * Map data from the skb to a pipe. Should handle the linear part, |
| 1700 | * the fragments, and the frag list. It does NOT handle frag lists within |
| 1701 | * the frag list, if such a thing exists. We'd probably need to recurse to |
| 1702 | * handle that cleanly. |
| 1703 | */ |
Jarek Poplawski | 8b9d372 | 2009-01-19 17:03:56 -0800 | [diff] [blame] | 1704 | int skb_splice_bits(struct sk_buff *skb, unsigned int offset, |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1705 | struct pipe_inode_info *pipe, unsigned int tlen, |
| 1706 | unsigned int flags) |
| 1707 | { |
Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 1708 | struct partial_page partial[PIPE_DEF_BUFFERS]; |
| 1709 | struct page *pages[PIPE_DEF_BUFFERS]; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1710 | struct splice_pipe_desc spd = { |
| 1711 | .pages = pages, |
| 1712 | .partial = partial, |
| 1713 | .flags = flags, |
| 1714 | .ops = &sock_pipe_buf_ops, |
| 1715 | .spd_release = sock_spd_release, |
| 1716 | }; |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1717 | struct sk_buff *frag_iter; |
Jarek Poplawski | 7a67e56 | 2009-04-30 05:41:19 -0700 | [diff] [blame] | 1718 | struct sock *sk = skb->sk; |
Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 1719 | int ret = 0; |
| 1720 | |
| 1721 | if (splice_grow_spd(pipe, &spd)) |
| 1722 | return -ENOMEM; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1723 | |
| 1724 | /* |
| 1725 | * __skb_splice_bits() only fails if the output has no room left, |
| 1726 | * so no point in going over the frag_list for the error case. |
| 1727 | */ |
Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 1728 | if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk)) |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1729 | goto done; |
| 1730 | else if (!tlen) |
| 1731 | goto done; |
| 1732 | |
| 1733 | /* |
| 1734 | * now see if we have a frag_list to map |
| 1735 | */ |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1736 | skb_walk_frags(skb, frag_iter) { |
| 1737 | if (!tlen) |
| 1738 | break; |
Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 1739 | if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk)) |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1740 | break; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1741 | } |
| 1742 | |
| 1743 | done: |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1744 | if (spd.nr_pages) { |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1745 | /* |
| 1746 | * Drop the socket lock, otherwise we have reverse |
| 1747 | * locking dependencies between sk_lock and i_mutex |
| 1748 | * here as compared to sendfile(). We enter here |
| 1749 | * with the socket lock held, and splice_to_pipe() will |
| 1750 | * grab the pipe inode lock. For sendfile() emulation, |
| 1751 | * we call into ->sendpage() with the i_mutex lock held |
| 1752 | * and networking will grab the socket lock. |
| 1753 | */ |
Octavian Purdila | 293ad60 | 2008-06-04 15:45:58 -0700 | [diff] [blame] | 1754 | release_sock(sk); |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1755 | ret = splice_to_pipe(pipe, &spd); |
Octavian Purdila | 293ad60 | 2008-06-04 15:45:58 -0700 | [diff] [blame] | 1756 | lock_sock(sk); |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1757 | } |
| 1758 | |
Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 1759 | splice_shrink_spd(pipe, &spd); |
| 1760 | return ret; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1761 | } |
| 1762 | |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 1763 | /** |
| 1764 | * skb_store_bits - store bits from kernel buffer to skb |
| 1765 | * @skb: destination buffer |
| 1766 | * @offset: offset in destination |
| 1767 | * @from: source buffer |
| 1768 | * @len: number of bytes to copy |
| 1769 | * |
| 1770 | * Copy the specified number of bytes from the source buffer to the |
| 1771 | * destination skb. This function handles all the messy bits of |
| 1772 | * traversing fragment lists and such. |
| 1773 | */ |
| 1774 | |
Stephen Hemminger | 0c6fcc8 | 2007-04-20 16:40:01 -0700 | [diff] [blame] | 1775 | int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 1776 | { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1777 | int start = skb_headlen(skb); |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1778 | struct sk_buff *frag_iter; |
| 1779 | int i, copy; |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 1780 | |
| 1781 | if (offset > (int)skb->len - len) |
| 1782 | goto fault; |
| 1783 | |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1784 | if ((copy = start - offset) > 0) { |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 1785 | if (copy > len) |
| 1786 | copy = len; |
Arnaldo Carvalho de Melo | 27d7ff4 | 2007-03-31 11:55:19 -0300 | [diff] [blame] | 1787 | skb_copy_to_linear_data_offset(skb, offset, from, copy); |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 1788 | if ((len -= copy) == 0) |
| 1789 | return 0; |
| 1790 | offset += copy; |
| 1791 | from += copy; |
| 1792 | } |
| 1793 | |
| 1794 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
| 1795 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1796 | int end; |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 1797 | |
Ilpo Järvinen | 547b792 | 2008-07-25 21:43:18 -0700 | [diff] [blame] | 1798 | WARN_ON(start > offset + len); |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1799 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 1800 | end = start + skb_frag_size(frag); |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 1801 | if ((copy = end - offset) > 0) { |
| 1802 | u8 *vaddr; |
| 1803 | |
| 1804 | if (copy > len) |
| 1805 | copy = len; |
| 1806 | |
| 1807 | vaddr = kmap_skb_frag(frag); |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1808 | memcpy(vaddr + frag->page_offset + offset - start, |
| 1809 | from, copy); |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 1810 | kunmap_skb_frag(vaddr); |
| 1811 | |
| 1812 | if ((len -= copy) == 0) |
| 1813 | return 0; |
| 1814 | offset += copy; |
| 1815 | from += copy; |
| 1816 | } |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1817 | start = end; |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 1818 | } |
| 1819 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1820 | skb_walk_frags(skb, frag_iter) { |
| 1821 | int end; |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 1822 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1823 | WARN_ON(start > offset + len); |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 1824 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1825 | end = start + frag_iter->len; |
| 1826 | if ((copy = end - offset) > 0) { |
| 1827 | if (copy > len) |
| 1828 | copy = len; |
| 1829 | if (skb_store_bits(frag_iter, offset - start, |
| 1830 | from, copy)) |
| 1831 | goto fault; |
| 1832 | if ((len -= copy) == 0) |
| 1833 | return 0; |
| 1834 | offset += copy; |
| 1835 | from += copy; |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 1836 | } |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1837 | start = end; |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 1838 | } |
| 1839 | if (!len) |
| 1840 | return 0; |
| 1841 | |
| 1842 | fault: |
| 1843 | return -EFAULT; |
| 1844 | } |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 1845 | EXPORT_SYMBOL(skb_store_bits); |
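/*
 * Editor's illustrative sketch, not part of skbuff.c: the write-side
 * twin of skb_copy_bits(), patching one byte wherever @offset lands.
 * The caller is responsible for holding a private, writable skb
 * (e.g. via skb_cow()).
 */
#if 0
static int example_patch_byte(struct sk_buff *skb, int offset, u8 val)
{
	return skb_store_bits(skb, offset, &val, 1);
}
#endif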
| 1846 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1847 | /* Checksum skb data. */ |
| 1848 | |
Al Viro | 2bbbc86 | 2006-11-14 21:37:14 -0800 | [diff] [blame] | 1849 | __wsum skb_checksum(const struct sk_buff *skb, int offset, |
| 1850 | int len, __wsum csum) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1851 | { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1852 | int start = skb_headlen(skb); |
| 1853 | int i, copy = start - offset; |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1854 | struct sk_buff *frag_iter; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1855 | int pos = 0; |
| 1856 | |
| 1857 | /* Checksum header. */ |
| 1858 | if (copy > 0) { |
| 1859 | if (copy > len) |
| 1860 | copy = len; |
| 1861 | csum = csum_partial(skb->data + offset, copy, csum); |
| 1862 | if ((len -= copy) == 0) |
| 1863 | return csum; |
| 1864 | offset += copy; |
| 1865 | pos = copy; |
| 1866 | } |
| 1867 | |
| 1868 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1869 | int end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1870 | |
Ilpo Järvinen | 547b792 | 2008-07-25 21:43:18 -0700 | [diff] [blame] | 1871 | WARN_ON(start > offset + len); |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1872 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 1873 | end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1874 | if ((copy = end - offset) > 0) { |
Al Viro | 44bb936 | 2006-11-14 21:36:14 -0800 | [diff] [blame] | 1875 | __wsum csum2; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1876 | u8 *vaddr; |
| 1877 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
| 1878 | |
| 1879 | if (copy > len) |
| 1880 | copy = len; |
| 1881 | vaddr = kmap_skb_frag(frag); |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1882 | csum2 = csum_partial(vaddr + frag->page_offset + |
| 1883 | offset - start, copy, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1884 | kunmap_skb_frag(vaddr); |
| 1885 | csum = csum_block_add(csum, csum2, pos); |
| 1886 | if (!(len -= copy)) |
| 1887 | return csum; |
| 1888 | offset += copy; |
| 1889 | pos += copy; |
| 1890 | } |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1891 | start = end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1892 | } |
| 1893 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1894 | skb_walk_frags(skb, frag_iter) { |
| 1895 | int end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1896 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1897 | WARN_ON(start > offset + len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1898 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1899 | end = start + frag_iter->len; |
| 1900 | if ((copy = end - offset) > 0) { |
| 1901 | __wsum csum2; |
| 1902 | if (copy > len) |
| 1903 | copy = len; |
| 1904 | csum2 = skb_checksum(frag_iter, offset - start, |
| 1905 | copy, 0); |
| 1906 | csum = csum_block_add(csum, csum2, pos); |
| 1907 | if ((len -= copy) == 0) |
| 1908 | return csum; |
| 1909 | offset += copy; |
| 1910 | pos += copy; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1911 | } |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1912 | start = end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1913 | } |
Kris Katterjohn | 09a6266 | 2006-01-08 22:24:28 -0800 | [diff] [blame] | 1914 | BUG_ON(len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1915 | |
| 1916 | return csum; |
| 1917 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1918 | EXPORT_SYMBOL(skb_checksum); |
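/*
 * Editor's illustrative sketch, not part of skbuff.c: checksumming
 * everything from @thoff to the end of the packet and folding the
 * 32-bit running sum into the final 16-bit form.
 */
#if 0
static __sum16 example_csum(const struct sk_buff *skb, int thoff)
{
	__wsum csum = skb_checksum(skb, thoff, skb->len - thoff, 0);

	return csum_fold(csum);		/* fold and complement to 16 bits */
}
#endif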
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1919 | |
| 1920 | /* Both of the above in one bottle. */ |
| 1921 | |
Al Viro | 81d7766 | 2006-11-14 21:37:33 -0800 | [diff] [blame] | 1922 | __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, |
| 1923 | u8 *to, int len, __wsum csum) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1924 | { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1925 | int start = skb_headlen(skb); |
| 1926 | int i, copy = start - offset; |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1927 | struct sk_buff *frag_iter; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1928 | int pos = 0; |
| 1929 | |
| 1930 | /* Copy header. */ |
| 1931 | if (copy > 0) { |
| 1932 | if (copy > len) |
| 1933 | copy = len; |
| 1934 | csum = csum_partial_copy_nocheck(skb->data + offset, to, |
| 1935 | copy, csum); |
| 1936 | if ((len -= copy) == 0) |
| 1937 | return csum; |
| 1938 | offset += copy; |
| 1939 | to += copy; |
| 1940 | pos = copy; |
| 1941 | } |
| 1942 | |
| 1943 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1944 | int end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1945 | |
Ilpo Järvinen | 547b792 | 2008-07-25 21:43:18 -0700 | [diff] [blame] | 1946 | WARN_ON(start > offset + len); |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1947 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 1948 | end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1949 | if ((copy = end - offset) > 0) { |
Al Viro | 5084205 | 2006-11-14 21:36:34 -0800 | [diff] [blame] | 1950 | __wsum csum2; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1951 | u8 *vaddr; |
| 1952 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
| 1953 | |
| 1954 | if (copy > len) |
| 1955 | copy = len; |
| 1956 | vaddr = kmap_skb_frag(frag); |
| 1957 | csum2 = csum_partial_copy_nocheck(vaddr + |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1958 | frag->page_offset + |
| 1959 | offset - start, to, |
| 1960 | copy, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1961 | kunmap_skb_frag(vaddr); |
| 1962 | csum = csum_block_add(csum, csum2, pos); |
| 1963 | if (!(len -= copy)) |
| 1964 | return csum; |
| 1965 | offset += copy; |
| 1966 | to += copy; |
| 1967 | pos += copy; |
| 1968 | } |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1969 | start = end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1970 | } |
| 1971 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1972 | skb_walk_frags(skb, frag_iter) { |
| 1973 | __wsum csum2; |
| 1974 | int end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1975 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1976 | WARN_ON(start > offset + len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1977 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1978 | end = start + frag_iter->len; |
| 1979 | if ((copy = end - offset) > 0) { |
| 1980 | if (copy > len) |
| 1981 | copy = len; |
| 1982 | csum2 = skb_copy_and_csum_bits(frag_iter, |
| 1983 | offset - start, |
| 1984 | to, copy, 0); |
| 1985 | csum = csum_block_add(csum, csum2, pos); |
| 1986 | if ((len -= copy) == 0) |
| 1987 | return csum; |
| 1988 | offset += copy; |
| 1989 | to += copy; |
| 1990 | pos += copy; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1991 | } |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1992 | start = end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1993 | } |
Kris Katterjohn | 09a6266 | 2006-01-08 22:24:28 -0800 | [diff] [blame] | 1994 | BUG_ON(len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1995 | return csum; |
| 1996 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1997 | EXPORT_SYMBOL(skb_copy_and_csum_bits); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1998 | |
| 1999 | void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) |
| 2000 | { |
Al Viro | d3bc23e | 2006-11-14 21:24:49 -0800 | [diff] [blame] | 2001 | __wsum csum; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2002 | long csstart; |
| 2003 | |
Patrick McHardy | 84fa793 | 2006-08-29 16:44:56 -0700 | [diff] [blame] | 2004 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
Michał Mirosław | 55508d6 | 2010-12-14 15:24:08 +0000 | [diff] [blame] | 2005 | csstart = skb_checksum_start_offset(skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2006 | else |
| 2007 | csstart = skb_headlen(skb); |
| 2008 | |
Kris Katterjohn | 09a6266 | 2006-01-08 22:24:28 -0800 | [diff] [blame] | 2009 | BUG_ON(csstart > skb_headlen(skb)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2010 | |
Arnaldo Carvalho de Melo | d626f62 | 2007-03-27 18:55:52 -0300 | [diff] [blame] | 2011 | skb_copy_from_linear_data(skb, to, csstart); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2012 | |
| 2013 | csum = 0; |
| 2014 | if (csstart != skb->len) |
| 2015 | csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, |
| 2016 | skb->len - csstart, 0); |
| 2017 | |
Patrick McHardy | 84fa793 | 2006-08-29 16:44:56 -0700 | [diff] [blame] | 2018 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
Al Viro | ff1dcad | 2006-11-20 18:07:29 -0800 | [diff] [blame] | 2019 | long csstuff = csstart + skb->csum_offset; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2020 | |
Al Viro | d3bc23e | 2006-11-14 21:24:49 -0800 | [diff] [blame] | 2021 | *((__sum16 *)(to + csstuff)) = csum_fold(csum); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2022 | } |
| 2023 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2024 | EXPORT_SYMBOL(skb_copy_and_csum_dev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2025 | |
| 2026 | /** |
| 2027 | * skb_dequeue - remove from the head of the queue |
| 2028 | * @list: list to dequeue from |
| 2029 | * |
| 2030 | * Remove the head of the list. The list lock is taken so the function |
| 2031 | * may be used safely with other locking list functions. The head item is |
| 2032 | * returned or %NULL if the list is empty. |
| 2033 | */ |
| 2034 | |
| 2035 | struct sk_buff *skb_dequeue(struct sk_buff_head *list) |
| 2036 | { |
| 2037 | unsigned long flags; |
| 2038 | struct sk_buff *result; |
| 2039 | |
| 2040 | spin_lock_irqsave(&list->lock, flags); |
| 2041 | result = __skb_dequeue(list); |
| 2042 | spin_unlock_irqrestore(&list->lock, flags); |
| 2043 | return result; |
| 2044 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2045 | EXPORT_SYMBOL(skb_dequeue); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2046 | |
| 2047 | /** |
| 2048 | * skb_dequeue_tail - remove from the tail of the queue |
| 2049 | * @list: list to dequeue from |
| 2050 | * |
| 2051 | * Remove the tail of the list. The list lock is taken so the function |
| 2052 | * may be used safely with other locking list functions. The tail item is |
| 2053 | * returned or %NULL if the list is empty. |
| 2054 | */ |
| 2055 | struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list) |
| 2056 | { |
| 2057 | unsigned long flags; |
| 2058 | struct sk_buff *result; |
| 2059 | |
| 2060 | spin_lock_irqsave(&list->lock, flags); |
| 2061 | result = __skb_dequeue_tail(list); |
| 2062 | spin_unlock_irqrestore(&list->lock, flags); |
| 2063 | return result; |
| 2064 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2065 | EXPORT_SYMBOL(skb_dequeue_tail); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2066 | |
| 2067 | /** |
| 2068 | * skb_queue_purge - empty a list |
| 2069 | * @list: list to empty |
| 2070 | * |
| 2071 | * Delete all buffers on an &sk_buff list. Each buffer is removed from |
| 2072 | * the list and one reference is dropped. This function takes the list |
| 2073 | * lock and is atomic with respect to other list locking functions. |
| 2074 | */ |
| 2075 | void skb_queue_purge(struct sk_buff_head *list) |
| 2076 | { |
| 2077 | struct sk_buff *skb; |
| 2078 | while ((skb = skb_dequeue(list)) != NULL) |
| 2079 | kfree_skb(skb); |
| 2080 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2081 | EXPORT_SYMBOL(skb_queue_purge); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2082 | |
| 2083 | /** |
| 2084 | * skb_queue_head - queue a buffer at the list head |
| 2085 | * @list: list to use |
| 2086 | * @newsk: buffer to queue |
| 2087 | * |
| 2088 | * Queue a buffer at the start of the list. This function takes the |
| 2089 | * list lock and can be used safely with other locking &sk_buff
| 2090 | * functions.
| 2091 | * |
| 2092 | * A buffer cannot be placed on two lists at the same time. |
| 2093 | */ |
| 2094 | void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) |
| 2095 | { |
| 2096 | unsigned long flags; |
| 2097 | |
| 2098 | spin_lock_irqsave(&list->lock, flags); |
| 2099 | __skb_queue_head(list, newsk); |
| 2100 | spin_unlock_irqrestore(&list->lock, flags); |
| 2101 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2102 | EXPORT_SYMBOL(skb_queue_head); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2103 | |
| 2104 | /** |
| 2105 | * skb_queue_tail - queue a buffer at the list tail |
| 2106 | * @list: list to use |
| 2107 | * @newsk: buffer to queue |
| 2108 | * |
| 2109 | * Queue a buffer at the tail of the list. This function takes the |
| 2110 | * list lock and can be used safely with other locking &sk_buff
| 2111 | * functions.
| 2112 | * |
| 2113 | * A buffer cannot be placed on two lists at the same time. |
| 2114 | */ |
| 2115 | void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) |
| 2116 | { |
| 2117 | unsigned long flags; |
| 2118 | |
| 2119 | spin_lock_irqsave(&list->lock, flags); |
| 2120 | __skb_queue_tail(list, newsk); |
| 2121 | spin_unlock_irqrestore(&list->lock, flags); |
| 2122 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2123 | EXPORT_SYMBOL(skb_queue_tail); |
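
/* Usage sketch (illustrative only, not part of this file): the classic
 * pairing these helpers enable -- a producer queueing from interrupt
 * context and a consumer draining from process context.  The function
 * names below are hypothetical.
 */
static void example_rx_irq(struct sk_buff_head *queue, struct sk_buff *skb)
{
	/* Safe from hard IRQ context: the list lock is taken irqsave. */
	skb_queue_tail(queue, skb);
}

static void example_drain(struct sk_buff_head *queue)
{
	struct sk_buff *skb;

	/* skb_dequeue() takes the same lock, so no extra locking is
	 * needed against the producer above.
	 */
	while ((skb = skb_dequeue(queue)) != NULL)
		kfree_skb(skb);	/* stand-in for real per-packet work */
}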
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2124 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2125 | /** |
| 2126 | * skb_unlink - remove a buffer from a list |
| 2127 | * @skb: buffer to remove |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2128 | * @list: list to use |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2129 | * |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2130 | * Remove a packet from a list. The list locks are taken and this |
| 2131 | * function is atomic with respect to other list locked calls.
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2132 | * |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2133 | * You must know what list the SKB is on. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2134 | */ |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2135 | void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2136 | { |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2137 | unsigned long flags; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2138 | |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2139 | spin_lock_irqsave(&list->lock, flags); |
| 2140 | __skb_unlink(skb, list); |
| 2141 | spin_unlock_irqrestore(&list->lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2142 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2143 | EXPORT_SYMBOL(skb_unlink); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2144 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2145 | /** |
| 2146 | * skb_append - append a buffer |
| 2147 | * @old: buffer to insert after |
| 2148 | * @newsk: buffer to insert |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2149 | * @list: list to use |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2150 | * |
| 2151 | * Place a packet after a given packet in a list. The list locks are taken |
| 2152 | * and this function is atomic with respect to other list locked calls. |
| 2153 | * A buffer cannot be placed on two lists at the same time. |
| 2154 | */ |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2155 | void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2156 | { |
| 2157 | unsigned long flags; |
| 2158 | |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2159 | spin_lock_irqsave(&list->lock, flags); |
Gerrit Renker | 7de6c03 | 2008-04-14 00:05:09 -0700 | [diff] [blame] | 2160 | __skb_queue_after(list, old, newsk); |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2161 | spin_unlock_irqrestore(&list->lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2162 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2163 | EXPORT_SYMBOL(skb_append); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2164 | |
| 2165 | /** |
| 2166 | * skb_insert - insert a buffer |
| 2167 | * @old: buffer to insert before |
| 2168 | * @newsk: buffer to insert |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2169 | * @list: list to use |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2170 | * |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2171 | * Place a packet before a given packet in a list. The list locks are |
| 2172 | * taken and this function is atomic with respect to other list locked |
| 2173 | * calls. |
| 2174 | * |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2175 | * A buffer cannot be placed on two lists at the same time. |
| 2176 | */ |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2177 | void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2178 | { |
| 2179 | unsigned long flags; |
| 2180 | |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2181 | spin_lock_irqsave(&list->lock, flags); |
| 2182 | __skb_insert(newsk, old->prev, old, list); |
| 2183 | spin_unlock_irqrestore(&list->lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2184 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2185 | EXPORT_SYMBOL(skb_insert); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2186 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2187 | static inline void skb_split_inside_header(struct sk_buff *skb, |
| 2188 | struct sk_buff *skb1,
| 2189 | const u32 len, const int pos) |
| 2190 | { |
| 2191 | int i; |
| 2192 | |
Arnaldo Carvalho de Melo | d626f62 | 2007-03-27 18:55:52 -0300 | [diff] [blame] | 2193 | skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), |
| 2194 | pos - len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2195 | /* And move data appendix as is. */ |
| 2196 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) |
| 2197 | skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; |
| 2198 | |
| 2199 | skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; |
| 2200 | skb_shinfo(skb)->nr_frags = 0; |
| 2201 | skb1->data_len = skb->data_len; |
| 2202 | skb1->len += skb1->data_len; |
| 2203 | skb->data_len = 0; |
| 2204 | skb->len = len; |
Arnaldo Carvalho de Melo | 27a884d | 2007-04-19 20:29:13 -0700 | [diff] [blame] | 2205 | skb_set_tail_pointer(skb, len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2206 | } |
| 2207 | |
| 2208 | static inline void skb_split_no_header(struct sk_buff *skb, |
| 2209 | struct sk_buff *skb1,
| 2210 | const u32 len, int pos) |
| 2211 | { |
| 2212 | int i, k = 0; |
| 2213 | const int nfrags = skb_shinfo(skb)->nr_frags; |
| 2214 | |
| 2215 | skb_shinfo(skb)->nr_frags = 0; |
| 2216 | skb1->len = skb1->data_len = skb->len - len; |
| 2217 | skb->len = len; |
| 2218 | skb->data_len = len - pos; |
| 2219 | |
| 2220 | for (i = 0; i < nfrags; i++) { |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2221 | int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2222 | |
| 2223 | if (pos + size > len) { |
| 2224 | skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; |
| 2225 | |
| 2226 | if (pos < len) { |
| 2227 | /* Split frag. |
| 2228 | * We have two variants in this case: |
| 2229 | * 1. Move all of the frag to the second
| 2230 | * part, if possible. E.g. this
| 2231 | * approach is mandatory for TUX,
| 2232 | * where splitting is expensive.
| 2233 | * 2. Split accurately; this is what we do.
| 2234 | */ |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 2235 | skb_frag_ref(skb, i); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2236 | skb_shinfo(skb1)->frags[0].page_offset += len - pos; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2237 | skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos); |
| 2238 | skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2239 | skb_shinfo(skb)->nr_frags++; |
| 2240 | } |
| 2241 | k++; |
| 2242 | } else |
| 2243 | skb_shinfo(skb)->nr_frags++; |
| 2244 | pos += size; |
| 2245 | } |
| 2246 | skb_shinfo(skb1)->nr_frags = k; |
| 2247 | } |
| 2248 | |
| 2249 | /** |
| 2250 | * skb_split - Split fragmented skb to two parts at length len. |
| 2251 | * @skb: the buffer to split |
| 2252 | * @skb1: the buffer to receive the second part |
| 2253 | * @len: new length for skb |
| 2254 | */ |
| 2255 | void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) |
| 2256 | { |
| 2257 | int pos = skb_headlen(skb); |
| 2258 | |
| 2259 | if (len < pos) /* Split line is inside header. */ |
| 2260 | skb_split_inside_header(skb, skb1, len, pos); |
| 2261 | else /* Second chunk has no header, nothing to copy. */ |
| 2262 | skb_split_no_header(skb, skb1, len, pos); |
| 2263 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2264 | EXPORT_SYMBOL(skb_split); |
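
/* Usage sketch (illustrative only, not part of this file): cutting an
 * oversized packet at an assumed MSS boundary, roughly as the TCP
 * output path does when it calls skb_split().  Header replication and
 * error handling are omitted; names are hypothetical.
 */
static struct sk_buff *example_cut(struct sk_buff *skb, u32 mss)
{
	struct sk_buff *rest;

	if (skb->len <= mss)
		return NULL;

	/* Reserve enough linear space for the worst case, i.e. the
	 * split point falling inside the linear header part of skb.
	 */
	rest = alloc_skb(skb_headlen(skb), GFP_ATOMIC);
	if (!rest)
		return NULL;

	skb_split(skb, rest, mss);	/* skb keeps the first mss bytes */
	return rest;			/* rest holds the remainder */
}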
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2265 | |
Ilpo Järvinen | 9f782db | 2008-11-25 13:57:01 -0800 | [diff] [blame] | 2266 | /* Shifting from/to a cloned skb is a no-go. |
| 2267 | * |
| 2268 | * Caller cannot keep skb_shinfo related pointers past calling here! |
| 2269 | */ |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 2270 | static int skb_prepare_for_shift(struct sk_buff *skb) |
| 2271 | { |
Ilpo Järvinen | 0ace285 | 2008-11-24 21:30:21 -0800 | [diff] [blame] | 2272 | return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 2273 | } |
| 2274 | |
| 2275 | /** |
| 2276 | * skb_shift - Shifts paged data partially from skb to another |
| 2277 | * @tgt: buffer into which tail data gets added |
| 2278 | * @skb: buffer from which the paged data comes from |
| 2279 | * @shiftlen: shift up to this many bytes |
| 2280 | * |
| 2281 | * Attempts to shift up to shiftlen worth of bytes, which may be less than |
Feng King | 20e994a | 2011-11-21 01:47:11 +0000 | [diff] [blame] | 2282 | * the length of the skb, from skb to tgt. Returns the number of bytes shifted.
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 2283 | * It's up to the caller to free skb if everything was shifted.
| 2284 | * |
| 2285 | * If @tgt runs out of frags, the whole operation is aborted. |
| 2286 | * |
| 2287 | * The skb must contain nothing but paged data, while tgt is allowed
| 2288 | * to have non-paged data as well. |
| 2289 | * |
| 2290 | * TODO: full sized shift could be optimized but that would need |
| 2291 | * specialized skb free'er to handle frags without up-to-date nr_frags. |
| 2292 | */ |
| 2293 | int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) |
| 2294 | { |
| 2295 | int from, to, merge, todo; |
| 2296 | struct skb_frag_struct *fragfrom, *fragto; |
| 2297 | |
| 2298 | BUG_ON(shiftlen > skb->len); |
| 2299 | BUG_ON(skb_headlen(skb)); /* Would corrupt stream */ |
| 2300 | |
| 2301 | todo = shiftlen; |
| 2302 | from = 0; |
| 2303 | to = skb_shinfo(tgt)->nr_frags; |
| 2304 | fragfrom = &skb_shinfo(skb)->frags[from]; |
| 2305 | |
| 2306 | /* Actual merge is delayed until the point when we know we can |
| 2307 | * commit all, so that we don't have to undo partial changes |
| 2308 | */ |
| 2309 | if (!to || |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 2310 | !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom), |
| 2311 | fragfrom->page_offset)) { |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 2312 | merge = -1; |
| 2313 | } else { |
| 2314 | merge = to - 1; |
| 2315 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2316 | todo -= skb_frag_size(fragfrom); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 2317 | if (todo < 0) { |
| 2318 | if (skb_prepare_for_shift(skb) || |
| 2319 | skb_prepare_for_shift(tgt)) |
| 2320 | return 0; |
| 2321 | |
Ilpo Järvinen | 9f782db | 2008-11-25 13:57:01 -0800 | [diff] [blame] | 2322 | /* All previous frag pointers might be stale! */ |
| 2323 | fragfrom = &skb_shinfo(skb)->frags[from]; |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 2324 | fragto = &skb_shinfo(tgt)->frags[merge]; |
| 2325 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2326 | skb_frag_size_add(fragto, shiftlen); |
| 2327 | skb_frag_size_sub(fragfrom, shiftlen); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 2328 | fragfrom->page_offset += shiftlen; |
| 2329 | |
| 2330 | goto onlymerged; |
| 2331 | } |
| 2332 | |
| 2333 | from++; |
| 2334 | } |
| 2335 | |
| 2336 | /* Skip full, not-fitting skb to avoid expensive operations */ |
| 2337 | if ((shiftlen == skb->len) && |
| 2338 | (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) |
| 2339 | return 0; |
| 2340 | |
| 2341 | if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) |
| 2342 | return 0; |
| 2343 | |
| 2344 | while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { |
| 2345 | if (to == MAX_SKB_FRAGS) |
| 2346 | return 0; |
| 2347 | |
| 2348 | fragfrom = &skb_shinfo(skb)->frags[from]; |
| 2349 | fragto = &skb_shinfo(tgt)->frags[to]; |
| 2350 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2351 | if (todo >= skb_frag_size(fragfrom)) { |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 2352 | *fragto = *fragfrom; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2353 | todo -= skb_frag_size(fragfrom); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 2354 | from++; |
| 2355 | to++; |
| 2356 | |
| 2357 | } else { |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 2358 | __skb_frag_ref(fragfrom); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 2359 | fragto->page = fragfrom->page; |
| 2360 | fragto->page_offset = fragfrom->page_offset; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2361 | skb_frag_size_set(fragto, todo); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 2362 | |
| 2363 | fragfrom->page_offset += todo; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2364 | skb_frag_size_sub(fragfrom, todo); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 2365 | todo = 0; |
| 2366 | |
| 2367 | to++; |
| 2368 | break; |
| 2369 | } |
| 2370 | } |
| 2371 | |
| 2372 | /* Ready to "commit" this state change to tgt */ |
| 2373 | skb_shinfo(tgt)->nr_frags = to; |
| 2374 | |
| 2375 | if (merge >= 0) { |
| 2376 | fragfrom = &skb_shinfo(skb)->frags[0]; |
| 2377 | fragto = &skb_shinfo(tgt)->frags[merge]; |
| 2378 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2379 | skb_frag_size_add(fragto, skb_frag_size(fragfrom)); |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 2380 | __skb_frag_unref(fragfrom); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 2381 | } |
| 2382 | |
| 2383 | /* Reposition in the original skb */ |
| 2384 | to = 0; |
| 2385 | while (from < skb_shinfo(skb)->nr_frags) |
| 2386 | skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; |
| 2387 | skb_shinfo(skb)->nr_frags = to; |
| 2388 | |
| 2389 | BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); |
| 2390 | |
| 2391 | onlymerged: |
| 2392 | /* Most likely the tgt won't ever need its checksum anymore; skb, on
| 2393 | * the other hand, might need it if it needs to be resent
| 2394 | */ |
| 2395 | tgt->ip_summed = CHECKSUM_PARTIAL; |
| 2396 | skb->ip_summed = CHECKSUM_PARTIAL; |
| 2397 | |
| 2398 | /* Yak, is it really working this way? Some helper please? */ |
| 2399 | skb->len -= shiftlen; |
| 2400 | skb->data_len -= shiftlen; |
| 2401 | skb->truesize -= shiftlen; |
| 2402 | tgt->len += shiftlen; |
| 2403 | tgt->data_len += shiftlen; |
| 2404 | tgt->truesize += shiftlen; |
| 2405 | |
| 2406 | return shiftlen; |
| 2407 | } |
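
/* Usage sketch (illustrative only, not part of this file): coalescing a
 * purely paged skb into its predecessor, in the spirit of the TCP SACK
 * processing this helper was written for.  The skb must already be
 * unlinked from any queue and hold paged data only
 * (skb_headlen(skb) == 0); names are hypothetical.
 */
static void example_coalesce(struct sk_buff *prev, struct sk_buff *skb)
{
	int shifted = skb_shift(prev, skb, skb->len);

	/* skb_shift() never frees skb; if everything moved, the now
	 * empty buffer is ours to release.
	 */
	if (shifted && !skb->len)
		kfree_skb(skb);
}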
| 2408 | |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2409 | /** |
| 2410 | * skb_prepare_seq_read - Prepare a sequential read of skb data |
| 2411 | * @skb: the buffer to read |
| 2412 | * @from: lower offset of data to be read |
| 2413 | * @to: upper offset of data to be read |
| 2414 | * @st: state variable |
| 2415 | * |
| 2416 | * Initializes the specified state variable. Must be called before |
| 2417 | * invoking skb_seq_read() for the first time. |
| 2418 | */ |
| 2419 | void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, |
| 2420 | unsigned int to, struct skb_seq_state *st) |
| 2421 | { |
| 2422 | st->lower_offset = from; |
| 2423 | st->upper_offset = to; |
| 2424 | st->root_skb = st->cur_skb = skb; |
| 2425 | st->frag_idx = st->stepped_offset = 0; |
| 2426 | st->frag_data = NULL; |
| 2427 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2428 | EXPORT_SYMBOL(skb_prepare_seq_read); |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2429 | |
| 2430 | /** |
| 2431 | * skb_seq_read - Sequentially read skb data |
| 2432 | * @consumed: number of bytes consumed by the caller so far |
| 2433 | * @data: destination pointer for data to be returned |
| 2434 | * @st: state variable |
| 2435 | * |
| 2436 | * Reads a block of skb data at @consumed relative to the
| 2437 | * lower offset specified to skb_prepare_seq_read(). Assigns
| 2438 | * the head of the data block to @data and returns the length
| 2439 | * of the block or 0 if the end of the skb data or the upper |
| 2440 | * offset has been reached. |
| 2441 | * |
| 2442 | * The caller is not required to consume all of the data |
| 2443 | * returned, i.e. @consumed is typically set to the number
| 2444 | * of bytes already consumed and the next call to |
| 2445 | * skb_seq_read() will return the remaining part of the block. |
| 2446 | * |
Lucas De Marchi | 25985ed | 2011-03-30 22:57:33 -0300 | [diff] [blame] | 2447 | * Note 1: The size of each block of data returned can be arbitrary;
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2448 | * this limitation is the cost of zerocopy sequential
| 2449 | * reads of potentially non-linear data.
| 2450 | * |
Randy Dunlap | bc2cda1 | 2008-02-13 15:03:25 -0800 | [diff] [blame] | 2451 | * Note 2: Fragment lists within fragments are not implemented |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2452 | * at the moment, state->root_skb could be replaced with |
| 2453 | * a stack for this purpose. |
| 2454 | */ |
| 2455 | unsigned int skb_seq_read(unsigned int consumed, const u8 **data, |
| 2456 | struct skb_seq_state *st) |
| 2457 | { |
| 2458 | unsigned int block_limit, abs_offset = consumed + st->lower_offset; |
| 2459 | skb_frag_t *frag; |
| 2460 | |
| 2461 | if (unlikely(abs_offset >= st->upper_offset)) |
| 2462 | return 0; |
| 2463 | |
| 2464 | next_skb: |
Herbert Xu | 95e3b24 | 2009-01-29 16:07:52 -0800 | [diff] [blame] | 2465 | block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2466 | |
Thomas Chenault | 995b337 | 2009-05-18 21:43:27 -0700 | [diff] [blame] | 2467 | if (abs_offset < block_limit && !st->frag_data) { |
Herbert Xu | 95e3b24 | 2009-01-29 16:07:52 -0800 | [diff] [blame] | 2468 | *data = st->cur_skb->data + (abs_offset - st->stepped_offset); |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2469 | return block_limit - abs_offset; |
| 2470 | } |
| 2471 | |
| 2472 | if (st->frag_idx == 0 && !st->frag_data) |
| 2473 | st->stepped_offset += skb_headlen(st->cur_skb); |
| 2474 | |
| 2475 | while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { |
| 2476 | frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2477 | block_limit = skb_frag_size(frag) + st->stepped_offset; |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2478 | |
| 2479 | if (abs_offset < block_limit) { |
| 2480 | if (!st->frag_data) |
| 2481 | st->frag_data = kmap_skb_frag(frag); |
| 2482 | |
| 2483 | *data = (u8 *) st->frag_data + frag->page_offset + |
| 2484 | (abs_offset - st->stepped_offset); |
| 2485 | |
| 2486 | return block_limit - abs_offset; |
| 2487 | } |
| 2488 | |
| 2489 | if (st->frag_data) { |
| 2490 | kunmap_skb_frag(st->frag_data); |
| 2491 | st->frag_data = NULL; |
| 2492 | } |
| 2493 | |
| 2494 | st->frag_idx++; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2495 | st->stepped_offset += skb_frag_size(frag); |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2496 | } |
| 2497 | |
Olaf Kirch | 5b5a60d | 2007-06-23 23:11:52 -0700 | [diff] [blame] | 2498 | if (st->frag_data) { |
| 2499 | kunmap_skb_frag(st->frag_data); |
| 2500 | st->frag_data = NULL; |
| 2501 | } |
| 2502 | |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 2503 | if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) { |
Shyam Iyer | 71b3346 | 2009-01-29 16:12:42 -0800 | [diff] [blame] | 2504 | st->cur_skb = skb_shinfo(st->root_skb)->frag_list; |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2505 | st->frag_idx = 0; |
| 2506 | goto next_skb; |
Shyam Iyer | 71b3346 | 2009-01-29 16:12:42 -0800 | [diff] [blame] | 2507 | } else if (st->cur_skb->next) { |
| 2508 | st->cur_skb = st->cur_skb->next; |
Herbert Xu | 95e3b24 | 2009-01-29 16:07:52 -0800 | [diff] [blame] | 2509 | st->frag_idx = 0; |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2510 | goto next_skb; |
| 2511 | } |
| 2512 | |
| 2513 | return 0; |
| 2514 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2515 | EXPORT_SYMBOL(skb_seq_read); |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2516 | |
| 2517 | /** |
| 2518 | * skb_abort_seq_read - Abort a sequential read of skb data |
| 2519 | * @st: state variable |
| 2520 | * |
| 2521 | * Must be called if skb_seq_read() was not called until it |
| 2522 | * returned 0. |
| 2523 | */ |
| 2524 | void skb_abort_seq_read(struct skb_seq_state *st) |
| 2525 | { |
| 2526 | if (st->frag_data) |
| 2527 | kunmap_skb_frag(st->frag_data); |
| 2528 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2529 | EXPORT_SYMBOL(skb_abort_seq_read); |
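
/* Usage sketch (illustrative only, not part of this file): walking all
 * of an skb's data zerocopy-style with the sequential read API.  The
 * per-block processing is left as a comment; names are hypothetical.
 */
static void example_seq_walk(struct sk_buff *skb)
{
	struct skb_seq_state st;
	unsigned int consumed = 0, len;
	const u8 *data;

	skb_prepare_seq_read(skb, 0, skb->len, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		/* 'data' points at 'len' contiguous bytes here */
		consumed += len;
	}
	/* skb_seq_read() returned 0 above, so the state is already
	 * clean; skb_abort_seq_read(&st) is only required when
	 * stopping before that happens.
	 */
}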
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2530 | |
Thomas Graf | 3fc7e8a | 2005-06-23 21:00:17 -0700 | [diff] [blame] | 2531 | #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) |
| 2532 | |
| 2533 | static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, |
| 2534 | struct ts_config *conf, |
| 2535 | struct ts_state *state) |
| 2536 | { |
| 2537 | return skb_seq_read(offset, text, TS_SKB_CB(state)); |
| 2538 | } |
| 2539 | |
| 2540 | static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) |
| 2541 | { |
| 2542 | skb_abort_seq_read(TS_SKB_CB(state)); |
| 2543 | } |
| 2544 | |
| 2545 | /** |
| 2546 | * skb_find_text - Find a text pattern in skb data |
| 2547 | * @skb: the buffer to look in |
| 2548 | * @from: search offset |
| 2549 | * @to: search limit |
| 2550 | * @config: textsearch configuration |
| 2551 | * @state: uninitialized textsearch state variable |
| 2552 | * |
| 2553 | * Finds a pattern in the skb data according to the specified |
| 2554 | * textsearch configuration. Use textsearch_next() to retrieve |
| 2555 | * subsequent occurrences of the pattern. Returns the offset |
| 2556 | * to the first occurrence or UINT_MAX if no match was found. |
| 2557 | */ |
| 2558 | unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, |
| 2559 | unsigned int to, struct ts_config *config, |
| 2560 | struct ts_state *state) |
| 2561 | { |
Phil Oester | f72b948 | 2006-06-26 00:00:57 -0700 | [diff] [blame] | 2562 | unsigned int ret; |
| 2563 | |
Thomas Graf | 3fc7e8a | 2005-06-23 21:00:17 -0700 | [diff] [blame] | 2564 | config->get_next_block = skb_ts_get_next_block; |
| 2565 | config->finish = skb_ts_finish; |
| 2566 | |
| 2567 | skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state)); |
| 2568 | |
Phil Oester | f72b948 | 2006-06-26 00:00:57 -0700 | [diff] [blame] | 2569 | ret = textsearch_find(config, state); |
| 2570 | return (ret <= to - from ? ret : UINT_MAX); |
Thomas Graf | 3fc7e8a | 2005-06-23 21:00:17 -0700 | [diff] [blame] | 2571 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2572 | EXPORT_SYMBOL(skb_find_text); |
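
/* Usage sketch (illustrative only, not part of this file): preparing a
 * textsearch config and scanning a whole skb for a pattern.  Assumes
 * <linux/textsearch.h>; the "kmp" algorithm choice is arbitrary and the
 * function name is hypothetical.
 */
static unsigned int example_find(struct sk_buff *skb, const char *pattern)
{
	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos;

	conf = textsearch_prepare("kmp", pattern, strlen(pattern),
				  GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return UINT_MAX;

	pos = skb_find_text(skb, 0, skb->len, conf, &state);
	textsearch_destroy(conf);
	return pos;	/* first match offset, or UINT_MAX */
}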
Thomas Graf | 3fc7e8a | 2005-06-23 21:00:17 -0700 | [diff] [blame] | 2573 | |
Ananda Raju | e89e9cf | 2005-10-18 15:46:41 -0700 | [diff] [blame] | 2574 | /** |
| 2575 | * skb_append_datato_frags - append the user data to a skb
| 2576 | * @sk: sock structure |
| 2577 | * @skb: skb structure to be appended with user data.
| 2578 | * @getfrag: call back function to be used for getting the user data |
| 2579 | * @from: pointer to user message iov |
| 2580 | * @length: length of the iov message |
| 2581 | * |
| 2582 | * Description: This procedure appends the user data to the fragment part
| 2583 | * of the skb. If any page allocation fails, this procedure returns -ENOMEM.
| 2584 | */ |
| 2585 | int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, |
Martin Waitz | dab9630 | 2005-12-05 13:40:12 -0800 | [diff] [blame] | 2586 | int (*getfrag)(void *from, char *to, int offset, |
Ananda Raju | e89e9cf | 2005-10-18 15:46:41 -0700 | [diff] [blame] | 2587 | int len, int odd, struct sk_buff *skb), |
| 2588 | void *from, int length) |
| 2589 | { |
| 2590 | int frg_cnt = 0; |
| 2591 | skb_frag_t *frag = NULL; |
| 2592 | struct page *page = NULL; |
| 2593 | int copy, left; |
| 2594 | int offset = 0; |
| 2595 | int ret; |
| 2596 | |
| 2597 | do { |
| 2598 | /* Return error if we don't have space for new frag */ |
| 2599 | frg_cnt = skb_shinfo(skb)->nr_frags; |
| 2600 | if (frg_cnt >= MAX_SKB_FRAGS) |
| 2601 | return -EFAULT; |
| 2602 | |
| 2603 | /* allocate a new page for next frag */ |
| 2604 | page = alloc_pages(sk->sk_allocation, 0); |
| 2605 | |
| 2606 | /* If alloc_page fails, just return failure and the caller will
| 2607 | * free previously allocated pages by doing kfree_skb()
| 2608 | */ |
| 2609 | if (page == NULL) |
| 2610 | return -ENOMEM; |
| 2611 | |
| 2612 | /* initialize the next frag */ |
Ananda Raju | e89e9cf | 2005-10-18 15:46:41 -0700 | [diff] [blame] | 2613 | skb_fill_page_desc(skb, frg_cnt, page, 0, 0); |
| 2614 | skb->truesize += PAGE_SIZE; |
| 2615 | atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc); |
| 2616 | |
| 2617 | /* get the new initialized frag */ |
| 2618 | frg_cnt = skb_shinfo(skb)->nr_frags; |
| 2619 | frag = &skb_shinfo(skb)->frags[frg_cnt - 1]; |
| 2620 | |
| 2621 | /* copy the user data to page */ |
| 2622 | left = PAGE_SIZE - frag->page_offset; |
| 2623 | copy = (length > left) ? left : length;
| 2624 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2625 | ret = getfrag(from, skb_frag_address(frag) + skb_frag_size(frag), |
Ananda Raju | e89e9cf | 2005-10-18 15:46:41 -0700 | [diff] [blame] | 2626 | offset, copy, 0, skb); |
| 2627 | if (ret < 0) |
| 2628 | return -EFAULT; |
| 2629 | |
| 2630 | /* copy was successful so update the size parameters */ |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2631 | skb_frag_size_add(frag, copy); |
Ananda Raju | e89e9cf | 2005-10-18 15:46:41 -0700 | [diff] [blame] | 2632 | skb->len += copy; |
| 2633 | skb->data_len += copy; |
| 2634 | offset += copy; |
| 2635 | length -= copy; |
| 2636 | |
| 2637 | } while (length > 0); |
| 2638 | |
| 2639 | return 0; |
| 2640 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2641 | EXPORT_SYMBOL(skb_append_datato_frags); |
Ananda Raju | e89e9cf | 2005-10-18 15:46:41 -0700 | [diff] [blame] | 2642 | |
Herbert Xu | cbb042f | 2006-03-20 22:43:56 -0800 | [diff] [blame] | 2643 | /** |
| 2644 | * skb_pull_rcsum - pull skb and update receive checksum |
| 2645 | * @skb: buffer to update |
Herbert Xu | cbb042f | 2006-03-20 22:43:56 -0800 | [diff] [blame] | 2646 | * @len: length of data pulled |
| 2647 | * |
| 2648 | * This function performs an skb_pull on the packet and updates |
Urs Thuermann | fee54fa | 2008-02-12 22:03:25 -0800 | [diff] [blame] | 2649 | * the CHECKSUM_COMPLETE checksum. It should be used in
Patrick McHardy | 84fa793 | 2006-08-29 16:44:56 -0700 | [diff] [blame] | 2650 | * receive path processing instead of skb_pull unless you know
| 2651 | * that the checksum difference is zero (e.g., a valid IP header) |
| 2652 | * or you are setting ip_summed to CHECKSUM_NONE. |
Herbert Xu | cbb042f | 2006-03-20 22:43:56 -0800 | [diff] [blame] | 2653 | */ |
| 2654 | unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) |
| 2655 | { |
| 2656 | BUG_ON(len > skb->len); |
| 2657 | skb->len -= len; |
| 2658 | BUG_ON(skb->len < skb->data_len); |
| 2659 | skb_postpull_rcsum(skb, skb->data, len); |
| 2660 | return skb->data += len; |
| 2661 | } |
Arnaldo Carvalho de Melo | f94691a | 2006-03-20 22:47:55 -0800 | [diff] [blame] | 2662 | EXPORT_SYMBOL_GPL(skb_pull_rcsum); |
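
/* Usage sketch (illustrative only, not part of this file): stripping an
 * assumed 4-byte tag on the receive path while keeping a
 * CHECKSUM_COMPLETE value consistent, much as the VLAN code does.  The
 * function name and tag size are hypothetical.
 */
static bool example_strip_tag(struct sk_buff *skb)
{
	if (unlikely(!pskb_may_pull(skb, 4)))
		return false;	/* runt frame; caller should drop it */

	/* Adjusts skb->csum for the bytes leaving the packet. */
	skb_pull_rcsum(skb, 4);
	return true;
}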
| 2663 | |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2664 | /** |
| 2665 | * skb_segment - Perform protocol segmentation on skb. |
| 2666 | * @skb: buffer to segment |
Herbert Xu | 576a30e | 2006-06-27 13:22:38 -0700 | [diff] [blame] | 2667 | * @features: features for the output path (see dev->features) |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2668 | * |
| 2669 | * This function performs segmentation on the given skb. It returns |
Ben Hutchings | 4c821d7 | 2008-04-13 21:52:48 -0700 | [diff] [blame] | 2670 | * a pointer to the first in a list of new skbs for the segments. |
| 2671 | * In case of error it returns ERR_PTR(err). |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2672 | */ |
Michał Mirosław | c8f44af | 2011-11-15 15:29:55 +0000 | [diff] [blame] | 2673 | struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features) |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2674 | { |
| 2675 | struct sk_buff *segs = NULL; |
| 2676 | struct sk_buff *tail = NULL; |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 2677 | struct sk_buff *fskb = skb_shinfo(skb)->frag_list; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2678 | unsigned int mss = skb_shinfo(skb)->gso_size; |
Arnaldo Carvalho de Melo | 98e399f | 2007-03-19 15:33:04 -0700 | [diff] [blame] | 2679 | unsigned int doffset = skb->data - skb_mac_header(skb); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2680 | unsigned int offset = doffset; |
| 2681 | unsigned int headroom; |
| 2682 | unsigned int len; |
Michał Mirosław | 04ed3e7 | 2011-01-24 15:32:47 -0800 | [diff] [blame] | 2683 | int sg = !!(features & NETIF_F_SG); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2684 | int nfrags = skb_shinfo(skb)->nr_frags; |
| 2685 | int err = -ENOMEM; |
| 2686 | int i = 0; |
| 2687 | int pos; |
| 2688 | |
| 2689 | __skb_push(skb, doffset); |
| 2690 | headroom = skb_headroom(skb); |
| 2691 | pos = skb_headlen(skb); |
| 2692 | |
| 2693 | do { |
| 2694 | struct sk_buff *nskb; |
| 2695 | skb_frag_t *frag; |
Herbert Xu | c8884ed | 2006-10-29 15:59:41 -0800 | [diff] [blame] | 2696 | int hsize; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2697 | int size; |
| 2698 | |
| 2699 | len = skb->len - offset; |
| 2700 | if (len > mss) |
| 2701 | len = mss; |
| 2702 | |
| 2703 | hsize = skb_headlen(skb) - offset; |
| 2704 | if (hsize < 0) |
| 2705 | hsize = 0; |
Herbert Xu | c8884ed | 2006-10-29 15:59:41 -0800 | [diff] [blame] | 2706 | if (hsize > len || !sg) |
| 2707 | hsize = len; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2708 | |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 2709 | if (!hsize && i >= nfrags) { |
| 2710 | BUG_ON(fskb->len != len); |
| 2711 | |
| 2712 | pos += len; |
| 2713 | nskb = skb_clone(fskb, GFP_ATOMIC); |
| 2714 | fskb = fskb->next; |
| 2715 | |
| 2716 | if (unlikely(!nskb)) |
| 2717 | goto err; |
| 2718 | |
| 2719 | hsize = skb_end_pointer(nskb) - nskb->head; |
| 2720 | if (skb_cow_head(nskb, doffset + headroom)) { |
| 2721 | kfree_skb(nskb); |
| 2722 | goto err; |
| 2723 | } |
| 2724 | |
| 2725 | nskb->truesize += skb_end_pointer(nskb) - nskb->head - |
| 2726 | hsize; |
| 2727 | skb_release_head_state(nskb); |
| 2728 | __skb_push(nskb, doffset); |
| 2729 | } else { |
| 2730 | nskb = alloc_skb(hsize + doffset + headroom, |
| 2731 | GFP_ATOMIC); |
| 2732 | |
| 2733 | if (unlikely(!nskb)) |
| 2734 | goto err; |
| 2735 | |
| 2736 | skb_reserve(nskb, headroom); |
| 2737 | __skb_put(nskb, doffset); |
| 2738 | } |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2739 | |
| 2740 | if (segs) |
| 2741 | tail->next = nskb; |
| 2742 | else |
| 2743 | segs = nskb; |
| 2744 | tail = nskb; |
| 2745 | |
Herbert Xu | 6f85a12 | 2008-08-15 14:55:02 -0700 | [diff] [blame] | 2746 | __copy_skb_header(nskb, skb); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2747 | nskb->mac_len = skb->mac_len; |
| 2748 | |
Eric Dumazet | 3d3be43 | 2010-09-01 00:50:51 +0000 | [diff] [blame] | 2749 | /* nskb and skb might have different headroom */ |
| 2750 | if (nskb->ip_summed == CHECKSUM_PARTIAL) |
| 2751 | nskb->csum_start += skb_headroom(nskb) - headroom; |
| 2752 | |
Arnaldo Carvalho de Melo | 459a98e | 2007-03-19 15:30:44 -0700 | [diff] [blame] | 2753 | skb_reset_mac_header(nskb); |
Arnaldo Carvalho de Melo | ddc7b8e | 2007-03-15 21:42:27 -0300 | [diff] [blame] | 2754 | skb_set_network_header(nskb, skb->mac_len); |
Arnaldo Carvalho de Melo | b0e380b | 2007-04-10 21:21:55 -0700 | [diff] [blame] | 2755 | nskb->transport_header = (nskb->network_header + |
| 2756 | skb_network_header_len(skb)); |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 2757 | skb_copy_from_linear_data(skb, nskb->data, doffset); |
| 2758 | |
Herbert Xu | 2f18185 | 2009-03-28 23:39:18 -0700 | [diff] [blame] | 2759 | if (fskb != skb_shinfo(skb)->frag_list) |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 2760 | continue; |
| 2761 | |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2762 | if (!sg) { |
Herbert Xu | 6f85a12 | 2008-08-15 14:55:02 -0700 | [diff] [blame] | 2763 | nskb->ip_summed = CHECKSUM_NONE; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2764 | nskb->csum = skb_copy_and_csum_bits(skb, offset, |
| 2765 | skb_put(nskb, len), |
| 2766 | len, 0); |
| 2767 | continue; |
| 2768 | } |
| 2769 | |
| 2770 | frag = skb_shinfo(nskb)->frags; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2771 | |
Arnaldo Carvalho de Melo | d626f62 | 2007-03-27 18:55:52 -0300 | [diff] [blame] | 2772 | skb_copy_from_linear_data_offset(skb, offset, |
| 2773 | skb_put(nskb, hsize), hsize); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2774 | |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 2775 | while (pos < offset + len && i < nfrags) { |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2776 | *frag = skb_shinfo(skb)->frags[i]; |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 2777 | __skb_frag_ref(frag); |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2778 | size = skb_frag_size(frag); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2779 | |
| 2780 | if (pos < offset) { |
| 2781 | frag->page_offset += offset - pos; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2782 | skb_frag_size_sub(frag, offset - pos); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2783 | } |
| 2784 | |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 2785 | skb_shinfo(nskb)->nr_frags++; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2786 | |
| 2787 | if (pos + size <= offset + len) { |
| 2788 | i++; |
| 2789 | pos += size; |
| 2790 | } else { |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2791 | skb_frag_size_sub(frag, pos + size - (offset + len)); |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 2792 | goto skip_fraglist; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2793 | } |
| 2794 | |
| 2795 | frag++; |
| 2796 | } |
| 2797 | |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 2798 | if (pos < offset + len) { |
| 2799 | struct sk_buff *fskb2 = fskb; |
| 2800 | |
| 2801 | BUG_ON(pos + fskb->len != offset + len); |
| 2802 | |
| 2803 | pos += fskb->len; |
| 2804 | fskb = fskb->next; |
| 2805 | |
| 2806 | if (fskb2->next) { |
| 2807 | fskb2 = skb_clone(fskb2, GFP_ATOMIC); |
| 2808 | if (!fskb2) |
| 2809 | goto err; |
| 2810 | } else |
| 2811 | skb_get(fskb2); |
| 2812 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2813 | SKB_FRAG_ASSERT(nskb); |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 2814 | skb_shinfo(nskb)->frag_list = fskb2; |
| 2815 | } |
| 2816 | |
| 2817 | skip_fraglist: |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2818 | nskb->data_len = len - hsize; |
| 2819 | nskb->len += nskb->data_len; |
| 2820 | nskb->truesize += nskb->data_len; |
| 2821 | } while ((offset += len) < skb->len); |
| 2822 | |
| 2823 | return segs; |
| 2824 | |
| 2825 | err: |
| 2826 | while ((skb = segs)) { |
| 2827 | segs = skb->next; |
Patrick McHardy | b08d584 | 2007-02-27 09:57:37 -0800 | [diff] [blame] | 2828 | kfree_skb(skb); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2829 | } |
| 2830 | return ERR_PTR(err); |
| 2831 | } |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2832 | EXPORT_SYMBOL_GPL(skb_segment); |
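
/* Usage sketch (illustrative only, not part of this file): segmenting a
 * GSO skb and walking the resulting list, loosely following what the
 * dev_gso_segment()/transmit path does.  Real transmission and error
 * unwinding are elided; names are hypothetical.
 */
static int example_segment(struct sk_buff *skb, netdev_features_t features)
{
	struct sk_buff *segs, *next;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		return PTR_ERR(segs);

	while (segs) {
		next = segs->next;
		segs->next = NULL;
		kfree_skb(segs);	/* stand-in for transmitting it */
		segs = next;
	}
	consume_skb(skb);	/* the original is no longer needed */
	return 0;
}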
| 2833 | |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 2834 | int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) |
| 2835 | { |
| 2836 | struct sk_buff *p = *head; |
| 2837 | struct sk_buff *nskb; |
Herbert Xu | 9aaa156 | 2009-05-26 18:50:33 +0000 | [diff] [blame] | 2838 | struct skb_shared_info *skbinfo = skb_shinfo(skb); |
| 2839 | struct skb_shared_info *pinfo = skb_shinfo(p); |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 2840 | unsigned int headroom; |
Herbert Xu | 8691173 | 2009-01-29 14:19:50 +0000 | [diff] [blame] | 2841 | unsigned int len = skb_gro_len(skb); |
Herbert Xu | 67147ba | 2009-05-26 18:50:22 +0000 | [diff] [blame] | 2842 | unsigned int offset = skb_gro_offset(skb); |
| 2843 | unsigned int headlen = skb_headlen(skb); |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 2844 | |
Herbert Xu | 8691173 | 2009-01-29 14:19:50 +0000 | [diff] [blame] | 2845 | if (p->len + len >= 65536) |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 2846 | return -E2BIG; |
| 2847 | |
Herbert Xu | 9aaa156 | 2009-05-26 18:50:33 +0000 | [diff] [blame] | 2848 | if (pinfo->frag_list) |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 2849 | goto merge; |
Herbert Xu | 67147ba | 2009-05-26 18:50:22 +0000 | [diff] [blame] | 2850 | else if (headlen <= offset) { |
Herbert Xu | 42da699 | 2009-05-26 18:50:19 +0000 | [diff] [blame] | 2851 | skb_frag_t *frag; |
Herbert Xu | 66e92fc | 2009-05-26 18:50:32 +0000 | [diff] [blame] | 2852 | skb_frag_t *frag2; |
Herbert Xu | 9aaa156 | 2009-05-26 18:50:33 +0000 | [diff] [blame] | 2853 | int i = skbinfo->nr_frags; |
| 2854 | int nr_frags = pinfo->nr_frags + i; |
Herbert Xu | 42da699 | 2009-05-26 18:50:19 +0000 | [diff] [blame] | 2855 | |
Herbert Xu | 66e92fc | 2009-05-26 18:50:32 +0000 | [diff] [blame] | 2856 | offset -= headlen; |
| 2857 | |
| 2858 | if (nr_frags > MAX_SKB_FRAGS) |
Herbert Xu | 81705ad | 2009-01-29 14:19:51 +0000 | [diff] [blame] | 2859 | return -E2BIG; |
| 2860 | |
Herbert Xu | 9aaa156 | 2009-05-26 18:50:33 +0000 | [diff] [blame] | 2861 | pinfo->nr_frags = nr_frags; |
| 2862 | skbinfo->nr_frags = 0; |
Herbert Xu | f557206 | 2009-01-14 20:40:03 -0800 | [diff] [blame] | 2863 | |
Herbert Xu | 9aaa156 | 2009-05-26 18:50:33 +0000 | [diff] [blame] | 2864 | frag = pinfo->frags + nr_frags; |
| 2865 | frag2 = skbinfo->frags + i; |
Herbert Xu | 66e92fc | 2009-05-26 18:50:32 +0000 | [diff] [blame] | 2866 | do { |
| 2867 | *--frag = *--frag2; |
| 2868 | } while (--i); |
| 2869 | |
| 2870 | frag->page_offset += offset; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2871 | skb_frag_size_sub(frag, offset); |
Herbert Xu | 66e92fc | 2009-05-26 18:50:32 +0000 | [diff] [blame] | 2872 | |
Herbert Xu | f557206 | 2009-01-14 20:40:03 -0800 | [diff] [blame] | 2873 | skb->truesize -= skb->data_len; |
| 2874 | skb->len -= skb->data_len; |
| 2875 | skb->data_len = 0; |
| 2876 | |
Herbert Xu | 5d38a07 | 2009-01-04 16:13:40 -0800 | [diff] [blame] | 2877 | NAPI_GRO_CB(skb)->free = 1; |
| 2878 | goto done; |
Herbert Xu | 69c0cab | 2009-11-17 05:18:18 -0800 | [diff] [blame] | 2879 | } else if (skb_gro_len(p) != pinfo->gso_size) |
| 2880 | return -E2BIG; |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 2881 | |
| 2882 | headroom = skb_headroom(p); |
Eric Dumazet | 3d3be43 | 2010-09-01 00:50:51 +0000 | [diff] [blame] | 2883 | nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC); |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 2884 | if (unlikely(!nskb)) |
| 2885 | return -ENOMEM; |
| 2886 | |
| 2887 | __copy_skb_header(nskb, p); |
| 2888 | nskb->mac_len = p->mac_len; |
| 2889 | |
| 2890 | skb_reserve(nskb, headroom); |
Herbert Xu | 8691173 | 2009-01-29 14:19:50 +0000 | [diff] [blame] | 2891 | __skb_put(nskb, skb_gro_offset(p)); |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 2892 | |
Herbert Xu | 8691173 | 2009-01-29 14:19:50 +0000 | [diff] [blame] | 2893 | skb_set_mac_header(nskb, skb_mac_header(p) - p->data); |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 2894 | skb_set_network_header(nskb, skb_network_offset(p)); |
| 2895 | skb_set_transport_header(nskb, skb_transport_offset(p)); |
| 2896 | |
Herbert Xu | 8691173 | 2009-01-29 14:19:50 +0000 | [diff] [blame] | 2897 | __skb_pull(p, skb_gro_offset(p)); |
| 2898 | memcpy(skb_mac_header(nskb), skb_mac_header(p), |
| 2899 | p->data - skb_mac_header(p)); |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 2900 | |
| 2901 | *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p); |
| 2902 | skb_shinfo(nskb)->frag_list = p; |
Herbert Xu | 9aaa156 | 2009-05-26 18:50:33 +0000 | [diff] [blame] | 2903 | skb_shinfo(nskb)->gso_size = pinfo->gso_size; |
Herbert Xu | 622e0ca | 2010-05-20 23:07:56 -0700 | [diff] [blame] | 2904 | pinfo->gso_size = 0; |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 2905 | skb_header_release(p); |
| 2906 | nskb->prev = p; |
| 2907 | |
| 2908 | nskb->data_len += p->len; |
| 2909 | nskb->truesize += p->len; |
| 2910 | nskb->len += p->len; |
| 2911 | |
| 2912 | *head = nskb; |
| 2913 | nskb->next = p->next; |
| 2914 | p->next = NULL; |
| 2915 | |
| 2916 | p = nskb; |
| 2917 | |
| 2918 | merge: |
Herbert Xu | 67147ba | 2009-05-26 18:50:22 +0000 | [diff] [blame] | 2919 | if (offset > headlen) { |
Michal Schmidt | d1dc7ab | 2011-01-24 12:08:48 +0000 | [diff] [blame] | 2920 | unsigned int eat = offset - headlen; |
| 2921 | |
| 2922 | skbinfo->frags[0].page_offset += eat; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2923 | skb_frag_size_sub(&skbinfo->frags[0], eat); |
Michal Schmidt | d1dc7ab | 2011-01-24 12:08:48 +0000 | [diff] [blame] | 2924 | skb->data_len -= eat; |
| 2925 | skb->len -= eat; |
Herbert Xu | 67147ba | 2009-05-26 18:50:22 +0000 | [diff] [blame] | 2926 | offset = headlen; |
Herbert Xu | 5603502 | 2009-02-05 21:26:52 -0800 | [diff] [blame] | 2927 | } |
| 2928 | |
Herbert Xu | 67147ba | 2009-05-26 18:50:22 +0000 | [diff] [blame] | 2929 | __skb_pull(skb, offset); |
Herbert Xu | 5603502 | 2009-02-05 21:26:52 -0800 | [diff] [blame] | 2930 | |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 2931 | p->prev->next = skb; |
| 2932 | p->prev = skb; |
| 2933 | skb_header_release(skb); |
| 2934 | |
Herbert Xu | 5d38a07 | 2009-01-04 16:13:40 -0800 | [diff] [blame] | 2935 | done: |
| 2936 | NAPI_GRO_CB(p)->count++; |
Herbert Xu | 37fe473 | 2009-01-17 19:48:13 +0000 | [diff] [blame] | 2937 | p->data_len += len; |
| 2938 | p->truesize += len; |
| 2939 | p->len += len; |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 2940 | |
| 2941 | NAPI_GRO_CB(skb)->same_flow = 1; |
| 2942 | return 0; |
| 2943 | } |
| 2944 | EXPORT_SYMBOL_GPL(skb_gro_receive); |
| 2945 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2946 | void __init skb_init(void) |
| 2947 | { |
| 2948 | skbuff_head_cache = kmem_cache_create("skbuff_head_cache", |
| 2949 | sizeof(struct sk_buff), |
| 2950 | 0, |
Alexey Dobriyan | e5d679f33 | 2006-08-26 19:25:52 -0700 | [diff] [blame] | 2951 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, |
Paul Mundt | 20c2df8 | 2007-07-20 10:11:58 +0900 | [diff] [blame] | 2952 | NULL); |
David S. Miller | d179cd1 | 2005-08-17 14:57:30 -0700 | [diff] [blame] | 2953 | skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", |
| 2954 | (2*sizeof(struct sk_buff)) + |
| 2955 | sizeof(atomic_t), |
| 2956 | 0, |
Alexey Dobriyan | e5d679f33 | 2006-08-26 19:25:52 -0700 | [diff] [blame] | 2957 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, |
Paul Mundt | 20c2df8 | 2007-07-20 10:11:58 +0900 | [diff] [blame] | 2958 | NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2959 | } |
| 2960 | |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 2961 | /** |
| 2962 | * skb_to_sgvec - Fill a scatter-gather list from a socket buffer |
| 2963 | * @skb: Socket buffer containing the buffers to be mapped |
| 2964 | * @sg: The scatter-gather list to map into |
| 2965 | * @offset: The offset into the buffer's contents to start mapping |
| 2966 | * @len: Length of buffer space to be mapped |
| 2967 | * |
| 2968 | * Fill the specified scatter-gather list with mappings/pointers into a |
| 2969 | * region of the buffer space attached to a socket buffer. |
| 2970 | */ |
David S. Miller | 51c739d | 2007-10-30 21:29:29 -0700 | [diff] [blame] | 2971 | static int |
| 2972 | __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 2973 | { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2974 | int start = skb_headlen(skb); |
| 2975 | int i, copy = start - offset; |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2976 | struct sk_buff *frag_iter; |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 2977 | int elt = 0; |
| 2978 | |
| 2979 | if (copy > 0) { |
| 2980 | if (copy > len) |
| 2981 | copy = len; |
Jens Axboe | 642f14903 | 2007-10-24 11:20:47 +0200 | [diff] [blame] | 2982 | sg_set_buf(sg, skb->data + offset, copy); |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 2983 | elt++; |
| 2984 | if ((len -= copy) == 0) |
| 2985 | return elt; |
| 2986 | offset += copy; |
| 2987 | } |
| 2988 | |
| 2989 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2990 | int end; |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 2991 | |
Ilpo Järvinen | 547b792 | 2008-07-25 21:43:18 -0700 | [diff] [blame] | 2992 | WARN_ON(start > offset + len); |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2993 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2994 | end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 2995 | if ((copy = end - offset) > 0) { |
| 2996 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
| 2997 | |
| 2998 | if (copy > len) |
| 2999 | copy = len; |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 3000 | sg_set_page(&sg[elt], skb_frag_page(frag), copy, |
Jens Axboe | 642f14903 | 2007-10-24 11:20:47 +0200 | [diff] [blame] | 3001 | frag->page_offset+offset-start); |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3002 | elt++; |
| 3003 | if (!(len -= copy)) |
| 3004 | return elt; |
| 3005 | offset += copy; |
| 3006 | } |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 3007 | start = end; |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3008 | } |
| 3009 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 3010 | skb_walk_frags(skb, frag_iter) { |
| 3011 | int end; |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3012 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 3013 | WARN_ON(start > offset + len); |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3014 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 3015 | end = start + frag_iter->len; |
| 3016 | if ((copy = end - offset) > 0) { |
| 3017 | if (copy > len) |
| 3018 | copy = len; |
| 3019 | elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start, |
| 3020 | copy); |
| 3021 | if ((len -= copy) == 0) |
| 3022 | return elt; |
| 3023 | offset += copy; |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3024 | } |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 3025 | start = end; |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3026 | } |
| 3027 | BUG_ON(len); |
| 3028 | return elt; |
| 3029 | } |
| 3030 | |
David S. Miller | 51c739d | 2007-10-30 21:29:29 -0700 | [diff] [blame] | 3031 | int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) |
| 3032 | { |
| 3033 | int nsg = __skb_to_sgvec(skb, sg, offset, len); |
| 3034 | |
Jens Axboe | c46f233 | 2007-10-31 12:06:37 +0100 | [diff] [blame] | 3035 | sg_mark_end(&sg[nsg - 1]); |
David S. Miller | 51c739d | 2007-10-30 21:29:29 -0700 | [diff] [blame] | 3036 | |
| 3037 | return nsg; |
| 3038 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3039 | EXPORT_SYMBOL_GPL(skb_to_sgvec); |
David S. Miller | 51c739d | 2007-10-30 21:29:29 -0700 | [diff] [blame] | 3040 | |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3041 | /** |
| 3042 | * skb_cow_data - Check that a socket buffer's data buffers are writable |
| 3043 | * @skb: The socket buffer to check. |
| 3044 | * @tailbits: Amount of trailing space to be added |
| 3045 | * @trailer: Returned pointer to the skb where the @tailbits space begins |
| 3046 | * |
| 3047 | * Make sure that the data buffers attached to a socket buffer are |
| 3048 | * writable. If they are not, private copies are made of the data buffers |
| 3049 | * and the socket buffer is set to use these instead. |
| 3050 | * |
| 3051 | * If @tailbits is given, make sure that there is space to write @tailbits |
| 3052 | * bytes of data beyond current end of socket buffer. @trailer will be |
| 3053 | * set to point to the skb in which this space begins. |
| 3054 | * |
| 3055 | * The number of scatterlist elements required to completely map the |
| 3056 | * COW'd and extended socket buffer will be returned. |
| 3057 | */ |
| 3058 | int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) |
| 3059 | { |
| 3060 | int copyflag; |
| 3061 | int elt; |
| 3062 | struct sk_buff *skb1, **skb_p; |
| 3063 | |
| 3064 | /* If skb is cloned or its head is paged, reallocate |
| 3065 | * head pulling out all the pages (pages are considered not writable |
| 3066 | * at the moment even if they are anonymous). |
| 3067 | */ |
| 3068 | if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && |
| 3069 | __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL) |
| 3070 | return -ENOMEM; |
| 3071 | |
| 3072 | /* Easy case. Most of packets will go this way. */ |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 3073 | if (!skb_has_frag_list(skb)) { |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3074 | /* A little trouble: not enough space for the trailer.
| 3075 | * This should not happen when the stack is tuned to generate
| 3076 | * good frames. OK, on a miss we reallocate and reserve even more
| 3077 | * space; 128 bytes is fair. */
| 3078 | |
| 3079 | if (skb_tailroom(skb) < tailbits && |
| 3080 | pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) |
| 3081 | return -ENOMEM; |
| 3082 | |
| 3083 | /* Voila! */ |
| 3084 | *trailer = skb; |
| 3085 | return 1; |
| 3086 | } |
| 3087 | |
| 3088 | /* Misery. We are in trouble, going to mince fragments... */
| 3089 | |
| 3090 | elt = 1; |
| 3091 | skb_p = &skb_shinfo(skb)->frag_list; |
| 3092 | copyflag = 0; |
| 3093 | |
| 3094 | while ((skb1 = *skb_p) != NULL) { |
| 3095 | int ntail = 0; |
| 3096 | |
| 3097 | /* The fragment is partially pulled by someone;
| 3098 | * this can happen on input. Copy it and everything |
| 3099 | * after it. */ |
| 3100 | |
| 3101 | if (skb_shared(skb1)) |
| 3102 | copyflag = 1; |
| 3103 | |
| 3104 | /* If the skb is the last, worry about trailer. */ |
| 3105 | |
| 3106 | if (skb1->next == NULL && tailbits) { |
| 3107 | if (skb_shinfo(skb1)->nr_frags || |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 3108 | skb_has_frag_list(skb1) || |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3109 | skb_tailroom(skb1) < tailbits) |
| 3110 | ntail = tailbits + 128; |
| 3111 | } |
| 3112 | |
| 3113 | if (copyflag || |
| 3114 | skb_cloned(skb1) || |
| 3115 | ntail || |
| 3116 | skb_shinfo(skb1)->nr_frags || |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 3117 | skb_has_frag_list(skb1)) { |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3118 | struct sk_buff *skb2; |
| 3119 | |
| 3120 | /* Fuck, we are miserable poor guys... */ |
| 3121 | if (ntail == 0) |
| 3122 | skb2 = skb_copy(skb1, GFP_ATOMIC); |
| 3123 | else |
| 3124 | skb2 = skb_copy_expand(skb1, |
| 3125 | skb_headroom(skb1), |
| 3126 | ntail, |
| 3127 | GFP_ATOMIC); |
| 3128 | if (unlikely(skb2 == NULL)) |
| 3129 | return -ENOMEM; |
| 3130 | |
| 3131 | if (skb1->sk) |
| 3132 | skb_set_owner_w(skb2, skb1->sk); |
| 3133 | |
| 3134 | /* Looking around. Are we still alive? |
| 3135 | * OK, link new skb, drop old one */ |
| 3136 | |
| 3137 | skb2->next = skb1->next; |
| 3138 | *skb_p = skb2; |
| 3139 | kfree_skb(skb1); |
| 3140 | skb1 = skb2; |
| 3141 | } |
| 3142 | elt++; |
| 3143 | *trailer = skb1; |
| 3144 | skb_p = &skb1->next; |
| 3145 | } |
| 3146 | |
| 3147 | return elt; |
| 3148 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3149 | EXPORT_SYMBOL_GPL(skb_cow_data); |
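
/*
 * Editor's usage sketch, not part of this file: how an IPsec-style caller
 * might combine skb_cow_data() with skb_to_sgvec() (defined earlier in this
 * file) to obtain a private, writable buffer with trailer space and map it
 * into a scatterlist. The function name and the trailer_len parameter are
 * hypothetical.
 */
static int example_cow_and_map(struct sk_buff *skb, int trailer_len)
{
	struct sk_buff *trailer;
	struct scatterlist *sg;
	int nfrags, nelt;

	/* COW the buffer and guarantee trailer_len bytes of tailroom;
	 * a non-negative return is the scatterlist entry count needed. */
	nfrags = skb_cow_data(skb, trailer_len, &trailer);
	if (nfrags < 0)
		return nfrags;

	sg = kmalloc(nfrags * sizeof(*sg), GFP_ATOMIC);
	if (!sg)
		return -ENOMEM;
	sg_init_table(sg, nfrags);

	/* Map the whole, now-writable buffer (e.g. for a crypto request). */
	nelt = skb_to_sgvec(skb, sg, 0, skb->len);

	/* ... hand sg/nelt to the consumer here ... */

	kfree(sg);
	return nelt;
}
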
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3150 | |
Eric Dumazet | b1faf56 | 2010-05-31 23:44:05 -0700 | [diff] [blame] | 3151 | static void sock_rmem_free(struct sk_buff *skb) |
| 3152 | { |
| 3153 | struct sock *sk = skb->sk; |
| 3154 | |
| 3155 | atomic_sub(skb->truesize, &sk->sk_rmem_alloc); |
| 3156 | } |
| 3157 | |
| 3158 | /* |
| 3159 | * Note: we don't memory-charge error packets (no sk_forward_alloc changes). |
| 3160 | */ |
| 3161 | int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) |
| 3162 | { |
| 3163 | if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= |
| 3164 | (unsigned)sk->sk_rcvbuf) |
| 3165 | return -ENOMEM; |
| 3166 | |
| 3167 | skb_orphan(skb); |
| 3168 | skb->sk = sk; |
| 3169 | skb->destructor = sock_rmem_free; |
| 3170 | atomic_add(skb->truesize, &sk->sk_rmem_alloc); |
| 3171 | |
Eric Dumazet | abb57ea | 2011-05-18 02:21:31 -0400 | [diff] [blame] | 3172 | /* Before leaving the RCU section, make sure the dst is refcounted. */ |
| 3173 | skb_dst_force(skb); |
| 3174 | |
Eric Dumazet | b1faf56 | 2010-05-31 23:44:05 -0700 | [diff] [blame] | 3175 | skb_queue_tail(&sk->sk_error_queue, skb); |
| 3176 | if (!sock_flag(sk, SOCK_DEAD)) |
| 3177 | sk->sk_data_ready(sk, skb->len); |
| 3178 | return 0; |
| 3179 | } |
| 3180 | EXPORT_SYMBOL(sock_queue_err_skb); |
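
/*
 * Editor's sketch, not part of this file: the usual pattern for reporting an
 * asynchronous error through sock_queue_err_skb(). skb_tstamp_tx() below
 * follows the same recipe for TX timestamps. SKB_EXT_ERR() and the ee_*
 * fields come from <linux/errqueue.h>; the function itself is hypothetical.
 */
static void example_report_error(struct sock *sk, struct sk_buff *skb, int error)
{
	struct sock_exterr_skb *serr;

	serr = SKB_EXT_ERR(skb);
	memset(serr, 0, sizeof(*serr));
	serr->ee.ee_errno = error;
	serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;

	/* The skb is charged to sk_rmem_alloc on success; on failure the
	 * caller still owns it and must free it. */
	if (sock_queue_err_skb(sk, skb))
		kfree_skb(skb);
}
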
| 3181 | |
Patrick Ohly | ac45f60 | 2009-02-12 05:03:37 +0000 | [diff] [blame] | 3182 | void skb_tstamp_tx(struct sk_buff *orig_skb, |
| 3183 | struct skb_shared_hwtstamps *hwtstamps) |
| 3184 | { |
| 3185 | struct sock *sk = orig_skb->sk; |
| 3186 | struct sock_exterr_skb *serr; |
| 3187 | struct sk_buff *skb; |
| 3188 | int err; |
| 3189 | |
| 3190 | if (!sk) |
| 3191 | return; |
| 3192 | |
| 3193 | skb = skb_clone(orig_skb, GFP_ATOMIC); |
| 3194 | if (!skb) |
| 3195 | return; |
| 3196 | |
| 3197 | if (hwtstamps) { |
| 3198 | *skb_hwtstamps(skb) = |
| 3199 | *hwtstamps; |
| 3200 | } else { |
| 3201 | /* |
| 3202 | * No hardware timestamps are available, |
Oliver Hartkopp | 2244d07 | 2010-08-17 08:59:14 +0000 | [diff] [blame] | 3203 | * so keep the shared tx_flags and store |
Patrick Ohly | ac45f60 | 2009-02-12 05:03:37 +0000 | [diff] [blame] | 3204 | * only a software timestamp. |
| 3205 | */ |
| 3206 | skb->tstamp = ktime_get_real(); |
| 3207 | } |
| 3208 | |
| 3209 | serr = SKB_EXT_ERR(skb); |
| 3210 | memset(serr, 0, sizeof(*serr)); |
| 3211 | serr->ee.ee_errno = ENOMSG; |
| 3212 | serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; |
Eric Dumazet | 2903037 | 2010-05-29 00:20:48 -0700 | [diff] [blame] | 3213 | |
Patrick Ohly | ac45f60 | 2009-02-12 05:03:37 +0000 | [diff] [blame] | 3214 | err = sock_queue_err_skb(sk, skb); |
Eric Dumazet | 2903037 | 2010-05-29 00:20:48 -0700 | [diff] [blame] | 3215 | |
Patrick Ohly | ac45f60 | 2009-02-12 05:03:37 +0000 | [diff] [blame] | 3216 | if (err) |
| 3217 | kfree_skb(skb); |
| 3218 | } |
| 3219 | EXPORT_SYMBOL_GPL(skb_tstamp_tx); |
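
/*
 * Editor's sketch, not part of this file: how a NIC driver's TX-completion
 * handler might feed a hardware timestamp back through skb_tstamp_tx().
 * The function and its "ns" parameter are hypothetical; SKBTX_IN_PROGRESS
 * and struct skb_shared_hwtstamps are the real interfaces.
 */
static void example_tx_tstamp_complete(struct sk_buff *skb, u64 ns)
{
	struct skb_shared_hwtstamps shhwtstamps;

	/* Only report if timestamping was armed at transmit time. */
	if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		return;

	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);

	/* skb_tstamp_tx() clones the skb and queues the clone on the
	 * owning socket's error queue. */
	skb_tstamp_tx(skb, &shhwtstamps);
}
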
| 3220 | |
Johannes Berg | 6e3e939 | 2011-11-09 10:15:42 +0100 | [diff] [blame] | 3221 | void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) |
| 3222 | { |
| 3223 | struct sock *sk = skb->sk; |
| 3224 | struct sock_exterr_skb *serr; |
| 3225 | int err; |
| 3226 | |
| 3227 | skb->wifi_acked_valid = 1; |
| 3228 | skb->wifi_acked = acked; |
| 3229 | |
| 3230 | serr = SKB_EXT_ERR(skb); |
| 3231 | memset(serr, 0, sizeof(*serr)); |
| 3232 | serr->ee.ee_errno = ENOMSG; |
| 3233 | serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; |
| 3234 | |
| 3235 | err = sock_queue_err_skb(sk, skb); |
| 3236 | if (err) |
| 3237 | kfree_skb(skb); |
| 3238 | } |
| 3239 | EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); |
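
/*
 * Editor's sketch, not part of this file: a wireless driver's TX-status path
 * reporting whether the peer acknowledged a frame. The skb is assumed to
 * have been flagged with SKBTX_WIFI_STATUS at transmit time; the function
 * name is hypothetical.
 */
static void example_wifi_tx_status(struct sk_buff *skb, bool peer_acked)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS)
		skb_complete_wifi_ack(skb, peer_acked); /* consumes skb */
	else
		dev_kfree_skb_any(skb);
}
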
| 3240 | |
Patrick Ohly | ac45f60 | 2009-02-12 05:03:37 +0000 | [diff] [blame] | 3241 | |
Rusty Russell | f35d9d8 | 2008-02-04 23:49:54 -0500 | [diff] [blame] | 3242 | /** |
| 3243 | * skb_partial_csum_set - set up and verify partial csum values for packet |
| 3244 | * @skb: the skb to set |
| 3245 | * @start: the number of bytes after skb->data to start checksumming. |
| 3246 | * @off: the offset from start to place the checksum. |
| 3247 | * |
| 3248 | * For untrusted partially-checksummed packets, we need to make sure the values |
| 3249 | * for skb->csum_start and skb->csum_offset are valid so we don't oops. |
| 3250 | * |
| 3251 | * This function checks and sets those values and skb->ip_summed: if this |
| 3252 | * returns false, you should drop the packet. |
| 3253 | */ |
| 3254 | bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) |
| 3255 | { |
Herbert Xu | 5ff8dda | 2009-06-04 01:22:01 +0000 | [diff] [blame] | 3256 | if (unlikely(start > skb_headlen(skb)) || |
| 3257 | unlikely((int)start + off > skb_headlen(skb) - 2)) { |
Rusty Russell | f35d9d8 | 2008-02-04 23:49:54 -0500 | [diff] [blame] | 3258 | if (net_ratelimit()) |
| 3259 | printk(KERN_WARNING |
| 3260 | "bad partial csum: csum=%u/%u len=%u\n", |
Herbert Xu | 5ff8dda | 2009-06-04 01:22:01 +0000 | [diff] [blame] | 3261 | start, off, skb_headlen(skb)); |
Rusty Russell | f35d9d8 | 2008-02-04 23:49:54 -0500 | [diff] [blame] | 3262 | return false; |
| 3263 | } |
| 3264 | skb->ip_summed = CHECKSUM_PARTIAL; |
| 3265 | skb->csum_start = skb_headroom(skb) + start; |
| 3266 | skb->csum_offset = off; |
| 3267 | return true; |
| 3268 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3269 | EXPORT_SYMBOL_GPL(skb_partial_csum_set); |
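
/*
 * Editor's sketch, not part of this file: validating checksum metadata
 * received from an untrusted source (e.g. a guest-supplied virtio-style
 * header) before trusting it. The header layout is illustrative only.
 */
struct example_csum_hdr {	/* hypothetical wire format */
	__u16 csum_start;
	__u16 csum_offset;
};

static int example_accept_partial_csum(struct sk_buff *skb,
					const struct example_csum_hdr *h)
{
	/* Refuse offsets that would land the checksum outside the
	 * linear header area and oops the kernel. */
	if (!skb_partial_csum_set(skb, h->csum_start, h->csum_offset))
		return -EINVAL;
	return 0;
}
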
Rusty Russell | f35d9d8 | 2008-02-04 23:49:54 -0500 | [diff] [blame] | 3270 | |
Ben Hutchings | 4497b07 | 2008-06-19 16:22:28 -0700 | [diff] [blame] | 3271 | void __skb_warn_lro_forwarding(const struct sk_buff *skb) |
| 3272 | { |
| 3273 | if (net_ratelimit()) |
| 3274 | pr_warning("%s: received packets cannot be forwarded" |
| 3275 | " while LRO is enabled\n", skb->dev->name); |
| 3276 | } |
Ben Hutchings | 4497b07 | 2008-06-19 16:22:28 -0700 | [diff] [blame] | 3277 | EXPORT_SYMBOL(__skb_warn_lro_forwarding); |
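
/*
 * Editor's sketch, not part of this file: a forwarding path normally reaches
 * the warning above via the skb_warn_if_lro() wrapper in <linux/skbuff.h>,
 * which returns true for an LRO-aggregated skb.
 */
static int example_forward_check(struct sk_buff *skb)
{
	/* LRO-merged skbs cannot be resegmented, so they must be
	 * dropped rather than forwarded. */
	if (skb_warn_if_lro(skb))
		return -EINVAL;	/* caller drops the packet */
	return 0;
}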