#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <asm/uaccess.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strlen(s) + 1;
        buf = kmalloc_track_caller(len, gfp);
        if (buf)
                memcpy(buf, s, len);
        return buf;
}
EXPORT_SYMBOL(kstrdup);
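
/*
 * Example: a minimal, hypothetical sketch of kstrdup() usage; the
 * my_dev structure and its name field are invented for illustration:
 *
 *      static int my_dev_set_name(struct my_dev *dev, const char *name)
 *      {
 *              char *copy = kstrdup(name, GFP_KERNEL);
 *
 *              if (!copy)
 *                      return -ENOMEM;
 *              kfree(dev->name);
 *              dev->name = copy;
 *              return 0;
 *      }
 */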

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strnlen(s, max);
        buf = kmalloc_track_caller(len+1, gfp);
        if (buf) {
                memcpy(buf, s, len);
                buf[len] = '\0';
        }
        return buf;
}
EXPORT_SYMBOL(kstrndup);
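
/*
 * Example: a hypothetical sketch using kstrndup() to take a safe,
 * NUL-terminated copy of a fixed-size field that may lack a
 * terminator (the raw[] buffer is invented for illustration):
 *
 *      char raw[8];            (say, a name field read from hardware)
 *      char *name;
 *
 *      name = kstrndup(raw, sizeof(raw), GFP_KERNEL);
 *      if (!name)
 *              return -ENOMEM;
 */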

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
        void *p;

        p = kmalloc_track_caller(len, gfp);
        if (p)
                memcpy(p, src, len);
        return p;
}
EXPORT_SYMBOL(kmemdup);
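
/*
 * Example: a hedged sketch of kmemdup() duplicating a template
 * structure; struct my_cfg and default_cfg are invented for
 * illustration:
 *
 *      struct my_cfg *cfg;
 *
 *      cfg = kmemdup(&default_cfg, sizeof(*cfg), GFP_KERNEL);
 *      if (!cfg)
 *              return -ENOMEM;
 */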

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user(const void __user *src, size_t len)
{
        void *p;

        /*
         * Always use GFP_KERNEL, since copy_from_user() can sleep and
         * cause a page fault, which makes it pointless to use GFP_NOFS
         * or GFP_ATOMIC.
         */
        p = kmalloc_track_caller(len, GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kfree(p);
                return ERR_PTR(-EFAULT);
        }

        return p;
}
EXPORT_SYMBOL(memdup_user);
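
/*
 * Example: a minimal, hypothetical ioctl-style handler copying in a
 * user buffer via memdup_user(); uarg, len and my_process() are
 * invented for illustration:
 *
 *      void *buf = memdup_user(uarg, len);
 *
 *      if (IS_ERR(buf))
 *              return PTR_ERR(buf);
 *      ret = my_process(buf, len);
 *      kfree(buf);
 *      return ret;
 */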

/**
 * __krealloc - like krealloc() but don't free @p.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * This function is like krealloc() except it never frees the originally
 * allocated buffer. Use this if you don't want to free the buffer
 * immediately, as is the case, for example, with RCU.
 */
void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{
        void *ret;
        size_t ks = 0;

        if (unlikely(!new_size))
                return ZERO_SIZE_PTR;

        if (p)
                ks = ksize(p);

        if (ks >= new_size)
                return (void *)p;

        ret = kmalloc_track_caller(new_size, flags);
        if (ret && p)
                memcpy(ret, p, ks);

        return ret;
}
EXPORT_SYMBOL(__krealloc);
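
/*
 * Example: a hedged sketch of an RCU-style resize where readers may
 * still hold the old buffer, so it is freed only after a grace
 * period; tbl and its buf pointer are invented for illustration:
 *
 *      new = __krealloc(old, new_size, GFP_KERNEL);
 *      if (!new)
 *              return -ENOMEM;
 *      rcu_assign_pointer(tbl->buf, new);
 *      if (new != old) {
 *              synchronize_rcu();
 *              kfree(old);
 *      }
 */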

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes. If @p is %NULL, krealloc()
 * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
        void *ret;

        if (unlikely(!new_size)) {
                kfree(p);
                return ZERO_SIZE_PTR;
        }

        ret = __krealloc(p, new_size, flags);
        if (ret && p != ret)
                kfree(p);

        return ret;
}
EXPORT_SYMBOL(krealloc);
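
/*
 * Example: a hedged sketch of growing a buffer with krealloc(); buf
 * and new_len are assumed context. On failure the original buffer is
 * left intact, so it must not be overwritten before the NULL check:
 *
 *      char *tmp;
 *
 *      tmp = krealloc(buf, new_len, GFP_KERNEL);
 *      if (!tmp)
 *              return -ENOMEM;         (buf is still valid here)
 *      buf = tmp;
 */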

/**
 * kzfree - like kfree but zero memory
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before it is freed.
 * If @p is %NULL, kzfree() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance sensitive code.
 */
void kzfree(const void *p)
{
        size_t ks;
        void *mem = (void *)p;

        if (unlikely(ZERO_OR_NULL_PTR(mem)))
                return;
        ks = ksize(mem);
        memset(mem, 0, ks);
        kfree(mem);
}
EXPORT_SYMBOL(kzfree);
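
/*
 * Example: a minimal sketch of kzfree() on security-sensitive data;
 * src_key and keylen are invented for illustration:
 *
 *      u8 *key = kmemdup(src_key, keylen, GFP_KERNEL);
 *
 *      if (!key)
 *              return -ENOMEM;
 *      ... use the key ...
 *      kzfree(key);            (clears the key material before freeing)
 */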

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Returns a pointer to the new kernel copy, or an ERR_PTR() on failure.
 */
char *strndup_user(const char __user *s, long n)
{
        char *p;
        long length;

        length = strnlen_user(s, n);

        if (!length)
                return ERR_PTR(-EFAULT);

        if (length > n)
                return ERR_PTR(-EINVAL);

        p = memdup_user(s, length);

        if (IS_ERR(p))
                return p;

        p[length - 1] = '\0';

        return p;
}
EXPORT_SYMBOL(strndup_user);
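
/*
 * Example: a hypothetical sketch of a syscall-style path copying a
 * user string (uname is an assumed user pointer):
 *
 *      char *name = strndup_user(uname, PAGE_SIZE);
 *
 *      if (IS_ERR(name))
 *              return PTR_ERR(name);
 *      ... use name ...
 *      kfree(name);
 */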

/*
 * Link @vma into @mm's VMA list after @prev, keeping vm_next/vm_prev
 * consistent; if @prev is NULL, @vma becomes the first VMA and
 * @rb_parent, when set, identifies its successor in the rb-tree.
 */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
                     struct vm_area_struct *prev, struct rb_node *rb_parent)
{
        struct vm_area_struct *next;

        vma->vm_prev = prev;
        if (prev) {
                next = prev->vm_next;
                prev->vm_next = vma;
        } else {
                mm->mmap = vma;
                if (rb_parent)
                        next = rb_entry(rb_parent,
                                        struct vm_area_struct, vm_rb);
                else
                        next = NULL;
        }
        vma->vm_next = next;
        if (next)
                next->vm_prev = vma;
}

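/*
 * Example: once linked this way, the list can be walked via vm_next
 * (a hedged sketch; mm->mmap_sem must be held at least for reading):
 *
 *      struct vm_area_struct *vma;
 *
 *      for (vma = mm->mmap; vma; vma = vma->vm_next)
 *              pr_debug("%lx-%lx\n", vma->vm_start, vma->vm_end);
 */
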
/* Check if the vma is being used as a stack by this task */
static int vm_is_stack_for_task(struct task_struct *t,
                                struct vm_area_struct *vma)
{
        return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Check if the vma is being used as a stack.
 * If @in_group is non-zero, check in the entire thread group; otherwise
 * just check the current task. Returns the pid of the task that the
 * vma is a stack for, or 0 if it is not.
 */
pid_t vm_is_stack(struct task_struct *task,
                  struct vm_area_struct *vma, int in_group)
{
        pid_t ret = 0;

        if (vm_is_stack_for_task(task, vma))
                return task->pid;

        if (in_group) {
                struct task_struct *t;

                rcu_read_lock();
                if (!pid_alive(task))
                        goto done;

                t = task;
                do {
                        if (vm_is_stack_for_task(t, vma)) {
                                ret = t->pid;
                                goto done;
                        }
                } while_each_thread(task, t);
done:
                rcu_read_unlock();
        }

        return ret;
}

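/*
 * Example: a hedged sketch of how a /proc-style reader might use
 * this to tag a stack VMA (the seq_file m is assumed context):
 *
 *      pid_t tid = vm_is_stack(task, vma, 1);
 *
 *      if (tid)
 *              seq_printf(m, "[stack:%d]", tid);
 */
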
#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
        mm->mmap_base = TASK_UNMAPPED_BASE;
        mm->get_unmapped_area = arch_get_unmapped_area;
        mm->unmap_area = arch_unmap_area;
}
#endif

/*
 * Like get_user_pages_fast() except it is IRQ-safe in that it won't
 * fall back to the regular GUP. If the architecture does not support
 * this function, it simply returns with no pages pinned.
 */
int __attribute__((weak)) __get_user_pages_fast(unsigned long start,
                                int nr_pages, int write, struct page **pages)
{
        return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However if the
 * pages have to be faulted in, it may turn out to be slightly slower so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __attribute__((weak)) get_user_pages_fast(unsigned long start,
                                int nr_pages, int write, struct page **pages)
{
        struct mm_struct *mm = current->mm;
        int ret;

        down_read(&mm->mmap_sem);
        ret = get_user_pages(current, mm, start, nr_pages,
                             write, 0, pages, NULL);
        up_read(&mm->mmap_sem);

        return ret;
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
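
/*
 * Example: a minimal, hypothetical sketch of pinning part of a user
 * buffer for kernel access (uaddr is an assumed user address):
 *
 *      struct page *pages[16];
 *      int i, got;
 *
 *      got = get_user_pages_fast(uaddr, 16, 1, pages);
 *      if (got < 0)
 *              return got;
 *      ... access the pinned pages, e.g. via kmap() ...
 *      for (i = 0; i < got; i++)
 *              put_page(pages[i]);
 */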

/* Tracepoint definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);