#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>

/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
#define AS_EIO		(__GFP_BITS_SHIFT + 0)	/* IO error on async write */
#define AS_ENOSPC	(__GFP_BITS_SHIFT + 1)	/* ENOSPC on async write */

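/*
 * The gfp mask that pages for this mapping should be allocated with,
 * stored in the low __GFP_BITS_SHIFT bits of mapping->flags.
 */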
static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
				(__force unsigned long)mask;
}

/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for the same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
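/*
 * PAGE_CACHE_ALIGN() rounds an address up to the next page cache boundary,
 * e.g. with 4K pages PAGE_CACHE_ALIGN(0x1001) == 0x2000 and
 * PAGE_CACHE_ALIGN(0x2000) == 0x2000.
 */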

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, int cold);

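/*
 * Allocate a page to go into the page cache, using the mapping's gfp mask.
 * On NUMA the allocators are defined out of line; otherwise they are thin
 * wrappers around alloc_pages().
 */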
#ifdef CONFIG_NUMA
extern struct page *page_cache_alloc(struct address_space *x);
extern struct page *page_cache_alloc_cold(struct address_space *x);
#else
static inline struct page *page_cache_alloc(struct address_space *x)
{
	return alloc_pages(mapping_gfp_mask(x), 0);
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0);
}
#endif

typedef int filler_t(void *, struct page *);

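/*
 * Page cache lookup.  find_get_page() returns the page at @index with an
 * elevated reference count, or NULL if it is not present.  find_lock_page()
 * additionally returns the page locked.  The gang lookups fill @pages and
 * return the number of pages found.
 */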
extern struct page * find_get_page(struct address_space *mapping,
				unsigned long index);
extern struct page * find_lock_page(struct address_space *mapping,
				unsigned long index);
extern __deprecated_for_modules struct page * find_trylock_page(
			struct address_space *mapping, unsigned long index);
extern struct page * find_or_create_page(struct address_space *mapping,
				unsigned long index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping, unsigned long index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

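/*
 * Like grab_cache_page(), but never blocks: returns NULL instead of waiting
 * if the page cannot be obtained and locked immediately.
 */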
extern struct page * grab_cache_page_nowait(struct address_space *mapping,
				unsigned long index);
extern struct page * read_cache_page(struct address_space *mapping,
				unsigned long index, filler_t *filler,
				void *data);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

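/*
 * Read a page into @mapping at @index, using the mapping's ->readpage
 * method as the filler.  @data is passed through to the filler.
 */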
static inline struct page *read_mapping_page(struct address_space *mapping,
					     unsigned long index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}

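/*
 * Insert a newly allocated page into the page cache at @index.  The _lru
 * variant also puts the page on the LRU lists.  Both return 0 on success
 * or a negative errno (e.g. -EEXIST if a page is already present there).
 */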
int add_to_page_cache(struct page *page, struct address_space *mapping,
				unsigned long index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				unsigned long index, gfp_t gfp_mask);
extern void remove_from_page_cache(struct page *page);
extern void __remove_from_page_cache(struct page *page);

extern atomic_t nr_pagecache;

#ifdef CONFIG_SMP

#define PAGECACHE_ACCT_THRESHOLD	max(16, NR_CPUS * 2)
DECLARE_PER_CPU(long, nr_pagecache_local);

/*
 * pagecache_acct implements approximate accounting for pagecache.
 * vm_enough_memory() does not need high accuracy. Writers will keep
 * an offset in their per-cpu arena and will spill that into the
 * global count whenever the absolute value of the local count
 * exceeds the counter's threshold.
 *
 * MUST be protected from preemption.
 * current protection is mapping->page_lock.
 */
static inline void pagecache_acct(int count)
{
	long *local;

	local = &__get_cpu_var(nr_pagecache_local);
	*local += count;
	if (*local > PAGECACHE_ACCT_THRESHOLD || *local < -PAGECACHE_ACCT_THRESHOLD) {
		atomic_add(*local, &nr_pagecache);
		*local = 0;
	}
}

#else

static inline void pagecache_acct(int count)
{
	atomic_add(count, &nr_pagecache);
}
#endif

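/*
 * Approximate number of pages in the page cache.  The per-cpu deltas can
 * transiently drive the global counter negative, so clamp the result at 0.
 */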
static inline unsigned long get_page_cache_size(void)
{
	int ret = atomic_read(&nr_pagecache);
	if (unlikely(ret < 0))
		ret = 0;
	return ret;
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

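/*
 * Page cache index, within the object backing @vma, that corresponds to
 * the user virtual address @address.
 */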
static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}

extern void FASTCALL(__lock_page(struct page *page));
extern void FASTCALL(unlock_page(struct page *page));

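/*
 * lock_page() may sleep: the fast path uses a test-and-set on PG_locked,
 * and falls back to __lock_page() to wait if the page is already locked.
 */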
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (TestSetPageLocked(page))
		__lock_page(page);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void FASTCALL(wait_on_page_bit(struct page *page, int bit_nr));

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}

/*
 * Wait for a page to complete writeback
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);

/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}

static inline void fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			__get_user(c, end);
	}
}

#endif /* _LINUX_PAGEMAP_H */