/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on an
 * integral number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks". The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via the zpool API.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation. It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header,
 * so NCHUNKS will be calculated to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y),
 * which is the maximum number of free chunks in a z3fold page; there will
 * also be 63 (or 62, respectively) freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
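
/*
 * Worked example of the chunk geometry above, assuming 4 KiB pages
 * (PAGE_SHIFT == 12) and CONFIG_DEBUG_SPINLOCK=n:
 *
 *	CHUNK_SHIFT	= 12 - 6 = 6
 *	CHUNK_SIZE	= 1 << 6 = 64 bytes
 *	TOTAL_CHUNKS	= 4096 >> 6 = 64
 *	ZHDR_SIZE_ALIGNED = 64	(the header fits in one chunk, per the
 *				 comment above)
 *	ZHDR_CHUNKS	= 1
 *	NCHUNKS		= (4096 - 64) >> 6 = 63
 *
 * So a z3fold page holds at most 63 chunks of payload, and the pool
 * maintains 63 per-CPU unbuddied freelists.
 */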
#define BUDDY_MASK	(0x3)
#define BUDDY_SHIFT	2
#define SLOTS_ALIGN	(0x40)

/*****************
 * Structures
 *****************/
struct z3fold_pool;
struct z3fold_ops {
	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX = LAST
};

struct z3fold_buddy_slots {
	/*
	 * we are using BUDDY_MASK in handle_to_buddy etc. so there should
	 * be enough slots to hold all possible variants
	 */
	unsigned long slot[BUDDY_MASK + 1];
	unsigned long pool; /* back link + flags */
};
#define HANDLE_FLAG_MASK	(0x03)

/*
 * struct z3fold_header - z3fold page metadata occupying first chunks of each
 *			z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the
 *			pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @work:		work_struct for page layout optimization
 * @slots:		pointer to the structure holding buddy slots
 * @cpu:		CPU which this page "belongs" to
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @start_middle:	index of the first chunk occupied by the middle buddy
 * @first_num:		the starting number (for the first handle)
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	struct work_struct work;
	struct z3fold_buddy_slots *slots;
	short cpu;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
};

/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:	pool name
 * @lock:	protects pool unbuddied/lru lists
 * @stale_lock:	protects pool stale page list
 * @unbuddied:	per-cpu array of lists tracking z3fold pages that contain at
 *		most 2 buddies; the list each z3fold page is added to depends
 *		on the size of its free region.
 * @lru:	list tracking the z3fold pages in LRU order by most recently
 *		added buddy.
 * @stale:	list of pages marked for freeing
 * @pages_nr:	number of z3fold pages in the pool.
 * @c_handle:	cache for z3fold_buddy_slots allocation
 * @ops:	pointer to a structure of user defined operations specified at
 *		pool creation time.
 * @zpool:	zpool driver
 * @zpool_ops:	zpool operations structure with an evict callback
 * @compact_wq:	workqueue for page layout background optimization
 * @release_wq:	workqueue for safe page release
 * @work:	work_struct for safe page release
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	const char *name;
	spinlock_t lock;
	spinlock_t stale_lock;
	struct list_head *unbuddied;
	struct list_head lru;
	struct list_head stale;
	atomic64_t pages_nr;
	struct kmem_cache *c_handle;
	const struct z3fold_ops *ops;
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
	struct workqueue_struct *compact_wq;
	struct workqueue_struct *release_wq;
	struct work_struct work;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
	NEEDS_COMPACTING,
	PAGE_STALE,
	PAGE_CLAIMED, /* by either reclaim or free */
};

/*****************
 * Helpers
 *****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}
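
/*
 * E.g. with 64-byte chunks, size_to_chunks(1) == 1, size_to_chunks(64) == 1
 * and size_to_chunks(65) == 2: a ceiling division by CHUNK_SIZE.
 */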

#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

static void compact_page_work(struct work_struct *w);

static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool)
{
	struct z3fold_buddy_slots *slots = kmem_cache_alloc(pool->c_handle,
							    GFP_KERNEL);

	if (slots) {
		memset(slots->slot, 0, sizeof(slots->slot));
		slots->pool = (unsigned long)pool;
	}

	return slots;
}

static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
{
	return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
}

static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
{
	return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
}

static inline void free_handle(unsigned long handle)
{
	struct z3fold_buddy_slots *slots;
	int i;
	bool is_free;

	if (handle & (1 << PAGE_HEADLESS))
		return;

	WARN_ON(*(unsigned long *)handle == 0);
	*(unsigned long *)handle = 0;
	slots = handle_to_slots(handle);
	is_free = true;
	for (i = 0; i <= BUDDY_MASK; i++) {
		if (slots->slot[i]) {
			is_free = false;
			break;
		}
	}

	if (is_free) {
		struct z3fold_pool *pool = slots_to_pool(slots);

		kmem_cache_free(pool->c_handle, slots);
	}
}

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page,
					struct z3fold_pool *pool)
{
	struct z3fold_header *zhdr = page_address(page);
	struct z3fold_buddy_slots *slots = alloc_slots(pool);

	if (!slots)
		return NULL;

	INIT_LIST_HEAD(&page->lru);
	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	clear_bit(PAGE_STALE, &page->private);
	clear_bit(PAGE_CLAIMED, &page->private);

	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->first_chunks = 0;
	zhdr->middle_chunks = 0;
	zhdr->last_chunks = 0;
	zhdr->first_num = 0;
	zhdr->start_middle = 0;
	zhdr->cpu = -1;
	zhdr->slots = slots;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_WORK(&zhdr->work, compact_page_work);
	return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page)
{
	__free_page(page);
}

/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
	return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}

/* Helper function to build the index */
static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
{
	return (bud + zhdr->first_num) & BUDDY_MASK;
}
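
/*
 * E.g. with first_num == 1, FIRST (1) maps to slot 2, MIDDLE (2) to slot 3
 * and LAST (3) wraps around to slot 0. first_num is incremented when
 * z3fold_compact_page() turns the middle buddy into the first one, so a
 * handle encoded for MIDDLE before that compaction decodes as FIRST
 * afterwards and stays valid.
 */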

/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	struct z3fold_buddy_slots *slots;
	unsigned long h = (unsigned long)zhdr;
	int idx = 0;

	/*
	 * For a headless page, its handle is its pointer with the extra
	 * PAGE_HEADLESS bit set
	 */
	if (bud == HEADLESS)
		return h | (1 << PAGE_HEADLESS);

	/* otherwise, return pointer to encoded handle */
	idx = __idx(zhdr, bud);
	h += idx;
	if (bud == LAST)
		h |= (zhdr->last_chunks << BUDDY_SHIFT);

	slots = zhdr->slots;
	slots->slot[idx] = h;
	return (unsigned long)&slots->slot[idx];
}

/* Returns the z3fold page where a given handle is stored */
static inline struct z3fold_header *handle_to_z3fold_header(unsigned long handle)
{
	unsigned long addr = handle;

	if (!(addr & (1 << PAGE_HEADLESS)))
		addr = *(unsigned long *)handle;

	return (struct z3fold_header *)(addr & PAGE_MASK);
}

/* only for LAST bud, returns zero otherwise */
static unsigned short handle_to_chunks(unsigned long handle)
{
	unsigned long addr = *(unsigned long *)handle;

	return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
}

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
 * but that doesn't matter because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr;
	unsigned long addr;

	WARN_ON(handle & (1 << PAGE_HEADLESS));
	addr = *(unsigned long *)handle;
	zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
	return (addr - zhdr->first_num) & BUDDY_MASK;
}
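
/*
 * E.g. a LAST handle encoded while first_num was 3 stores index
 * (3 + 3) & 3 == 2 in its low bits; decoding yields (2 - 3) & 3 == 3
 * == LAST again, i.e. the subtraction wraps just like the addition did.
 */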

static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
	return slots_to_pool(zhdr->slots);
}

static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
	struct page *page = virt_to_page(zhdr);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	WARN_ON(!list_empty(&zhdr->buddy));
	set_bit(PAGE_STALE, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	spin_lock(&pool->lock);
	if (!list_empty(&page->lru))
		list_del(&page->lru);
	spin_unlock(&pool->lock);
	if (locked)
		z3fold_page_unlock(zhdr);
	spin_lock(&pool->stale_lock);
	list_add(&zhdr->buddy, &pool->stale);
	queue_work(pool->release_wq, &pool->work);
	spin_unlock(&pool->stale_lock);
}

static void __attribute__((__unused__))
			release_z3fold_page(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	__release_z3fold_page(zhdr, false);
}

static void release_z3fold_page_locked(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
					       refcount);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void free_pages_work(struct work_struct *w)
{
	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

	spin_lock(&pool->stale_lock);
	while (!list_empty(&pool->stale)) {
		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
						struct z3fold_header, buddy);
		struct page *page = virt_to_page(zhdr);

		list_del(&zhdr->buddy);
		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
			continue;
		spin_unlock(&pool->stale_lock);
		cancel_work_sync(&zhdr->work);
		free_z3fold_page(page);
		cond_resched();
		spin_lock(&pool->stale_lock);
	}
	spin_unlock(&pool->stale_lock);
}

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;
	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);
		nfree = max(nfree_before, nfree_after);
	} else
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	return nfree;
}
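
/*
 * Worked example (64 total chunks, 1 header chunk): a page with only a
 * middle buddy of 10 chunks at start_middle == 20 has 20 - 1 == 19 free
 * chunks before it and 64 - (20 + 10) == 34 after it, so num_free_chunks()
 * reports 34. With no middle buddy, e.g. first_chunks == 5 and
 * last_chunks == 8, it reports 63 - 5 - 8 == 50.
 */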

/* Add to the appropriate unbuddied list */
static inline void add_to_unbuddied(struct z3fold_pool *pool,
				struct z3fold_header *zhdr)
{
	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
			zhdr->middle_chunks == 0) {
		struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);

		int freechunks = num_free_chunks(zhdr);
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[freechunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
		put_cpu_ptr(pool->unbuddied);
	}
}

static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				unsigned short dst_chunk)
{
	void *beg = zhdr;
	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}

#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;
		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}
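
/*
 * Summary of the three compaction cases above, for a page whose middle
 * buddy is movable:
 *
 *	1. middle is the only buddy: it becomes the new first buddy
 *	   (first_num is bumped so old MIDDLE handles decode as FIRST);
 *	2. first + middle: middle slides down next to first, if that
 *	   recovers at least BIG_CHUNK_GAP chunks;
 *	3. middle + last: middle slides up next to last, under the same
 *	   condition.
 *
 * In cases 2 and 3 the single free region grows, so the page lands on a
 * higher unbuddied list and can host a larger new buddy.
 */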

static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	struct page *page;

	page = virt_to_page(zhdr);
	if (locked)
		WARN_ON(z3fold_page_trylock(zhdr));
	else
		z3fold_page_lock(zhdr);
	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}

	z3fold_compact_page(zhdr);
	add_to_unbuddied(pool, zhdr);
	z3fold_page_unlock(zhdr);
}

static void compact_page_work(struct work_struct *w)
{
	struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
						work);

	do_compact_page(zhdr, false);
}

/* returns _locked_ z3fold page header or NULL */
static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
						size_t size, bool can_sleep)
{
	struct z3fold_header *zhdr = NULL;
	struct page *page;
	struct list_head *unbuddied;
	int chunks = size_to_chunks(size), i;

lookup:
	/* First, try to find an unbuddied z3fold page. */
	unbuddied = get_cpu_ptr(pool->unbuddied);
	for_each_unbuddied_list(i, chunks) {
		struct list_head *l = &unbuddied[i];

		zhdr = list_first_entry_or_null(READ_ONCE(l),
					struct z3fold_header, buddy);

		if (!zhdr)
			continue;

		/* Re-check under lock. */
		spin_lock(&pool->lock);
		l = &unbuddied[i];
		if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
				struct z3fold_header, buddy)) ||
		    !z3fold_page_trylock(zhdr)) {
			spin_unlock(&pool->lock);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}
		list_del_init(&zhdr->buddy);
		zhdr->cpu = -1;
		spin_unlock(&pool->lock);

		page = virt_to_page(zhdr);
		if (test_bit(NEEDS_COMPACTING, &page->private)) {
			z3fold_page_unlock(zhdr);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}

		/*
		 * this page could not be removed from its unbuddied
		 * list while pool lock was held, and then we've taken
		 * page lock so kref_put could not be called before
		 * we got here, so it's safe to just call kref_get()
		 */
		kref_get(&zhdr->refcount);
		break;
	}
	put_cpu_ptr(pool->unbuddied);

	if (!zhdr) {
		int cpu;

		/* look for _exact_ match on other cpus' lists */
		for_each_online_cpu(cpu) {
			struct list_head *l;

			unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
			spin_lock(&pool->lock);
			l = &unbuddied[chunks];

			zhdr = list_first_entry_or_null(READ_ONCE(l),
						struct z3fold_header, buddy);

			if (!zhdr || !z3fold_page_trylock(zhdr)) {
				spin_unlock(&pool->lock);
				zhdr = NULL;
				continue;
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			spin_unlock(&pool->lock);

			page = virt_to_page(zhdr);
			if (test_bit(NEEDS_COMPACTING, &page->private)) {
				z3fold_page_unlock(zhdr);
				zhdr = NULL;
				if (can_sleep)
					cond_resched();
				continue;
			}
			kref_get(&zhdr->refcount);
			break;
		}
	}

	return zhdr;
}

/*
 * API Functions
 */

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:	pool name
 * @gfp:	gfp flags when allocating the z3fold pool structure
 * @ops:	user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
		const struct z3fold_ops *ops)
{
	struct z3fold_pool *pool = NULL;
	int i, cpu;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		goto out;
	pool->c_handle = kmem_cache_create("z3fold_handle",
				sizeof(struct z3fold_buddy_slots),
				SLOTS_ALIGN, 0, NULL);
	if (!pool->c_handle)
		goto out_c;
	spin_lock_init(&pool->lock);
	spin_lock_init(&pool->stale_lock);
	pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
	if (!pool->unbuddied)
		goto out_pool;
	for_each_possible_cpu(cpu) {
		struct list_head *unbuddied =
				per_cpu_ptr(pool->unbuddied, cpu);
		for_each_unbuddied_list(i, 0)
			INIT_LIST_HEAD(&unbuddied[i]);
	}
	INIT_LIST_HEAD(&pool->lru);
	INIT_LIST_HEAD(&pool->stale);
	atomic64_set(&pool->pages_nr, 0);
	pool->name = name;
	pool->compact_wq = create_singlethread_workqueue(pool->name);
	if (!pool->compact_wq)
		goto out_unbuddied;
	pool->release_wq = create_singlethread_workqueue(pool->name);
	if (!pool->release_wq)
		goto out_wq;
	INIT_WORK(&pool->work, free_pages_work);
	pool->ops = ops;
	return pool;

out_wq:
	destroy_workqueue(pool->compact_wq);
out_unbuddied:
	free_percpu(pool->unbuddied);
out_pool:
	kmem_cache_destroy(pool->c_handle);
out_c:
	kfree(pool);
out:
	return NULL;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:	the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	kmem_cache_destroy(pool->c_handle);
	destroy_workqueue(pool->release_wq);
	destroy_workqueue(pool->compact_wq);
	kfree(pool);
}

/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request. A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as z3fold pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
 * a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks = size_to_chunks(size);
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	enum buddy bud;
	bool can_sleep = gfpflags_allow_blocking(gfp);

	if (!size || (gfp & __GFP_HIGHMEM))
		return -EINVAL;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		bud = HEADLESS;
	else {
retry:
		zhdr = __z3fold_alloc(pool, size, can_sleep);
		if (zhdr) {
			if (zhdr->first_chunks == 0) {
				if (zhdr->middle_chunks != 0 &&
				    chunks >= zhdr->start_middle)
					bud = LAST;
				else
					bud = FIRST;
			} else if (zhdr->last_chunks == 0)
				bud = LAST;
			else if (zhdr->middle_chunks == 0)
				bud = MIDDLE;
			else {
				if (kref_put(&zhdr->refcount,
					     release_z3fold_page_locked))
					atomic64_dec(&pool->pages_nr);
				else
					z3fold_page_unlock(zhdr);
				pr_err("No free chunks in unbuddied\n");
				WARN_ON(1);
				goto retry;
			}
			page = virt_to_page(zhdr);
			goto found;
		}
		bud = FIRST;
	}

	page = NULL;
	if (can_sleep) {
		spin_lock(&pool->stale_lock);
		zhdr = list_first_entry_or_null(&pool->stale,
						struct z3fold_header, buddy);
		/*
		 * Before allocating a page, let's see if we can take one from
		 * the stale pages list. cancel_work_sync() can sleep so we
		 * limit this case to the contexts where we can sleep
		 */
		if (zhdr) {
			list_del(&zhdr->buddy);
			spin_unlock(&pool->stale_lock);
			cancel_work_sync(&zhdr->work);
			page = virt_to_page(zhdr);
		} else {
			spin_unlock(&pool->stale_lock);
		}
	}
	if (!page)
		page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;

	zhdr = init_z3fold_page(page, pool);
	if (!zhdr) {
		__free_page(page);
		return -ENOMEM;
	}
	atomic64_inc(&pool->pages_nr);

	if (bud == HEADLESS) {
		set_bit(PAGE_HEADLESS, &page->private);
		goto headless;
	}
	z3fold_page_lock(zhdr);

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else if (bud == LAST)
		zhdr->last_chunks = chunks;
	else {
		zhdr->middle_chunks = chunks;
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
	}
	add_to_unbuddied(pool, zhdr);

headless:
	spin_lock(&pool->lock);
	/* Add/move z3fold page to beginning of LRU */
	if (!list_empty(&page->lru))
		list_del(&page->lru);

	list_add(&page->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);
	if (bud != HEADLESS)
		z3fold_page_unlock(zhdr);

	return 0;
}
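
/*
 * A minimal usage sketch of the allocation path above (hypothetical
 * caller, not part of this file; in-tree users go through the zpool API
 * instead; my_evict_ops and compressed_data are assumed to exist):
 *
 *	unsigned long handle;
 *	struct z3fold_pool *pool =
 *		z3fold_create_pool("test", GFP_KERNEL, &my_evict_ops);
 *
 *	if (!z3fold_alloc(pool, 1024, GFP_KERNEL, &handle)) {
 *		void *addr = z3fold_map(pool, handle);
 *
 *		memcpy(addr, compressed_data, 1024);
 *		z3fold_unmap(pool, handle);
 *		...
 *		z3fold_free(pool, handle);
 *	}
 *
 * With 64-byte chunks, a 1024-byte request occupies 16 chunks and will
 * preferably land in an existing page whose free region is 16+ chunks.
 */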

/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PAGE_CLAIMED flag being set, this function
 * only sets the first|last_chunks to 0. The page is actually freed
 * once both buddies are evicted (see z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy bud;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		/* if a headless page is under reclaim, just leave.
		 * NB: we use test_and_set_bit for a reason: if the bit
		 * has not been set before, we release this page
		 * immediately so we don't care about its value any more.
		 */
		if (!test_and_set_bit(PAGE_CLAIMED, &page->private)) {
			spin_lock(&pool->lock);
			list_del(&page->lru);
			spin_unlock(&pool->lock);
			free_z3fold_page(page);
			atomic64_dec(&pool->pages_nr);
		}
		return;
	}

	/* Non-headless case */
	z3fold_page_lock(zhdr);
	bud = handle_to_buddy(handle);

	switch (bud) {
	case FIRST:
		zhdr->first_chunks = 0;
		break;
	case MIDDLE:
		zhdr->middle_chunks = 0;
		break;
	case LAST:
		zhdr->last_chunks = 0;
		break;
	default:
		pr_err("%s: unknown bud %d\n", __func__, bud);
		WARN_ON(1);
		z3fold_page_unlock(zhdr);
		return;
	}

	free_handle(handle);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	if (test_bit(PAGE_CLAIMED, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}
	if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}
	if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
		spin_lock(&pool->lock);
		list_del_init(&zhdr->buddy);
		spin_unlock(&pool->lock);
		zhdr->cpu = -1;
		kref_get(&zhdr->refcount);
		do_compact_page(zhdr, true);
		return;
	}
	kref_get(&zhdr->refcount);
	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
	z3fold_page_unlock(zhdr);
}

/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle can not be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the PAGE_CLAIMED flag being set on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
	int i, ret = 0;
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	struct list_head *pos;
	unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		if (list_empty(&pool->lru)) {
			spin_unlock(&pool->lock);
			return -EINVAL;
		}
		list_for_each_prev(pos, &pool->lru) {
			page = list_entry(pos, struct page, lru);

			/* this bit could have been set by free, in which case
			 * we pass over to the next page in the pool.
			 */
			if (test_and_set_bit(PAGE_CLAIMED, &page->private))
				continue;

			zhdr = page_address(page);
			if (test_bit(PAGE_HEADLESS, &page->private))
				break;

			if (!z3fold_page_trylock(zhdr)) {
				zhdr = NULL;
				continue; /* can't evict at this point */
			}
			kref_get(&zhdr->refcount);
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			break;
		}

		if (!zhdr)
			break;

		list_del_init(&page->lru);
		spin_unlock(&pool->lock);

		if (!test_bit(PAGE_HEADLESS, &page->private)) {
			/*
			 * We need to encode the handles before unlocking,
			 * since we can race with free that will set
			 * (first|last)_chunks to 0
			 */
			first_handle = 0;
			last_handle = 0;
			middle_handle = 0;
			if (zhdr->first_chunks)
				first_handle = encode_handle(zhdr, FIRST);
			if (zhdr->middle_chunks)
				middle_handle = encode_handle(zhdr, MIDDLE);
			if (zhdr->last_chunks)
				last_handle = encode_handle(zhdr, LAST);
			/*
			 * it's safe to unlock here because we hold a
			 * reference to this page
			 */
			z3fold_page_unlock(zhdr);
		} else {
			first_handle = encode_handle(zhdr, HEADLESS);
			last_handle = middle_handle = 0;
		}

		/* Issue the eviction callback(s) */
		if (middle_handle) {
			ret = pool->ops->evict(pool, middle_handle);
			if (ret)
				goto next;
		}
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
next:
		if (test_bit(PAGE_HEADLESS, &page->private)) {
			if (ret == 0) {
				free_z3fold_page(page);
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
		} else {
			z3fold_page_lock(zhdr);
			clear_bit(PAGE_CLAIMED, &page->private);
			if (kref_put(&zhdr->refcount,
					release_z3fold_page_locked)) {
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			/*
			 * if we are here, the page is still not completely
			 * free. Take the global pool lock then to be able
			 * to add it back to the lru list
			 */
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			z3fold_page_unlock(zhdr);
		}

		/* We started off locked, so we need to lock the pool back */
		spin_lock(&pool->lock);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}
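
/*
 * A minimal sketch of an eviction handler matching the contract described
 * above (hypothetical, for illustration only; real users are hooked up
 * through the zpool evict callback below, and write_back_to_swap() is an
 * assumed helper, not a real function):
 *
 *	static int my_evict(struct z3fold_pool *pool, unsigned long handle)
 *	{
 *		void *addr = z3fold_map(pool, handle);
 *		int err = write_back_to_swap(addr);
 *
 *		z3fold_unmap(pool, handle);
 *		if (err)
 *			return err;	// non-zero: z3fold re-lists the page
 *		z3fold_free(pool, handle);	// required on success
 *		return 0;
 *	}
 */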
| 1111 | |
| 1112 | /** |
| 1113 | * z3fold_map() - maps the allocation associated with the given handle |
| 1114 | * @pool: pool in which the allocation resides |
| 1115 | * @handle: handle associated with the allocation to be mapped |
| 1116 | * |
| 1117 | * Extracts the buddy number from handle and constructs the pointer to the |
| 1118 | * correct starting chunk within the page. |
| 1119 | * |
| 1120 | * Returns: a pointer to the mapped allocation |
| 1121 | */ |
| 1122 | static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle) |
| 1123 | { |
| 1124 | struct z3fold_header *zhdr; |
| 1125 | struct page *page; |
| 1126 | void *addr; |
| 1127 | enum buddy buddy; |
| 1128 | |
Vitaly Wool | 9a001fc | 2016-05-20 16:58:30 -0700 | [diff] [blame] | 1129 | zhdr = handle_to_z3fold_header(handle); |
| 1130 | addr = zhdr; |
| 1131 | page = virt_to_page(zhdr); |
| 1132 | |
| 1133 | if (test_bit(PAGE_HEADLESS, &page->private)) |
| 1134 | goto out; |
| 1135 | |
Vitaly Wool | 2f1e5e4 | 2017-02-24 14:57:23 -0800 | [diff] [blame] | 1136 | z3fold_page_lock(zhdr); |
Vitaly Wool | 9a001fc | 2016-05-20 16:58:30 -0700 | [diff] [blame] | 1137 | buddy = handle_to_buddy(handle); |
| 1138 | switch (buddy) { |
| 1139 | case FIRST: |
| 1140 | addr += ZHDR_SIZE_ALIGNED; |
| 1141 | break; |
| 1142 | case MIDDLE: |
| 1143 | addr += zhdr->start_middle << CHUNK_SHIFT; |
| 1144 | set_bit(MIDDLE_CHUNK_MAPPED, &page->private); |
| 1145 | break; |
| 1146 | case LAST: |
Vitaly Wool | ca0246b | 2018-11-16 15:07:56 -0800 | [diff] [blame] | 1147 | addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT); |
Vitaly Wool | 9a001fc | 2016-05-20 16:58:30 -0700 | [diff] [blame] | 1148 | break; |
| 1149 | default: |
| 1150 | pr_err("unknown buddy id %d\n", buddy); |
| 1151 | WARN_ON(1); |
| 1152 | addr = NULL; |
| 1153 | break; |
| 1154 | } |
Vitaly Wool | 2f1e5e4 | 2017-02-24 14:57:23 -0800 | [diff] [blame] | 1155 | |
| 1156 | z3fold_page_unlock(zhdr); |
Vitaly Wool | 9a001fc | 2016-05-20 16:58:30 -0700 | [diff] [blame] | 1157 | out: |
Vitaly Wool | 9a001fc | 2016-05-20 16:58:30 -0700 | [diff] [blame] | 1158 | return addr; |
| 1159 | } |
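
/*
 * Worked example for the buddy arithmetic above, assuming PAGE_SIZE == 4096
 * and NCHUNKS_ORDER == 6 (so CHUNK_SHIFT == 6, CHUNK_SIZE == 64): a FIRST
 * buddy always maps right behind the header, at page + ZHDR_SIZE_ALIGNED;
 * a LAST buddy whose handle encodes 5 chunks maps at
 * page + 4096 - (5 << 6) = page + 3776, i.e. flush against the end of the
 * page; a MIDDLE buddy maps start_middle chunks into the page.
 */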
| 1160 | |
| 1161 | /** |
| 1162 | * z3fold_unmap() - unmaps the allocation associated with the given handle |
| 1163 | * @pool: pool in which the allocation resides |
| 1164 | * @handle: handle associated with the allocation to be unmapped |
| 1165 | */ |
| 1166 | static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle) |
| 1167 | { |
| 1168 | struct z3fold_header *zhdr; |
| 1169 | struct page *page; |
| 1170 | enum buddy buddy; |
| 1171 | |
Vitaly Wool | 9a001fc | 2016-05-20 16:58:30 -0700 | [diff] [blame] | 1172 | zhdr = handle_to_z3fold_header(handle); |
| 1173 | page = virt_to_page(zhdr); |
| 1174 | |
Vitaly Wool | 2f1e5e4 | 2017-02-24 14:57:23 -0800 | [diff] [blame] | 1175 | if (test_bit(PAGE_HEADLESS, &page->private)) |
Vitaly Wool | 9a001fc | 2016-05-20 16:58:30 -0700 | [diff] [blame] | 1176 | return; |
Vitaly Wool | 9a001fc | 2016-05-20 16:58:30 -0700 | [diff] [blame] | 1177 | |
Vitaly Wool | 2f1e5e4 | 2017-02-24 14:57:23 -0800 | [diff] [blame] | 1178 | z3fold_page_lock(zhdr); |
Vitaly Wool | 9a001fc | 2016-05-20 16:58:30 -0700 | [diff] [blame] | 1179 | buddy = handle_to_buddy(handle); |
| 1180 | if (buddy == MIDDLE) |
| 1181 | clear_bit(MIDDLE_CHUNK_MAPPED, &page->private); |
Vitaly Wool | 2f1e5e4 | 2017-02-24 14:57:23 -0800 | [diff] [blame] | 1182 | z3fold_page_unlock(zhdr); |
Vitaly Wool | 9a001fc | 2016-05-20 16:58:30 -0700 | [diff] [blame] | 1183 | } |
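
/*
 * Example: the usual map/copy/unmap pattern as a caller might use it (a
 * sketch; "handle" is assumed to come from a successful z3fold_alloc() of
 * at least "len" bytes):
 *
 *	void *dst = z3fold_map(pool, handle);
 *
 *	memcpy(dst, src, len);
 *	z3fold_unmap(pool, handle);
 *
 * Mappings should be short-lived: while a MIDDLE buddy is mapped, the
 * MIDDLE_CHUNK_MAPPED bit set in z3fold_map() prevents compaction from
 * moving the middle object until z3fold_unmap() clears it.
 */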
| 1184 | |
| 1185 | /** |
| 1186 | * z3fold_get_pool_size() - gets the z3fold pool size in pages |
| 1187 | * @pool: pool whose size is being queried |
| 1188 | * |
Vitaly Wool | 12d59ae | 2017-02-24 14:57:15 -0800 | [diff] [blame] | 1189 | * Returns: size in pages of the given pool. |
Vitaly Wool | 9a001fc | 2016-05-20 16:58:30 -0700 | [diff] [blame] | 1190 | */ |
| 1191 | static u64 z3fold_get_pool_size(struct z3fold_pool *pool) |
| 1192 | { |
Vitaly Wool | 12d59ae | 2017-02-24 14:57:15 -0800 | [diff] [blame] | 1193 | return atomic64_read(&pool->pages_nr); |
Vitaly Wool | 9a001fc | 2016-05-20 16:58:30 -0700 | [diff] [blame] | 1194 | } |
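
/*
 * Example: the zpool glue below reports the pool size in bytes by scaling
 * this page count by PAGE_SIZE; e.g. with 1000 resident pages and a
 * 4096-byte page, zpool_get_total_size() reports 4096000 bytes.
 */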
| 1195 | |
| 1196 | /***************** |
| 1197 | * zpool |
| 1198 | ****************/ |
| 1199 | |
| 1200 | static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle) |
| 1201 | { |
| 1202 | if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict) |
| 1203 | return pool->zpool_ops->evict(pool->zpool, handle); |
| 1204 | return -ENOENT; |
| 1206 | } |
| 1207 | |
| 1208 | static const struct z3fold_ops z3fold_zpool_ops = { |
| 1209 | .evict = z3fold_zpool_evict |
| 1210 | }; |
| 1211 | |
| 1212 | static void *z3fold_zpool_create(const char *name, gfp_t gfp, |
| 1213 | const struct zpool_ops *zpool_ops, |
| 1214 | struct zpool *zpool) |
| 1215 | { |
| 1216 | struct z3fold_pool *pool; |
| 1217 | |
Vitaly Wool | d30561c | 2017-09-06 16:24:47 -0700 | [diff] [blame] | 1218 | pool = z3fold_create_pool(name, gfp, |
| 1219 | zpool_ops ? &z3fold_zpool_ops : NULL); |
Vitaly Wool | 9a001fc | 2016-05-20 16:58:30 -0700 | [diff] [blame] | 1220 | if (pool) { |
| 1221 | pool->zpool = zpool; |
| 1222 | pool->zpool_ops = zpool_ops; |
| 1223 | } |
| 1224 | return pool; |
| 1225 | } |
| 1226 | |
| 1227 | static void z3fold_zpool_destroy(void *pool) |
| 1228 | { |
| 1229 | z3fold_destroy_pool(pool); |
| 1230 | } |
| 1231 | |
| 1232 | static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp, |
| 1233 | unsigned long *handle) |
| 1234 | { |
| 1235 | return z3fold_alloc(pool, size, gfp, handle); |
| 1236 | } |
| | |
| 1237 | static void z3fold_zpool_free(void *pool, unsigned long handle) |
| 1238 | { |
| 1239 | z3fold_free(pool, handle); |
| 1240 | } |
| 1241 | |
| 1242 | static int z3fold_zpool_shrink(void *pool, unsigned int pages, |
| 1243 | unsigned int *reclaimed) |
| 1244 | { |
| 1245 | unsigned int total = 0; |
| 1246 | int ret = -EINVAL; |
| 1247 | |
| 1248 | while (total < pages) { |
| 1249 | ret = z3fold_reclaim_page(pool, 8); |
| 1250 | if (ret < 0) |
| 1251 | break; |
| 1252 | total++; |
| 1253 | } |
| 1254 | |
| 1255 | if (reclaimed) |
| 1256 | *reclaimed = total; |
| 1257 | |
| 1258 | return ret; |
| 1259 | } |
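
/*
 * Example: how a zpool user might drive the shrink path above (a sketch;
 * zpool_shrink() is the generic zpool entry point that lands here for a
 * z3fold pool). Each z3fold_reclaim_page() call retries up to 8 LRU pages
 * internally, and the loop above stops at the first page that cannot be
 * reclaimed, so *reclaimed may end up smaller than the count requested:
 *
 *	unsigned int reclaimed;
 *
 *	if (zpool_shrink(zpool, 4, &reclaimed))
 *		pr_debug("reclaimed only %u of 4 pages\n", reclaimed);
 */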
| 1260 | |
| 1261 | static void *z3fold_zpool_map(void *pool, unsigned long handle, |
| 1262 | enum zpool_mapmode mm) |
| 1263 | { |
| 1264 | return z3fold_map(pool, handle); |
| 1265 | } |
| | |
| 1266 | static void z3fold_zpool_unmap(void *pool, unsigned long handle) |
| 1267 | { |
| 1268 | z3fold_unmap(pool, handle); |
| 1269 | } |
| 1270 | |
| 1271 | static u64 z3fold_zpool_total_size(void *pool) |
| 1272 | { |
| 1273 | return z3fold_get_pool_size(pool) * PAGE_SIZE; |
| 1274 | } |
| 1275 | |
| 1276 | static struct zpool_driver z3fold_zpool_driver = { |
| 1277 | .type = "z3fold", |
| 1278 | .owner = THIS_MODULE, |
| 1279 | .create = z3fold_zpool_create, |
| 1280 | .destroy = z3fold_zpool_destroy, |
| 1281 | .malloc = z3fold_zpool_malloc, |
| 1282 | .free = z3fold_zpool_free, |
| 1283 | .shrink = z3fold_zpool_shrink, |
| 1284 | .map = z3fold_zpool_map, |
| 1285 | .unmap = z3fold_zpool_unmap, |
| 1286 | .total_size = z3fold_zpool_total_size, |
| 1287 | }; |
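
/*
 * Example: creating and using a z3fold-backed pool through the generic
 * zpool API, roughly as zswap does (a sketch; "my_ops", "buf" and "len"
 * are assumptions, and error handling is trimmed):
 *
 *	struct zpool *zpool;
 *	unsigned long handle;
 *
 *	zpool = zpool_create_pool("z3fold", "example", GFP_KERNEL, &my_ops);
 *	if (zpool_malloc(zpool, len, GFP_KERNEL, &handle) == 0) {
 *		memcpy(zpool_map_handle(zpool, handle, ZPOOL_MM_WO), buf, len);
 *		zpool_unmap_handle(zpool, handle);
 *	}
 *
 * The MODULE_ALIAS below lets zpool_create_pool() load this module on
 * demand when the "z3fold" type is requested.
 */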
| 1288 | |
| 1289 | MODULE_ALIAS("zpool-z3fold"); |
| 1290 | |
| 1291 | static int __init init_z3fold(void) |
| 1292 | { |
Vitaly Wool | ede9321 | 2017-02-24 14:57:17 -0800 | [diff] [blame] | 1293 | /* Make sure the z3fold header is not larger than the page size */ |
| 1294 | BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE); |
Vitaly Wool | 9a001fc | 2016-05-20 16:58:30 -0700 | [diff] [blame] | 1295 | zpool_register_driver(&z3fold_zpool_driver); |
| 1296 | |
| 1297 | return 0; |
| 1298 | } |
| 1299 | |
| 1300 | static void __exit exit_z3fold(void) |
| 1301 | { |
| 1302 | zpool_unregister_driver(&z3fold_zpool_driver); |
| 1303 | } |
| 1304 | |
| 1305 | module_init(init_z3fold); |
| 1306 | module_exit(exit_z3fold); |
| 1307 | |
| 1308 | MODULE_LICENSE("GPL"); |
| 1309 | MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>"); |
| 1310 | MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages"); |