// SPDX-License-Identifier: GPL-2.0
/*
 * VMware Balloon driver.
 *
 * Copyright (C) 2000-2018, VMware, Inc. All Rights Reserved.
 *
 * This is the VMware physical memory management driver for Linux. The
 * driver acts like a "balloon" that can be inflated to reclaim physical
 * pages by reserving them in the guest and invalidating them in the
 * monitor, freeing up the underlying machine pages so they can be
 * allocated to other guests. The balloon can also be deflated to allow
 * the guest to use more physical memory. Higher level policies can
 * control the sizes of balloons in VMs in order to manage physical
 * memory resources.
 */

//#define DEBUG
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <asm/hypervisor.h>

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
MODULE_VERSION("1.5.0.0-k");
MODULE_ALIAS("dmi:*:svnVMware*:*");
MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");

/*
 * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't
 * allow wait (__GFP_RECLAIM) for NOSLEEP page allocations. Use
 * __GFP_NOWARN to suppress page allocation failure warnings.
 */
#define VMW_PAGE_ALLOC_NOSLEEP		(__GFP_HIGHMEM|__GFP_NOWARN)

/*
 * Use GFP_HIGHUSER when executing in a separate kernel thread
 * context and the allocation can sleep. This is less stressful to
 * the guest memory system, since it allows the thread to block
 * while memory is reclaimed, and won't take pages from emergency
 * low-memory pools.
 */
#define VMW_PAGE_ALLOC_CANSLEEP		(GFP_HIGHUSER)

/* Maximum number of refused pages we accumulate during inflation cycle */
#define VMW_BALLOON_MAX_REFUSED		16

/*
 * Hypervisor communication port definitions.
 */
#define VMW_BALLOON_HV_PORT		0x5670
#define VMW_BALLOON_HV_MAGIC		0x456c6d6f
#define VMW_BALLOON_GUEST_ID		1	/* Linux */

enum vmwballoon_capabilities {
	/*
	 * Bit 0 is reserved and not associated to any capability.
	 */
	VMW_BALLOON_BASIC_CMDS			= (1 << 1),
	VMW_BALLOON_BATCHED_CMDS		= (1 << 2),
	VMW_BALLOON_BATCHED_2M_CMDS		= (1 << 3),
	VMW_BALLOON_SIGNALLED_WAKEUP_CMD	= (1 << 4),
};

#define VMW_BALLOON_CAPABILITIES	(VMW_BALLOON_BASIC_CMDS \
					| VMW_BALLOON_BATCHED_CMDS \
					| VMW_BALLOON_BATCHED_2M_CMDS \
					| VMW_BALLOON_SIGNALLED_WAKEUP_CMD)

#define VMW_BALLOON_2M_SHIFT		(9)
#define VMW_BALLOON_NUM_PAGE_SIZES	(2)
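
/*
 * Page-size bookkeeping: index 0 (false) refers to 4k pages and index 1
 * (true) to 2m pages in the page_sizes[], stats and helper arrays below.
 * With 4k base pages, 1 << VMW_BALLOON_2M_SHIFT == 512 base pages make up
 * one 2m page.
 */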

/*
 * Backdoor commands availability:
 *
 * START, GET_TARGET and GUEST_ID are always available.
 *
 * VMW_BALLOON_BASIC_CMDS:
 *	LOCK and UNLOCK commands,
 * VMW_BALLOON_BATCHED_CMDS:
 *	BATCHED_LOCK and BATCHED_UNLOCK commands,
 * VMW_BALLOON_BATCHED_2M_CMDS:
 *	BATCHED_2M_LOCK and BATCHED_2M_UNLOCK commands,
 * VMW_BALLOON_SIGNALLED_WAKEUP_CMD:
 *	VMW_BALLOON_CMD_VMCI_DOORBELL_SET command.
 */
#define VMW_BALLOON_CMD_START			0
#define VMW_BALLOON_CMD_GET_TARGET		1
#define VMW_BALLOON_CMD_LOCK			2
#define VMW_BALLOON_CMD_UNLOCK			3
#define VMW_BALLOON_CMD_GUEST_ID		4
#define VMW_BALLOON_CMD_BATCHED_LOCK		6
#define VMW_BALLOON_CMD_BATCHED_UNLOCK		7
#define VMW_BALLOON_CMD_BATCHED_2M_LOCK		8
#define VMW_BALLOON_CMD_BATCHED_2M_UNLOCK	9
#define VMW_BALLOON_CMD_VMCI_DOORBELL_SET	10

/* error codes */
#define VMW_BALLOON_SUCCESS			0
#define VMW_BALLOON_FAILURE			-1
#define VMW_BALLOON_ERROR_CMD_INVALID		1
#define VMW_BALLOON_ERROR_PPN_INVALID		2
#define VMW_BALLOON_ERROR_PPN_LOCKED		3
#define VMW_BALLOON_ERROR_PPN_UNLOCKED		4
#define VMW_BALLOON_ERROR_PPN_PINNED		5
#define VMW_BALLOON_ERROR_PPN_NOTNEEDED		6
#define VMW_BALLOON_ERROR_RESET			7
#define VMW_BALLOON_ERROR_BUSY			8

#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES	(0x03000000)

/* Batch page description */

/*
 * Layout of a page in the batch page:
 *
 * +-------------+----------+--------+
 * |             |          |        |
 * | Page number | Reserved | Status |
 * |             |          |        |
 * +-------------+----------+--------+
 * 64            PAGE_SHIFT 6        0
 *
 * The reserved field should be set to 0.
 */
#define VMW_BALLOON_BATCH_MAX_PAGES	(PAGE_SIZE / sizeof(u64))
#define VMW_BALLOON_BATCH_STATUS_MASK	((1UL << 5) - 1)
#define VMW_BALLOON_BATCH_PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))

struct vmballoon_batch_page {
	u64 pages[VMW_BALLOON_BATCH_MAX_PAGES];
};

static u64 vmballoon_batch_get_pa(struct vmballoon_batch_page *batch, int idx)
{
	return batch->pages[idx] & VMW_BALLOON_BATCH_PAGE_MASK;
}

static int vmballoon_batch_get_status(struct vmballoon_batch_page *batch,
				      int idx)
{
	return (int)(batch->pages[idx] & VMW_BALLOON_BATCH_STATUS_MASK);
}

static void vmballoon_batch_set_pa(struct vmballoon_batch_page *batch, int idx,
				   u64 pa)
{
	batch->pages[idx] = pa;
}
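
/*
 * Example of the encoding above: for a 4k page with PFN 0x12345 the guest
 * stores 0x12345 << PAGE_SHIFT in the entry, leaving the low status and
 * reserved bits zero. After a batched lock/unlock command the hypervisor
 * reports the per-page result in the low status bits (e.g.
 * VMW_BALLOON_ERROR_PPN_PINNED), which vmballoon_batch_get_status()
 * extracts.
 */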

#define VMWARE_BALLOON_CMD(cmd, arg1, arg2, result)		\
({								\
	unsigned long __status, __dummy1, __dummy2, __dummy3;	\
	__asm__ __volatile__ ("inl %%dx" :			\
		"=a"(__status),					\
		"=c"(__dummy1),					\
		"=d"(__dummy2),					\
		"=b"(result),					\
		"=S" (__dummy3) :				\
		"0"(VMW_BALLOON_HV_MAGIC),			\
		"1"(VMW_BALLOON_CMD_##cmd),			\
		"2"(VMW_BALLOON_HV_PORT),			\
		"3"(arg1),					\
		"4" (arg2) :					\
		"memory");					\
	if (VMW_BALLOON_CMD_##cmd == VMW_BALLOON_CMD_START)	\
		result = __dummy1;				\
	result &= -1UL;						\
	__status & -1UL;					\
})
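
/*
 * The "backdoor" call above is an I/O port read: the magic value goes in
 * %eax, the command in %ecx, up to two arguments in %ebx/%esi and the port
 * number in %edx. The hypervisor intercepts the access and returns the
 * status in %eax and a command-specific value in %ebx. A typical invocation
 * looks like:
 *
 *	status = VMWARE_BALLOON_CMD(GET_TARGET, limit, dummy, target);
 *
 * as in vmballoon_send_get_target() below.
 */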

#ifdef CONFIG_DEBUG_FS
struct vmballoon_stats {
	unsigned int timer;
	unsigned int doorbell;

	/* allocation statistics */
	unsigned int alloc[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int alloc_fail[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int sleep_alloc;
	unsigned int sleep_alloc_fail;
	unsigned int refused_alloc[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int refused_free[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int free[VMW_BALLOON_NUM_PAGE_SIZES];

	/* monitor operations */
	unsigned int lock[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int lock_fail[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int unlock[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int unlock_fail[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int target;
	unsigned int target_fail;
	unsigned int start;
	unsigned int start_fail;
	unsigned int guest_type;
	unsigned int guest_type_fail;
	unsigned int doorbell_set;
	unsigned int doorbell_unset;
};

#define STATS_INC(stat) (stat)++
#else
#define STATS_INC(stat)
#endif
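
/*
 * Without CONFIG_DEBUG_FS, STATS_INC() expands to nothing, so all the
 * counter updates sprinkled through the code below compile away entirely.
 */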

struct vmballoon;

struct vmballoon_ops {
	void (*add_page)(struct vmballoon *b, int idx, struct page *p);
	int (*lock)(struct vmballoon *b, unsigned int num_pages,
			bool is_2m_pages, unsigned int *target);
	int (*unlock)(struct vmballoon *b, unsigned int num_pages,
			bool is_2m_pages, unsigned int *target);
};

struct vmballoon_page_size {
	/* list of reserved physical pages */
	struct list_head pages;

	/* transient list of non-balloonable pages */
	struct list_head refused_pages;
	unsigned int n_refused_pages;
};

struct vmballoon {
	struct vmballoon_page_size page_sizes[VMW_BALLOON_NUM_PAGE_SIZES];

	/* supported page sizes. 1 == 4k pages only, 2 == 4k and 2m pages */
	unsigned supported_page_sizes;

	/* balloon size in pages */
	unsigned int size;
	unsigned int target;

	/* reset flag */
	bool reset_required;

	unsigned long capabilities;

	struct vmballoon_batch_page *batch_page;
	unsigned int batch_max_pages;
	struct page *page;

	const struct vmballoon_ops *ops;

#ifdef CONFIG_DEBUG_FS
	/* statistics */
	struct vmballoon_stats stats;

	/* debugfs file exporting statistics */
	struct dentry *dbg_entry;
#endif

	struct sysinfo sysinfo;

	struct delayed_work dwork;

	struct vmci_handle vmci_doorbell;
};

static struct vmballoon balloon;

/*
 * Send the "start" command to the host, communicating the supported version
 * of the protocol.
 */
static bool vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
{
	unsigned long status, capabilities, dummy = 0;
	bool success;

	STATS_INC(b->stats.start);

	status = VMWARE_BALLOON_CMD(START, req_caps, dummy, capabilities);

	switch (status) {
	case VMW_BALLOON_SUCCESS_WITH_CAPABILITIES:
		b->capabilities = capabilities;
		success = true;
		break;
	case VMW_BALLOON_SUCCESS:
		b->capabilities = VMW_BALLOON_BASIC_CMDS;
		success = true;
		break;
	default:
		success = false;
	}

	/*
	 * 2MB pages are only supported with batching. If batching is for some
	 * reason disabled, do not use 2MB pages, since otherwise the legacy
	 * mechanism is used with 2MB pages, causing a failure.
	 */
	if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
	    (b->capabilities & VMW_BALLOON_BATCHED_CMDS))
		b->supported_page_sizes = 2;
	else
		b->supported_page_sizes = 1;

	if (!success) {
		pr_debug("%s - failed, hv returns %ld\n", __func__, status);
		STATS_INC(b->stats.start_fail);
	}
	return success;
}

static bool vmballoon_check_status(struct vmballoon *b, unsigned long status)
{
	switch (status) {
	case VMW_BALLOON_SUCCESS:
		return true;

	case VMW_BALLOON_ERROR_RESET:
		b->reset_required = true;
		/* fall through */

	default:
		return false;
	}
}

/*
 * Communicate the guest type to the host so that it can adjust the
 * ballooning algorithm to the one most appropriate for the guest. This
 * command is normally issued after sending the "start" command and is part
 * of the standard reset sequence.
 */
static bool vmballoon_send_guest_id(struct vmballoon *b)
{
	unsigned long status, dummy = 0;

	status = VMWARE_BALLOON_CMD(GUEST_ID, VMW_BALLOON_GUEST_ID, dummy,
				    dummy);

	STATS_INC(b->stats.guest_type);

	if (vmballoon_check_status(b, status))
		return true;

	pr_debug("%s - failed, hv returns %ld\n", __func__, status);
	STATS_INC(b->stats.guest_type_fail);
	return false;
}

static u16 vmballoon_page_size(bool is_2m_page)
{
	if (is_2m_page)
		return 1 << VMW_BALLOON_2M_SHIFT;

	return 1;
}

/*
 * Retrieve desired balloon size from the host.
 */
static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target)
{
	unsigned long status;
	unsigned long target;
	unsigned long limit;
	unsigned long dummy = 0;
	u32 limit32;

	/*
	 * si_meminfo() is cheap. Moreover, we want to provide dynamic
	 * max balloon size later. So let us call si_meminfo() every
	 * iteration.
	 */
	si_meminfo(&b->sysinfo);
	limit = b->sysinfo.totalram;

	/* Ensure limit fits in 32-bits */
	limit32 = (u32)limit;
	if (limit != limit32)
		return false;

	/* update stats */
	STATS_INC(b->stats.target);

	status = VMWARE_BALLOON_CMD(GET_TARGET, limit, dummy, target);
	if (vmballoon_check_status(b, status)) {
		*new_target = target;
		return true;
	}

	pr_debug("%s - failed, hv returns %ld\n", __func__, status);
	STATS_INC(b->stats.target_fail);
	return false;
}

/*
 * Notify the host about an allocated page so that the host can use it
 * without fear that the guest will need it. The host may reject some
 * pages; we need to check the return value and maybe submit a different
 * page.
 */
static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
				unsigned int *hv_status, unsigned int *target)
{
	unsigned long status, dummy = 0;
	u32 pfn32;

	pfn32 = (u32)pfn;
	if (pfn32 != pfn)
		return -EINVAL;

	STATS_INC(b->stats.lock[false]);

	*hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy, *target);
	if (vmballoon_check_status(b, status))
		return 0;

	pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
	STATS_INC(b->stats.lock_fail[false]);
	return -EIO;
}

static int vmballoon_send_batched_lock(struct vmballoon *b,
		unsigned int num_pages, bool is_2m_pages, unsigned int *target)
{
	unsigned long status;
	unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));

	STATS_INC(b->stats.lock[is_2m_pages]);

	if (is_2m_pages)
		status = VMWARE_BALLOON_CMD(BATCHED_2M_LOCK, pfn, num_pages,
				*target);
	else
		status = VMWARE_BALLOON_CMD(BATCHED_LOCK, pfn, num_pages,
				*target);

	if (vmballoon_check_status(b, status))
		return 0;

	pr_debug("%s - batch ppn %lx, hv returns %ld\n", __func__, pfn, status);
	STATS_INC(b->stats.lock_fail[is_2m_pages]);
	return 1;
}

/*
 * Notify the host that the guest intends to release the given page back
 * into the pool of available (to the guest) pages.
 */
static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn,
						unsigned int *target)
{
	unsigned long status, dummy = 0;
	u32 pfn32;

	pfn32 = (u32)pfn;
	if (pfn32 != pfn)
		return false;

	STATS_INC(b->stats.unlock[false]);

	status = VMWARE_BALLOON_CMD(UNLOCK, pfn, dummy, *target);
	if (vmballoon_check_status(b, status))
		return true;

	pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
	STATS_INC(b->stats.unlock_fail[false]);
	return false;
}

static bool vmballoon_send_batched_unlock(struct vmballoon *b,
		unsigned int num_pages, bool is_2m_pages, unsigned int *target)
{
	unsigned long status;
	unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));

	STATS_INC(b->stats.unlock[is_2m_pages]);

	if (is_2m_pages)
		status = VMWARE_BALLOON_CMD(BATCHED_2M_UNLOCK, pfn, num_pages,
				*target);
	else
		status = VMWARE_BALLOON_CMD(BATCHED_UNLOCK, pfn, num_pages,
				*target);

	if (vmballoon_check_status(b, status))
		return true;

	pr_debug("%s - batch ppn %lx, hv returns %ld\n", __func__, pfn, status);
	STATS_INC(b->stats.unlock_fail[is_2m_pages]);
	return false;
}

static struct page *vmballoon_alloc_page(gfp_t flags, bool is_2m_page)
{
	if (is_2m_page)
		return alloc_pages(flags, VMW_BALLOON_2M_SHIFT);

	return alloc_page(flags);
}

static void vmballoon_free_page(struct page *page, bool is_2m_page)
{
	if (is_2m_page)
		__free_pages(page, VMW_BALLOON_2M_SHIFT);
	else
		__free_page(page);
}
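
/*
 * A 2m balloon page is therefore an order-VMW_BALLOON_2M_SHIFT (order-9)
 * allocation: 512 physically contiguous 4k pages, matching the accounting
 * done by vmballoon_page_size().
 */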

/*
 * Quickly release all pages allocated for the balloon. This function is
 * called when the host decides to "reset" the balloon for one reason or
 * another. Unlike normal "deflate" we do not (shall not) notify the host
 * of the pages being released.
 */
static void vmballoon_pop(struct vmballoon *b)
{
	struct page *page, *next;
	unsigned is_2m_pages;

	for (is_2m_pages = 0; is_2m_pages < VMW_BALLOON_NUM_PAGE_SIZES;
			is_2m_pages++) {
		struct vmballoon_page_size *page_size =
				&b->page_sizes[is_2m_pages];
		u16 size_per_page = vmballoon_page_size(is_2m_pages);

		list_for_each_entry_safe(page, next, &page_size->pages, lru) {
			list_del(&page->lru);
			vmballoon_free_page(page, is_2m_pages);
			STATS_INC(b->stats.free[is_2m_pages]);
			b->size -= size_per_page;
			cond_resched();
		}
	}

	/* Clearing the batch_page unconditionally has no adverse effect */
	free_page((unsigned long)b->batch_page);
	b->batch_page = NULL;
}

/*
 * Notify the host of a ballooned page. If the host rejects the page, put
 * it on the list of refused pages; these pages are then released at the
 * end of the inflation cycle.
 */
static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
				bool is_2m_pages, unsigned int *target)
{
	int locked, hv_status;
	struct page *page = b->page;
	struct vmballoon_page_size *page_size = &b->page_sizes[false];

	/* is_2m_pages can never be true here: 2m-page support implies batching */

	locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status,
					  target);
	if (locked) {
		STATS_INC(b->stats.refused_alloc[false]);

		if (locked == -EIO &&
		    (hv_status == VMW_BALLOON_ERROR_RESET ||
		     hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED)) {
			vmballoon_free_page(page, false);
			return -EIO;
		}

		/*
		 * Place page on the list of non-balloonable pages
		 * and retry allocation, unless we already accumulated
		 * too many of them, in which case take a breather.
		 */
		if (page_size->n_refused_pages < VMW_BALLOON_MAX_REFUSED) {
			page_size->n_refused_pages++;
			list_add(&page->lru, &page_size->refused_pages);
		} else {
			vmballoon_free_page(page, false);
		}
		return locked;
	}

	/* track allocated page */
	list_add(&page->lru, &page_size->pages);

	/* update balloon size */
	b->size++;

	return 0;
}

static int vmballoon_lock_batched_page(struct vmballoon *b,
		unsigned int num_pages, bool is_2m_pages, unsigned int *target)
{
	int locked, i;
	u16 size_per_page = vmballoon_page_size(is_2m_pages);

	locked = vmballoon_send_batched_lock(b, num_pages, is_2m_pages,
			target);
	if (locked > 0) {
		for (i = 0; i < num_pages; i++) {
			u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
			struct page *p = pfn_to_page(pa >> PAGE_SHIFT);

			vmballoon_free_page(p, is_2m_pages);
		}

		return -EIO;
	}

	for (i = 0; i < num_pages; i++) {
		u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
		struct page *p = pfn_to_page(pa >> PAGE_SHIFT);
		struct vmballoon_page_size *page_size =
				&b->page_sizes[is_2m_pages];

		locked = vmballoon_batch_get_status(b->batch_page, i);

		switch (locked) {
		case VMW_BALLOON_SUCCESS:
			list_add(&p->lru, &page_size->pages);
			b->size += size_per_page;
			break;
		case VMW_BALLOON_ERROR_PPN_PINNED:
		case VMW_BALLOON_ERROR_PPN_INVALID:
			if (page_size->n_refused_pages
					< VMW_BALLOON_MAX_REFUSED) {
				list_add(&p->lru, &page_size->refused_pages);
				page_size->n_refused_pages++;
				break;
			}
			/* Fallthrough */
		case VMW_BALLOON_ERROR_RESET:
		case VMW_BALLOON_ERROR_PPN_NOTNEEDED:
			vmballoon_free_page(p, is_2m_pages);
			break;
		default:
			/* This should never happen */
			WARN_ON_ONCE(true);
		}
	}

	return 0;
}

/*
 * Release the page allocated for the balloon. Note that we first notify
 * the host so it can make sure the page will be available for the guest
 * to use, if needed.
 */
static int vmballoon_unlock_page(struct vmballoon *b, unsigned int num_pages,
		bool is_2m_pages, unsigned int *target)
{
	struct page *page = b->page;
	struct vmballoon_page_size *page_size = &b->page_sizes[false];

	/* is_2m_pages can never be true here: 2m-page support implies batching */

	if (!vmballoon_send_unlock_page(b, page_to_pfn(page), target)) {
		list_add(&page->lru, &page_size->pages);
		return -EIO;
	}

	/* deallocate page */
	vmballoon_free_page(page, false);
	STATS_INC(b->stats.free[false]);

	/* update balloon size */
	b->size--;

	return 0;
}

static int vmballoon_unlock_batched_page(struct vmballoon *b,
				unsigned int num_pages, bool is_2m_pages,
				unsigned int *target)
{
	int locked, i, ret = 0;
	bool hv_success;
	u16 size_per_page = vmballoon_page_size(is_2m_pages);

	hv_success = vmballoon_send_batched_unlock(b, num_pages, is_2m_pages,
			target);
	if (!hv_success)
		ret = -EIO;

	for (i = 0; i < num_pages; i++) {
		u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
		struct page *p = pfn_to_page(pa >> PAGE_SHIFT);
		struct vmballoon_page_size *page_size =
				&b->page_sizes[is_2m_pages];

		locked = vmballoon_batch_get_status(b->batch_page, i);
		if (!hv_success || locked != VMW_BALLOON_SUCCESS) {
			/*
			 * That page wasn't successfully unlocked by the
			 * hypervisor, re-add it to the list of pages owned by
			 * the balloon driver.
			 */
			list_add(&p->lru, &page_size->pages);
		} else {
			/* deallocate page */
			vmballoon_free_page(p, is_2m_pages);
			STATS_INC(b->stats.free[is_2m_pages]);

			/* update balloon size */
			b->size -= size_per_page;
		}
	}

	return ret;
}

/*
 * Release pages that were allocated while attempting to inflate the
 * balloon but were refused by the host for one reason or another.
 */
static void vmballoon_release_refused_pages(struct vmballoon *b,
		bool is_2m_pages)
{
	struct page *page, *next;
	struct vmballoon_page_size *page_size =
			&b->page_sizes[is_2m_pages];

	list_for_each_entry_safe(page, next, &page_size->refused_pages, lru) {
		list_del(&page->lru);
		vmballoon_free_page(page, is_2m_pages);
		STATS_INC(b->stats.refused_free[is_2m_pages]);
	}

	page_size->n_refused_pages = 0;
}

static void vmballoon_add_page(struct vmballoon *b, int idx, struct page *p)
{
	b->page = p;
}

static void vmballoon_add_batched_page(struct vmballoon *b, int idx,
				struct page *p)
{
	vmballoon_batch_set_pa(b->batch_page, idx,
			(u64)page_to_pfn(p) << PAGE_SHIFT);
}
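
/*
 * The physical address stored above is page-aligned, so the low status and
 * reserved bits of the batch entry start out zeroed, as the batch page
 * layout requires.
 */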

/*
 * Inflate the balloon towards its target size. Note that we try to limit
 * the rate of allocation to make sure we are not choking the rest of the
 * system.
 */
static void vmballoon_inflate(struct vmballoon *b)
{
	unsigned int num_pages = 0;
	int error = 0;
	gfp_t flags = VMW_PAGE_ALLOC_NOSLEEP;
	bool is_2m_pages;

	pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);

	/*
	 * First try NOSLEEP page allocations to inflate balloon.
	 *
	 * If we do not throttle nosleep allocations, we can drain all
	 * free pages in the guest quickly (if the balloon target is high).
	 * As a side-effect, draining free pages helps to inform (force)
	 * the guest to start swapping if the balloon target is not met yet,
	 * which is a desired behavior. However, the balloon driver can
	 * consume all available CPU cycles if too many pages are allocated
	 * in a second. Therefore, we throttle nosleep allocations even when
	 * the guest is not under memory pressure. OTOH, if we have already
	 * predicted that the guest is under memory pressure, then we
	 * slow down page allocations considerably.
	 */

	/*
	 * Start with NOSLEEP allocations, whose rate may be higher than
	 * that of sleeping allocations.
	 */
	is_2m_pages = b->supported_page_sizes == VMW_BALLOON_NUM_PAGE_SIZES;

	pr_debug("%s - goal: %d", __func__, b->target - b->size);

	while (!b->reset_required &&
		b->size + num_pages * vmballoon_page_size(is_2m_pages)
		< b->target) {
		struct page *page;

		if (flags == VMW_PAGE_ALLOC_NOSLEEP)
			STATS_INC(b->stats.alloc[is_2m_pages]);
		else
			STATS_INC(b->stats.sleep_alloc);

		page = vmballoon_alloc_page(flags, is_2m_pages);
		if (!page) {
			STATS_INC(b->stats.alloc_fail[is_2m_pages]);

			if (is_2m_pages) {
				b->ops->lock(b, num_pages, true, &b->target);

				/*
				 * ignore errors from locking as we now switch
				 * to 4k pages and we might get different
				 * errors.
				 */

				num_pages = 0;
				is_2m_pages = false;
				continue;
			}

			if (flags == VMW_PAGE_ALLOC_CANSLEEP) {
				/*
				 * CANSLEEP page allocation failed, so the
				 * guest is under severe memory pressure. We
				 * just log the event, but do not stop the
				 * inflation due to its negative impact on
				 * performance.
				 */
				STATS_INC(b->stats.sleep_alloc_fail);
				break;
			}

			/*
			 * NOSLEEP page allocation failed, so the guest is
			 * under memory pressure. Slowing down page allocations
			 * seems to be reasonable, but doing so might actually
			 * cause the hypervisor to throttle us down, resulting
			 * in degraded performance. We will count on the
			 * scheduler and standard memory management mechanisms
			 * for now.
			 */
			flags = VMW_PAGE_ALLOC_CANSLEEP;
			continue;
		}

		b->ops->add_page(b, num_pages++, page);
		if (num_pages == b->batch_max_pages) {
			error = b->ops->lock(b, num_pages, is_2m_pages,
					&b->target);
			num_pages = 0;
			if (error)
				break;
		}

		cond_resched();
	}

	if (num_pages > 0)
		b->ops->lock(b, num_pages, is_2m_pages, &b->target);

	vmballoon_release_refused_pages(b, true);
	vmballoon_release_refused_pages(b, false);
}

/*
 * Decrease the size of the balloon allowing the guest to use more memory.
 */
static void vmballoon_deflate(struct vmballoon *b)
{
	unsigned is_2m_pages;

	pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);

	/* free pages to reach target */
	for (is_2m_pages = 0; is_2m_pages < b->supported_page_sizes;
			is_2m_pages++) {
		struct page *page, *next;
		unsigned int num_pages = 0;
		struct vmballoon_page_size *page_size =
				&b->page_sizes[is_2m_pages];

		list_for_each_entry_safe(page, next, &page_size->pages, lru) {
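			/*
			 * Stop deflating once the balloon is within one 2m
			 * page of the target; vmballoon_work() applies the
			 * same slack before triggering deflation, which helps
			 * avoid bouncing between inflate and deflate for
			 * sub-2m deltas.
			 */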
			if (b->reset_required ||
			    (b->target > 0 &&
			     b->size - num_pages *
					vmballoon_page_size(is_2m_pages) <
					b->target + vmballoon_page_size(true)))
				break;

			list_del(&page->lru);
			b->ops->add_page(b, num_pages++, page);

			if (num_pages == b->batch_max_pages) {
				int error;

				error = b->ops->unlock(b, num_pages,
						is_2m_pages, &b->target);
				num_pages = 0;
				if (error)
					return;
			}

			cond_resched();
		}

		if (num_pages > 0)
			b->ops->unlock(b, num_pages, is_2m_pages, &b->target);
	}
}

static const struct vmballoon_ops vmballoon_basic_ops = {
	.add_page = vmballoon_add_page,
	.lock = vmballoon_lock_page,
	.unlock = vmballoon_unlock_page
};

static const struct vmballoon_ops vmballoon_batched_ops = {
	.add_page = vmballoon_add_batched_page,
	.lock = vmballoon_lock_batched_page,
	.unlock = vmballoon_unlock_batched_page
};

static bool vmballoon_init_batching(struct vmballoon *b)
{
	struct page *page;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return false;

	b->batch_page = page_address(page);
	return true;
}

/*
 * Receive notification and resize balloon
 */
static void vmballoon_doorbell(void *client_data)
{
	struct vmballoon *b = client_data;

	STATS_INC(b->stats.doorbell);

	mod_delayed_work(system_freezable_wq, &b->dwork, 0);
}
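
/*
 * Using mod_delayed_work() with a zero delay pulls the next run of
 * vmballoon_work() forward rather than waiting for the periodic one-second
 * tick, so a doorbell from the host takes effect almost immediately.
 */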

/*
 * Clean up vmci doorbell
 */
static void vmballoon_vmci_cleanup(struct vmballoon *b)
{
	int error;

	VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, VMCI_INVALID_ID,
			VMCI_INVALID_ID, error);
	STATS_INC(b->stats.doorbell_unset);

	if (!vmci_handle_is_invalid(b->vmci_doorbell)) {
		vmci_doorbell_destroy(b->vmci_doorbell);
		b->vmci_doorbell = VMCI_INVALID_HANDLE;
	}
}

/*
 * Initialize vmci doorbell, to get notified as soon as balloon changes
 */
static int vmballoon_vmci_init(struct vmballoon *b)
{
	unsigned long error, dummy;

	if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
		return 0;

	error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB,
				     VMCI_PRIVILEGE_FLAG_RESTRICTED,
				     vmballoon_doorbell, b);

	if (error != VMCI_SUCCESS)
		goto fail;

	error = VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, b->vmci_doorbell.context,
				   b->vmci_doorbell.resource, dummy);

	STATS_INC(b->stats.doorbell_set);

	if (error != VMW_BALLOON_SUCCESS)
		goto fail;

	return 0;
fail:
	vmballoon_vmci_cleanup(b);
	return -EIO;
}

/*
 * Perform the standard reset sequence by popping the balloon (in case it
 * is not empty) and then restarting the protocol. This operation normally
 * happens when the host responds with VMW_BALLOON_ERROR_RESET to a command.
 */
static void vmballoon_reset(struct vmballoon *b)
{
	int error;

	vmballoon_vmci_cleanup(b);

	/* free all pages, skipping monitor unlock */
	vmballoon_pop(b);

	if (!vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
		return;

	if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
		b->ops = &vmballoon_batched_ops;
		b->batch_max_pages = VMW_BALLOON_BATCH_MAX_PAGES;
		if (!vmballoon_init_batching(b)) {
			/*
			 * We failed to initialize batching, inform the monitor
			 * about it by sending a null capability.
			 *
			 * The guest will retry in one second.
			 */
			vmballoon_send_start(b, 0);
			return;
		}
	} else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
		b->ops = &vmballoon_basic_ops;
		b->batch_max_pages = 1;
	}

	b->reset_required = false;

	error = vmballoon_vmci_init(b);
	if (error)
		pr_err("failed to initialize vmci doorbell\n");

	if (!vmballoon_send_guest_id(b))
		pr_err("failed to send guest ID to the host\n");
}

/*
 * Balloon work function: reset protocol, if needed, get the new size and
 * adjust balloon as needed. Repeat in 1 sec.
 */
static void vmballoon_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
	unsigned int target;

	STATS_INC(b->stats.timer);

	if (b->reset_required)
		vmballoon_reset(b);

	if (!b->reset_required && vmballoon_send_get_target(b, &target)) {
		/* update target, adjust size */
		b->target = target;

		if (b->size < target)
			vmballoon_inflate(b);
		else if (target == 0 ||
				b->size > target + vmballoon_page_size(true))
			vmballoon_deflate(b);
	}

	/*
	 * We are using a freezable workqueue so that balloon operations are
	 * stopped while the system transitions to/from sleep/hibernation.
	 */
	queue_delayed_work(system_freezable_wq,
			   dwork, round_jiffies_relative(HZ));
}

/*
 * DEBUGFS Interface
 */
#ifdef CONFIG_DEBUG_FS

static int vmballoon_debug_show(struct seq_file *f, void *offset)
{
	struct vmballoon *b = f->private;
	struct vmballoon_stats *stats = &b->stats;

	/* format capabilities info */
	seq_printf(f,
		   "balloon capabilities:   %#4x\n"
		   "used capabilities:      %#4lx\n"
		   "is resetting:           %c\n",
		   VMW_BALLOON_CAPABILITIES, b->capabilities,
		   b->reset_required ? 'y' : 'n');

	/* format size info */
	seq_printf(f,
		   "target:             %8d pages\n"
		   "current:            %8d pages\n",
		   b->target, b->size);

	seq_printf(f,
		   "\n"
		   "timer:              %8u\n"
		   "doorbell:           %8u\n"
		   "start:              %8u (%4u failed)\n"
		   "guestType:          %8u (%4u failed)\n"
		   "2m-lock:            %8u (%4u failed)\n"
		   "lock:               %8u (%4u failed)\n"
		   "2m-unlock:          %8u (%4u failed)\n"
		   "unlock:             %8u (%4u failed)\n"
		   "target:             %8u (%4u failed)\n"
		   "prim2mAlloc:        %8u (%4u failed)\n"
		   "primNoSleepAlloc:   %8u (%4u failed)\n"
		   "primCanSleepAlloc:  %8u (%4u failed)\n"
		   "prim2mFree:         %8u\n"
		   "primFree:           %8u\n"
		   "err2mAlloc:         %8u\n"
		   "errAlloc:           %8u\n"
		   "err2mFree:          %8u\n"
		   "errFree:            %8u\n"
		   "doorbellSet:        %8u\n"
		   "doorbellUnset:      %8u\n",
		   stats->timer,
		   stats->doorbell,
		   stats->start, stats->start_fail,
		   stats->guest_type, stats->guest_type_fail,
		   stats->lock[true], stats->lock_fail[true],
		   stats->lock[false], stats->lock_fail[false],
		   stats->unlock[true], stats->unlock_fail[true],
		   stats->unlock[false], stats->unlock_fail[false],
		   stats->target, stats->target_fail,
		   stats->alloc[true], stats->alloc_fail[true],
		   stats->alloc[false], stats->alloc_fail[false],
		   stats->sleep_alloc, stats->sleep_alloc_fail,
		   stats->free[true],
		   stats->free[false],
		   stats->refused_alloc[true], stats->refused_alloc[false],
		   stats->refused_free[true], stats->refused_free[false],
		   stats->doorbell_set, stats->doorbell_unset);

	return 0;
}

static int vmballoon_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, vmballoon_debug_show, inode->i_private);
}

static const struct file_operations vmballoon_debug_fops = {
	.owner		= THIS_MODULE,
	.open		= vmballoon_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init vmballoon_debugfs_init(struct vmballoon *b)
{
	int error;

	b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
					   &vmballoon_debug_fops);
	if (IS_ERR(b->dbg_entry)) {
		error = PTR_ERR(b->dbg_entry);
		pr_err("failed to create debugfs entry, error: %d\n", error);
		return error;
	}

	return 0;
}

static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
{
	debugfs_remove(b->dbg_entry);
}

#else

static inline int vmballoon_debugfs_init(struct vmballoon *b)
{
	return 0;
}

static inline void vmballoon_debugfs_exit(struct vmballoon *b)
{
}

#endif	/* CONFIG_DEBUG_FS */

static int __init vmballoon_init(void)
{
	int error;
	unsigned is_2m_pages;

	/*
	 * Check if we are running on VMware's hypervisor and bail out
	 * if we are not.
	 */
	if (x86_hyper_type != X86_HYPER_VMWARE)
		return -ENODEV;

	for (is_2m_pages = 0; is_2m_pages < VMW_BALLOON_NUM_PAGE_SIZES;
			is_2m_pages++) {
		INIT_LIST_HEAD(&balloon.page_sizes[is_2m_pages].pages);
		INIT_LIST_HEAD(&balloon.page_sizes[is_2m_pages].refused_pages);
	}

	INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);

	error = vmballoon_debugfs_init(&balloon);
	if (error)
		return error;

	balloon.vmci_doorbell = VMCI_INVALID_HANDLE;
	balloon.batch_page = NULL;
	balloon.page = NULL;
	balloon.reset_required = true;

	queue_delayed_work(system_freezable_wq, &balloon.dwork, 0);

	return 0;
}

/*
 * Using late_initcall() instead of module_init() allows the balloon to use
 * the VMCI doorbell even when the balloon is built into the kernel. Otherwise
 * the VMCI is probed only after the balloon is initialized. If the balloon is
 * used as a module, late_initcall() is equivalent to module_init().
 */
late_initcall(vmballoon_init);

static void __exit vmballoon_exit(void)
{
	vmballoon_vmci_cleanup(&balloon);
	cancel_delayed_work_sync(&balloon.dwork);

	vmballoon_debugfs_exit(&balloon);

	/*
	 * Deallocate all reserved memory, and reset connection with monitor.
	 * Reset connection before deallocating memory to avoid potential for
	 * additional spurious resets from guest touching deallocated pages.
	 */
	vmballoon_send_start(&balloon, 0);
	vmballoon_pop(&balloon);
}
module_exit(vmballoon_exit);