// SPDX-License-Identifier: GPL-2.0
/*
 * VMware Balloon driver.
 *
 * Copyright (C) 2000-2018, VMware, Inc. All Rights Reserved.
 *
 * This is the VMware physical memory management driver for Linux. The driver
 * acts like a "balloon" that can be inflated to reclaim physical pages by
 * reserving them in the guest and invalidating them in the monitor,
 * freeing up the underlying machine pages so they can be allocated to
 * other guests. The balloon can also be deflated to allow the guest to
 * use more physical memory. Higher level policies can control the sizes
 * of balloons in VMs in order to manage physical memory resources.
 */

//#define DEBUG
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <asm/hypervisor.h>

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
MODULE_VERSION("1.5.0.0-k");
MODULE_ALIAS("dmi:*:svnVMware*:*");
MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");

/*
 * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't allow wait
 * (__GFP_RECLAIM) for huge page allocations. Use __GFP_NOWARN, to suppress page
 * allocation failure warnings. Disallow access to emergency low-memory pools.
 */
#define VMW_HUGE_PAGE_ALLOC_FLAGS	(__GFP_HIGHMEM|__GFP_NOWARN|	\
					 __GFP_NOMEMALLOC)

/*
 * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We allow lightweight
 * reclamation (__GFP_NORETRY). Use __GFP_NOWARN, to suppress page allocation
 * failure warnings. Disallow access to emergency low-memory pools.
 */
#define VMW_PAGE_ALLOC_FLAGS		(__GFP_HIGHMEM|__GFP_NOWARN|	\
					 __GFP_NOMEMALLOC|__GFP_NORETRY)

/* Maximum number of refused pages we accumulate during inflation cycle */
#define VMW_BALLOON_MAX_REFUSED		16

/*
 * Hypervisor communication port definitions.
 */
#define VMW_BALLOON_HV_PORT		0x5670
#define VMW_BALLOON_HV_MAGIC		0x456c6d6f
#define VMW_BALLOON_GUEST_ID		1	/* Linux */

enum vmwballoon_capabilities {
	/*
	 * Bit 0 is reserved and not associated to any capability.
	 */
	VMW_BALLOON_BASIC_CMDS			= (1 << 1),
	VMW_BALLOON_BATCHED_CMDS		= (1 << 2),
	VMW_BALLOON_BATCHED_2M_CMDS		= (1 << 3),
	VMW_BALLOON_SIGNALLED_WAKEUP_CMD	= (1 << 4),
};

#define VMW_BALLOON_CAPABILITIES	(VMW_BALLOON_BASIC_CMDS \
					| VMW_BALLOON_BATCHED_CMDS \
					| VMW_BALLOON_BATCHED_2M_CMDS \
					| VMW_BALLOON_SIGNALLED_WAKEUP_CMD)

#define VMW_BALLOON_2M_ORDER		(PMD_SHIFT - PAGE_SHIFT)

enum vmballoon_page_size_type {
	VMW_BALLOON_4K_PAGE,
	VMW_BALLOON_2M_PAGE,
	VMW_BALLOON_LAST_SIZE = VMW_BALLOON_2M_PAGE
};

#define VMW_BALLOON_NUM_PAGE_SIZES	(VMW_BALLOON_LAST_SIZE + 1)

static const char * const vmballoon_page_size_names[] = {
	[VMW_BALLOON_4K_PAGE]			= "4k",
	[VMW_BALLOON_2M_PAGE]			= "2M"
};

enum vmballoon_op {
	VMW_BALLOON_INFLATE,
	VMW_BALLOON_DEFLATE
};

enum vmballoon_op_stat_type {
	VMW_BALLOON_OP_STAT,
	VMW_BALLOON_OP_FAIL_STAT
};

#define VMW_BALLOON_OP_STAT_TYPES	(VMW_BALLOON_OP_FAIL_STAT + 1)

/**
 * enum vmballoon_cmd_type - backdoor commands.
 *
 * Availability of the commands is as follows:
 *
 * %VMW_BALLOON_CMD_START, %VMW_BALLOON_CMD_GET_TARGET and
 * %VMW_BALLOON_CMD_GUEST_ID are always available.
 *
 * If the host reports %VMW_BALLOON_BASIC_CMDS are supported then
 * %VMW_BALLOON_CMD_LOCK and %VMW_BALLOON_CMD_UNLOCK commands are available.
 *
 * If the host reports %VMW_BALLOON_BATCHED_CMDS are supported then
 * %VMW_BALLOON_CMD_BATCHED_LOCK and %VMW_BALLOON_CMD_BATCHED_UNLOCK commands
 * are available.
 *
 * If the host reports %VMW_BALLOON_BATCHED_2M_CMDS are supported then
 * %VMW_BALLOON_CMD_BATCHED_2M_LOCK and %VMW_BALLOON_CMD_BATCHED_2M_UNLOCK
 * are supported.
 *
 * If the host reports %VMW_BALLOON_SIGNALLED_WAKEUP_CMD is supported then
 * the %VMW_BALLOON_CMD_VMCI_DOORBELL_SET command is supported.
 *
 * @VMW_BALLOON_CMD_START: Communicating supported version with the hypervisor.
 * @VMW_BALLOON_CMD_GET_TARGET: Gets the balloon target size.
 * @VMW_BALLOON_CMD_LOCK: Informs the hypervisor about a ballooned page.
 * @VMW_BALLOON_CMD_UNLOCK: Informs the hypervisor about a page that is about
 *			    to be deflated from the balloon.
 * @VMW_BALLOON_CMD_GUEST_ID: Informs the hypervisor about the type of OS that
 *			      runs in the VM.
 * @VMW_BALLOON_CMD_BATCHED_LOCK: Inform the hypervisor about a batch of
 *				  ballooned pages (up to 512).
 * @VMW_BALLOON_CMD_BATCHED_UNLOCK: Inform the hypervisor about a batch of
 *				    pages that are about to be deflated from the
 *				    balloon (up to 512).
 * @VMW_BALLOON_CMD_BATCHED_2M_LOCK: Similar to @VMW_BALLOON_CMD_BATCHED_LOCK
 *				     for 2MB pages.
 * @VMW_BALLOON_CMD_BATCHED_2M_UNLOCK: Similar to
 *					@VMW_BALLOON_CMD_BATCHED_UNLOCK for 2MB
 *					pages.
 * @VMW_BALLOON_CMD_VMCI_DOORBELL_SET: A command to set doorbell notification
 *				       that would be invoked when the balloon
 *				       size changes.
 * @VMW_BALLOON_CMD_LAST: Value of the last command.
 */
enum vmballoon_cmd_type {
	VMW_BALLOON_CMD_START,
	VMW_BALLOON_CMD_GET_TARGET,
	VMW_BALLOON_CMD_LOCK,
	VMW_BALLOON_CMD_UNLOCK,
	VMW_BALLOON_CMD_GUEST_ID,
	/* No command 5 */
	VMW_BALLOON_CMD_BATCHED_LOCK = 6,
	VMW_BALLOON_CMD_BATCHED_UNLOCK,
	VMW_BALLOON_CMD_BATCHED_2M_LOCK,
	VMW_BALLOON_CMD_BATCHED_2M_UNLOCK,
	VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
	VMW_BALLOON_CMD_LAST = VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
};

#define VMW_BALLOON_CMD_NUM	(VMW_BALLOON_CMD_LAST + 1)

enum vmballoon_error_codes {
	VMW_BALLOON_SUCCESS,
	VMW_BALLOON_ERROR_CMD_INVALID,
	VMW_BALLOON_ERROR_PPN_INVALID,
	VMW_BALLOON_ERROR_PPN_LOCKED,
	VMW_BALLOON_ERROR_PPN_UNLOCKED,
	VMW_BALLOON_ERROR_PPN_PINNED,
	VMW_BALLOON_ERROR_PPN_NOTNEEDED,
	VMW_BALLOON_ERROR_RESET,
	VMW_BALLOON_ERROR_BUSY
};

#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES	(0x03000000)

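/*
 * Commands for which a successful completion also carries an updated balloon
 * target in the result returned by the hypervisor (see __vmballoon_cmd()).
 */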
#define VMW_BALLOON_CMD_WITH_TARGET_MASK			\
	((1UL << VMW_BALLOON_CMD_GET_TARGET)		|	\
	 (1UL << VMW_BALLOON_CMD_LOCK)			|	\
	 (1UL << VMW_BALLOON_CMD_UNLOCK)		|	\
	 (1UL << VMW_BALLOON_CMD_BATCHED_LOCK)		|	\
	 (1UL << VMW_BALLOON_CMD_BATCHED_UNLOCK)	|	\
	 (1UL << VMW_BALLOON_CMD_BATCHED_2M_LOCK)	|	\
	 (1UL << VMW_BALLOON_CMD_BATCHED_2M_UNLOCK))

static const char * const vmballoon_cmd_names[] = {
	[VMW_BALLOON_CMD_START]			= "start",
	[VMW_BALLOON_CMD_GET_TARGET]		= "target",
	[VMW_BALLOON_CMD_LOCK]			= "lock",
	[VMW_BALLOON_CMD_UNLOCK]		= "unlock",
	[VMW_BALLOON_CMD_GUEST_ID]		= "guestType",
	[VMW_BALLOON_CMD_BATCHED_LOCK]		= "batchLock",
	[VMW_BALLOON_CMD_BATCHED_UNLOCK]	= "batchUnlock",
	[VMW_BALLOON_CMD_BATCHED_2M_LOCK]	= "2m-lock",
	[VMW_BALLOON_CMD_BATCHED_2M_UNLOCK]	= "2m-unlock",
	[VMW_BALLOON_CMD_VMCI_DOORBELL_SET]	= "doorbellSet"
};

enum vmballoon_stat_page {
	VMW_BALLOON_PAGE_STAT_ALLOC,
	VMW_BALLOON_PAGE_STAT_ALLOC_FAIL,
	VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC,
	VMW_BALLOON_PAGE_STAT_REFUSED_FREE,
	VMW_BALLOON_PAGE_STAT_FREE,
	VMW_BALLOON_PAGE_STAT_LAST = VMW_BALLOON_PAGE_STAT_FREE
};

#define VMW_BALLOON_PAGE_STAT_NUM	(VMW_BALLOON_PAGE_STAT_LAST + 1)

enum vmballoon_stat_general {
	VMW_BALLOON_STAT_TIMER,
	VMW_BALLOON_STAT_DOORBELL,
	VMW_BALLOON_STAT_RESET,
	VMW_BALLOON_STAT_LAST = VMW_BALLOON_STAT_RESET
};

#define VMW_BALLOON_STAT_NUM		(VMW_BALLOON_STAT_LAST + 1)

static DEFINE_STATIC_KEY_TRUE(vmw_balloon_batching);
static DEFINE_STATIC_KEY_FALSE(balloon_stat_enabled);

struct vmballoon_ctl {
	struct list_head pages;
	struct list_head refused_pages;
	unsigned int n_refused_pages;
	unsigned int n_pages;
	enum vmballoon_page_size_type page_size;
	enum vmballoon_op op;
};

struct vmballoon_page_size {
	/* list of reserved physical pages */
	struct list_head pages;
};

/**
 * struct vmballoon_batch_entry - a batch entry for lock or unlock.
 *
 * @status: the status of the operation, which is written by the hypervisor.
 * @reserved: reserved for future use. Must be set to zero.
 * @pfn: the physical frame number of the page to be locked or unlocked.
 */
struct vmballoon_batch_entry {
	u64 status : 5;
	u64 reserved : PAGE_SHIFT - 5;
	u64 pfn : 52;
} __packed;

struct vmballoon {
	struct vmballoon_page_size page_sizes[VMW_BALLOON_NUM_PAGE_SIZES];

	/**
	 * @max_page_size: maximum supported page size for ballooning.
	 *
	 * Protected by @conf_sem
	 */
	enum vmballoon_page_size_type max_page_size;

	/**
	 * @size: balloon actual size in basic page size (frames).
	 *
	 * While we currently do not support size which is bigger than 32-bit,
	 * in preparation for future support, use 64-bits.
	 */
	atomic64_t size;

	/**
	 * @target: balloon target size in basic page size (frames).
	 *
	 * We do not protect the target under the assumption that setting the
	 * value is always done through a single write. If this assumption ever
	 * breaks, we would have to use X_ONCE for accesses, and suffer the less
	 * optimized code. Although we may read stale target value if multiple
	 * accesses happen at once, the performance impact should be minor.
	 */
	unsigned long target;

	/**
	 * @reset_required: reset flag
	 *
	 * Setting this flag may introduce races, but the code is expected to
	 * handle them gracefully. In the worst case, another operation will
	 * fail as reset did not take place. Clearing the flag is done while
	 * holding @conf_sem for write.
	 */
	bool reset_required;

	/**
	 * @capabilities: hypervisor balloon capabilities.
	 *
	 * Protected by @conf_sem.
	 */
	unsigned long capabilities;

	/**
	 * @batch_page: pointer to communication batch page.
	 *
	 * When batching is used, batch_page points to a page, which holds up to
	 * %VMW_BALLOON_BATCH_MAX_PAGES entries for locking or unlocking.
	 */
	struct vmballoon_batch_entry *batch_page;

	/**
	 * @batch_max_pages: maximum pages that can be locked/unlocked.
	 *
	 * Indicates the number of pages that the hypervisor can lock or unlock
	 * at once, according to whether batching is enabled. If batching is
	 * disabled, only a single page can be locked/unlocked on each
	 * operation.
	 *
	 * Protected by @conf_sem.
	 */
	unsigned int batch_max_pages;

	/**
	 * @page: page to be locked/unlocked by the hypervisor
	 *
	 * @page is only used when batching is disabled and a single page is
	 * reclaimed on each iteration.
	 *
	 * Protected by @comm_lock.
	 */
	struct page *page;

	/* statistics */
	struct vmballoon_stats *stats;

#ifdef CONFIG_DEBUG_FS
	/* debugfs file exporting statistics */
	struct dentry *dbg_entry;
#endif

	struct delayed_work dwork;

	/**
	 * @vmci_doorbell.
	 *
	 * Protected by @conf_sem.
	 */
	struct vmci_handle vmci_doorbell;

	/**
	 * @conf_sem: semaphore to protect the configuration and the statistics.
	 */
	struct rw_semaphore conf_sem;

	/**
	 * @comm_lock: lock to protect the communication with the host.
	 *
	 * Lock ordering: @conf_sem -> @comm_lock .
	 */
	spinlock_t comm_lock;
};

static struct vmballoon balloon;

struct vmballoon_stats {
	/* timer / doorbell operations */
	atomic64_t general_stat[VMW_BALLOON_STAT_NUM];

	/* allocation statistics for huge and small pages */
	atomic64_t
	       page_stat[VMW_BALLOON_PAGE_STAT_NUM][VMW_BALLOON_NUM_PAGE_SIZES];

	/* Monitor operations: total operations, and failures */
	atomic64_t ops[VMW_BALLOON_CMD_NUM][VMW_BALLOON_OP_STAT_TYPES];
};

static inline bool is_vmballoon_stats_on(void)
{
	return IS_ENABLED(CONFIG_DEBUG_FS) &&
		static_branch_unlikely(&balloon_stat_enabled);
}

static inline void vmballoon_stats_op_inc(struct vmballoon *b, unsigned int op,
					   enum vmballoon_op_stat_type type)
{
	if (is_vmballoon_stats_on())
		atomic64_inc(&b->stats->ops[op][type]);
}

static inline void vmballoon_stats_gen_inc(struct vmballoon *b,
					   enum vmballoon_stat_general stat)
{
	if (is_vmballoon_stats_on())
		atomic64_inc(&b->stats->general_stat[stat]);
}

static inline void vmballoon_stats_gen_add(struct vmballoon *b,
					   enum vmballoon_stat_general stat,
					   unsigned int val)
{
	if (is_vmballoon_stats_on())
		atomic64_add(val, &b->stats->general_stat[stat]);
}

static inline void vmballoon_stats_page_inc(struct vmballoon *b,
					    enum vmballoon_stat_page stat,
					    enum vmballoon_page_size_type size)
{
	if (is_vmballoon_stats_on())
		atomic64_inc(&b->stats->page_stat[stat][size]);
}

static inline void vmballoon_stats_page_add(struct vmballoon *b,
					    enum vmballoon_stat_page stat,
					    enum vmballoon_page_size_type size,
					    unsigned int val)
{
	if (is_vmballoon_stats_on())
		atomic64_add(val, &b->stats->page_stat[stat][size]);
}

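/**
 * __vmballoon_cmd - issue a backdoor command to the hypervisor.
 *
 * @b: pointer to the balloon.
 * @cmd: the command, one of &enum vmballoon_cmd_type.
 * @arg1: first command argument.
 * @arg2: second command argument.
 * @result: pointer for the command result; may be NULL if the caller does not
 *          need it. For %VMW_BALLOON_CMD_START the reported capabilities are
 *          provided, otherwise the value the hypervisor returned in %ebx.
 *
 * Issues the command through the balloon I/O port, updates the operation
 * statistics, records a new target when the command carries one, and sets
 * b->reset_required when the hypervisor asks for a reset.
 *
 * Return: the status code as provided by the hypervisor.
 */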
static inline unsigned long
__vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
		unsigned long arg2, unsigned long *result)
{
	unsigned long status, dummy1, dummy2, dummy3, local_result;

	vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_STAT);

	asm volatile ("inl %%dx" :
		"=a"(status),
		"=c"(dummy1),
		"=d"(dummy2),
		"=b"(local_result),
		"=S"(dummy3) :
		"0"(VMW_BALLOON_HV_MAGIC),
		"1"(cmd),
		"2"(VMW_BALLOON_HV_PORT),
		"3"(arg1),
		"4"(arg2) :
		"memory");

	/* update the result if needed */
	if (result)
		*result = (cmd == VMW_BALLOON_CMD_START) ? dummy1 :
							   local_result;

	/* update target when applicable */
	if (status == VMW_BALLOON_SUCCESS &&
	    ((1ul << cmd) & VMW_BALLOON_CMD_WITH_TARGET_MASK))
		WRITE_ONCE(b->target, local_result);

	if (status != VMW_BALLOON_SUCCESS &&
	    status != VMW_BALLOON_SUCCESS_WITH_CAPABILITIES) {
		vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_FAIL_STAT);
		pr_debug("%s: %s [0x%lx,0x%lx) failed, returned %ld\n",
			 __func__, vmballoon_cmd_names[cmd], arg1, arg2,
			 status);
	}

	/* mark reset required accordingly */
	if (status == VMW_BALLOON_ERROR_RESET)
		b->reset_required = true;

	return status;
}

static __always_inline unsigned long
vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
	      unsigned long arg2)
{
	unsigned long dummy;

	return __vmballoon_cmd(b, cmd, arg1, arg2, &dummy);
}

/*
 * Send "start" command to the host, communicating supported version
 * of the protocol.
 */
static int vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
{
	unsigned long status, capabilities;

	status = __vmballoon_cmd(b, VMW_BALLOON_CMD_START, req_caps, 0,
				 &capabilities);

	switch (status) {
	case VMW_BALLOON_SUCCESS_WITH_CAPABILITIES:
		b->capabilities = capabilities;
		break;
	case VMW_BALLOON_SUCCESS:
		b->capabilities = VMW_BALLOON_BASIC_CMDS;
		break;
	default:
		return -EIO;
	}

	/*
	 * 2MB pages are only supported with batching. If batching is for some
	 * reason disabled, do not use 2MB pages, since otherwise the legacy
	 * mechanism is used with 2MB pages, causing a failure.
	 */
	b->max_page_size = VMW_BALLOON_4K_PAGE;
	if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
	    (b->capabilities & VMW_BALLOON_BATCHED_CMDS))
		b->max_page_size = VMW_BALLOON_2M_PAGE;

	return 0;
}

/**
 * vmballoon_send_guest_id - communicate guest type to the host.
 *
 * @b: pointer to the balloon.
 *
 * Communicate guest type to the host so that it can adjust ballooning
 * algorithm to the one most appropriate for the guest. This command
 * is normally issued after sending "start" command and is part of
 * standard reset sequence.
 *
 * Return: zero on success or appropriate error code.
 */
static int vmballoon_send_guest_id(struct vmballoon *b)
{
	unsigned long status;

	status = vmballoon_cmd(b, VMW_BALLOON_CMD_GUEST_ID,
			       VMW_BALLOON_GUEST_ID, 0);

	return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
}

/**
 * vmballoon_page_order() - return the order of the page
 * @page_size: the size of the page.
 *
 * Return: the allocation order.
 */
static inline
unsigned int vmballoon_page_order(enum vmballoon_page_size_type page_size)
{
	return page_size == VMW_BALLOON_2M_PAGE ? VMW_BALLOON_2M_ORDER : 0;
}

/**
 * vmballoon_page_in_frames() - returns the number of frames in a page.
 * @page_size: the size of the page.
 *
 * Return: the number of 4k frames.
 */
static inline unsigned int
vmballoon_page_in_frames(enum vmballoon_page_size_type page_size)
{
	return 1 << vmballoon_page_order(page_size);
}
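
/*
 * For example, on x86-64 with 4 KiB base pages (PAGE_SHIFT == 12 and
 * PMD_SHIFT == 21), VMW_BALLOON_2M_ORDER is 9, so a single 2 MB ballooned
 * page accounts for 1 << 9 = 512 basic frames.
 */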

/**
 * vmballoon_send_get_target() - Retrieve desired balloon size from the host.
 *
 * @b: pointer to the balloon.
 *
 * Return: zero on success, EINVAL if the limit does not fit in 32-bit, as
 * required by the host-guest protocol, and EIO if an error occurred in
 * communicating with the host.
 */
static int vmballoon_send_get_target(struct vmballoon *b)
{
	unsigned long status;
	unsigned long limit;

	limit = totalram_pages();

	/* Ensure limit fits in 32-bits */
	if (limit != (u32)limit)
		return -EINVAL;

	status = vmballoon_cmd(b, VMW_BALLOON_CMD_GET_TARGET, limit, 0);

	return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
}

/**
 * vmballoon_alloc_page_list - allocates a list of pages.
 *
 * @b: pointer to the balloon.
 * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
 * @req_n_pages: the number of requested pages.
 *
 * Tries to allocate @req_n_pages. Adds them to the list of balloon pages in
 * @ctl.pages and updates @ctl.n_pages to reflect the number of pages.
 *
 * Return: zero on success or error code otherwise.
 */
static int vmballoon_alloc_page_list(struct vmballoon *b,
				     struct vmballoon_ctl *ctl,
				     unsigned int req_n_pages)
{
	struct page *page;
	unsigned int i;

	for (i = 0; i < req_n_pages; i++) {
		if (ctl->page_size == VMW_BALLOON_2M_PAGE)
			page = alloc_pages(VMW_HUGE_PAGE_ALLOC_FLAGS,
					   VMW_BALLOON_2M_ORDER);
		else
			page = alloc_page(VMW_PAGE_ALLOC_FLAGS);

		/* Update statistics */
		vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC,
					 ctl->page_size);

		if (page) {
			/* Success. Add the page to the list and continue. */
			list_add(&page->lru, &ctl->pages);
			continue;
		}

		/* Allocation failed. Update statistics and stop. */
		vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC_FAIL,
					 ctl->page_size);
		break;
	}

	ctl->n_pages = i;

	return req_n_pages == ctl->n_pages ? 0 : -ENOMEM;
}

/**
 * vmballoon_handle_one_result - Handle lock/unlock result for a single page.
 *
 * @b: pointer for %struct vmballoon.
 * @page: pointer for the page whose result should be handled.
 * @page_size: size of the page.
 * @status: status of the operation as provided by the hypervisor.
 */
static int vmballoon_handle_one_result(struct vmballoon *b, struct page *page,
				       enum vmballoon_page_size_type page_size,
				       unsigned long status)
{
	/* On success do nothing. The page is already on the balloon list. */
	if (likely(status == VMW_BALLOON_SUCCESS))
		return 0;

	pr_debug("%s: failed comm pfn %lx status %lu page_size %s\n", __func__,
		 page_to_pfn(page), status,
		 vmballoon_page_size_names[page_size]);

	/* Error occurred */
	vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC,
				 page_size);

	return -EIO;
}

/**
 * vmballoon_status_page - returns the status of (un)lock operation
 *
 * @b: pointer to the balloon.
 * @idx: index for the page for which the operation is performed.
 * @p: pointer to where the page struct is returned.
 *
 * Following a lock or unlock operation, returns the status of the operation for
 * an individual page. Provides the page that the operation was performed on in
 * the @p argument.
 *
 * Returns: The status of a lock or unlock operation for an individual page.
 */
static unsigned long vmballoon_status_page(struct vmballoon *b, int idx,
					   struct page **p)
{
	if (static_branch_likely(&vmw_balloon_batching)) {
		/* batching mode */
		*p = pfn_to_page(b->batch_page[idx].pfn);
		return b->batch_page[idx].status;
	}

	/* non-batching mode */
	*p = b->page;

	/*
	 * If a failure occurs, the indication will be provided in the status
	 * of the entire operation, which is considered before the individual
	 * page status. So for non-batching mode, the indication is always of
	 * success.
	 */
	return VMW_BALLOON_SUCCESS;
}

/**
 * vmballoon_lock_op - notifies the host about inflated/deflated pages.
 * @b: pointer to the balloon.
 * @num_pages: number of inflated/deflated pages.
 * @page_size: size of the page.
 * @op: the type of operation (lock or unlock).
 *
 * Notify the host about page(s) that were ballooned (or removed from the
 * balloon) so that the host can use them without fear that the guest will
 * need them (or stop using them since the VM does). Host may reject some
 * pages, we need to check the return value and maybe submit a different page.
 * The pages that are inflated/deflated are pointed by @b->page.
 *
 * Return: result as provided by the hypervisor.
 */
static unsigned long vmballoon_lock_op(struct vmballoon *b,
				       unsigned int num_pages,
				       enum vmballoon_page_size_type page_size,
				       enum vmballoon_op op)
{
	unsigned long cmd, pfn;

	lockdep_assert_held(&b->comm_lock);

	if (static_branch_likely(&vmw_balloon_batching)) {
		if (op == VMW_BALLOON_INFLATE)
			cmd = page_size == VMW_BALLOON_2M_PAGE ?
				VMW_BALLOON_CMD_BATCHED_2M_LOCK :
				VMW_BALLOON_CMD_BATCHED_LOCK;
		else
			cmd = page_size == VMW_BALLOON_2M_PAGE ?
				VMW_BALLOON_CMD_BATCHED_2M_UNLOCK :
				VMW_BALLOON_CMD_BATCHED_UNLOCK;

		pfn = PHYS_PFN(virt_to_phys(b->batch_page));
	} else {
		cmd = op == VMW_BALLOON_INFLATE ? VMW_BALLOON_CMD_LOCK :
						  VMW_BALLOON_CMD_UNLOCK;
		pfn = page_to_pfn(b->page);

		/* In non-batching mode, PFNs must fit in 32-bit */
		if (unlikely(pfn != (u32)pfn))
			return VMW_BALLOON_ERROR_PPN_INVALID;
	}

	return vmballoon_cmd(b, cmd, pfn, num_pages);
}

/**
 * vmballoon_add_page - adds a page towards lock/unlock operation.
 *
 * @b: pointer to the balloon.
 * @idx: index of the page to be ballooned in this batch.
 * @p: pointer to the page that is about to be ballooned.
 *
 * Adds the page to be ballooned. Must be called while holding @comm_lock.
 */
static void vmballoon_add_page(struct vmballoon *b, unsigned int idx,
			       struct page *p)
{
	lockdep_assert_held(&b->comm_lock);

	if (static_branch_likely(&vmw_balloon_batching))
		b->batch_page[idx] = (struct vmballoon_batch_entry)
					{ .pfn = page_to_pfn(p) };
	else
		b->page = p;
}

/**
 * vmballoon_lock - lock or unlock a batch of pages.
 *
 * @b: pointer to the balloon.
 * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
 *
 * Notifies the host about ballooned pages (after inflation or deflation,
 * according to @ctl). If the host rejects the page put it on the
 * @ctl refuse list. These refused pages are then released when moving to the
 * next size of pages.
 *
 * Note that we neither free any @page here nor put them back on the ballooned
 * pages list. Instead we queue it for later processing. We do that for several
 * reasons. First, we do not want to free the page under the lock. Second, it
 * allows us to unify the handling of lock and unlock. In the inflate case, the
 * caller will check if there are too many refused pages and release them.
 * Although it is not identical to the past behavior, it should not affect
 * performance.
 */
static int vmballoon_lock(struct vmballoon *b, struct vmballoon_ctl *ctl)
{
	unsigned long batch_status;
	struct page *page;
	unsigned int i, num_pages;

	num_pages = ctl->n_pages;
	if (num_pages == 0)
		return 0;

	/* communication with the host is done under the communication lock */
	spin_lock(&b->comm_lock);

	i = 0;
	list_for_each_entry(page, &ctl->pages, lru)
		vmballoon_add_page(b, i++, page);

	batch_status = vmballoon_lock_op(b, ctl->n_pages, ctl->page_size,
					 ctl->op);

	/*
	 * Iterate over the pages in the provided list. Since we are changing
	 * @ctl->n_pages we are saving the original value in @num_pages and
	 * use this value to bound the loop.
	 */
	for (i = 0; i < num_pages; i++) {
		unsigned long status;

		status = vmballoon_status_page(b, i, &page);

		/*
		 * Failure of the whole batch overrides a single operation's
		 * result.
		 */
		if (batch_status != VMW_BALLOON_SUCCESS)
			status = batch_status;

		/* Continue if no error happened */
		if (!vmballoon_handle_one_result(b, page, ctl->page_size,
						 status))
			continue;

		/*
		 * Error happened. Move the pages to the refused list and update
		 * the pages number.
		 */
		list_move(&page->lru, &ctl->refused_pages);
		ctl->n_pages--;
		ctl->n_refused_pages++;
	}

	spin_unlock(&b->comm_lock);

	return batch_status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
}

/**
 * vmballoon_release_page_list() - Releases a page list
 *
 * @page_list: list of pages to release.
 * @n_pages: pointer to the number of pages.
 * @page_size: whether the pages in the list are 2MB (or else 4KB).
 *
 * Releases the list of pages and zeros the number of pages.
 */
static void vmballoon_release_page_list(struct list_head *page_list,
					int *n_pages,
					enum vmballoon_page_size_type page_size)
{
	struct page *page, *tmp;

	list_for_each_entry_safe(page, tmp, page_list, lru) {
		list_del(&page->lru);
		__free_pages(page, vmballoon_page_order(page_size));
	}

	*n_pages = 0;
}

/*
 * Release pages that were allocated while attempting to inflate the
 * balloon but were refused by the host for one reason or another.
 */
static void vmballoon_release_refused_pages(struct vmballoon *b,
					    struct vmballoon_ctl *ctl)
{
	vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_FREE,
				 ctl->page_size);

	vmballoon_release_page_list(&ctl->refused_pages, &ctl->n_refused_pages,
				    ctl->page_size);
}

/**
 * vmballoon_change - retrieve the required balloon change
 *
 * @b: pointer for the balloon.
 *
 * Return: the required change for the balloon size. A positive number
 * indicates inflation, a negative number indicates a deflation.
 */
static int64_t vmballoon_change(struct vmballoon *b)
{
	int64_t size, target;

	size = atomic64_read(&b->size);
	target = READ_ONCE(b->target);

	/*
	 * We must cast first because of int sizes
	 * Otherwise we might get huge positives instead of negatives
	 */

	if (b->reset_required)
		return 0;

	/* consider a 2MB slack on deflate, unless the balloon is emptied */
	if (target < size && target != 0 &&
	    size - target < vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE))
		return 0;

	return target - size;
}
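
/*
 * For instance, with a 2 MB page covering 512 frames: if the balloon holds
 * 10000 frames and the target drops to 9700, the 300-frame difference falls
 * within the slack and no change is reported; if the target drops to zero,
 * the full -10000 is reported so the balloon can be emptied completely.
 */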

/**
 * vmballoon_enqueue_page_list() - Enqueues list of pages after inflation.
 *
 * @b: pointer to balloon.
 * @pages: list of pages to enqueue.
 * @n_pages: pointer to number of pages in list. The value is zeroed.
 * @page_size: whether the pages are 2MB or 4KB pages.
 *
 * Enqueues the provided list of pages in the ballooned page list, clears the
 * list and zeroes the number of pages that was provided.
 */
static void vmballoon_enqueue_page_list(struct vmballoon *b,
					struct list_head *pages,
					unsigned int *n_pages,
					enum vmballoon_page_size_type page_size)
{
	struct vmballoon_page_size *page_size_info = &b->page_sizes[page_size];

	list_splice_init(pages, &page_size_info->pages);
	*n_pages = 0;
}

/**
 * vmballoon_dequeue_page_list() - Dequeues page lists for deflation.
 *
 * @b: pointer to balloon.
 * @pages: list of pages to enqueue.
 * @n_pages: pointer to number of pages in list. The value is zeroed.
 * @page_size: whether the pages are 2MB or 4KB pages.
 * @n_req_pages: the number of requested pages.
 *
 * Dequeues the number of requested pages from the balloon for deflation. The
 * number of dequeued pages may be lower, if not enough pages in the requested
 * size are available.
 */
static void vmballoon_dequeue_page_list(struct vmballoon *b,
					struct list_head *pages,
					unsigned int *n_pages,
					enum vmballoon_page_size_type page_size,
					unsigned int n_req_pages)
{
	struct vmballoon_page_size *page_size_info = &b->page_sizes[page_size];
	struct page *page, *tmp;
	unsigned int i = 0;

	list_for_each_entry_safe(page, tmp, &page_size_info->pages, lru) {
		list_move(&page->lru, pages);
		if (++i == n_req_pages)
			break;
	}
	*n_pages = i;
}

/**
 * vmballoon_inflate() - Inflate the balloon towards its target size.
 *
 * @b: pointer to the balloon.
 */
static void vmballoon_inflate(struct vmballoon *b)
{
	int64_t to_inflate_frames;
	struct vmballoon_ctl ctl = {
		.pages = LIST_HEAD_INIT(ctl.pages),
		.refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
		.page_size = b->max_page_size,
		.op = VMW_BALLOON_INFLATE
	};

	while ((to_inflate_frames = vmballoon_change(b)) > 0) {
		unsigned int to_inflate_pages, page_in_frames;
		int alloc_error, lock_error = 0;

		VM_BUG_ON(!list_empty(&ctl.pages));
		VM_BUG_ON(ctl.n_pages != 0);

		page_in_frames = vmballoon_page_in_frames(ctl.page_size);

		to_inflate_pages = min_t(unsigned long, b->batch_max_pages,
					 DIV_ROUND_UP_ULL(to_inflate_frames,
							  page_in_frames));

		/* Start by allocating */
		alloc_error = vmballoon_alloc_page_list(b, &ctl,
							to_inflate_pages);

		/* Actually lock the pages by telling the hypervisor */
		lock_error = vmballoon_lock(b, &ctl);

		/*
		 * If an error indicates that something serious went wrong,
		 * stop the inflation.
		 */
		if (lock_error)
			break;

		/* Update the balloon size */
		atomic64_add(ctl.n_pages * page_in_frames, &b->size);

		vmballoon_enqueue_page_list(b, &ctl.pages, &ctl.n_pages,
					    ctl.page_size);

		/*
		 * If allocation failed or the number of refused pages exceeds
		 * the maximum allowed, move to the next page size.
		 */
		if (alloc_error ||
		    ctl.n_refused_pages >= VMW_BALLOON_MAX_REFUSED) {
			if (ctl.page_size == VMW_BALLOON_4K_PAGE)
				break;

			/*
			 * Ignore errors from locking as we now switch to 4k
			 * pages and we might get different errors.
			 */
			vmballoon_release_refused_pages(b, &ctl);
			ctl.page_size--;
		}

		cond_resched();
	}

	/*
	 * Release pages that were allocated while attempting to inflate the
	 * balloon but were refused by the host for one reason or another,
	 * and update the statistics.
	 */
	if (ctl.n_refused_pages != 0)
		vmballoon_release_refused_pages(b, &ctl);
}

/**
 * vmballoon_deflate() - Decrease the size of the balloon.
 *
 * @b: pointer to the balloon
 * @n_frames: the number of frames to deflate. If zero, automatically
 *            calculated according to the target size.
 * @coordinated: whether to coordinate with the host
 *
 * Decrease the size of the balloon allowing guest to use more memory.
 *
 * Return: The number of deflated frames (i.e., basic page size units)
 */
static unsigned long vmballoon_deflate(struct vmballoon *b, uint64_t n_frames,
				       bool coordinated)
{
	unsigned long deflated_frames = 0;
	unsigned long tried_frames = 0;
	struct vmballoon_ctl ctl = {
		.pages = LIST_HEAD_INIT(ctl.pages),
		.refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
		.page_size = VMW_BALLOON_4K_PAGE,
		.op = VMW_BALLOON_DEFLATE
	};

	/* free pages to reach target */
	while (true) {
		unsigned int to_deflate_pages, n_unlocked_frames;
		unsigned int page_in_frames;
		int64_t to_deflate_frames;
		bool deflated_all;

		page_in_frames = vmballoon_page_in_frames(ctl.page_size);

		VM_BUG_ON(!list_empty(&ctl.pages));
		VM_BUG_ON(ctl.n_pages);
		VM_BUG_ON(!list_empty(&ctl.refused_pages));
		VM_BUG_ON(ctl.n_refused_pages);

		/*
		 * If we were requested a specific number of frames, we try to
		 * deflate this number of frames. Otherwise, deflation is
		 * performed according to the target and balloon size.
		 */
		to_deflate_frames = n_frames ? n_frames - tried_frames :
					       -vmballoon_change(b);

		/* break if no work to do */
		if (to_deflate_frames <= 0)
			break;

		/*
		 * Calculate the number of frames based on current page size,
		 * but limit the deflated frames to a single chunk
		 */
		to_deflate_pages = min_t(unsigned long, b->batch_max_pages,
					 DIV_ROUND_UP_ULL(to_deflate_frames,
							  page_in_frames));

		/* First take the pages from the balloon pages. */
		vmballoon_dequeue_page_list(b, &ctl.pages, &ctl.n_pages,
					    ctl.page_size, to_deflate_pages);

		/*
		 * Before the pages are moved to the refused list, count their
		 * frames as frames that we tried to deflate.
		 */
		tried_frames += ctl.n_pages * page_in_frames;

		/*
		 * Unlock the pages by communicating with the hypervisor if the
		 * communication is coordinated (i.e., not pop). We ignore the
		 * return code. Instead we check whether we managed to unlock
		 * all the pages. If we failed, we will move to the next page
		 * size, and would eventually try again later.
		 */
		if (coordinated)
			vmballoon_lock(b, &ctl);

		/*
		 * Check if we deflated enough. We will move to the next page
		 * size if we did not manage to do so. This calculation takes
		 * place now, as once the pages are released, the number of
		 * pages is zeroed.
		 */
		deflated_all = (ctl.n_pages == to_deflate_pages);

		/* Update local and global counters */
		n_unlocked_frames = ctl.n_pages * page_in_frames;
		atomic64_sub(n_unlocked_frames, &b->size);
		deflated_frames += n_unlocked_frames;

		vmballoon_stats_page_add(b, VMW_BALLOON_PAGE_STAT_FREE,
					 ctl.page_size, ctl.n_pages);

		/* free the ballooned pages */
		vmballoon_release_page_list(&ctl.pages, &ctl.n_pages,
					    ctl.page_size);

		/* Return the refused pages to the ballooned list. */
		vmballoon_enqueue_page_list(b, &ctl.refused_pages,
					    &ctl.n_refused_pages,
					    ctl.page_size);

		/* If we failed to unlock all the pages, move to next size. */
		if (!deflated_all) {
			if (ctl.page_size == b->max_page_size)
				break;
			ctl.page_size++;
		}

		cond_resched();
	}

	return deflated_frames;
}

/**
 * vmballoon_deinit_batching - disables batching mode.
 *
 * @b: pointer to &struct vmballoon.
 *
 * Disables batching, by deallocating the page for communication with the
 * hypervisor and disabling the static key to indicate that batching is off.
 */
static void vmballoon_deinit_batching(struct vmballoon *b)
{
	free_page((unsigned long)b->batch_page);
	b->batch_page = NULL;
	static_branch_disable(&vmw_balloon_batching);
	b->batch_max_pages = 1;
}

/**
 * vmballoon_init_batching - enable batching mode.
 *
 * @b: pointer to &struct vmballoon.
 *
 * Enables batching, by allocating a page for communication with the hypervisor
 * and enabling the static_key to use batching.
 *
 * Return: zero on success or an appropriate error-code.
 */
static int vmballoon_init_batching(struct vmballoon *b)
{
	struct page *page;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return -ENOMEM;

	b->batch_page = page_address(page);
	b->batch_max_pages = PAGE_SIZE / sizeof(struct vmballoon_batch_entry);

	static_branch_enable(&vmw_balloon_batching);

	return 0;
}
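
/*
 * With 4 KiB pages and 8-byte batch entries this yields a batch_max_pages of
 * 512, which matches the batch limit noted for the batched lock/unlock
 * commands above.
 */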

/*
 * Receive notification and resize balloon
 */
static void vmballoon_doorbell(void *client_data)
{
	struct vmballoon *b = client_data;

	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_DOORBELL);

	mod_delayed_work(system_freezable_wq, &b->dwork, 0);
}

/*
 * Clean up vmci doorbell
 */
static void vmballoon_vmci_cleanup(struct vmballoon *b)
{
	vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
		      VMCI_INVALID_ID, VMCI_INVALID_ID);

	if (!vmci_handle_is_invalid(b->vmci_doorbell)) {
		vmci_doorbell_destroy(b->vmci_doorbell);
		b->vmci_doorbell = VMCI_INVALID_HANDLE;
	}
}

/**
 * vmballoon_vmci_init - Initialize vmci doorbell.
 *
 * @b: pointer to the balloon.
 *
 * Return: zero on success or when wakeup command not supported. Error-code
 * otherwise.
 *
 * Initialize vmci doorbell, to get notified as soon as balloon changes.
 */
static int vmballoon_vmci_init(struct vmballoon *b)
{
	unsigned long error;

	if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
		return 0;

	error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB,
				     VMCI_PRIVILEGE_FLAG_RESTRICTED,
				     vmballoon_doorbell, b);

	if (error != VMCI_SUCCESS)
		goto fail;

	error =	__vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
				b->vmci_doorbell.context,
				b->vmci_doorbell.resource, NULL);

	if (error != VMW_BALLOON_SUCCESS)
		goto fail;

	return 0;
fail:
	vmballoon_vmci_cleanup(b);
	return -EIO;
}

/**
 * vmballoon_pop - Quickly release all pages allocated for the balloon.
 *
 * @b: pointer to the balloon.
 *
 * This function is called when host decides to "reset" balloon for one reason
 * or another. Unlike normal "deflate" we do not (shall not) notify host of the
 * pages being released.
 */
static void vmballoon_pop(struct vmballoon *b)
{
	unsigned long size;

	while ((size = atomic64_read(&b->size)))
		vmballoon_deflate(b, size, false);
}

/*
 * Perform standard reset sequence by popping the balloon (in case it
 * is not empty) and then restarting protocol. This operation normally
 * happens when host responds with VMW_BALLOON_ERROR_RESET to a command.
 */
static void vmballoon_reset(struct vmballoon *b)
{
	int error;

	down_write(&b->conf_sem);

	vmballoon_vmci_cleanup(b);

	/* free all pages, skipping monitor unlock */
	vmballoon_pop(b);

	if (vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
		goto unlock;

	if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
		if (vmballoon_init_batching(b)) {
			/*
			 * We failed to initialize batching, inform the monitor
			 * about it by sending a null capability.
			 *
			 * The guest will retry in one second.
			 */
			vmballoon_send_start(b, 0);
			goto unlock;
		}
	} else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
		vmballoon_deinit_batching(b);
	}

	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_RESET);
	b->reset_required = false;

	error = vmballoon_vmci_init(b);
	if (error)
		pr_err("failed to initialize vmci doorbell\n");

	if (vmballoon_send_guest_id(b))
		pr_err("failed to send guest ID to the host\n");

unlock:
	up_write(&b->conf_sem);
}
1319
Nadav Amit8b079cd2018-09-20 10:30:15 -07001320/**
1321 * vmballoon_work - periodic balloon worker for reset, inflation and deflation.
1322 *
1323 * @work: pointer to the &work_struct which is provided by the workqueue.
1324 *
1325 * Resets the protocol if needed, gets the new size and adjusts balloon as
1326 * needed. Repeat in 1 sec.
Dmitry Torokhov453dc652010-04-23 13:18:08 -04001327 */
1328static void vmballoon_work(struct work_struct *work)
1329{
1330 struct delayed_work *dwork = to_delayed_work(work);
1331 struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
Nadav Amit8b079cd2018-09-20 10:30:15 -07001332 int64_t change = 0;
Dmitry Torokhov453dc652010-04-23 13:18:08 -04001333
Dmitry Torokhov453dc652010-04-23 13:18:08 -04001334 if (b->reset_required)
1335 vmballoon_reset(b);
1336
Nadav Amitc7b36902018-09-20 10:30:17 -07001337 down_read(&b->conf_sem);
1338
1339 /*
1340 * Update the stats while holding the semaphore to ensure that
1341 * @stats_enabled is consistent with whether the stats are actually
 1342 * enabled.
1343 */
1344 vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_TIMER);
1345
Nadav Amit0395be32018-09-20 10:30:16 -07001346 if (!vmballoon_send_get_target(b))
Nadav Amit8b079cd2018-09-20 10:30:15 -07001347 change = vmballoon_change(b);
Dmitry Torokhov453dc652010-04-23 13:18:08 -04001348
Nadav Amit8b079cd2018-09-20 10:30:15 -07001349 if (change != 0) {
Nadav Amit6e4453b2018-09-20 10:30:18 -07001350 pr_debug("%s - size: %llu, target %lu\n", __func__,
1351 atomic64_read(&b->size), READ_ONCE(b->target));
Nadav Amit8b079cd2018-09-20 10:30:15 -07001352
1353 if (change > 0)
Dmitry Torokhov453dc652010-04-23 13:18:08 -04001354 vmballoon_inflate(b);
Nadav Amit8b079cd2018-09-20 10:30:15 -07001355 else /* (change < 0) */
Nadav Amit6e4453b2018-09-20 10:30:18 -07001356 vmballoon_deflate(b, 0, true);
Dmitry Torokhov453dc652010-04-23 13:18:08 -04001357 }
1358
Nadav Amitc7b36902018-09-20 10:30:17 -07001359 up_read(&b->conf_sem);
1360
Dmitry Torokhovbeda94d2011-07-26 16:08:56 -07001361 /*
1362 * We are using a freezable workqueue so that balloon operations are
1363 * stopped while the system transitions to/from sleep/hibernation.
1364 */
1365 queue_delayed_work(system_freezable_wq,
1366 dwork, round_jiffies_relative(HZ));
Nadav Amitc7b36902018-09-20 10:30:17 -07001367
Dmitry Torokhov453dc652010-04-23 13:18:08 -04001368}
1369
1370/*
1371 * DEBUGFS Interface
1372 */
1373#ifdef CONFIG_DEBUG_FS
1374
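/*
 * Names of the per-size page statistics, as exposed through the vmmemctl
 * debugfs file.
 */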
Nadav Amitc7b36902018-09-20 10:30:17 -07001375static const char * const vmballoon_stat_page_names[] = {
1376 [VMW_BALLOON_PAGE_STAT_ALLOC] = "alloc",
1377 [VMW_BALLOON_PAGE_STAT_ALLOC_FAIL] = "allocFail",
1378 [VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC] = "errAlloc",
1379 [VMW_BALLOON_PAGE_STAT_REFUSED_FREE] = "errFree",
1380 [VMW_BALLOON_PAGE_STAT_FREE] = "free"
1381};
1382
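/* Names of the general balloon statistics, as exposed through debugfs. */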
1383static const char * const vmballoon_stat_names[] = {
1384 [VMW_BALLOON_STAT_TIMER] = "timer",
Nadav Amit8840a6f2018-09-20 10:30:20 -07001385 [VMW_BALLOON_STAT_DOORBELL] = "doorbell",
1386 [VMW_BALLOON_STAT_RESET] = "reset",
Nadav Amitc7b36902018-09-20 10:30:17 -07001387};
1388
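/**
 * vmballoon_enable_stats - allocate the statistics and start collecting them.
 *
 * @b: pointer to the balloon.
 *
 * Takes the configuration semaphore for writing, allocates the statistics
 * structure unless another reader already did so, and enables the static key
 * that gates statistics collection.
 *
 * Return: zero on success, -ENOMEM if the allocation failed.
 */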
Nadav Amitc7b36902018-09-20 10:30:17 -07001389static int vmballoon_enable_stats(struct vmballoon *b)
1390{
1391 int r = 0;
1392
1393 down_write(&b->conf_sem);
1394
1395 /* did we somehow race with another reader which enabled stats? */
1396 if (b->stats)
1397 goto out;
1398
1399 b->stats = kzalloc(sizeof(*b->stats), GFP_KERNEL);
1400
1401 if (!b->stats) {
1402 /* allocation failed */
1403 r = -ENOMEM;
1404 goto out;
1405 }
1406 static_key_enable(&balloon_stat_enabled.key);
1407out:
1408 up_write(&b->conf_sem);
1409 return r;
1410}
1411
1412/**
1413 * vmballoon_debug_show - shows statistics of balloon operations.
1414 * @f: pointer to the &struct seq_file.
1415 * @offset: ignored.
1416 *
 1417 * Provides the statistics that can be accessed through the vmmemctl file in
 1418 * debugfs. To avoid the overhead, mainly that of memory, of collecting the
 1419 * statistics, we only collect them after the first time the counters are read.
1420 *
1421 * Return: zero on success or an error code.
1422 */
Dmitry Torokhov453dc652010-04-23 13:18:08 -04001423static int vmballoon_debug_show(struct seq_file *f, void *offset)
1424{
1425 struct vmballoon *b = f->private;
Nadav Amitc7b36902018-09-20 10:30:17 -07001426 int i, j;
1427
1428 /* enables stats if they are disabled */
1429 if (!b->stats) {
1430 int r = vmballoon_enable_stats(b);
1431
1432 if (r)
1433 return r;
1434 }
Dmitry Torokhov453dc652010-04-23 13:18:08 -04001435
Philip P. Moltmannb36e89d2015-08-06 15:18:00 -07001436 /* format capabilities info */
Nadav Amit6e4453b2018-09-20 10:30:18 -07001437 seq_printf(f, "%-22s: %#16x\n", "balloon capabilities",
Nadav Amitc7b36902018-09-20 10:30:17 -07001438 VMW_BALLOON_CAPABILITIES);
Nadav Amit6e4453b2018-09-20 10:30:18 -07001439 seq_printf(f, "%-22s: %#16lx\n", "used capabilities", b->capabilities);
Nadav Amitc7b36902018-09-20 10:30:17 -07001440 seq_printf(f, "%-22s: %16s\n", "is resetting",
1441 b->reset_required ? "y" : "n");
Philip P. Moltmannb36e89d2015-08-06 15:18:00 -07001442
Dmitry Torokhov453dc652010-04-23 13:18:08 -04001443 /* format size info */
Nadav Amit6e4453b2018-09-20 10:30:18 -07001444 seq_printf(f, "%-22s: %16lu\n", "target", READ_ONCE(b->target));
1445 seq_printf(f, "%-22s: %16llu\n", "current", atomic64_read(&b->size));
Dmitry Torokhov453dc652010-04-23 13:18:08 -04001446
Nadav Amit68131182018-09-20 10:30:08 -07001447 for (i = 0; i < VMW_BALLOON_CMD_NUM; i++) {
1448 if (vmballoon_cmd_names[i] == NULL)
1449 continue;
1450
Nadav Amitc7b36902018-09-20 10:30:17 -07001451 seq_printf(f, "%-22s: %16llu (%llu failed)\n",
1452 vmballoon_cmd_names[i],
1453 atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_STAT]),
1454 atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_FAIL_STAT]));
Nadav Amit68131182018-09-20 10:30:08 -07001455 }
1456
Nadav Amitc7b36902018-09-20 10:30:17 -07001457 for (i = 0; i < VMW_BALLOON_STAT_NUM; i++)
1458 seq_printf(f, "%-22s: %16llu\n",
1459 vmballoon_stat_names[i],
1460 atomic64_read(&b->stats->general_stat[i]));
1461
1462 for (i = 0; i < VMW_BALLOON_PAGE_STAT_NUM; i++) {
1463 for (j = 0; j < VMW_BALLOON_NUM_PAGE_SIZES; j++)
1464 seq_printf(f, "%-18s(%s): %16llu\n",
1465 vmballoon_stat_page_names[i],
1466 vmballoon_page_size_names[j],
1467 atomic64_read(&b->stats->page_stat[i][j]));
1468 }
Dmitry Torokhov453dc652010-04-23 13:18:08 -04001469
1470 return 0;
1471}
1472
Yangtao Li2796b432018-12-01 12:05:30 -05001473DEFINE_SHOW_ATTRIBUTE(vmballoon_debug);
Dmitry Torokhov453dc652010-04-23 13:18:08 -04001474
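/**
 * vmballoon_debugfs_init - create the balloon debugfs entry.
 *
 * @b: pointer to the balloon.
 *
 * Creates the read-only "vmmemctl" debugfs file that reports the balloon
 * statistics via vmballoon_debug_show().
 *
 * Return: zero on success or a negative error code.
 */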
1475static int __init vmballoon_debugfs_init(struct vmballoon *b)
1476{
1477 int error;
1478
1479 b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
1480 &vmballoon_debug_fops);
1481 if (IS_ERR(b->dbg_entry)) {
1482 error = PTR_ERR(b->dbg_entry);
1483 pr_err("failed to create debugfs entry, error: %d\n", error);
1484 return error;
1485 }
1486
1487 return 0;
1488}
1489
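/**
 * vmballoon_debugfs_exit - remove the balloon debugfs entry.
 *
 * @b: pointer to the balloon.
 *
 * Disables statistics collection, removes the debugfs entry and frees the
 * statistics structure.
 */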
1490static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
1491{
Nadav Amitc7b36902018-09-20 10:30:17 -07001492 static_key_disable(&balloon_stat_enabled.key);
Dmitry Torokhov453dc652010-04-23 13:18:08 -04001493 debugfs_remove(b->dbg_entry);
Nadav Amitc7b36902018-09-20 10:30:17 -07001494 kfree(b->stats);
1495 b->stats = NULL;
Dmitry Torokhov453dc652010-04-23 13:18:08 -04001496}
1497
1498#else
1499
1500static inline int vmballoon_debugfs_init(struct vmballoon *b)
1501{
1502 return 0;
1503}
1504
1505static inline void vmballoon_debugfs_exit(struct vmballoon *b)
1506{
1507}
1508
1509#endif /* CONFIG_DEBUG_FS */
1510
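/**
 * vmballoon_init - initialize the balloon and schedule the first iteration.
 *
 * Bails out if the guest is not running on the VMware hypervisor. Otherwise
 * initializes the per-size page lists, the delayed worker, the debugfs entry
 * and the synchronization primitives, and queues the worker immediately; the
 * first iteration performs the initial reset, as reset_required starts out
 * true.
 *
 * Return: zero on success or a negative error code.
 */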
1511static int __init vmballoon_init(void)
1512{
Nadav Amit6e4453b2018-09-20 10:30:18 -07001513 enum vmballoon_page_size_type page_size;
Dmitry Torokhov453dc652010-04-23 13:18:08 -04001514 int error;
Nadav Amit6e4453b2018-09-20 10:30:18 -07001515
Dmitry Torokhov453dc652010-04-23 13:18:08 -04001516 /*
1517 * Check if we are running on VMware's hypervisor and bail out
1518 * if we are not.
1519 */
Juergen Gross03b2a322017-11-09 14:27:36 +01001520 if (x86_hyper_type != X86_HYPER_VMWARE)
Dmitry Torokhov453dc652010-04-23 13:18:08 -04001521 return -ENODEV;
1522
Nadav Amit6e4453b2018-09-20 10:30:18 -07001523 for (page_size = VMW_BALLOON_4K_PAGE;
1524 page_size <= VMW_BALLOON_LAST_SIZE; page_size++)
1525 INIT_LIST_HEAD(&balloon.page_sizes[page_size].pages);
1526
Dmitry Torokhov453dc652010-04-23 13:18:08 -04001527
Dmitry Torokhov453dc652010-04-23 13:18:08 -04001528 INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);
1529
Dmitry Torokhov453dc652010-04-23 13:18:08 -04001530 error = vmballoon_debugfs_init(&balloon);
1531 if (error)
Dmitry Torokhovbeda94d2011-07-26 16:08:56 -07001532 return error;
Dmitry Torokhov453dc652010-04-23 13:18:08 -04001533
Nadav Amit6e4453b2018-09-20 10:30:18 -07001534 spin_lock_init(&balloon.comm_lock);
Nadav Amitc7b36902018-09-20 10:30:17 -07001535 init_rwsem(&balloon.conf_sem);
Philip P. Moltmann48e3d662015-08-06 15:18:01 -07001536 balloon.vmci_doorbell = VMCI_INVALID_HANDLE;
Philip P. Moltmannd7568c12015-08-06 15:18:01 -07001537 balloon.batch_page = NULL;
1538 balloon.page = NULL;
1539 balloon.reset_required = true;
1540
Dmitry Torokhovbeda94d2011-07-26 16:08:56 -07001541 queue_delayed_work(system_freezable_wq, &balloon.dwork, 0);
Dmitry Torokhov453dc652010-04-23 13:18:08 -04001542
1543 return 0;
Dmitry Torokhov453dc652010-04-23 13:18:08 -04001544}
Nadav Amitc3cc1b02018-06-19 16:00:27 -07001545
1546/*
1547 * Using late_initcall() instead of module_init() allows the balloon to use the
1548 * VMCI doorbell even when the balloon is built into the kernel. Otherwise the
1549 * VMCI is probed only after the balloon is initialized. If the balloon is used
1550 * as a module, late_initcall() is equivalent to module_init().
1551 */
1552late_initcall(vmballoon_init);
Dmitry Torokhov453dc652010-04-23 13:18:08 -04001553
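/**
 * vmballoon_exit - tear down the balloon on module unload.
 *
 * Cleans up the VMCI doorbell, stops the worker, removes the debugfs entry,
 * resets the connection with the monitor and releases all ballooned pages.
 */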
1554static void __exit vmballoon_exit(void)
1555{
Philip P. Moltmann48e3d662015-08-06 15:18:01 -07001556 vmballoon_vmci_cleanup(&balloon);
Dmitry Torokhov453dc652010-04-23 13:18:08 -04001557 cancel_delayed_work_sync(&balloon.dwork);
Dmitry Torokhov453dc652010-04-23 13:18:08 -04001558
1559 vmballoon_debugfs_exit(&balloon);
1560
1561 /*
 1562 * Deallocate all reserved memory and reset the connection with the monitor.
 1563 * Reset the connection before deallocating memory to avoid the potential for
 1564 * additional spurious resets from the guest touching deallocated pages.
1565 */
Philip P. Moltmannd7568c12015-08-06 15:18:01 -07001566 vmballoon_send_start(&balloon, 0);
Dmitry Torokhov453dc652010-04-23 13:18:08 -04001567 vmballoon_pop(&balloon);
1568}
1569module_exit(vmballoon_exit);