// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xen implementation for transcendent memory (tmem)
 *
 * Copyright (C) 2009-2011 Oracle Corp.  All rights reserved.
 * Author: Dan Magenheimer
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/cleancache.h>
#include <linux/frontswap.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <xen/tmem.h>

#ifndef CONFIG_XEN_TMEM_MODULE
bool __read_mostly tmem_enabled = false;

static int __init enable_tmem(char *s)
{
        tmem_enabled = true;
        return 1;
}
__setup("tmem", enable_tmem);
#endif

#ifdef CONFIG_CLEANCACHE
static bool cleancache __read_mostly = true;
module_param(cleancache, bool, S_IRUGO);
static bool selfballooning __read_mostly = true;
module_param(selfballooning, bool, S_IRUGO);
#endif /* CONFIG_CLEANCACHE */

#ifdef CONFIG_FRONTSWAP
static bool frontswap __read_mostly = true;
module_param(frontswap, bool, S_IRUGO);
#else /* CONFIG_FRONTSWAP */
#define frontswap (0)
#endif /* CONFIG_FRONTSWAP */

#ifdef CONFIG_XEN_SELFBALLOONING
static bool selfshrinking __read_mostly = true;
module_param(selfshrinking, bool, S_IRUGO);
#endif /* CONFIG_XEN_SELFBALLOONING */

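/*
 * Usage sketch for the tunables above (assuming the standard
 * kernel-parameters entries for this driver): when built in, tmem must
 * first be activated with "tmem" on the guest kernel command line; the
 * feature toggles then default to on and can be disabled at boot, e.g.
 *
 *      tmem tmem.cleancache=0 tmem.selfballooning=0
 *
 * All of the module parameters are read-only at runtime (S_IRUGO).
 */
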
#define TMEM_CONTROL               0
#define TMEM_NEW_POOL              1
#define TMEM_DESTROY_POOL          2
#define TMEM_NEW_PAGE              3
#define TMEM_PUT_PAGE              4
#define TMEM_GET_PAGE              5
#define TMEM_FLUSH_PAGE            6
#define TMEM_FLUSH_OBJECT          7
#define TMEM_READ                  8
#define TMEM_WRITE                 9
#define TMEM_XCHG                 10

/* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */
#define TMEM_POOL_PERSIST          1
#define TMEM_POOL_SHARED           2
#define TMEM_POOL_PAGESIZE_SHIFT   4
#define TMEM_VERSION_SHIFT        24


struct tmem_pool_uuid {
        u64 uuid_lo;
        u64 uuid_hi;
};

struct tmem_oid {
        u64 oid[3];
};
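
/*
 * Note: a tmem object id is three u64s (192 bits).  The cleancache hooks
 * below cast a struct cleancache_filekey directly to this type, and
 * xen_tmem_init() asserts with BUILD_BUG_ON that the two sizes match.
 */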

#define TMEM_POOL_PRIVATE_UUID  { 0, 0 }

/* flags for tmem_ops.new_pool */
#define TMEM_POOL_PERSIST       1
#define TMEM_POOL_SHARED        2

/* xen tmem foundation ops/hypercalls */

static inline int xen_tmem_op(u32 tmem_cmd, u32 tmem_pool, struct tmem_oid oid,
        u32 index, unsigned long gmfn, u32 tmem_offset, u32 pfn_offset, u32 len)
{
        struct tmem_op op;
        int rc = 0;

        op.cmd = tmem_cmd;
        op.pool_id = tmem_pool;
        op.u.gen.oid[0] = oid.oid[0];
        op.u.gen.oid[1] = oid.oid[1];
        op.u.gen.oid[2] = oid.oid[2];
        op.u.gen.index = index;
        op.u.gen.tmem_offset = tmem_offset;
        op.u.gen.pfn_offset = pfn_offset;
        op.u.gen.len = len;
        set_xen_guest_handle(op.u.gen.gmfn, (void *)gmfn);
        rc = HYPERVISOR_tmem_op(&op);
        return rc;
}

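/*
 * Worked example of the flag encoding below (an illustration, not new
 * behaviour): for pagesize == 4096 the loop computes pageshift == 12, so
 * (pageshift - 12) == 0 lands in the TMEM_POOL_PAGESIZE_SHIFT field,
 * i.e. page sizes are encoded relative to the 4K base.  TMEM_SPEC_VERSION
 * is defined in <xen/tmem.h> and lands in bits TMEM_VERSION_SHIFT and up.
 */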
static int xen_tmem_new_pool(struct tmem_pool_uuid uuid,
                             u32 flags, unsigned long pagesize)
{
        struct tmem_op op;
        int rc = 0, pageshift;

        for (pageshift = 0; pagesize != 1; pageshift++)
                pagesize >>= 1;
        flags |= (pageshift - 12) << TMEM_POOL_PAGESIZE_SHIFT;
        flags |= TMEM_SPEC_VERSION << TMEM_VERSION_SHIFT;
        op.cmd = TMEM_NEW_POOL;
        op.u.new.uuid[0] = uuid.uuid_lo;
        op.u.new.uuid[1] = uuid.uuid_hi;
        op.u.new.flags = flags;
        rc = HYPERVISOR_tmem_op(&op);
        return rc;
}

/* xen generic tmem ops */

static int xen_tmem_put_page(u32 pool_id, struct tmem_oid oid,
                             u32 index, struct page *page)
{
        return xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index,
                           xen_page_to_gfn(page), 0, 0, 0);
}

static int xen_tmem_get_page(u32 pool_id, struct tmem_oid oid,
                             u32 index, struct page *page)
{
        return xen_tmem_op(TMEM_GET_PAGE, pool_id, oid, index,
                           xen_page_to_gfn(page), 0, 0, 0);
}

static int xen_tmem_flush_page(u32 pool_id, struct tmem_oid oid, u32 index)
{
        return xen_tmem_op(TMEM_FLUSH_PAGE, pool_id, oid, index,
                           0, 0, 0, 0);
}

static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid)
{
        return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0);
}

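/*
 * Return-value convention: the wrappers above return the raw Xen tmem
 * status, where 1 means success for a get/put, while the cleancache and
 * frontswap hooks below translate that into the Linux convention of
 * 0 on success and -1 on failure.
 */
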
#ifdef CONFIG_CLEANCACHE
static int xen_tmem_destroy_pool(u32 pool_id)
{
        struct tmem_oid oid = { { 0 } };

        return xen_tmem_op(TMEM_DESTROY_POOL, pool_id, oid, 0, 0, 0, 0, 0);
}

/* cleancache ops */

static void tmem_cleancache_put_page(int pool, struct cleancache_filekey key,
                                     pgoff_t index, struct page *page)
{
        u32 ind = (u32) index;
        struct tmem_oid oid = *(struct tmem_oid *)&key;

        if (pool < 0)
                return;
        if (ind != index)
                return;
        mb(); /* ensure page is quiescent; tmem may address it with an alias */
        (void)xen_tmem_put_page((u32)pool, oid, ind, page);
}

static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
                                    pgoff_t index, struct page *page)
{
        u32 ind = (u32) index;
        struct tmem_oid oid = *(struct tmem_oid *)&key;
        int ret;

        /* translate return values to linux semantics */
        if (pool < 0)
                return -1;
        if (ind != index)
                return -1;
        ret = xen_tmem_get_page((u32)pool, oid, ind, page);
        if (ret == 1)
                return 0;
        else
                return -1;
}

static void tmem_cleancache_flush_page(int pool, struct cleancache_filekey key,
                                       pgoff_t index)
{
        u32 ind = (u32) index;
        struct tmem_oid oid = *(struct tmem_oid *)&key;

        if (pool < 0)
                return;
        if (ind != index)
                return;
        (void)xen_tmem_flush_page((u32)pool, oid, ind);
}

static void tmem_cleancache_flush_inode(int pool, struct cleancache_filekey key)
{
        struct tmem_oid oid = *(struct tmem_oid *)&key;

        if (pool < 0)
                return;
        (void)xen_tmem_flush_object((u32)pool, oid);
}

static void tmem_cleancache_flush_fs(int pool)
{
        if (pool < 0)
                return;
        (void)xen_tmem_destroy_pool((u32)pool);
}

static int tmem_cleancache_init_fs(size_t pagesize)
{
        struct tmem_pool_uuid uuid_private = TMEM_POOL_PRIVATE_UUID;

        return xen_tmem_new_pool(uuid_private, 0, pagesize);
}
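
/*
 * Design note: cleancache pools are created with flags == 0, i.e. as
 * ephemeral pools, so the hypervisor may discard their pages under
 * memory pressure and a later "get" is allowed to miss; that matches
 * cleancache's contract, which treats tmem as a best-effort cache.
 */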

static int tmem_cleancache_init_shared_fs(uuid_t *uuid, size_t pagesize)
{
        struct tmem_pool_uuid shared_uuid;

        shared_uuid.uuid_lo = *(u64 *)&uuid->b[0];
        shared_uuid.uuid_hi = *(u64 *)&uuid->b[8];
        return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
}

static const struct cleancache_ops tmem_cleancache_ops = {
        .put_page = tmem_cleancache_put_page,
        .get_page = tmem_cleancache_get_page,
        .invalidate_page = tmem_cleancache_flush_page,
        .invalidate_inode = tmem_cleancache_flush_inode,
        .invalidate_fs = tmem_cleancache_flush_fs,
        .init_shared_fs = tmem_cleancache_init_shared_fs,
        .init_fs = tmem_cleancache_init_fs
};
#endif

#ifdef CONFIG_FRONTSWAP
/* frontswap tmem operations */

/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
static int tmem_frontswap_poolid;

/*
 * Swizzling increases objects per swaptype, increasing tmem concurrency
 * for heavy swaploads.  Later, larger nr_cpus -> larger SWIZ_BITS
 */
#define SWIZ_BITS               4
#define SWIZ_MASK               ((1 << SWIZ_BITS) - 1)
#define _oswiz(_type, _ind)     ((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
#define iswiz(_ind)             (_ind >> SWIZ_BITS)

static inline struct tmem_oid oswiz(unsigned type, u32 ind)
{
        struct tmem_oid oid = { .oid = { 0 } };

        oid.oid[0] = _oswiz(type, ind);
        return oid;
}
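
/*
 * Worked example of the swizzle (illustrative values only): for swap
 * type 1 and page offset 0x1234, _oswiz(1, 0x1234) == (1 << 4) | 0x4
 * == 0x14, and iswiz(0x1234) == 0x123.  Consecutive offsets therefore
 * fan out across 16 tmem objects per swap device, while the high bits
 * become the page index within each object.
 */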

/* returns 0 if the page was successfully put into frontswap, -1 if not */
static int tmem_frontswap_store(unsigned type, pgoff_t offset,
                                struct page *page)
{
        u64 ind64 = (u64)offset;
        u32 ind = (u32)offset;
        int pool = tmem_frontswap_poolid;
        int ret;

        /* THP isn't supported */
        if (PageTransHuge(page))
                return -1;

        if (pool < 0)
                return -1;
        if (ind64 != ind)
                return -1;
        mb(); /* ensure page is quiescent; tmem may address it with an alias */
        ret = xen_tmem_put_page(pool, oswiz(type, ind), iswiz(ind), page);
        /* translate Xen tmem return values to linux semantics */
        if (ret == 1)
                return 0;
        else
                return -1;
}

/*
 * returns 0 if the page was successfully gotten from frontswap, -1 if
 * it was not present (should never happen!)
 */
static int tmem_frontswap_load(unsigned type, pgoff_t offset,
                               struct page *page)
{
        u64 ind64 = (u64)offset;
        u32 ind = (u32)offset;
        int pool = tmem_frontswap_poolid;
        int ret;

        if (pool < 0)
                return -1;
        if (ind64 != ind)
                return -1;
        ret = xen_tmem_get_page(pool, oswiz(type, ind), iswiz(ind), page);
        /* translate Xen tmem return values to linux semantics */
        if (ret == 1)
                return 0;
        else
                return -1;
}

/* flush a single page from frontswap */
static void tmem_frontswap_flush_page(unsigned type, pgoff_t offset)
{
        u64 ind64 = (u64)offset;
        u32 ind = (u32)offset;
        int pool = tmem_frontswap_poolid;

        if (pool < 0)
                return;
        if (ind64 != ind)
                return;
        (void) xen_tmem_flush_page(pool, oswiz(type, ind), iswiz(ind));
}

/* flush all pages from the passed swaptype */
static void tmem_frontswap_flush_area(unsigned type)
{
        int pool = tmem_frontswap_poolid;
        int ind;

        if (pool < 0)
                return;
        for (ind = SWIZ_MASK; ind >= 0; ind--)
                (void)xen_tmem_flush_object(pool, oswiz(type, ind));
}
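
/*
 * Note on the loop above: because oswiz() folds only the low SWIZ_BITS
 * of the offset into the object id, a swap area's pages live in exactly
 * SWIZ_MASK + 1 (i.e. 16) tmem objects, so flushing those objects
 * flushes the whole area.
 */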

static void tmem_frontswap_init(unsigned ignored)
{
        struct tmem_pool_uuid private = TMEM_POOL_PRIVATE_UUID;

        /* a single tmem poolid is used for all frontswap "types" (swapfiles) */
        if (tmem_frontswap_poolid < 0)
                tmem_frontswap_poolid =
                        xen_tmem_new_pool(private, TMEM_POOL_PERSIST, PAGE_SIZE);
}
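
/*
 * Design note: unlike the ephemeral (flags == 0) cleancache pools above,
 * the frontswap pool is created with TMEM_POOL_PERSIST, since a page
 * that was successfully stored must remain retrievable until frontswap
 * explicitly flushes it.
 */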

static struct frontswap_ops tmem_frontswap_ops = {
        .store = tmem_frontswap_store,
        .load = tmem_frontswap_load,
        .invalidate_page = tmem_frontswap_flush_page,
        .invalidate_area = tmem_frontswap_flush_area,
        .init = tmem_frontswap_init
};
#endif

static int __init xen_tmem_init(void)
{
        if (!xen_domain())
                return 0;
#ifdef CONFIG_FRONTSWAP
        if (tmem_enabled && frontswap) {
                tmem_frontswap_poolid = -1;
                frontswap_register_ops(&tmem_frontswap_ops);
                pr_info("frontswap enabled, RAM provided by Xen Transcendent Memory\n");
        }
#endif
#ifdef CONFIG_CLEANCACHE
        BUILD_BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
        if (tmem_enabled && cleancache) {
                int err;

                err = cleancache_register_ops(&tmem_cleancache_ops);
                if (err)
                        pr_warn("failed to enable cleancache: %d\n", err);
                else
                        pr_info("cleancache enabled, RAM provided by Xen Transcendent Memory\n");
        }
#endif
#ifdef CONFIG_XEN_SELFBALLOONING
        /*
         * There is no point in driving pages to the swap system if they
         * aren't going anywhere in the tmem universe.
         */
        if (!frontswap) {
                selfshrinking = false;
                selfballooning = false;
        }
        xen_selfballoon_init(selfballooning, selfshrinking);
#endif
        return 0;
}

module_init(xen_tmem_init)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dan Magenheimer <dan.magenheimer@oracle.com>");
MODULE_DESCRIPTION("Shim to Xen transcendent memory");