/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_set_memory.h>

/**
 * Allocates a ttm structure for the given BO.
 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	uint32_t page_flags = 0;

	dma_resv_assert_held(bo->base.resv);

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	if (bdev->no_retry)
		page_flags |= TTM_PAGE_FLAG_NO_RETRY;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		break;
	case ttm_bo_type_kernel:
		break;
	case ttm_bo_type_sg:
		page_flags |= TTM_PAGE_FLAG_SG;
		break;
	default:
		bo->ttm = NULL;
		pr_err("Illegal buffer object type\n");
		return -EINVAL;
	}

	bo->ttm = bdev->driver->ttm_tt_create(bo, page_flags);
	if (unlikely(bo->ttm == NULL))
		return -ENOMEM;

	return 0;
}
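
/*
 * Illustrative sketch (an assumption, not part of the upstream file): the
 * driver's ->ttm_tt_create() callback used above typically embeds a
 * ttm_dma_tt in its own structure, points ->func at its backend ops and
 * calls ttm_dma_tt_init().  The names my_ttm_tt, my_backend_func and
 * my_ttm_tt_create are hypothetical.
 *
 *	struct my_ttm_tt {
 *		struct ttm_dma_tt ttm;
 *	};
 *
 *	static struct ttm_tt *my_ttm_tt_create(struct ttm_buffer_object *bo,
 *					       uint32_t page_flags)
 *	{
 *		struct my_ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);
 *
 *		if (!tt)
 *			return NULL;
 *		tt->ttm.ttm.func = &my_backend_func;
 *		if (ttm_dma_tt_init(&tt->ttm, bo, page_flags)) {
 *			kfree(tt);
 *			return NULL;
 *		}
 *		return &tt->ttm.ttm;
 *	}
 */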

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*),
				    GFP_KERNEL | __GFP_ZERO);
	if (!ttm->pages)
		return -ENOMEM;
	return 0;
}

static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
					sizeof(*ttm->ttm.pages) +
					sizeof(*ttm->dma_address),
					GFP_KERNEL | __GFP_ZERO);
	if (!ttm->ttm.pages)
		return -ENOMEM;
	ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
	return 0;
}

static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages,
					  sizeof(*ttm->dma_address),
					  GFP_KERNEL | __GFP_ZERO);
	if (!ttm->dma_address)
		return -ENOMEM;
	return 0;
}

static int ttm_tt_set_page_caching(struct page *p,
				   enum ttm_caching_state c_old,
				   enum ttm_caching_state c_new)
{
	int ret = 0;

	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */

		ret = ttm_set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = ttm_set_pages_wc(p, 1);
	else if (c_new == tt_uncached)
		ret = ttm_set_pages_uc(p, 1);

	return ret;
}

/*
 * Change caching policy for the linear kernel map
 * for range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (ttm->state == tt_unpopulated) {
		/* Change caching but don't populate */
		ttm->caching_state = c_state;
		return 0;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}

int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

void ttm_tt_destroy(struct ttm_tt *ttm)
{
	if (ttm == NULL)
		return;

	ttm_tt_unbind(ttm);

	if (ttm->state == tt_unbound)
		ttm_tt_unpopulate(ttm);

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	ttm->swap_storage = NULL;
	ttm->func->destroy(ttm);
}

static void ttm_tt_init_fields(struct ttm_tt *ttm,
			       struct ttm_buffer_object *bo,
			       uint32_t page_flags)
{
	ttm->bdev = bo->bdev;
	ttm->num_pages = bo->num_pages;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;
	ttm->sg = bo->sg;
}

int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		uint32_t page_flags)
{
	ttm_tt_init_fields(ttm, bo, page_flags);

	if (ttm_tt_alloc_page_directory(ttm)) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

void ttm_tt_fini(struct ttm_tt *ttm)
{
	kvfree(ttm->pages);
	ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
		    uint32_t page_flags)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	ttm_tt_init_fields(ttm, bo, page_flags);

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);

int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
		   uint32_t page_flags)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	int ret;

	ttm_tt_init_fields(ttm, bo, page_flags);

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	if (page_flags & TTM_PAGE_FLAG_SG)
		ret = ttm_sg_tt_alloc_page_directory(ttm_dma);
	else
		ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
	if (ret) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_sg_tt_init);

void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	if (ttm->pages)
		kvfree(ttm->pages);
	else
		kvfree(ttm_dma->dma_address);
	ttm->pages = NULL;
	ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);

void ttm_tt_unbind(struct ttm_tt *ttm)
{
	if (ttm->state == tt_bound) {
		ttm->func->unbind(ttm);
		ttm->state = tt_unbound;
	}
}

int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem,
		struct ttm_operation_ctx *ctx)
{
	int ret = 0;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	ret = ttm_tt_populate(ttm, ctx);
	if (ret)
		return ret;

	ret = ttm->func->bind(ttm, bo_mem);
	if (unlikely(ret != 0))
		return ret;

	ttm->state = tt_bound;

	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);
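
/*
 * Illustrative sketch (an assumption, not part of the upstream file): the
 * usual caller of ttm_tt_create(), ttm_tt_set_placement_caching() and
 * ttm_tt_bind() is the buffer-object move path, which does roughly the
 * following when a BO moves into a non-fixed placement (error handling
 * omitted; "mem" is the new struct ttm_mem_reg):
 *
 *	if (bo->ttm == NULL)
 *		ret = ttm_tt_create(bo, zero_alloc);
 *	ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
 *	if (mem->mem_type != TTM_PL_SYSTEM)
 *		ret = ttm_tt_bind(bo->ttm, mem, ctx);
 */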

int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i;
	int ret = -ENOMEM;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		gfp_t gfp_mask = mapping_gfp_mask(swap_space);

		gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);
		from_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);

		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL))
			goto out_err;

		copy_highpage(to_page, from_page);
		put_page(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	return ret;
}

int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	if (!persistent_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (IS_ERR(swap_storage)) {
			pr_err("Failed allocating swap storage\n");
			return PTR_ERR(swap_storage);
		}
	} else {
		swap_storage = persistent_swap_storage;
	}

	swap_space = swap_storage->f_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		gfp_t gfp_mask = mapping_gfp_mask(swap_space);

		gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);

		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;

		to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);
		if (IS_ERR(to_page)) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		copy_highpage(to_page, from_page);
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		put_page(to_page);
	}

	ttm_tt_unpopulate(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

	return 0;
out_err:
	if (!persistent_swap_storage)
		fput(swap_storage);

	return ret;
}

static void ttm_tt_add_mapping(struct ttm_tt *ttm)
{
	pgoff_t i;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i]->mapping = ttm->bdev->dev_mapping;
}

int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (ttm->bdev->driver->ttm_tt_populate)
		ret = ttm->bdev->driver->ttm_tt_populate(ttm, ctx);
	else
		ret = ttm_pool_populate(ttm, ctx);
	if (!ret)
		ttm_tt_add_mapping(ttm);
	return ret;
}
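
/*
 * Illustrative sketch (an assumption, not part of the upstream file): a
 * driver backed by the DMA page pool typically implements its
 * ->ttm_tt_populate() hook by forwarding to ttm_dma_populate() from
 * ttm_page_alloc.h; the names my_ttm_tt and my_populate are hypothetical,
 * and "dev" stands for the driver's struct device.
 *
 *	static int my_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 *	{
 *		struct my_ttm_tt *tt = container_of(ttm, struct my_ttm_tt, ttm.ttm);
 *
 *		return ttm_dma_populate(&tt->ttm, dev, ctx);
 *	}
 */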

static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
	pgoff_t i;
	struct page **page = ttm->pages;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i) {
		(*page)->mapping = NULL;
		(*page++)->index = 0;
	}
}

void ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	if (ttm->state == tt_unpopulated)
		return;

	ttm_tt_clear_mapping(ttm);
	if (ttm->bdev->driver->ttm_tt_unpopulate)
		ttm->bdev->driver->ttm_tt_unpopulate(ttm);
	else
		ttm_pool_unpopulate(ttm);
}