/*
 * IOMMU helpers in MMU context.
 *
 * Copyright (C) 2015 IBM Corp. <aik@ozlabs.ru>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
12
Ingo Molnar3f07c012017-02-08 18:51:30 +010013#include <linux/sched/signal.h>
Alexey Kardashevskiy15b244a2015-06-05 16:35:24 +100014#include <linux/slab.h>
15#include <linux/rculist.h>
16#include <linux/vmalloc.h>
17#include <linux/mutex.h>
Balbir Singh2e5bbb52016-09-06 16:27:31 +100018#include <linux/migrate.h>
19#include <linux/hugetlb.h>
20#include <linux/swap.h>
Alexey Kardashevskiy425333b2018-09-10 18:29:07 +100021#include <linux/sizes.h>
Alexey Kardashevskiy15b244a2015-06-05 16:35:24 +100022#include <asm/mmu_context.h>
Alexey Kardashevskiy76fa4972018-07-17 17:19:13 +100023#include <asm/pte-walk.h>
Aneesh Kumar K.V678e1742019-03-05 15:47:47 -080024#include <linux/mm_inline.h>
Alexey Kardashevskiy15b244a2015-06-05 16:35:24 +100025
static DEFINE_MUTEX(mem_list_mutex);

#define MM_IOMMU_TABLE_GROUP_PAGE_DIRTY	0x1
#define MM_IOMMU_TABLE_GROUP_PAGE_MASK	~(SZ_4K - 1)

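/*
 * Note on the flag bits above: every hpas[] entry is at least SZ_4K
 * aligned, so its low bits are free to carry state.
 * mm_iommu_ua_mark_dirty_rm() sets MM_IOMMU_TABLE_GROUP_PAGE_DIRTY in
 * the entry, mm_iommu_unpin() transfers it to the struct page via
 * SetPageDirty(), and MM_IOMMU_TABLE_GROUP_PAGE_MASK strips the flag
 * when an entry is translated back to a host physical address.
 */
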
struct mm_iommu_table_group_mem_t {
	struct list_head next;
	struct rcu_head rcu;
	unsigned long used;
	atomic64_t mapped;
	unsigned int pageshift;
	u64 ua;			/* userspace address */
	u64 entries;		/* number of entries in hpas/hpages[] */
	/*
	 * In mm_iommu_do_alloc() we temporarily use this to store the
	 * struct page addresses.
	 *
	 * We need to convert ua to hpa in real mode. Make it
	 * simpler by storing the physical address.
	 */
	union {
		struct page **hpages;	/* vmalloc'ed */
		phys_addr_t *hpas;
	};
#define MM_IOMMU_TABLE_INVALID_HPA	((uint64_t)-1)
	u64 dev_hpa;		/* Device memory base address */
};

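/*
 * Charge or uncharge @npages against mm->locked_vm: pinning in
 * mm_iommu_do_alloc() is accounted against RLIMIT_MEMLOCK unless the
 * caller has CAP_IPC_LOCK, and the charge is dropped again from
 * mm_iommu_put() (or on an allocation failure).
 */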
static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
		unsigned long npages, bool incr)
{
	long ret = 0, locked, lock_limit;

	if (!npages)
		return 0;

	down_write(&mm->mmap_sem);

	if (incr) {
		locked = mm->locked_vm + npages;
		lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			ret = -ENOMEM;
		else
			mm->locked_vm += npages;
	} else {
		if (WARN_ON_ONCE(npages > mm->locked_vm))
			npages = mm->locked_vm;
		mm->locked_vm -= npages;
	}

	pr_debug("[%d] RLIMIT_MEMLOCK HASH64 %c%ld %ld/%ld\n",
			current ? current->pid : 0,
			incr ? '+' : '-',
			npages << PAGE_SHIFT,
			mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK));
	up_write(&mm->mmap_sem);

	return ret;
}

bool mm_iommu_preregistered(struct mm_struct *mm)
{
	return !list_empty(&mm->context.iommu_group_mem_list);
}
EXPORT_SYMBOL_GPL(mm_iommu_preregistered);

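/*
 * Pin @entries pages of userspace memory starting at @ua (or, when
 * @dev_hpa is not MM_IOMMU_TABLE_INVALID_HPA, just record a device
 * memory region), account the pages against the memlock limit and add
 * the descriptor to the per-mm list of preregistered regions.
 */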
static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
		unsigned long entries, unsigned long dev_hpa,
		struct mm_iommu_table_group_mem_t **pmem)
{
	struct mm_iommu_table_group_mem_t *mem, *mem2;
	long i, ret, locked_entries = 0, pinned = 0;
	unsigned int pageshift;
	unsigned long entry, chunk;

	if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) {
		ret = mm_iommu_adjust_locked_vm(mm, entries, true);
		if (ret)
			return ret;

		locked_entries = entries;
	}

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem) {
		ret = -ENOMEM;
		goto unlock_exit;
	}

	if (dev_hpa != MM_IOMMU_TABLE_INVALID_HPA) {
		mem->pageshift = __ffs(dev_hpa | (entries << PAGE_SHIFT));
		mem->dev_hpa = dev_hpa;
		goto good_exit;
	}
	mem->dev_hpa = MM_IOMMU_TABLE_INVALID_HPA;

	/*
	 * As a starting point for the maximum page size calculation, use
	 * the natural alignment of @ua and @entries; this allows IOMMU
	 * pages smaller than huge pages but still bigger than PAGE_SIZE.
	 */
	mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT));
	mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0])));
	if (!mem->hpas) {
		kfree(mem);
		ret = -ENOMEM;
		goto unlock_exit;
	}

	down_read(&mm->mmap_sem);
	chunk = (1UL << (PAGE_SHIFT + MAX_ORDER - 1)) /
			sizeof(struct vm_area_struct *);
	chunk = min(chunk, entries);
	for (entry = 0; entry < entries; entry += chunk) {
		unsigned long n = min(entries - entry, chunk);

		ret = get_user_pages_longterm(ua + (entry << PAGE_SHIFT), n,
				FOLL_WRITE, mem->hpages + entry, NULL);
		if (ret == n) {
			pinned += n;
			continue;
		}
		if (ret > 0)
			pinned += ret;
		break;
	}
	up_read(&mm->mmap_sem);
	if (pinned != entries) {
		if (!ret)
			ret = -EFAULT;
		goto free_exit;
	}

	pageshift = PAGE_SHIFT;
	for (i = 0; i < entries; ++i) {
		struct page *page = mem->hpages[i];

		/*
		 * Allow using IOMMU pages larger than 64k, but only if
		 * the memory is backed by hugetlb.
		 */
		if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page)) {
			struct page *head = compound_head(page);

			pageshift = compound_order(head) + PAGE_SHIFT;
		}
		mem->pageshift = min(mem->pageshift, pageshift);
		/*
		 * We do not need the struct page reference any more,
		 * switch to the physical address.
		 */
		mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
	}

good_exit:
	atomic64_set(&mem->mapped, 1);
	mem->used = 1;
	mem->ua = ua;
	mem->entries = entries;

	mutex_lock(&mem_list_mutex);

	list_for_each_entry_rcu(mem2, &mm->context.iommu_group_mem_list, next) {
		/* Overlap? */
		if ((mem2->ua < (ua + (entries << PAGE_SHIFT))) &&
				(ua < (mem2->ua +
				       (mem2->entries << PAGE_SHIFT)))) {
			ret = -EINVAL;
			mutex_unlock(&mem_list_mutex);
			goto free_exit;
		}
	}

	list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);

	mutex_unlock(&mem_list_mutex);

	*pmem = mem;

	return 0;

free_exit:
	/* Free the page references taken above */
	for (i = 0; i < pinned; i++)
		put_page(mem->hpages[i]);

	vfree(mem->hpas);
	kfree(mem);

unlock_exit:
	mm_iommu_adjust_locked_vm(mm, locked_entries, false);

	return ret;
}

long mm_iommu_new(struct mm_struct *mm, unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem)
{
	return mm_iommu_do_alloc(mm, ua, entries, MM_IOMMU_TABLE_INVALID_HPA,
			pmem);
}
EXPORT_SYMBOL_GPL(mm_iommu_new);

long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua,
		unsigned long entries, unsigned long dev_hpa,
		struct mm_iommu_table_group_mem_t **pmem)
{
	return mm_iommu_do_alloc(mm, ua, entries, dev_hpa, pmem);
}
EXPORT_SYMBOL_GPL(mm_iommu_newdev);

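/*
 * A minimal usage sketch for the preregistration API (the caller, its
 * variables and its error handling are hypothetical, shown only for
 * illustration):
 *
 *	struct mm_iommu_table_group_mem_t *mem;
 *	unsigned long hpa;
 *	long ret;
 *
 *	ret = mm_iommu_new(current->mm, ua, npages, &mem);
 *	if (ret)
 *		return ret;
 *	ret = mm_iommu_ua_to_hpa(mem, ua, PAGE_SHIFT, &hpa);
 *	...
 *	ret = mm_iommu_put(current->mm, mem);
 */
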
static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem)
{
	long i;
	struct page *page = NULL;

	if (!mem->hpas)
		return;

	for (i = 0; i < mem->entries; ++i) {
		if (!mem->hpas[i])
			continue;

		page = pfn_to_page(mem->hpas[i] >> PAGE_SHIFT);
		if (!page)
			continue;

		if (mem->hpas[i] & MM_IOMMU_TABLE_GROUP_PAGE_DIRTY)
			SetPageDirty(page);

		put_page(page);
		mem->hpas[i] = 0;
	}
}

static void mm_iommu_do_free(struct mm_iommu_table_group_mem_t *mem)
{
	mm_iommu_unpin(mem);
	vfree(mem->hpas);
	kfree(mem);
}

static void mm_iommu_free(struct rcu_head *head)
{
	struct mm_iommu_table_group_mem_t *mem = container_of(head,
			struct mm_iommu_table_group_mem_t, rcu);

	mm_iommu_do_free(mem);
}

static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
{
	list_del_rcu(&mem->next);
	call_rcu(&mem->rcu, mm_iommu_free);
}

long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
{
	long ret = 0;
	unsigned long unlock_entries = 0;

	mutex_lock(&mem_list_mutex);

	if (mem->used == 0) {
		ret = -ENOENT;
		goto unlock_exit;
	}

	--mem->used;
	/* There are still users, exit */
	if (mem->used)
		goto unlock_exit;

	/* Are there still mappings? */
	if (atomic64_cmpxchg(&mem->mapped, 1, 0) != 1) {
		++mem->used;
		ret = -EBUSY;
		goto unlock_exit;
	}

	if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
		unlock_entries = mem->entries;

	/* @mapped became 0 so now mappings are disabled, release the region */
	mm_iommu_release(mem);

unlock_exit:
	mutex_unlock(&mem_list_mutex);

	mm_iommu_adjust_locked_vm(mm, unlock_entries, false);

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_put);

struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
		unsigned long ua, unsigned long size)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
		if ((mem->ua <= ua) &&
				(ua + size <= mem->ua +
				 (mem->entries << PAGE_SHIFT))) {
			ret = mem;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_lookup);

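/*
 * Real-mode variant of mm_iommu_lookup(): walks the list with
 * list_for_each_entry_lockless() as it is intended for real-mode
 * callers (hence the _rm suffix).
 */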
struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(struct mm_struct *mm,
		unsigned long ua, unsigned long size)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_lockless(mem, &mm->context.iommu_group_mem_list,
			next) {
		if ((mem->ua <= ua) &&
				(ua + size <= mem->ua +
				 (mem->entries << PAGE_SHIFT))) {
			ret = mem;
			break;
		}
	}

	return ret;
}

struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
		unsigned long ua, unsigned long entries)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	mutex_lock(&mem_list_mutex);

	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
		if ((mem->ua == ua) && (mem->entries == entries)) {
			ret = mem;
			++mem->used;
			break;
		}
	}

	mutex_unlock(&mem_list_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_get);

long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa)
{
	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
	u64 *va;

	if (entry >= mem->entries)
		return -EFAULT;

	if (pageshift > mem->pageshift)
		return -EFAULT;

	if (!mem->hpas) {
		*hpa = mem->dev_hpa + (ua - mem->ua);
		return 0;
	}

	va = &mem->hpas[entry];
	*hpa = (*va & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK);

	return 0;
}
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);

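/*
 * Real-mode variant of mm_iommu_ua_to_hpa(): hpas[] is vmalloc'ed, so
 * the array entry cannot simply be dereferenced here; resolve its
 * backing page with vmalloc_to_phys() first.
 */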
long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa)
{
	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
	unsigned long *pa;

	if (entry >= mem->entries)
		return -EFAULT;

	if (pageshift > mem->pageshift)
		return -EFAULT;

	if (!mem->hpas) {
		*hpa = mem->dev_hpa + (ua - mem->ua);
		return 0;
	}

	pa = (void *) vmalloc_to_phys(&mem->hpas[entry]);
	if (!pa)
		return -EFAULT;

	*hpa = (*pa & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK);

	return 0;
}

void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua)
{
	struct mm_iommu_table_group_mem_t *mem;
	long entry;
	void *va;
	unsigned long *pa;

	mem = mm_iommu_lookup_rm(mm, ua, PAGE_SIZE);
	if (!mem)
		return;

	if (mem->dev_hpa != MM_IOMMU_TABLE_INVALID_HPA)
		return;

	entry = (ua - mem->ua) >> PAGE_SHIFT;
	va = &mem->hpas[entry];

	pa = (void *) vmalloc_to_phys(va);
	if (!pa)
		return;

	*pa |= MM_IOMMU_TABLE_GROUP_PAGE_DIRTY;
}

bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
		unsigned int pageshift, unsigned long *size)
{
	struct mm_iommu_table_group_mem_t *mem;
	unsigned long end;

	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
		if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
			continue;

		end = mem->dev_hpa + (mem->entries << PAGE_SHIFT);
		if ((mem->dev_hpa <= hpa) && (hpa < end)) {
			/*
			 * Since the IOMMU page size might be bigger than
			 * PAGE_SIZE, the amount of preregistered memory
			 * starting from @hpa might be smaller than 1<<pageshift
			 * and the caller needs to distinguish this situation.
			 */
			*size = min(1UL << pageshift, end - hpa);
			return true;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(mm_iommu_is_devmem);

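/*
 * @mapped is set to 1 when a region is created, mm_iommu_mapped_inc()
 * only succeeds while it is non-zero and mm_iommu_mapped_dec() never
 * drops it below 1, so the cmpxchg from 1 to 0 in mm_iommu_put() can
 * only succeed once there are no active mappings left.
 */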
long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem)
{
	if (atomic64_inc_not_zero(&mem->mapped))
		return 0;

	/* Last mm_iommu_put() has been called, no more mappings allowed */
	return -ENXIO;
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_inc);

void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem)
{
	atomic64_add_unless(&mem->mapped, -1, 1);
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_dec);

void mm_iommu_init(struct mm_struct *mm)
{
	INIT_LIST_HEAD_RCU(&mm->context.iommu_group_mem_list);
}