/*
 * IOMMU helpers in MMU context.
 *
 * Copyright (C) 2015 IBM Corp. <aik@ozlabs.ru>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/sizes.h>
#include <asm/mmu_context.h>
#include <asm/pte-walk.h>
#include <linux/mm_inline.h>

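/*
 * Protects additions to and removals from the per-mm list of
 * preregistered regions and their @used counters; readers walk the
 * list under RCU (or locklessly in real mode).
 */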
static DEFINE_MUTEX(mem_list_mutex);

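/*
 * Each hpas[] entry is a 4K-aligned host physical address; the low bit
 * is borrowed to remember that the page may have been written to via
 * the IOMMU and must be marked dirty before it is unpinned.
 */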
#define MM_IOMMU_TABLE_GROUP_PAGE_DIRTY	0x1
#define MM_IOMMU_TABLE_GROUP_PAGE_MASK	~(SZ_4K - 1)

struct mm_iommu_table_group_mem_t {
	struct list_head next;
	struct rcu_head rcu;
	unsigned long used;
	atomic64_t mapped;
	unsigned int pageshift;
	u64 ua;			/* userspace address */
	u64 entries;		/* number of entries in hpas/hpages[] */
	/*
	 * While pinning, this temporarily stores the struct page
	 * pointers (hpages); afterwards it holds physical addresses
	 * (hpas). We need to convert ua to hpa in real mode, which is
	 * simpler when the physical address is already stored here.
	 */
	union {
		struct page **hpages;	/* vmalloc'ed */
		phys_addr_t *hpas;
	};
#define MM_IOMMU_TABLE_INVALID_HPA	((uint64_t)-1)
	u64 dev_hpa;		/* Device memory base address */
};

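/*
 * Charge (incr == true) or uncharge @npages against mm->locked_vm,
 * enforcing RLIMIT_MEMLOCK unless the task has CAP_IPC_LOCK.
 */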
static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
		unsigned long npages, bool incr)
{
	long ret = 0, locked, lock_limit;

	if (!npages)
		return 0;

	down_write(&mm->mmap_sem);

	if (incr) {
		locked = mm->locked_vm + npages;
		lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			ret = -ENOMEM;
		else
			mm->locked_vm += npages;
	} else {
		if (WARN_ON_ONCE(npages > mm->locked_vm))
			npages = mm->locked_vm;
		mm->locked_vm -= npages;
	}

	pr_debug("[%d] RLIMIT_MEMLOCK HASH64 %c%ld %ld/%ld\n",
			current ? current->pid : 0,
			incr ? '+' : '-',
			npages << PAGE_SHIFT,
			mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK));
	up_write(&mm->mmap_sem);

	return ret;
}

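/* Does this mm have any memory preregistered for IOMMU use? */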
bool mm_iommu_preregistered(struct mm_struct *mm)
{
	return !list_empty(&mm->context.iommu_group_mem_list);
}
EXPORT_SYMBOL_GPL(mm_iommu_preregistered);

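/*
 * Common worker for mm_iommu_new() and mm_iommu_newdev(): accounts the
 * pages against RLIMIT_MEMLOCK, pins them with get_user_pages_longterm()
 * (both skipped for device memory), works out the largest IOMMU page
 * size the backing allows, and inserts the region into the per-mm list,
 * rejecting overlaps with already preregistered regions.
 */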
static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
		unsigned long entries, unsigned long dev_hpa,
		struct mm_iommu_table_group_mem_t **pmem)
{
	struct mm_iommu_table_group_mem_t *mem, *mem2;
	long i, ret, locked_entries = 0, pinned = 0;
	unsigned int pageshift;

	if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) {
		ret = mm_iommu_adjust_locked_vm(mm, entries, true);
		if (ret)
			return ret;

		locked_entries = entries;
	}

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem) {
		ret = -ENOMEM;
		goto unlock_exit;
	}

	if (dev_hpa != MM_IOMMU_TABLE_INVALID_HPA) {
		mem->pageshift = __ffs(dev_hpa | (entries << PAGE_SHIFT));
		mem->dev_hpa = dev_hpa;
		goto good_exit;
	}
	mem->dev_hpa = MM_IOMMU_TABLE_INVALID_HPA;

	/*
	 * As a starting point for the maximum page size calculation,
	 * use the natural alignment of @ua and @entries; this allows
	 * IOMMU pages smaller than huge pages but still bigger than
	 * PAGE_SIZE.
	 */
	mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT));
	mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0])));
	if (!mem->hpas) {
		kfree(mem);
		ret = -ENOMEM;
		goto unlock_exit;
	}

	down_read(&mm->mmap_sem);
	ret = get_user_pages_longterm(ua, entries, FOLL_WRITE, mem->hpages, NULL);
	up_read(&mm->mmap_sem);
	pinned = ret > 0 ? ret : 0;
	if (ret != entries) {
		ret = -EFAULT;
		goto free_exit;
	}

	pageshift = PAGE_SHIFT;
	for (i = 0; i < entries; ++i) {
		struct page *page = mem->hpages[i];

		/*
		 * Allow IOMMU pages larger than 64k, but only if the
		 * memory is backed by hugetlb.
		 */
		if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page)) {
			struct page *head = compound_head(page);

			pageshift = compound_order(head) + PAGE_SHIFT;
		}
		mem->pageshift = min(mem->pageshift, pageshift);
		/*
		 * We do not need the struct page reference any more,
		 * switch to the physical address.
		 */
		mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
	}

good_exit:
	atomic64_set(&mem->mapped, 1);
	mem->used = 1;
	mem->ua = ua;
	mem->entries = entries;

	mutex_lock(&mem_list_mutex);

	list_for_each_entry_rcu(mem2, &mm->context.iommu_group_mem_list, next) {
		/* Overlap? */
		if ((mem2->ua < (ua + (entries << PAGE_SHIFT))) &&
				(ua < (mem2->ua +
				       (mem2->entries << PAGE_SHIFT)))) {
			ret = -EINVAL;
			mutex_unlock(&mem_list_mutex);
			goto free_exit;
		}
	}

	list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);

	mutex_unlock(&mem_list_mutex);

	*pmem = mem;

	return 0;

free_exit:
	/* Free the page references taken above */
	for (i = 0; i < pinned; i++)
		put_page(mem->hpages[i]);

	vfree(mem->hpas);
	kfree(mem);

unlock_exit:
	mm_iommu_adjust_locked_vm(mm, locked_entries, false);

	return ret;
}

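/*
 * Preregister a range of normal userspace memory: pin it and make it
 * available for IOMMU mapping. Undone by mm_iommu_put().
 */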
long mm_iommu_new(struct mm_struct *mm, unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem)
{
	return mm_iommu_do_alloc(mm, ua, entries, MM_IOMMU_TABLE_INVALID_HPA,
			pmem);
}
EXPORT_SYMBOL_GPL(mm_iommu_new);

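/*
 * Preregister device memory starting at @dev_hpa: nothing is pinned and
 * no struct pages are involved, only the translation window is recorded.
 */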
long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua,
		unsigned long entries, unsigned long dev_hpa,
		struct mm_iommu_table_group_mem_t **pmem)
{
	return mm_iommu_do_alloc(mm, ua, entries, dev_hpa, pmem);
}
EXPORT_SYMBOL_GPL(mm_iommu_newdev);

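/*
 * Drop the page references taken at preregistration time, transferring
 * the dirty flag from the hpas[] entry to the struct page. A no-op for
 * device memory regions, which have no pages pinned.
 */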
static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem)
{
	long i;
	struct page *page = NULL;

	if (!mem->hpas)
		return;

	for (i = 0; i < mem->entries; ++i) {
		if (!mem->hpas[i])
			continue;

		page = pfn_to_page(mem->hpas[i] >> PAGE_SHIFT);
		if (!page)
			continue;

		if (mem->hpas[i] & MM_IOMMU_TABLE_GROUP_PAGE_DIRTY)
			SetPageDirty(page);

		put_page(page);
		mem->hpas[i] = 0;
	}
}

static void mm_iommu_do_free(struct mm_iommu_table_group_mem_t *mem)
{
	mm_iommu_unpin(mem);
	vfree(mem->hpas);
	kfree(mem);
}

static void mm_iommu_free(struct rcu_head *head)
{
	struct mm_iommu_table_group_mem_t *mem = container_of(head,
			struct mm_iommu_table_group_mem_t, rcu);

	mm_iommu_do_free(mem);
}

static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
{
	list_del_rcu(&mem->next);
	call_rcu(&mem->rcu, mm_iommu_free);
}

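/*
 * Drop a use reference taken by mm_iommu_new()/mm_iommu_get(). When the
 * last reference goes away, unpin and free the region (after an RCU
 * grace period) unless it is still mapped in an IOMMU table, in which
 * case -EBUSY is returned and the reference is restored.
 */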
long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
{
	long ret = 0;
	unsigned long unlock_entries = 0;

	mutex_lock(&mem_list_mutex);

	if (mem->used == 0) {
		ret = -ENOENT;
		goto unlock_exit;
	}

	--mem->used;
	/* There are still users, exit */
	if (mem->used)
		goto unlock_exit;

	/* Are there still mappings? */
	if (atomic_cmpxchg(&mem->mapped, 1, 0) != 1) {
		++mem->used;
		ret = -EBUSY;
		goto unlock_exit;
	}

	if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
		unlock_entries = mem->entries;

	/* @mapped became 0 so now mappings are disabled, release the region */
	mm_iommu_release(mem);

unlock_exit:
	mutex_unlock(&mem_list_mutex);

	mm_iommu_adjust_locked_vm(mm, unlock_entries, false);

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_put);

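/* Find the preregistered region fully covering [ua, ua + size). */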
struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
		unsigned long ua, unsigned long size)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
		if ((mem->ua <= ua) &&
				(ua + size <= mem->ua +
				 (mem->entries << PAGE_SHIFT))) {
			ret = mem;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_lookup);

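/*
 * Real-mode variant of mm_iommu_lookup(): real mode cannot use the
 * regular RCU list walk, so use the lockless variant instead.
 */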
struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(struct mm_struct *mm,
		unsigned long ua, unsigned long size)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_lockless(mem, &mm->context.iommu_group_mem_list,
			next) {
		if ((mem->ua <= ua) &&
				(ua + size <= mem->ua +
				 (mem->entries << PAGE_SHIFT))) {
			ret = mem;
			break;
		}
	}

	return ret;
}

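/*
 * Find an exact match by @ua and @entries and take a use reference;
 * the caller releases it with mm_iommu_put().
 */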
struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
		unsigned long ua, unsigned long entries)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	mutex_lock(&mem_list_mutex);

	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
		if ((mem->ua == ua) && (mem->entries == entries)) {
			ret = mem;
			++mem->used;
			break;
		}
	}

	mutex_unlock(&mem_list_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_get);

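/*
 * Translate a userspace address within @mem to a host physical address,
 * failing if the requested IOMMU page size exceeds what the backing
 * pages can support. Device memory is a simple offset calculation.
 */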
long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa)
{
	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
	u64 *va;

	if (entry >= mem->entries)
		return -EFAULT;

	if (pageshift > mem->pageshift)
		return -EFAULT;

	if (!mem->hpas) {
		*hpa = mem->dev_hpa + (ua - mem->ua);
		return 0;
	}

	va = &mem->hpas[entry];
	*hpa = (*va & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK);

	return 0;
}
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);

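/*
 * Real-mode variant of mm_iommu_ua_to_hpa(): hpas[] is vmalloc'ed, so
 * it cannot be dereferenced through the vmalloc mapping in real mode
 * and has to be reached via vmalloc_to_phys() instead.
 */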
long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa)
{
	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
	unsigned long *pa;

	if (entry >= mem->entries)
		return -EFAULT;

	if (pageshift > mem->pageshift)
		return -EFAULT;

	if (!mem->hpas) {
		*hpa = mem->dev_hpa + (ua - mem->ua);
		return 0;
	}

	pa = (void *) vmalloc_to_phys(&mem->hpas[entry]);
	if (!pa)
		return -EFAULT;

	*hpa = (*pa & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK);

	return 0;
}

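/*
 * Record in the hpas[] entry that the page may have been written to via
 * the IOMMU, so that mm_iommu_unpin() calls SetPageDirty() before
 * dropping the page reference. Device memory regions are skipped.
 */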
void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua)
{
	struct mm_iommu_table_group_mem_t *mem;
	long entry;
	void *va;
	unsigned long *pa;

	mem = mm_iommu_lookup_rm(mm, ua, PAGE_SIZE);
	if (!mem)
		return;

	if (mem->dev_hpa != MM_IOMMU_TABLE_INVALID_HPA)
		return;

	entry = (ua - mem->ua) >> PAGE_SHIFT;
	va = &mem->hpas[entry];

	pa = (void *) vmalloc_to_phys(va);
	if (!pa)
		return;

	*pa |= MM_IOMMU_TABLE_GROUP_PAGE_DIRTY;
}

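/*
 * Check whether the host physical range starting at @hpa is backed by a
 * preregistered device memory region; if so, return via @size how much
 * of the 1 << pageshift window is actually covered.
 */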
bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
		unsigned int pageshift, unsigned long *size)
{
	struct mm_iommu_table_group_mem_t *mem;
	unsigned long end;

	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
		if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
			continue;

		end = mem->dev_hpa + (mem->entries << PAGE_SHIFT);
		if ((mem->dev_hpa <= hpa) && (hpa < end)) {
			/*
			 * Since the IOMMU page size might be bigger than
			 * PAGE_SIZE, the amount of preregistered memory
			 * starting from @hpa might be smaller than 1<<pageshift
			 * and the caller needs to distinguish this situation.
			 */
			*size = min(1UL << pageshift, end - hpa);
			return true;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(mm_iommu_is_devmem);

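/*
 * The @mapped counter pins the region against release: it starts at 1,
 * mm_iommu_put() drops it to 0 via cmpxchg once there are no users, and
 * the inc/dec pair below brackets every IOMMU mapping of the region.
 */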
long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem)
{
	if (atomic64_inc_not_zero(&mem->mapped))
		return 0;

	/* The last mm_iommu_put() has been called, no more mappings allowed */
	return -ENXIO;
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_inc);

void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem)
{
	atomic64_add_unless(&mem->mapped, -1, 1);
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_dec);

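/* Initialise the per-mm list of preregistered regions. */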
void mm_iommu_init(struct mm_struct *mm)
{
	INIT_LIST_HEAD_RCU(&mm->context.iommu_group_mem_list);
}