// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO: IOMMU DMA mapping support for TCE on POWER
 *
 * Copyright (C) 2013 IBM Corp. All rights reserved.
 *     Author: Alexey Kardashevskiy <aik@ozlabs.ru>
 *
 * Derived from original vfio_iommu_type1.c:
 * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/err.h>
#include <linux/vfio.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>

#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

#define DRIVER_VERSION  "0.1"
#define DRIVER_AUTHOR   "aik@ozlabs.ru"
#define DRIVER_DESC     "VFIO IOMMU SPAPR TCE"

static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group);

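/*
 * Accounting helpers for pinned memory: charge or uncharge @npages against
 * the locked_vm counter of @mm, enforcing RLIMIT_MEMLOCK unless the caller
 * has CAP_IPC_LOCK.
 */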
static long try_increment_locked_vm(struct mm_struct *mm, long npages)
{
	long ret = 0, locked, lock_limit;

	if (WARN_ON_ONCE(!mm))
		return -EPERM;

	if (!npages)
		return 0;

	down_write(&mm->mmap_sem);
	locked = mm->locked_vm + npages;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (locked > lock_limit && !capable(CAP_IPC_LOCK))
		ret = -ENOMEM;
	else
		mm->locked_vm += npages;

	pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid,
			npages << PAGE_SHIFT,
			mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK),
			ret ? " - exceeded" : "");

	up_write(&mm->mmap_sem);

	return ret;
}

static void decrement_locked_vm(struct mm_struct *mm, long npages)
{
	if (!mm || !npages)
		return;

	down_write(&mm->mmap_sem);
	if (WARN_ON_ONCE(npages > mm->locked_vm))
		npages = mm->locked_vm;
	mm->locked_vm -= npages;
	pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid,
			npages << PAGE_SHIFT,
			mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK));
	up_write(&mm->mmap_sem);
}

/*
 * VFIO IOMMU fd for SPAPR_TCE IOMMU implementation
 *
 * This code handles mapping and unmapping of user data buffers
 * into DMA'ble space using the IOMMU
 */

struct tce_iommu_group {
	struct list_head next;
	struct iommu_group *grp;
};

/*
 * A container needs to remember which preregistered region it has
 * referenced to do proper cleanup at the userspace process exit.
 */
struct tce_iommu_prereg {
	struct list_head next;
	struct mm_iommu_table_group_mem_t *mem;
};

/*
 * The container descriptor supports only a single group per container.
 * Required by the API as the container is not supplied with the IOMMU group
 * at the moment of initialization.
 */
struct tce_container {
	struct mutex lock;
	bool enabled;
	bool v2;
	bool def_window_pending;
	unsigned long locked_pages;
	struct mm_struct *mm;
	struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
	struct list_head group_list;
	struct list_head prereg_list;
};

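/*
 * Binds the container to the memory context of the calling process on first
 * use and takes a reference on it; a later caller with a different mm is
 * rejected with -EPERM.
 */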
static long tce_iommu_mm_set(struct tce_container *container)
{
	if (container->mm) {
		if (container->mm == current->mm)
			return 0;
		return -EPERM;
	}
	BUG_ON(!current->mm);
	container->mm = current->mm;
	atomic_inc(&container->mm->mm_count);

	return 0;
}

static long tce_iommu_prereg_free(struct tce_container *container,
		struct tce_iommu_prereg *tcemem)
{
	long ret;

	ret = mm_iommu_put(container->mm, tcemem->mem);
	if (ret)
		return ret;

	list_del(&tcemem->next);
	kfree(tcemem);

	return 0;
}

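/*
 * Handles VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY: finds the preregistered
 * region matching @vaddr/@size on the container's list and drops the
 * reference taken at registration time.
 */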
static long tce_iommu_unregister_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
{
	struct mm_iommu_table_group_mem_t *mem;
	struct tce_iommu_prereg *tcemem;
	bool found = false;
	long ret;

	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
		return -EINVAL;

	mem = mm_iommu_get(container->mm, vaddr, size >> PAGE_SHIFT);
	if (!mem)
		return -ENOENT;

	list_for_each_entry(tcemem, &container->prereg_list, next) {
		if (tcemem->mem == mem) {
			found = true;
			break;
		}
	}

	if (!found)
		ret = -ENOENT;
	else
		ret = tce_iommu_prereg_free(container, tcemem);

	mm_iommu_put(container->mm, mem);

	return ret;
}

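/*
 * Handles VFIO_IOMMU_SPAPR_REGISTER_MEMORY: pins the userspace range and
 * remembers it on the container's prereg_list so that v2 mappings can be
 * translated later without pinning pages in the map/unmap hot path.
 */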
static long tce_iommu_register_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
{
	long ret = 0;
	struct mm_iommu_table_group_mem_t *mem = NULL;
	struct tce_iommu_prereg *tcemem;
	unsigned long entries = size >> PAGE_SHIFT;

	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
			((vaddr + size) < vaddr))
		return -EINVAL;

	mem = mm_iommu_get(container->mm, vaddr, entries);
	if (mem) {
		list_for_each_entry(tcemem, &container->prereg_list, next) {
			if (tcemem->mem == mem) {
				ret = -EBUSY;
				goto put_exit;
			}
		}
	} else {
		ret = mm_iommu_new(container->mm, vaddr, entries, &mem);
		if (ret)
			return ret;
	}

	tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL);
	if (!tcemem) {
		ret = -ENOMEM;
		goto put_exit;
	}

	tcemem->mem = mem;
	list_add(&tcemem->next, &container->prereg_list);

	container->enabled = true;

	return 0;

put_exit:
	mm_iommu_put(container->mm, mem);
	return ret;
}

static bool tce_page_is_contained(struct mm_struct *mm, unsigned long hpa,
		unsigned int page_shift)
{
	struct page *page;
	unsigned long size = 0;

	if (mm_iommu_is_devmem(mm, hpa, page_shift, &size))
		return size == (1UL << page_shift);

	page = pfn_to_page(hpa >> PAGE_SHIFT);
	/*
	 * Check that the TCE table granularity is not bigger than the size of
	 * a page we just found. Otherwise the hardware can get access to
	 * a bigger memory chunk than it should.
	 */
	return (PAGE_SHIFT + compound_order(compound_head(page))) >= page_shift;
}

static inline bool tce_groups_attached(struct tce_container *container)
{
	return !list_empty(&container->group_list);
}

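/*
 * Returns the index of the container window (TCE table) covering @ioba,
 * or -1 if no table contains that address.
 */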
static long tce_iommu_find_table(struct tce_container *container,
		phys_addr_t ioba, struct iommu_table **ptbl)
{
	long i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (tbl) {
			unsigned long entry = ioba >> tbl->it_page_shift;
			unsigned long start = tbl->it_offset;
			unsigned long end = start + tbl->it_size;

			if ((start <= entry) && (entry < end)) {
				*ptbl = tbl;
				return i;
			}
		}
	}

	return -1;
}

static int tce_iommu_find_free_table(struct tce_container *container)
{
	int i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		if (!container->tables[i])
			return i;
	}

	return -ENOSPC;
}

static int tce_iommu_enable(struct tce_container *container)
{
	int ret = 0;
	unsigned long locked;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp;

	if (container->enabled)
		return -EBUSY;

	/*
	 * When userspace pages are mapped into the IOMMU, they are effectively
	 * locked memory, so, theoretically, we need to update the accounting
	 * of locked pages on each map and unmap. For powerpc, the map/unmap
	 * paths can be very hot, though, and the accounting would kill
	 * performance, especially since it would be difficult, if not
	 * impossible, to handle the accounting in real mode only.
	 *
	 * To address that, rather than precisely accounting every page, we
	 * instead account for a worst case on locked memory when the iommu is
	 * enabled and disabled. The worst case upper bound on locked memory
	 * is the size of the whole iommu window, which is usually relatively
	 * small (compared to total memory sizes) on POWER hardware.
	 *
	 * Also, we don't have a nice way to fail on H_PUT_TCE due to ulimits;
	 * that would effectively kill the guest at random points, so it is
	 * much better to enforce the limit based on the maximum that the
	 * guest can map.
	 *
	 * Unfortunately, at the moment this counts whole tables, no matter
	 * how much memory the guest actually has. I.e. for a 4GB guest and
	 * 4 IOMMU groups, each with a 2GB DMA window, 8GB will be counted
	 * here. The reason for this is that we cannot tell here the amount of
	 * RAM used by the guest as this information is only available from
	 * KVM and VFIO is KVM agnostic.
	 *
	 * So we do not allow enabling a container without a group attached
	 * as there is no way to know how much we should increment
	 * the locked_vm counter.
	 */
	if (!tce_groups_attached(container))
		return -ENODEV;

	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -ENODEV;

	if (!table_group->tce32_size)
		return -EPERM;

	ret = tce_iommu_mm_set(container);
	if (ret)
		return ret;

	locked = table_group->tce32_size >> PAGE_SHIFT;
	ret = try_increment_locked_vm(container->mm, locked);
	if (ret)
		return ret;

	container->locked_pages = locked;

	container->enabled = true;

	return ret;
}

static void tce_iommu_disable(struct tce_container *container)
{
	if (!container->enabled)
		return;

	container->enabled = false;

	BUG_ON(!container->mm);
	decrement_locked_vm(container->mm, container->locked_pages);
}

static void *tce_iommu_open(unsigned long arg)
{
	struct tce_container *container;

	if ((arg != VFIO_SPAPR_TCE_IOMMU) && (arg != VFIO_SPAPR_TCE_v2_IOMMU)) {
		pr_err("tce_vfio: Wrong IOMMU type\n");
		return ERR_PTR(-EINVAL);
	}

	container = kzalloc(sizeof(*container), GFP_KERNEL);
	if (!container)
		return ERR_PTR(-ENOMEM);

	mutex_init(&container->lock);
	INIT_LIST_HEAD_RCU(&container->group_list);
	INIT_LIST_HEAD_RCU(&container->prereg_list);

	container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU;

	return container;
}

static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages);
static void tce_iommu_free_table(struct tce_container *container,
		struct iommu_table *tbl);

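/*
 * Called when the last reference to the container fd is dropped: detaches
 * any remaining groups, disposes of tables created by VFIO, releases the
 * preregistered memory and drops the mm reference.
 */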
static void tce_iommu_release(void *iommu_data)
{
	struct tce_container *container = iommu_data;
	struct tce_iommu_group *tcegrp;
	struct tce_iommu_prereg *tcemem, *tmtmp;
	long i;

	while (tce_groups_attached(container)) {
		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		tce_iommu_detach_group(iommu_data, tcegrp->grp);
	}

	/*
	 * If VFIO created a table, it was not disposed
	 * by tce_iommu_detach_group() so do it now.
	 */
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
		tce_iommu_free_table(container, tbl);
	}

	list_for_each_entry_safe(tcemem, tmtmp, &container->prereg_list, next)
		WARN_ON(tce_iommu_prereg_free(container, tcemem));

	tce_iommu_disable(container);
	if (container->mm)
		mmdrop(container->mm);
	mutex_destroy(&container->lock);

	kfree(container);
}

static void tce_iommu_unuse_page(struct tce_container *container,
		unsigned long hpa)
{
	struct page *page;

	page = pfn_to_page(hpa >> PAGE_SHIFT);
	put_page(page);
}

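/*
 * Translates a userspace address into a host physical address through the
 * preregistered memory; also returns the region so the caller can manage
 * its "mapped" counter.
 */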
static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
		unsigned long tce, unsigned long shift,
		unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
{
	long ret = 0;
	struct mm_iommu_table_group_mem_t *mem;

	mem = mm_iommu_lookup(container->mm, tce, 1ULL << shift);
	if (!mem)
		return -EINVAL;

	ret = mm_iommu_ua_to_hpa(mem, tce, shift, phpa);
	if (ret)
		return -EINVAL;

	*pmem = mem;

	return 0;
}

static void tce_iommu_unuse_page_v2(struct tce_container *container,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	int ret;
	unsigned long hpa = 0;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

	if (!pua)
		return;

	ret = tce_iommu_prereg_ua_to_hpa(container, be64_to_cpu(*pua),
			tbl->it_page_shift, &hpa, &mem);
	if (ret)
		pr_debug("%s: tce %llx at #%lx was not cached, ret=%d\n",
				__func__, be64_to_cpu(*pua), entry, ret);
	if (mem)
		mm_iommu_mapped_dec(mem);

	*pua = cpu_to_be64(0);
}

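/*
 * Clears @pages TCE entries starting at @entry and releases the pages they
 * referenced: put_page() for v1 mappings, the preregistered memory "mapped"
 * counter for v2.
 */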
static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages)
{
	unsigned long oldhpa;
	long ret;
	enum dma_data_direction direction;
	unsigned long lastentry = entry + pages;

	for ( ; entry < lastentry; ++entry) {
		if (tbl->it_indirect_levels && tbl->it_userspace) {
			/*
			 * For multilevel tables, we can take a shortcut here
			 * and skip some TCEs as we know that the userspace
			 * addresses cache is a mirror of the real TCE table
			 * and if it is missing some indirect levels, then
			 * the hardware table does not have them allocated
			 * either and therefore does not require updating.
			 */
			__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl,
					entry);
			if (!pua) {
				/* align to level_size which is power of two */
				entry |= tbl->it_level_size - 1;
				continue;
			}
		}

		cond_resched();

		direction = DMA_NONE;
		oldhpa = 0;
		ret = iommu_tce_xchg(container->mm, tbl, entry, &oldhpa,
				&direction);
		if (ret)
			continue;

		if (direction == DMA_NONE)
			continue;

		if (container->v2) {
			tce_iommu_unuse_page_v2(container, tbl, entry);
			continue;
		}

		tce_iommu_unuse_page(container, oldhpa);
	}

	return 0;
}

static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa)
{
	struct page *page = NULL;
	enum dma_data_direction direction = iommu_tce_direction(tce);

	if (get_user_pages_fast(tce & PAGE_MASK, 1,
			direction != DMA_TO_DEVICE ? FOLL_WRITE : 0,
			&page) != 1)
		return -EFAULT;

	*hpa = __pa((unsigned long) page_address(page));

	return 0;
}

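/*
 * The v1 map path: pins each userspace page with get_user_pages_fast()
 * (via tce_iommu_use_page()) and programs the translation into the TCE
 * table one IOMMU page at a time.
 */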
static long tce_iommu_build(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	unsigned long hpa;
	enum dma_data_direction dirtmp;

	for (i = 0; i < pages; ++i) {
		unsigned long offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;

		ret = tce_iommu_use_page(tce, &hpa);
		if (ret)
			break;

		if (!tce_page_is_contained(container->mm, hpa,
				tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}

		hpa |= offset;
		dirtmp = direction;
		ret = iommu_tce_xchg(container->mm, tbl, entry + i, &hpa,
				&dirtmp);
		if (ret) {
			tce_iommu_unuse_page(container, hpa);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page(container, hpa);

		tce += IOMMU_PAGE_SIZE(tbl);
	}

	if (ret)
		tce_iommu_clear(container, tbl, entry, i);

	return ret;
}

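/*
 * The v2 map path: userspace addresses must fall into memory that has been
 * preregistered (and pinned) earlier, so only a translation and a "mapped"
 * reference count update are needed here.
 */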
static long tce_iommu_build_v2(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	unsigned long hpa;
	enum dma_data_direction dirtmp;

	for (i = 0; i < pages; ++i) {
		struct mm_iommu_table_group_mem_t *mem = NULL;
		__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry + i);

		ret = tce_iommu_prereg_ua_to_hpa(container,
				tce, tbl->it_page_shift, &hpa, &mem);
		if (ret)
			break;

		if (!tce_page_is_contained(container->mm, hpa,
				tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}

		/* Preserve offset within IOMMU page */
		hpa |= tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
		dirtmp = direction;

		/* The registered region is being unregistered */
		if (mm_iommu_mapped_inc(mem))
			break;

		ret = iommu_tce_xchg(container->mm, tbl, entry + i, &hpa,
				&dirtmp);
		if (ret) {
			/* dirtmp cannot be DMA_NONE here */
			tce_iommu_unuse_page_v2(container, tbl, entry + i);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page_v2(container, tbl, entry + i);

		*pua = cpu_to_be64(tce);

		tce += IOMMU_PAGE_SIZE(tbl);
	}

	if (ret)
		tce_iommu_clear(container, tbl, entry, i);

	return ret;
}

static long tce_iommu_create_table(struct tce_container *container,
		struct iommu_table_group *table_group,
		int num,
		__u32 page_shift,
		__u64 window_size,
		__u32 levels,
		struct iommu_table **ptbl)
{
	long ret, table_size;

	table_size = table_group->ops->get_table_size(page_shift, window_size,
			levels);
	if (!table_size)
		return -EINVAL;

	ret = try_increment_locked_vm(container->mm, table_size >> PAGE_SHIFT);
	if (ret)
		return ret;

	ret = table_group->ops->create_table(table_group, num,
			page_shift, window_size, levels, ptbl);

	WARN_ON(!ret && !(*ptbl)->it_ops->free);
	WARN_ON(!ret && ((*ptbl)->it_allocated_size > table_size));

	return ret;
}

static void tce_iommu_free_table(struct tce_container *container,
		struct iommu_table *tbl)
{
	unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;

	iommu_tce_table_put(tbl);
	decrement_locked_vm(container->mm, pages);
}

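/*
 * Allocates a new DMA window via the platform ops, charges its table
 * against locked_vm and programs the window into every attached group.
 */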
static long tce_iommu_create_window(struct tce_container *container,
		__u32 page_shift, __u64 window_size, __u32 levels,
		__u64 *start_addr)
{
	struct tce_iommu_group *tcegrp;
	struct iommu_table_group *table_group;
	struct iommu_table *tbl = NULL;
	long ret, num;

	num = tce_iommu_find_free_table(container);
	if (num < 0)
		return num;

	/* Get the first group for ops::create_table */
	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -EFAULT;

	if (!(table_group->pgsizes & (1ULL << page_shift)))
		return -EINVAL;

	if (!table_group->ops->set_window || !table_group->ops->unset_window ||
			!table_group->ops->get_table_size ||
			!table_group->ops->create_table)
		return -EPERM;

	/* Create TCE table */
	ret = tce_iommu_create_table(container, table_group, num,
			page_shift, window_size, levels, &tbl);
	if (ret)
		return ret;

	BUG_ON(!tbl->it_ops->free);

	/*
	 * Program the table to every group.
	 * Groups have been tested for compatibility at the attach time.
	 */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		ret = table_group->ops->set_window(table_group, num, tbl);
		if (ret)
			goto unset_exit;
	}

	container->tables[num] = tbl;

	/* Return start address assigned by platform in create_table() */
	*start_addr = tbl->it_offset << tbl->it_page_shift;

	return 0;

unset_exit:
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);
		table_group->ops->unset_window(table_group, num);
	}
	tce_iommu_free_table(container, tbl);

	return ret;
}

static long tce_iommu_remove_window(struct tce_container *container,
		__u64 start_addr)
{
	struct iommu_table_group *table_group = NULL;
	struct iommu_table *tbl;
	struct tce_iommu_group *tcegrp;
	int num;

	num = tce_iommu_find_table(container, start_addr, &tbl);
	if (num < 0)
		return -EINVAL;

	BUG_ON(!tbl->it_size);

	/* Detach groups from IOMMUs */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		/*
		 * SPAPR TCE IOMMU exposes the default DMA window to
		 * the guest via dma32_window_start/size of
		 * VFIO_IOMMU_SPAPR_TCE_GET_INFO. Some platforms allow
		 * the userspace to remove this window, some do not, so
		 * here we check for the platform capability.
		 */
		if (!table_group->ops || !table_group->ops->unset_window)
			return -EPERM;

		table_group->ops->unset_window(table_group, num);
	}

	/* Free table */
	tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
	tce_iommu_free_table(container, tbl);
	container->tables[num] = NULL;

	return 0;
}

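/*
 * Creates the default 32-bit DMA window whose creation was deferred at
 * group attach time; it is a no-op unless a default window is still
 * pending, and is called from the DMA map/unmap and window create paths.
 */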
static long tce_iommu_create_default_window(struct tce_container *container)
{
	long ret;
	__u64 start_addr = 0;
	struct tce_iommu_group *tcegrp;
	struct iommu_table_group *table_group;

	if (!container->def_window_pending)
		return 0;

	if (!tce_groups_attached(container))
		return -ENODEV;

	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -ENODEV;

	ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K,
			table_group->tce32_size, 1, &start_addr);
	WARN_ON_ONCE(!ret && start_addr);

	if (!ret)
		container->def_window_pending = false;

	return ret;
}

static long tce_iommu_ioctl(void *iommu_data,
				 unsigned int cmd, unsigned long arg)
{
	struct tce_container *container = iommu_data;
	unsigned long minsz, ddwsz;
	long ret;

	switch (cmd) {
	case VFIO_CHECK_EXTENSION:
		switch (arg) {
		case VFIO_SPAPR_TCE_IOMMU:
		case VFIO_SPAPR_TCE_v2_IOMMU:
			ret = 1;
			break;
		default:
			ret = vfio_spapr_iommu_eeh_ioctl(NULL, cmd, arg);
			break;
		}

		return (ret < 0) ? 0 : ret;
	}

	/*
	 * Sanity check to prevent one userspace from manipulating
	 * another userspace mm.
	 */
	BUG_ON(!container);
	if (container->mm && container->mm != current->mm)
		return -EPERM;

	switch (cmd) {
	case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
		struct vfio_iommu_spapr_tce_info info;
		struct tce_iommu_group *tcegrp;
		struct iommu_table_group *table_group;

		if (!tce_groups_attached(container))
			return -ENXIO;

		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		if (!table_group)
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_info,
				dma32_window_size);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.dma32_window_start = table_group->tce32_start;
		info.dma32_window_size = table_group->tce32_size;
		info.flags = 0;
		memset(&info.ddw, 0, sizeof(info.ddw));

		if (table_group->max_dynamic_windows_supported &&
				container->v2) {
			info.flags |= VFIO_IOMMU_SPAPR_INFO_DDW;
			info.ddw.pgsizes = table_group->pgsizes;
			info.ddw.max_dynamic_windows_supported =
				table_group->max_dynamic_windows_supported;
			info.ddw.levels = table_group->max_levels;
		}

		ddwsz = offsetofend(struct vfio_iommu_spapr_tce_info, ddw);

		if (info.argsz >= ddwsz)
			minsz = ddwsz;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_IOMMU_MAP_DMA: {
		struct vfio_iommu_type1_dma_map param;
		struct iommu_table *tbl = NULL;
		long num;
		enum dma_data_direction direction;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		if (param.flags & ~(VFIO_DMA_MAP_FLAG_READ |
				VFIO_DMA_MAP_FLAG_WRITE))
			return -EINVAL;

		ret = tce_iommu_create_default_window(container);
		if (ret)
			return ret;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		if ((param.size & ~IOMMU_PAGE_MASK(tbl)) ||
				(param.vaddr & ~IOMMU_PAGE_MASK(tbl)))
			return -EINVAL;

		/* iova is checked by the IOMMU API */
		if (param.flags & VFIO_DMA_MAP_FLAG_READ) {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_BIDIRECTIONAL;
			else
				direction = DMA_TO_DEVICE;
		} else {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_FROM_DEVICE;
			else
				return -EINVAL;
		}

		ret = iommu_tce_put_param_check(tbl, param.iova, param.vaddr);
		if (ret)
			return ret;

		if (container->v2)
			ret = tce_iommu_build_v2(container, tbl,
					param.iova >> tbl->it_page_shift,
					param.vaddr,
					param.size >> tbl->it_page_shift,
					direction);
		else
			ret = tce_iommu_build(container, tbl,
					param.iova >> tbl->it_page_shift,
					param.vaddr,
					param.size >> tbl->it_page_shift,
					direction);

		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_UNMAP_DMA: {
		struct vfio_iommu_type1_dma_unmap param;
		struct iommu_table *tbl = NULL;
		long num;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_unmap,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		ret = tce_iommu_create_default_window(container);
		if (ret)
			return ret;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		if (param.size & ~IOMMU_PAGE_MASK(tbl))
			return -EINVAL;

		ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
				param.size >> tbl->it_page_shift);
		if (ret)
			return ret;

		ret = tce_iommu_clear(container, tbl,
				param.iova >> tbl->it_page_shift,
				param.size >> tbl->it_page_shift);
		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_SPAPR_REGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		if (!container->v2)
			break;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
				size);

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		mutex_lock(&container->lock);
		ret = tce_iommu_register_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}
	case VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		if (!container->v2)
			break;

		if (!container->mm)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		mutex_lock(&container->lock);
		ret = tce_iommu_unregister_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}
	case VFIO_IOMMU_ENABLE:
		if (container->v2)
			break;

		mutex_lock(&container->lock);
		ret = tce_iommu_enable(container);
		mutex_unlock(&container->lock);
		return ret;


	case VFIO_IOMMU_DISABLE:
		if (container->v2)
			break;

		mutex_lock(&container->lock);
		tce_iommu_disable(container);
		mutex_unlock(&container->lock);
		return 0;

	case VFIO_EEH_PE_OP: {
		struct tce_iommu_group *tcegrp;

		ret = 0;
		list_for_each_entry(tcegrp, &container->group_list, next) {
			ret = vfio_spapr_iommu_eeh_ioctl(tcegrp->grp,
						cmd, arg);
			if (ret)
				return ret;
		}
		return ret;
	}

	case VFIO_IOMMU_SPAPR_TCE_CREATE: {
		struct vfio_iommu_spapr_tce_create create;

		if (!container->v2)
			break;

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (!tce_groups_attached(container))
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_create,
				start_addr);

		if (copy_from_user(&create, (void __user *)arg, minsz))
			return -EFAULT;

		if (create.argsz < minsz)
			return -EINVAL;

		if (create.flags)
			return -EINVAL;

		mutex_lock(&container->lock);

		ret = tce_iommu_create_default_window(container);
		if (!ret)
			ret = tce_iommu_create_window(container,
					create.page_shift,
					create.window_size, create.levels,
					&create.start_addr);

		mutex_unlock(&container->lock);

		if (!ret && copy_to_user((void __user *)arg, &create, minsz))
			ret = -EFAULT;

		return ret;
	}
	case VFIO_IOMMU_SPAPR_TCE_REMOVE: {
		struct vfio_iommu_spapr_tce_remove remove;

		if (!container->v2)
			break;

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (!tce_groups_attached(container))
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_remove,
				start_addr);

		if (copy_from_user(&remove, (void __user *)arg, minsz))
			return -EFAULT;

		if (remove.argsz < minsz)
			return -EINVAL;

		if (remove.flags)
			return -EINVAL;

		if (container->def_window_pending && !remove.start_addr) {
			container->def_window_pending = false;
			return 0;
		}

		mutex_lock(&container->lock);

		ret = tce_iommu_remove_window(container, remove.start_addr);

		mutex_unlock(&container->lock);

		return ret;
	}
	}

	return -ENOTTY;
}

static void tce_iommu_release_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	int i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
		if (tbl->it_map)
			iommu_release_ownership(tbl);

		container->tables[i] = NULL;
	}
}

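/*
 * Ownership handover for hosts without dynamic DMA window support: take
 * exclusive control of the group's existing tables and publish them in
 * the container.
 */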
static int tce_iommu_take_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	int i, j, rc = 0;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = table_group->tables[i];

		if (!tbl || !tbl->it_map)
			continue;

		rc = iommu_take_ownership(tbl);
		if (rc) {
			for (j = 0; j < i; ++j)
				iommu_release_ownership(
						table_group->tables[j]);

			return rc;
		}
	}

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		container->tables[i] = table_group->tables[i];

	return 0;
}

static void tce_iommu_release_ownership_ddw(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	long i;

	if (!table_group->ops->unset_window) {
		WARN_ON_ONCE(1);
		return;
	}

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		if (container->tables[i])
			table_group->ops->unset_window(table_group, i);

	table_group->ops->release_ownership(table_group);
}

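/*
 * Ownership handover for hosts with dynamic DMA window support: the
 * platform gives up its windows and any tables the container already owns
 * are programmed into the group.
 */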
static long tce_iommu_take_ownership_ddw(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	long i, ret = 0;

	if (!table_group->ops->create_table || !table_group->ops->set_window ||
			!table_group->ops->release_ownership) {
		WARN_ON_ONCE(1);
		return -EFAULT;
	}

	table_group->ops->take_ownership(table_group);

	/* Set all windows to the new group */
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		ret = table_group->ops->set_window(table_group, i, tbl);
		if (ret)
			goto release_exit;
	}

	return 0;

release_exit:
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		table_group->ops->unset_window(table_group, i);

	table_group->ops->release_ownership(table_group);

	return ret;
}

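/*
 * Attaches an IOMMU group to the container: v1 containers take over the
 * group's existing tables, v2 containers use the dynamic DMA window ops;
 * all groups in a container must have compatible table_group ops.
 */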
static int tce_iommu_attach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	int ret;
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp = NULL;

	mutex_lock(&container->lock);

	/* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
			iommu_group_id(iommu_group), iommu_group); */
	table_group = iommu_group_get_iommudata(iommu_group);
	if (!table_group) {
		ret = -ENODEV;
		goto unlock_exit;
	}

	if (tce_groups_attached(container) && (!table_group->ops ||
			!table_group->ops->take_ownership ||
			!table_group->ops->release_ownership)) {
		ret = -EBUSY;
		goto unlock_exit;
	}

	/* Check if new group has the same iommu_ops (i.e. compatible) */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		struct iommu_table_group *table_group_tmp;

		if (tcegrp->grp == iommu_group) {
			pr_warn("tce_vfio: Group %d is already attached\n",
					iommu_group_id(iommu_group));
			ret = -EBUSY;
			goto unlock_exit;
		}
		table_group_tmp = iommu_group_get_iommudata(tcegrp->grp);
		if (table_group_tmp->ops->create_table !=
				table_group->ops->create_table) {
			pr_warn("tce_vfio: Group %d is incompatible with group %d\n",
					iommu_group_id(iommu_group),
					iommu_group_id(tcegrp->grp));
			ret = -EPERM;
			goto unlock_exit;
		}
	}

	tcegrp = kzalloc(sizeof(*tcegrp), GFP_KERNEL);
	if (!tcegrp) {
		ret = -ENOMEM;
		goto unlock_exit;
	}

	if (!table_group->ops || !table_group->ops->take_ownership ||
			!table_group->ops->release_ownership) {
		if (container->v2) {
			ret = -EPERM;
			goto unlock_exit;
		}
		ret = tce_iommu_take_ownership(container, table_group);
	} else {
		if (!container->v2) {
			ret = -EPERM;
			goto unlock_exit;
		}
		ret = tce_iommu_take_ownership_ddw(container, table_group);
		if (!tce_groups_attached(container) && !container->tables[0])
			container->def_window_pending = true;
	}

	if (!ret) {
		tcegrp->grp = iommu_group;
		list_add(&tcegrp->next, &container->group_list);
	}

unlock_exit:
	if (ret && tcegrp)
		kfree(tcegrp);

	mutex_unlock(&container->lock);

	return ret;
}

static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	bool found = false;
	struct tce_iommu_group *tcegrp;

	mutex_lock(&container->lock);

	list_for_each_entry(tcegrp, &container->group_list, next) {
		if (tcegrp->grp == iommu_group) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_warn("tce_vfio: detaching unattached group #%u\n",
				iommu_group_id(iommu_group));
		goto unlock_exit;
	}

	list_del(&tcegrp->next);
	kfree(tcegrp);

	table_group = iommu_group_get_iommudata(iommu_group);
	BUG_ON(!table_group);

	if (!table_group->ops || !table_group->ops->release_ownership)
		tce_iommu_release_ownership(container, table_group);
	else
		tce_iommu_release_ownership_ddw(container, table_group);

unlock_exit:
	mutex_unlock(&container->lock);
}

static const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
	.name		= "iommu-vfio-powerpc",
	.owner		= THIS_MODULE,
	.open		= tce_iommu_open,
	.release	= tce_iommu_release,
	.ioctl		= tce_iommu_ioctl,
	.attach_group	= tce_iommu_attach_group,
	.detach_group	= tce_iommu_detach_group,
};

static int __init tce_iommu_init(void)
{
	return vfio_register_iommu_driver(&tce_iommu_driver_ops);
}

static void __exit tce_iommu_cleanup(void)
{
	vfio_unregister_iommu_driver(&tce_iommu_driver_ops);
}

module_init(tce_iommu_init);
module_exit(tce_iommu_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);