/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This file is released under the GPLv2.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 */

#ifndef _IOVA_H_
#define _IOVA_H_

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/dma-mapping.h>

/* iova structure */
struct iova {
	struct rb_node	node;
	unsigned long	pfn_hi;	/* Highest allocated pfn */
	unsigned long	pfn_lo;	/* Lowest allocated pfn */
};
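
/*
 * Example (editor's illustration): an allocation covering the four page
 * frames 0x100..0x103 is stored with
 *
 *	iova->pfn_lo = 0x100;
 *	iova->pfn_hi = 0x103;
 *
 * i.e. both bounds are inclusive, so the size in pages is
 * pfn_hi - pfn_lo + 1 (see iova_size() below).
 */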

struct iova_magazine;
struct iova_cpu_rcache;

#define IOVA_RANGE_CACHE_MAX_SIZE 6	/* log of max cached IOVA range size (in pages) */
#define MAX_GLOBAL_MAGS 32	/* magazines per bin */

struct iova_rcache {
	spinlock_t lock;
	unsigned long depot_size;
	struct iova_magazine *depot[MAX_GLOBAL_MAGS];
	struct iova_cpu_rcache __percpu *cpu_rcaches;
};
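
/*
 * Editor's note (illustrative, not part of the original header): one
 * iova_rcache exists per power-of-two size class, so with
 * IOVA_RANGE_CACHE_MAX_SIZE == 6 only allocations of up to 2^5 = 32 pages
 * are served from the per-CPU magazines; for example an 8-page request maps
 * to size class log2(8) = 3, while a 64-page request bypasses the cache and
 * falls back to the rbtree allocator.
 */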

struct iova_domain;

/* Call-Back from IOVA code into IOMMU drivers */
typedef void (* iova_flush_cb)(struct iova_domain *domain);

/* Destructor for per-entry data */
typedef void (* iova_entry_dtor)(unsigned long data);

/* Number of entries per Flush Queue */
#define IOVA_FQ_SIZE	256

/* Timeout (in ms) after which entries are flushed from the Flush-Queue */
#define IOVA_FQ_TIMEOUT	10

/* Flush Queue entry for deferred flushing */
struct iova_fq_entry {
	unsigned long iova_pfn;
	unsigned long pages;
	unsigned long data;
	u64 counter; /* Flush counter when this entry was added */
};

/* Per-CPU Flush Queue structure */
struct iova_fq {
	struct iova_fq_entry entries[IOVA_FQ_SIZE];
	unsigned head, tail;
	spinlock_t lock;
};
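
/*
 * Usage sketch (editor's illustration, not from the original header): the
 * flush queue is a ring buffer indexed by head/tail. queue_iova() records a
 * freed range at the tail together with the current flush counter, roughly:
 *
 *	fq->entries[fq->tail].iova_pfn = pfn;
 *	fq->entries[fq->tail].pages    = pages;
 *	fq->entries[fq->tail].data     = data;
 *	fq->entries[fq->tail].counter  = atomic64_read(&iovad->fq_flush_start_cnt);
 *	fq->tail = (fq->tail + 1) % IOVA_FQ_SIZE;
 *
 * Entries are only returned to the IOVA allocator from the head once a later
 * IOTLB flush has completed, i.e. once fq_flush_finish_cnt has passed the
 * counter recorded above.
 */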

/* holds all the iova translations for a domain */
struct iova_domain {
	spinlock_t	iova_rbtree_lock; /* Lock to protect update of rbtree */
	struct rb_root	rbroot;		/* iova domain rbtree root */
	struct rb_node	*cached_node;	/* Save last allocated node */
	struct rb_node	*cached32_node; /* Save last 32-bit allocated node */
	unsigned long	granule;	/* pfn granularity for this domain */
	unsigned long	start_pfn;	/* Lower limit for this domain */
	unsigned long	dma_32bit_pfn;
	unsigned long	max32_alloc_size; /* Size of last failed allocation */
	struct iova	anchor;		/* rbtree lookup anchor */
	struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE];	/* IOVA range caches */

	iova_flush_cb	flush_cb;	/* Call-Back function to flush IOMMU
					   TLBs */

	iova_entry_dtor entry_dtor;	/* IOMMU driver specific destructor for
					   iova entry */

	struct iova_fq __percpu *fq;	/* Flush Queue */

	atomic64_t	fq_flush_start_cnt;	/* Number of TLB flushes that
						   have been started */

	atomic64_t	fq_flush_finish_cnt;	/* Number of TLB flushes that
						   have been finished */

	struct timer_list fq_timer;		/* Timer to regularly empty the
						   flush queues */
	atomic_t fq_timer_on;			/* 1 when timer is active, 0
						   when not */
};

static inline unsigned long iova_size(struct iova *iova)
{
	return iova->pfn_hi - iova->pfn_lo + 1;
}

static inline unsigned long iova_shift(struct iova_domain *iovad)
{
	return __ffs(iovad->granule);
}

static inline unsigned long iova_mask(struct iova_domain *iovad)
{
	return iovad->granule - 1;
}

static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova)
{
	return iova & iova_mask(iovad);
}

static inline size_t iova_align(struct iova_domain *iovad, size_t size)
{
	return ALIGN(size, iovad->granule);
}

static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova)
{
	return (dma_addr_t)iova->pfn_lo << iova_shift(iovad);
}

static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
{
	return iova >> iova_shift(iovad);
}
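
/*
 * Worked example (editor's illustration, assuming a domain initialised with
 * a 4 KiB granule, i.e. iovad->granule == 0x1000):
 *
 *	iova_shift(iovad)              == 12
 *	iova_mask(iovad)               == 0xfff
 *	iova_pfn(iovad, 0x12345678)    == 0x12345
 *	iova_offset(iovad, 0x12345678) == 0x678
 *	iova_align(iovad, 0x1001)      == 0x2000
 *
 * iova_dma_addr() performs the inverse of iova_pfn() on the pfn_lo of an
 * allocated struct iova.
 */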

#if IS_ENABLED(CONFIG_IOMMU_IOVA)
int iova_cache_get(void);
void iova_cache_put(void);

struct iova *alloc_iova_mem(void);
void free_iova_mem(struct iova *iova);
void free_iova(struct iova_domain *iovad, unsigned long pfn);
void __free_iova(struct iova_domain *iovad, struct iova *iova);
struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned);
void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,
		    unsigned long size);
void queue_iova(struct iova_domain *iovad,
		unsigned long pfn, unsigned long pages,
		unsigned long data);
unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
			      unsigned long limit_pfn, bool flush_rcache);
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
	unsigned long pfn_hi);
void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
	unsigned long start_pfn);
int init_iova_flush_queue(struct iova_domain *iovad,
			  iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
void put_iova_domain(struct iova_domain *iovad);
struct iova *split_and_remove_iova(struct iova_domain *iovad,
	struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi);
void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
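
/*
 * Usage sketch (editor's illustration; the exact limits and call sequence
 * depend on the IOMMU driver, and the variable names below are hypothetical):
 *
 *	struct iova_domain iovad;
 *	unsigned long pfn;
 *
 *	init_iova_domain(&iovad, SZ_4K, 1);	// 4 KiB granule, start_pfn = 1
 *	pfn = alloc_iova_fast(&iovad, nr_pages,
 *			      DMA_BIT_MASK(32) >> PAGE_SHIFT, true);
 *	if (pfn) {
 *		// map and use the range [pfn, pfn + nr_pages) ...
 *		free_iova_fast(&iovad, pfn, nr_pages);
 *	}
 *	put_iova_domain(&iovad);
 *
 * Drivers that defer IOTLB invalidation additionally call
 * init_iova_flush_queue() after init_iova_domain() and use queue_iova()
 * instead of free_iova_fast() on the unmap path.
 */
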
#else
static inline int iova_cache_get(void)
{
	return -ENOTSUPP;
}

static inline void iova_cache_put(void)
{
}

static inline struct iova *alloc_iova_mem(void)
{
	return NULL;
}

static inline void free_iova_mem(struct iova *iova)
{
}

static inline void free_iova(struct iova_domain *iovad, unsigned long pfn)
{
}

static inline void __free_iova(struct iova_domain *iovad, struct iova *iova)
{
}

static inline struct iova *alloc_iova(struct iova_domain *iovad,
				      unsigned long size,
				      unsigned long limit_pfn,
				      bool size_aligned)
{
	return NULL;
}

static inline void free_iova_fast(struct iova_domain *iovad,
				  unsigned long pfn,
				  unsigned long size)
{
}

static inline void queue_iova(struct iova_domain *iovad,
			      unsigned long pfn, unsigned long pages,
			      unsigned long data)
{
}

static inline unsigned long alloc_iova_fast(struct iova_domain *iovad,
					    unsigned long size,
					    unsigned long limit_pfn,
					    bool flush_rcache)
{
	return 0;
}

static inline struct iova *reserve_iova(struct iova_domain *iovad,
					unsigned long pfn_lo,
					unsigned long pfn_hi)
{
	return NULL;
}

static inline void copy_reserved_iova(struct iova_domain *from,
				      struct iova_domain *to)
{
}

static inline void init_iova_domain(struct iova_domain *iovad,
				    unsigned long granule,
				    unsigned long start_pfn)
{
}

static inline int init_iova_flush_queue(struct iova_domain *iovad,
					iova_flush_cb flush_cb,
					iova_entry_dtor entry_dtor)
{
	return -ENODEV;
}

static inline struct iova *find_iova(struct iova_domain *iovad,
				     unsigned long pfn)
{
	return NULL;
}

static inline void put_iova_domain(struct iova_domain *iovad)
{
}

static inline struct iova *split_and_remove_iova(struct iova_domain *iovad,
						 struct iova *iova,
						 unsigned long pfn_lo,
						 unsigned long pfn_hi)
{
	return NULL;
}

static inline void free_cpu_cached_iovas(unsigned int cpu,
					 struct iova_domain *iovad)
{
}
#endif

#endif