/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2007 Cisco Systems. All rights reserved.
 * Copyright (c) 2020 Intel Corporation. All rights reserved.
 */

#ifndef IB_UMEM_H
#define IB_UMEM_H

#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>

struct ib_ucontext;
struct ib_umem_odp;
struct dma_buf_attach_ops;

struct ib_umem {
	struct ib_device *ibdev;
	struct mm_struct *owning_mm;
	u64 iova;
	size_t length;
	unsigned long address;
	u32 writable : 1;
	u32 is_odp : 1;
	u32 is_dmabuf : 1;
	struct work_struct work;
	struct sg_append_table sgt_append;
};

struct ib_umem_dmabuf {
	struct ib_umem umem;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *first_sg;
	struct scatterlist *last_sg;
	unsigned long first_sg_offset;
	unsigned long last_sg_trim;
	void *private;
	u8 pinned : 1;
};

static inline struct ib_umem_dmabuf *to_ib_umem_dmabuf(struct ib_umem *umem)
{
	return container_of(umem, struct ib_umem_dmabuf, umem);
}

/* Returns the offset of the umem start relative to the first page. */
static inline int ib_umem_offset(struct ib_umem *umem)
{
	return umem->address & ~PAGE_MASK;
}

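/* Returns the offset of the umem's start within a DMA block of size pgsz. */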
static inline unsigned long ib_umem_dma_offset(struct ib_umem *umem,
					       unsigned long pgsz)
{
	return (sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem)) &
	       (pgsz - 1);
}

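/*
 * Returns the number of pgsz-sized, pgsz-aligned blocks needed to cover
 * [iova, iova + length). For example, iova = 0x1ffc00, length = 0x800 and
 * pgsz = 0x1000 touch the blocks at 0x1ff000 and 0x200000, so the result
 * is 2.
 */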
static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,
					    unsigned long pgsz)
{
	return (size_t)((ALIGN(umem->iova + umem->length, pgsz) -
			 ALIGN_DOWN(umem->iova, pgsz))) /
	       pgsz;
}

static inline size_t ib_umem_num_pages(struct ib_umem *umem)
{
	return ib_umem_num_dma_blocks(umem, PAGE_SIZE);
}

static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
						struct ib_umem *umem,
						unsigned long pgsz)
{
	__rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,
				umem->sgt_append.sgt.nents, pgsz);
}

/**
 * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
 * @umem: umem to iterate over
 * @biter: block iterator holding the current position
 * @pgsz: Page size to split the list into
 *
 * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The
 * returned DMA blocks will be aligned to pgsz and span the range:
 * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz)
 *
 * Performs exactly ib_umem_num_dma_blocks() iterations.
 */
#define rdma_umem_for_each_dma_block(umem, biter, pgsz)                        \
	for (__rdma_umem_block_iter_start(biter, umem, pgsz);                  \
	     __rdma_block_iter_next(biter);)

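/*
 * Usage sketch (hypothetical driver code, not taken from this file; a real
 * driver would size 'page_list' with ib_umem_num_dma_blocks()):
 *
 *	struct ib_block_iter biter;
 *	u64 *page_list = ...;
 *
 *	rdma_umem_for_each_dma_block(umem, &biter, pgsz)
 *		*page_list++ = rdma_block_iter_dma_address(&biter);
 */
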
#ifdef CONFIG_INFINIBAND_USER_MEM

struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
			    size_t size, int access);
void ib_umem_release(struct ib_umem *umem);
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length);
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
				     unsigned long pgsz_bitmap,
				     unsigned long virt);

/**
 * ib_umem_find_best_pgoff - Find best HW page size
 *
 * @umem: umem struct
 * @pgsz_bitmap: bitmap of HW supported page sizes
 * @pgoff_bitmask: Mask of bits that can be represented with an offset
 *
 * This is very similar to ib_umem_find_best_pgsz() except instead of accepting
 * an IOVA it accepts a bitmask specifying what address bits can be represented
 * with a page offset.
 *
 * For instance if the HW has multiple page sizes, requires 64 byte alignment,
 * and can support aligned offsets up to 4032 then pgoff_bitmask would be
 * "111111000000".
 *
 * If the pgoff_bitmask requires either alignment in the low bit or an
 * unavailable page size for the high bits, this function returns 0.
 */
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
						    unsigned long pgsz_bitmap,
						    u64 pgoff_bitmask)
{
	struct scatterlist *sg = umem->sgt_append.sgt.sgl;
	dma_addr_t dma_addr;

	dma_addr = sg_dma_address(sg) + (umem->address & ~PAGE_MASK);
	return ib_umem_find_best_pgsz(umem, pgsz_bitmap,
				      dma_addr & pgoff_bitmask);
}

struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset, size_t size,
					  int fd, int access,
					  const struct dma_buf_attach_ops *ops);
struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
						 unsigned long offset,
						 size_t size, int fd,
						 int access);
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf);

#else /* CONFIG_INFINIBAND_USER_MEM */

#include <linux/err.h>

static inline struct ib_umem *ib_umem_get(struct ib_device *device,
					  unsigned long addr, size_t size,
					  int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline void ib_umem_release(struct ib_umem *umem) { }
static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
				    size_t length)
{
	return -EOPNOTSUPP;
}
static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
						   unsigned long pgsz_bitmap,
						   unsigned long virt)
{
	return 0;
}
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
						    unsigned long pgsz_bitmap,
						    u64 pgoff_bitmask)
{
	return 0;
}
static inline
struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset,
					  size_t size, int fd,
					  int access,
					  const struct dma_buf_attach_ops *ops)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned(struct ib_device *device, unsigned long offset,
			  size_t size, int fd, int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
	return -EOPNOTSUPP;
}
static inline void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf) { }
static inline void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf) { }

#endif /* CONFIG_INFINIBAND_USER_MEM */
#endif /* IB_UMEM_H */