/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2007 Cisco Systems. All rights reserved.
 * Copyright (c) 2020 Intel Corporation. All rights reserved.
 */

#ifndef IB_UMEM_H
#define IB_UMEM_H

#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>

struct ib_ucontext;
struct ib_umem_odp;
struct dma_buf_attach_ops;

struct ib_umem {
	struct ib_device *ibdev;
	struct mm_struct *owning_mm;
	u64 iova;
	size_t length;
	unsigned long address;
	u32 writable : 1;
	u32 is_odp : 1;
	u32 is_dmabuf : 1;
	struct work_struct work;
	struct sg_append_table sgt_append;
};

struct ib_umem_dmabuf {
	struct ib_umem umem;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *first_sg;
	struct scatterlist *last_sg;
	unsigned long first_sg_offset;
	unsigned long last_sg_trim;
	void *private;
	u8 pinned : 1;
};

static inline struct ib_umem_dmabuf *to_ib_umem_dmabuf(struct ib_umem *umem)
{
	return container_of(umem, struct ib_umem_dmabuf, umem);
}

/* Returns the offset of the umem start relative to the first page. */
static inline int ib_umem_offset(struct ib_umem *umem)
{
	return umem->address & ~PAGE_MASK;
}

static inline unsigned long ib_umem_dma_offset(struct ib_umem *umem,
					       unsigned long pgsz)
{
	return (sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem)) &
	       (pgsz - 1);
}

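/*
 * Example (illustrative numbers only): if the first SGE maps to DMA address
 * 0xfa001000 and the umem starts 0x234 bytes into its first page, then for
 * pgsz = 0x10000 (64KiB) ib_umem_dma_offset() returns
 * (0xfa001000 + 0x234) & 0xffff = 0x1234, i.e. the umem begins 0x1234 bytes
 * into its first 64KiB DMA block.
 */
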
static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,
					    unsigned long pgsz)
{
	return (size_t)((ALIGN(umem->iova + umem->length, pgsz) -
			 ALIGN_DOWN(umem->iova, pgsz))) /
	       pgsz;
}
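
/*
 * Example (illustrative numbers only): iova = 0x1f000, length = 0x2000 and
 * pgsz = 0x10000 give (ALIGN(0x21000, 0x10000) - ALIGN_DOWN(0x1f000, 0x10000))
 * / 0x10000 = (0x30000 - 0x10000) / 0x10000 = 2; the 8KiB range straddles a
 * 64KiB boundary, so two DMA blocks are needed to cover it.
 */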

static inline size_t ib_umem_num_pages(struct ib_umem *umem)
{
	return ib_umem_num_dma_blocks(umem, PAGE_SIZE);
}

static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
						struct ib_umem *umem,
						unsigned long pgsz)
{
	__rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,
				umem->sgt_append.sgt.nents, pgsz);
}

/**
 * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
 * @umem: umem to iterate over
 * @biter: struct ib_block_iter used as the iteration cursor
 * @pgsz: Page size to split the list into
 *
 * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The
 * returned DMA blocks will be aligned to pgsz and span the range:
 * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz)
 *
 * Performs exactly ib_umem_num_dma_blocks() iterations.
 */
#define rdma_umem_for_each_dma_block(umem, biter, pgsz)                        \
	for (__rdma_umem_block_iter_start(biter, umem, pgsz);                  \
	     __rdma_block_iter_next(biter);)

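/*
 * Usage sketch (illustrative only; "mr", "fill_hw_pte" and the other driver
 * names are hypothetical): a driver typically pins the user range, picks the
 * best HW-supported page size and then walks the aligned DMA blocks to
 * program its address translation table:
 *
 *	struct ib_block_iter biter;
 *	unsigned long pgsz;
 *
 *	mr->umem = ib_umem_get(ibdev, start, length, access_flags);
 *	if (IS_ERR(mr->umem))
 *		return PTR_ERR(mr->umem);
 *
 *	pgsz = ib_umem_find_best_pgsz(mr->umem, hw_pgsz_bitmap, iova);
 *	if (!pgsz)
 *		goto err_release;
 *
 *	rdma_umem_for_each_dma_block(mr->umem, &biter, pgsz)
 *		fill_hw_pte(mr, rdma_block_iter_dma_address(&biter));
 */
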
#ifdef CONFIG_INFINIBAND_USER_MEM

struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
			    size_t size, int access);
void ib_umem_release(struct ib_umem *umem);
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length);
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
				     unsigned long pgsz_bitmap,
				     unsigned long virt);

/**
 * ib_umem_find_best_pgoff - Find best HW page size
 *
 * @umem: umem struct
 * @pgsz_bitmap: bitmap of HW supported page sizes
 * @pgoff_bitmask: Mask of bits that can be represented with an offset
 *
 * This is very similar to ib_umem_find_best_pgsz() except instead of accepting
 * an IOVA it accepts a bitmask specifying what address bits can be represented
 * with a page offset.
 *
 * For instance, if the HW has multiple page sizes, requires 64 byte alignment,
 * and can support aligned offsets up to 4032, then pgoff_bitmask would be
 * "111111000000".
 *
 * If the pgoff_bitmask requires either alignment in the low bit or an
 * unavailable page size for the high bits, this function returns 0.
 */
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
						    unsigned long pgsz_bitmap,
						    u64 pgoff_bitmask)
{
	struct scatterlist *sg = umem->sgt_append.sgt.sgl;
	dma_addr_t dma_addr;

	dma_addr = sg_dma_address(sg) + (umem->address & ~PAGE_MASK);
	return ib_umem_find_best_pgsz(umem, pgsz_bitmap,
				      dma_addr & pgoff_bitmask);
}
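
/*
 * Example (illustrative only): for HW that stores the start of the MR as a
 * page offset of at most 12 bits whose low 6 bits must be zero (64-byte
 * alignment), pgoff_bitmask covers bits 6..11, i.e. 0xfc0:
 *
 *	pgsz = ib_umem_find_best_pgoff(umem, hw_pgsz_bitmap, 0xfc0);
 *	if (!pgsz)
 *		return -EINVAL;
 */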

struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset, size_t size,
					  int fd, int access,
					  const struct dma_buf_attach_ops *ops);
struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
						 unsigned long offset,
						 size_t size, int fd,
						 int access);
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf);
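
/*
 * Usage sketch (illustrative only): HW that cannot revalidate its mapping when
 * the exporter moves the buffer can use the pinned variant, which attaches,
 * pins and maps the dma-buf at get time and keeps it mapped until release:
 *
 *	umem_dmabuf = ib_umem_dmabuf_get_pinned(ibdev, offset, length, fd,
 *						access_flags);
 *	if (IS_ERR(umem_dmabuf))
 *		return PTR_ERR(umem_dmabuf);
 *	...
 *	ib_umem_dmabuf_release(umem_dmabuf);
 *
 * Drivers that can handle moves instead pass their own dma_buf_attach_ops to
 * ib_umem_dmabuf_get() and call ib_umem_dmabuf_map_pages() and
 * ib_umem_dmabuf_unmap_pages() themselves with the dma-buf reservation lock
 * held, e.g. from a page-fault path and the move_notify callback.
 */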

#else /* CONFIG_INFINIBAND_USER_MEM */

#include <linux/err.h>

static inline struct ib_umem *ib_umem_get(struct ib_device *device,
					  unsigned long addr, size_t size,
					  int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline void ib_umem_release(struct ib_umem *umem) { }
static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
				    size_t length)
{
	return -EOPNOTSUPP;
}
static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
						   unsigned long pgsz_bitmap,
						   unsigned long virt)
{
	return 0;
}
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
						    unsigned long pgsz_bitmap,
						    u64 pgoff_bitmask)
{
	return 0;
}
static inline
struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset,
					  size_t size, int fd,
					  int access,
					  const struct dma_buf_attach_ops *ops)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned(struct ib_device *device, unsigned long offset,
			  size_t size, int fd, int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
	return -EOPNOTSUPP;
}
static inline void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf) { }
static inline void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf) { }

#endif /* CONFIG_INFINIBAND_USER_MEM */
#endif /* IB_UMEM_H */